cfgrtl.c (fixup_reorder_chain): Do not emit barriers to BB_FOOTER.
[gcc.git] / gcc / config / s390 / s390.c
1 /* Subroutines used for code generation on IBM S/390 and zSeries
2 Copyright (C) 1999-2013 Free Software Foundation, Inc.
3 Contributed by Hartmut Penner (hpenner@de.ibm.com) and
4 Ulrich Weigand (uweigand@de.ibm.com) and
5 Andreas Krebbel (Andreas.Krebbel@de.ibm.com).
6
7 This file is part of GCC.
8
9 GCC is free software; you can redistribute it and/or modify it under
10 the terms of the GNU General Public License as published by the Free
11 Software Foundation; either version 3, or (at your option) any later
12 version.
13
14 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
15 WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING3. If not see
21 <http://www.gnu.org/licenses/>. */
22
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "tm_p.h"
30 #include "regs.h"
31 #include "hard-reg-set.h"
32 #include "insn-config.h"
33 #include "conditions.h"
34 #include "output.h"
35 #include "insn-attr.h"
36 #include "flags.h"
37 #include "except.h"
38 #include "function.h"
39 #include "recog.h"
40 #include "expr.h"
41 #include "reload.h"
42 #include "diagnostic-core.h"
43 #include "basic-block.h"
44 #include "ggc.h"
45 #include "target.h"
46 #include "target-def.h"
47 #include "debug.h"
48 #include "langhooks.h"
49 #include "optabs.h"
50 #include "gimple.h"
51 #include "df.h"
52 #include "params.h"
53 #include "cfgloop.h"
54 #include "opts.h"
55
56 /* Define the specific costs for a given cpu. */
57
58 struct processor_costs
59 {
60 /* multiplication */
61 const int m; /* cost of an M instruction. */
62 const int mghi; /* cost of an MGHI instruction. */
63 const int mh; /* cost of an MH instruction. */
64 const int mhi; /* cost of an MHI instruction. */
65 const int ml; /* cost of an ML instruction. */
66 const int mr; /* cost of an MR instruction. */
67 const int ms; /* cost of an MS instruction. */
68 const int msg; /* cost of an MSG instruction. */
69 const int msgf; /* cost of an MSGF instruction. */
70 const int msgfr; /* cost of an MSGFR instruction. */
71 const int msgr; /* cost of an MSGR instruction. */
72 const int msr; /* cost of an MSR instruction. */
73 const int mult_df; /* cost of multiplication in DFmode. */
74 const int mxbr;
75 /* square root */
76 const int sqxbr; /* cost of square root in TFmode. */
77 const int sqdbr; /* cost of square root in DFmode. */
78 const int sqebr; /* cost of square root in SFmode. */
79 /* multiply and add */
80 const int madbr; /* cost of multiply and add in DFmode. */
81 const int maebr; /* cost of multiply and add in SFmode. */
82 /* division */
83 const int dxbr;
84 const int ddbr;
85 const int debr;
86 const int dlgr;
87 const int dlr;
88 const int dr;
89 const int dsgfr;
90 const int dsgr;
91 };
92
93 const struct processor_costs *s390_cost;
94
95 static const
96 struct processor_costs z900_cost =
97 {
98 COSTS_N_INSNS (5), /* M */
99 COSTS_N_INSNS (10), /* MGHI */
100 COSTS_N_INSNS (5), /* MH */
101 COSTS_N_INSNS (4), /* MHI */
102 COSTS_N_INSNS (5), /* ML */
103 COSTS_N_INSNS (5), /* MR */
104 COSTS_N_INSNS (4), /* MS */
105 COSTS_N_INSNS (15), /* MSG */
106 COSTS_N_INSNS (7), /* MSGF */
107 COSTS_N_INSNS (7), /* MSGFR */
108 COSTS_N_INSNS (10), /* MSGR */
109 COSTS_N_INSNS (4), /* MSR */
110 COSTS_N_INSNS (7), /* multiplication in DFmode */
111 COSTS_N_INSNS (13), /* MXBR */
112 COSTS_N_INSNS (136), /* SQXBR */
113 COSTS_N_INSNS (44), /* SQDBR */
114 COSTS_N_INSNS (35), /* SQEBR */
115 COSTS_N_INSNS (18), /* MADBR */
116 COSTS_N_INSNS (13), /* MAEBR */
117 COSTS_N_INSNS (134), /* DXBR */
118 COSTS_N_INSNS (30), /* DDBR */
119 COSTS_N_INSNS (27), /* DEBR */
120 COSTS_N_INSNS (220), /* DLGR */
121 COSTS_N_INSNS (34), /* DLR */
122 COSTS_N_INSNS (34), /* DR */
123 COSTS_N_INSNS (32), /* DSGFR */
124 COSTS_N_INSNS (32), /* DSGR */
125 };
126
127 static const
128 struct processor_costs z990_cost =
129 {
130 COSTS_N_INSNS (4), /* M */
131 COSTS_N_INSNS (2), /* MGHI */
132 COSTS_N_INSNS (2), /* MH */
133 COSTS_N_INSNS (2), /* MHI */
134 COSTS_N_INSNS (4), /* ML */
135 COSTS_N_INSNS (4), /* MR */
136 COSTS_N_INSNS (5), /* MS */
137 COSTS_N_INSNS (6), /* MSG */
138 COSTS_N_INSNS (4), /* MSGF */
139 COSTS_N_INSNS (4), /* MSGFR */
140 COSTS_N_INSNS (4), /* MSGR */
141 COSTS_N_INSNS (4), /* MSR */
142 COSTS_N_INSNS (1), /* multiplication in DFmode */
143 COSTS_N_INSNS (28), /* MXBR */
144 COSTS_N_INSNS (130), /* SQXBR */
145 COSTS_N_INSNS (66), /* SQDBR */
146 COSTS_N_INSNS (38), /* SQEBR */
147 COSTS_N_INSNS (1), /* MADBR */
148 COSTS_N_INSNS (1), /* MAEBR */
149 COSTS_N_INSNS (60), /* DXBR */
150 COSTS_N_INSNS (40), /* DDBR */
151 COSTS_N_INSNS (26), /* DEBR */
152 COSTS_N_INSNS (176), /* DLGR */
153 COSTS_N_INSNS (31), /* DLR */
154 COSTS_N_INSNS (31), /* DR */
155 COSTS_N_INSNS (31), /* DSGFR */
156 COSTS_N_INSNS (31), /* DSGR */
157 };
158
159 static const
160 struct processor_costs z9_109_cost =
161 {
162 COSTS_N_INSNS (4), /* M */
163 COSTS_N_INSNS (2), /* MGHI */
164 COSTS_N_INSNS (2), /* MH */
165 COSTS_N_INSNS (2), /* MHI */
166 COSTS_N_INSNS (4), /* ML */
167 COSTS_N_INSNS (4), /* MR */
168 COSTS_N_INSNS (5), /* MS */
169 COSTS_N_INSNS (6), /* MSG */
170 COSTS_N_INSNS (4), /* MSGF */
171 COSTS_N_INSNS (4), /* MSGFR */
172 COSTS_N_INSNS (4), /* MSGR */
173 COSTS_N_INSNS (4), /* MSR */
174 COSTS_N_INSNS (1), /* multiplication in DFmode */
175 COSTS_N_INSNS (28), /* MXBR */
176 COSTS_N_INSNS (130), /* SQXBR */
177 COSTS_N_INSNS (66), /* SQDBR */
178 COSTS_N_INSNS (38), /* SQEBR */
179 COSTS_N_INSNS (1), /* MADBR */
180 COSTS_N_INSNS (1), /* MAEBR */
181 COSTS_N_INSNS (60), /* DXBR */
182 COSTS_N_INSNS (40), /* DDBR */
183 COSTS_N_INSNS (26), /* DEBR */
184 COSTS_N_INSNS (30), /* DLGR */
185 COSTS_N_INSNS (23), /* DLR */
186 COSTS_N_INSNS (23), /* DR */
187 COSTS_N_INSNS (24), /* DSGFR */
188 COSTS_N_INSNS (24), /* DSGR */
189 };
190
191 static const
192 struct processor_costs z10_cost =
193 {
194 COSTS_N_INSNS (10), /* M */
195 COSTS_N_INSNS (10), /* MGHI */
196 COSTS_N_INSNS (10), /* MH */
197 COSTS_N_INSNS (10), /* MHI */
198 COSTS_N_INSNS (10), /* ML */
199 COSTS_N_INSNS (10), /* MR */
200 COSTS_N_INSNS (10), /* MS */
201 COSTS_N_INSNS (10), /* MSG */
202 COSTS_N_INSNS (10), /* MSGF */
203 COSTS_N_INSNS (10), /* MSGFR */
204 COSTS_N_INSNS (10), /* MSGR */
205 COSTS_N_INSNS (10), /* MSR */
206 COSTS_N_INSNS (1) , /* multiplication in DFmode */
207 COSTS_N_INSNS (50), /* MXBR */
208 COSTS_N_INSNS (120), /* SQXBR */
209 COSTS_N_INSNS (52), /* SQDBR */
210 COSTS_N_INSNS (38), /* SQEBR */
211 COSTS_N_INSNS (1), /* MADBR */
212 COSTS_N_INSNS (1), /* MAEBR */
213 COSTS_N_INSNS (111), /* DXBR */
214 COSTS_N_INSNS (39), /* DDBR */
215 COSTS_N_INSNS (32), /* DEBR */
216 COSTS_N_INSNS (160), /* DLGR */
217 COSTS_N_INSNS (71), /* DLR */
218 COSTS_N_INSNS (71), /* DR */
219 COSTS_N_INSNS (71), /* DSGFR */
220 COSTS_N_INSNS (71), /* DSGR */
221 };
222
223 static const
224 struct processor_costs z196_cost =
225 {
226 COSTS_N_INSNS (7), /* M */
227 COSTS_N_INSNS (5), /* MGHI */
228 COSTS_N_INSNS (5), /* MH */
229 COSTS_N_INSNS (5), /* MHI */
230 COSTS_N_INSNS (7), /* ML */
231 COSTS_N_INSNS (7), /* MR */
232 COSTS_N_INSNS (6), /* MS */
233 COSTS_N_INSNS (8), /* MSG */
234 COSTS_N_INSNS (6), /* MSGF */
235 COSTS_N_INSNS (6), /* MSGFR */
236 COSTS_N_INSNS (8), /* MSGR */
237 COSTS_N_INSNS (6), /* MSR */
238 COSTS_N_INSNS (1) , /* multiplication in DFmode */
239 COSTS_N_INSNS (40), /* MXBR B+40 */
240 COSTS_N_INSNS (100), /* SQXBR B+100 */
241 COSTS_N_INSNS (42), /* SQDBR B+42 */
242 COSTS_N_INSNS (28), /* SQEBR B+28 */
243 COSTS_N_INSNS (1), /* MADBR B */
244 COSTS_N_INSNS (1), /* MAEBR B */
245 COSTS_N_INSNS (101), /* DXBR B+101 */
246 COSTS_N_INSNS (29), /* DDBR */
247 COSTS_N_INSNS (22), /* DEBR */
248 COSTS_N_INSNS (160), /* DLGR cracked */
249 COSTS_N_INSNS (160), /* DLR cracked */
250 COSTS_N_INSNS (160), /* DR expanded */
251 COSTS_N_INSNS (160), /* DSGFR cracked */
252 COSTS_N_INSNS (160), /* DSGR cracked */
253 };
254
255 static const
256 struct processor_costs zEC12_cost =
257 {
258 COSTS_N_INSNS (7), /* M */
259 COSTS_N_INSNS (5), /* MGHI */
260 COSTS_N_INSNS (5), /* MH */
261 COSTS_N_INSNS (5), /* MHI */
262 COSTS_N_INSNS (7), /* ML */
263 COSTS_N_INSNS (7), /* MR */
264 COSTS_N_INSNS (6), /* MS */
265 COSTS_N_INSNS (8), /* MSG */
266 COSTS_N_INSNS (6), /* MSGF */
267 COSTS_N_INSNS (6), /* MSGFR */
268 COSTS_N_INSNS (8), /* MSGR */
269 COSTS_N_INSNS (6), /* MSR */
270 COSTS_N_INSNS (1) , /* multiplication in DFmode */
271 COSTS_N_INSNS (40), /* MXBR B+40 */
272 COSTS_N_INSNS (100), /* SQXBR B+100 */
273 COSTS_N_INSNS (42), /* SQDBR B+42 */
274 COSTS_N_INSNS (28), /* SQEBR B+28 */
275 COSTS_N_INSNS (1), /* MADBR B */
276 COSTS_N_INSNS (1), /* MAEBR B */
277 COSTS_N_INSNS (131), /* DXBR B+131 */
278 COSTS_N_INSNS (29), /* DDBR */
279 COSTS_N_INSNS (22), /* DEBR */
280 COSTS_N_INSNS (160), /* DLGR cracked */
281 COSTS_N_INSNS (160), /* DLR cracked */
282 COSTS_N_INSNS (160), /* DR expanded */
283 COSTS_N_INSNS (160), /* DSGFR cracked */
284 COSTS_N_INSNS (160), /* DSGR cracked */
285 };
286
287 extern int reload_completed;
288
289 /* Kept up to date using the SCHED_VARIABLE_ISSUE hook. */
290 static rtx last_scheduled_insn;
291
292 /* Structure used to hold the components of a S/390 memory
293 address. A legitimate address on S/390 is of the general
294 form
295 base + index + displacement
296 where any of the components is optional.
297
298 base and index are registers of the class ADDR_REGS,
299 displacement is an unsigned 12-bit immediate constant. */
300
301 struct s390_address
302 {
303 rtx base;
304 rtx indx;
305 rtx disp;
306 bool pointer;
307 bool literal_pool;
308 };
309
310 /* The following structure is embedded in the machine
311 specific part of struct function. */
312
313 struct GTY (()) s390_frame_layout
314 {
315 /* Offset within stack frame. */
316 HOST_WIDE_INT gprs_offset;
317 HOST_WIDE_INT f0_offset;
318 HOST_WIDE_INT f4_offset;
319 HOST_WIDE_INT f8_offset;
320 HOST_WIDE_INT backchain_offset;
321
322 /* Number of first and last gpr where slots in the register
323 save area are reserved for. */
324 int first_save_gpr_slot;
325 int last_save_gpr_slot;
326
327 /* Number of first and last gpr to be saved, restored. */
328 int first_save_gpr;
329 int first_restore_gpr;
330 int last_save_gpr;
331 int last_restore_gpr;
332
333 /* Bits standing for floating point registers. Set, if the
334 respective register has to be saved. Starting with reg 16 (f0)
335 at the rightmost bit.
336 Bit 15 - 8 7 6 5 4 3 2 1 0
337 fpr 15 - 8 7 5 3 1 6 4 2 0
338 reg 31 - 24 23 22 21 20 19 18 17 16 */
339 unsigned int fpr_bitmap;
340
341 /* Number of floating point registers f8-f15 which must be saved. */
342 int high_fprs;
343
344 /* Set if return address needs to be saved.
345 This flag is set by s390_return_addr_rtx if it could not use
346 the initial value of r14 and therefore depends on r14 saved
347 to the stack. */
348 bool save_return_addr_p;
349
350 /* Size of stack frame. */
351 HOST_WIDE_INT frame_size;
352 };
353
354 /* Define the structure for the machine field in struct function. */
355
356 struct GTY(()) machine_function
357 {
358 struct s390_frame_layout frame_layout;
359
360 /* Literal pool base register. */
361 rtx base_reg;
362
363 /* True if we may need to perform branch splitting. */
364 bool split_branches_pending_p;
365
366 /* Some local-dynamic TLS symbol name. */
367 const char *some_ld_name;
368
369 bool has_landing_pad_p;
370 };
371
372 /* Few accessor macros for struct cfun->machine->s390_frame_layout. */
373
374 #define cfun_frame_layout (cfun->machine->frame_layout)
375 #define cfun_save_high_fprs_p (!!cfun_frame_layout.high_fprs)
376 #define cfun_gprs_save_area_size ((cfun_frame_layout.last_save_gpr_slot - \
377 cfun_frame_layout.first_save_gpr_slot + 1) * UNITS_PER_LONG)
378 #define cfun_set_fpr_bit(BITNUM) (cfun->machine->frame_layout.fpr_bitmap |= \
379 (1 << (BITNUM)))
380 #define cfun_fpr_bit_p(BITNUM) (!!(cfun->machine->frame_layout.fpr_bitmap & \
381 (1 << (BITNUM))))
382
383 /* Number of GPRs and FPRs used for argument passing. */
384 #define GP_ARG_NUM_REG 5
385 #define FP_ARG_NUM_REG (TARGET_64BIT? 4 : 2)
386
387 /* A couple of shortcuts. */
388 #define CONST_OK_FOR_J(x) \
389 CONST_OK_FOR_CONSTRAINT_P((x), 'J', "J")
390 #define CONST_OK_FOR_K(x) \
391 CONST_OK_FOR_CONSTRAINT_P((x), 'K', "K")
392 #define CONST_OK_FOR_Os(x) \
393 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "Os")
394 #define CONST_OK_FOR_Op(x) \
395 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "Op")
396 #define CONST_OK_FOR_On(x) \
397 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "On")
398
399 #define REGNO_PAIR_OK(REGNO, MODE) \
400 (HARD_REGNO_NREGS ((REGNO), (MODE)) == 1 || !((REGNO) & 1))
401
402 /* That's the read ahead of the dynamic branch prediction unit in
403 bytes on a z10 (or higher) CPU. */
404 #define PREDICT_DISTANCE (TARGET_Z10 ? 384 : 2048)
405
406 /* Return the alignment for LABEL. We default to the -falign-labels
407 value except for the literal pool base label. */
408 int
409 s390_label_align (rtx label)
410 {
411 rtx prev_insn = prev_active_insn (label);
412
413 if (prev_insn == NULL_RTX)
414 goto old;
415
416 prev_insn = single_set (prev_insn);
417
418 if (prev_insn == NULL_RTX)
419 goto old;
420
421 prev_insn = SET_SRC (prev_insn);
422
423 /* Don't align literal pool base labels. */
424 if (GET_CODE (prev_insn) == UNSPEC
425 && XINT (prev_insn, 1) == UNSPEC_MAIN_BASE)
426 return 0;
427
428 old:
429 return align_labels_log;
430 }
431
432 static enum machine_mode
433 s390_libgcc_cmp_return_mode (void)
434 {
435 return TARGET_64BIT ? DImode : SImode;
436 }
437
438 static enum machine_mode
439 s390_libgcc_shift_count_mode (void)
440 {
441 return TARGET_64BIT ? DImode : SImode;
442 }
443
444 static enum machine_mode
445 s390_unwind_word_mode (void)
446 {
447 return TARGET_64BIT ? DImode : SImode;
448 }
449
450 /* Return true if the back end supports mode MODE. */
451 static bool
452 s390_scalar_mode_supported_p (enum machine_mode mode)
453 {
454 /* In contrast to the default implementation reject TImode constants on 31bit
455 TARGET_ZARCH for ABI compliance. */
456 if (!TARGET_64BIT && TARGET_ZARCH && mode == TImode)
457 return false;
458
459 if (DECIMAL_FLOAT_MODE_P (mode))
460 return default_decimal_float_supported_p ();
461
462 return default_scalar_mode_supported_p (mode);
463 }
464
465 /* Set the has_landing_pad_p flag in struct machine_function to VALUE. */
466
467 void
468 s390_set_has_landing_pad_p (bool value)
469 {
470 cfun->machine->has_landing_pad_p = value;
471 }
472
473 /* If two condition code modes are compatible, return a condition code
474 mode which is compatible with both. Otherwise, return
475 VOIDmode. */
476
477 static enum machine_mode
478 s390_cc_modes_compatible (enum machine_mode m1, enum machine_mode m2)
479 {
480 if (m1 == m2)
481 return m1;
482
483 switch (m1)
484 {
485 case CCZmode:
486 if (m2 == CCUmode || m2 == CCTmode || m2 == CCZ1mode
487 || m2 == CCSmode || m2 == CCSRmode || m2 == CCURmode)
488 return m2;
489 return VOIDmode;
490
491 case CCSmode:
492 case CCUmode:
493 case CCTmode:
494 case CCSRmode:
495 case CCURmode:
496 case CCZ1mode:
497 if (m2 == CCZmode)
498 return m1;
499
500 return VOIDmode;
501
502 default:
503 return VOIDmode;
504 }
505 return VOIDmode;
506 }
507
508 /* Return true if SET either doesn't set the CC register, or else
509 the source and destination have matching CC modes and that
510 CC mode is at least as constrained as REQ_MODE. */
511
512 static bool
513 s390_match_ccmode_set (rtx set, enum machine_mode req_mode)
514 {
515 enum machine_mode set_mode;
516
517 gcc_assert (GET_CODE (set) == SET);
518
519 if (GET_CODE (SET_DEST (set)) != REG || !CC_REGNO_P (REGNO (SET_DEST (set))))
520 return 1;
521
522 set_mode = GET_MODE (SET_DEST (set));
523 switch (set_mode)
524 {
525 case CCSmode:
526 case CCSRmode:
527 case CCUmode:
528 case CCURmode:
529 case CCLmode:
530 case CCL1mode:
531 case CCL2mode:
532 case CCL3mode:
533 case CCT1mode:
534 case CCT2mode:
535 case CCT3mode:
536 if (req_mode != set_mode)
537 return 0;
538 break;
539
540 case CCZmode:
541 if (req_mode != CCSmode && req_mode != CCUmode && req_mode != CCTmode
542 && req_mode != CCSRmode && req_mode != CCURmode)
543 return 0;
544 break;
545
546 case CCAPmode:
547 case CCANmode:
548 if (req_mode != CCAmode)
549 return 0;
550 break;
551
552 default:
553 gcc_unreachable ();
554 }
555
556 return (GET_MODE (SET_SRC (set)) == set_mode);
557 }
558
559 /* Return true if every SET in INSN that sets the CC register
560 has source and destination with matching CC modes and that
561 CC mode is at least as constrained as REQ_MODE.
562 If REQ_MODE is VOIDmode, always return false. */
563
564 bool
565 s390_match_ccmode (rtx insn, enum machine_mode req_mode)
566 {
567 int i;
568
569 /* s390_tm_ccmode returns VOIDmode to indicate failure. */
570 if (req_mode == VOIDmode)
571 return false;
572
573 if (GET_CODE (PATTERN (insn)) == SET)
574 return s390_match_ccmode_set (PATTERN (insn), req_mode);
575
576 if (GET_CODE (PATTERN (insn)) == PARALLEL)
577 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
578 {
579 rtx set = XVECEXP (PATTERN (insn), 0, i);
580 if (GET_CODE (set) == SET)
581 if (!s390_match_ccmode_set (set, req_mode))
582 return false;
583 }
584
585 return true;
586 }
587
588 /* If a test-under-mask instruction can be used to implement
589 (compare (and ... OP1) OP2), return the CC mode required
590 to do that. Otherwise, return VOIDmode.
591 MIXED is true if the instruction can distinguish between
592 CC1 and CC2 for mixed selected bits (TMxx), it is false
593 if the instruction cannot (TM). */
594
595 enum machine_mode
596 s390_tm_ccmode (rtx op1, rtx op2, bool mixed)
597 {
598 int bit0, bit1;
599
600 /* ??? Fixme: should work on CONST_DOUBLE as well. */
601 if (GET_CODE (op1) != CONST_INT || GET_CODE (op2) != CONST_INT)
602 return VOIDmode;
603
604 /* Selected bits all zero: CC0.
605 e.g.: int a; if ((a & (16 + 128)) == 0) */
606 if (INTVAL (op2) == 0)
607 return CCTmode;
608
609 /* Selected bits all one: CC3.
610 e.g.: int a; if ((a & (16 + 128)) == 16 + 128) */
611 if (INTVAL (op2) == INTVAL (op1))
612 return CCT3mode;
613
614 /* Exactly two bits selected, mixed zeroes and ones: CC1 or CC2. e.g.:
615 int a;
616 if ((a & (16 + 128)) == 16) -> CCT1
617 if ((a & (16 + 128)) == 128) -> CCT2 */
618 if (mixed)
619 {
620 bit1 = exact_log2 (INTVAL (op2));
621 bit0 = exact_log2 (INTVAL (op1) ^ INTVAL (op2));
622 if (bit0 != -1 && bit1 != -1)
623 return bit0 > bit1 ? CCT1mode : CCT2mode;
624 }
625
626 return VOIDmode;
627 }
628
629 /* Given a comparison code OP (EQ, NE, etc.) and the operands
630 OP0 and OP1 of a COMPARE, return the mode to be used for the
631 comparison. */
632
633 enum machine_mode
634 s390_select_ccmode (enum rtx_code code, rtx op0, rtx op1)
635 {
636 switch (code)
637 {
638 case EQ:
639 case NE:
640 if ((GET_CODE (op0) == NEG || GET_CODE (op0) == ABS)
641 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
642 return CCAPmode;
643 if (GET_CODE (op0) == PLUS && GET_CODE (XEXP (op0, 1)) == CONST_INT
644 && CONST_OK_FOR_K (INTVAL (XEXP (op0, 1))))
645 return CCAPmode;
646 if ((GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
647 || GET_CODE (op1) == NEG)
648 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
649 return CCLmode;
650
651 if (GET_CODE (op0) == AND)
652 {
653 /* Check whether we can potentially do it via TM. */
654 enum machine_mode ccmode;
655 ccmode = s390_tm_ccmode (XEXP (op0, 1), op1, 1);
656 if (ccmode != VOIDmode)
657 {
658 /* Relax CCTmode to CCZmode to allow fall-back to AND
659 if that turns out to be beneficial. */
660 return ccmode == CCTmode ? CCZmode : ccmode;
661 }
662 }
663
664 if (register_operand (op0, HImode)
665 && GET_CODE (op1) == CONST_INT
666 && (INTVAL (op1) == -1 || INTVAL (op1) == 65535))
667 return CCT3mode;
668 if (register_operand (op0, QImode)
669 && GET_CODE (op1) == CONST_INT
670 && (INTVAL (op1) == -1 || INTVAL (op1) == 255))
671 return CCT3mode;
672
673 return CCZmode;
674
675 case LE:
676 case LT:
677 case GE:
678 case GT:
679 /* The only overflow condition of NEG and ABS happens when
680 -INT_MAX is used as parameter, which stays negative. So
681 we have an overflow from a positive value to a negative.
682 Using CCAP mode the resulting cc can be used for comparisons. */
683 if ((GET_CODE (op0) == NEG || GET_CODE (op0) == ABS)
684 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
685 return CCAPmode;
686
687 /* If constants are involved in an add instruction it is possible to use
688 the resulting cc for comparisons with zero. Knowing the sign of the
689 constant the overflow behavior gets predictable. e.g.:
690 int a, b; if ((b = a + c) > 0)
691 with c as a constant value: c < 0 -> CCAN and c >= 0 -> CCAP */
692 if (GET_CODE (op0) == PLUS && GET_CODE (XEXP (op0, 1)) == CONST_INT
693 && (CONST_OK_FOR_K (INTVAL (XEXP (op0, 1)))
694 || (CONST_OK_FOR_CONSTRAINT_P (INTVAL (XEXP (op0, 1)), 'O', "Os")
695 /* Avoid INT32_MIN on 32 bit. */
696 && (!TARGET_ZARCH || INTVAL (XEXP (op0, 1)) != -0x7fffffff - 1))))
697 {
698 if (INTVAL (XEXP((op0), 1)) < 0)
699 return CCANmode;
700 else
701 return CCAPmode;
702 }
703 /* Fall through. */
704 case UNORDERED:
705 case ORDERED:
706 case UNEQ:
707 case UNLE:
708 case UNLT:
709 case UNGE:
710 case UNGT:
711 case LTGT:
712 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
713 && GET_CODE (op1) != CONST_INT)
714 return CCSRmode;
715 return CCSmode;
716
717 case LTU:
718 case GEU:
719 if (GET_CODE (op0) == PLUS
720 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
721 return CCL1mode;
722
723 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
724 && GET_CODE (op1) != CONST_INT)
725 return CCURmode;
726 return CCUmode;
727
728 case LEU:
729 case GTU:
730 if (GET_CODE (op0) == MINUS
731 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
732 return CCL2mode;
733
734 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
735 && GET_CODE (op1) != CONST_INT)
736 return CCURmode;
737 return CCUmode;
738
739 default:
740 gcc_unreachable ();
741 }
742 }
743
744 /* Replace the comparison OP0 CODE OP1 by a semantically equivalent one
745 that we can implement more efficiently. */
746
747 static void
748 s390_canonicalize_comparison (int *code, rtx *op0, rtx *op1,
749 bool op0_preserve_value)
750 {
751 if (op0_preserve_value)
752 return;
753
754 /* Convert ZERO_EXTRACT back to AND to enable TM patterns. */
755 if ((*code == EQ || *code == NE)
756 && *op1 == const0_rtx
757 && GET_CODE (*op0) == ZERO_EXTRACT
758 && GET_CODE (XEXP (*op0, 1)) == CONST_INT
759 && GET_CODE (XEXP (*op0, 2)) == CONST_INT
760 && SCALAR_INT_MODE_P (GET_MODE (XEXP (*op0, 0))))
761 {
762 rtx inner = XEXP (*op0, 0);
763 HOST_WIDE_INT modesize = GET_MODE_BITSIZE (GET_MODE (inner));
764 HOST_WIDE_INT len = INTVAL (XEXP (*op0, 1));
765 HOST_WIDE_INT pos = INTVAL (XEXP (*op0, 2));
766
767 if (len > 0 && len < modesize
768 && pos >= 0 && pos + len <= modesize
769 && modesize <= HOST_BITS_PER_WIDE_INT)
770 {
771 unsigned HOST_WIDE_INT block;
772 block = ((unsigned HOST_WIDE_INT) 1 << len) - 1;
773 block <<= modesize - pos - len;
774
775 *op0 = gen_rtx_AND (GET_MODE (inner), inner,
776 gen_int_mode (block, GET_MODE (inner)));
777 }
778 }
779
780 /* Narrow AND of memory against immediate to enable TM. */
781 if ((*code == EQ || *code == NE)
782 && *op1 == const0_rtx
783 && GET_CODE (*op0) == AND
784 && GET_CODE (XEXP (*op0, 1)) == CONST_INT
785 && SCALAR_INT_MODE_P (GET_MODE (XEXP (*op0, 0))))
786 {
787 rtx inner = XEXP (*op0, 0);
788 rtx mask = XEXP (*op0, 1);
789
790 /* Ignore paradoxical SUBREGs if all extra bits are masked out. */
791 if (GET_CODE (inner) == SUBREG
792 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (inner)))
793 && (GET_MODE_SIZE (GET_MODE (inner))
794 >= GET_MODE_SIZE (GET_MODE (SUBREG_REG (inner))))
795 && ((INTVAL (mask)
796 & GET_MODE_MASK (GET_MODE (inner))
797 & ~GET_MODE_MASK (GET_MODE (SUBREG_REG (inner))))
798 == 0))
799 inner = SUBREG_REG (inner);
800
801 /* Do not change volatile MEMs. */
802 if (MEM_P (inner) && !MEM_VOLATILE_P (inner))
803 {
804 int part = s390_single_part (XEXP (*op0, 1),
805 GET_MODE (inner), QImode, 0);
806 if (part >= 0)
807 {
808 mask = gen_int_mode (s390_extract_part (mask, QImode, 0), QImode);
809 inner = adjust_address_nv (inner, QImode, part);
810 *op0 = gen_rtx_AND (QImode, inner, mask);
811 }
812 }
813 }
814
815 /* Narrow comparisons against 0xffff to HImode if possible. */
816 if ((*code == EQ || *code == NE)
817 && GET_CODE (*op1) == CONST_INT
818 && INTVAL (*op1) == 0xffff
819 && SCALAR_INT_MODE_P (GET_MODE (*op0))
820 && (nonzero_bits (*op0, GET_MODE (*op0))
821 & ~(unsigned HOST_WIDE_INT) 0xffff) == 0)
822 {
823 *op0 = gen_lowpart (HImode, *op0);
824 *op1 = constm1_rtx;
825 }
826
827 /* Remove redundant UNSPEC_CCU_TO_INT conversions if possible. */
828 if (GET_CODE (*op0) == UNSPEC
829 && XINT (*op0, 1) == UNSPEC_CCU_TO_INT
830 && XVECLEN (*op0, 0) == 1
831 && GET_MODE (XVECEXP (*op0, 0, 0)) == CCUmode
832 && GET_CODE (XVECEXP (*op0, 0, 0)) == REG
833 && REGNO (XVECEXP (*op0, 0, 0)) == CC_REGNUM
834 && *op1 == const0_rtx)
835 {
836 enum rtx_code new_code = UNKNOWN;
837 switch (*code)
838 {
839 case EQ: new_code = EQ; break;
840 case NE: new_code = NE; break;
841 case LT: new_code = GTU; break;
842 case GT: new_code = LTU; break;
843 case LE: new_code = GEU; break;
844 case GE: new_code = LEU; break;
845 default: break;
846 }
847
848 if (new_code != UNKNOWN)
849 {
850 *op0 = XVECEXP (*op0, 0, 0);
851 *code = new_code;
852 }
853 }
854
855 /* Remove redundant UNSPEC_CCZ_TO_INT conversions if possible. */
856 if (GET_CODE (*op0) == UNSPEC
857 && XINT (*op0, 1) == UNSPEC_CCZ_TO_INT
858 && XVECLEN (*op0, 0) == 1
859 && GET_MODE (XVECEXP (*op0, 0, 0)) == CCZmode
860 && GET_CODE (XVECEXP (*op0, 0, 0)) == REG
861 && REGNO (XVECEXP (*op0, 0, 0)) == CC_REGNUM
862 && *op1 == const0_rtx)
863 {
864 enum rtx_code new_code = UNKNOWN;
865 switch (*code)
866 {
867 case EQ: new_code = EQ; break;
868 case NE: new_code = NE; break;
869 default: break;
870 }
871
872 if (new_code != UNKNOWN)
873 {
874 *op0 = XVECEXP (*op0, 0, 0);
875 *code = new_code;
876 }
877 }
878
879 /* Simplify cascaded EQ, NE with const0_rtx. */
880 if ((*code == NE || *code == EQ)
881 && (GET_CODE (*op0) == EQ || GET_CODE (*op0) == NE)
882 && GET_MODE (*op0) == SImode
883 && GET_MODE (XEXP (*op0, 0)) == CCZ1mode
884 && REG_P (XEXP (*op0, 0))
885 && XEXP (*op0, 1) == const0_rtx
886 && *op1 == const0_rtx)
887 {
888 if ((*code == EQ && GET_CODE (*op0) == NE)
889 || (*code == NE && GET_CODE (*op0) == EQ))
890 *code = EQ;
891 else
892 *code = NE;
893 *op0 = XEXP (*op0, 0);
894 }
895
896 /* Prefer register over memory as first operand. */
897 if (MEM_P (*op0) && REG_P (*op1))
898 {
899 rtx tem = *op0; *op0 = *op1; *op1 = tem;
900 *code = (int)swap_condition ((enum rtx_code)*code);
901 }
902 }
903
904 /* Emit a compare instruction suitable to implement the comparison
905 OP0 CODE OP1. Return the correct condition RTL to be placed in
906 the IF_THEN_ELSE of the conditional branch testing the result. */
907
908 rtx
909 s390_emit_compare (enum rtx_code code, rtx op0, rtx op1)
910 {
911 enum machine_mode mode = s390_select_ccmode (code, op0, op1);
912 rtx cc;
913
914 /* Do not output a redundant compare instruction if a compare_and_swap
915 pattern already computed the result and the machine modes are compatible. */
916 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
917 {
918 gcc_assert (s390_cc_modes_compatible (GET_MODE (op0), mode)
919 == GET_MODE (op0));
920 cc = op0;
921 }
922 else
923 {
924 cc = gen_rtx_REG (mode, CC_REGNUM);
925 emit_insn (gen_rtx_SET (VOIDmode, cc, gen_rtx_COMPARE (mode, op0, op1)));
926 }
927
928 return gen_rtx_fmt_ee (code, VOIDmode, cc, const0_rtx);
929 }
930
931 /* Emit a SImode compare and swap instruction setting MEM to NEW_RTX if OLD
932 matches CMP.
933 Return the correct condition RTL to be placed in the IF_THEN_ELSE of the
934 conditional branch testing the result. */
935
936 static rtx
937 s390_emit_compare_and_swap (enum rtx_code code, rtx old, rtx mem,
938 rtx cmp, rtx new_rtx)
939 {
940 emit_insn (gen_atomic_compare_and_swapsi_internal (old, mem, cmp, new_rtx));
941 return s390_emit_compare (code, gen_rtx_REG (CCZ1mode, CC_REGNUM),
942 const0_rtx);
943 }
944
945 /* Emit a jump instruction to TARGET. If COND is NULL_RTX, emit an
946 unconditional jump, else a conditional jump under condition COND. */
947
948 void
949 s390_emit_jump (rtx target, rtx cond)
950 {
951 rtx insn;
952
953 target = gen_rtx_LABEL_REF (VOIDmode, target);
954 if (cond)
955 target = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, target, pc_rtx);
956
957 insn = gen_rtx_SET (VOIDmode, pc_rtx, target);
958 emit_jump_insn (insn);
959 }
960
961 /* Return branch condition mask to implement a branch
962 specified by CODE. Return -1 for invalid comparisons. */
963
964 int
965 s390_branch_condition_mask (rtx code)
966 {
967 const int CC0 = 1 << 3;
968 const int CC1 = 1 << 2;
969 const int CC2 = 1 << 1;
970 const int CC3 = 1 << 0;
971
972 gcc_assert (GET_CODE (XEXP (code, 0)) == REG);
973 gcc_assert (REGNO (XEXP (code, 0)) == CC_REGNUM);
974 gcc_assert (XEXP (code, 1) == const0_rtx);
975
976 switch (GET_MODE (XEXP (code, 0)))
977 {
978 case CCZmode:
979 case CCZ1mode:
980 switch (GET_CODE (code))
981 {
982 case EQ: return CC0;
983 case NE: return CC1 | CC2 | CC3;
984 default: return -1;
985 }
986 break;
987
988 case CCT1mode:
989 switch (GET_CODE (code))
990 {
991 case EQ: return CC1;
992 case NE: return CC0 | CC2 | CC3;
993 default: return -1;
994 }
995 break;
996
997 case CCT2mode:
998 switch (GET_CODE (code))
999 {
1000 case EQ: return CC2;
1001 case NE: return CC0 | CC1 | CC3;
1002 default: return -1;
1003 }
1004 break;
1005
1006 case CCT3mode:
1007 switch (GET_CODE (code))
1008 {
1009 case EQ: return CC3;
1010 case NE: return CC0 | CC1 | CC2;
1011 default: return -1;
1012 }
1013 break;
1014
1015 case CCLmode:
1016 switch (GET_CODE (code))
1017 {
1018 case EQ: return CC0 | CC2;
1019 case NE: return CC1 | CC3;
1020 default: return -1;
1021 }
1022 break;
1023
1024 case CCL1mode:
1025 switch (GET_CODE (code))
1026 {
1027 case LTU: return CC2 | CC3; /* carry */
1028 case GEU: return CC0 | CC1; /* no carry */
1029 default: return -1;
1030 }
1031 break;
1032
1033 case CCL2mode:
1034 switch (GET_CODE (code))
1035 {
1036 case GTU: return CC0 | CC1; /* borrow */
1037 case LEU: return CC2 | CC3; /* no borrow */
1038 default: return -1;
1039 }
1040 break;
1041
1042 case CCL3mode:
1043 switch (GET_CODE (code))
1044 {
1045 case EQ: return CC0 | CC2;
1046 case NE: return CC1 | CC3;
1047 case LTU: return CC1;
1048 case GTU: return CC3;
1049 case LEU: return CC1 | CC2;
1050 case GEU: return CC2 | CC3;
1051 default: return -1;
1052 }
1053
1054 case CCUmode:
1055 switch (GET_CODE (code))
1056 {
1057 case EQ: return CC0;
1058 case NE: return CC1 | CC2 | CC3;
1059 case LTU: return CC1;
1060 case GTU: return CC2;
1061 case LEU: return CC0 | CC1;
1062 case GEU: return CC0 | CC2;
1063 default: return -1;
1064 }
1065 break;
1066
1067 case CCURmode:
1068 switch (GET_CODE (code))
1069 {
1070 case EQ: return CC0;
1071 case NE: return CC2 | CC1 | CC3;
1072 case LTU: return CC2;
1073 case GTU: return CC1;
1074 case LEU: return CC0 | CC2;
1075 case GEU: return CC0 | CC1;
1076 default: return -1;
1077 }
1078 break;
1079
1080 case CCAPmode:
1081 switch (GET_CODE (code))
1082 {
1083 case EQ: return CC0;
1084 case NE: return CC1 | CC2 | CC3;
1085 case LT: return CC1 | CC3;
1086 case GT: return CC2;
1087 case LE: return CC0 | CC1 | CC3;
1088 case GE: return CC0 | CC2;
1089 default: return -1;
1090 }
1091 break;
1092
1093 case CCANmode:
1094 switch (GET_CODE (code))
1095 {
1096 case EQ: return CC0;
1097 case NE: return CC1 | CC2 | CC3;
1098 case LT: return CC1;
1099 case GT: return CC2 | CC3;
1100 case LE: return CC0 | CC1;
1101 case GE: return CC0 | CC2 | CC3;
1102 default: return -1;
1103 }
1104 break;
1105
1106 case CCSmode:
1107 switch (GET_CODE (code))
1108 {
1109 case EQ: return CC0;
1110 case NE: return CC1 | CC2 | CC3;
1111 case LT: return CC1;
1112 case GT: return CC2;
1113 case LE: return CC0 | CC1;
1114 case GE: return CC0 | CC2;
1115 case UNORDERED: return CC3;
1116 case ORDERED: return CC0 | CC1 | CC2;
1117 case UNEQ: return CC0 | CC3;
1118 case UNLT: return CC1 | CC3;
1119 case UNGT: return CC2 | CC3;
1120 case UNLE: return CC0 | CC1 | CC3;
1121 case UNGE: return CC0 | CC2 | CC3;
1122 case LTGT: return CC1 | CC2;
1123 default: return -1;
1124 }
1125 break;
1126
1127 case CCSRmode:
1128 switch (GET_CODE (code))
1129 {
1130 case EQ: return CC0;
1131 case NE: return CC2 | CC1 | CC3;
1132 case LT: return CC2;
1133 case GT: return CC1;
1134 case LE: return CC0 | CC2;
1135 case GE: return CC0 | CC1;
1136 case UNORDERED: return CC3;
1137 case ORDERED: return CC0 | CC2 | CC1;
1138 case UNEQ: return CC0 | CC3;
1139 case UNLT: return CC2 | CC3;
1140 case UNGT: return CC1 | CC3;
1141 case UNLE: return CC0 | CC2 | CC3;
1142 case UNGE: return CC0 | CC1 | CC3;
1143 case LTGT: return CC2 | CC1;
1144 default: return -1;
1145 }
1146 break;
1147
1148 default:
1149 return -1;
1150 }
1151 }
1152
1153
1154 /* Return branch condition mask to implement a compare and branch
1155 specified by CODE. Return -1 for invalid comparisons. */
1156
1157 int
1158 s390_compare_and_branch_condition_mask (rtx code)
1159 {
1160 const int CC0 = 1 << 3;
1161 const int CC1 = 1 << 2;
1162 const int CC2 = 1 << 1;
1163
1164 switch (GET_CODE (code))
1165 {
1166 case EQ:
1167 return CC0;
1168 case NE:
1169 return CC1 | CC2;
1170 case LT:
1171 case LTU:
1172 return CC1;
1173 case GT:
1174 case GTU:
1175 return CC2;
1176 case LE:
1177 case LEU:
1178 return CC0 | CC1;
1179 case GE:
1180 case GEU:
1181 return CC0 | CC2;
1182 default:
1183 gcc_unreachable ();
1184 }
1185 return -1;
1186 }
1187
1188 /* If INV is false, return assembler mnemonic string to implement
1189 a branch specified by CODE. If INV is true, return mnemonic
1190 for the corresponding inverted branch. */
1191
1192 static const char *
1193 s390_branch_condition_mnemonic (rtx code, int inv)
1194 {
1195 int mask;
1196
1197 static const char *const mnemonic[16] =
1198 {
1199 NULL, "o", "h", "nle",
1200 "l", "nhe", "lh", "ne",
1201 "e", "nlh", "he", "nl",
1202 "le", "nh", "no", NULL
1203 };
1204
1205 if (GET_CODE (XEXP (code, 0)) == REG
1206 && REGNO (XEXP (code, 0)) == CC_REGNUM
1207 && XEXP (code, 1) == const0_rtx)
1208 mask = s390_branch_condition_mask (code);
1209 else
1210 mask = s390_compare_and_branch_condition_mask (code);
1211
1212 gcc_assert (mask >= 0);
1213
1214 if (inv)
1215 mask ^= 15;
1216
1217 gcc_assert (mask >= 1 && mask <= 14);
1218
1219 return mnemonic[mask];
1220 }
1221
1222 /* Return the part of op which has a value different from def.
1223 The size of the part is determined by mode.
1224 Use this function only if you already know that op really
1225 contains such a part. */
1226
1227 unsigned HOST_WIDE_INT
1228 s390_extract_part (rtx op, enum machine_mode mode, int def)
1229 {
1230 unsigned HOST_WIDE_INT value = 0;
1231 int max_parts = HOST_BITS_PER_WIDE_INT / GET_MODE_BITSIZE (mode);
1232 int part_bits = GET_MODE_BITSIZE (mode);
1233 unsigned HOST_WIDE_INT part_mask
1234 = ((unsigned HOST_WIDE_INT)1 << part_bits) - 1;
1235 int i;
1236
1237 for (i = 0; i < max_parts; i++)
1238 {
1239 if (i == 0)
1240 value = (unsigned HOST_WIDE_INT) INTVAL (op);
1241 else
1242 value >>= part_bits;
1243
1244 if ((value & part_mask) != (def & part_mask))
1245 return value & part_mask;
1246 }
1247
1248 gcc_unreachable ();
1249 }
1250
1251 /* If OP is an integer constant of mode MODE with exactly one
1252 part of mode PART_MODE unequal to DEF, return the number of that
1253 part. Otherwise, return -1. */
1254
1255 int
1256 s390_single_part (rtx op,
1257 enum machine_mode mode,
1258 enum machine_mode part_mode,
1259 int def)
1260 {
1261 unsigned HOST_WIDE_INT value = 0;
1262 int n_parts = GET_MODE_SIZE (mode) / GET_MODE_SIZE (part_mode);
1263 unsigned HOST_WIDE_INT part_mask
1264 = ((unsigned HOST_WIDE_INT)1 << GET_MODE_BITSIZE (part_mode)) - 1;
1265 int i, part = -1;
1266
1267 if (GET_CODE (op) != CONST_INT)
1268 return -1;
1269
1270 for (i = 0; i < n_parts; i++)
1271 {
1272 if (i == 0)
1273 value = (unsigned HOST_WIDE_INT) INTVAL (op);
1274 else
1275 value >>= GET_MODE_BITSIZE (part_mode);
1276
1277 if ((value & part_mask) != (def & part_mask))
1278 {
1279 if (part != -1)
1280 return -1;
1281 else
1282 part = i;
1283 }
1284 }
1285 return part == -1 ? -1 : n_parts - 1 - part;
1286 }
1287
1288 /* Return true if IN contains a contiguous bitfield in the lower SIZE
1289 bits and no other bits are set in IN. POS and LENGTH can be used
1290 to obtain the start position and the length of the bitfield.
1291
1292 POS gives the position of the first bit of the bitfield counting
1293 from the lowest order bit starting with zero. In order to use this
1294 value for S/390 instructions this has to be converted to "bits big
1295 endian" style. */
1296
1297 bool
1298 s390_contiguous_bitmask_p (unsigned HOST_WIDE_INT in, int size,
1299 int *pos, int *length)
1300 {
1301 int tmp_pos = 0;
1302 int tmp_length = 0;
1303 int i;
1304 unsigned HOST_WIDE_INT mask = 1ULL;
1305 bool contiguous = false;
1306
1307 for (i = 0; i < size; mask <<= 1, i++)
1308 {
1309 if (contiguous)
1310 {
1311 if (mask & in)
1312 tmp_length++;
1313 else
1314 break;
1315 }
1316 else
1317 {
1318 if (mask & in)
1319 {
1320 contiguous = true;
1321 tmp_length++;
1322 }
1323 else
1324 tmp_pos++;
1325 }
1326 }
1327
1328 if (!tmp_length)
1329 return false;
1330
1331 /* Calculate a mask for all bits beyond the contiguous bits. */
1332 mask = (-1LL & ~(((1ULL << (tmp_length + tmp_pos - 1)) << 1) - 1));
1333
1334 if (mask & in)
1335 return false;
1336
1337 if (tmp_length + tmp_pos - 1 > size)
1338 return false;
1339
1340 if (length)
1341 *length = tmp_length;
1342
1343 if (pos)
1344 *pos = tmp_pos;
1345
1346 return true;
1347 }
1348
1349 /* Check whether a rotate of ROTL followed by an AND of CONTIG is
1350 equivalent to a shift followed by the AND. In particular, CONTIG
1351 should not overlap the (rotated) bit 0/bit 63 gap. Negative values
1352 for ROTL indicate a rotate to the right. */
1353
1354 bool
1355 s390_extzv_shift_ok (int bitsize, int rotl, unsigned HOST_WIDE_INT contig)
1356 {
1357 int pos, len;
1358 bool ok;
1359
1360 ok = s390_contiguous_bitmask_p (contig, bitsize, &pos, &len);
1361 gcc_assert (ok);
1362
1363 return ((rotl >= 0 && rotl <= pos)
1364 || (rotl < 0 && -rotl <= bitsize - len - pos));
1365 }
1366
1367 /* Check whether we can (and want to) split a double-word
1368 move in mode MODE from SRC to DST into two single-word
1369 moves, moving the subword FIRST_SUBWORD first. */
1370
1371 bool
1372 s390_split_ok_p (rtx dst, rtx src, enum machine_mode mode, int first_subword)
1373 {
1374 /* Floating point registers cannot be split. */
1375 if (FP_REG_P (src) || FP_REG_P (dst))
1376 return false;
1377
1378 /* We don't need to split if operands are directly accessible. */
1379 if (s_operand (src, mode) || s_operand (dst, mode))
1380 return false;
1381
1382 /* Non-offsettable memory references cannot be split. */
1383 if ((GET_CODE (src) == MEM && !offsettable_memref_p (src))
1384 || (GET_CODE (dst) == MEM && !offsettable_memref_p (dst)))
1385 return false;
1386
1387 /* Moving the first subword must not clobber a register
1388 needed to move the second subword. */
1389 if (register_operand (dst, mode))
1390 {
1391 rtx subreg = operand_subword (dst, first_subword, 0, mode);
1392 if (reg_overlap_mentioned_p (subreg, src))
1393 return false;
1394 }
1395
1396 return true;
1397 }
1398
1399 /* Return true if it can be proven that [MEM1, MEM1 + SIZE]
1400 and [MEM2, MEM2 + SIZE] do overlap and false
1401 otherwise. */
1402
1403 bool
1404 s390_overlap_p (rtx mem1, rtx mem2, HOST_WIDE_INT size)
1405 {
1406 rtx addr1, addr2, addr_delta;
1407 HOST_WIDE_INT delta;
1408
1409 if (GET_CODE (mem1) != MEM || GET_CODE (mem2) != MEM)
1410 return true;
1411
1412 if (size == 0)
1413 return false;
1414
1415 addr1 = XEXP (mem1, 0);
1416 addr2 = XEXP (mem2, 0);
1417
1418 addr_delta = simplify_binary_operation (MINUS, Pmode, addr2, addr1);
1419
1420 /* This overlapping check is used by peepholes merging memory block operations.
1421 Overlapping operations would otherwise be recognized by the S/390 hardware
1422 and would fall back to a slower implementation. Allowing overlapping
1423 operations would lead to slow code but not to wrong code. Therefore we are
1424 somewhat optimistic if we cannot prove that the memory blocks are
1425 overlapping.
1426 That's why we return false here although this may accept operations on
1427 overlapping memory areas. */
1428 if (!addr_delta || GET_CODE (addr_delta) != CONST_INT)
1429 return false;
1430
1431 delta = INTVAL (addr_delta);
1432
1433 if (delta == 0
1434 || (delta > 0 && delta < size)
1435 || (delta < 0 && -delta < size))
1436 return true;
1437
1438 return false;
1439 }
1440
1441 /* Check whether the address of memory reference MEM2 equals exactly
1442 the address of memory reference MEM1 plus DELTA. Return true if
1443 we can prove this to be the case, false otherwise. */
1444
1445 bool
1446 s390_offset_p (rtx mem1, rtx mem2, rtx delta)
1447 {
1448 rtx addr1, addr2, addr_delta;
1449
1450 if (GET_CODE (mem1) != MEM || GET_CODE (mem2) != MEM)
1451 return false;
1452
1453 addr1 = XEXP (mem1, 0);
1454 addr2 = XEXP (mem2, 0);
1455
1456 addr_delta = simplify_binary_operation (MINUS, Pmode, addr2, addr1);
1457 if (!addr_delta || !rtx_equal_p (addr_delta, delta))
1458 return false;
1459
1460 return true;
1461 }
1462
1463 /* Expand logical operator CODE in mode MODE with operands OPERANDS. */
1464
1465 void
1466 s390_expand_logical_operator (enum rtx_code code, enum machine_mode mode,
1467 rtx *operands)
1468 {
1469 enum machine_mode wmode = mode;
1470 rtx dst = operands[0];
1471 rtx src1 = operands[1];
1472 rtx src2 = operands[2];
1473 rtx op, clob, tem;
1474
1475 /* If we cannot handle the operation directly, use a temp register. */
1476 if (!s390_logical_operator_ok_p (operands))
1477 dst = gen_reg_rtx (mode);
1478
1479 /* QImode and HImode patterns make sense only if we have a destination
1480 in memory. Otherwise perform the operation in SImode. */
1481 if ((mode == QImode || mode == HImode) && GET_CODE (dst) != MEM)
1482 wmode = SImode;
1483
1484 /* Widen operands if required. */
1485 if (mode != wmode)
1486 {
1487 if (GET_CODE (dst) == SUBREG
1488 && (tem = simplify_subreg (wmode, dst, mode, 0)) != 0)
1489 dst = tem;
1490 else if (REG_P (dst))
1491 dst = gen_rtx_SUBREG (wmode, dst, 0);
1492 else
1493 dst = gen_reg_rtx (wmode);
1494
1495 if (GET_CODE (src1) == SUBREG
1496 && (tem = simplify_subreg (wmode, src1, mode, 0)) != 0)
1497 src1 = tem;
1498 else if (GET_MODE (src1) != VOIDmode)
1499 src1 = gen_rtx_SUBREG (wmode, force_reg (mode, src1), 0);
1500
1501 if (GET_CODE (src2) == SUBREG
1502 && (tem = simplify_subreg (wmode, src2, mode, 0)) != 0)
1503 src2 = tem;
1504 else if (GET_MODE (src2) != VOIDmode)
1505 src2 = gen_rtx_SUBREG (wmode, force_reg (mode, src2), 0);
1506 }
1507
1508 /* Emit the instruction. */
1509 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_ee (code, wmode, src1, src2));
1510 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
1511 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
1512
1513 /* Fix up the destination if needed. */
1514 if (dst != operands[0])
1515 emit_move_insn (operands[0], gen_lowpart (mode, dst));
1516 }
1517
1518 /* Check whether OPERANDS are OK for a logical operation (AND, IOR, XOR). */
1519
1520 bool
1521 s390_logical_operator_ok_p (rtx *operands)
1522 {
1523 /* If the destination operand is in memory, it needs to coincide
1524 with one of the source operands. After reload, it has to be
1525 the first source operand. */
1526 if (GET_CODE (operands[0]) == MEM)
1527 return rtx_equal_p (operands[0], operands[1])
1528 || (!reload_completed && rtx_equal_p (operands[0], operands[2]));
1529
1530 return true;
1531 }
1532
1533 /* Narrow logical operation CODE of memory operand MEMOP with immediate
1534 operand IMMOP to switch from SS to SI type instructions. */
1535
1536 void
1537 s390_narrow_logical_operator (enum rtx_code code, rtx *memop, rtx *immop)
1538 {
1539 int def = code == AND ? -1 : 0;
1540 HOST_WIDE_INT mask;
1541 int part;
1542
1543 gcc_assert (GET_CODE (*memop) == MEM);
1544 gcc_assert (!MEM_VOLATILE_P (*memop));
1545
1546 mask = s390_extract_part (*immop, QImode, def);
1547 part = s390_single_part (*immop, GET_MODE (*memop), QImode, def);
1548 gcc_assert (part >= 0);
1549
1550 *memop = adjust_address (*memop, QImode, part);
1551 *immop = gen_int_mode (mask, QImode);
1552 }
1553
1554
1555 /* How to allocate a 'struct machine_function'. */
1556
1557 static struct machine_function *
1558 s390_init_machine_status (void)
1559 {
1560 return ggc_alloc_cleared_machine_function ();
1561 }
1562
1563 static void
1564 s390_option_override (void)
1565 {
1566 /* Set up function hooks. */
1567 init_machine_status = s390_init_machine_status;
1568
1569 /* Architecture mode defaults according to ABI. */
1570 if (!(target_flags_explicit & MASK_ZARCH))
1571 {
1572 if (TARGET_64BIT)
1573 target_flags |= MASK_ZARCH;
1574 else
1575 target_flags &= ~MASK_ZARCH;
1576 }
1577
1578 /* Set the march default in case it hasn't been specified on
1579 cmdline. */
1580 if (s390_arch == PROCESSOR_max)
1581 {
1582 s390_arch_string = TARGET_ZARCH? "z900" : "g5";
1583 s390_arch = TARGET_ZARCH ? PROCESSOR_2064_Z900 : PROCESSOR_9672_G5;
1584 s390_arch_flags = processor_flags_table[(int)s390_arch];
1585 }
1586
1587 /* Determine processor to tune for. */
1588 if (s390_tune == PROCESSOR_max)
1589 {
1590 s390_tune = s390_arch;
1591 s390_tune_flags = s390_arch_flags;
1592 }
1593
1594 /* Sanity checks. */
1595 if (TARGET_ZARCH && !TARGET_CPU_ZARCH)
1596 error ("z/Architecture mode not supported on %s", s390_arch_string);
1597 if (TARGET_64BIT && !TARGET_ZARCH)
1598 error ("64-bit ABI not supported in ESA/390 mode");
1599
1600 /* Use hardware DFP if available and not explicitly disabled by
1601 user. E.g. with -m31 -march=z10 -mzarch */
1602 if (!(target_flags_explicit & MASK_HARD_DFP) && TARGET_DFP)
1603 target_flags |= MASK_HARD_DFP;
1604
1605 if (TARGET_HARD_DFP && !TARGET_DFP)
1606 {
1607 if (target_flags_explicit & MASK_HARD_DFP)
1608 {
1609 if (!TARGET_CPU_DFP)
1610 error ("hardware decimal floating point instructions"
1611 " not available on %s", s390_arch_string);
1612 if (!TARGET_ZARCH)
1613 error ("hardware decimal floating point instructions"
1614 " not available in ESA/390 mode");
1615 }
1616 else
1617 target_flags &= ~MASK_HARD_DFP;
1618 }
1619
1620 if ((target_flags_explicit & MASK_SOFT_FLOAT) && TARGET_SOFT_FLOAT)
1621 {
1622 if ((target_flags_explicit & MASK_HARD_DFP) && TARGET_HARD_DFP)
1623 error ("-mhard-dfp can%'t be used in conjunction with -msoft-float");
1624
1625 target_flags &= ~MASK_HARD_DFP;
1626 }
1627
1628 /* Set processor cost function. */
1629 switch (s390_tune)
1630 {
1631 case PROCESSOR_2084_Z990:
1632 s390_cost = &z990_cost;
1633 break;
1634 case PROCESSOR_2094_Z9_109:
1635 s390_cost = &z9_109_cost;
1636 break;
1637 case PROCESSOR_2097_Z10:
1638 s390_cost = &z10_cost;
1639 break;
1640 case PROCESSOR_2817_Z196:
1641 s390_cost = &z196_cost;
1642 break;
1643 case PROCESSOR_2827_ZEC12:
1644 s390_cost = &zEC12_cost;
1645 break;
1646 default:
1647 s390_cost = &z900_cost;
1648 }
1649
1650 if (TARGET_BACKCHAIN && TARGET_PACKED_STACK && TARGET_HARD_FLOAT)
1651 error ("-mbackchain -mpacked-stack -mhard-float are not supported "
1652 "in combination");
1653
1654 if (s390_stack_size)
1655 {
1656 if (s390_stack_guard >= s390_stack_size)
1657 error ("stack size must be greater than the stack guard value");
1658 else if (s390_stack_size > 1 << 16)
1659 error ("stack size must not be greater than 64k");
1660 }
1661 else if (s390_stack_guard)
1662 error ("-mstack-guard implies use of -mstack-size");
1663
1664 #ifdef TARGET_DEFAULT_LONG_DOUBLE_128
1665 if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
1666 target_flags |= MASK_LONG_DOUBLE_128;
1667 #endif
1668
1669 if (s390_tune == PROCESSOR_2097_Z10
1670 || s390_tune == PROCESSOR_2817_Z196
1671 || s390_tune == PROCESSOR_2827_ZEC12)
1672 {
1673 maybe_set_param_value (PARAM_MAX_UNROLLED_INSNS, 100,
1674 global_options.x_param_values,
1675 global_options_set.x_param_values);
1676 maybe_set_param_value (PARAM_MAX_UNROLL_TIMES, 32,
1677 global_options.x_param_values,
1678 global_options_set.x_param_values);
1679 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEELED_INSNS, 2000,
1680 global_options.x_param_values,
1681 global_options_set.x_param_values);
1682 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEEL_TIMES, 64,
1683 global_options.x_param_values,
1684 global_options_set.x_param_values);
1685 }
1686
1687 maybe_set_param_value (PARAM_MAX_PENDING_LIST_LENGTH, 256,
1688 global_options.x_param_values,
1689 global_options_set.x_param_values);
1690 /* values for loop prefetching */
1691 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE, 256,
1692 global_options.x_param_values,
1693 global_options_set.x_param_values);
1694 maybe_set_param_value (PARAM_L1_CACHE_SIZE, 128,
1695 global_options.x_param_values,
1696 global_options_set.x_param_values);
1697 /* s390 has more than 2 levels and the size is much larger. Since
1698 we are always running virtualized assume that we only get a small
1699 part of the caches above l1. */
1700 maybe_set_param_value (PARAM_L2_CACHE_SIZE, 1500,
1701 global_options.x_param_values,
1702 global_options_set.x_param_values);
1703 maybe_set_param_value (PARAM_PREFETCH_MIN_INSN_TO_MEM_RATIO, 2,
1704 global_options.x_param_values,
1705 global_options_set.x_param_values);
1706 maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES, 6,
1707 global_options.x_param_values,
1708 global_options_set.x_param_values);
1709
1710 /* This cannot reside in s390_option_optimization_table since HAVE_prefetch
1711 requires the arch flags to be evaluated already. Since prefetching
1712 is beneficial on s390, we enable it if available. */
1713 if (flag_prefetch_loop_arrays < 0 && HAVE_prefetch && optimize >= 3)
1714 flag_prefetch_loop_arrays = 1;
1715
1716 /* Use the alternative scheduling-pressure algorithm by default. */
1717 maybe_set_param_value (PARAM_SCHED_PRESSURE_ALGORITHM, 2,
1718 global_options.x_param_values,
1719 global_options_set.x_param_values);
1720
1721 if (TARGET_TPF)
1722 {
1723 /* Don't emit DWARF3/4 unless specifically selected. The TPF
1724 debuggers do not yet support DWARF 3/4. */
1725 if (!global_options_set.x_dwarf_strict)
1726 dwarf_strict = 1;
1727 if (!global_options_set.x_dwarf_version)
1728 dwarf_version = 2;
1729 }
1730 }
1731
1732 /* Map for smallest class containing reg regno. */
1733
1734 const enum reg_class regclass_map[FIRST_PSEUDO_REGISTER] =
1735 { GENERAL_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
1736 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
1737 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
1738 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
1739 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
1740 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
1741 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
1742 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
1743 ADDR_REGS, CC_REGS, ADDR_REGS, ADDR_REGS,
1744 ACCESS_REGS, ACCESS_REGS
1745 };
1746
1747 /* Return attribute type of insn. */
1748
1749 static enum attr_type
1750 s390_safe_attr_type (rtx insn)
1751 {
1752 if (recog_memoized (insn) >= 0)
1753 return get_attr_type (insn);
1754 else
1755 return TYPE_NONE;
1756 }
1757
1758 /* Return true if DISP is a valid short displacement. */
1759
1760 static bool
1761 s390_short_displacement (rtx disp)
1762 {
1763 /* No displacement is OK. */
1764 if (!disp)
1765 return true;
1766
1767 /* Without the long displacement facility we don't need to
1768 distingiush between long and short displacement. */
1769 if (!TARGET_LONG_DISPLACEMENT)
1770 return true;
1771
1772 /* Integer displacement in range. */
1773 if (GET_CODE (disp) == CONST_INT)
1774 return INTVAL (disp) >= 0 && INTVAL (disp) < 4096;
1775
1776 /* GOT offset is not OK, the GOT can be large. */
1777 if (GET_CODE (disp) == CONST
1778 && GET_CODE (XEXP (disp, 0)) == UNSPEC
1779 && (XINT (XEXP (disp, 0), 1) == UNSPEC_GOT
1780 || XINT (XEXP (disp, 0), 1) == UNSPEC_GOTNTPOFF))
1781 return false;
1782
1783 /* All other symbolic constants are literal pool references,
1784 which are OK as the literal pool must be small. */
1785 if (GET_CODE (disp) == CONST)
1786 return true;
1787
1788 return false;
1789 }
1790
1791 /* Decompose a RTL expression ADDR for a memory address into
1792 its components, returned in OUT.
1793
1794 Returns false if ADDR is not a valid memory address, true
1795 otherwise. If OUT is NULL, don't return the components,
1796 but check for validity only.
1797
1798 Note: Only addresses in canonical form are recognized.
1799 LEGITIMIZE_ADDRESS should convert non-canonical forms to the
1800 canonical form so that they will be recognized. */
1801
1802 static int
1803 s390_decompose_address (rtx addr, struct s390_address *out)
1804 {
1805 HOST_WIDE_INT offset = 0;
1806 rtx base = NULL_RTX;
1807 rtx indx = NULL_RTX;
1808 rtx disp = NULL_RTX;
1809 rtx orig_disp;
1810 bool pointer = false;
1811 bool base_ptr = false;
1812 bool indx_ptr = false;
1813 bool literal_pool = false;
1814
1815 /* We may need to substitute the literal pool base register into the address
1816 below. However, at this point we do not know which register is going to
1817 be used as base, so we substitute the arg pointer register. This is going
1818 to be treated as holding a pointer below -- it shouldn't be used for any
1819 other purpose. */
1820 rtx fake_pool_base = gen_rtx_REG (Pmode, ARG_POINTER_REGNUM);
1821
1822 /* Decompose address into base + index + displacement. */
1823
1824 if (GET_CODE (addr) == REG || GET_CODE (addr) == UNSPEC)
1825 base = addr;
1826
1827 else if (GET_CODE (addr) == PLUS)
1828 {
1829 rtx op0 = XEXP (addr, 0);
1830 rtx op1 = XEXP (addr, 1);
1831 enum rtx_code code0 = GET_CODE (op0);
1832 enum rtx_code code1 = GET_CODE (op1);
1833
1834 if (code0 == REG || code0 == UNSPEC)
1835 {
1836 if (code1 == REG || code1 == UNSPEC)
1837 {
1838 indx = op0; /* index + base */
1839 base = op1;
1840 }
1841
1842 else
1843 {
1844 base = op0; /* base + displacement */
1845 disp = op1;
1846 }
1847 }
1848
1849 else if (code0 == PLUS)
1850 {
1851 indx = XEXP (op0, 0); /* index + base + disp */
1852 base = XEXP (op0, 1);
1853 disp = op1;
1854 }
1855
1856 else
1857 {
1858 return false;
1859 }
1860 }
1861
1862 else
1863 disp = addr; /* displacement */
1864
1865 /* Extract integer part of displacement. */
1866 orig_disp = disp;
1867 if (disp)
1868 {
1869 if (GET_CODE (disp) == CONST_INT)
1870 {
1871 offset = INTVAL (disp);
1872 disp = NULL_RTX;
1873 }
1874 else if (GET_CODE (disp) == CONST
1875 && GET_CODE (XEXP (disp, 0)) == PLUS
1876 && GET_CODE (XEXP (XEXP (disp, 0), 1)) == CONST_INT)
1877 {
1878 offset = INTVAL (XEXP (XEXP (disp, 0), 1));
1879 disp = XEXP (XEXP (disp, 0), 0);
1880 }
1881 }
1882
1883 /* Strip off CONST here to avoid special case tests later. */
1884 if (disp && GET_CODE (disp) == CONST)
1885 disp = XEXP (disp, 0);
1886
1887 /* We can convert literal pool addresses to
1888 displacements by basing them off the base register. */
1889 if (disp && GET_CODE (disp) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (disp))
1890 {
1891 /* Either base or index must be free to hold the base register. */
1892 if (!base)
1893 base = fake_pool_base, literal_pool = true;
1894 else if (!indx)
1895 indx = fake_pool_base, literal_pool = true;
1896 else
1897 return false;
1898
1899 /* Mark up the displacement. */
1900 disp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, disp),
1901 UNSPEC_LTREL_OFFSET);
1902 }
1903
1904 /* Validate base register. */
1905 if (base)
1906 {
1907 if (GET_CODE (base) == UNSPEC)
1908 switch (XINT (base, 1))
1909 {
1910 case UNSPEC_LTREF:
1911 if (!disp)
1912 disp = gen_rtx_UNSPEC (Pmode,
1913 gen_rtvec (1, XVECEXP (base, 0, 0)),
1914 UNSPEC_LTREL_OFFSET);
1915 else
1916 return false;
1917
1918 base = XVECEXP (base, 0, 1);
1919 break;
1920
1921 case UNSPEC_LTREL_BASE:
1922 if (XVECLEN (base, 0) == 1)
1923 base = fake_pool_base, literal_pool = true;
1924 else
1925 base = XVECEXP (base, 0, 1);
1926 break;
1927
1928 default:
1929 return false;
1930 }
1931
1932 if (!REG_P (base)
1933 || (GET_MODE (base) != SImode
1934 && GET_MODE (base) != Pmode))
1935 return false;
1936
1937 if (REGNO (base) == STACK_POINTER_REGNUM
1938 || REGNO (base) == FRAME_POINTER_REGNUM
1939 || ((reload_completed || reload_in_progress)
1940 && frame_pointer_needed
1941 && REGNO (base) == HARD_FRAME_POINTER_REGNUM)
1942 || REGNO (base) == ARG_POINTER_REGNUM
1943 || (flag_pic
1944 && REGNO (base) == PIC_OFFSET_TABLE_REGNUM))
1945 pointer = base_ptr = true;
1946
1947 if ((reload_completed || reload_in_progress)
1948 && base == cfun->machine->base_reg)
1949 pointer = base_ptr = literal_pool = true;
1950 }
1951
1952 /* Validate index register. */
1953 if (indx)
1954 {
1955 if (GET_CODE (indx) == UNSPEC)
1956 switch (XINT (indx, 1))
1957 {
1958 case UNSPEC_LTREF:
1959 if (!disp)
1960 disp = gen_rtx_UNSPEC (Pmode,
1961 gen_rtvec (1, XVECEXP (indx, 0, 0)),
1962 UNSPEC_LTREL_OFFSET);
1963 else
1964 return false;
1965
1966 indx = XVECEXP (indx, 0, 1);
1967 break;
1968
1969 case UNSPEC_LTREL_BASE:
1970 if (XVECLEN (indx, 0) == 1)
1971 indx = fake_pool_base, literal_pool = true;
1972 else
1973 indx = XVECEXP (indx, 0, 1);
1974 break;
1975
1976 default:
1977 return false;
1978 }
1979
1980 if (!REG_P (indx)
1981 || (GET_MODE (indx) != SImode
1982 && GET_MODE (indx) != Pmode))
1983 return false;
1984
1985 if (REGNO (indx) == STACK_POINTER_REGNUM
1986 || REGNO (indx) == FRAME_POINTER_REGNUM
1987 || ((reload_completed || reload_in_progress)
1988 && frame_pointer_needed
1989 && REGNO (indx) == HARD_FRAME_POINTER_REGNUM)
1990 || REGNO (indx) == ARG_POINTER_REGNUM
1991 || (flag_pic
1992 && REGNO (indx) == PIC_OFFSET_TABLE_REGNUM))
1993 pointer = indx_ptr = true;
1994
1995 if ((reload_completed || reload_in_progress)
1996 && indx == cfun->machine->base_reg)
1997 pointer = indx_ptr = literal_pool = true;
1998 }
1999
2000 /* Prefer to use pointer as base, not index. */
2001 if (base && indx && !base_ptr
2002 && (indx_ptr || (!REG_POINTER (base) && REG_POINTER (indx))))
2003 {
2004 rtx tmp = base;
2005 base = indx;
2006 indx = tmp;
2007 }
2008
2009 /* Validate displacement. */
2010 if (!disp)
2011 {
2012 /* If virtual registers are involved, the displacement will change later
2013 anyway as the virtual registers get eliminated. This could make a
2014 valid displacement invalid, but it is more likely to make an invalid
2015 displacement valid, because we sometimes access the register save area
2016 via negative offsets to one of those registers.
2017 Thus we don't check the displacement for validity here. If after
2018 elimination the displacement turns out to be invalid after all,
2019 this is fixed up by reload in any case. */
2020 if (base != arg_pointer_rtx
2021 && indx != arg_pointer_rtx
2022 && base != return_address_pointer_rtx
2023 && indx != return_address_pointer_rtx
2024 && base != frame_pointer_rtx
2025 && indx != frame_pointer_rtx
2026 && base != virtual_stack_vars_rtx
2027 && indx != virtual_stack_vars_rtx)
2028 if (!DISP_IN_RANGE (offset))
2029 return false;
2030 }
2031 else
2032 {
2033 /* All the special cases are pointers. */
2034 pointer = true;
2035
2036 /* In the small-PIC case, the linker converts @GOT
2037 and @GOTNTPOFF offsets to possible displacements. */
2038 if (GET_CODE (disp) == UNSPEC
2039 && (XINT (disp, 1) == UNSPEC_GOT
2040 || XINT (disp, 1) == UNSPEC_GOTNTPOFF)
2041 && flag_pic == 1)
2042 {
2043 ;
2044 }
2045
2046 /* Accept pool label offsets. */
2047 else if (GET_CODE (disp) == UNSPEC
2048 && XINT (disp, 1) == UNSPEC_POOL_OFFSET)
2049 ;
2050
2051 /* Accept literal pool references. */
2052 else if (GET_CODE (disp) == UNSPEC
2053 && XINT (disp, 1) == UNSPEC_LTREL_OFFSET)
2054 {
2055 /* In case CSE pulled a non literal pool reference out of
2056 the pool we have to reject the address. This is
2057 especially important when loading the GOT pointer on non
2058 zarch CPUs. In this case the literal pool contains an lt
2059 relative offset to the _GLOBAL_OFFSET_TABLE_ label which
2060 will most likely exceed the displacement. */
2061 if (GET_CODE (XVECEXP (disp, 0, 0)) != SYMBOL_REF
2062 || !CONSTANT_POOL_ADDRESS_P (XVECEXP (disp, 0, 0)))
2063 return false;
2064
2065 orig_disp = gen_rtx_CONST (Pmode, disp);
2066 if (offset)
2067 {
2068 /* If we have an offset, make sure it does not
2069 exceed the size of the constant pool entry. */
2070 rtx sym = XVECEXP (disp, 0, 0);
2071 if (offset >= GET_MODE_SIZE (get_pool_mode (sym)))
2072 return false;
2073
2074 orig_disp = plus_constant (Pmode, orig_disp, offset);
2075 }
2076 }
2077
2078 else
2079 return false;
2080 }
2081
2082 if (!base && !indx)
2083 pointer = true;
2084
2085 if (out)
2086 {
2087 out->base = base;
2088 out->indx = indx;
2089 out->disp = orig_disp;
2090 out->pointer = pointer;
2091 out->literal_pool = literal_pool;
2092 }
2093
2094 return true;
2095 }
2096
2097 /* Decompose a RTL expression OP for a shift count into its components,
2098 and return the base register in BASE and the offset in OFFSET.
2099
2100 Return true if OP is a valid shift count, false if not. */
2101
2102 bool
2103 s390_decompose_shift_count (rtx op, rtx *base, HOST_WIDE_INT *offset)
2104 {
2105 HOST_WIDE_INT off = 0;
2106
2107 /* We can have an integer constant, an address register,
2108 or a sum of the two. */
2109 if (GET_CODE (op) == CONST_INT)
2110 {
2111 off = INTVAL (op);
2112 op = NULL_RTX;
2113 }
2114 if (op && GET_CODE (op) == PLUS && GET_CODE (XEXP (op, 1)) == CONST_INT)
2115 {
2116 off = INTVAL (XEXP (op, 1));
2117 op = XEXP (op, 0);
2118 }
2119 while (op && GET_CODE (op) == SUBREG)
2120 op = SUBREG_REG (op);
2121
2122 if (op && GET_CODE (op) != REG)
2123 return false;
2124
2125 if (offset)
2126 *offset = off;
2127 if (base)
2128 *base = op;
2129
2130 return true;
2131 }
2132
2133
2134 /* Return true if CODE is a valid address without index. */
2135
2136 bool
2137 s390_legitimate_address_without_index_p (rtx op)
2138 {
2139 struct s390_address addr;
2140
2141 if (!s390_decompose_address (XEXP (op, 0), &addr))
2142 return false;
2143 if (addr.indx)
2144 return false;
2145
2146 return true;
2147 }
2148
2149
2150 /* Return TRUE if ADDR is an operand valid for a load/store relative
2151 instruction. Be aware that the alignment of the operand needs to
2152 be checked separately.
2153 Valid addresses are single references or a sum of a reference and a
2154 constant integer. Return these parts in SYMREF and ADDEND. You can
2155 pass NULL in REF and/or ADDEND if you are not interested in these
2156 values. Literal pool references are *not* considered symbol
2157 references. */
2158
2159 static bool
2160 s390_loadrelative_operand_p (rtx addr, rtx *symref, HOST_WIDE_INT *addend)
2161 {
2162 HOST_WIDE_INT tmpaddend = 0;
2163
2164 if (GET_CODE (addr) == CONST)
2165 addr = XEXP (addr, 0);
2166
2167 if (GET_CODE (addr) == PLUS)
2168 {
2169 if (!CONST_INT_P (XEXP (addr, 1)))
2170 return false;
2171
2172 tmpaddend = INTVAL (XEXP (addr, 1));
2173 addr = XEXP (addr, 0);
2174 }
2175
2176 if ((GET_CODE (addr) == SYMBOL_REF && !CONSTANT_POOL_ADDRESS_P (addr))
2177 || (GET_CODE (addr) == UNSPEC
2178 && (XINT (addr, 1) == UNSPEC_GOTENT
2179 || (TARGET_CPU_ZARCH && XINT (addr, 1) == UNSPEC_PLT))))
2180 {
2181 if (symref)
2182 *symref = addr;
2183 if (addend)
2184 *addend = tmpaddend;
2185
2186 return true;
2187 }
2188 return false;
2189 }
2190
2191 /* Return true if the address in OP is valid for constraint letter C
2192 if wrapped in a MEM rtx. Set LIT_POOL_OK to true if it literal
2193 pool MEMs should be accepted. Only the Q, R, S, T constraint
2194 letters are allowed for C. */
2195
2196 static int
2197 s390_check_qrst_address (char c, rtx op, bool lit_pool_ok)
2198 {
2199 struct s390_address addr;
2200 bool decomposed = false;
2201
2202 /* This check makes sure that no symbolic address (except literal
2203 pool references) are accepted by the R or T constraints. */
2204 if (s390_loadrelative_operand_p (op, NULL, NULL))
2205 return 0;
2206
2207 /* Ensure literal pool references are only accepted if LIT_POOL_OK. */
2208 if (!lit_pool_ok)
2209 {
2210 if (!s390_decompose_address (op, &addr))
2211 return 0;
2212 if (addr.literal_pool)
2213 return 0;
2214 decomposed = true;
2215 }
2216
2217 switch (c)
2218 {
2219 case 'Q': /* no index short displacement */
2220 if (!decomposed && !s390_decompose_address (op, &addr))
2221 return 0;
2222 if (addr.indx)
2223 return 0;
2224 if (!s390_short_displacement (addr.disp))
2225 return 0;
2226 break;
2227
2228 case 'R': /* with index short displacement */
2229 if (TARGET_LONG_DISPLACEMENT)
2230 {
2231 if (!decomposed && !s390_decompose_address (op, &addr))
2232 return 0;
2233 if (!s390_short_displacement (addr.disp))
2234 return 0;
2235 }
2236 /* Any invalid address here will be fixed up by reload,
2237 so accept it for the most generic constraint. */
2238 break;
2239
2240 case 'S': /* no index long displacement */
2241 if (!TARGET_LONG_DISPLACEMENT)
2242 return 0;
2243 if (!decomposed && !s390_decompose_address (op, &addr))
2244 return 0;
2245 if (addr.indx)
2246 return 0;
2247 if (s390_short_displacement (addr.disp))
2248 return 0;
2249 break;
2250
2251 case 'T': /* with index long displacement */
2252 if (!TARGET_LONG_DISPLACEMENT)
2253 return 0;
2254 /* Any invalid address here will be fixed up by reload,
2255 so accept it for the most generic constraint. */
2256 if ((decomposed || s390_decompose_address (op, &addr))
2257 && s390_short_displacement (addr.disp))
2258 return 0;
2259 break;
2260 default:
2261 return 0;
2262 }
2263 return 1;
2264 }
2265
2266
2267 /* Evaluates constraint strings described by the regular expression
2268 ([A|B|Z](Q|R|S|T))|U|W|Y and returns 1 if OP is a valid operand for
2269 the constraint given in STR, or 0 else. */
2270
2271 int
2272 s390_mem_constraint (const char *str, rtx op)
2273 {
2274 char c = str[0];
2275
2276 switch (c)
2277 {
2278 case 'A':
2279 /* Check for offsettable variants of memory constraints. */
2280 if (!MEM_P (op) || MEM_VOLATILE_P (op))
2281 return 0;
2282 if ((reload_completed || reload_in_progress)
2283 ? !offsettable_memref_p (op) : !offsettable_nonstrict_memref_p (op))
2284 return 0;
2285 return s390_check_qrst_address (str[1], XEXP (op, 0), true);
2286 case 'B':
2287 /* Check for non-literal-pool variants of memory constraints. */
2288 if (!MEM_P (op))
2289 return 0;
2290 return s390_check_qrst_address (str[1], XEXP (op, 0), false);
2291 case 'Q':
2292 case 'R':
2293 case 'S':
2294 case 'T':
2295 if (GET_CODE (op) != MEM)
2296 return 0;
2297 return s390_check_qrst_address (c, XEXP (op, 0), true);
2298 case 'U':
2299 return (s390_check_qrst_address ('Q', op, true)
2300 || s390_check_qrst_address ('R', op, true));
2301 case 'W':
2302 return (s390_check_qrst_address ('S', op, true)
2303 || s390_check_qrst_address ('T', op, true));
2304 case 'Y':
2305 /* Simply check for the basic form of a shift count. Reload will
2306 take care of making sure we have a proper base register. */
2307 if (!s390_decompose_shift_count (op, NULL, NULL))
2308 return 0;
2309 break;
2310 case 'Z':
2311 return s390_check_qrst_address (str[1], op, true);
2312 default:
2313 return 0;
2314 }
2315 return 1;
2316 }
2317
2318
2319 /* Evaluates constraint strings starting with letter O. Input
2320 parameter C is the second letter following the "O" in the constraint
2321 string. Returns 1 if VALUE meets the respective constraint and 0
2322 otherwise. */
2323
2324 int
2325 s390_O_constraint_str (const char c, HOST_WIDE_INT value)
2326 {
2327 if (!TARGET_EXTIMM)
2328 return 0;
2329
2330 switch (c)
2331 {
2332 case 's':
2333 return trunc_int_for_mode (value, SImode) == value;
2334
2335 case 'p':
2336 return value == 0
2337 || s390_single_part (GEN_INT (value), DImode, SImode, 0) == 1;
2338
2339 case 'n':
2340 return s390_single_part (GEN_INT (value - 1), DImode, SImode, -1) == 1;
2341
2342 default:
2343 gcc_unreachable ();
2344 }
2345 }
2346
2347
2348 /* Evaluates constraint strings starting with letter N. Parameter STR
2349 contains the letters following letter "N" in the constraint string.
2350 Returns true if VALUE matches the constraint. */
2351
2352 int
2353 s390_N_constraint_str (const char *str, HOST_WIDE_INT value)
2354 {
2355 enum machine_mode mode, part_mode;
2356 int def;
2357 int part, part_goal;
2358
2359
2360 if (str[0] == 'x')
2361 part_goal = -1;
2362 else
2363 part_goal = str[0] - '0';
2364
2365 switch (str[1])
2366 {
2367 case 'Q':
2368 part_mode = QImode;
2369 break;
2370 case 'H':
2371 part_mode = HImode;
2372 break;
2373 case 'S':
2374 part_mode = SImode;
2375 break;
2376 default:
2377 return 0;
2378 }
2379
2380 switch (str[2])
2381 {
2382 case 'H':
2383 mode = HImode;
2384 break;
2385 case 'S':
2386 mode = SImode;
2387 break;
2388 case 'D':
2389 mode = DImode;
2390 break;
2391 default:
2392 return 0;
2393 }
2394
2395 switch (str[3])
2396 {
2397 case '0':
2398 def = 0;
2399 break;
2400 case 'F':
2401 def = -1;
2402 break;
2403 default:
2404 return 0;
2405 }
2406
2407 if (GET_MODE_SIZE (mode) <= GET_MODE_SIZE (part_mode))
2408 return 0;
2409
2410 part = s390_single_part (GEN_INT (value), mode, part_mode, def);
2411 if (part < 0)
2412 return 0;
2413 if (part_goal != -1 && part_goal != part)
2414 return 0;
2415
2416 return 1;
2417 }
2418
2419
2420 /* Returns true if the input parameter VALUE is a float zero. */
2421
2422 int
2423 s390_float_const_zero_p (rtx value)
2424 {
2425 return (GET_MODE_CLASS (GET_MODE (value)) == MODE_FLOAT
2426 && value == CONST0_RTX (GET_MODE (value)));
2427 }
2428
2429 /* Implement TARGET_REGISTER_MOVE_COST. */
2430
2431 static int
2432 s390_register_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
2433 reg_class_t from, reg_class_t to)
2434 {
2435 /* On s390, copy between fprs and gprs is expensive. */
2436 if ((reg_classes_intersect_p (from, GENERAL_REGS)
2437 && reg_classes_intersect_p (to, FP_REGS))
2438 || (reg_classes_intersect_p (from, FP_REGS)
2439 && reg_classes_intersect_p (to, GENERAL_REGS)))
2440 return 10;
2441
2442 return 1;
2443 }
2444
2445 /* Implement TARGET_MEMORY_MOVE_COST. */
2446
2447 static int
2448 s390_memory_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
2449 reg_class_t rclass ATTRIBUTE_UNUSED,
2450 bool in ATTRIBUTE_UNUSED)
2451 {
2452 return 1;
2453 }
2454
2455 /* Compute a (partial) cost for rtx X. Return true if the complete
2456 cost has been computed, and false if subexpressions should be
2457 scanned. In either case, *TOTAL contains the cost result.
2458 CODE contains GET_CODE (x), OUTER_CODE contains the code
2459 of the superexpression of x. */
2460
2461 static bool
2462 s390_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
2463 int *total, bool speed ATTRIBUTE_UNUSED)
2464 {
2465 switch (code)
2466 {
2467 case CONST:
2468 case CONST_INT:
2469 case LABEL_REF:
2470 case SYMBOL_REF:
2471 case CONST_DOUBLE:
2472 case MEM:
2473 *total = 0;
2474 return true;
2475
2476 case ASHIFT:
2477 case ASHIFTRT:
2478 case LSHIFTRT:
2479 case ROTATE:
2480 case ROTATERT:
2481 case AND:
2482 case IOR:
2483 case XOR:
2484 case NEG:
2485 case NOT:
2486 *total = COSTS_N_INSNS (1);
2487 return false;
2488
2489 case PLUS:
2490 case MINUS:
2491 *total = COSTS_N_INSNS (1);
2492 return false;
2493
2494 case MULT:
2495 switch (GET_MODE (x))
2496 {
2497 case SImode:
2498 {
2499 rtx left = XEXP (x, 0);
2500 rtx right = XEXP (x, 1);
2501 if (GET_CODE (right) == CONST_INT
2502 && CONST_OK_FOR_K (INTVAL (right)))
2503 *total = s390_cost->mhi;
2504 else if (GET_CODE (left) == SIGN_EXTEND)
2505 *total = s390_cost->mh;
2506 else
2507 *total = s390_cost->ms; /* msr, ms, msy */
2508 break;
2509 }
2510 case DImode:
2511 {
2512 rtx left = XEXP (x, 0);
2513 rtx right = XEXP (x, 1);
2514 if (TARGET_ZARCH)
2515 {
2516 if (GET_CODE (right) == CONST_INT
2517 && CONST_OK_FOR_K (INTVAL (right)))
2518 *total = s390_cost->mghi;
2519 else if (GET_CODE (left) == SIGN_EXTEND)
2520 *total = s390_cost->msgf;
2521 else
2522 *total = s390_cost->msg; /* msgr, msg */
2523 }
2524 else /* TARGET_31BIT */
2525 {
2526 if (GET_CODE (left) == SIGN_EXTEND
2527 && GET_CODE (right) == SIGN_EXTEND)
2528 /* mulsidi case: mr, m */
2529 *total = s390_cost->m;
2530 else if (GET_CODE (left) == ZERO_EXTEND
2531 && GET_CODE (right) == ZERO_EXTEND
2532 && TARGET_CPU_ZARCH)
2533 /* umulsidi case: ml, mlr */
2534 *total = s390_cost->ml;
2535 else
2536 /* Complex calculation is required. */
2537 *total = COSTS_N_INSNS (40);
2538 }
2539 break;
2540 }
2541 case SFmode:
2542 case DFmode:
2543 *total = s390_cost->mult_df;
2544 break;
2545 case TFmode:
2546 *total = s390_cost->mxbr;
2547 break;
2548 default:
2549 return false;
2550 }
2551 return false;
2552
2553 case FMA:
2554 switch (GET_MODE (x))
2555 {
2556 case DFmode:
2557 *total = s390_cost->madbr;
2558 break;
2559 case SFmode:
2560 *total = s390_cost->maebr;
2561 break;
2562 default:
2563 return false;
2564 }
2565 /* Negate in the third argument is free: FMSUB. */
2566 if (GET_CODE (XEXP (x, 2)) == NEG)
2567 {
2568 *total += (rtx_cost (XEXP (x, 0), FMA, 0, speed)
2569 + rtx_cost (XEXP (x, 1), FMA, 1, speed)
2570 + rtx_cost (XEXP (XEXP (x, 2), 0), FMA, 2, speed));
2571 return true;
2572 }
2573 return false;
2574
2575 case UDIV:
2576 case UMOD:
2577 if (GET_MODE (x) == TImode) /* 128 bit division */
2578 *total = s390_cost->dlgr;
2579 else if (GET_MODE (x) == DImode)
2580 {
2581 rtx right = XEXP (x, 1);
2582 if (GET_CODE (right) == ZERO_EXTEND) /* 64 by 32 bit division */
2583 *total = s390_cost->dlr;
2584 else /* 64 by 64 bit division */
2585 *total = s390_cost->dlgr;
2586 }
2587 else if (GET_MODE (x) == SImode) /* 32 bit division */
2588 *total = s390_cost->dlr;
2589 return false;
2590
2591 case DIV:
2592 case MOD:
2593 if (GET_MODE (x) == DImode)
2594 {
2595 rtx right = XEXP (x, 1);
2596 if (GET_CODE (right) == ZERO_EXTEND) /* 64 by 32 bit division */
2597 if (TARGET_ZARCH)
2598 *total = s390_cost->dsgfr;
2599 else
2600 *total = s390_cost->dr;
2601 else /* 64 by 64 bit division */
2602 *total = s390_cost->dsgr;
2603 }
2604 else if (GET_MODE (x) == SImode) /* 32 bit division */
2605 *total = s390_cost->dlr;
2606 else if (GET_MODE (x) == SFmode)
2607 {
2608 *total = s390_cost->debr;
2609 }
2610 else if (GET_MODE (x) == DFmode)
2611 {
2612 *total = s390_cost->ddbr;
2613 }
2614 else if (GET_MODE (x) == TFmode)
2615 {
2616 *total = s390_cost->dxbr;
2617 }
2618 return false;
2619
2620 case SQRT:
2621 if (GET_MODE (x) == SFmode)
2622 *total = s390_cost->sqebr;
2623 else if (GET_MODE (x) == DFmode)
2624 *total = s390_cost->sqdbr;
2625 else /* TFmode */
2626 *total = s390_cost->sqxbr;
2627 return false;
2628
2629 case SIGN_EXTEND:
2630 case ZERO_EXTEND:
2631 if (outer_code == MULT || outer_code == DIV || outer_code == MOD
2632 || outer_code == PLUS || outer_code == MINUS
2633 || outer_code == COMPARE)
2634 *total = 0;
2635 return false;
2636
2637 case COMPARE:
2638 *total = COSTS_N_INSNS (1);
2639 if (GET_CODE (XEXP (x, 0)) == AND
2640 && GET_CODE (XEXP (x, 1)) == CONST_INT
2641 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
2642 {
2643 rtx op0 = XEXP (XEXP (x, 0), 0);
2644 rtx op1 = XEXP (XEXP (x, 0), 1);
2645 rtx op2 = XEXP (x, 1);
2646
2647 if (memory_operand (op0, GET_MODE (op0))
2648 && s390_tm_ccmode (op1, op2, 0) != VOIDmode)
2649 return true;
2650 if (register_operand (op0, GET_MODE (op0))
2651 && s390_tm_ccmode (op1, op2, 1) != VOIDmode)
2652 return true;
2653 }
2654 return false;
2655
2656 default:
2657 return false;
2658 }
2659 }
2660
2661 /* Return the cost of an address rtx ADDR. */
2662
2663 static int
2664 s390_address_cost (rtx addr, enum machine_mode mode ATTRIBUTE_UNUSED,
2665 addr_space_t as ATTRIBUTE_UNUSED,
2666 bool speed ATTRIBUTE_UNUSED)
2667 {
2668 struct s390_address ad;
2669 if (!s390_decompose_address (addr, &ad))
2670 return 1000;
2671
2672 return ad.indx? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (1);
2673 }
2674
2675 /* If OP is a SYMBOL_REF of a thread-local symbol, return its TLS mode,
2676 otherwise return 0. */
2677
2678 int
2679 tls_symbolic_operand (rtx op)
2680 {
2681 if (GET_CODE (op) != SYMBOL_REF)
2682 return 0;
2683 return SYMBOL_REF_TLS_MODEL (op);
2684 }
2685 \f
2686 /* Split DImode access register reference REG (on 64-bit) into its constituent
2687 low and high parts, and store them into LO and HI. Note that gen_lowpart/
2688 gen_highpart cannot be used as they assume all registers are word-sized,
2689 while our access registers have only half that size. */
2690
2691 void
2692 s390_split_access_reg (rtx reg, rtx *lo, rtx *hi)
2693 {
2694 gcc_assert (TARGET_64BIT);
2695 gcc_assert (ACCESS_REG_P (reg));
2696 gcc_assert (GET_MODE (reg) == DImode);
2697 gcc_assert (!(REGNO (reg) & 1));
2698
2699 *lo = gen_rtx_REG (SImode, REGNO (reg) + 1);
2700 *hi = gen_rtx_REG (SImode, REGNO (reg));
2701 }
2702
2703 /* Return true if OP contains a symbol reference */
2704
2705 bool
2706 symbolic_reference_mentioned_p (rtx op)
2707 {
2708 const char *fmt;
2709 int i;
2710
2711 if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
2712 return 1;
2713
2714 fmt = GET_RTX_FORMAT (GET_CODE (op));
2715 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
2716 {
2717 if (fmt[i] == 'E')
2718 {
2719 int j;
2720
2721 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
2722 if (symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
2723 return 1;
2724 }
2725
2726 else if (fmt[i] == 'e' && symbolic_reference_mentioned_p (XEXP (op, i)))
2727 return 1;
2728 }
2729
2730 return 0;
2731 }
2732
2733 /* Return true if OP contains a reference to a thread-local symbol. */
2734
2735 bool
2736 tls_symbolic_reference_mentioned_p (rtx op)
2737 {
2738 const char *fmt;
2739 int i;
2740
2741 if (GET_CODE (op) == SYMBOL_REF)
2742 return tls_symbolic_operand (op);
2743
2744 fmt = GET_RTX_FORMAT (GET_CODE (op));
2745 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
2746 {
2747 if (fmt[i] == 'E')
2748 {
2749 int j;
2750
2751 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
2752 if (tls_symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
2753 return true;
2754 }
2755
2756 else if (fmt[i] == 'e' && tls_symbolic_reference_mentioned_p (XEXP (op, i)))
2757 return true;
2758 }
2759
2760 return false;
2761 }
2762
2763
2764 /* Return true if OP is a legitimate general operand when
2765 generating PIC code. It is given that flag_pic is on
2766 and that OP satisfies CONSTANT_P or is a CONST_DOUBLE. */
2767
2768 int
2769 legitimate_pic_operand_p (rtx op)
2770 {
2771 /* Accept all non-symbolic constants. */
2772 if (!SYMBOLIC_CONST (op))
2773 return 1;
2774
2775 /* Reject everything else; must be handled
2776 via emit_symbolic_move. */
2777 return 0;
2778 }
2779
2780 /* Returns true if the constant value OP is a legitimate general operand.
2781 It is given that OP satisfies CONSTANT_P or is a CONST_DOUBLE. */
2782
2783 static bool
2784 s390_legitimate_constant_p (enum machine_mode mode, rtx op)
2785 {
2786 /* Accept all non-symbolic constants. */
2787 if (!SYMBOLIC_CONST (op))
2788 return 1;
2789
2790 /* Accept immediate LARL operands. */
2791 if (TARGET_CPU_ZARCH && larl_operand (op, mode))
2792 return 1;
2793
2794 /* Thread-local symbols are never legal constants. This is
2795 so that emit_call knows that computing such addresses
2796 might require a function call. */
2797 if (TLS_SYMBOLIC_CONST (op))
2798 return 0;
2799
2800 /* In the PIC case, symbolic constants must *not* be
2801 forced into the literal pool. We accept them here,
2802 so that they will be handled by emit_symbolic_move. */
2803 if (flag_pic)
2804 return 1;
2805
2806 /* All remaining non-PIC symbolic constants are
2807 forced into the literal pool. */
2808 return 0;
2809 }
2810
2811 /* Determine if it's legal to put X into the constant pool. This
2812 is not possible if X contains the address of a symbol that is
2813 not constant (TLS) or not known at final link time (PIC). */
2814
2815 static bool
2816 s390_cannot_force_const_mem (enum machine_mode mode, rtx x)
2817 {
2818 switch (GET_CODE (x))
2819 {
2820 case CONST_INT:
2821 case CONST_DOUBLE:
2822 /* Accept all non-symbolic constants. */
2823 return false;
2824
2825 case LABEL_REF:
2826 /* Labels are OK iff we are non-PIC. */
2827 return flag_pic != 0;
2828
2829 case SYMBOL_REF:
2830 /* 'Naked' TLS symbol references are never OK,
2831 non-TLS symbols are OK iff we are non-PIC. */
2832 if (tls_symbolic_operand (x))
2833 return true;
2834 else
2835 return flag_pic != 0;
2836
2837 case CONST:
2838 return s390_cannot_force_const_mem (mode, XEXP (x, 0));
2839 case PLUS:
2840 case MINUS:
2841 return s390_cannot_force_const_mem (mode, XEXP (x, 0))
2842 || s390_cannot_force_const_mem (mode, XEXP (x, 1));
2843
2844 case UNSPEC:
2845 switch (XINT (x, 1))
2846 {
2847 /* Only lt-relative or GOT-relative UNSPECs are OK. */
2848 case UNSPEC_LTREL_OFFSET:
2849 case UNSPEC_GOT:
2850 case UNSPEC_GOTOFF:
2851 case UNSPEC_PLTOFF:
2852 case UNSPEC_TLSGD:
2853 case UNSPEC_TLSLDM:
2854 case UNSPEC_NTPOFF:
2855 case UNSPEC_DTPOFF:
2856 case UNSPEC_GOTNTPOFF:
2857 case UNSPEC_INDNTPOFF:
2858 return false;
2859
2860 /* If the literal pool shares the code section, be put
2861 execute template placeholders into the pool as well. */
2862 case UNSPEC_INSN:
2863 return TARGET_CPU_ZARCH;
2864
2865 default:
2866 return true;
2867 }
2868 break;
2869
2870 default:
2871 gcc_unreachable ();
2872 }
2873 }
2874
2875 /* Returns true if the constant value OP is a legitimate general
2876 operand during and after reload. The difference to
2877 legitimate_constant_p is that this function will not accept
2878 a constant that would need to be forced to the literal pool
2879 before it can be used as operand.
2880 This function accepts all constants which can be loaded directly
2881 into a GPR. */
2882
2883 bool
2884 legitimate_reload_constant_p (rtx op)
2885 {
2886 /* Accept la(y) operands. */
2887 if (GET_CODE (op) == CONST_INT
2888 && DISP_IN_RANGE (INTVAL (op)))
2889 return true;
2890
2891 /* Accept l(g)hi/l(g)fi operands. */
2892 if (GET_CODE (op) == CONST_INT
2893 && (CONST_OK_FOR_K (INTVAL (op)) || CONST_OK_FOR_Os (INTVAL (op))))
2894 return true;
2895
2896 /* Accept lliXX operands. */
2897 if (TARGET_ZARCH
2898 && GET_CODE (op) == CONST_INT
2899 && trunc_int_for_mode (INTVAL (op), word_mode) == INTVAL (op)
2900 && s390_single_part (op, word_mode, HImode, 0) >= 0)
2901 return true;
2902
2903 if (TARGET_EXTIMM
2904 && GET_CODE (op) == CONST_INT
2905 && trunc_int_for_mode (INTVAL (op), word_mode) == INTVAL (op)
2906 && s390_single_part (op, word_mode, SImode, 0) >= 0)
2907 return true;
2908
2909 /* Accept larl operands. */
2910 if (TARGET_CPU_ZARCH
2911 && larl_operand (op, VOIDmode))
2912 return true;
2913
2914 /* Accept floating-point zero operands that fit into a single GPR. */
2915 if (GET_CODE (op) == CONST_DOUBLE
2916 && s390_float_const_zero_p (op)
2917 && GET_MODE_SIZE (GET_MODE (op)) <= UNITS_PER_WORD)
2918 return true;
2919
2920 /* Accept double-word operands that can be split. */
2921 if (GET_CODE (op) == CONST_INT
2922 && trunc_int_for_mode (INTVAL (op), word_mode) != INTVAL (op))
2923 {
2924 enum machine_mode dword_mode = word_mode == SImode ? DImode : TImode;
2925 rtx hi = operand_subword (op, 0, 0, dword_mode);
2926 rtx lo = operand_subword (op, 1, 0, dword_mode);
2927 return legitimate_reload_constant_p (hi)
2928 && legitimate_reload_constant_p (lo);
2929 }
2930
2931 /* Everything else cannot be handled without reload. */
2932 return false;
2933 }
2934
2935 /* Returns true if the constant value OP is a legitimate fp operand
2936 during and after reload.
2937 This function accepts all constants which can be loaded directly
2938 into an FPR. */
2939
2940 static bool
2941 legitimate_reload_fp_constant_p (rtx op)
2942 {
2943 /* Accept floating-point zero operands if the load zero instruction
2944 can be used. Prior to z196 the load fp zero instruction caused a
2945 performance penalty if the result is used as BFP number. */
2946 if (TARGET_Z196
2947 && GET_CODE (op) == CONST_DOUBLE
2948 && s390_float_const_zero_p (op))
2949 return true;
2950
2951 return false;
2952 }
2953
2954 /* Given an rtx OP being reloaded into a reg required to be in class RCLASS,
2955 return the class of reg to actually use. */
2956
2957 static reg_class_t
2958 s390_preferred_reload_class (rtx op, reg_class_t rclass)
2959 {
2960 switch (GET_CODE (op))
2961 {
2962 /* Constants we cannot reload into general registers
2963 must be forced into the literal pool. */
2964 case CONST_DOUBLE:
2965 case CONST_INT:
2966 if (reg_class_subset_p (GENERAL_REGS, rclass)
2967 && legitimate_reload_constant_p (op))
2968 return GENERAL_REGS;
2969 else if (reg_class_subset_p (ADDR_REGS, rclass)
2970 && legitimate_reload_constant_p (op))
2971 return ADDR_REGS;
2972 else if (reg_class_subset_p (FP_REGS, rclass)
2973 && legitimate_reload_fp_constant_p (op))
2974 return FP_REGS;
2975 return NO_REGS;
2976
2977 /* If a symbolic constant or a PLUS is reloaded,
2978 it is most likely being used as an address, so
2979 prefer ADDR_REGS. If 'class' is not a superset
2980 of ADDR_REGS, e.g. FP_REGS, reject this reload. */
2981 case CONST:
2982 /* A larl operand with odd addend will get fixed via secondary
2983 reload. So don't request it to be pushed into literal
2984 pool. */
2985 if (TARGET_CPU_ZARCH
2986 && GET_CODE (XEXP (op, 0)) == PLUS
2987 && GET_CODE (XEXP (XEXP(op, 0), 0)) == SYMBOL_REF
2988 && GET_CODE (XEXP (XEXP(op, 0), 1)) == CONST_INT)
2989 {
2990 if (reg_class_subset_p (ADDR_REGS, rclass))
2991 return ADDR_REGS;
2992 else
2993 return NO_REGS;
2994 }
2995 /* fallthrough */
2996 case LABEL_REF:
2997 case SYMBOL_REF:
2998 if (!legitimate_reload_constant_p (op))
2999 return NO_REGS;
3000 /* fallthrough */
3001 case PLUS:
3002 /* load address will be used. */
3003 if (reg_class_subset_p (ADDR_REGS, rclass))
3004 return ADDR_REGS;
3005 else
3006 return NO_REGS;
3007
3008 default:
3009 break;
3010 }
3011
3012 return rclass;
3013 }
3014
3015 /* Return true if ADDR is SYMBOL_REF + addend with addend being a
3016 multiple of ALIGNMENT and the SYMBOL_REF being naturally
3017 aligned. */
3018
3019 bool
3020 s390_check_symref_alignment (rtx addr, HOST_WIDE_INT alignment)
3021 {
3022 HOST_WIDE_INT addend;
3023 rtx symref;
3024
3025 if (!s390_loadrelative_operand_p (addr, &symref, &addend))
3026 return false;
3027
3028 if (addend & (alignment - 1))
3029 return false;
3030
3031 if (GET_CODE (symref) == SYMBOL_REF
3032 && !SYMBOL_REF_NOT_NATURALLY_ALIGNED_P (symref))
3033 return true;
3034
3035 if (GET_CODE (symref) == UNSPEC
3036 && alignment <= UNITS_PER_LONG)
3037 return true;
3038
3039 return false;
3040 }
3041
3042 /* ADDR is moved into REG using larl. If ADDR isn't a valid larl
3043 operand SCRATCH is used to reload the even part of the address and
3044 adding one. */
3045
3046 void
3047 s390_reload_larl_operand (rtx reg, rtx addr, rtx scratch)
3048 {
3049 HOST_WIDE_INT addend;
3050 rtx symref;
3051
3052 if (!s390_loadrelative_operand_p (addr, &symref, &addend))
3053 gcc_unreachable ();
3054
3055 if (!(addend & 1))
3056 /* Easy case. The addend is even so larl will do fine. */
3057 emit_move_insn (reg, addr);
3058 else
3059 {
3060 /* We can leave the scratch register untouched if the target
3061 register is a valid base register. */
3062 if (REGNO (reg) < FIRST_PSEUDO_REGISTER
3063 && REGNO_REG_CLASS (REGNO (reg)) == ADDR_REGS)
3064 scratch = reg;
3065
3066 gcc_assert (REGNO (scratch) < FIRST_PSEUDO_REGISTER);
3067 gcc_assert (REGNO_REG_CLASS (REGNO (scratch)) == ADDR_REGS);
3068
3069 if (addend != 1)
3070 emit_move_insn (scratch,
3071 gen_rtx_CONST (Pmode,
3072 gen_rtx_PLUS (Pmode, symref,
3073 GEN_INT (addend - 1))));
3074 else
3075 emit_move_insn (scratch, symref);
3076
3077 /* Increment the address using la in order to avoid clobbering cc. */
3078 s390_load_address (reg, gen_rtx_PLUS (Pmode, scratch, const1_rtx));
3079 }
3080 }
3081
3082 /* Generate what is necessary to move between REG and MEM using
3083 SCRATCH. The direction is given by TOMEM. */
3084
3085 void
3086 s390_reload_symref_address (rtx reg, rtx mem, rtx scratch, bool tomem)
3087 {
3088 /* Reload might have pulled a constant out of the literal pool.
3089 Force it back in. */
3090 if (CONST_INT_P (mem) || GET_CODE (mem) == CONST_DOUBLE
3091 || GET_CODE (mem) == CONST)
3092 mem = force_const_mem (GET_MODE (reg), mem);
3093
3094 gcc_assert (MEM_P (mem));
3095
3096 /* For a load from memory we can leave the scratch register
3097 untouched if the target register is a valid base register. */
3098 if (!tomem
3099 && REGNO (reg) < FIRST_PSEUDO_REGISTER
3100 && REGNO_REG_CLASS (REGNO (reg)) == ADDR_REGS
3101 && GET_MODE (reg) == GET_MODE (scratch))
3102 scratch = reg;
3103
3104 /* Load address into scratch register. Since we can't have a
3105 secondary reload for a secondary reload we have to cover the case
3106 where larl would need a secondary reload here as well. */
3107 s390_reload_larl_operand (scratch, XEXP (mem, 0), scratch);
3108
3109 /* Now we can use a standard load/store to do the move. */
3110 if (tomem)
3111 emit_move_insn (replace_equiv_address (mem, scratch), reg);
3112 else
3113 emit_move_insn (reg, replace_equiv_address (mem, scratch));
3114 }
3115
3116 /* Inform reload about cases where moving X with a mode MODE to a register in
3117 RCLASS requires an extra scratch or immediate register. Return the class
3118 needed for the immediate register. */
3119
3120 static reg_class_t
3121 s390_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
3122 enum machine_mode mode, secondary_reload_info *sri)
3123 {
3124 enum reg_class rclass = (enum reg_class) rclass_i;
3125
3126 /* Intermediate register needed. */
3127 if (reg_classes_intersect_p (CC_REGS, rclass))
3128 return GENERAL_REGS;
3129
3130 if (TARGET_Z10)
3131 {
3132 HOST_WIDE_INT offset;
3133 rtx symref;
3134
3135 /* On z10 several optimizer steps may generate larl operands with
3136 an odd addend. */
3137 if (in_p
3138 && s390_loadrelative_operand_p (x, &symref, &offset)
3139 && mode == Pmode
3140 && !SYMBOL_REF_ALIGN1_P (symref)
3141 && (offset & 1) == 1)
3142 sri->icode = ((mode == DImode) ? CODE_FOR_reloaddi_larl_odd_addend_z10
3143 : CODE_FOR_reloadsi_larl_odd_addend_z10);
3144
3145 /* On z10 we need a scratch register when moving QI, TI or floating
3146 point mode values from or to a memory location with a SYMBOL_REF
3147 or if the symref addend of a SI or DI move is not aligned to the
3148 width of the access. */
3149 if (MEM_P (x)
3150 && s390_loadrelative_operand_p (XEXP (x, 0), NULL, NULL)
3151 && (mode == QImode || mode == TImode || FLOAT_MODE_P (mode)
3152 || (!TARGET_ZARCH && mode == DImode)
3153 || ((mode == HImode || mode == SImode || mode == DImode)
3154 && (!s390_check_symref_alignment (XEXP (x, 0),
3155 GET_MODE_SIZE (mode))))))
3156 {
3157 #define __SECONDARY_RELOAD_CASE(M,m) \
3158 case M##mode: \
3159 if (TARGET_64BIT) \
3160 sri->icode = in_p ? CODE_FOR_reload##m##di_toreg_z10 : \
3161 CODE_FOR_reload##m##di_tomem_z10; \
3162 else \
3163 sri->icode = in_p ? CODE_FOR_reload##m##si_toreg_z10 : \
3164 CODE_FOR_reload##m##si_tomem_z10; \
3165 break;
3166
3167 switch (GET_MODE (x))
3168 {
3169 __SECONDARY_RELOAD_CASE (QI, qi);
3170 __SECONDARY_RELOAD_CASE (HI, hi);
3171 __SECONDARY_RELOAD_CASE (SI, si);
3172 __SECONDARY_RELOAD_CASE (DI, di);
3173 __SECONDARY_RELOAD_CASE (TI, ti);
3174 __SECONDARY_RELOAD_CASE (SF, sf);
3175 __SECONDARY_RELOAD_CASE (DF, df);
3176 __SECONDARY_RELOAD_CASE (TF, tf);
3177 __SECONDARY_RELOAD_CASE (SD, sd);
3178 __SECONDARY_RELOAD_CASE (DD, dd);
3179 __SECONDARY_RELOAD_CASE (TD, td);
3180
3181 default:
3182 gcc_unreachable ();
3183 }
3184 #undef __SECONDARY_RELOAD_CASE
3185 }
3186 }
3187
3188 /* We need a scratch register when loading a PLUS expression which
3189 is not a legitimate operand of the LOAD ADDRESS instruction. */
3190 if (in_p && s390_plus_operand (x, mode))
3191 sri->icode = (TARGET_64BIT ?
3192 CODE_FOR_reloaddi_plus : CODE_FOR_reloadsi_plus);
3193
3194 /* Performing a multiword move from or to memory we have to make sure the
3195 second chunk in memory is addressable without causing a displacement
3196 overflow. If that would be the case we calculate the address in
3197 a scratch register. */
3198 if (MEM_P (x)
3199 && GET_CODE (XEXP (x, 0)) == PLUS
3200 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
3201 && !DISP_IN_RANGE (INTVAL (XEXP (XEXP (x, 0), 1))
3202 + GET_MODE_SIZE (mode) - 1))
3203 {
3204 /* For GENERAL_REGS a displacement overflow is no problem if occurring
3205 in a s_operand address since we may fallback to lm/stm. So we only
3206 have to care about overflows in the b+i+d case. */
3207 if ((reg_classes_intersect_p (GENERAL_REGS, rclass)
3208 && s390_class_max_nregs (GENERAL_REGS, mode) > 1
3209 && GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS)
3210 /* For FP_REGS no lm/stm is available so this check is triggered
3211 for displacement overflows in b+i+d and b+d like addresses. */
3212 || (reg_classes_intersect_p (FP_REGS, rclass)
3213 && s390_class_max_nregs (FP_REGS, mode) > 1))
3214 {
3215 if (in_p)
3216 sri->icode = (TARGET_64BIT ?
3217 CODE_FOR_reloaddi_nonoffmem_in :
3218 CODE_FOR_reloadsi_nonoffmem_in);
3219 else
3220 sri->icode = (TARGET_64BIT ?
3221 CODE_FOR_reloaddi_nonoffmem_out :
3222 CODE_FOR_reloadsi_nonoffmem_out);
3223 }
3224 }
3225
3226 /* A scratch address register is needed when a symbolic constant is
3227 copied to r0 compiling with -fPIC. In other cases the target
3228 register might be used as temporary (see legitimize_pic_address). */
3229 if (in_p && SYMBOLIC_CONST (x) && flag_pic == 2 && rclass != ADDR_REGS)
3230 sri->icode = (TARGET_64BIT ?
3231 CODE_FOR_reloaddi_PIC_addr :
3232 CODE_FOR_reloadsi_PIC_addr);
3233
3234 /* Either scratch or no register needed. */
3235 return NO_REGS;
3236 }
3237
3238 /* Generate code to load SRC, which is PLUS that is not a
3239 legitimate operand for the LA instruction, into TARGET.
3240 SCRATCH may be used as scratch register. */
3241
3242 void
3243 s390_expand_plus_operand (rtx target, rtx src,
3244 rtx scratch)
3245 {
3246 rtx sum1, sum2;
3247 struct s390_address ad;
3248
3249 /* src must be a PLUS; get its two operands. */
3250 gcc_assert (GET_CODE (src) == PLUS);
3251 gcc_assert (GET_MODE (src) == Pmode);
3252
3253 /* Check if any of the two operands is already scheduled
3254 for replacement by reload. This can happen e.g. when
3255 float registers occur in an address. */
3256 sum1 = find_replacement (&XEXP (src, 0));
3257 sum2 = find_replacement (&XEXP (src, 1));
3258 src = gen_rtx_PLUS (Pmode, sum1, sum2);
3259
3260 /* If the address is already strictly valid, there's nothing to do. */
3261 if (!s390_decompose_address (src, &ad)
3262 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
3263 || (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx))))
3264 {
3265 /* Otherwise, one of the operands cannot be an address register;
3266 we reload its value into the scratch register. */
3267 if (true_regnum (sum1) < 1 || true_regnum (sum1) > 15)
3268 {
3269 emit_move_insn (scratch, sum1);
3270 sum1 = scratch;
3271 }
3272 if (true_regnum (sum2) < 1 || true_regnum (sum2) > 15)
3273 {
3274 emit_move_insn (scratch, sum2);
3275 sum2 = scratch;
3276 }
3277
3278 /* According to the way these invalid addresses are generated
3279 in reload.c, it should never happen (at least on s390) that
3280 *neither* of the PLUS components, after find_replacements
3281 was applied, is an address register. */
3282 if (sum1 == scratch && sum2 == scratch)
3283 {
3284 debug_rtx (src);
3285 gcc_unreachable ();
3286 }
3287
3288 src = gen_rtx_PLUS (Pmode, sum1, sum2);
3289 }
3290
3291 /* Emit the LOAD ADDRESS pattern. Note that reload of PLUS
3292 is only ever performed on addresses, so we can mark the
3293 sum as legitimate for LA in any case. */
3294 s390_load_address (target, src);
3295 }
3296
3297
3298 /* Return true if ADDR is a valid memory address.
3299 STRICT specifies whether strict register checking applies. */
3300
3301 static bool
3302 s390_legitimate_address_p (enum machine_mode mode, rtx addr, bool strict)
3303 {
3304 struct s390_address ad;
3305
3306 if (TARGET_Z10
3307 && larl_operand (addr, VOIDmode)
3308 && (mode == VOIDmode
3309 || s390_check_symref_alignment (addr, GET_MODE_SIZE (mode))))
3310 return true;
3311
3312 if (!s390_decompose_address (addr, &ad))
3313 return false;
3314
3315 if (strict)
3316 {
3317 if (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
3318 return false;
3319
3320 if (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx)))
3321 return false;
3322 }
3323 else
3324 {
3325 if (ad.base
3326 && !(REGNO (ad.base) >= FIRST_PSEUDO_REGISTER
3327 || REGNO_REG_CLASS (REGNO (ad.base)) == ADDR_REGS))
3328 return false;
3329
3330 if (ad.indx
3331 && !(REGNO (ad.indx) >= FIRST_PSEUDO_REGISTER
3332 || REGNO_REG_CLASS (REGNO (ad.indx)) == ADDR_REGS))
3333 return false;
3334 }
3335 return true;
3336 }
3337
3338 /* Return true if OP is a valid operand for the LA instruction.
3339 In 31-bit, we need to prove that the result is used as an
3340 address, as LA performs only a 31-bit addition. */
3341
3342 bool
3343 legitimate_la_operand_p (rtx op)
3344 {
3345 struct s390_address addr;
3346 if (!s390_decompose_address (op, &addr))
3347 return false;
3348
3349 return (TARGET_64BIT || addr.pointer);
3350 }
3351
3352 /* Return true if it is valid *and* preferable to use LA to
3353 compute the sum of OP1 and OP2. */
3354
3355 bool
3356 preferred_la_operand_p (rtx op1, rtx op2)
3357 {
3358 struct s390_address addr;
3359
3360 if (op2 != const0_rtx)
3361 op1 = gen_rtx_PLUS (Pmode, op1, op2);
3362
3363 if (!s390_decompose_address (op1, &addr))
3364 return false;
3365 if (addr.base && !REGNO_OK_FOR_BASE_P (REGNO (addr.base)))
3366 return false;
3367 if (addr.indx && !REGNO_OK_FOR_INDEX_P (REGNO (addr.indx)))
3368 return false;
3369
3370 /* Avoid LA instructions with index register on z196; it is
3371 preferable to use regular add instructions when possible.
3372 Starting with zEC12 the la with index register is "uncracked"
3373 again. */
3374 if (addr.indx && s390_tune == PROCESSOR_2817_Z196)
3375 return false;
3376
3377 if (!TARGET_64BIT && !addr.pointer)
3378 return false;
3379
3380 if (addr.pointer)
3381 return true;
3382
3383 if ((addr.base && REG_P (addr.base) && REG_POINTER (addr.base))
3384 || (addr.indx && REG_P (addr.indx) && REG_POINTER (addr.indx)))
3385 return true;
3386
3387 return false;
3388 }
3389
3390 /* Emit a forced load-address operation to load SRC into DST.
3391 This will use the LOAD ADDRESS instruction even in situations
3392 where legitimate_la_operand_p (SRC) returns false. */
3393
3394 void
3395 s390_load_address (rtx dst, rtx src)
3396 {
3397 if (TARGET_64BIT)
3398 emit_move_insn (dst, src);
3399 else
3400 emit_insn (gen_force_la_31 (dst, src));
3401 }
3402
3403 /* Return a legitimate reference for ORIG (an address) using the
3404 register REG. If REG is 0, a new pseudo is generated.
3405
3406 There are two types of references that must be handled:
3407
3408 1. Global data references must load the address from the GOT, via
3409 the PIC reg. An insn is emitted to do this load, and the reg is
3410 returned.
3411
3412 2. Static data references, constant pool addresses, and code labels
3413 compute the address as an offset from the GOT, whose base is in
3414 the PIC reg. Static data objects have SYMBOL_FLAG_LOCAL set to
3415 differentiate them from global data objects. The returned
3416 address is the PIC reg + an unspec constant.
3417
3418 TARGET_LEGITIMIZE_ADDRESS_P rejects symbolic references unless the PIC
3419 reg also appears in the address. */
3420
3421 rtx
3422 legitimize_pic_address (rtx orig, rtx reg)
3423 {
3424 rtx addr = orig;
3425 rtx addend = const0_rtx;
3426 rtx new_rtx = orig;
3427
3428 gcc_assert (!TLS_SYMBOLIC_CONST (addr));
3429
3430 if (GET_CODE (addr) == CONST)
3431 addr = XEXP (addr, 0);
3432
3433 if (GET_CODE (addr) == PLUS)
3434 {
3435 addend = XEXP (addr, 1);
3436 addr = XEXP (addr, 0);
3437 }
3438
3439 if ((GET_CODE (addr) == LABEL_REF
3440 || (GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_LOCAL_P (addr))
3441 || (GET_CODE (addr) == UNSPEC &&
3442 (XINT (addr, 1) == UNSPEC_GOTENT
3443 || (TARGET_CPU_ZARCH && XINT (addr, 1) == UNSPEC_PLT))))
3444 && GET_CODE (addend) == CONST_INT)
3445 {
3446 /* This can be locally addressed. */
3447
3448 /* larl_operand requires UNSPECs to be wrapped in a const rtx. */
3449 rtx const_addr = (GET_CODE (addr) == UNSPEC ?
3450 gen_rtx_CONST (Pmode, addr) : addr);
3451
3452 if (TARGET_CPU_ZARCH
3453 && larl_operand (const_addr, VOIDmode)
3454 && INTVAL (addend) < (HOST_WIDE_INT)1 << 31
3455 && INTVAL (addend) >= -((HOST_WIDE_INT)1 << 31))
3456 {
3457 if (INTVAL (addend) & 1)
3458 {
3459 /* LARL can't handle odd offsets, so emit a pair of LARL
3460 and LA. */
3461 rtx temp = reg? reg : gen_reg_rtx (Pmode);
3462
3463 if (!DISP_IN_RANGE (INTVAL (addend)))
3464 {
3465 HOST_WIDE_INT even = INTVAL (addend) - 1;
3466 addr = gen_rtx_PLUS (Pmode, addr, GEN_INT (even));
3467 addr = gen_rtx_CONST (Pmode, addr);
3468 addend = const1_rtx;
3469 }
3470
3471 emit_move_insn (temp, addr);
3472 new_rtx = gen_rtx_PLUS (Pmode, temp, addend);
3473
3474 if (reg != 0)
3475 {
3476 s390_load_address (reg, new_rtx);
3477 new_rtx = reg;
3478 }
3479 }
3480 else
3481 {
3482 /* If the offset is even, we can just use LARL. This
3483 will happen automatically. */
3484 }
3485 }
3486 else
3487 {
3488 /* No larl - Access local symbols relative to the GOT. */
3489
3490 rtx temp = reg? reg : gen_reg_rtx (Pmode);
3491
3492 if (reload_in_progress || reload_completed)
3493 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3494
3495 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
3496 if (addend != const0_rtx)
3497 addr = gen_rtx_PLUS (Pmode, addr, addend);
3498 addr = gen_rtx_CONST (Pmode, addr);
3499 addr = force_const_mem (Pmode, addr);
3500 emit_move_insn (temp, addr);
3501
3502 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3503 if (reg != 0)
3504 {
3505 s390_load_address (reg, new_rtx);
3506 new_rtx = reg;
3507 }
3508 }
3509 }
3510 else if (GET_CODE (addr) == SYMBOL_REF && addend == const0_rtx)
3511 {
3512 /* A non-local symbol reference without addend.
3513
3514 The symbol ref is wrapped into an UNSPEC to make sure the
3515 proper operand modifier (@GOT or @GOTENT) will be emitted.
3516 This will tell the linker to put the symbol into the GOT.
3517
3518 Additionally the code dereferencing the GOT slot is emitted here.
3519
3520 An addend to the symref needs to be added afterwards.
3521 legitimize_pic_address calls itself recursively to handle
3522 that case. So no need to do it here. */
3523
3524 if (reg == 0)
3525 reg = gen_reg_rtx (Pmode);
3526
3527 if (TARGET_Z10)
3528 {
3529 /* Use load relative if possible.
3530 lgrl <target>, sym@GOTENT */
3531 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTENT);
3532 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3533 new_rtx = gen_const_mem (GET_MODE (reg), new_rtx);
3534
3535 emit_move_insn (reg, new_rtx);
3536 new_rtx = reg;
3537 }
3538 else if (flag_pic == 1)
3539 {
3540 /* Assume GOT offset is a valid displacement operand (< 4k
3541 or < 512k with z990). This is handled the same way in
3542 both 31- and 64-bit code (@GOT).
3543 lg <target>, sym@GOT(r12) */
3544
3545 if (reload_in_progress || reload_completed)
3546 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3547
3548 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
3549 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3550 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
3551 new_rtx = gen_const_mem (Pmode, new_rtx);
3552 emit_move_insn (reg, new_rtx);
3553 new_rtx = reg;
3554 }
3555 else if (TARGET_CPU_ZARCH)
3556 {
3557 /* If the GOT offset might be >= 4k, we determine the position
3558 of the GOT entry via a PC-relative LARL (@GOTENT).
3559 larl temp, sym@GOTENT
3560 lg <target>, 0(temp) */
3561
3562 rtx temp = reg ? reg : gen_reg_rtx (Pmode);
3563
3564 gcc_assert (REGNO (temp) >= FIRST_PSEUDO_REGISTER
3565 || REGNO_REG_CLASS (REGNO (temp)) == ADDR_REGS);
3566
3567 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTENT);
3568 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3569 emit_move_insn (temp, new_rtx);
3570
3571 new_rtx = gen_const_mem (Pmode, temp);
3572 emit_move_insn (reg, new_rtx);
3573
3574 new_rtx = reg;
3575 }
3576 else
3577 {
3578 /* If the GOT offset might be >= 4k, we have to load it
3579 from the literal pool (@GOT).
3580
3581 lg temp, lit-litbase(r13)
3582 lg <target>, 0(temp)
3583 lit: .long sym@GOT */
3584
3585 rtx temp = reg ? reg : gen_reg_rtx (Pmode);
3586
3587 gcc_assert (REGNO (temp) >= FIRST_PSEUDO_REGISTER
3588 || REGNO_REG_CLASS (REGNO (temp)) == ADDR_REGS);
3589
3590 if (reload_in_progress || reload_completed)
3591 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3592
3593 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
3594 addr = gen_rtx_CONST (Pmode, addr);
3595 addr = force_const_mem (Pmode, addr);
3596 emit_move_insn (temp, addr);
3597
3598 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3599 new_rtx = gen_const_mem (Pmode, new_rtx);
3600 emit_move_insn (reg, new_rtx);
3601 new_rtx = reg;
3602 }
3603 }
3604 else if (GET_CODE (addr) == UNSPEC && GET_CODE (addend) == CONST_INT)
3605 {
3606 gcc_assert (XVECLEN (addr, 0) == 1);
3607 switch (XINT (addr, 1))
3608 {
3609 /* These address symbols (or PLT slots) relative to the GOT
3610 (not GOT slots!). In general this will exceed the
3611 displacement range so these value belong into the literal
3612 pool. */
3613 case UNSPEC_GOTOFF:
3614 case UNSPEC_PLTOFF:
3615 new_rtx = force_const_mem (Pmode, orig);
3616 break;
3617
3618 /* For -fPIC the GOT size might exceed the displacement
3619 range so make sure the value is in the literal pool. */
3620 case UNSPEC_GOT:
3621 if (flag_pic == 2)
3622 new_rtx = force_const_mem (Pmode, orig);
3623 break;
3624
3625 /* For @GOTENT larl is used. This is handled like local
3626 symbol refs. */
3627 case UNSPEC_GOTENT:
3628 gcc_unreachable ();
3629 break;
3630
3631 /* @PLT is OK as is on 64-bit, must be converted to
3632 GOT-relative @PLTOFF on 31-bit. */
3633 case UNSPEC_PLT:
3634 if (!TARGET_CPU_ZARCH)
3635 {
3636 rtx temp = reg? reg : gen_reg_rtx (Pmode);
3637
3638 if (reload_in_progress || reload_completed)
3639 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3640
3641 addr = XVECEXP (addr, 0, 0);
3642 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr),
3643 UNSPEC_PLTOFF);
3644 if (addend != const0_rtx)
3645 addr = gen_rtx_PLUS (Pmode, addr, addend);
3646 addr = gen_rtx_CONST (Pmode, addr);
3647 addr = force_const_mem (Pmode, addr);
3648 emit_move_insn (temp, addr);
3649
3650 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3651 if (reg != 0)
3652 {
3653 s390_load_address (reg, new_rtx);
3654 new_rtx = reg;
3655 }
3656 }
3657 else
3658 /* On 64 bit larl can be used. This case is handled like
3659 local symbol refs. */
3660 gcc_unreachable ();
3661 break;
3662
3663 /* Everything else cannot happen. */
3664 default:
3665 gcc_unreachable ();
3666 }
3667 }
3668 else if (addend != const0_rtx)
3669 {
3670 /* Otherwise, compute the sum. */
3671
3672 rtx base = legitimize_pic_address (addr, reg);
3673 new_rtx = legitimize_pic_address (addend,
3674 base == reg ? NULL_RTX : reg);
3675 if (GET_CODE (new_rtx) == CONST_INT)
3676 new_rtx = plus_constant (Pmode, base, INTVAL (new_rtx));
3677 else
3678 {
3679 if (GET_CODE (new_rtx) == PLUS && CONSTANT_P (XEXP (new_rtx, 1)))
3680 {
3681 base = gen_rtx_PLUS (Pmode, base, XEXP (new_rtx, 0));
3682 new_rtx = XEXP (new_rtx, 1);
3683 }
3684 new_rtx = gen_rtx_PLUS (Pmode, base, new_rtx);
3685 }
3686
3687 if (GET_CODE (new_rtx) == CONST)
3688 new_rtx = XEXP (new_rtx, 0);
3689 new_rtx = force_operand (new_rtx, 0);
3690 }
3691
3692 return new_rtx;
3693 }
3694
3695 /* Load the thread pointer into a register. */
3696
3697 rtx
3698 s390_get_thread_pointer (void)
3699 {
3700 rtx tp = gen_reg_rtx (Pmode);
3701
3702 emit_move_insn (tp, gen_rtx_REG (Pmode, TP_REGNUM));
3703 mark_reg_pointer (tp, BITS_PER_WORD);
3704
3705 return tp;
3706 }
3707
3708 /* Emit a tls call insn. The call target is the SYMBOL_REF stored
3709 in s390_tls_symbol which always refers to __tls_get_offset.
3710 The returned offset is written to RESULT_REG and an USE rtx is
3711 generated for TLS_CALL. */
3712
3713 static GTY(()) rtx s390_tls_symbol;
3714
3715 static void
3716 s390_emit_tls_call_insn (rtx result_reg, rtx tls_call)
3717 {
3718 rtx insn;
3719
3720 if (!flag_pic)
3721 emit_insn (s390_load_got ());
3722
3723 if (!s390_tls_symbol)
3724 s390_tls_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tls_get_offset");
3725
3726 insn = s390_emit_call (s390_tls_symbol, tls_call, result_reg,
3727 gen_rtx_REG (Pmode, RETURN_REGNUM));
3728
3729 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), result_reg);
3730 RTL_CONST_CALL_P (insn) = 1;
3731 }
3732
3733 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
3734 this (thread-local) address. REG may be used as temporary. */
3735
3736 static rtx
3737 legitimize_tls_address (rtx addr, rtx reg)
3738 {
3739 rtx new_rtx, tls_call, temp, base, r2, insn;
3740
3741 if (GET_CODE (addr) == SYMBOL_REF)
3742 switch (tls_symbolic_operand (addr))
3743 {
3744 case TLS_MODEL_GLOBAL_DYNAMIC:
3745 start_sequence ();
3746 r2 = gen_rtx_REG (Pmode, 2);
3747 tls_call = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_TLSGD);
3748 new_rtx = gen_rtx_CONST (Pmode, tls_call);
3749 new_rtx = force_const_mem (Pmode, new_rtx);
3750 emit_move_insn (r2, new_rtx);
3751 s390_emit_tls_call_insn (r2, tls_call);
3752 insn = get_insns ();
3753 end_sequence ();
3754
3755 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_NTPOFF);
3756 temp = gen_reg_rtx (Pmode);
3757 emit_libcall_block (insn, temp, r2, new_rtx);
3758
3759 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
3760 if (reg != 0)
3761 {
3762 s390_load_address (reg, new_rtx);
3763 new_rtx = reg;
3764 }
3765 break;
3766
3767 case TLS_MODEL_LOCAL_DYNAMIC:
3768 start_sequence ();
3769 r2 = gen_rtx_REG (Pmode, 2);
3770 tls_call = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TLSLDM);
3771 new_rtx = gen_rtx_CONST (Pmode, tls_call);
3772 new_rtx = force_const_mem (Pmode, new_rtx);
3773 emit_move_insn (r2, new_rtx);
3774 s390_emit_tls_call_insn (r2, tls_call);
3775 insn = get_insns ();
3776 end_sequence ();
3777
3778 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TLSLDM_NTPOFF);
3779 temp = gen_reg_rtx (Pmode);
3780 emit_libcall_block (insn, temp, r2, new_rtx);
3781
3782 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
3783 base = gen_reg_rtx (Pmode);
3784 s390_load_address (base, new_rtx);
3785
3786 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_DTPOFF);
3787 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3788 new_rtx = force_const_mem (Pmode, new_rtx);
3789 temp = gen_reg_rtx (Pmode);
3790 emit_move_insn (temp, new_rtx);
3791
3792 new_rtx = gen_rtx_PLUS (Pmode, base, temp);
3793 if (reg != 0)
3794 {
3795 s390_load_address (reg, new_rtx);
3796 new_rtx = reg;
3797 }
3798 break;
3799
3800 case TLS_MODEL_INITIAL_EXEC:
3801 if (flag_pic == 1)
3802 {
3803 /* Assume GOT offset < 4k. This is handled the same way
3804 in both 31- and 64-bit code. */
3805
3806 if (reload_in_progress || reload_completed)
3807 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3808
3809 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTNTPOFF);
3810 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3811 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
3812 new_rtx = gen_const_mem (Pmode, new_rtx);
3813 temp = gen_reg_rtx (Pmode);
3814 emit_move_insn (temp, new_rtx);
3815 }
3816 else if (TARGET_CPU_ZARCH)
3817 {
3818 /* If the GOT offset might be >= 4k, we determine the position
3819 of the GOT entry via a PC-relative LARL. */
3820
3821 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_INDNTPOFF);
3822 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3823 temp = gen_reg_rtx (Pmode);
3824 emit_move_insn (temp, new_rtx);
3825
3826 new_rtx = gen_const_mem (Pmode, temp);
3827 temp = gen_reg_rtx (Pmode);
3828 emit_move_insn (temp, new_rtx);
3829 }
3830 else if (flag_pic)
3831 {
3832 /* If the GOT offset might be >= 4k, we have to load it
3833 from the literal pool. */
3834
3835 if (reload_in_progress || reload_completed)
3836 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3837
3838 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTNTPOFF);
3839 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3840 new_rtx = force_const_mem (Pmode, new_rtx);
3841 temp = gen_reg_rtx (Pmode);
3842 emit_move_insn (temp, new_rtx);
3843
3844 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3845 new_rtx = gen_const_mem (Pmode, new_rtx);
3846
3847 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, new_rtx, addr), UNSPEC_TLS_LOAD);
3848 temp = gen_reg_rtx (Pmode);
3849 emit_insn (gen_rtx_SET (Pmode, temp, new_rtx));
3850 }
3851 else
3852 {
3853 /* In position-dependent code, load the absolute address of
3854 the GOT entry from the literal pool. */
3855
3856 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_INDNTPOFF);
3857 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3858 new_rtx = force_const_mem (Pmode, new_rtx);
3859 temp = gen_reg_rtx (Pmode);
3860 emit_move_insn (temp, new_rtx);
3861
3862 new_rtx = temp;
3863 new_rtx = gen_const_mem (Pmode, new_rtx);
3864 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, new_rtx, addr), UNSPEC_TLS_LOAD);
3865 temp = gen_reg_rtx (Pmode);
3866 emit_insn (gen_rtx_SET (Pmode, temp, new_rtx));
3867 }
3868
3869 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
3870 if (reg != 0)
3871 {
3872 s390_load_address (reg, new_rtx);
3873 new_rtx = reg;
3874 }
3875 break;
3876
3877 case TLS_MODEL_LOCAL_EXEC:
3878 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_NTPOFF);
3879 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3880 new_rtx = force_const_mem (Pmode, new_rtx);
3881 temp = gen_reg_rtx (Pmode);
3882 emit_move_insn (temp, new_rtx);
3883
3884 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
3885 if (reg != 0)
3886 {
3887 s390_load_address (reg, new_rtx);
3888 new_rtx = reg;
3889 }
3890 break;
3891
3892 default:
3893 gcc_unreachable ();
3894 }
3895
3896 else if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == UNSPEC)
3897 {
3898 switch (XINT (XEXP (addr, 0), 1))
3899 {
3900 case UNSPEC_INDNTPOFF:
3901 gcc_assert (TARGET_CPU_ZARCH);
3902 new_rtx = addr;
3903 break;
3904
3905 default:
3906 gcc_unreachable ();
3907 }
3908 }
3909
3910 else if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == PLUS
3911 && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
3912 {
3913 new_rtx = XEXP (XEXP (addr, 0), 0);
3914 if (GET_CODE (new_rtx) != SYMBOL_REF)
3915 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3916
3917 new_rtx = legitimize_tls_address (new_rtx, reg);
3918 new_rtx = plus_constant (Pmode, new_rtx,
3919 INTVAL (XEXP (XEXP (addr, 0), 1)));
3920 new_rtx = force_operand (new_rtx, 0);
3921 }
3922
3923 else
3924 gcc_unreachable (); /* for now ... */
3925
3926 return new_rtx;
3927 }
3928
3929 /* Emit insns making the address in operands[1] valid for a standard
3930 move to operands[0]. operands[1] is replaced by an address which
3931 should be used instead of the former RTX to emit the move
3932 pattern. */
3933
3934 void
3935 emit_symbolic_move (rtx *operands)
3936 {
3937 rtx temp = !can_create_pseudo_p () ? operands[0] : gen_reg_rtx (Pmode);
3938
3939 if (GET_CODE (operands[0]) == MEM)
3940 operands[1] = force_reg (Pmode, operands[1]);
3941 else if (TLS_SYMBOLIC_CONST (operands[1]))
3942 operands[1] = legitimize_tls_address (operands[1], temp);
3943 else if (flag_pic)
3944 operands[1] = legitimize_pic_address (operands[1], temp);
3945 }
3946
3947 /* Try machine-dependent ways of modifying an illegitimate address X
3948 to be legitimate. If we find one, return the new, valid address.
3949
3950 OLDX is the address as it was before break_out_memory_refs was called.
3951 In some cases it is useful to look at this to decide what needs to be done.
3952
3953 MODE is the mode of the operand pointed to by X.
3954
3955 When -fpic is used, special handling is needed for symbolic references.
3956 See comments by legitimize_pic_address for details. */
3957
3958 static rtx
3959 s390_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
3960 enum machine_mode mode ATTRIBUTE_UNUSED)
3961 {
3962 rtx constant_term = const0_rtx;
3963
3964 if (TLS_SYMBOLIC_CONST (x))
3965 {
3966 x = legitimize_tls_address (x, 0);
3967
3968 if (s390_legitimate_address_p (mode, x, FALSE))
3969 return x;
3970 }
3971 else if (GET_CODE (x) == PLUS
3972 && (TLS_SYMBOLIC_CONST (XEXP (x, 0))
3973 || TLS_SYMBOLIC_CONST (XEXP (x, 1))))
3974 {
3975 return x;
3976 }
3977 else if (flag_pic)
3978 {
3979 if (SYMBOLIC_CONST (x)
3980 || (GET_CODE (x) == PLUS
3981 && (SYMBOLIC_CONST (XEXP (x, 0))
3982 || SYMBOLIC_CONST (XEXP (x, 1)))))
3983 x = legitimize_pic_address (x, 0);
3984
3985 if (s390_legitimate_address_p (mode, x, FALSE))
3986 return x;
3987 }
3988
3989 x = eliminate_constant_term (x, &constant_term);
3990
3991 /* Optimize loading of large displacements by splitting them
3992 into the multiple of 4K and the rest; this allows the
3993 former to be CSE'd if possible.
3994
3995 Don't do this if the displacement is added to a register
3996 pointing into the stack frame, as the offsets will
3997 change later anyway. */
3998
3999 if (GET_CODE (constant_term) == CONST_INT
4000 && !TARGET_LONG_DISPLACEMENT
4001 && !DISP_IN_RANGE (INTVAL (constant_term))
4002 && !(REG_P (x) && REGNO_PTR_FRAME_P (REGNO (x))))
4003 {
4004 HOST_WIDE_INT lower = INTVAL (constant_term) & 0xfff;
4005 HOST_WIDE_INT upper = INTVAL (constant_term) ^ lower;
4006
4007 rtx temp = gen_reg_rtx (Pmode);
4008 rtx val = force_operand (GEN_INT (upper), temp);
4009 if (val != temp)
4010 emit_move_insn (temp, val);
4011
4012 x = gen_rtx_PLUS (Pmode, x, temp);
4013 constant_term = GEN_INT (lower);
4014 }
4015
4016 if (GET_CODE (x) == PLUS)
4017 {
4018 if (GET_CODE (XEXP (x, 0)) == REG)
4019 {
4020 rtx temp = gen_reg_rtx (Pmode);
4021 rtx val = force_operand (XEXP (x, 1), temp);
4022 if (val != temp)
4023 emit_move_insn (temp, val);
4024
4025 x = gen_rtx_PLUS (Pmode, XEXP (x, 0), temp);
4026 }
4027
4028 else if (GET_CODE (XEXP (x, 1)) == REG)
4029 {
4030 rtx temp = gen_reg_rtx (Pmode);
4031 rtx val = force_operand (XEXP (x, 0), temp);
4032 if (val != temp)
4033 emit_move_insn (temp, val);
4034
4035 x = gen_rtx_PLUS (Pmode, temp, XEXP (x, 1));
4036 }
4037 }
4038
4039 if (constant_term != const0_rtx)
4040 x = gen_rtx_PLUS (Pmode, x, constant_term);
4041
4042 return x;
4043 }
4044
4045 /* Try a machine-dependent way of reloading an illegitimate address AD
4046 operand. If we find one, push the reload and return the new address.
4047
4048 MODE is the mode of the enclosing MEM. OPNUM is the operand number
4049 and TYPE is the reload type of the current reload. */
4050
4051 rtx
4052 legitimize_reload_address (rtx ad, enum machine_mode mode ATTRIBUTE_UNUSED,
4053 int opnum, int type)
4054 {
4055 if (!optimize || TARGET_LONG_DISPLACEMENT)
4056 return NULL_RTX;
4057
4058 if (GET_CODE (ad) == PLUS)
4059 {
4060 rtx tem = simplify_binary_operation (PLUS, Pmode,
4061 XEXP (ad, 0), XEXP (ad, 1));
4062 if (tem)
4063 ad = tem;
4064 }
4065
4066 if (GET_CODE (ad) == PLUS
4067 && GET_CODE (XEXP (ad, 0)) == REG
4068 && GET_CODE (XEXP (ad, 1)) == CONST_INT
4069 && !DISP_IN_RANGE (INTVAL (XEXP (ad, 1))))
4070 {
4071 HOST_WIDE_INT lower = INTVAL (XEXP (ad, 1)) & 0xfff;
4072 HOST_WIDE_INT upper = INTVAL (XEXP (ad, 1)) ^ lower;
4073 rtx cst, tem, new_rtx;
4074
4075 cst = GEN_INT (upper);
4076 if (!legitimate_reload_constant_p (cst))
4077 cst = force_const_mem (Pmode, cst);
4078
4079 tem = gen_rtx_PLUS (Pmode, XEXP (ad, 0), cst);
4080 new_rtx = gen_rtx_PLUS (Pmode, tem, GEN_INT (lower));
4081
4082 push_reload (XEXP (tem, 1), 0, &XEXP (tem, 1), 0,
4083 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
4084 opnum, (enum reload_type) type);
4085 return new_rtx;
4086 }
4087
4088 return NULL_RTX;
4089 }
4090
4091 /* Emit code to move LEN bytes from DST to SRC. */
4092
4093 bool
4094 s390_expand_movmem (rtx dst, rtx src, rtx len)
4095 {
4096 /* When tuning for z10 or higher we rely on the Glibc functions to
4097 do the right thing. Only for constant lengths below 64k we will
4098 generate inline code. */
4099 if (s390_tune >= PROCESSOR_2097_Z10
4100 && (GET_CODE (len) != CONST_INT || INTVAL (len) > (1<<16)))
4101 return false;
4102
4103 if (GET_CODE (len) == CONST_INT && INTVAL (len) >= 0 && INTVAL (len) <= 256)
4104 {
4105 if (INTVAL (len) > 0)
4106 emit_insn (gen_movmem_short (dst, src, GEN_INT (INTVAL (len) - 1)));
4107 }
4108
4109 else if (TARGET_MVCLE)
4110 {
4111 emit_insn (gen_movmem_long (dst, src, convert_to_mode (Pmode, len, 1)));
4112 }
4113
4114 else
4115 {
4116 rtx dst_addr, src_addr, count, blocks, temp;
4117 rtx loop_start_label = gen_label_rtx ();
4118 rtx loop_end_label = gen_label_rtx ();
4119 rtx end_label = gen_label_rtx ();
4120 enum machine_mode mode;
4121
4122 mode = GET_MODE (len);
4123 if (mode == VOIDmode)
4124 mode = Pmode;
4125
4126 dst_addr = gen_reg_rtx (Pmode);
4127 src_addr = gen_reg_rtx (Pmode);
4128 count = gen_reg_rtx (mode);
4129 blocks = gen_reg_rtx (mode);
4130
4131 convert_move (count, len, 1);
4132 emit_cmp_and_jump_insns (count, const0_rtx,
4133 EQ, NULL_RTX, mode, 1, end_label);
4134
4135 emit_move_insn (dst_addr, force_operand (XEXP (dst, 0), NULL_RTX));
4136 emit_move_insn (src_addr, force_operand (XEXP (src, 0), NULL_RTX));
4137 dst = change_address (dst, VOIDmode, dst_addr);
4138 src = change_address (src, VOIDmode, src_addr);
4139
4140 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
4141 OPTAB_DIRECT);
4142 if (temp != count)
4143 emit_move_insn (count, temp);
4144
4145 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
4146 OPTAB_DIRECT);
4147 if (temp != blocks)
4148 emit_move_insn (blocks, temp);
4149
4150 emit_cmp_and_jump_insns (blocks, const0_rtx,
4151 EQ, NULL_RTX, mode, 1, loop_end_label);
4152
4153 emit_label (loop_start_label);
4154
4155 if (TARGET_Z10
4156 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 768))
4157 {
4158 rtx prefetch;
4159
4160 /* Issue a read prefetch for the +3 cache line. */
4161 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, src_addr, GEN_INT (768)),
4162 const0_rtx, const0_rtx);
4163 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4164 emit_insn (prefetch);
4165
4166 /* Issue a write prefetch for the +3 cache line. */
4167 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (768)),
4168 const1_rtx, const0_rtx);
4169 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4170 emit_insn (prefetch);
4171 }
4172
4173 emit_insn (gen_movmem_short (dst, src, GEN_INT (255)));
4174 s390_load_address (dst_addr,
4175 gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (256)));
4176 s390_load_address (src_addr,
4177 gen_rtx_PLUS (Pmode, src_addr, GEN_INT (256)));
4178
4179 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
4180 OPTAB_DIRECT);
4181 if (temp != blocks)
4182 emit_move_insn (blocks, temp);
4183
4184 emit_cmp_and_jump_insns (blocks, const0_rtx,
4185 EQ, NULL_RTX, mode, 1, loop_end_label);
4186
4187 emit_jump (loop_start_label);
4188 emit_label (loop_end_label);
4189
4190 emit_insn (gen_movmem_short (dst, src,
4191 convert_to_mode (Pmode, count, 1)));
4192 emit_label (end_label);
4193 }
4194 return true;
4195 }
4196
4197 /* Emit code to set LEN bytes at DST to VAL.
4198 Make use of clrmem if VAL is zero. */
4199
4200 void
4201 s390_expand_setmem (rtx dst, rtx len, rtx val)
4202 {
4203 if (GET_CODE (len) == CONST_INT && INTVAL (len) == 0)
4204 return;
4205
4206 gcc_assert (GET_CODE (val) == CONST_INT || GET_MODE (val) == QImode);
4207
4208 if (GET_CODE (len) == CONST_INT && INTVAL (len) > 0 && INTVAL (len) <= 257)
4209 {
4210 if (val == const0_rtx && INTVAL (len) <= 256)
4211 emit_insn (gen_clrmem_short (dst, GEN_INT (INTVAL (len) - 1)));
4212 else
4213 {
4214 /* Initialize memory by storing the first byte. */
4215 emit_move_insn (adjust_address (dst, QImode, 0), val);
4216
4217 if (INTVAL (len) > 1)
4218 {
4219 /* Initiate 1 byte overlap move.
4220 The first byte of DST is propagated through DSTP1.
4221 Prepare a movmem for: DST+1 = DST (length = LEN - 1).
4222 DST is set to size 1 so the rest of the memory location
4223 does not count as source operand. */
4224 rtx dstp1 = adjust_address (dst, VOIDmode, 1);
4225 set_mem_size (dst, 1);
4226
4227 emit_insn (gen_movmem_short (dstp1, dst,
4228 GEN_INT (INTVAL (len) - 2)));
4229 }
4230 }
4231 }
4232
4233 else if (TARGET_MVCLE)
4234 {
4235 val = force_not_mem (convert_modes (Pmode, QImode, val, 1));
4236 emit_insn (gen_setmem_long (dst, convert_to_mode (Pmode, len, 1), val));
4237 }
4238
4239 else
4240 {
4241 rtx dst_addr, count, blocks, temp, dstp1 = NULL_RTX;
4242 rtx loop_start_label = gen_label_rtx ();
4243 rtx loop_end_label = gen_label_rtx ();
4244 rtx end_label = gen_label_rtx ();
4245 enum machine_mode mode;
4246
4247 mode = GET_MODE (len);
4248 if (mode == VOIDmode)
4249 mode = Pmode;
4250
4251 dst_addr = gen_reg_rtx (Pmode);
4252 count = gen_reg_rtx (mode);
4253 blocks = gen_reg_rtx (mode);
4254
4255 convert_move (count, len, 1);
4256 emit_cmp_and_jump_insns (count, const0_rtx,
4257 EQ, NULL_RTX, mode, 1, end_label);
4258
4259 emit_move_insn (dst_addr, force_operand (XEXP (dst, 0), NULL_RTX));
4260 dst = change_address (dst, VOIDmode, dst_addr);
4261
4262 if (val == const0_rtx)
4263 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
4264 OPTAB_DIRECT);
4265 else
4266 {
4267 dstp1 = adjust_address (dst, VOIDmode, 1);
4268 set_mem_size (dst, 1);
4269
4270 /* Initialize memory by storing the first byte. */
4271 emit_move_insn (adjust_address (dst, QImode, 0), val);
4272
4273 /* If count is 1 we are done. */
4274 emit_cmp_and_jump_insns (count, const1_rtx,
4275 EQ, NULL_RTX, mode, 1, end_label);
4276
4277 temp = expand_binop (mode, add_optab, count, GEN_INT (-2), count, 1,
4278 OPTAB_DIRECT);
4279 }
4280 if (temp != count)
4281 emit_move_insn (count, temp);
4282
4283 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
4284 OPTAB_DIRECT);
4285 if (temp != blocks)
4286 emit_move_insn (blocks, temp);
4287
4288 emit_cmp_and_jump_insns (blocks, const0_rtx,
4289 EQ, NULL_RTX, mode, 1, loop_end_label);
4290
4291 emit_label (loop_start_label);
4292
4293 if (TARGET_Z10
4294 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 1024))
4295 {
4296 /* Issue a write prefetch for the +4 cache line. */
4297 rtx prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, dst_addr,
4298 GEN_INT (1024)),
4299 const1_rtx, const0_rtx);
4300 emit_insn (prefetch);
4301 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4302 }
4303
4304 if (val == const0_rtx)
4305 emit_insn (gen_clrmem_short (dst, GEN_INT (255)));
4306 else
4307 emit_insn (gen_movmem_short (dstp1, dst, GEN_INT (255)));
4308 s390_load_address (dst_addr,
4309 gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (256)));
4310
4311 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
4312 OPTAB_DIRECT);
4313 if (temp != blocks)
4314 emit_move_insn (blocks, temp);
4315
4316 emit_cmp_and_jump_insns (blocks, const0_rtx,
4317 EQ, NULL_RTX, mode, 1, loop_end_label);
4318
4319 emit_jump (loop_start_label);
4320 emit_label (loop_end_label);
4321
4322 if (val == const0_rtx)
4323 emit_insn (gen_clrmem_short (dst, convert_to_mode (Pmode, count, 1)));
4324 else
4325 emit_insn (gen_movmem_short (dstp1, dst, convert_to_mode (Pmode, count, 1)));
4326 emit_label (end_label);
4327 }
4328 }
4329
4330 /* Emit code to compare LEN bytes at OP0 with those at OP1,
4331 and return the result in TARGET. */
4332
4333 bool
4334 s390_expand_cmpmem (rtx target, rtx op0, rtx op1, rtx len)
4335 {
4336 rtx ccreg = gen_rtx_REG (CCUmode, CC_REGNUM);
4337 rtx tmp;
4338
4339 /* When tuning for z10 or higher we rely on the Glibc functions to
4340 do the right thing. Only for constant lengths below 64k we will
4341 generate inline code. */
4342 if (s390_tune >= PROCESSOR_2097_Z10
4343 && (GET_CODE (len) != CONST_INT || INTVAL (len) > (1<<16)))
4344 return false;
4345
4346 /* As the result of CMPINT is inverted compared to what we need,
4347 we have to swap the operands. */
4348 tmp = op0; op0 = op1; op1 = tmp;
4349
4350 if (GET_CODE (len) == CONST_INT && INTVAL (len) >= 0 && INTVAL (len) <= 256)
4351 {
4352 if (INTVAL (len) > 0)
4353 {
4354 emit_insn (gen_cmpmem_short (op0, op1, GEN_INT (INTVAL (len) - 1)));
4355 emit_insn (gen_cmpint (target, ccreg));
4356 }
4357 else
4358 emit_move_insn (target, const0_rtx);
4359 }
4360 else if (TARGET_MVCLE)
4361 {
4362 emit_insn (gen_cmpmem_long (op0, op1, convert_to_mode (Pmode, len, 1)));
4363 emit_insn (gen_cmpint (target, ccreg));
4364 }
4365 else
4366 {
4367 rtx addr0, addr1, count, blocks, temp;
4368 rtx loop_start_label = gen_label_rtx ();
4369 rtx loop_end_label = gen_label_rtx ();
4370 rtx end_label = gen_label_rtx ();
4371 enum machine_mode mode;
4372
4373 mode = GET_MODE (len);
4374 if (mode == VOIDmode)
4375 mode = Pmode;
4376
4377 addr0 = gen_reg_rtx (Pmode);
4378 addr1 = gen_reg_rtx (Pmode);
4379 count = gen_reg_rtx (mode);
4380 blocks = gen_reg_rtx (mode);
4381
4382 convert_move (count, len, 1);
4383 emit_cmp_and_jump_insns (count, const0_rtx,
4384 EQ, NULL_RTX, mode, 1, end_label);
4385
4386 emit_move_insn (addr0, force_operand (XEXP (op0, 0), NULL_RTX));
4387 emit_move_insn (addr1, force_operand (XEXP (op1, 0), NULL_RTX));
4388 op0 = change_address (op0, VOIDmode, addr0);
4389 op1 = change_address (op1, VOIDmode, addr1);
4390
4391 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
4392 OPTAB_DIRECT);
4393 if (temp != count)
4394 emit_move_insn (count, temp);
4395
4396 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
4397 OPTAB_DIRECT);
4398 if (temp != blocks)
4399 emit_move_insn (blocks, temp);
4400
4401 emit_cmp_and_jump_insns (blocks, const0_rtx,
4402 EQ, NULL_RTX, mode, 1, loop_end_label);
4403
4404 emit_label (loop_start_label);
4405
4406 if (TARGET_Z10
4407 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 512))
4408 {
4409 rtx prefetch;
4410
4411 /* Issue a read prefetch for the +2 cache line of operand 1. */
4412 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, addr0, GEN_INT (512)),
4413 const0_rtx, const0_rtx);
4414 emit_insn (prefetch);
4415 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4416
4417 /* Issue a read prefetch for the +2 cache line of operand 2. */
4418 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, addr1, GEN_INT (512)),
4419 const0_rtx, const0_rtx);
4420 emit_insn (prefetch);
4421 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4422 }
4423
4424 emit_insn (gen_cmpmem_short (op0, op1, GEN_INT (255)));
4425 temp = gen_rtx_NE (VOIDmode, ccreg, const0_rtx);
4426 temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
4427 gen_rtx_LABEL_REF (VOIDmode, end_label), pc_rtx);
4428 temp = gen_rtx_SET (VOIDmode, pc_rtx, temp);
4429 emit_jump_insn (temp);
4430
4431 s390_load_address (addr0,
4432 gen_rtx_PLUS (Pmode, addr0, GEN_INT (256)));
4433 s390_load_address (addr1,
4434 gen_rtx_PLUS (Pmode, addr1, GEN_INT (256)));
4435
4436 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
4437 OPTAB_DIRECT);
4438 if (temp != blocks)
4439 emit_move_insn (blocks, temp);
4440
4441 emit_cmp_and_jump_insns (blocks, const0_rtx,
4442 EQ, NULL_RTX, mode, 1, loop_end_label);
4443
4444 emit_jump (loop_start_label);
4445 emit_label (loop_end_label);
4446
4447 emit_insn (gen_cmpmem_short (op0, op1,
4448 convert_to_mode (Pmode, count, 1)));
4449 emit_label (end_label);
4450
4451 emit_insn (gen_cmpint (target, ccreg));
4452 }
4453 return true;
4454 }
4455
4456
4457 /* Expand conditional increment or decrement using alc/slb instructions.
4458 Should generate code setting DST to either SRC or SRC + INCREMENT,
4459 depending on the result of the comparison CMP_OP0 CMP_CODE CMP_OP1.
4460 Returns true if successful, false otherwise.
4461
4462 That makes it possible to implement some if-constructs without jumps e.g.:
4463 (borrow = CC0 | CC1 and carry = CC2 | CC3)
4464 unsigned int a, b, c;
4465 if (a < b) c++; -> CCU b > a -> CC2; c += carry;
4466 if (a < b) c--; -> CCL3 a - b -> borrow; c -= borrow;
4467 if (a <= b) c++; -> CCL3 b - a -> borrow; c += carry;
4468 if (a <= b) c--; -> CCU a <= b -> borrow; c -= borrow;
4469
4470 Checks for EQ and NE with a nonzero value need an additional xor e.g.:
4471 if (a == b) c++; -> CCL3 a ^= b; 0 - a -> borrow; c += carry;
4472 if (a == b) c--; -> CCU a ^= b; a <= 0 -> CC0 | CC1; c -= borrow;
4473 if (a != b) c++; -> CCU a ^= b; a > 0 -> CC2; c += carry;
4474 if (a != b) c--; -> CCL3 a ^= b; 0 - a -> borrow; c -= borrow; */
4475
4476 bool
4477 s390_expand_addcc (enum rtx_code cmp_code, rtx cmp_op0, rtx cmp_op1,
4478 rtx dst, rtx src, rtx increment)
4479 {
4480 enum machine_mode cmp_mode;
4481 enum machine_mode cc_mode;
4482 rtx op_res;
4483 rtx insn;
4484 rtvec p;
4485 int ret;
4486
4487 if ((GET_MODE (cmp_op0) == SImode || GET_MODE (cmp_op0) == VOIDmode)
4488 && (GET_MODE (cmp_op1) == SImode || GET_MODE (cmp_op1) == VOIDmode))
4489 cmp_mode = SImode;
4490 else if ((GET_MODE (cmp_op0) == DImode || GET_MODE (cmp_op0) == VOIDmode)
4491 && (GET_MODE (cmp_op1) == DImode || GET_MODE (cmp_op1) == VOIDmode))
4492 cmp_mode = DImode;
4493 else
4494 return false;
4495
4496 /* Try ADD LOGICAL WITH CARRY. */
4497 if (increment == const1_rtx)
4498 {
4499 /* Determine CC mode to use. */
4500 if (cmp_code == EQ || cmp_code == NE)
4501 {
4502 if (cmp_op1 != const0_rtx)
4503 {
4504 cmp_op0 = expand_simple_binop (cmp_mode, XOR, cmp_op0, cmp_op1,
4505 NULL_RTX, 0, OPTAB_WIDEN);
4506 cmp_op1 = const0_rtx;
4507 }
4508
4509 cmp_code = cmp_code == EQ ? LEU : GTU;
4510 }
4511
4512 if (cmp_code == LTU || cmp_code == LEU)
4513 {
4514 rtx tem = cmp_op0;
4515 cmp_op0 = cmp_op1;
4516 cmp_op1 = tem;
4517 cmp_code = swap_condition (cmp_code);
4518 }
4519
4520 switch (cmp_code)
4521 {
4522 case GTU:
4523 cc_mode = CCUmode;
4524 break;
4525
4526 case GEU:
4527 cc_mode = CCL3mode;
4528 break;
4529
4530 default:
4531 return false;
4532 }
4533
4534 /* Emit comparison instruction pattern. */
4535 if (!register_operand (cmp_op0, cmp_mode))
4536 cmp_op0 = force_reg (cmp_mode, cmp_op0);
4537
4538 insn = gen_rtx_SET (VOIDmode, gen_rtx_REG (cc_mode, CC_REGNUM),
4539 gen_rtx_COMPARE (cc_mode, cmp_op0, cmp_op1));
4540 /* We use insn_invalid_p here to add clobbers if required. */
4541 ret = insn_invalid_p (emit_insn (insn), false);
4542 gcc_assert (!ret);
4543
4544 /* Emit ALC instruction pattern. */
4545 op_res = gen_rtx_fmt_ee (cmp_code, GET_MODE (dst),
4546 gen_rtx_REG (cc_mode, CC_REGNUM),
4547 const0_rtx);
4548
4549 if (src != const0_rtx)
4550 {
4551 if (!register_operand (src, GET_MODE (dst)))
4552 src = force_reg (GET_MODE (dst), src);
4553
4554 op_res = gen_rtx_PLUS (GET_MODE (dst), op_res, src);
4555 op_res = gen_rtx_PLUS (GET_MODE (dst), op_res, const0_rtx);
4556 }
4557
4558 p = rtvec_alloc (2);
4559 RTVEC_ELT (p, 0) =
4560 gen_rtx_SET (VOIDmode, dst, op_res);
4561 RTVEC_ELT (p, 1) =
4562 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
4563 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
4564
4565 return true;
4566 }
4567
4568 /* Try SUBTRACT LOGICAL WITH BORROW. */
4569 if (increment == constm1_rtx)
4570 {
4571 /* Determine CC mode to use. */
4572 if (cmp_code == EQ || cmp_code == NE)
4573 {
4574 if (cmp_op1 != const0_rtx)
4575 {
4576 cmp_op0 = expand_simple_binop (cmp_mode, XOR, cmp_op0, cmp_op1,
4577 NULL_RTX, 0, OPTAB_WIDEN);
4578 cmp_op1 = const0_rtx;
4579 }
4580
4581 cmp_code = cmp_code == EQ ? LEU : GTU;
4582 }
4583
4584 if (cmp_code == GTU || cmp_code == GEU)
4585 {
4586 rtx tem = cmp_op0;
4587 cmp_op0 = cmp_op1;
4588 cmp_op1 = tem;
4589 cmp_code = swap_condition (cmp_code);
4590 }
4591
4592 switch (cmp_code)
4593 {
4594 case LEU:
4595 cc_mode = CCUmode;
4596 break;
4597
4598 case LTU:
4599 cc_mode = CCL3mode;
4600 break;
4601
4602 default:
4603 return false;
4604 }
4605
4606 /* Emit comparison instruction pattern. */
4607 if (!register_operand (cmp_op0, cmp_mode))
4608 cmp_op0 = force_reg (cmp_mode, cmp_op0);
4609
4610 insn = gen_rtx_SET (VOIDmode, gen_rtx_REG (cc_mode, CC_REGNUM),
4611 gen_rtx_COMPARE (cc_mode, cmp_op0, cmp_op1));
4612 /* We use insn_invalid_p here to add clobbers if required. */
4613 ret = insn_invalid_p (emit_insn (insn), false);
4614 gcc_assert (!ret);
4615
4616 /* Emit SLB instruction pattern. */
4617 if (!register_operand (src, GET_MODE (dst)))
4618 src = force_reg (GET_MODE (dst), src);
4619
4620 op_res = gen_rtx_MINUS (GET_MODE (dst),
4621 gen_rtx_MINUS (GET_MODE (dst), src, const0_rtx),
4622 gen_rtx_fmt_ee (cmp_code, GET_MODE (dst),
4623 gen_rtx_REG (cc_mode, CC_REGNUM),
4624 const0_rtx));
4625 p = rtvec_alloc (2);
4626 RTVEC_ELT (p, 0) =
4627 gen_rtx_SET (VOIDmode, dst, op_res);
4628 RTVEC_ELT (p, 1) =
4629 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
4630 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
4631
4632 return true;
4633 }
4634
4635 return false;
4636 }
4637
4638 /* Expand code for the insv template. Return true if successful. */
4639
4640 bool
4641 s390_expand_insv (rtx dest, rtx op1, rtx op2, rtx src)
4642 {
4643 int bitsize = INTVAL (op1);
4644 int bitpos = INTVAL (op2);
4645 enum machine_mode mode = GET_MODE (dest);
4646 enum machine_mode smode;
4647 int smode_bsize, mode_bsize;
4648 rtx op, clobber;
4649
4650 /* Generate INSERT IMMEDIATE (IILL et al). */
4651 /* (set (ze (reg)) (const_int)). */
4652 if (TARGET_ZARCH
4653 && register_operand (dest, word_mode)
4654 && (bitpos % 16) == 0
4655 && (bitsize % 16) == 0
4656 && const_int_operand (src, VOIDmode))
4657 {
4658 HOST_WIDE_INT val = INTVAL (src);
4659 int regpos = bitpos + bitsize;
4660
4661 while (regpos > bitpos)
4662 {
4663 enum machine_mode putmode;
4664 int putsize;
4665
4666 if (TARGET_EXTIMM && (regpos % 32 == 0) && (regpos >= bitpos + 32))
4667 putmode = SImode;
4668 else
4669 putmode = HImode;
4670
4671 putsize = GET_MODE_BITSIZE (putmode);
4672 regpos -= putsize;
4673 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest,
4674 GEN_INT (putsize),
4675 GEN_INT (regpos)),
4676 gen_int_mode (val, putmode));
4677 val >>= putsize;
4678 }
4679 gcc_assert (regpos == bitpos);
4680 return true;
4681 }
4682
4683 smode = smallest_mode_for_size (bitsize, MODE_INT);
4684 smode_bsize = GET_MODE_BITSIZE (smode);
4685 mode_bsize = GET_MODE_BITSIZE (mode);
4686
4687 /* Generate STORE CHARACTERS UNDER MASK (STCM et al). */
4688 if (bitpos == 0
4689 && (bitsize % BITS_PER_UNIT) == 0
4690 && MEM_P (dest)
4691 && (register_operand (src, word_mode)
4692 || const_int_operand (src, VOIDmode)))
4693 {
4694 /* Emit standard pattern if possible. */
4695 if (smode_bsize == bitsize)
4696 {
4697 emit_move_insn (adjust_address (dest, smode, 0),
4698 gen_lowpart (smode, src));
4699 return true;
4700 }
4701
4702 /* (set (ze (mem)) (const_int)). */
4703 else if (const_int_operand (src, VOIDmode))
4704 {
4705 int size = bitsize / BITS_PER_UNIT;
4706 rtx src_mem = adjust_address (force_const_mem (word_mode, src),
4707 BLKmode,
4708 UNITS_PER_WORD - size);
4709
4710 dest = adjust_address (dest, BLKmode, 0);
4711 set_mem_size (dest, size);
4712 s390_expand_movmem (dest, src_mem, GEN_INT (size));
4713 return true;
4714 }
4715
4716 /* (set (ze (mem)) (reg)). */
4717 else if (register_operand (src, word_mode))
4718 {
4719 if (bitsize <= 32)
4720 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest, op1,
4721 const0_rtx), src);
4722 else
4723 {
4724 /* Emit st,stcmh sequence. */
4725 int stcmh_width = bitsize - 32;
4726 int size = stcmh_width / BITS_PER_UNIT;
4727
4728 emit_move_insn (adjust_address (dest, SImode, size),
4729 gen_lowpart (SImode, src));
4730 set_mem_size (dest, size);
4731 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest,
4732 GEN_INT (stcmh_width),
4733 const0_rtx),
4734 gen_rtx_LSHIFTRT (word_mode, src, GEN_INT (32)));
4735 }
4736 return true;
4737 }
4738 }
4739
4740 /* Generate INSERT CHARACTERS UNDER MASK (IC, ICM et al). */
4741 if ((bitpos % BITS_PER_UNIT) == 0
4742 && (bitsize % BITS_PER_UNIT) == 0
4743 && (bitpos & 32) == ((bitpos + bitsize - 1) & 32)
4744 && MEM_P (src)
4745 && (mode == DImode || mode == SImode)
4746 && register_operand (dest, mode))
4747 {
4748 /* Emit a strict_low_part pattern if possible. */
4749 if (smode_bsize == bitsize && bitpos == mode_bsize - smode_bsize)
4750 {
4751 op = gen_rtx_STRICT_LOW_PART (VOIDmode, gen_lowpart (smode, dest));
4752 op = gen_rtx_SET (VOIDmode, op, gen_lowpart (smode, src));
4753 clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
4754 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clobber)));
4755 return true;
4756 }
4757
4758 /* ??? There are more powerful versions of ICM that are not
4759 completely represented in the md file. */
4760 }
4761
4762 /* For z10, generate ROTATE THEN INSERT SELECTED BITS (RISBG et al). */
4763 if (TARGET_Z10 && (mode == DImode || mode == SImode))
4764 {
4765 enum machine_mode mode_s = GET_MODE (src);
4766
4767 if (mode_s == VOIDmode)
4768 {
4769 /* Assume const_int etc already in the proper mode. */
4770 src = force_reg (mode, src);
4771 }
4772 else if (mode_s != mode)
4773 {
4774 gcc_assert (GET_MODE_BITSIZE (mode_s) >= bitsize);
4775 src = force_reg (mode_s, src);
4776 src = gen_lowpart (mode, src);
4777 }
4778
4779 op = gen_rtx_ZERO_EXTRACT (mode, dest, op1, op2),
4780 op = gen_rtx_SET (VOIDmode, op, src);
4781
4782 if (!TARGET_ZEC12)
4783 {
4784 clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
4785 op = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clobber));
4786 }
4787 emit_insn (op);
4788
4789 return true;
4790 }
4791
4792 return false;
4793 }
4794
4795 /* A subroutine of s390_expand_cs_hqi and s390_expand_atomic which returns a
4796 register that holds VAL of mode MODE shifted by COUNT bits. */
4797
4798 static inline rtx
4799 s390_expand_mask_and_shift (rtx val, enum machine_mode mode, rtx count)
4800 {
4801 val = expand_simple_binop (SImode, AND, val, GEN_INT (GET_MODE_MASK (mode)),
4802 NULL_RTX, 1, OPTAB_DIRECT);
4803 return expand_simple_binop (SImode, ASHIFT, val, count,
4804 NULL_RTX, 1, OPTAB_DIRECT);
4805 }
4806
4807 /* Structure to hold the initial parameters for a compare_and_swap operation
4808 in HImode and QImode. */
4809
4810 struct alignment_context
4811 {
4812 rtx memsi; /* SI aligned memory location. */
4813 rtx shift; /* Bit offset with regard to lsb. */
4814 rtx modemask; /* Mask of the HQImode shifted by SHIFT bits. */
4815 rtx modemaski; /* ~modemask */
4816 bool aligned; /* True if memory is aligned, false else. */
4817 };
4818
4819 /* A subroutine of s390_expand_cs_hqi and s390_expand_atomic to initialize
4820 structure AC for transparent simplifying, if the memory alignment is known
4821 to be at least 32bit. MEM is the memory location for the actual operation
4822 and MODE its mode. */
4823
4824 static void
4825 init_alignment_context (struct alignment_context *ac, rtx mem,
4826 enum machine_mode mode)
4827 {
4828 ac->shift = GEN_INT (GET_MODE_SIZE (SImode) - GET_MODE_SIZE (mode));
4829 ac->aligned = (MEM_ALIGN (mem) >= GET_MODE_BITSIZE (SImode));
4830
4831 if (ac->aligned)
4832 ac->memsi = adjust_address (mem, SImode, 0); /* Memory is aligned. */
4833 else
4834 {
4835 /* Alignment is unknown. */
4836 rtx byteoffset, addr, align;
4837
4838 /* Force the address into a register. */
4839 addr = force_reg (Pmode, XEXP (mem, 0));
4840
4841 /* Align it to SImode. */
4842 align = expand_simple_binop (Pmode, AND, addr,
4843 GEN_INT (-GET_MODE_SIZE (SImode)),
4844 NULL_RTX, 1, OPTAB_DIRECT);
4845 /* Generate MEM. */
4846 ac->memsi = gen_rtx_MEM (SImode, align);
4847 MEM_VOLATILE_P (ac->memsi) = MEM_VOLATILE_P (mem);
4848 set_mem_alias_set (ac->memsi, ALIAS_SET_MEMORY_BARRIER);
4849 set_mem_align (ac->memsi, GET_MODE_BITSIZE (SImode));
4850
4851 /* Calculate shiftcount. */
4852 byteoffset = expand_simple_binop (Pmode, AND, addr,
4853 GEN_INT (GET_MODE_SIZE (SImode) - 1),
4854 NULL_RTX, 1, OPTAB_DIRECT);
4855 /* As we already have some offset, evaluate the remaining distance. */
4856 ac->shift = expand_simple_binop (SImode, MINUS, ac->shift, byteoffset,
4857 NULL_RTX, 1, OPTAB_DIRECT);
4858 }
4859
4860 /* Shift is the byte count, but we need the bitcount. */
4861 ac->shift = expand_simple_binop (SImode, ASHIFT, ac->shift, GEN_INT (3),
4862 NULL_RTX, 1, OPTAB_DIRECT);
4863
4864 /* Calculate masks. */
4865 ac->modemask = expand_simple_binop (SImode, ASHIFT,
4866 GEN_INT (GET_MODE_MASK (mode)),
4867 ac->shift, NULL_RTX, 1, OPTAB_DIRECT);
4868 ac->modemaski = expand_simple_unop (SImode, NOT, ac->modemask,
4869 NULL_RTX, 1);
4870 }
4871
4872 /* A subroutine of s390_expand_cs_hqi. Insert INS into VAL. If possible,
4873 use a single insv insn into SEQ2. Otherwise, put prep insns in SEQ1 and
4874 perform the merge in SEQ2. */
4875
4876 static rtx
4877 s390_two_part_insv (struct alignment_context *ac, rtx *seq1, rtx *seq2,
4878 enum machine_mode mode, rtx val, rtx ins)
4879 {
4880 rtx tmp;
4881
4882 if (ac->aligned)
4883 {
4884 start_sequence ();
4885 tmp = copy_to_mode_reg (SImode, val);
4886 if (s390_expand_insv (tmp, GEN_INT (GET_MODE_BITSIZE (mode)),
4887 const0_rtx, ins))
4888 {
4889 *seq1 = NULL;
4890 *seq2 = get_insns ();
4891 end_sequence ();
4892 return tmp;
4893 }
4894 end_sequence ();
4895 }
4896
4897 /* Failed to use insv. Generate a two part shift and mask. */
4898 start_sequence ();
4899 tmp = s390_expand_mask_and_shift (ins, mode, ac->shift);
4900 *seq1 = get_insns ();
4901 end_sequence ();
4902
4903 start_sequence ();
4904 tmp = expand_simple_binop (SImode, IOR, tmp, val, NULL_RTX, 1, OPTAB_DIRECT);
4905 *seq2 = get_insns ();
4906 end_sequence ();
4907
4908 return tmp;
4909 }
4910
4911 /* Expand an atomic compare and swap operation for HImode and QImode. MEM is
4912 the memory location, CMP the old value to compare MEM with and NEW_RTX the
4913 value to set if CMP == MEM. */
4914
4915 void
4916 s390_expand_cs_hqi (enum machine_mode mode, rtx btarget, rtx vtarget, rtx mem,
4917 rtx cmp, rtx new_rtx, bool is_weak)
4918 {
4919 struct alignment_context ac;
4920 rtx cmpv, newv, val, cc, seq0, seq1, seq2, seq3;
4921 rtx res = gen_reg_rtx (SImode);
4922 rtx csloop = NULL, csend = NULL;
4923
4924 gcc_assert (MEM_P (mem));
4925
4926 init_alignment_context (&ac, mem, mode);
4927
4928 /* Load full word. Subsequent loads are performed by CS. */
4929 val = expand_simple_binop (SImode, AND, ac.memsi, ac.modemaski,
4930 NULL_RTX, 1, OPTAB_DIRECT);
4931
4932 /* Prepare insertions of cmp and new_rtx into the loaded value. When
4933 possible, we try to use insv to make this happen efficiently. If
4934 that fails we'll generate code both inside and outside the loop. */
4935 cmpv = s390_two_part_insv (&ac, &seq0, &seq2, mode, val, cmp);
4936 newv = s390_two_part_insv (&ac, &seq1, &seq3, mode, val, new_rtx);
4937
4938 if (seq0)
4939 emit_insn (seq0);
4940 if (seq1)
4941 emit_insn (seq1);
4942
4943 /* Start CS loop. */
4944 if (!is_weak)
4945 {
4946 /* Begin assuming success. */
4947 emit_move_insn (btarget, const1_rtx);
4948
4949 csloop = gen_label_rtx ();
4950 csend = gen_label_rtx ();
4951 emit_label (csloop);
4952 }
4953
4954 /* val = "<mem>00..0<mem>"
4955 * cmp = "00..0<cmp>00..0"
4956 * new = "00..0<new>00..0"
4957 */
4958
4959 emit_insn (seq2);
4960 emit_insn (seq3);
4961
4962 cc = s390_emit_compare_and_swap (EQ, res, ac.memsi, cmpv, newv);
4963 if (is_weak)
4964 emit_insn (gen_cstorecc4 (btarget, cc, XEXP (cc, 0), XEXP (cc, 1)));
4965 else
4966 {
4967 rtx tmp;
4968
4969 /* Jump to end if we're done (likely?). */
4970 s390_emit_jump (csend, cc);
4971
4972 /* Check for changes outside mode, and loop internal if so.
4973 Arrange the moves so that the compare is adjacent to the
4974 branch so that we can generate CRJ. */
4975 tmp = copy_to_reg (val);
4976 force_expand_binop (SImode, and_optab, res, ac.modemaski, val,
4977 1, OPTAB_DIRECT);
4978 cc = s390_emit_compare (NE, val, tmp);
4979 s390_emit_jump (csloop, cc);
4980
4981 /* Failed. */
4982 emit_move_insn (btarget, const0_rtx);
4983 emit_label (csend);
4984 }
4985
4986 /* Return the correct part of the bitfield. */
4987 convert_move (vtarget, expand_simple_binop (SImode, LSHIFTRT, res, ac.shift,
4988 NULL_RTX, 1, OPTAB_DIRECT), 1);
4989 }
4990
4991 /* Expand an atomic operation CODE of mode MODE. MEM is the memory location
4992 and VAL the value to play with. If AFTER is true then store the value
4993 MEM holds after the operation, if AFTER is false then store the value MEM
4994 holds before the operation. If TARGET is zero then discard that value, else
4995 store it to TARGET. */
4996
4997 void
4998 s390_expand_atomic (enum machine_mode mode, enum rtx_code code,
4999 rtx target, rtx mem, rtx val, bool after)
5000 {
5001 struct alignment_context ac;
5002 rtx cmp;
5003 rtx new_rtx = gen_reg_rtx (SImode);
5004 rtx orig = gen_reg_rtx (SImode);
5005 rtx csloop = gen_label_rtx ();
5006
5007 gcc_assert (!target || register_operand (target, VOIDmode));
5008 gcc_assert (MEM_P (mem));
5009
5010 init_alignment_context (&ac, mem, mode);
5011
5012 /* Shift val to the correct bit positions.
5013 Preserve "icm", but prevent "ex icm". */
5014 if (!(ac.aligned && code == SET && MEM_P (val)))
5015 val = s390_expand_mask_and_shift (val, mode, ac.shift);
5016
5017 /* Further preparation insns. */
5018 if (code == PLUS || code == MINUS)
5019 emit_move_insn (orig, val);
5020 else if (code == MULT || code == AND) /* val = "11..1<val>11..1" */
5021 val = expand_simple_binop (SImode, XOR, val, ac.modemaski,
5022 NULL_RTX, 1, OPTAB_DIRECT);
5023
5024 /* Load full word. Subsequent loads are performed by CS. */
5025 cmp = force_reg (SImode, ac.memsi);
5026
5027 /* Start CS loop. */
5028 emit_label (csloop);
5029 emit_move_insn (new_rtx, cmp);
5030
5031 /* Patch new with val at correct position. */
5032 switch (code)
5033 {
5034 case PLUS:
5035 case MINUS:
5036 val = expand_simple_binop (SImode, code, new_rtx, orig,
5037 NULL_RTX, 1, OPTAB_DIRECT);
5038 val = expand_simple_binop (SImode, AND, val, ac.modemask,
5039 NULL_RTX, 1, OPTAB_DIRECT);
5040 /* FALLTHRU */
5041 case SET:
5042 if (ac.aligned && MEM_P (val))
5043 store_bit_field (new_rtx, GET_MODE_BITSIZE (mode), 0,
5044 0, 0, SImode, val);
5045 else
5046 {
5047 new_rtx = expand_simple_binop (SImode, AND, new_rtx, ac.modemaski,
5048 NULL_RTX, 1, OPTAB_DIRECT);
5049 new_rtx = expand_simple_binop (SImode, IOR, new_rtx, val,
5050 NULL_RTX, 1, OPTAB_DIRECT);
5051 }
5052 break;
5053 case AND:
5054 case IOR:
5055 case XOR:
5056 new_rtx = expand_simple_binop (SImode, code, new_rtx, val,
5057 NULL_RTX, 1, OPTAB_DIRECT);
5058 break;
5059 case MULT: /* NAND */
5060 new_rtx = expand_simple_binop (SImode, AND, new_rtx, val,
5061 NULL_RTX, 1, OPTAB_DIRECT);
5062 new_rtx = expand_simple_binop (SImode, XOR, new_rtx, ac.modemask,
5063 NULL_RTX, 1, OPTAB_DIRECT);
5064 break;
5065 default:
5066 gcc_unreachable ();
5067 }
5068
5069 s390_emit_jump (csloop, s390_emit_compare_and_swap (NE, cmp,
5070 ac.memsi, cmp, new_rtx));
5071
5072 /* Return the correct part of the bitfield. */
5073 if (target)
5074 convert_move (target, expand_simple_binop (SImode, LSHIFTRT,
5075 after ? new_rtx : cmp, ac.shift,
5076 NULL_RTX, 1, OPTAB_DIRECT), 1);
5077 }
5078
5079 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
5080 We need to emit DTP-relative relocations. */
5081
5082 static void s390_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
5083
5084 static void
5085 s390_output_dwarf_dtprel (FILE *file, int size, rtx x)
5086 {
5087 switch (size)
5088 {
5089 case 4:
5090 fputs ("\t.long\t", file);
5091 break;
5092 case 8:
5093 fputs ("\t.quad\t", file);
5094 break;
5095 default:
5096 gcc_unreachable ();
5097 }
5098 output_addr_const (file, x);
5099 fputs ("@DTPOFF", file);
5100 }
5101
5102 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
5103 /* Implement TARGET_MANGLE_TYPE. */
5104
5105 static const char *
5106 s390_mangle_type (const_tree type)
5107 {
5108 if (TYPE_MAIN_VARIANT (type) == long_double_type_node
5109 && TARGET_LONG_DOUBLE_128)
5110 return "g";
5111
5112 /* For all other types, use normal C++ mangling. */
5113 return NULL;
5114 }
5115 #endif
5116
5117 /* In the name of slightly smaller debug output, and to cater to
5118 general assembler lossage, recognize various UNSPEC sequences
5119 and turn them back into a direct symbol reference. */
5120
5121 static rtx
5122 s390_delegitimize_address (rtx orig_x)
5123 {
5124 rtx x, y;
5125
5126 orig_x = delegitimize_mem_from_attrs (orig_x);
5127 x = orig_x;
5128
5129 /* Extract the symbol ref from:
5130 (plus:SI (reg:SI 12 %r12)
5131 (const:SI (unspec:SI [(symbol_ref/f:SI ("*.LC0"))]
5132 UNSPEC_GOTOFF/PLTOFF)))
5133 and
5134 (plus:SI (reg:SI 12 %r12)
5135 (const:SI (plus:SI (unspec:SI [(symbol_ref:SI ("L"))]
5136 UNSPEC_GOTOFF/PLTOFF)
5137 (const_int 4 [0x4])))) */
5138 if (GET_CODE (x) == PLUS
5139 && REG_P (XEXP (x, 0))
5140 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM
5141 && GET_CODE (XEXP (x, 1)) == CONST)
5142 {
5143 HOST_WIDE_INT offset = 0;
5144
5145 /* The const operand. */
5146 y = XEXP (XEXP (x, 1), 0);
5147
5148 if (GET_CODE (y) == PLUS
5149 && GET_CODE (XEXP (y, 1)) == CONST_INT)
5150 {
5151 offset = INTVAL (XEXP (y, 1));
5152 y = XEXP (y, 0);
5153 }
5154
5155 if (GET_CODE (y) == UNSPEC
5156 && (XINT (y, 1) == UNSPEC_GOTOFF
5157 || XINT (y, 1) == UNSPEC_PLTOFF))
5158 return plus_constant (Pmode, XVECEXP (y, 0, 0), offset);
5159 }
5160
5161 if (GET_CODE (x) != MEM)
5162 return orig_x;
5163
5164 x = XEXP (x, 0);
5165 if (GET_CODE (x) == PLUS
5166 && GET_CODE (XEXP (x, 1)) == CONST
5167 && GET_CODE (XEXP (x, 0)) == REG
5168 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM)
5169 {
5170 y = XEXP (XEXP (x, 1), 0);
5171 if (GET_CODE (y) == UNSPEC
5172 && XINT (y, 1) == UNSPEC_GOT)
5173 y = XVECEXP (y, 0, 0);
5174 else
5175 return orig_x;
5176 }
5177 else if (GET_CODE (x) == CONST)
5178 {
5179 /* Extract the symbol ref from:
5180 (mem:QI (const:DI (unspec:DI [(symbol_ref:DI ("foo"))]
5181 UNSPEC_PLT/GOTENT))) */
5182
5183 y = XEXP (x, 0);
5184 if (GET_CODE (y) == UNSPEC
5185 && (XINT (y, 1) == UNSPEC_GOTENT
5186 || XINT (y, 1) == UNSPEC_PLT))
5187 y = XVECEXP (y, 0, 0);
5188 else
5189 return orig_x;
5190 }
5191 else
5192 return orig_x;
5193
5194 if (GET_MODE (orig_x) != Pmode)
5195 {
5196 if (GET_MODE (orig_x) == BLKmode)
5197 return orig_x;
5198 y = lowpart_subreg (GET_MODE (orig_x), y, Pmode);
5199 if (y == NULL_RTX)
5200 return orig_x;
5201 }
5202 return y;
5203 }
5204
5205 /* Output operand OP to stdio stream FILE.
5206 OP is an address (register + offset) which is not used to address data;
5207 instead the rightmost bits are interpreted as the value. */
5208
5209 static void
5210 print_shift_count_operand (FILE *file, rtx op)
5211 {
5212 HOST_WIDE_INT offset;
5213 rtx base;
5214
5215 /* Extract base register and offset. */
5216 if (!s390_decompose_shift_count (op, &base, &offset))
5217 gcc_unreachable ();
5218
5219 /* Sanity check. */
5220 if (base)
5221 {
5222 gcc_assert (GET_CODE (base) == REG);
5223 gcc_assert (REGNO (base) < FIRST_PSEUDO_REGISTER);
5224 gcc_assert (REGNO_REG_CLASS (REGNO (base)) == ADDR_REGS);
5225 }
5226
5227 /* Offsets are constricted to twelve bits. */
5228 fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset & ((1 << 12) - 1));
5229 if (base)
5230 fprintf (file, "(%s)", reg_names[REGNO (base)]);
5231 }
5232
5233 /* See 'get_some_local_dynamic_name'. */
5234
5235 static int
5236 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
5237 {
5238 rtx x = *px;
5239
5240 if (GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
5241 {
5242 x = get_pool_constant (x);
5243 return for_each_rtx (&x, get_some_local_dynamic_name_1, 0);
5244 }
5245
5246 if (GET_CODE (x) == SYMBOL_REF
5247 && tls_symbolic_operand (x) == TLS_MODEL_LOCAL_DYNAMIC)
5248 {
5249 cfun->machine->some_ld_name = XSTR (x, 0);
5250 return 1;
5251 }
5252
5253 return 0;
5254 }
5255
5256 /* Locate some local-dynamic symbol still in use by this function
5257 so that we can print its name in local-dynamic base patterns. */
5258
5259 static const char *
5260 get_some_local_dynamic_name (void)
5261 {
5262 rtx insn;
5263
5264 if (cfun->machine->some_ld_name)
5265 return cfun->machine->some_ld_name;
5266
5267 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
5268 if (INSN_P (insn)
5269 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
5270 return cfun->machine->some_ld_name;
5271
5272 gcc_unreachable ();
5273 }
5274
5275 /* Output machine-dependent UNSPECs occurring in address constant X
5276 in assembler syntax to stdio stream FILE. Returns true if the
5277 constant X could be recognized, false otherwise. */
5278
5279 static bool
5280 s390_output_addr_const_extra (FILE *file, rtx x)
5281 {
5282 if (GET_CODE (x) == UNSPEC && XVECLEN (x, 0) == 1)
5283 switch (XINT (x, 1))
5284 {
5285 case UNSPEC_GOTENT:
5286 output_addr_const (file, XVECEXP (x, 0, 0));
5287 fprintf (file, "@GOTENT");
5288 return true;
5289 case UNSPEC_GOT:
5290 output_addr_const (file, XVECEXP (x, 0, 0));
5291 fprintf (file, "@GOT");
5292 return true;
5293 case UNSPEC_GOTOFF:
5294 output_addr_const (file, XVECEXP (x, 0, 0));
5295 fprintf (file, "@GOTOFF");
5296 return true;
5297 case UNSPEC_PLT:
5298 output_addr_const (file, XVECEXP (x, 0, 0));
5299 fprintf (file, "@PLT");
5300 return true;
5301 case UNSPEC_PLTOFF:
5302 output_addr_const (file, XVECEXP (x, 0, 0));
5303 fprintf (file, "@PLTOFF");
5304 return true;
5305 case UNSPEC_TLSGD:
5306 output_addr_const (file, XVECEXP (x, 0, 0));
5307 fprintf (file, "@TLSGD");
5308 return true;
5309 case UNSPEC_TLSLDM:
5310 assemble_name (file, get_some_local_dynamic_name ());
5311 fprintf (file, "@TLSLDM");
5312 return true;
5313 case UNSPEC_DTPOFF:
5314 output_addr_const (file, XVECEXP (x, 0, 0));
5315 fprintf (file, "@DTPOFF");
5316 return true;
5317 case UNSPEC_NTPOFF:
5318 output_addr_const (file, XVECEXP (x, 0, 0));
5319 fprintf (file, "@NTPOFF");
5320 return true;
5321 case UNSPEC_GOTNTPOFF:
5322 output_addr_const (file, XVECEXP (x, 0, 0));
5323 fprintf (file, "@GOTNTPOFF");
5324 return true;
5325 case UNSPEC_INDNTPOFF:
5326 output_addr_const (file, XVECEXP (x, 0, 0));
5327 fprintf (file, "@INDNTPOFF");
5328 return true;
5329 }
5330
5331 if (GET_CODE (x) == UNSPEC && XVECLEN (x, 0) == 2)
5332 switch (XINT (x, 1))
5333 {
5334 case UNSPEC_POOL_OFFSET:
5335 x = gen_rtx_MINUS (GET_MODE (x), XVECEXP (x, 0, 0), XVECEXP (x, 0, 1));
5336 output_addr_const (file, x);
5337 return true;
5338 }
5339 return false;
5340 }
5341
5342 /* Output address operand ADDR in assembler syntax to
5343 stdio stream FILE. */
5344
5345 void
5346 print_operand_address (FILE *file, rtx addr)
5347 {
5348 struct s390_address ad;
5349
5350 if (s390_loadrelative_operand_p (addr, NULL, NULL))
5351 {
5352 if (!TARGET_Z10)
5353 {
5354 output_operand_lossage ("symbolic memory references are "
5355 "only supported on z10 or later");
5356 return;
5357 }
5358 output_addr_const (file, addr);
5359 return;
5360 }
5361
5362 if (!s390_decompose_address (addr, &ad)
5363 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
5364 || (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx))))
5365 output_operand_lossage ("cannot decompose address");
5366
5367 if (ad.disp)
5368 output_addr_const (file, ad.disp);
5369 else
5370 fprintf (file, "0");
5371
5372 if (ad.base && ad.indx)
5373 fprintf (file, "(%s,%s)", reg_names[REGNO (ad.indx)],
5374 reg_names[REGNO (ad.base)]);
5375 else if (ad.base)
5376 fprintf (file, "(%s)", reg_names[REGNO (ad.base)]);
5377 }
5378
5379 /* Output operand X in assembler syntax to stdio stream FILE.
5380 CODE specified the format flag. The following format flags
5381 are recognized:
5382
5383 'C': print opcode suffix for branch condition.
5384 'D': print opcode suffix for inverse branch condition.
5385 'E': print opcode suffix for branch on index instruction.
5386 'G': print the size of the operand in bytes.
5387 'J': print tls_load/tls_gdcall/tls_ldcall suffix
5388 'M': print the second word of a TImode operand.
5389 'N': print the second word of a DImode operand.
5390 'O': print only the displacement of a memory reference.
5391 'R': print only the base register of a memory reference.
5392 'S': print S-type memory reference (base+displacement).
5393 'Y': print shift count operand.
5394
5395 'b': print integer X as if it's an unsigned byte.
5396 'c': print integer X as if it's an signed byte.
5397 'e': "end" of DImode contiguous bitmask X.
5398 'f': "end" of SImode contiguous bitmask X.
5399 'h': print integer X as if it's a signed halfword.
5400 'i': print the first nonzero HImode part of X.
5401 'j': print the first HImode part unequal to -1 of X.
5402 'k': print the first nonzero SImode part of X.
5403 'm': print the first SImode part unequal to -1 of X.
5404 'o': print integer X as if it's an unsigned 32bit word.
5405 's': "start" of DImode contiguous bitmask X.
5406 't': "start" of SImode contiguous bitmask X.
5407 'x': print integer X as if it's an unsigned halfword.
5408 */
5409
5410 void
5411 print_operand (FILE *file, rtx x, int code)
5412 {
5413 HOST_WIDE_INT ival;
5414
5415 switch (code)
5416 {
5417 case 'C':
5418 fprintf (file, s390_branch_condition_mnemonic (x, FALSE));
5419 return;
5420
5421 case 'D':
5422 fprintf (file, s390_branch_condition_mnemonic (x, TRUE));
5423 return;
5424
5425 case 'E':
5426 if (GET_CODE (x) == LE)
5427 fprintf (file, "l");
5428 else if (GET_CODE (x) == GT)
5429 fprintf (file, "h");
5430 else
5431 output_operand_lossage ("invalid comparison operator "
5432 "for 'E' output modifier");
5433 return;
5434
5435 case 'J':
5436 if (GET_CODE (x) == SYMBOL_REF)
5437 {
5438 fprintf (file, "%s", ":tls_load:");
5439 output_addr_const (file, x);
5440 }
5441 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD)
5442 {
5443 fprintf (file, "%s", ":tls_gdcall:");
5444 output_addr_const (file, XVECEXP (x, 0, 0));
5445 }
5446 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSLDM)
5447 {
5448 fprintf (file, "%s", ":tls_ldcall:");
5449 assemble_name (file, get_some_local_dynamic_name ());
5450 }
5451 else
5452 output_operand_lossage ("invalid reference for 'J' output modifier");
5453 return;
5454
5455 case 'G':
5456 fprintf (file, "%u", GET_MODE_SIZE (GET_MODE (x)));
5457 return;
5458
5459 case 'O':
5460 {
5461 struct s390_address ad;
5462 int ret;
5463
5464 if (!MEM_P (x))
5465 {
5466 output_operand_lossage ("memory reference expected for "
5467 "'O' output modifier");
5468 return;
5469 }
5470
5471 ret = s390_decompose_address (XEXP (x, 0), &ad);
5472
5473 if (!ret
5474 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
5475 || ad.indx)
5476 {
5477 output_operand_lossage ("invalid address for 'O' output modifier");
5478 return;
5479 }
5480
5481 if (ad.disp)
5482 output_addr_const (file, ad.disp);
5483 else
5484 fprintf (file, "0");
5485 }
5486 return;
5487
5488 case 'R':
5489 {
5490 struct s390_address ad;
5491 int ret;
5492
5493 if (!MEM_P (x))
5494 {
5495 output_operand_lossage ("memory reference expected for "
5496 "'R' output modifier");
5497 return;
5498 }
5499
5500 ret = s390_decompose_address (XEXP (x, 0), &ad);
5501
5502 if (!ret
5503 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
5504 || ad.indx)
5505 {
5506 output_operand_lossage ("invalid address for 'R' output modifier");
5507 return;
5508 }
5509
5510 if (ad.base)
5511 fprintf (file, "%s", reg_names[REGNO (ad.base)]);
5512 else
5513 fprintf (file, "0");
5514 }
5515 return;
5516
5517 case 'S':
5518 {
5519 struct s390_address ad;
5520 int ret;
5521
5522 if (!MEM_P (x))
5523 {
5524 output_operand_lossage ("memory reference expected for "
5525 "'S' output modifier");
5526 return;
5527 }
5528 ret = s390_decompose_address (XEXP (x, 0), &ad);
5529
5530 if (!ret
5531 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
5532 || ad.indx)
5533 {
5534 output_operand_lossage ("invalid address for 'S' output modifier");
5535 return;
5536 }
5537
5538 if (ad.disp)
5539 output_addr_const (file, ad.disp);
5540 else
5541 fprintf (file, "0");
5542
5543 if (ad.base)
5544 fprintf (file, "(%s)", reg_names[REGNO (ad.base)]);
5545 }
5546 return;
5547
5548 case 'N':
5549 if (GET_CODE (x) == REG)
5550 x = gen_rtx_REG (GET_MODE (x), REGNO (x) + 1);
5551 else if (GET_CODE (x) == MEM)
5552 x = change_address (x, VOIDmode,
5553 plus_constant (Pmode, XEXP (x, 0), 4));
5554 else
5555 output_operand_lossage ("register or memory expression expected "
5556 "for 'N' output modifier");
5557 break;
5558
5559 case 'M':
5560 if (GET_CODE (x) == REG)
5561 x = gen_rtx_REG (GET_MODE (x), REGNO (x) + 1);
5562 else if (GET_CODE (x) == MEM)
5563 x = change_address (x, VOIDmode,
5564 plus_constant (Pmode, XEXP (x, 0), 8));
5565 else
5566 output_operand_lossage ("register or memory expression expected "
5567 "for 'M' output modifier");
5568 break;
5569
5570 case 'Y':
5571 print_shift_count_operand (file, x);
5572 return;
5573 }
5574
5575 switch (GET_CODE (x))
5576 {
5577 case REG:
5578 fprintf (file, "%s", reg_names[REGNO (x)]);
5579 break;
5580
5581 case MEM:
5582 output_address (XEXP (x, 0));
5583 break;
5584
5585 case CONST:
5586 case CODE_LABEL:
5587 case LABEL_REF:
5588 case SYMBOL_REF:
5589 output_addr_const (file, x);
5590 break;
5591
5592 case CONST_INT:
5593 ival = INTVAL (x);
5594 switch (code)
5595 {
5596 case 0:
5597 break;
5598 case 'b':
5599 ival &= 0xff;
5600 break;
5601 case 'c':
5602 ival = ((ival & 0xff) ^ 0x80) - 0x80;
5603 break;
5604 case 'x':
5605 ival &= 0xffff;
5606 break;
5607 case 'h':
5608 ival = ((ival & 0xffff) ^ 0x8000) - 0x8000;
5609 break;
5610 case 'i':
5611 ival = s390_extract_part (x, HImode, 0);
5612 break;
5613 case 'j':
5614 ival = s390_extract_part (x, HImode, -1);
5615 break;
5616 case 'k':
5617 ival = s390_extract_part (x, SImode, 0);
5618 break;
5619 case 'm':
5620 ival = s390_extract_part (x, SImode, -1);
5621 break;
5622 case 'o':
5623 ival &= 0xffffffff;
5624 break;
5625 case 'e': case 'f':
5626 case 's': case 't':
5627 {
5628 int pos, len;
5629 bool ok;
5630
5631 len = (code == 's' || code == 'e' ? 64 : 32);
5632 ok = s390_contiguous_bitmask_p (ival, len, &pos, &len);
5633 gcc_assert (ok);
5634 if (code == 's' || code == 't')
5635 ival = 64 - pos - len;
5636 else
5637 ival = 64 - 1 - pos;
5638 }
5639 break;
5640 default:
5641 output_operand_lossage ("invalid constant for output modifier '%c'", code);
5642 }
5643 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
5644 break;
5645
5646 case CONST_DOUBLE:
5647 gcc_assert (GET_MODE (x) == VOIDmode);
5648 if (code == 'b')
5649 fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x) & 0xff);
5650 else if (code == 'x')
5651 fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x) & 0xffff);
5652 else if (code == 'h')
5653 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5654 ((CONST_DOUBLE_LOW (x) & 0xffff) ^ 0x8000) - 0x8000);
5655 else
5656 {
5657 if (code == 0)
5658 output_operand_lossage ("invalid constant - try using "
5659 "an output modifier");
5660 else
5661 output_operand_lossage ("invalid constant for output modifier '%c'",
5662 code);
5663 }
5664 break;
5665
5666 default:
5667 if (code == 0)
5668 output_operand_lossage ("invalid expression - try using "
5669 "an output modifier");
5670 else
5671 output_operand_lossage ("invalid expression for output "
5672 "modifier '%c'", code);
5673 break;
5674 }
5675 }
5676
5677 /* Target hook for assembling integer objects. We need to define it
5678 here to work a round a bug in some versions of GAS, which couldn't
5679 handle values smaller than INT_MIN when printed in decimal. */
5680
5681 static bool
5682 s390_assemble_integer (rtx x, unsigned int size, int aligned_p)
5683 {
5684 if (size == 8 && aligned_p
5685 && GET_CODE (x) == CONST_INT && INTVAL (x) < INT_MIN)
5686 {
5687 fprintf (asm_out_file, "\t.quad\t" HOST_WIDE_INT_PRINT_HEX "\n",
5688 INTVAL (x));
5689 return true;
5690 }
5691 return default_assemble_integer (x, size, aligned_p);
5692 }
5693
5694 /* Returns true if register REGNO is used for forming
5695 a memory address in expression X. */
5696
5697 static bool
5698 reg_used_in_mem_p (int regno, rtx x)
5699 {
5700 enum rtx_code code = GET_CODE (x);
5701 int i, j;
5702 const char *fmt;
5703
5704 if (code == MEM)
5705 {
5706 if (refers_to_regno_p (regno, regno+1,
5707 XEXP (x, 0), 0))
5708 return true;
5709 }
5710 else if (code == SET
5711 && GET_CODE (SET_DEST (x)) == PC)
5712 {
5713 if (refers_to_regno_p (regno, regno+1,
5714 SET_SRC (x), 0))
5715 return true;
5716 }
5717
5718 fmt = GET_RTX_FORMAT (code);
5719 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
5720 {
5721 if (fmt[i] == 'e'
5722 && reg_used_in_mem_p (regno, XEXP (x, i)))
5723 return true;
5724
5725 else if (fmt[i] == 'E')
5726 for (j = 0; j < XVECLEN (x, i); j++)
5727 if (reg_used_in_mem_p (regno, XVECEXP (x, i, j)))
5728 return true;
5729 }
5730 return false;
5731 }
5732
5733 /* Returns true if expression DEP_RTX sets an address register
5734 used by instruction INSN to address memory. */
5735
5736 static bool
5737 addr_generation_dependency_p (rtx dep_rtx, rtx insn)
5738 {
5739 rtx target, pat;
5740
5741 if (NONJUMP_INSN_P (dep_rtx))
5742 dep_rtx = PATTERN (dep_rtx);
5743
5744 if (GET_CODE (dep_rtx) == SET)
5745 {
5746 target = SET_DEST (dep_rtx);
5747 if (GET_CODE (target) == STRICT_LOW_PART)
5748 target = XEXP (target, 0);
5749 while (GET_CODE (target) == SUBREG)
5750 target = SUBREG_REG (target);
5751
5752 if (GET_CODE (target) == REG)
5753 {
5754 int regno = REGNO (target);
5755
5756 if (s390_safe_attr_type (insn) == TYPE_LA)
5757 {
5758 pat = PATTERN (insn);
5759 if (GET_CODE (pat) == PARALLEL)
5760 {
5761 gcc_assert (XVECLEN (pat, 0) == 2);
5762 pat = XVECEXP (pat, 0, 0);
5763 }
5764 gcc_assert (GET_CODE (pat) == SET);
5765 return refers_to_regno_p (regno, regno+1, SET_SRC (pat), 0);
5766 }
5767 else if (get_attr_atype (insn) == ATYPE_AGEN)
5768 return reg_used_in_mem_p (regno, PATTERN (insn));
5769 }
5770 }
5771 return false;
5772 }
5773
5774 /* Return 1, if dep_insn sets register used in insn in the agen unit. */
5775
5776 int
5777 s390_agen_dep_p (rtx dep_insn, rtx insn)
5778 {
5779 rtx dep_rtx = PATTERN (dep_insn);
5780 int i;
5781
5782 if (GET_CODE (dep_rtx) == SET
5783 && addr_generation_dependency_p (dep_rtx, insn))
5784 return 1;
5785 else if (GET_CODE (dep_rtx) == PARALLEL)
5786 {
5787 for (i = 0; i < XVECLEN (dep_rtx, 0); i++)
5788 {
5789 if (addr_generation_dependency_p (XVECEXP (dep_rtx, 0, i), insn))
5790 return 1;
5791 }
5792 }
5793 return 0;
5794 }
5795
5796
5797 /* A C statement (sans semicolon) to update the integer scheduling priority
5798 INSN_PRIORITY (INSN). Increase the priority to execute the INSN earlier,
5799 reduce the priority to execute INSN later. Do not define this macro if
5800 you do not need to adjust the scheduling priorities of insns.
5801
5802 A STD instruction should be scheduled earlier,
5803 in order to use the bypass. */
5804 static int
5805 s390_adjust_priority (rtx insn ATTRIBUTE_UNUSED, int priority)
5806 {
5807 if (! INSN_P (insn))
5808 return priority;
5809
5810 if (s390_tune != PROCESSOR_2084_Z990
5811 && s390_tune != PROCESSOR_2094_Z9_109
5812 && s390_tune != PROCESSOR_2097_Z10
5813 && s390_tune != PROCESSOR_2817_Z196
5814 && s390_tune != PROCESSOR_2827_ZEC12)
5815 return priority;
5816
5817 switch (s390_safe_attr_type (insn))
5818 {
5819 case TYPE_FSTOREDF:
5820 case TYPE_FSTORESF:
5821 priority = priority << 3;
5822 break;
5823 case TYPE_STORE:
5824 case TYPE_STM:
5825 priority = priority << 1;
5826 break;
5827 default:
5828 break;
5829 }
5830 return priority;
5831 }
5832
5833
5834 /* The number of instructions that can be issued per cycle. */
5835
5836 static int
5837 s390_issue_rate (void)
5838 {
5839 switch (s390_tune)
5840 {
5841 case PROCESSOR_2084_Z990:
5842 case PROCESSOR_2094_Z9_109:
5843 case PROCESSOR_2817_Z196:
5844 return 3;
5845 case PROCESSOR_2097_Z10:
5846 case PROCESSOR_2827_ZEC12:
5847 return 2;
5848 default:
5849 return 1;
5850 }
5851 }
5852
5853 static int
5854 s390_first_cycle_multipass_dfa_lookahead (void)
5855 {
5856 return 4;
5857 }
5858
5859 /* Annotate every literal pool reference in X by an UNSPEC_LTREF expression.
5860 Fix up MEMs as required. */
5861
5862 static void
5863 annotate_constant_pool_refs (rtx *x)
5864 {
5865 int i, j;
5866 const char *fmt;
5867
5868 gcc_assert (GET_CODE (*x) != SYMBOL_REF
5869 || !CONSTANT_POOL_ADDRESS_P (*x));
5870
5871 /* Literal pool references can only occur inside a MEM ... */
5872 if (GET_CODE (*x) == MEM)
5873 {
5874 rtx memref = XEXP (*x, 0);
5875
5876 if (GET_CODE (memref) == SYMBOL_REF
5877 && CONSTANT_POOL_ADDRESS_P (memref))
5878 {
5879 rtx base = cfun->machine->base_reg;
5880 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, memref, base),
5881 UNSPEC_LTREF);
5882
5883 *x = replace_equiv_address (*x, addr);
5884 return;
5885 }
5886
5887 if (GET_CODE (memref) == CONST
5888 && GET_CODE (XEXP (memref, 0)) == PLUS
5889 && GET_CODE (XEXP (XEXP (memref, 0), 1)) == CONST_INT
5890 && GET_CODE (XEXP (XEXP (memref, 0), 0)) == SYMBOL_REF
5891 && CONSTANT_POOL_ADDRESS_P (XEXP (XEXP (memref, 0), 0)))
5892 {
5893 HOST_WIDE_INT off = INTVAL (XEXP (XEXP (memref, 0), 1));
5894 rtx sym = XEXP (XEXP (memref, 0), 0);
5895 rtx base = cfun->machine->base_reg;
5896 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, sym, base),
5897 UNSPEC_LTREF);
5898
5899 *x = replace_equiv_address (*x, plus_constant (Pmode, addr, off));
5900 return;
5901 }
5902 }
5903
5904 /* ... or a load-address type pattern. */
5905 if (GET_CODE (*x) == SET)
5906 {
5907 rtx addrref = SET_SRC (*x);
5908
5909 if (GET_CODE (addrref) == SYMBOL_REF
5910 && CONSTANT_POOL_ADDRESS_P (addrref))
5911 {
5912 rtx base = cfun->machine->base_reg;
5913 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, addrref, base),
5914 UNSPEC_LTREF);
5915
5916 SET_SRC (*x) = addr;
5917 return;
5918 }
5919
5920 if (GET_CODE (addrref) == CONST
5921 && GET_CODE (XEXP (addrref, 0)) == PLUS
5922 && GET_CODE (XEXP (XEXP (addrref, 0), 1)) == CONST_INT
5923 && GET_CODE (XEXP (XEXP (addrref, 0), 0)) == SYMBOL_REF
5924 && CONSTANT_POOL_ADDRESS_P (XEXP (XEXP (addrref, 0), 0)))
5925 {
5926 HOST_WIDE_INT off = INTVAL (XEXP (XEXP (addrref, 0), 1));
5927 rtx sym = XEXP (XEXP (addrref, 0), 0);
5928 rtx base = cfun->machine->base_reg;
5929 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, sym, base),
5930 UNSPEC_LTREF);
5931
5932 SET_SRC (*x) = plus_constant (Pmode, addr, off);
5933 return;
5934 }
5935 }
5936
5937 /* Annotate LTREL_BASE as well. */
5938 if (GET_CODE (*x) == UNSPEC
5939 && XINT (*x, 1) == UNSPEC_LTREL_BASE)
5940 {
5941 rtx base = cfun->machine->base_reg;
5942 *x = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, XVECEXP (*x, 0, 0), base),
5943 UNSPEC_LTREL_BASE);
5944 return;
5945 }
5946
5947 fmt = GET_RTX_FORMAT (GET_CODE (*x));
5948 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
5949 {
5950 if (fmt[i] == 'e')
5951 {
5952 annotate_constant_pool_refs (&XEXP (*x, i));
5953 }
5954 else if (fmt[i] == 'E')
5955 {
5956 for (j = 0; j < XVECLEN (*x, i); j++)
5957 annotate_constant_pool_refs (&XVECEXP (*x, i, j));
5958 }
5959 }
5960 }
5961
5962 /* Split all branches that exceed the maximum distance.
5963 Returns true if this created a new literal pool entry. */
5964
5965 static int
5966 s390_split_branches (void)
5967 {
5968 rtx temp_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
5969 int new_literal = 0, ret;
5970 rtx insn, pat, tmp, target;
5971 rtx *label;
5972
5973 /* We need correct insn addresses. */
5974
5975 shorten_branches (get_insns ());
5976
5977 /* Find all branches that exceed 64KB, and split them. */
5978
5979 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
5980 {
5981 if (! JUMP_P (insn))
5982 continue;
5983
5984 pat = PATTERN (insn);
5985 if (GET_CODE (pat) == PARALLEL && XVECLEN (pat, 0) > 2)
5986 pat = XVECEXP (pat, 0, 0);
5987 if (GET_CODE (pat) != SET || SET_DEST (pat) != pc_rtx)
5988 continue;
5989
5990 if (GET_CODE (SET_SRC (pat)) == LABEL_REF)
5991 {
5992 label = &SET_SRC (pat);
5993 }
5994 else if (GET_CODE (SET_SRC (pat)) == IF_THEN_ELSE)
5995 {
5996 if (GET_CODE (XEXP (SET_SRC (pat), 1)) == LABEL_REF)
5997 label = &XEXP (SET_SRC (pat), 1);
5998 else if (GET_CODE (XEXP (SET_SRC (pat), 2)) == LABEL_REF)
5999 label = &XEXP (SET_SRC (pat), 2);
6000 else
6001 continue;
6002 }
6003 else
6004 continue;
6005
6006 if (get_attr_length (insn) <= 4)
6007 continue;
6008
6009 /* We are going to use the return register as scratch register,
6010 make sure it will be saved/restored by the prologue/epilogue. */
6011 cfun_frame_layout.save_return_addr_p = 1;
6012
6013 if (!flag_pic)
6014 {
6015 new_literal = 1;
6016 tmp = force_const_mem (Pmode, *label);
6017 tmp = emit_insn_before (gen_rtx_SET (Pmode, temp_reg, tmp), insn);
6018 INSN_ADDRESSES_NEW (tmp, -1);
6019 annotate_constant_pool_refs (&PATTERN (tmp));
6020
6021 target = temp_reg;
6022 }
6023 else
6024 {
6025 new_literal = 1;
6026 target = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, *label),
6027 UNSPEC_LTREL_OFFSET);
6028 target = gen_rtx_CONST (Pmode, target);
6029 target = force_const_mem (Pmode, target);
6030 tmp = emit_insn_before (gen_rtx_SET (Pmode, temp_reg, target), insn);
6031 INSN_ADDRESSES_NEW (tmp, -1);
6032 annotate_constant_pool_refs (&PATTERN (tmp));
6033
6034 target = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, XEXP (target, 0),
6035 cfun->machine->base_reg),
6036 UNSPEC_LTREL_BASE);
6037 target = gen_rtx_PLUS (Pmode, temp_reg, target);
6038 }
6039
6040 ret = validate_change (insn, label, target, 0);
6041 gcc_assert (ret);
6042 }
6043
6044 return new_literal;
6045 }
6046
6047
6048 /* Find an annotated literal pool symbol referenced in RTX X,
6049 and store it at REF. Will abort if X contains references to
6050 more than one such pool symbol; multiple references to the same
6051 symbol are allowed, however.
6052
6053 The rtx pointed to by REF must be initialized to NULL_RTX
6054 by the caller before calling this routine. */
6055
6056 static void
6057 find_constant_pool_ref (rtx x, rtx *ref)
6058 {
6059 int i, j;
6060 const char *fmt;
6061
6062 /* Ignore LTREL_BASE references. */
6063 if (GET_CODE (x) == UNSPEC
6064 && XINT (x, 1) == UNSPEC_LTREL_BASE)
6065 return;
6066 /* Likewise POOL_ENTRY insns. */
6067 if (GET_CODE (x) == UNSPEC_VOLATILE
6068 && XINT (x, 1) == UNSPECV_POOL_ENTRY)
6069 return;
6070
6071 gcc_assert (GET_CODE (x) != SYMBOL_REF
6072 || !CONSTANT_POOL_ADDRESS_P (x));
6073
6074 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_LTREF)
6075 {
6076 rtx sym = XVECEXP (x, 0, 0);
6077 gcc_assert (GET_CODE (sym) == SYMBOL_REF
6078 && CONSTANT_POOL_ADDRESS_P (sym));
6079
6080 if (*ref == NULL_RTX)
6081 *ref = sym;
6082 else
6083 gcc_assert (*ref == sym);
6084
6085 return;
6086 }
6087
6088 fmt = GET_RTX_FORMAT (GET_CODE (x));
6089 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
6090 {
6091 if (fmt[i] == 'e')
6092 {
6093 find_constant_pool_ref (XEXP (x, i), ref);
6094 }
6095 else if (fmt[i] == 'E')
6096 {
6097 for (j = 0; j < XVECLEN (x, i); j++)
6098 find_constant_pool_ref (XVECEXP (x, i, j), ref);
6099 }
6100 }
6101 }
6102
6103 /* Replace every reference to the annotated literal pool
6104 symbol REF in X by its base plus OFFSET. */
6105
6106 static void
6107 replace_constant_pool_ref (rtx *x, rtx ref, rtx offset)
6108 {
6109 int i, j;
6110 const char *fmt;
6111
6112 gcc_assert (*x != ref);
6113
6114 if (GET_CODE (*x) == UNSPEC
6115 && XINT (*x, 1) == UNSPEC_LTREF
6116 && XVECEXP (*x, 0, 0) == ref)
6117 {
6118 *x = gen_rtx_PLUS (Pmode, XVECEXP (*x, 0, 1), offset);
6119 return;
6120 }
6121
6122 if (GET_CODE (*x) == PLUS
6123 && GET_CODE (XEXP (*x, 1)) == CONST_INT
6124 && GET_CODE (XEXP (*x, 0)) == UNSPEC
6125 && XINT (XEXP (*x, 0), 1) == UNSPEC_LTREF
6126 && XVECEXP (XEXP (*x, 0), 0, 0) == ref)
6127 {
6128 rtx addr = gen_rtx_PLUS (Pmode, XVECEXP (XEXP (*x, 0), 0, 1), offset);
6129 *x = plus_constant (Pmode, addr, INTVAL (XEXP (*x, 1)));
6130 return;
6131 }
6132
6133 fmt = GET_RTX_FORMAT (GET_CODE (*x));
6134 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
6135 {
6136 if (fmt[i] == 'e')
6137 {
6138 replace_constant_pool_ref (&XEXP (*x, i), ref, offset);
6139 }
6140 else if (fmt[i] == 'E')
6141 {
6142 for (j = 0; j < XVECLEN (*x, i); j++)
6143 replace_constant_pool_ref (&XVECEXP (*x, i, j), ref, offset);
6144 }
6145 }
6146 }
6147
6148 /* Check whether X contains an UNSPEC_LTREL_BASE.
6149 Return its constant pool symbol if found, NULL_RTX otherwise. */
6150
6151 static rtx
6152 find_ltrel_base (rtx x)
6153 {
6154 int i, j;
6155 const char *fmt;
6156
6157 if (GET_CODE (x) == UNSPEC
6158 && XINT (x, 1) == UNSPEC_LTREL_BASE)
6159 return XVECEXP (x, 0, 0);
6160
6161 fmt = GET_RTX_FORMAT (GET_CODE (x));
6162 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
6163 {
6164 if (fmt[i] == 'e')
6165 {
6166 rtx fnd = find_ltrel_base (XEXP (x, i));
6167 if (fnd)
6168 return fnd;
6169 }
6170 else if (fmt[i] == 'E')
6171 {
6172 for (j = 0; j < XVECLEN (x, i); j++)
6173 {
6174 rtx fnd = find_ltrel_base (XVECEXP (x, i, j));
6175 if (fnd)
6176 return fnd;
6177 }
6178 }
6179 }
6180
6181 return NULL_RTX;
6182 }
6183
6184 /* Replace any occurrence of UNSPEC_LTREL_BASE in X with its base. */
6185
6186 static void
6187 replace_ltrel_base (rtx *x)
6188 {
6189 int i, j;
6190 const char *fmt;
6191
6192 if (GET_CODE (*x) == UNSPEC
6193 && XINT (*x, 1) == UNSPEC_LTREL_BASE)
6194 {
6195 *x = XVECEXP (*x, 0, 1);
6196 return;
6197 }
6198
6199 fmt = GET_RTX_FORMAT (GET_CODE (*x));
6200 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
6201 {
6202 if (fmt[i] == 'e')
6203 {
6204 replace_ltrel_base (&XEXP (*x, i));
6205 }
6206 else if (fmt[i] == 'E')
6207 {
6208 for (j = 0; j < XVECLEN (*x, i); j++)
6209 replace_ltrel_base (&XVECEXP (*x, i, j));
6210 }
6211 }
6212 }
6213
6214
6215 /* We keep a list of constants which we have to add to internal
6216 constant tables in the middle of large functions. */
6217
6218 #define NR_C_MODES 11
6219 enum machine_mode constant_modes[NR_C_MODES] =
6220 {
6221 TFmode, TImode, TDmode,
6222 DFmode, DImode, DDmode,
6223 SFmode, SImode, SDmode,
6224 HImode,
6225 QImode
6226 };
6227
6228 struct constant
6229 {
6230 struct constant *next;
6231 rtx value;
6232 rtx label;
6233 };
6234
6235 struct constant_pool
6236 {
6237 struct constant_pool *next;
6238 rtx first_insn;
6239 rtx pool_insn;
6240 bitmap insns;
6241 rtx emit_pool_after;
6242
6243 struct constant *constants[NR_C_MODES];
6244 struct constant *execute;
6245 rtx label;
6246 int size;
6247 };
6248
6249 /* Allocate new constant_pool structure. */
6250
6251 static struct constant_pool *
6252 s390_alloc_pool (void)
6253 {
6254 struct constant_pool *pool;
6255 int i;
6256
6257 pool = (struct constant_pool *) xmalloc (sizeof *pool);
6258 pool->next = NULL;
6259 for (i = 0; i < NR_C_MODES; i++)
6260 pool->constants[i] = NULL;
6261
6262 pool->execute = NULL;
6263 pool->label = gen_label_rtx ();
6264 pool->first_insn = NULL_RTX;
6265 pool->pool_insn = NULL_RTX;
6266 pool->insns = BITMAP_ALLOC (NULL);
6267 pool->size = 0;
6268 pool->emit_pool_after = NULL_RTX;
6269
6270 return pool;
6271 }
6272
6273 /* Create new constant pool covering instructions starting at INSN
6274 and chain it to the end of POOL_LIST. */
6275
6276 static struct constant_pool *
6277 s390_start_pool (struct constant_pool **pool_list, rtx insn)
6278 {
6279 struct constant_pool *pool, **prev;
6280
6281 pool = s390_alloc_pool ();
6282 pool->first_insn = insn;
6283
6284 for (prev = pool_list; *prev; prev = &(*prev)->next)
6285 ;
6286 *prev = pool;
6287
6288 return pool;
6289 }
6290
6291 /* End range of instructions covered by POOL at INSN and emit
6292 placeholder insn representing the pool. */
6293
6294 static void
6295 s390_end_pool (struct constant_pool *pool, rtx insn)
6296 {
6297 rtx pool_size = GEN_INT (pool->size + 8 /* alignment slop */);
6298
6299 if (!insn)
6300 insn = get_last_insn ();
6301
6302 pool->pool_insn = emit_insn_after (gen_pool (pool_size), insn);
6303 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
6304 }
6305
6306 /* Add INSN to the list of insns covered by POOL. */
6307
6308 static void
6309 s390_add_pool_insn (struct constant_pool *pool, rtx insn)
6310 {
6311 bitmap_set_bit (pool->insns, INSN_UID (insn));
6312 }
6313
6314 /* Return pool out of POOL_LIST that covers INSN. */
6315
6316 static struct constant_pool *
6317 s390_find_pool (struct constant_pool *pool_list, rtx insn)
6318 {
6319 struct constant_pool *pool;
6320
6321 for (pool = pool_list; pool; pool = pool->next)
6322 if (bitmap_bit_p (pool->insns, INSN_UID (insn)))
6323 break;
6324
6325 return pool;
6326 }
6327
6328 /* Add constant VAL of mode MODE to the constant pool POOL. */
6329
6330 static void
6331 s390_add_constant (struct constant_pool *pool, rtx val, enum machine_mode mode)
6332 {
6333 struct constant *c;
6334 int i;
6335
6336 for (i = 0; i < NR_C_MODES; i++)
6337 if (constant_modes[i] == mode)
6338 break;
6339 gcc_assert (i != NR_C_MODES);
6340
6341 for (c = pool->constants[i]; c != NULL; c = c->next)
6342 if (rtx_equal_p (val, c->value))
6343 break;
6344
6345 if (c == NULL)
6346 {
6347 c = (struct constant *) xmalloc (sizeof *c);
6348 c->value = val;
6349 c->label = gen_label_rtx ();
6350 c->next = pool->constants[i];
6351 pool->constants[i] = c;
6352 pool->size += GET_MODE_SIZE (mode);
6353 }
6354 }
6355
6356 /* Return an rtx that represents the offset of X from the start of
6357 pool POOL. */
6358
6359 static rtx
6360 s390_pool_offset (struct constant_pool *pool, rtx x)
6361 {
6362 rtx label;
6363
6364 label = gen_rtx_LABEL_REF (GET_MODE (x), pool->label);
6365 x = gen_rtx_UNSPEC (GET_MODE (x), gen_rtvec (2, x, label),
6366 UNSPEC_POOL_OFFSET);
6367 return gen_rtx_CONST (GET_MODE (x), x);
6368 }
6369
6370 /* Find constant VAL of mode MODE in the constant pool POOL.
6371 Return an RTX describing the distance from the start of
6372 the pool to the location of the new constant. */
6373
6374 static rtx
6375 s390_find_constant (struct constant_pool *pool, rtx val,
6376 enum machine_mode mode)
6377 {
6378 struct constant *c;
6379 int i;
6380
6381 for (i = 0; i < NR_C_MODES; i++)
6382 if (constant_modes[i] == mode)
6383 break;
6384 gcc_assert (i != NR_C_MODES);
6385
6386 for (c = pool->constants[i]; c != NULL; c = c->next)
6387 if (rtx_equal_p (val, c->value))
6388 break;
6389
6390 gcc_assert (c);
6391
6392 return s390_pool_offset (pool, gen_rtx_LABEL_REF (Pmode, c->label));
6393 }
6394
6395 /* Check whether INSN is an execute. Return the label_ref to its
6396 execute target template if so, NULL_RTX otherwise. */
6397
6398 static rtx
6399 s390_execute_label (rtx insn)
6400 {
6401 if (NONJUMP_INSN_P (insn)
6402 && GET_CODE (PATTERN (insn)) == PARALLEL
6403 && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == UNSPEC
6404 && XINT (XVECEXP (PATTERN (insn), 0, 0), 1) == UNSPEC_EXECUTE)
6405 return XVECEXP (XVECEXP (PATTERN (insn), 0, 0), 0, 2);
6406
6407 return NULL_RTX;
6408 }
6409
6410 /* Add execute target for INSN to the constant pool POOL. */
6411
6412 static void
6413 s390_add_execute (struct constant_pool *pool, rtx insn)
6414 {
6415 struct constant *c;
6416
6417 for (c = pool->execute; c != NULL; c = c->next)
6418 if (INSN_UID (insn) == INSN_UID (c->value))
6419 break;
6420
6421 if (c == NULL)
6422 {
6423 c = (struct constant *) xmalloc (sizeof *c);
6424 c->value = insn;
6425 c->label = gen_label_rtx ();
6426 c->next = pool->execute;
6427 pool->execute = c;
6428 pool->size += 6;
6429 }
6430 }
6431
6432 /* Find execute target for INSN in the constant pool POOL.
6433 Return an RTX describing the distance from the start of
6434 the pool to the location of the execute target. */
6435
6436 static rtx
6437 s390_find_execute (struct constant_pool *pool, rtx insn)
6438 {
6439 struct constant *c;
6440
6441 for (c = pool->execute; c != NULL; c = c->next)
6442 if (INSN_UID (insn) == INSN_UID (c->value))
6443 break;
6444
6445 gcc_assert (c);
6446
6447 return s390_pool_offset (pool, gen_rtx_LABEL_REF (Pmode, c->label));
6448 }
6449
6450 /* For an execute INSN, extract the execute target template. */
6451
6452 static rtx
6453 s390_execute_target (rtx insn)
6454 {
6455 rtx pattern = PATTERN (insn);
6456 gcc_assert (s390_execute_label (insn));
6457
6458 if (XVECLEN (pattern, 0) == 2)
6459 {
6460 pattern = copy_rtx (XVECEXP (pattern, 0, 1));
6461 }
6462 else
6463 {
6464 rtvec vec = rtvec_alloc (XVECLEN (pattern, 0) - 1);
6465 int i;
6466
6467 for (i = 0; i < XVECLEN (pattern, 0) - 1; i++)
6468 RTVEC_ELT (vec, i) = copy_rtx (XVECEXP (pattern, 0, i + 1));
6469
6470 pattern = gen_rtx_PARALLEL (VOIDmode, vec);
6471 }
6472
6473 return pattern;
6474 }
6475
6476 /* Indicate that INSN cannot be duplicated. This is the case for
6477 execute insns that carry a unique label. */
6478
6479 static bool
6480 s390_cannot_copy_insn_p (rtx insn)
6481 {
6482 rtx label = s390_execute_label (insn);
6483 return label && label != const0_rtx;
6484 }
6485
6486 /* Dump out the constants in POOL. If REMOTE_LABEL is true,
6487 do not emit the pool base label. */
6488
6489 static void
6490 s390_dump_pool (struct constant_pool *pool, bool remote_label)
6491 {
6492 struct constant *c;
6493 rtx insn = pool->pool_insn;
6494 int i;
6495
6496 /* Switch to rodata section. */
6497 if (TARGET_CPU_ZARCH)
6498 {
6499 insn = emit_insn_after (gen_pool_section_start (), insn);
6500 INSN_ADDRESSES_NEW (insn, -1);
6501 }
6502
6503 /* Ensure minimum pool alignment. */
6504 if (TARGET_CPU_ZARCH)
6505 insn = emit_insn_after (gen_pool_align (GEN_INT (8)), insn);
6506 else
6507 insn = emit_insn_after (gen_pool_align (GEN_INT (4)), insn);
6508 INSN_ADDRESSES_NEW (insn, -1);
6509
6510 /* Emit pool base label. */
6511 if (!remote_label)
6512 {
6513 insn = emit_label_after (pool->label, insn);
6514 INSN_ADDRESSES_NEW (insn, -1);
6515 }
6516
6517 /* Dump constants in descending alignment requirement order,
6518 ensuring proper alignment for every constant. */
6519 for (i = 0; i < NR_C_MODES; i++)
6520 for (c = pool->constants[i]; c; c = c->next)
6521 {
6522 /* Convert UNSPEC_LTREL_OFFSET unspecs to pool-relative references. */
6523 rtx value = copy_rtx (c->value);
6524 if (GET_CODE (value) == CONST
6525 && GET_CODE (XEXP (value, 0)) == UNSPEC
6526 && XINT (XEXP (value, 0), 1) == UNSPEC_LTREL_OFFSET
6527 && XVECLEN (XEXP (value, 0), 0) == 1)
6528 value = s390_pool_offset (pool, XVECEXP (XEXP (value, 0), 0, 0));
6529
6530 insn = emit_label_after (c->label, insn);
6531 INSN_ADDRESSES_NEW (insn, -1);
6532
6533 value = gen_rtx_UNSPEC_VOLATILE (constant_modes[i],
6534 gen_rtvec (1, value),
6535 UNSPECV_POOL_ENTRY);
6536 insn = emit_insn_after (value, insn);
6537 INSN_ADDRESSES_NEW (insn, -1);
6538 }
6539
6540 /* Ensure minimum alignment for instructions. */
6541 insn = emit_insn_after (gen_pool_align (GEN_INT (2)), insn);
6542 INSN_ADDRESSES_NEW (insn, -1);
6543
6544 /* Output in-pool execute template insns. */
6545 for (c = pool->execute; c; c = c->next)
6546 {
6547 insn = emit_label_after (c->label, insn);
6548 INSN_ADDRESSES_NEW (insn, -1);
6549
6550 insn = emit_insn_after (s390_execute_target (c->value), insn);
6551 INSN_ADDRESSES_NEW (insn, -1);
6552 }
6553
6554 /* Switch back to previous section. */
6555 if (TARGET_CPU_ZARCH)
6556 {
6557 insn = emit_insn_after (gen_pool_section_end (), insn);
6558 INSN_ADDRESSES_NEW (insn, -1);
6559 }
6560
6561 insn = emit_barrier_after (insn);
6562 INSN_ADDRESSES_NEW (insn, -1);
6563
6564 /* Remove placeholder insn. */
6565 remove_insn (pool->pool_insn);
6566 }
6567
6568 /* Free all memory used by POOL. */
6569
6570 static void
6571 s390_free_pool (struct constant_pool *pool)
6572 {
6573 struct constant *c, *next;
6574 int i;
6575
6576 for (i = 0; i < NR_C_MODES; i++)
6577 for (c = pool->constants[i]; c; c = next)
6578 {
6579 next = c->next;
6580 free (c);
6581 }
6582
6583 for (c = pool->execute; c; c = next)
6584 {
6585 next = c->next;
6586 free (c);
6587 }
6588
6589 BITMAP_FREE (pool->insns);
6590 free (pool);
6591 }
6592
6593
6594 /* Collect main literal pool. Return NULL on overflow. */
6595
6596 static struct constant_pool *
6597 s390_mainpool_start (void)
6598 {
6599 struct constant_pool *pool;
6600 rtx insn;
6601
6602 pool = s390_alloc_pool ();
6603
6604 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6605 {
6606 if (NONJUMP_INSN_P (insn)
6607 && GET_CODE (PATTERN (insn)) == SET
6608 && GET_CODE (SET_SRC (PATTERN (insn))) == UNSPEC_VOLATILE
6609 && XINT (SET_SRC (PATTERN (insn)), 1) == UNSPECV_MAIN_POOL)
6610 {
6611 gcc_assert (!pool->pool_insn);
6612 pool->pool_insn = insn;
6613 }
6614
6615 if (!TARGET_CPU_ZARCH && s390_execute_label (insn))
6616 {
6617 s390_add_execute (pool, insn);
6618 }
6619 else if (NONJUMP_INSN_P (insn) || CALL_P (insn))
6620 {
6621 rtx pool_ref = NULL_RTX;
6622 find_constant_pool_ref (PATTERN (insn), &pool_ref);
6623 if (pool_ref)
6624 {
6625 rtx constant = get_pool_constant (pool_ref);
6626 enum machine_mode mode = get_pool_mode (pool_ref);
6627 s390_add_constant (pool, constant, mode);
6628 }
6629 }
6630
6631 /* If hot/cold partitioning is enabled we have to make sure that
6632 the literal pool is emitted in the same section where the
6633 initialization of the literal pool base pointer takes place.
6634 emit_pool_after is only used in the non-overflow case on non
6635 Z cpus where we can emit the literal pool at the end of the
6636 function body within the text section. */
6637 if (NOTE_P (insn)
6638 && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS
6639 && !pool->emit_pool_after)
6640 pool->emit_pool_after = PREV_INSN (insn);
6641 }
6642
6643 gcc_assert (pool->pool_insn || pool->size == 0);
6644
6645 if (pool->size >= 4096)
6646 {
6647 /* We're going to chunkify the pool, so remove the main
6648 pool placeholder insn. */
6649 remove_insn (pool->pool_insn);
6650
6651 s390_free_pool (pool);
6652 pool = NULL;
6653 }
6654
6655 /* If the functions ends with the section where the literal pool
6656 should be emitted set the marker to its end. */
6657 if (pool && !pool->emit_pool_after)
6658 pool->emit_pool_after = get_last_insn ();
6659
6660 return pool;
6661 }
6662
6663 /* POOL holds the main literal pool as collected by s390_mainpool_start.
6664 Modify the current function to output the pool constants as well as
6665 the pool register setup instruction. */
6666
6667 static void
6668 s390_mainpool_finish (struct constant_pool *pool)
6669 {
6670 rtx base_reg = cfun->machine->base_reg;
6671 rtx insn;
6672
6673 /* If the pool is empty, we're done. */
6674 if (pool->size == 0)
6675 {
6676 /* We don't actually need a base register after all. */
6677 cfun->machine->base_reg = NULL_RTX;
6678
6679 if (pool->pool_insn)
6680 remove_insn (pool->pool_insn);
6681 s390_free_pool (pool);
6682 return;
6683 }
6684
6685 /* We need correct insn addresses. */
6686 shorten_branches (get_insns ());
6687
6688 /* On zSeries, we use a LARL to load the pool register. The pool is
6689 located in the .rodata section, so we emit it after the function. */
6690 if (TARGET_CPU_ZARCH)
6691 {
6692 insn = gen_main_base_64 (base_reg, pool->label);
6693 insn = emit_insn_after (insn, pool->pool_insn);
6694 INSN_ADDRESSES_NEW (insn, -1);
6695 remove_insn (pool->pool_insn);
6696
6697 insn = get_last_insn ();
6698 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
6699 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
6700
6701 s390_dump_pool (pool, 0);
6702 }
6703
6704 /* On S/390, if the total size of the function's code plus literal pool
6705 does not exceed 4096 bytes, we use BASR to set up a function base
6706 pointer, and emit the literal pool at the end of the function. */
6707 else if (INSN_ADDRESSES (INSN_UID (pool->emit_pool_after))
6708 + pool->size + 8 /* alignment slop */ < 4096)
6709 {
6710 insn = gen_main_base_31_small (base_reg, pool->label);
6711 insn = emit_insn_after (insn, pool->pool_insn);
6712 INSN_ADDRESSES_NEW (insn, -1);
6713 remove_insn (pool->pool_insn);
6714
6715 insn = emit_label_after (pool->label, insn);
6716 INSN_ADDRESSES_NEW (insn, -1);
6717
6718 /* emit_pool_after will be set by s390_mainpool_start to the
6719 last insn of the section where the literal pool should be
6720 emitted. */
6721 insn = pool->emit_pool_after;
6722
6723 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
6724 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
6725
6726 s390_dump_pool (pool, 1);
6727 }
6728
6729 /* Otherwise, we emit an inline literal pool and use BASR to branch
6730 over it, setting up the pool register at the same time. */
6731 else
6732 {
6733 rtx pool_end = gen_label_rtx ();
6734
6735 insn = gen_main_base_31_large (base_reg, pool->label, pool_end);
6736 insn = emit_jump_insn_after (insn, pool->pool_insn);
6737 JUMP_LABEL (insn) = pool_end;
6738 INSN_ADDRESSES_NEW (insn, -1);
6739 remove_insn (pool->pool_insn);
6740
6741 insn = emit_label_after (pool->label, insn);
6742 INSN_ADDRESSES_NEW (insn, -1);
6743
6744 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
6745 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
6746
6747 insn = emit_label_after (pool_end, pool->pool_insn);
6748 INSN_ADDRESSES_NEW (insn, -1);
6749
6750 s390_dump_pool (pool, 1);
6751 }
6752
6753
6754 /* Replace all literal pool references. */
6755
6756 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6757 {
6758 if (INSN_P (insn))
6759 replace_ltrel_base (&PATTERN (insn));
6760
6761 if (NONJUMP_INSN_P (insn) || CALL_P (insn))
6762 {
6763 rtx addr, pool_ref = NULL_RTX;
6764 find_constant_pool_ref (PATTERN (insn), &pool_ref);
6765 if (pool_ref)
6766 {
6767 if (s390_execute_label (insn))
6768 addr = s390_find_execute (pool, insn);
6769 else
6770 addr = s390_find_constant (pool, get_pool_constant (pool_ref),
6771 get_pool_mode (pool_ref));
6772
6773 replace_constant_pool_ref (&PATTERN (insn), pool_ref, addr);
6774 INSN_CODE (insn) = -1;
6775 }
6776 }
6777 }
6778
6779
6780 /* Free the pool. */
6781 s390_free_pool (pool);
6782 }
6783
6784 /* POOL holds the main literal pool as collected by s390_mainpool_start.
6785 We have decided we cannot use this pool, so revert all changes
6786 to the current function that were done by s390_mainpool_start. */
6787 static void
6788 s390_mainpool_cancel (struct constant_pool *pool)
6789 {
6790 /* We didn't actually change the instruction stream, so simply
6791 free the pool memory. */
6792 s390_free_pool (pool);
6793 }
6794
6795
6796 /* Chunkify the literal pool. */
6797
6798 #define S390_POOL_CHUNK_MIN 0xc00
6799 #define S390_POOL_CHUNK_MAX 0xe00
6800
6801 static struct constant_pool *
6802 s390_chunkify_start (void)
6803 {
6804 struct constant_pool *curr_pool = NULL, *pool_list = NULL;
6805 int extra_size = 0;
6806 bitmap far_labels;
6807 rtx pending_ltrel = NULL_RTX;
6808 rtx insn;
6809
6810 rtx (*gen_reload_base) (rtx, rtx) =
6811 TARGET_CPU_ZARCH? gen_reload_base_64 : gen_reload_base_31;
6812
6813
6814 /* We need correct insn addresses. */
6815
6816 shorten_branches (get_insns ());
6817
6818 /* Scan all insns and move literals to pool chunks. */
6819
6820 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6821 {
6822 bool section_switch_p = false;
6823
6824 /* Check for pending LTREL_BASE. */
6825 if (INSN_P (insn))
6826 {
6827 rtx ltrel_base = find_ltrel_base (PATTERN (insn));
6828 if (ltrel_base)
6829 {
6830 gcc_assert (ltrel_base == pending_ltrel);
6831 pending_ltrel = NULL_RTX;
6832 }
6833 }
6834
6835 if (!TARGET_CPU_ZARCH && s390_execute_label (insn))
6836 {
6837 if (!curr_pool)
6838 curr_pool = s390_start_pool (&pool_list, insn);
6839
6840 s390_add_execute (curr_pool, insn);
6841 s390_add_pool_insn (curr_pool, insn);
6842 }
6843 else if (NONJUMP_INSN_P (insn) || CALL_P (insn))
6844 {
6845 rtx pool_ref = NULL_RTX;
6846 find_constant_pool_ref (PATTERN (insn), &pool_ref);
6847 if (pool_ref)
6848 {
6849 rtx constant = get_pool_constant (pool_ref);
6850 enum machine_mode mode = get_pool_mode (pool_ref);
6851
6852 if (!curr_pool)
6853 curr_pool = s390_start_pool (&pool_list, insn);
6854
6855 s390_add_constant (curr_pool, constant, mode);
6856 s390_add_pool_insn (curr_pool, insn);
6857
6858 /* Don't split the pool chunk between a LTREL_OFFSET load
6859 and the corresponding LTREL_BASE. */
6860 if (GET_CODE (constant) == CONST
6861 && GET_CODE (XEXP (constant, 0)) == UNSPEC
6862 && XINT (XEXP (constant, 0), 1) == UNSPEC_LTREL_OFFSET)
6863 {
6864 gcc_assert (!pending_ltrel);
6865 pending_ltrel = pool_ref;
6866 }
6867 }
6868 }
6869
6870 if (JUMP_P (insn) || JUMP_TABLE_DATA_P (insn) || LABEL_P (insn))
6871 {
6872 if (curr_pool)
6873 s390_add_pool_insn (curr_pool, insn);
6874 /* An LTREL_BASE must follow within the same basic block. */
6875 gcc_assert (!pending_ltrel);
6876 }
6877
6878 if (NOTE_P (insn))
6879 switch (NOTE_KIND (insn))
6880 {
6881 case NOTE_INSN_SWITCH_TEXT_SECTIONS:
6882 section_switch_p = true;
6883 break;
6884 case NOTE_INSN_VAR_LOCATION:
6885 case NOTE_INSN_CALL_ARG_LOCATION:
6886 continue;
6887 default:
6888 break;
6889 }
6890
6891 if (!curr_pool
6892 || INSN_ADDRESSES_SIZE () <= (size_t) INSN_UID (insn)
6893 || INSN_ADDRESSES (INSN_UID (insn)) == -1)
6894 continue;
6895
6896 if (TARGET_CPU_ZARCH)
6897 {
6898 if (curr_pool->size < S390_POOL_CHUNK_MAX)
6899 continue;
6900
6901 s390_end_pool (curr_pool, NULL_RTX);
6902 curr_pool = NULL;
6903 }
6904 else
6905 {
6906 int chunk_size = INSN_ADDRESSES (INSN_UID (insn))
6907 - INSN_ADDRESSES (INSN_UID (curr_pool->first_insn))
6908 + extra_size;
6909
6910 /* We will later have to insert base register reload insns.
6911 Those will have an effect on code size, which we need to
6912 consider here. This calculation makes rather pessimistic
6913 worst-case assumptions. */
6914 if (LABEL_P (insn))
6915 extra_size += 6;
6916
6917 if (chunk_size < S390_POOL_CHUNK_MIN
6918 && curr_pool->size < S390_POOL_CHUNK_MIN
6919 && !section_switch_p)
6920 continue;
6921
6922 /* Pool chunks can only be inserted after BARRIERs ... */
6923 if (BARRIER_P (insn))
6924 {
6925 s390_end_pool (curr_pool, insn);
6926 curr_pool = NULL;
6927 extra_size = 0;
6928 }
6929
6930 /* ... so if we don't find one in time, create one. */
6931 else if (chunk_size > S390_POOL_CHUNK_MAX
6932 || curr_pool->size > S390_POOL_CHUNK_MAX
6933 || section_switch_p)
6934 {
6935 rtx label, jump, barrier, next, prev;
6936
6937 if (!section_switch_p)
6938 {
6939 /* We can insert the barrier only after a 'real' insn. */
6940 if (! NONJUMP_INSN_P (insn) && ! CALL_P (insn))
6941 continue;
6942 if (get_attr_length (insn) == 0)
6943 continue;
6944 /* Don't separate LTREL_BASE from the corresponding
6945 LTREL_OFFSET load. */
6946 if (pending_ltrel)
6947 continue;
6948 next = insn;
6949 do
6950 {
6951 insn = next;
6952 next = NEXT_INSN (insn);
6953 }
6954 while (next
6955 && NOTE_P (next)
6956 && (NOTE_KIND (next) == NOTE_INSN_VAR_LOCATION
6957 || NOTE_KIND (next) == NOTE_INSN_CALL_ARG_LOCATION));
6958 }
6959 else
6960 {
6961 gcc_assert (!pending_ltrel);
6962
6963 /* The old pool has to end before the section switch
6964 note in order to make it part of the current
6965 section. */
6966 insn = PREV_INSN (insn);
6967 }
6968
6969 label = gen_label_rtx ();
6970 prev = insn;
6971 if (prev && NOTE_P (prev))
6972 prev = prev_nonnote_insn (prev);
6973 if (prev)
6974 jump = emit_jump_insn_after_setloc (gen_jump (label), insn,
6975 INSN_LOCATION (prev));
6976 else
6977 jump = emit_jump_insn_after_noloc (gen_jump (label), insn);
6978 barrier = emit_barrier_after (jump);
6979 insn = emit_label_after (label, barrier);
6980 JUMP_LABEL (jump) = label;
6981 LABEL_NUSES (label) = 1;
6982
6983 INSN_ADDRESSES_NEW (jump, -1);
6984 INSN_ADDRESSES_NEW (barrier, -1);
6985 INSN_ADDRESSES_NEW (insn, -1);
6986
6987 s390_end_pool (curr_pool, barrier);
6988 curr_pool = NULL;
6989 extra_size = 0;
6990 }
6991 }
6992 }
6993
6994 if (curr_pool)
6995 s390_end_pool (curr_pool, NULL_RTX);
6996 gcc_assert (!pending_ltrel);
6997
6998 /* Find all labels that are branched into
6999 from an insn belonging to a different chunk. */
7000
7001 far_labels = BITMAP_ALLOC (NULL);
7002
7003 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
7004 {
7005 /* Labels marked with LABEL_PRESERVE_P can be target
7006 of non-local jumps, so we have to mark them.
7007 The same holds for named labels.
7008
7009 Don't do that, however, if it is the label before
7010 a jump table. */
7011
7012 if (LABEL_P (insn)
7013 && (LABEL_PRESERVE_P (insn) || LABEL_NAME (insn)))
7014 {
7015 rtx vec_insn = next_real_insn (insn);
7016 if (! vec_insn || ! JUMP_TABLE_DATA_P (vec_insn))
7017 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (insn));
7018 }
7019
7020 /* If we have a direct jump (conditional or unconditional)
7021 or a casesi jump, check all potential targets. */
7022 else if (JUMP_P (insn))
7023 {
7024 rtx pat = PATTERN (insn);
7025 if (GET_CODE (pat) == PARALLEL && XVECLEN (pat, 0) > 2)
7026 pat = XVECEXP (pat, 0, 0);
7027
7028 if (GET_CODE (pat) == SET)
7029 {
7030 rtx label = JUMP_LABEL (insn);
7031 if (label)
7032 {
7033 if (s390_find_pool (pool_list, label)
7034 != s390_find_pool (pool_list, insn))
7035 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (label));
7036 }
7037 }
7038 else if (GET_CODE (pat) == PARALLEL
7039 && XVECLEN (pat, 0) == 2
7040 && GET_CODE (XVECEXP (pat, 0, 0)) == SET
7041 && GET_CODE (XVECEXP (pat, 0, 1)) == USE
7042 && GET_CODE (XEXP (XVECEXP (pat, 0, 1), 0)) == LABEL_REF)
7043 {
7044 /* Find the jump table used by this casesi jump. */
7045 rtx vec_label = XEXP (XEXP (XVECEXP (pat, 0, 1), 0), 0);
7046 rtx vec_insn = next_real_insn (vec_label);
7047 if (vec_insn && JUMP_TABLE_DATA_P (vec_insn))
7048 {
7049 rtx vec_pat = PATTERN (vec_insn);
7050 int i, diff_p = GET_CODE (vec_pat) == ADDR_DIFF_VEC;
7051
7052 for (i = 0; i < XVECLEN (vec_pat, diff_p); i++)
7053 {
7054 rtx label = XEXP (XVECEXP (vec_pat, diff_p, i), 0);
7055
7056 if (s390_find_pool (pool_list, label)
7057 != s390_find_pool (pool_list, insn))
7058 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (label));
7059 }
7060 }
7061 }
7062 }
7063 }
7064
7065 /* Insert base register reload insns before every pool. */
7066
7067 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
7068 {
7069 rtx new_insn = gen_reload_base (cfun->machine->base_reg,
7070 curr_pool->label);
7071 rtx insn = curr_pool->first_insn;
7072 INSN_ADDRESSES_NEW (emit_insn_before (new_insn, insn), -1);
7073 }
7074
7075 /* Insert base register reload insns at every far label. */
7076
7077 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
7078 if (LABEL_P (insn)
7079 && bitmap_bit_p (far_labels, CODE_LABEL_NUMBER (insn)))
7080 {
7081 struct constant_pool *pool = s390_find_pool (pool_list, insn);
7082 if (pool)
7083 {
7084 rtx new_insn = gen_reload_base (cfun->machine->base_reg,
7085 pool->label);
7086 INSN_ADDRESSES_NEW (emit_insn_after (new_insn, insn), -1);
7087 }
7088 }
7089
7090
7091 BITMAP_FREE (far_labels);
7092
7093
7094 /* Recompute insn addresses. */
7095
7096 init_insn_lengths ();
7097 shorten_branches (get_insns ());
7098
7099 return pool_list;
7100 }
7101
7102 /* POOL_LIST is a chunk list as prepared by s390_chunkify_start.
7103 After we have decided to use this list, finish implementing
7104 all changes to the current function as required. */
7105
7106 static void
7107 s390_chunkify_finish (struct constant_pool *pool_list)
7108 {
7109 struct constant_pool *curr_pool = NULL;
7110 rtx insn;
7111
7112
7113 /* Replace all literal pool references. */
7114
7115 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
7116 {
7117 if (INSN_P (insn))
7118 replace_ltrel_base (&PATTERN (insn));
7119
7120 curr_pool = s390_find_pool (pool_list, insn);
7121 if (!curr_pool)
7122 continue;
7123
7124 if (NONJUMP_INSN_P (insn) || CALL_P (insn))
7125 {
7126 rtx addr, pool_ref = NULL_RTX;
7127 find_constant_pool_ref (PATTERN (insn), &pool_ref);
7128 if (pool_ref)
7129 {
7130 if (s390_execute_label (insn))
7131 addr = s390_find_execute (curr_pool, insn);
7132 else
7133 addr = s390_find_constant (curr_pool,
7134 get_pool_constant (pool_ref),
7135 get_pool_mode (pool_ref));
7136
7137 replace_constant_pool_ref (&PATTERN (insn), pool_ref, addr);
7138 INSN_CODE (insn) = -1;
7139 }
7140 }
7141 }
7142
7143 /* Dump out all literal pools. */
7144
7145 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
7146 s390_dump_pool (curr_pool, 0);
7147
7148 /* Free pool list. */
7149
7150 while (pool_list)
7151 {
7152 struct constant_pool *next = pool_list->next;
7153 s390_free_pool (pool_list);
7154 pool_list = next;
7155 }
7156 }
7157
7158 /* POOL_LIST is a chunk list as prepared by s390_chunkify_start.
7159 We have decided we cannot use this list, so revert all changes
7160 to the current function that were done by s390_chunkify_start. */
7161
7162 static void
7163 s390_chunkify_cancel (struct constant_pool *pool_list)
7164 {
7165 struct constant_pool *curr_pool = NULL;
7166 rtx insn;
7167
7168 /* Remove all pool placeholder insns. */
7169
7170 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
7171 {
7172 /* Did we insert an extra barrier? Remove it. */
7173 rtx barrier = PREV_INSN (curr_pool->pool_insn);
7174 rtx jump = barrier? PREV_INSN (barrier) : NULL_RTX;
7175 rtx label = NEXT_INSN (curr_pool->pool_insn);
7176
7177 if (jump && JUMP_P (jump)
7178 && barrier && BARRIER_P (barrier)
7179 && label && LABEL_P (label)
7180 && GET_CODE (PATTERN (jump)) == SET
7181 && SET_DEST (PATTERN (jump)) == pc_rtx
7182 && GET_CODE (SET_SRC (PATTERN (jump))) == LABEL_REF
7183 && XEXP (SET_SRC (PATTERN (jump)), 0) == label)
7184 {
7185 remove_insn (jump);
7186 remove_insn (barrier);
7187 remove_insn (label);
7188 }
7189
7190 remove_insn (curr_pool->pool_insn);
7191 }
7192
7193 /* Remove all base register reload insns. */
7194
7195 for (insn = get_insns (); insn; )
7196 {
7197 rtx next_insn = NEXT_INSN (insn);
7198
7199 if (NONJUMP_INSN_P (insn)
7200 && GET_CODE (PATTERN (insn)) == SET
7201 && GET_CODE (SET_SRC (PATTERN (insn))) == UNSPEC
7202 && XINT (SET_SRC (PATTERN (insn)), 1) == UNSPEC_RELOAD_BASE)
7203 remove_insn (insn);
7204
7205 insn = next_insn;
7206 }
7207
7208 /* Free pool list. */
7209
7210 while (pool_list)
7211 {
7212 struct constant_pool *next = pool_list->next;
7213 s390_free_pool (pool_list);
7214 pool_list = next;
7215 }
7216 }
7217
7218 /* Output the constant pool entry EXP in mode MODE with alignment ALIGN. */
7219
7220 void
7221 s390_output_pool_entry (rtx exp, enum machine_mode mode, unsigned int align)
7222 {
7223 REAL_VALUE_TYPE r;
7224
7225 switch (GET_MODE_CLASS (mode))
7226 {
7227 case MODE_FLOAT:
7228 case MODE_DECIMAL_FLOAT:
7229 gcc_assert (GET_CODE (exp) == CONST_DOUBLE);
7230
7231 REAL_VALUE_FROM_CONST_DOUBLE (r, exp);
7232 assemble_real (r, mode, align);
7233 break;
7234
7235 case MODE_INT:
7236 assemble_integer (exp, GET_MODE_SIZE (mode), align, 1);
7237 mark_symbol_refs_as_used (exp);
7238 break;
7239
7240 default:
7241 gcc_unreachable ();
7242 }
7243 }
7244
7245
7246 /* Return an RTL expression representing the value of the return address
7247 for the frame COUNT steps up from the current frame. FRAME is the
7248 frame pointer of that frame. */
7249
7250 rtx
7251 s390_return_addr_rtx (int count, rtx frame ATTRIBUTE_UNUSED)
7252 {
7253 int offset;
7254 rtx addr;
7255
7256 /* Without backchain, we fail for all but the current frame. */
7257
7258 if (!TARGET_BACKCHAIN && count > 0)
7259 return NULL_RTX;
7260
7261 /* For the current frame, we need to make sure the initial
7262 value of RETURN_REGNUM is actually saved. */
7263
7264 if (count == 0)
7265 {
7266 /* On non-z architectures branch splitting could overwrite r14. */
7267 if (TARGET_CPU_ZARCH)
7268 return get_hard_reg_initial_val (Pmode, RETURN_REGNUM);
7269 else
7270 {
7271 cfun_frame_layout.save_return_addr_p = true;
7272 return gen_rtx_MEM (Pmode, return_address_pointer_rtx);
7273 }
7274 }
7275
7276 if (TARGET_PACKED_STACK)
7277 offset = -2 * UNITS_PER_LONG;
7278 else
7279 offset = RETURN_REGNUM * UNITS_PER_LONG;
7280
7281 addr = plus_constant (Pmode, frame, offset);
7282 addr = memory_address (Pmode, addr);
7283 return gen_rtx_MEM (Pmode, addr);
7284 }
7285
7286 /* Return an RTL expression representing the back chain stored in
7287 the current stack frame. */
7288
7289 rtx
7290 s390_back_chain_rtx (void)
7291 {
7292 rtx chain;
7293
7294 gcc_assert (TARGET_BACKCHAIN);
7295
7296 if (TARGET_PACKED_STACK)
7297 chain = plus_constant (Pmode, stack_pointer_rtx,
7298 STACK_POINTER_OFFSET - UNITS_PER_LONG);
7299 else
7300 chain = stack_pointer_rtx;
7301
7302 chain = gen_rtx_MEM (Pmode, chain);
7303 return chain;
7304 }
7305
7306 /* Find first call clobbered register unused in a function.
7307 This could be used as base register in a leaf function
7308 or for holding the return address before epilogue. */
7309
7310 static int
7311 find_unused_clobbered_reg (void)
7312 {
7313 int i;
7314 for (i = 0; i < 6; i++)
7315 if (!df_regs_ever_live_p (i))
7316 return i;
7317 return 0;
7318 }
7319
7320
7321 /* Helper function for s390_regs_ever_clobbered. Sets the fields in DATA for all
7322 clobbered hard regs in SETREG. */
7323
7324 static void
7325 s390_reg_clobbered_rtx (rtx setreg, const_rtx set_insn ATTRIBUTE_UNUSED, void *data)
7326 {
7327 int *regs_ever_clobbered = (int *)data;
7328 unsigned int i, regno;
7329 enum machine_mode mode = GET_MODE (setreg);
7330
7331 if (GET_CODE (setreg) == SUBREG)
7332 {
7333 rtx inner = SUBREG_REG (setreg);
7334 if (!GENERAL_REG_P (inner))
7335 return;
7336 regno = subreg_regno (setreg);
7337 }
7338 else if (GENERAL_REG_P (setreg))
7339 regno = REGNO (setreg);
7340 else
7341 return;
7342
7343 for (i = regno;
7344 i < regno + HARD_REGNO_NREGS (regno, mode);
7345 i++)
7346 regs_ever_clobbered[i] = 1;
7347 }
7348
7349 /* Walks through all basic blocks of the current function looking
7350 for clobbered hard regs using s390_reg_clobbered_rtx. The fields
7351 of the passed integer array REGS_EVER_CLOBBERED are set to one for
7352 each of those regs. */
7353
7354 static void
7355 s390_regs_ever_clobbered (int *regs_ever_clobbered)
7356 {
7357 basic_block cur_bb;
7358 rtx cur_insn;
7359 unsigned int i;
7360
7361 memset (regs_ever_clobbered, 0, 16 * sizeof (int));
7362
7363 /* For non-leaf functions we have to consider all call clobbered regs to be
7364 clobbered. */
7365 if (!crtl->is_leaf)
7366 {
7367 for (i = 0; i < 16; i++)
7368 regs_ever_clobbered[i] = call_really_used_regs[i];
7369 }
7370
7371 /* Make the "magic" eh_return registers live if necessary. For regs_ever_live
7372 this work is done by liveness analysis (mark_regs_live_at_end).
7373 Special care is needed for functions containing landing pads. Landing pads
7374 may use the eh registers, but the code which sets these registers is not
7375 contained in that function. Hence s390_regs_ever_clobbered is not able to
7376 deal with this automatically. */
7377 if (crtl->calls_eh_return || cfun->machine->has_landing_pad_p)
7378 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM ; i++)
7379 if (crtl->calls_eh_return
7380 || (cfun->machine->has_landing_pad_p
7381 && df_regs_ever_live_p (EH_RETURN_DATA_REGNO (i))))
7382 regs_ever_clobbered[EH_RETURN_DATA_REGNO (i)] = 1;
7383
7384 /* For nonlocal gotos all call-saved registers have to be saved.
7385 This flag is also set for the unwinding code in libgcc.
7386 See expand_builtin_unwind_init. For regs_ever_live this is done by
7387 reload. */
7388 if (cfun->has_nonlocal_label)
7389 for (i = 0; i < 16; i++)
7390 if (!call_really_used_regs[i])
7391 regs_ever_clobbered[i] = 1;
7392
7393 FOR_EACH_BB (cur_bb)
7394 {
7395 FOR_BB_INSNS (cur_bb, cur_insn)
7396 {
7397 if (INSN_P (cur_insn))
7398 note_stores (PATTERN (cur_insn),
7399 s390_reg_clobbered_rtx,
7400 regs_ever_clobbered);
7401 }
7402 }
7403 }
7404
7405 /* Determine the frame area which actually has to be accessed
7406 in the function epilogue. The values are stored at the
7407 given pointers AREA_BOTTOM (address of the lowest used stack
7408 address) and AREA_TOP (address of the first item which does
7409 not belong to the stack frame). */
7410
7411 static void
7412 s390_frame_area (int *area_bottom, int *area_top)
7413 {
7414 int b, t;
7415 int i;
7416
7417 b = INT_MAX;
7418 t = INT_MIN;
7419
7420 if (cfun_frame_layout.first_restore_gpr != -1)
7421 {
7422 b = (cfun_frame_layout.gprs_offset
7423 + cfun_frame_layout.first_restore_gpr * UNITS_PER_LONG);
7424 t = b + (cfun_frame_layout.last_restore_gpr
7425 - cfun_frame_layout.first_restore_gpr + 1) * UNITS_PER_LONG;
7426 }
7427
7428 if (TARGET_64BIT && cfun_save_high_fprs_p)
7429 {
7430 b = MIN (b, cfun_frame_layout.f8_offset);
7431 t = MAX (t, (cfun_frame_layout.f8_offset
7432 + cfun_frame_layout.high_fprs * 8));
7433 }
7434
7435 if (!TARGET_64BIT)
7436 for (i = 2; i < 4; i++)
7437 if (cfun_fpr_bit_p (i))
7438 {
7439 b = MIN (b, cfun_frame_layout.f4_offset + (i - 2) * 8);
7440 t = MAX (t, cfun_frame_layout.f4_offset + (i - 1) * 8);
7441 }
7442
7443 *area_bottom = b;
7444 *area_top = t;
7445 }
7446
7447 /* Fill cfun->machine with info about register usage of current function.
7448 Return in CLOBBERED_REGS which GPRs are currently considered set. */
7449
7450 static void
7451 s390_register_info (int clobbered_regs[])
7452 {
7453 int i, j;
7454
7455 /* fprs 8 - 15 are call saved for 64 Bit ABI. */
7456 cfun_frame_layout.fpr_bitmap = 0;
7457 cfun_frame_layout.high_fprs = 0;
7458 if (TARGET_64BIT)
7459 for (i = 24; i < 32; i++)
7460 if (df_regs_ever_live_p (i) && !global_regs[i])
7461 {
7462 cfun_set_fpr_bit (i - 16);
7463 cfun_frame_layout.high_fprs++;
7464 }
7465
7466 /* Find first and last gpr to be saved. We trust regs_ever_live
7467 data, except that we don't save and restore global registers.
7468
7469 Also, all registers with special meaning to the compiler need
7470 to be handled extra. */
7471
7472 s390_regs_ever_clobbered (clobbered_regs);
7473
7474 for (i = 0; i < 16; i++)
7475 clobbered_regs[i] = clobbered_regs[i] && !global_regs[i] && !fixed_regs[i];
7476
7477 if (frame_pointer_needed)
7478 clobbered_regs[HARD_FRAME_POINTER_REGNUM] = 1;
7479
7480 if (flag_pic)
7481 clobbered_regs[PIC_OFFSET_TABLE_REGNUM]
7482 |= df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM);
7483
7484 clobbered_regs[BASE_REGNUM]
7485 |= (cfun->machine->base_reg
7486 && REGNO (cfun->machine->base_reg) == BASE_REGNUM);
7487
7488 clobbered_regs[RETURN_REGNUM]
7489 |= (!crtl->is_leaf
7490 || TARGET_TPF_PROFILING
7491 || cfun->machine->split_branches_pending_p
7492 || cfun_frame_layout.save_return_addr_p
7493 || crtl->calls_eh_return
7494 || cfun->stdarg);
7495
7496 clobbered_regs[STACK_POINTER_REGNUM]
7497 |= (!crtl->is_leaf
7498 || TARGET_TPF_PROFILING
7499 || cfun_save_high_fprs_p
7500 || get_frame_size () > 0
7501 || cfun->calls_alloca
7502 || cfun->stdarg);
7503
7504 for (i = 6; i < 16; i++)
7505 if (df_regs_ever_live_p (i) || clobbered_regs[i])
7506 break;
7507 for (j = 15; j > i; j--)
7508 if (df_regs_ever_live_p (j) || clobbered_regs[j])
7509 break;
7510
7511 if (i == 16)
7512 {
7513 /* Nothing to save/restore. */
7514 cfun_frame_layout.first_save_gpr_slot = -1;
7515 cfun_frame_layout.last_save_gpr_slot = -1;
7516 cfun_frame_layout.first_save_gpr = -1;
7517 cfun_frame_layout.first_restore_gpr = -1;
7518 cfun_frame_layout.last_save_gpr = -1;
7519 cfun_frame_layout.last_restore_gpr = -1;
7520 }
7521 else
7522 {
7523 /* Save slots for gprs from i to j. */
7524 cfun_frame_layout.first_save_gpr_slot = i;
7525 cfun_frame_layout.last_save_gpr_slot = j;
7526
7527 for (i = cfun_frame_layout.first_save_gpr_slot;
7528 i < cfun_frame_layout.last_save_gpr_slot + 1;
7529 i++)
7530 if (clobbered_regs[i])
7531 break;
7532
7533 for (j = cfun_frame_layout.last_save_gpr_slot; j > i; j--)
7534 if (clobbered_regs[j])
7535 break;
7536
7537 if (i == cfun_frame_layout.last_save_gpr_slot + 1)
7538 {
7539 /* Nothing to save/restore. */
7540 cfun_frame_layout.first_save_gpr = -1;
7541 cfun_frame_layout.first_restore_gpr = -1;
7542 cfun_frame_layout.last_save_gpr = -1;
7543 cfun_frame_layout.last_restore_gpr = -1;
7544 }
7545 else
7546 {
7547 /* Save / Restore from gpr i to j. */
7548 cfun_frame_layout.first_save_gpr = i;
7549 cfun_frame_layout.first_restore_gpr = i;
7550 cfun_frame_layout.last_save_gpr = j;
7551 cfun_frame_layout.last_restore_gpr = j;
7552 }
7553 }
7554
7555 if (cfun->stdarg)
7556 {
7557 /* Varargs functions need to save gprs 2 to 6. */
7558 if (cfun->va_list_gpr_size
7559 && crtl->args.info.gprs < GP_ARG_NUM_REG)
7560 {
7561 int min_gpr = crtl->args.info.gprs;
7562 int max_gpr = min_gpr + cfun->va_list_gpr_size;
7563 if (max_gpr > GP_ARG_NUM_REG)
7564 max_gpr = GP_ARG_NUM_REG;
7565
7566 if (cfun_frame_layout.first_save_gpr == -1
7567 || cfun_frame_layout.first_save_gpr > 2 + min_gpr)
7568 {
7569 cfun_frame_layout.first_save_gpr = 2 + min_gpr;
7570 cfun_frame_layout.first_save_gpr_slot = 2 + min_gpr;
7571 }
7572
7573 if (cfun_frame_layout.last_save_gpr == -1
7574 || cfun_frame_layout.last_save_gpr < 2 + max_gpr - 1)
7575 {
7576 cfun_frame_layout.last_save_gpr = 2 + max_gpr - 1;
7577 cfun_frame_layout.last_save_gpr_slot = 2 + max_gpr - 1;
7578 }
7579 }
7580
7581 /* Mark f0, f2 for 31 bit and f0-f4 for 64 bit to be saved. */
7582 if (TARGET_HARD_FLOAT && cfun->va_list_fpr_size
7583 && crtl->args.info.fprs < FP_ARG_NUM_REG)
7584 {
7585 int min_fpr = crtl->args.info.fprs;
7586 int max_fpr = min_fpr + cfun->va_list_fpr_size;
7587 if (max_fpr > FP_ARG_NUM_REG)
7588 max_fpr = FP_ARG_NUM_REG;
7589
7590 /* ??? This is currently required to ensure proper location
7591 of the fpr save slots within the va_list save area. */
7592 if (TARGET_PACKED_STACK)
7593 min_fpr = 0;
7594
7595 for (i = min_fpr; i < max_fpr; i++)
7596 cfun_set_fpr_bit (i);
7597 }
7598 }
7599
7600 if (!TARGET_64BIT)
7601 for (i = 2; i < 4; i++)
7602 if (df_regs_ever_live_p (i + 16) && !global_regs[i + 16])
7603 cfun_set_fpr_bit (i);
7604 }
7605
7606 /* Fill cfun->machine with info about frame of current function. */
7607
7608 static void
7609 s390_frame_info (void)
7610 {
7611 int i;
7612
7613 cfun_frame_layout.frame_size = get_frame_size ();
7614 if (!TARGET_64BIT && cfun_frame_layout.frame_size > 0x7fff0000)
7615 fatal_error ("total size of local variables exceeds architecture limit");
7616
7617 if (!TARGET_PACKED_STACK)
7618 {
7619 cfun_frame_layout.backchain_offset = 0;
7620 cfun_frame_layout.f0_offset = 16 * UNITS_PER_LONG;
7621 cfun_frame_layout.f4_offset = cfun_frame_layout.f0_offset + 2 * 8;
7622 cfun_frame_layout.f8_offset = -cfun_frame_layout.high_fprs * 8;
7623 cfun_frame_layout.gprs_offset = (cfun_frame_layout.first_save_gpr_slot
7624 * UNITS_PER_LONG);
7625 }
7626 else if (TARGET_BACKCHAIN) /* kernel stack layout */
7627 {
7628 cfun_frame_layout.backchain_offset = (STACK_POINTER_OFFSET
7629 - UNITS_PER_LONG);
7630 cfun_frame_layout.gprs_offset
7631 = (cfun_frame_layout.backchain_offset
7632 - (STACK_POINTER_REGNUM - cfun_frame_layout.first_save_gpr_slot + 1)
7633 * UNITS_PER_LONG);
7634
7635 if (TARGET_64BIT)
7636 {
7637 cfun_frame_layout.f4_offset
7638 = (cfun_frame_layout.gprs_offset
7639 - 8 * (cfun_fpr_bit_p (2) + cfun_fpr_bit_p (3)));
7640
7641 cfun_frame_layout.f0_offset
7642 = (cfun_frame_layout.f4_offset
7643 - 8 * (cfun_fpr_bit_p (0) + cfun_fpr_bit_p (1)));
7644 }
7645 else
7646 {
7647 /* On 31 bit we have to care about alignment of the
7648 floating point regs to provide fastest access. */
7649 cfun_frame_layout.f0_offset
7650 = ((cfun_frame_layout.gprs_offset
7651 & ~(STACK_BOUNDARY / BITS_PER_UNIT - 1))
7652 - 8 * (cfun_fpr_bit_p (0) + cfun_fpr_bit_p (1)));
7653
7654 cfun_frame_layout.f4_offset
7655 = (cfun_frame_layout.f0_offset
7656 - 8 * (cfun_fpr_bit_p (2) + cfun_fpr_bit_p (3)));
7657 }
7658 }
7659 else /* no backchain */
7660 {
7661 cfun_frame_layout.f4_offset
7662 = (STACK_POINTER_OFFSET
7663 - 8 * (cfun_fpr_bit_p (2) + cfun_fpr_bit_p (3)));
7664
7665 cfun_frame_layout.f0_offset
7666 = (cfun_frame_layout.f4_offset
7667 - 8 * (cfun_fpr_bit_p (0) + cfun_fpr_bit_p (1)));
7668
7669 cfun_frame_layout.gprs_offset
7670 = cfun_frame_layout.f0_offset - cfun_gprs_save_area_size;
7671 }
7672
7673 if (crtl->is_leaf
7674 && !TARGET_TPF_PROFILING
7675 && cfun_frame_layout.frame_size == 0
7676 && !cfun_save_high_fprs_p
7677 && !cfun->calls_alloca
7678 && !cfun->stdarg)
7679 return;
7680
7681 if (!TARGET_PACKED_STACK)
7682 cfun_frame_layout.frame_size += (STACK_POINTER_OFFSET
7683 + crtl->outgoing_args_size
7684 + cfun_frame_layout.high_fprs * 8);
7685 else
7686 {
7687 if (TARGET_BACKCHAIN)
7688 cfun_frame_layout.frame_size += UNITS_PER_LONG;
7689
7690 /* No alignment trouble here because f8-f15 are only saved under
7691 64 bit. */
7692 cfun_frame_layout.f8_offset = (MIN (MIN (cfun_frame_layout.f0_offset,
7693 cfun_frame_layout.f4_offset),
7694 cfun_frame_layout.gprs_offset)
7695 - cfun_frame_layout.high_fprs * 8);
7696
7697 cfun_frame_layout.frame_size += cfun_frame_layout.high_fprs * 8;
7698
7699 for (i = 0; i < 8; i++)
7700 if (cfun_fpr_bit_p (i))
7701 cfun_frame_layout.frame_size += 8;
7702
7703 cfun_frame_layout.frame_size += cfun_gprs_save_area_size;
7704
7705 /* If under 31 bit an odd number of gprs has to be saved we have to adjust
7706 the frame size to sustain 8 byte alignment of stack frames. */
7707 cfun_frame_layout.frame_size = ((cfun_frame_layout.frame_size +
7708 STACK_BOUNDARY / BITS_PER_UNIT - 1)
7709 & ~(STACK_BOUNDARY / BITS_PER_UNIT - 1));
7710
7711 cfun_frame_layout.frame_size += crtl->outgoing_args_size;
7712 }
7713 }
7714
7715 /* Generate frame layout. Fills in register and frame data for the current
7716 function in cfun->machine. This routine can be called multiple times;
7717 it will re-do the complete frame layout every time. */
7718
7719 static void
7720 s390_init_frame_layout (void)
7721 {
7722 HOST_WIDE_INT frame_size;
7723 int base_used;
7724 int clobbered_regs[16];
7725
7726 /* On S/390 machines, we may need to perform branch splitting, which
7727 will require both base and return address register. We have no
7728 choice but to assume we're going to need them until right at the
7729 end of the machine dependent reorg phase. */
7730 if (!TARGET_CPU_ZARCH)
7731 cfun->machine->split_branches_pending_p = true;
7732
7733 do
7734 {
7735 frame_size = cfun_frame_layout.frame_size;
7736
7737 /* Try to predict whether we'll need the base register. */
7738 base_used = cfun->machine->split_branches_pending_p
7739 || crtl->uses_const_pool
7740 || (!DISP_IN_RANGE (frame_size)
7741 && !CONST_OK_FOR_K (frame_size));
7742
7743 /* Decide which register to use as literal pool base. In small
7744 leaf functions, try to use an unused call-clobbered register
7745 as base register to avoid save/restore overhead. */
7746 if (!base_used)
7747 cfun->machine->base_reg = NULL_RTX;
7748 else if (crtl->is_leaf && !df_regs_ever_live_p (5))
7749 cfun->machine->base_reg = gen_rtx_REG (Pmode, 5);
7750 else
7751 cfun->machine->base_reg = gen_rtx_REG (Pmode, BASE_REGNUM);
7752
7753 s390_register_info (clobbered_regs);
7754 s390_frame_info ();
7755 }
7756 while (frame_size != cfun_frame_layout.frame_size);
7757 }
7758
7759 /* Update frame layout. Recompute actual register save data based on
7760 current info and update regs_ever_live for the special registers.
7761 May be called multiple times, but may never cause *more* registers
7762 to be saved than s390_init_frame_layout allocated room for. */
7763
7764 static void
7765 s390_update_frame_layout (void)
7766 {
7767 int clobbered_regs[16];
7768
7769 s390_register_info (clobbered_regs);
7770
7771 df_set_regs_ever_live (BASE_REGNUM,
7772 clobbered_regs[BASE_REGNUM] ? true : false);
7773 df_set_regs_ever_live (RETURN_REGNUM,
7774 clobbered_regs[RETURN_REGNUM] ? true : false);
7775 df_set_regs_ever_live (STACK_POINTER_REGNUM,
7776 clobbered_regs[STACK_POINTER_REGNUM] ? true : false);
7777
7778 if (cfun->machine->base_reg)
7779 df_set_regs_ever_live (REGNO (cfun->machine->base_reg), true);
7780 }
7781
7782 /* Return true if it is legal to put a value with MODE into REGNO. */
7783
7784 bool
7785 s390_hard_regno_mode_ok (unsigned int regno, enum machine_mode mode)
7786 {
7787 switch (REGNO_REG_CLASS (regno))
7788 {
7789 case FP_REGS:
7790 if (REGNO_PAIR_OK (regno, mode))
7791 {
7792 if (mode == SImode || mode == DImode)
7793 return true;
7794
7795 if (FLOAT_MODE_P (mode) && GET_MODE_CLASS (mode) != MODE_VECTOR_FLOAT)
7796 return true;
7797 }
7798 break;
7799 case ADDR_REGS:
7800 if (FRAME_REGNO_P (regno) && mode == Pmode)
7801 return true;
7802
7803 /* fallthrough */
7804 case GENERAL_REGS:
7805 if (REGNO_PAIR_OK (regno, mode))
7806 {
7807 if (TARGET_ZARCH
7808 || (mode != TFmode && mode != TCmode && mode != TDmode))
7809 return true;
7810 }
7811 break;
7812 case CC_REGS:
7813 if (GET_MODE_CLASS (mode) == MODE_CC)
7814 return true;
7815 break;
7816 case ACCESS_REGS:
7817 if (REGNO_PAIR_OK (regno, mode))
7818 {
7819 if (mode == SImode || mode == Pmode)
7820 return true;
7821 }
7822 break;
7823 default:
7824 return false;
7825 }
7826
7827 return false;
7828 }
7829
7830 /* Return nonzero if register OLD_REG can be renamed to register NEW_REG. */
7831
7832 bool
7833 s390_hard_regno_rename_ok (unsigned int old_reg, unsigned int new_reg)
7834 {
7835 /* Once we've decided upon a register to use as base register, it must
7836 no longer be used for any other purpose. */
7837 if (cfun->machine->base_reg)
7838 if (REGNO (cfun->machine->base_reg) == old_reg
7839 || REGNO (cfun->machine->base_reg) == new_reg)
7840 return false;
7841
7842 return true;
7843 }
7844
7845 /* Maximum number of registers to represent a value of mode MODE
7846 in a register of class RCLASS. */
7847
7848 int
7849 s390_class_max_nregs (enum reg_class rclass, enum machine_mode mode)
7850 {
7851 switch (rclass)
7852 {
7853 case FP_REGS:
7854 if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
7855 return 2 * ((GET_MODE_SIZE (mode) / 2 + 8 - 1) / 8);
7856 else
7857 return (GET_MODE_SIZE (mode) + 8 - 1) / 8;
7858 case ACCESS_REGS:
7859 return (GET_MODE_SIZE (mode) + 4 - 1) / 4;
7860 default:
7861 break;
7862 }
7863 return (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
7864 }
7865
7866 /* Return true if register FROM can be eliminated via register TO. */
7867
7868 static bool
7869 s390_can_eliminate (const int from, const int to)
7870 {
7871 /* On zSeries machines, we have not marked the base register as fixed.
7872 Instead, we have an elimination rule BASE_REGNUM -> BASE_REGNUM.
7873 If a function requires the base register, we say here that this
7874 elimination cannot be performed. This will cause reload to free
7875 up the base register (as if it were fixed). On the other hand,
7876 if the current function does *not* require the base register, we
7877 say here the elimination succeeds, which in turn allows reload
7878 to allocate the base register for any other purpose. */
7879 if (from == BASE_REGNUM && to == BASE_REGNUM)
7880 {
7881 if (TARGET_CPU_ZARCH)
7882 {
7883 s390_init_frame_layout ();
7884 return cfun->machine->base_reg == NULL_RTX;
7885 }
7886
7887 return false;
7888 }
7889
7890 /* Everything else must point into the stack frame. */
7891 gcc_assert (to == STACK_POINTER_REGNUM
7892 || to == HARD_FRAME_POINTER_REGNUM);
7893
7894 gcc_assert (from == FRAME_POINTER_REGNUM
7895 || from == ARG_POINTER_REGNUM
7896 || from == RETURN_ADDRESS_POINTER_REGNUM);
7897
7898 /* Make sure we actually saved the return address. */
7899 if (from == RETURN_ADDRESS_POINTER_REGNUM)
7900 if (!crtl->calls_eh_return
7901 && !cfun->stdarg
7902 && !cfun_frame_layout.save_return_addr_p)
7903 return false;
7904
7905 return true;
7906 }
7907
7908 /* Return offset between register FROM and TO initially after prolog. */
7909
7910 HOST_WIDE_INT
7911 s390_initial_elimination_offset (int from, int to)
7912 {
7913 HOST_WIDE_INT offset;
7914 int index;
7915
7916 /* ??? Why are we called for non-eliminable pairs? */
7917 if (!s390_can_eliminate (from, to))
7918 return 0;
7919
7920 switch (from)
7921 {
7922 case FRAME_POINTER_REGNUM:
7923 offset = (get_frame_size()
7924 + STACK_POINTER_OFFSET
7925 + crtl->outgoing_args_size);
7926 break;
7927
7928 case ARG_POINTER_REGNUM:
7929 s390_init_frame_layout ();
7930 offset = cfun_frame_layout.frame_size + STACK_POINTER_OFFSET;
7931 break;
7932
7933 case RETURN_ADDRESS_POINTER_REGNUM:
7934 s390_init_frame_layout ();
7935 index = RETURN_REGNUM - cfun_frame_layout.first_save_gpr_slot;
7936 gcc_assert (index >= 0);
7937 offset = cfun_frame_layout.frame_size + cfun_frame_layout.gprs_offset;
7938 offset += index * UNITS_PER_LONG;
7939 break;
7940
7941 case BASE_REGNUM:
7942 offset = 0;
7943 break;
7944
7945 default:
7946 gcc_unreachable ();
7947 }
7948
7949 return offset;
7950 }
7951
7952 /* Emit insn to save fpr REGNUM at offset OFFSET relative
7953 to register BASE. Return generated insn. */
7954
7955 static rtx
7956 save_fpr (rtx base, int offset, int regnum)
7957 {
7958 rtx addr;
7959 addr = gen_rtx_MEM (DFmode, plus_constant (Pmode, base, offset));
7960
7961 if (regnum >= 16 && regnum <= (16 + FP_ARG_NUM_REG))
7962 set_mem_alias_set (addr, get_varargs_alias_set ());
7963 else
7964 set_mem_alias_set (addr, get_frame_alias_set ());
7965
7966 return emit_move_insn (addr, gen_rtx_REG (DFmode, regnum));
7967 }
7968
7969 /* Emit insn to restore fpr REGNUM from offset OFFSET relative
7970 to register BASE. Return generated insn. */
7971
7972 static rtx
7973 restore_fpr (rtx base, int offset, int regnum)
7974 {
7975 rtx addr;
7976 addr = gen_rtx_MEM (DFmode, plus_constant (Pmode, base, offset));
7977 set_mem_alias_set (addr, get_frame_alias_set ());
7978
7979 return emit_move_insn (gen_rtx_REG (DFmode, regnum), addr);
7980 }
7981
7982 /* Return true if REGNO is a global register, but not one
7983 of the special ones that need to be saved/restored in anyway. */
7984
7985 static inline bool
7986 global_not_special_regno_p (int regno)
7987 {
7988 return (global_regs[regno]
7989 /* These registers are special and need to be
7990 restored in any case. */
7991 && !(regno == STACK_POINTER_REGNUM
7992 || regno == RETURN_REGNUM
7993 || regno == BASE_REGNUM
7994 || (flag_pic && regno == (int)PIC_OFFSET_TABLE_REGNUM)));
7995 }
7996
7997 /* Generate insn to save registers FIRST to LAST into
7998 the register save area located at offset OFFSET
7999 relative to register BASE. */
8000
8001 static rtx
8002 save_gprs (rtx base, int offset, int first, int last)
8003 {
8004 rtx addr, insn, note;
8005 int i;
8006
8007 addr = plus_constant (Pmode, base, offset);
8008 addr = gen_rtx_MEM (Pmode, addr);
8009
8010 set_mem_alias_set (addr, get_frame_alias_set ());
8011
8012 /* Special-case single register. */
8013 if (first == last)
8014 {
8015 if (TARGET_64BIT)
8016 insn = gen_movdi (addr, gen_rtx_REG (Pmode, first));
8017 else
8018 insn = gen_movsi (addr, gen_rtx_REG (Pmode, first));
8019
8020 if (!global_not_special_regno_p (first))
8021 RTX_FRAME_RELATED_P (insn) = 1;
8022 return insn;
8023 }
8024
8025
8026 insn = gen_store_multiple (addr,
8027 gen_rtx_REG (Pmode, first),
8028 GEN_INT (last - first + 1));
8029
8030 if (first <= 6 && cfun->stdarg)
8031 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
8032 {
8033 rtx mem = XEXP (XVECEXP (PATTERN (insn), 0, i), 0);
8034
8035 if (first + i <= 6)
8036 set_mem_alias_set (mem, get_varargs_alias_set ());
8037 }
8038
8039 /* We need to set the FRAME_RELATED flag on all SETs
8040 inside the store-multiple pattern.
8041
8042 However, we must not emit DWARF records for registers 2..5
8043 if they are stored for use by variable arguments ...
8044
8045 ??? Unfortunately, it is not enough to simply not the
8046 FRAME_RELATED flags for those SETs, because the first SET
8047 of the PARALLEL is always treated as if it had the flag
8048 set, even if it does not. Therefore we emit a new pattern
8049 without those registers as REG_FRAME_RELATED_EXPR note. */
8050
8051 if (first >= 6 && !global_not_special_regno_p (first))
8052 {
8053 rtx pat = PATTERN (insn);
8054
8055 for (i = 0; i < XVECLEN (pat, 0); i++)
8056 if (GET_CODE (XVECEXP (pat, 0, i)) == SET
8057 && !global_not_special_regno_p (REGNO (SET_SRC (XVECEXP (pat,
8058 0, i)))))
8059 RTX_FRAME_RELATED_P (XVECEXP (pat, 0, i)) = 1;
8060
8061 RTX_FRAME_RELATED_P (insn) = 1;
8062 }
8063 else if (last >= 6)
8064 {
8065 int start;
8066
8067 for (start = first >= 6 ? first : 6; start <= last; start++)
8068 if (!global_not_special_regno_p (start))
8069 break;
8070
8071 if (start > last)
8072 return insn;
8073
8074 addr = plus_constant (Pmode, base,
8075 offset + (start - first) * UNITS_PER_LONG);
8076 note = gen_store_multiple (gen_rtx_MEM (Pmode, addr),
8077 gen_rtx_REG (Pmode, start),
8078 GEN_INT (last - start + 1));
8079 note = PATTERN (note);
8080
8081 add_reg_note (insn, REG_FRAME_RELATED_EXPR, note);
8082
8083 for (i = 0; i < XVECLEN (note, 0); i++)
8084 if (GET_CODE (XVECEXP (note, 0, i)) == SET
8085 && !global_not_special_regno_p (REGNO (SET_SRC (XVECEXP (note,
8086 0, i)))))
8087 RTX_FRAME_RELATED_P (XVECEXP (note, 0, i)) = 1;
8088
8089 RTX_FRAME_RELATED_P (insn) = 1;
8090 }
8091
8092 return insn;
8093 }
8094
8095 /* Generate insn to restore registers FIRST to LAST from
8096 the register save area located at offset OFFSET
8097 relative to register BASE. */
8098
8099 static rtx
8100 restore_gprs (rtx base, int offset, int first, int last)
8101 {
8102 rtx addr, insn;
8103
8104 addr = plus_constant (Pmode, base, offset);
8105 addr = gen_rtx_MEM (Pmode, addr);
8106 set_mem_alias_set (addr, get_frame_alias_set ());
8107
8108 /* Special-case single register. */
8109 if (first == last)
8110 {
8111 if (TARGET_64BIT)
8112 insn = gen_movdi (gen_rtx_REG (Pmode, first), addr);
8113 else
8114 insn = gen_movsi (gen_rtx_REG (Pmode, first), addr);
8115
8116 return insn;
8117 }
8118
8119 insn = gen_load_multiple (gen_rtx_REG (Pmode, first),
8120 addr,
8121 GEN_INT (last - first + 1));
8122 return insn;
8123 }
8124
8125 /* Return insn sequence to load the GOT register. */
8126
8127 static GTY(()) rtx got_symbol;
8128 rtx
8129 s390_load_got (void)
8130 {
8131 rtx insns;
8132
8133 /* We cannot use pic_offset_table_rtx here since we use this
8134 function also for non-pic if __tls_get_offset is called and in
8135 that case PIC_OFFSET_TABLE_REGNUM as well as pic_offset_table_rtx
8136 aren't usable. */
8137 rtx got_rtx = gen_rtx_REG (Pmode, 12);
8138
8139 if (!got_symbol)
8140 {
8141 got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
8142 SYMBOL_REF_FLAGS (got_symbol) = SYMBOL_FLAG_LOCAL;
8143 }
8144
8145 start_sequence ();
8146
8147 if (TARGET_CPU_ZARCH)
8148 {
8149 emit_move_insn (got_rtx, got_symbol);
8150 }
8151 else
8152 {
8153 rtx offset;
8154
8155 offset = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, got_symbol),
8156 UNSPEC_LTREL_OFFSET);
8157 offset = gen_rtx_CONST (Pmode, offset);
8158 offset = force_const_mem (Pmode, offset);
8159
8160 emit_move_insn (got_rtx, offset);
8161
8162 offset = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (offset, 0)),
8163 UNSPEC_LTREL_BASE);
8164 offset = gen_rtx_PLUS (Pmode, got_rtx, offset);
8165
8166 emit_move_insn (got_rtx, offset);
8167 }
8168
8169 insns = get_insns ();
8170 end_sequence ();
8171 return insns;
8172 }
8173
8174 /* This ties together stack memory (MEM with an alias set of frame_alias_set)
8175 and the change to the stack pointer. */
8176
8177 static void
8178 s390_emit_stack_tie (void)
8179 {
8180 rtx mem = gen_frame_mem (BLKmode,
8181 gen_rtx_REG (Pmode, STACK_POINTER_REGNUM));
8182
8183 emit_insn (gen_stack_tie (mem));
8184 }
8185
8186 /* Expand the prologue into a bunch of separate insns. */
8187
8188 void
8189 s390_emit_prologue (void)
8190 {
8191 rtx insn, addr;
8192 rtx temp_reg;
8193 int i;
8194 int offset;
8195 int next_fpr = 0;
8196
8197 /* Complete frame layout. */
8198
8199 s390_update_frame_layout ();
8200
8201 /* Annotate all constant pool references to let the scheduler know
8202 they implicitly use the base register. */
8203
8204 push_topmost_sequence ();
8205
8206 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8207 if (INSN_P (insn))
8208 {
8209 annotate_constant_pool_refs (&PATTERN (insn));
8210 df_insn_rescan (insn);
8211 }
8212
8213 pop_topmost_sequence ();
8214
8215 /* Choose best register to use for temp use within prologue.
8216 See below for why TPF must use the register 1. */
8217
8218 if (!has_hard_reg_initial_val (Pmode, RETURN_REGNUM)
8219 && !crtl->is_leaf
8220 && !TARGET_TPF_PROFILING)
8221 temp_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
8222 else
8223 temp_reg = gen_rtx_REG (Pmode, 1);
8224
8225 /* Save call saved gprs. */
8226 if (cfun_frame_layout.first_save_gpr != -1)
8227 {
8228 insn = save_gprs (stack_pointer_rtx,
8229 cfun_frame_layout.gprs_offset +
8230 UNITS_PER_LONG * (cfun_frame_layout.first_save_gpr
8231 - cfun_frame_layout.first_save_gpr_slot),
8232 cfun_frame_layout.first_save_gpr,
8233 cfun_frame_layout.last_save_gpr);
8234 emit_insn (insn);
8235 }
8236
8237 /* Dummy insn to mark literal pool slot. */
8238
8239 if (cfun->machine->base_reg)
8240 emit_insn (gen_main_pool (cfun->machine->base_reg));
8241
8242 offset = cfun_frame_layout.f0_offset;
8243
8244 /* Save f0 and f2. */
8245 for (i = 0; i < 2; i++)
8246 {
8247 if (cfun_fpr_bit_p (i))
8248 {
8249 save_fpr (stack_pointer_rtx, offset, i + 16);
8250 offset += 8;
8251 }
8252 else if (!TARGET_PACKED_STACK)
8253 offset += 8;
8254 }
8255
8256 /* Save f4 and f6. */
8257 offset = cfun_frame_layout.f4_offset;
8258 for (i = 2; i < 4; i++)
8259 {
8260 if (cfun_fpr_bit_p (i))
8261 {
8262 insn = save_fpr (stack_pointer_rtx, offset, i + 16);
8263 offset += 8;
8264
8265 /* If f4 and f6 are call clobbered they are saved due to stdargs and
8266 therefore are not frame related. */
8267 if (!call_really_used_regs[i + 16])
8268 RTX_FRAME_RELATED_P (insn) = 1;
8269 }
8270 else if (!TARGET_PACKED_STACK)
8271 offset += 8;
8272 }
8273
8274 if (TARGET_PACKED_STACK
8275 && cfun_save_high_fprs_p
8276 && cfun_frame_layout.f8_offset + cfun_frame_layout.high_fprs * 8 > 0)
8277 {
8278 offset = (cfun_frame_layout.f8_offset
8279 + (cfun_frame_layout.high_fprs - 1) * 8);
8280
8281 for (i = 15; i > 7 && offset >= 0; i--)
8282 if (cfun_fpr_bit_p (i))
8283 {
8284 insn = save_fpr (stack_pointer_rtx, offset, i + 16);
8285
8286 RTX_FRAME_RELATED_P (insn) = 1;
8287 offset -= 8;
8288 }
8289 if (offset >= cfun_frame_layout.f8_offset)
8290 next_fpr = i + 16;
8291 }
8292
8293 if (!TARGET_PACKED_STACK)
8294 next_fpr = cfun_save_high_fprs_p ? 31 : 0;
8295
8296 if (flag_stack_usage_info)
8297 current_function_static_stack_size = cfun_frame_layout.frame_size;
8298
8299 /* Decrement stack pointer. */
8300
8301 if (cfun_frame_layout.frame_size > 0)
8302 {
8303 rtx frame_off = GEN_INT (-cfun_frame_layout.frame_size);
8304 rtx real_frame_off;
8305
8306 if (s390_stack_size)
8307 {
8308 HOST_WIDE_INT stack_guard;
8309
8310 if (s390_stack_guard)
8311 stack_guard = s390_stack_guard;
8312 else
8313 {
8314 /* If no value for stack guard is provided the smallest power of 2
8315 larger than the current frame size is chosen. */
8316 stack_guard = 1;
8317 while (stack_guard < cfun_frame_layout.frame_size)
8318 stack_guard <<= 1;
8319 }
8320
8321 if (cfun_frame_layout.frame_size >= s390_stack_size)
8322 {
8323 warning (0, "frame size of function %qs is %wd"
8324 " bytes exceeding user provided stack limit of "
8325 "%d bytes. "
8326 "An unconditional trap is added.",
8327 current_function_name(), cfun_frame_layout.frame_size,
8328 s390_stack_size);
8329 emit_insn (gen_trap ());
8330 }
8331 else
8332 {
8333 /* stack_guard has to be smaller than s390_stack_size.
8334 Otherwise we would emit an AND with zero which would
8335 not match the test under mask pattern. */
8336 if (stack_guard >= s390_stack_size)
8337 {
8338 warning (0, "frame size of function %qs is %wd"
8339 " bytes which is more than half the stack size. "
8340 "The dynamic check would not be reliable. "
8341 "No check emitted for this function.",
8342 current_function_name(),
8343 cfun_frame_layout.frame_size);
8344 }
8345 else
8346 {
8347 HOST_WIDE_INT stack_check_mask = ((s390_stack_size - 1)
8348 & ~(stack_guard - 1));
8349
8350 rtx t = gen_rtx_AND (Pmode, stack_pointer_rtx,
8351 GEN_INT (stack_check_mask));
8352 if (TARGET_64BIT)
8353 emit_insn (gen_ctrapdi4 (gen_rtx_EQ (VOIDmode,
8354 t, const0_rtx),
8355 t, const0_rtx, const0_rtx));
8356 else
8357 emit_insn (gen_ctrapsi4 (gen_rtx_EQ (VOIDmode,
8358 t, const0_rtx),
8359 t, const0_rtx, const0_rtx));
8360 }
8361 }
8362 }
8363
8364 if (s390_warn_framesize > 0
8365 && cfun_frame_layout.frame_size >= s390_warn_framesize)
8366 warning (0, "frame size of %qs is %wd bytes",
8367 current_function_name (), cfun_frame_layout.frame_size);
8368
8369 if (s390_warn_dynamicstack_p && cfun->calls_alloca)
8370 warning (0, "%qs uses dynamic stack allocation", current_function_name ());
8371
8372 /* Save incoming stack pointer into temp reg. */
8373 if (TARGET_BACKCHAIN || next_fpr)
8374 insn = emit_insn (gen_move_insn (temp_reg, stack_pointer_rtx));
8375
8376 /* Subtract frame size from stack pointer. */
8377
8378 if (DISP_IN_RANGE (INTVAL (frame_off)))
8379 {
8380 insn = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
8381 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
8382 frame_off));
8383 insn = emit_insn (insn);
8384 }
8385 else
8386 {
8387 if (!CONST_OK_FOR_K (INTVAL (frame_off)))
8388 frame_off = force_const_mem (Pmode, frame_off);
8389
8390 insn = emit_insn (gen_add2_insn (stack_pointer_rtx, frame_off));
8391 annotate_constant_pool_refs (&PATTERN (insn));
8392 }
8393
8394 RTX_FRAME_RELATED_P (insn) = 1;
8395 real_frame_off = GEN_INT (-cfun_frame_layout.frame_size);
8396 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
8397 gen_rtx_SET (VOIDmode, stack_pointer_rtx,
8398 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
8399 real_frame_off)));
8400
8401 /* Set backchain. */
8402
8403 if (TARGET_BACKCHAIN)
8404 {
8405 if (cfun_frame_layout.backchain_offset)
8406 addr = gen_rtx_MEM (Pmode,
8407 plus_constant (Pmode, stack_pointer_rtx,
8408 cfun_frame_layout.backchain_offset));
8409 else
8410 addr = gen_rtx_MEM (Pmode, stack_pointer_rtx);
8411 set_mem_alias_set (addr, get_frame_alias_set ());
8412 insn = emit_insn (gen_move_insn (addr, temp_reg));
8413 }
8414
8415 /* If we support non-call exceptions (e.g. for Java),
8416 we need to make sure the backchain pointer is set up
8417 before any possibly trapping memory access. */
8418 if (TARGET_BACKCHAIN && cfun->can_throw_non_call_exceptions)
8419 {
8420 addr = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (VOIDmode));
8421 emit_clobber (addr);
8422 }
8423 }
8424
8425 /* Save fprs 8 - 15 (64 bit ABI). */
8426
8427 if (cfun_save_high_fprs_p && next_fpr)
8428 {
8429 /* If the stack might be accessed through a different register
8430 we have to make sure that the stack pointer decrement is not
8431 moved below the use of the stack slots. */
8432 s390_emit_stack_tie ();
8433
8434 insn = emit_insn (gen_add2_insn (temp_reg,
8435 GEN_INT (cfun_frame_layout.f8_offset)));
8436
8437 offset = 0;
8438
8439 for (i = 24; i <= next_fpr; i++)
8440 if (cfun_fpr_bit_p (i - 16))
8441 {
8442 rtx addr = plus_constant (Pmode, stack_pointer_rtx,
8443 cfun_frame_layout.frame_size
8444 + cfun_frame_layout.f8_offset
8445 + offset);
8446
8447 insn = save_fpr (temp_reg, offset, i);
8448 offset += 8;
8449 RTX_FRAME_RELATED_P (insn) = 1;
8450 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
8451 gen_rtx_SET (VOIDmode,
8452 gen_rtx_MEM (DFmode, addr),
8453 gen_rtx_REG (DFmode, i)));
8454 }
8455 }
8456
8457 /* Set frame pointer, if needed. */
8458
8459 if (frame_pointer_needed)
8460 {
8461 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
8462 RTX_FRAME_RELATED_P (insn) = 1;
8463 }
8464
8465 /* Set up got pointer, if needed. */
8466
8467 if (flag_pic && df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM))
8468 {
8469 rtx insns = s390_load_got ();
8470
8471 for (insn = insns; insn; insn = NEXT_INSN (insn))
8472 annotate_constant_pool_refs (&PATTERN (insn));
8473
8474 emit_insn (insns);
8475 }
8476
8477 if (TARGET_TPF_PROFILING)
8478 {
8479 /* Generate a BAS instruction to serve as a function
8480 entry intercept to facilitate the use of tracing
8481 algorithms located at the branch target. */
8482 emit_insn (gen_prologue_tpf ());
8483
8484 /* Emit a blockage here so that all code
8485 lies between the profiling mechanisms. */
8486 emit_insn (gen_blockage ());
8487 }
8488 }
8489
8490 /* Expand the epilogue into a bunch of separate insns. */
8491
8492 void
8493 s390_emit_epilogue (bool sibcall)
8494 {
8495 rtx frame_pointer, return_reg, cfa_restores = NULL_RTX;
8496 int area_bottom, area_top, offset = 0;
8497 int next_offset;
8498 rtvec p;
8499 int i;
8500
8501 if (TARGET_TPF_PROFILING)
8502 {
8503
8504 /* Generate a BAS instruction to serve as a function
8505 entry intercept to facilitate the use of tracing
8506 algorithms located at the branch target. */
8507
8508 /* Emit a blockage here so that all code
8509 lies between the profiling mechanisms. */
8510 emit_insn (gen_blockage ());
8511
8512 emit_insn (gen_epilogue_tpf ());
8513 }
8514
8515 /* Check whether to use frame or stack pointer for restore. */
8516
8517 frame_pointer = (frame_pointer_needed
8518 ? hard_frame_pointer_rtx : stack_pointer_rtx);
8519
8520 s390_frame_area (&area_bottom, &area_top);
8521
8522 /* Check whether we can access the register save area.
8523 If not, increment the frame pointer as required. */
8524
8525 if (area_top <= area_bottom)
8526 {
8527 /* Nothing to restore. */
8528 }
8529 else if (DISP_IN_RANGE (cfun_frame_layout.frame_size + area_bottom)
8530 && DISP_IN_RANGE (cfun_frame_layout.frame_size + area_top - 1))
8531 {
8532 /* Area is in range. */
8533 offset = cfun_frame_layout.frame_size;
8534 }
8535 else
8536 {
8537 rtx insn, frame_off, cfa;
8538
8539 offset = area_bottom < 0 ? -area_bottom : 0;
8540 frame_off = GEN_INT (cfun_frame_layout.frame_size - offset);
8541
8542 cfa = gen_rtx_SET (VOIDmode, frame_pointer,
8543 gen_rtx_PLUS (Pmode, frame_pointer, frame_off));
8544 if (DISP_IN_RANGE (INTVAL (frame_off)))
8545 {
8546 insn = gen_rtx_SET (VOIDmode, frame_pointer,
8547 gen_rtx_PLUS (Pmode, frame_pointer, frame_off));
8548 insn = emit_insn (insn);
8549 }
8550 else
8551 {
8552 if (!CONST_OK_FOR_K (INTVAL (frame_off)))
8553 frame_off = force_const_mem (Pmode, frame_off);
8554
8555 insn = emit_insn (gen_add2_insn (frame_pointer, frame_off));
8556 annotate_constant_pool_refs (&PATTERN (insn));
8557 }
8558 add_reg_note (insn, REG_CFA_ADJUST_CFA, cfa);
8559 RTX_FRAME_RELATED_P (insn) = 1;
8560 }
8561
8562 /* Restore call saved fprs. */
8563
8564 if (TARGET_64BIT)
8565 {
8566 if (cfun_save_high_fprs_p)
8567 {
8568 next_offset = cfun_frame_layout.f8_offset;
8569 for (i = 24; i < 32; i++)
8570 {
8571 if (cfun_fpr_bit_p (i - 16))
8572 {
8573 restore_fpr (frame_pointer,
8574 offset + next_offset, i);
8575 cfa_restores
8576 = alloc_reg_note (REG_CFA_RESTORE,
8577 gen_rtx_REG (DFmode, i), cfa_restores);
8578 next_offset += 8;
8579 }
8580 }
8581 }
8582
8583 }
8584 else
8585 {
8586 next_offset = cfun_frame_layout.f4_offset;
8587 for (i = 18; i < 20; i++)
8588 {
8589 if (cfun_fpr_bit_p (i - 16))
8590 {
8591 restore_fpr (frame_pointer,
8592 offset + next_offset, i);
8593 cfa_restores
8594 = alloc_reg_note (REG_CFA_RESTORE,
8595 gen_rtx_REG (DFmode, i), cfa_restores);
8596 next_offset += 8;
8597 }
8598 else if (!TARGET_PACKED_STACK)
8599 next_offset += 8;
8600 }
8601
8602 }
8603
8604 /* Return register. */
8605
8606 return_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
8607
8608 /* Restore call saved gprs. */
8609
8610 if (cfun_frame_layout.first_restore_gpr != -1)
8611 {
8612 rtx insn, addr;
8613 int i;
8614
8615 /* Check for global register and save them
8616 to stack location from where they get restored. */
8617
8618 for (i = cfun_frame_layout.first_restore_gpr;
8619 i <= cfun_frame_layout.last_restore_gpr;
8620 i++)
8621 {
8622 if (global_not_special_regno_p (i))
8623 {
8624 addr = plus_constant (Pmode, frame_pointer,
8625 offset + cfun_frame_layout.gprs_offset
8626 + (i - cfun_frame_layout.first_save_gpr_slot)
8627 * UNITS_PER_LONG);
8628 addr = gen_rtx_MEM (Pmode, addr);
8629 set_mem_alias_set (addr, get_frame_alias_set ());
8630 emit_move_insn (addr, gen_rtx_REG (Pmode, i));
8631 }
8632 else
8633 cfa_restores
8634 = alloc_reg_note (REG_CFA_RESTORE,
8635 gen_rtx_REG (Pmode, i), cfa_restores);
8636 }
8637
8638 if (! sibcall)
8639 {
8640 /* Fetch return address from stack before load multiple,
8641 this will do good for scheduling. */
8642
8643 if (cfun_frame_layout.save_return_addr_p
8644 || (cfun_frame_layout.first_restore_gpr < BASE_REGNUM
8645 && cfun_frame_layout.last_restore_gpr > RETURN_REGNUM))
8646 {
8647 int return_regnum = find_unused_clobbered_reg();
8648 if (!return_regnum)
8649 return_regnum = 4;
8650 return_reg = gen_rtx_REG (Pmode, return_regnum);
8651
8652 addr = plus_constant (Pmode, frame_pointer,
8653 offset + cfun_frame_layout.gprs_offset
8654 + (RETURN_REGNUM
8655 - cfun_frame_layout.first_save_gpr_slot)
8656 * UNITS_PER_LONG);
8657 addr = gen_rtx_MEM (Pmode, addr);
8658 set_mem_alias_set (addr, get_frame_alias_set ());
8659 emit_move_insn (return_reg, addr);
8660 }
8661 }
8662
8663 insn = restore_gprs (frame_pointer,
8664 offset + cfun_frame_layout.gprs_offset
8665 + (cfun_frame_layout.first_restore_gpr
8666 - cfun_frame_layout.first_save_gpr_slot)
8667 * UNITS_PER_LONG,
8668 cfun_frame_layout.first_restore_gpr,
8669 cfun_frame_layout.last_restore_gpr);
8670 insn = emit_insn (insn);
8671 REG_NOTES (insn) = cfa_restores;
8672 add_reg_note (insn, REG_CFA_DEF_CFA,
8673 plus_constant (Pmode, stack_pointer_rtx,
8674 STACK_POINTER_OFFSET));
8675 RTX_FRAME_RELATED_P (insn) = 1;
8676 }
8677
8678 if (! sibcall)
8679 {
8680
8681 /* Return to caller. */
8682
8683 p = rtvec_alloc (2);
8684
8685 RTVEC_ELT (p, 0) = ret_rtx;
8686 RTVEC_ELT (p, 1) = gen_rtx_USE (VOIDmode, return_reg);
8687 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
8688 }
8689 }
8690
8691
8692 /* Return the size in bytes of a function argument of
8693 type TYPE and/or mode MODE. At least one of TYPE or
8694 MODE must be specified. */
8695
8696 static int
8697 s390_function_arg_size (enum machine_mode mode, const_tree type)
8698 {
8699 if (type)
8700 return int_size_in_bytes (type);
8701
8702 /* No type info available for some library calls ... */
8703 if (mode != BLKmode)
8704 return GET_MODE_SIZE (mode);
8705
8706 /* If we have neither type nor mode, abort */
8707 gcc_unreachable ();
8708 }
8709
8710 /* Return true if a function argument of type TYPE and mode MODE
8711 is to be passed in a floating-point register, if available. */
8712
8713 static bool
8714 s390_function_arg_float (enum machine_mode mode, const_tree type)
8715 {
8716 int size = s390_function_arg_size (mode, type);
8717 if (size > 8)
8718 return false;
8719
8720 /* Soft-float changes the ABI: no floating-point registers are used. */
8721 if (TARGET_SOFT_FLOAT)
8722 return false;
8723
8724 /* No type info available for some library calls ... */
8725 if (!type)
8726 return mode == SFmode || mode == DFmode || mode == SDmode || mode == DDmode;
8727
8728 /* The ABI says that record types with a single member are treated
8729 just like that member would be. */
8730 while (TREE_CODE (type) == RECORD_TYPE)
8731 {
8732 tree field, single = NULL_TREE;
8733
8734 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
8735 {
8736 if (TREE_CODE (field) != FIELD_DECL)
8737 continue;
8738
8739 if (single == NULL_TREE)
8740 single = TREE_TYPE (field);
8741 else
8742 return false;
8743 }
8744
8745 if (single == NULL_TREE)
8746 return false;
8747 else
8748 type = single;
8749 }
8750
8751 return TREE_CODE (type) == REAL_TYPE;
8752 }
8753
8754 /* Return true if a function argument of type TYPE and mode MODE
8755 is to be passed in an integer register, or a pair of integer
8756 registers, if available. */
8757
8758 static bool
8759 s390_function_arg_integer (enum machine_mode mode, const_tree type)
8760 {
8761 int size = s390_function_arg_size (mode, type);
8762 if (size > 8)
8763 return false;
8764
8765 /* No type info available for some library calls ... */
8766 if (!type)
8767 return GET_MODE_CLASS (mode) == MODE_INT
8768 || (TARGET_SOFT_FLOAT && SCALAR_FLOAT_MODE_P (mode));
8769
8770 /* We accept small integral (and similar) types. */
8771 if (INTEGRAL_TYPE_P (type)
8772 || POINTER_TYPE_P (type)
8773 || TREE_CODE (type) == NULLPTR_TYPE
8774 || TREE_CODE (type) == OFFSET_TYPE
8775 || (TARGET_SOFT_FLOAT && TREE_CODE (type) == REAL_TYPE))
8776 return true;
8777
8778 /* We also accept structs of size 1, 2, 4, 8 that are not
8779 passed in floating-point registers. */
8780 if (AGGREGATE_TYPE_P (type)
8781 && exact_log2 (size) >= 0
8782 && !s390_function_arg_float (mode, type))
8783 return true;
8784
8785 return false;
8786 }
8787
8788 /* Return 1 if a function argument of type TYPE and mode MODE
8789 is to be passed by reference. The ABI specifies that only
8790 structures of size 1, 2, 4, or 8 bytes are passed by value,
8791 all other structures (and complex numbers) are passed by
8792 reference. */
8793
8794 static bool
8795 s390_pass_by_reference (cumulative_args_t ca ATTRIBUTE_UNUSED,
8796 enum machine_mode mode, const_tree type,
8797 bool named ATTRIBUTE_UNUSED)
8798 {
8799 int size = s390_function_arg_size (mode, type);
8800 if (size > 8)
8801 return true;
8802
8803 if (type)
8804 {
8805 if (AGGREGATE_TYPE_P (type) && exact_log2 (size) < 0)
8806 return 1;
8807
8808 if (TREE_CODE (type) == COMPLEX_TYPE
8809 || TREE_CODE (type) == VECTOR_TYPE)
8810 return 1;
8811 }
8812
8813 return 0;
8814 }
8815
8816 /* Update the data in CUM to advance over an argument of mode MODE and
8817 data type TYPE. (TYPE is null for libcalls where that information
8818 may not be available.). The boolean NAMED specifies whether the
8819 argument is a named argument (as opposed to an unnamed argument
8820 matching an ellipsis). */
8821
8822 static void
8823 s390_function_arg_advance (cumulative_args_t cum_v, enum machine_mode mode,
8824 const_tree type, bool named ATTRIBUTE_UNUSED)
8825 {
8826 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
8827
8828 if (s390_function_arg_float (mode, type))
8829 {
8830 cum->fprs += 1;
8831 }
8832 else if (s390_function_arg_integer (mode, type))
8833 {
8834 int size = s390_function_arg_size (mode, type);
8835 cum->gprs += ((size + UNITS_PER_LONG - 1) / UNITS_PER_LONG);
8836 }
8837 else
8838 gcc_unreachable ();
8839 }
8840
8841 /* Define where to put the arguments to a function.
8842 Value is zero to push the argument on the stack,
8843 or a hard register in which to store the argument.
8844
8845 MODE is the argument's machine mode.
8846 TYPE is the data type of the argument (as a tree).
8847 This is null for libcalls where that information may
8848 not be available.
8849 CUM is a variable of type CUMULATIVE_ARGS which gives info about
8850 the preceding args and about the function being called.
8851 NAMED is nonzero if this argument is a named parameter
8852 (otherwise it is an extra parameter matching an ellipsis).
8853
8854 On S/390, we use general purpose registers 2 through 6 to
8855 pass integer, pointer, and certain structure arguments, and
8856 floating point registers 0 and 2 (0, 2, 4, and 6 on 64-bit)
8857 to pass floating point arguments. All remaining arguments
8858 are pushed to the stack. */
8859
8860 static rtx
8861 s390_function_arg (cumulative_args_t cum_v, enum machine_mode mode,
8862 const_tree type, bool named ATTRIBUTE_UNUSED)
8863 {
8864 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
8865
8866 if (s390_function_arg_float (mode, type))
8867 {
8868 if (cum->fprs + 1 > FP_ARG_NUM_REG)
8869 return 0;
8870 else
8871 return gen_rtx_REG (mode, cum->fprs + 16);
8872 }
8873 else if (s390_function_arg_integer (mode, type))
8874 {
8875 int size = s390_function_arg_size (mode, type);
8876 int n_gprs = (size + UNITS_PER_LONG - 1) / UNITS_PER_LONG;
8877
8878 if (cum->gprs + n_gprs > GP_ARG_NUM_REG)
8879 return 0;
8880 else if (n_gprs == 1 || UNITS_PER_WORD == UNITS_PER_LONG)
8881 return gen_rtx_REG (mode, cum->gprs + 2);
8882 else if (n_gprs == 2)
8883 {
8884 rtvec p = rtvec_alloc (2);
8885
8886 RTVEC_ELT (p, 0)
8887 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, cum->gprs + 2),
8888 const0_rtx);
8889 RTVEC_ELT (p, 1)
8890 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, cum->gprs + 3),
8891 GEN_INT (4));
8892
8893 return gen_rtx_PARALLEL (mode, p);
8894 }
8895 }
8896
8897 /* After the real arguments, expand_call calls us once again
8898 with a void_type_node type. Whatever we return here is
8899 passed as operand 2 to the call expanders.
8900
8901 We don't need this feature ... */
8902 else if (type == void_type_node)
8903 return const0_rtx;
8904
8905 gcc_unreachable ();
8906 }
8907
8908 /* Return true if return values of type TYPE should be returned
8909 in a memory buffer whose address is passed by the caller as
8910 hidden first argument. */
8911
8912 static bool
8913 s390_return_in_memory (const_tree type, const_tree fundecl ATTRIBUTE_UNUSED)
8914 {
8915 /* We accept small integral (and similar) types. */
8916 if (INTEGRAL_TYPE_P (type)
8917 || POINTER_TYPE_P (type)
8918 || TREE_CODE (type) == OFFSET_TYPE
8919 || TREE_CODE (type) == REAL_TYPE)
8920 return int_size_in_bytes (type) > 8;
8921
8922 /* Aggregates and similar constructs are always returned
8923 in memory. */
8924 if (AGGREGATE_TYPE_P (type)
8925 || TREE_CODE (type) == COMPLEX_TYPE
8926 || TREE_CODE (type) == VECTOR_TYPE)
8927 return true;
8928
8929 /* ??? We get called on all sorts of random stuff from
8930 aggregate_value_p. We can't abort, but it's not clear
8931 what's safe to return. Pretend it's a struct I guess. */
8932 return true;
8933 }
8934
8935 /* Function arguments and return values are promoted to word size. */
8936
8937 static enum machine_mode
8938 s390_promote_function_mode (const_tree type, enum machine_mode mode,
8939 int *punsignedp,
8940 const_tree fntype ATTRIBUTE_UNUSED,
8941 int for_return ATTRIBUTE_UNUSED)
8942 {
8943 if (INTEGRAL_MODE_P (mode)
8944 && GET_MODE_SIZE (mode) < UNITS_PER_LONG)
8945 {
8946 if (type != NULL_TREE && POINTER_TYPE_P (type))
8947 *punsignedp = POINTERS_EXTEND_UNSIGNED;
8948 return Pmode;
8949 }
8950
8951 return mode;
8952 }
8953
8954 /* Define where to return a (scalar) value of type RET_TYPE.
8955 If RET_TYPE is null, define where to return a (scalar)
8956 value of mode MODE from a libcall. */
8957
8958 static rtx
8959 s390_function_and_libcall_value (enum machine_mode mode,
8960 const_tree ret_type,
8961 const_tree fntype_or_decl,
8962 bool outgoing ATTRIBUTE_UNUSED)
8963 {
8964 /* For normal functions perform the promotion as
8965 promote_function_mode would do. */
8966 if (ret_type)
8967 {
8968 int unsignedp = TYPE_UNSIGNED (ret_type);
8969 mode = promote_function_mode (ret_type, mode, &unsignedp,
8970 fntype_or_decl, 1);
8971 }
8972
8973 gcc_assert (GET_MODE_CLASS (mode) == MODE_INT || SCALAR_FLOAT_MODE_P (mode));
8974 gcc_assert (GET_MODE_SIZE (mode) <= 8);
8975
8976 if (TARGET_HARD_FLOAT && SCALAR_FLOAT_MODE_P (mode))
8977 return gen_rtx_REG (mode, 16);
8978 else if (GET_MODE_SIZE (mode) <= UNITS_PER_LONG
8979 || UNITS_PER_LONG == UNITS_PER_WORD)
8980 return gen_rtx_REG (mode, 2);
8981 else if (GET_MODE_SIZE (mode) == 2 * UNITS_PER_LONG)
8982 {
8983 /* This case is triggered when returning a 64 bit value with
8984 -m31 -mzarch. Although the value would fit into a single
8985 register it has to be forced into a 32 bit register pair in
8986 order to match the ABI. */
8987 rtvec p = rtvec_alloc (2);
8988
8989 RTVEC_ELT (p, 0)
8990 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, 2), const0_rtx);
8991 RTVEC_ELT (p, 1)
8992 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, 3), GEN_INT (4));
8993
8994 return gen_rtx_PARALLEL (mode, p);
8995 }
8996
8997 gcc_unreachable ();
8998 }
8999
9000 /* Define where to return a scalar return value of type RET_TYPE. */
9001
9002 static rtx
9003 s390_function_value (const_tree ret_type, const_tree fn_decl_or_type,
9004 bool outgoing)
9005 {
9006 return s390_function_and_libcall_value (TYPE_MODE (ret_type), ret_type,
9007 fn_decl_or_type, outgoing);
9008 }
9009
9010 /* Define where to return a scalar libcall return value of mode
9011 MODE. */
9012
9013 static rtx
9014 s390_libcall_value (enum machine_mode mode, const_rtx fun ATTRIBUTE_UNUSED)
9015 {
9016 return s390_function_and_libcall_value (mode, NULL_TREE,
9017 NULL_TREE, true);
9018 }
9019
9020
9021 /* Create and return the va_list datatype.
9022
9023 On S/390, va_list is an array type equivalent to
9024
9025 typedef struct __va_list_tag
9026 {
9027 long __gpr;
9028 long __fpr;
9029 void *__overflow_arg_area;
9030 void *__reg_save_area;
9031 } va_list[1];
9032
9033 where __gpr and __fpr hold the number of general purpose
9034 or floating point arguments used up to now, respectively,
9035 __overflow_arg_area points to the stack location of the
9036 next argument passed on the stack, and __reg_save_area
9037 always points to the start of the register area in the
9038 call frame of the current function. The function prologue
9039 saves all registers used for argument passing into this
9040 area if the function uses variable arguments. */
9041
9042 static tree
9043 s390_build_builtin_va_list (void)
9044 {
9045 tree f_gpr, f_fpr, f_ovf, f_sav, record, type_decl;
9046
9047 record = lang_hooks.types.make_type (RECORD_TYPE);
9048
9049 type_decl =
9050 build_decl (BUILTINS_LOCATION,
9051 TYPE_DECL, get_identifier ("__va_list_tag"), record);
9052
9053 f_gpr = build_decl (BUILTINS_LOCATION,
9054 FIELD_DECL, get_identifier ("__gpr"),
9055 long_integer_type_node);
9056 f_fpr = build_decl (BUILTINS_LOCATION,
9057 FIELD_DECL, get_identifier ("__fpr"),
9058 long_integer_type_node);
9059 f_ovf = build_decl (BUILTINS_LOCATION,
9060 FIELD_DECL, get_identifier ("__overflow_arg_area"),
9061 ptr_type_node);
9062 f_sav = build_decl (BUILTINS_LOCATION,
9063 FIELD_DECL, get_identifier ("__reg_save_area"),
9064 ptr_type_node);
9065
9066 va_list_gpr_counter_field = f_gpr;
9067 va_list_fpr_counter_field = f_fpr;
9068
9069 DECL_FIELD_CONTEXT (f_gpr) = record;
9070 DECL_FIELD_CONTEXT (f_fpr) = record;
9071 DECL_FIELD_CONTEXT (f_ovf) = record;
9072 DECL_FIELD_CONTEXT (f_sav) = record;
9073
9074 TYPE_STUB_DECL (record) = type_decl;
9075 TYPE_NAME (record) = type_decl;
9076 TYPE_FIELDS (record) = f_gpr;
9077 DECL_CHAIN (f_gpr) = f_fpr;
9078 DECL_CHAIN (f_fpr) = f_ovf;
9079 DECL_CHAIN (f_ovf) = f_sav;
9080
9081 layout_type (record);
9082
9083 /* The correct type is an array type of one element. */
9084 return build_array_type (record, build_index_type (size_zero_node));
9085 }
9086
9087 /* Implement va_start by filling the va_list structure VALIST.
9088 STDARG_P is always true, and ignored.
9089 NEXTARG points to the first anonymous stack argument.
9090
9091 The following global variables are used to initialize
9092 the va_list structure:
9093
9094 crtl->args.info:
9095 holds number of gprs and fprs used for named arguments.
9096 crtl->args.arg_offset_rtx:
9097 holds the offset of the first anonymous stack argument
9098 (relative to the virtual arg pointer). */
9099
9100 static void
9101 s390_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
9102 {
9103 HOST_WIDE_INT n_gpr, n_fpr;
9104 int off;
9105 tree f_gpr, f_fpr, f_ovf, f_sav;
9106 tree gpr, fpr, ovf, sav, t;
9107
9108 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
9109 f_fpr = DECL_CHAIN (f_gpr);
9110 f_ovf = DECL_CHAIN (f_fpr);
9111 f_sav = DECL_CHAIN (f_ovf);
9112
9113 valist = build_simple_mem_ref (valist);
9114 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
9115 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
9116 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
9117 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
9118
9119 /* Count number of gp and fp argument registers used. */
9120
9121 n_gpr = crtl->args.info.gprs;
9122 n_fpr = crtl->args.info.fprs;
9123
9124 if (cfun->va_list_gpr_size)
9125 {
9126 t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
9127 build_int_cst (NULL_TREE, n_gpr));
9128 TREE_SIDE_EFFECTS (t) = 1;
9129 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
9130 }
9131
9132 if (cfun->va_list_fpr_size)
9133 {
9134 t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
9135 build_int_cst (NULL_TREE, n_fpr));
9136 TREE_SIDE_EFFECTS (t) = 1;
9137 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
9138 }
9139
9140 /* Find the overflow area. */
9141 if (n_gpr + cfun->va_list_gpr_size > GP_ARG_NUM_REG
9142 || n_fpr + cfun->va_list_fpr_size > FP_ARG_NUM_REG)
9143 {
9144 t = make_tree (TREE_TYPE (ovf), virtual_incoming_args_rtx);
9145
9146 off = INTVAL (crtl->args.arg_offset_rtx);
9147 off = off < 0 ? 0 : off;
9148 if (TARGET_DEBUG_ARG)
9149 fprintf (stderr, "va_start: n_gpr = %d, n_fpr = %d off %d\n",
9150 (int)n_gpr, (int)n_fpr, off);
9151
9152 t = fold_build_pointer_plus_hwi (t, off);
9153
9154 t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
9155 TREE_SIDE_EFFECTS (t) = 1;
9156 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
9157 }
9158
9159 /* Find the register save area. */
9160 if ((cfun->va_list_gpr_size && n_gpr < GP_ARG_NUM_REG)
9161 || (cfun->va_list_fpr_size && n_fpr < FP_ARG_NUM_REG))
9162 {
9163 t = make_tree (TREE_TYPE (sav), return_address_pointer_rtx);
9164 t = fold_build_pointer_plus_hwi (t, -RETURN_REGNUM * UNITS_PER_LONG);
9165
9166 t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
9167 TREE_SIDE_EFFECTS (t) = 1;
9168 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
9169 }
9170 }
9171
9172 /* Implement va_arg by updating the va_list structure
9173 VALIST as required to retrieve an argument of type
9174 TYPE, and returning that argument.
9175
9176 Generates code equivalent to:
9177
9178 if (integral value) {
9179 if (size <= 4 && args.gpr < 5 ||
9180 size > 4 && args.gpr < 4 )
9181 ret = args.reg_save_area[args.gpr+8]
9182 else
9183 ret = *args.overflow_arg_area++;
9184 } else if (float value) {
9185 if (args.fgpr < 2)
9186 ret = args.reg_save_area[args.fpr+64]
9187 else
9188 ret = *args.overflow_arg_area++;
9189 } else if (aggregate value) {
9190 if (args.gpr < 5)
9191 ret = *args.reg_save_area[args.gpr]
9192 else
9193 ret = **args.overflow_arg_area++;
9194 } */
9195
9196 static tree
9197 s390_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
9198 gimple_seq *post_p ATTRIBUTE_UNUSED)
9199 {
9200 tree f_gpr, f_fpr, f_ovf, f_sav;
9201 tree gpr, fpr, ovf, sav, reg, t, u;
9202 int indirect_p, size, n_reg, sav_ofs, sav_scale, max_reg;
9203 tree lab_false, lab_over, addr;
9204
9205 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
9206 f_fpr = DECL_CHAIN (f_gpr);
9207 f_ovf = DECL_CHAIN (f_fpr);
9208 f_sav = DECL_CHAIN (f_ovf);
9209
9210 valist = build_va_arg_indirect_ref (valist);
9211 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
9212 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
9213 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
9214
9215 /* The tree for args* cannot be shared between gpr/fpr and ovf since
9216 both appear on a lhs. */
9217 valist = unshare_expr (valist);
9218 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
9219
9220 size = int_size_in_bytes (type);
9221
9222 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
9223 {
9224 if (TARGET_DEBUG_ARG)
9225 {
9226 fprintf (stderr, "va_arg: aggregate type");
9227 debug_tree (type);
9228 }
9229
9230 /* Aggregates are passed by reference. */
9231 indirect_p = 1;
9232 reg = gpr;
9233 n_reg = 1;
9234
9235 /* kernel stack layout on 31 bit: It is assumed here that no padding
9236 will be added by s390_frame_info because for va_args always an even
9237 number of gprs has to be saved r15-r2 = 14 regs. */
9238 sav_ofs = 2 * UNITS_PER_LONG;
9239 sav_scale = UNITS_PER_LONG;
9240 size = UNITS_PER_LONG;
9241 max_reg = GP_ARG_NUM_REG - n_reg;
9242 }
9243 else if (s390_function_arg_float (TYPE_MODE (type), type))
9244 {
9245 if (TARGET_DEBUG_ARG)
9246 {
9247 fprintf (stderr, "va_arg: float type");
9248 debug_tree (type);
9249 }
9250
9251 /* FP args go in FP registers, if present. */
9252 indirect_p = 0;
9253 reg = fpr;
9254 n_reg = 1;
9255 sav_ofs = 16 * UNITS_PER_LONG;
9256 sav_scale = 8;
9257 max_reg = FP_ARG_NUM_REG - n_reg;
9258 }
9259 else
9260 {
9261 if (TARGET_DEBUG_ARG)
9262 {
9263 fprintf (stderr, "va_arg: other type");
9264 debug_tree (type);
9265 }
9266
9267 /* Otherwise into GP registers. */
9268 indirect_p = 0;
9269 reg = gpr;
9270 n_reg = (size + UNITS_PER_LONG - 1) / UNITS_PER_LONG;
9271
9272 /* kernel stack layout on 31 bit: It is assumed here that no padding
9273 will be added by s390_frame_info because for va_args always an even
9274 number of gprs has to be saved r15-r2 = 14 regs. */
9275 sav_ofs = 2 * UNITS_PER_LONG;
9276
9277 if (size < UNITS_PER_LONG)
9278 sav_ofs += UNITS_PER_LONG - size;
9279
9280 sav_scale = UNITS_PER_LONG;
9281 max_reg = GP_ARG_NUM_REG - n_reg;
9282 }
9283
9284 /* Pull the value out of the saved registers ... */
9285
9286 lab_false = create_artificial_label (UNKNOWN_LOCATION);
9287 lab_over = create_artificial_label (UNKNOWN_LOCATION);
9288 addr = create_tmp_var (ptr_type_node, "addr");
9289
9290 t = fold_convert (TREE_TYPE (reg), size_int (max_reg));
9291 t = build2 (GT_EXPR, boolean_type_node, reg, t);
9292 u = build1 (GOTO_EXPR, void_type_node, lab_false);
9293 t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
9294 gimplify_and_add (t, pre_p);
9295
9296 t = fold_build_pointer_plus_hwi (sav, sav_ofs);
9297 u = build2 (MULT_EXPR, TREE_TYPE (reg), reg,
9298 fold_convert (TREE_TYPE (reg), size_int (sav_scale)));
9299 t = fold_build_pointer_plus (t, u);
9300
9301 gimplify_assign (addr, t, pre_p);
9302
9303 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
9304
9305 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_false));
9306
9307
9308 /* ... Otherwise out of the overflow area. */
9309
9310 t = ovf;
9311 if (size < UNITS_PER_LONG)
9312 t = fold_build_pointer_plus_hwi (t, UNITS_PER_LONG - size);
9313
9314 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
9315
9316 gimplify_assign (addr, t, pre_p);
9317
9318 t = fold_build_pointer_plus_hwi (t, size);
9319 gimplify_assign (ovf, t, pre_p);
9320
9321 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_over));
9322
9323
9324 /* Increment register save count. */
9325
9326 u = build2 (PREINCREMENT_EXPR, TREE_TYPE (reg), reg,
9327 fold_convert (TREE_TYPE (reg), size_int (n_reg)));
9328 gimplify_and_add (u, pre_p);
9329
9330 if (indirect_p)
9331 {
9332 t = build_pointer_type_for_mode (build_pointer_type (type),
9333 ptr_mode, true);
9334 addr = fold_convert (t, addr);
9335 addr = build_va_arg_indirect_ref (addr);
9336 }
9337 else
9338 {
9339 t = build_pointer_type_for_mode (type, ptr_mode, true);
9340 addr = fold_convert (t, addr);
9341 }
9342
9343 return build_va_arg_indirect_ref (addr);
9344 }
9345
9346 /* Output assembly code for the trampoline template to
9347 stdio stream FILE.
9348
9349 On S/390, we use gpr 1 internally in the trampoline code;
9350 gpr 0 is used to hold the static chain. */
9351
9352 static void
9353 s390_asm_trampoline_template (FILE *file)
9354 {
9355 rtx op[2];
9356 op[0] = gen_rtx_REG (Pmode, 0);
9357 op[1] = gen_rtx_REG (Pmode, 1);
9358
9359 if (TARGET_64BIT)
9360 {
9361 output_asm_insn ("basr\t%1,0", op); /* 2 byte */
9362 output_asm_insn ("lmg\t%0,%1,14(%1)", op); /* 6 byte */
9363 output_asm_insn ("br\t%1", op); /* 2 byte */
9364 ASM_OUTPUT_SKIP (file, (HOST_WIDE_INT)(TRAMPOLINE_SIZE - 10));
9365 }
9366 else
9367 {
9368 output_asm_insn ("basr\t%1,0", op); /* 2 byte */
9369 output_asm_insn ("lm\t%0,%1,6(%1)", op); /* 4 byte */
9370 output_asm_insn ("br\t%1", op); /* 2 byte */
9371 ASM_OUTPUT_SKIP (file, (HOST_WIDE_INT)(TRAMPOLINE_SIZE - 8));
9372 }
9373 }
9374
9375 /* Emit RTL insns to initialize the variable parts of a trampoline.
9376 FNADDR is an RTX for the address of the function's pure code.
9377 CXT is an RTX for the static chain value for the function. */
9378
9379 static void
9380 s390_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
9381 {
9382 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
9383 rtx mem;
9384
9385 emit_block_move (m_tramp, assemble_trampoline_template (),
9386 GEN_INT (2 * UNITS_PER_LONG), BLOCK_OP_NORMAL);
9387
9388 mem = adjust_address (m_tramp, Pmode, 2 * UNITS_PER_LONG);
9389 emit_move_insn (mem, cxt);
9390 mem = adjust_address (m_tramp, Pmode, 3 * UNITS_PER_LONG);
9391 emit_move_insn (mem, fnaddr);
9392 }
9393
9394 /* Output assembler code to FILE to increment profiler label # LABELNO
9395 for profiling a function entry. */
9396
9397 void
9398 s390_function_profiler (FILE *file, int labelno)
9399 {
9400 rtx op[7];
9401
9402 char label[128];
9403 ASM_GENERATE_INTERNAL_LABEL (label, "LP", labelno);
9404
9405 fprintf (file, "# function profiler \n");
9406
9407 op[0] = gen_rtx_REG (Pmode, RETURN_REGNUM);
9408 op[1] = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
9409 op[1] = gen_rtx_MEM (Pmode, plus_constant (Pmode, op[1], UNITS_PER_LONG));
9410
9411 op[2] = gen_rtx_REG (Pmode, 1);
9412 op[3] = gen_rtx_SYMBOL_REF (Pmode, label);
9413 SYMBOL_REF_FLAGS (op[3]) = SYMBOL_FLAG_LOCAL;
9414
9415 op[4] = gen_rtx_SYMBOL_REF (Pmode, "_mcount");
9416 if (flag_pic)
9417 {
9418 op[4] = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op[4]), UNSPEC_PLT);
9419 op[4] = gen_rtx_CONST (Pmode, op[4]);
9420 }
9421
9422 if (TARGET_64BIT)
9423 {
9424 output_asm_insn ("stg\t%0,%1", op);
9425 output_asm_insn ("larl\t%2,%3", op);
9426 output_asm_insn ("brasl\t%0,%4", op);
9427 output_asm_insn ("lg\t%0,%1", op);
9428 }
9429 else if (!flag_pic)
9430 {
9431 op[6] = gen_label_rtx ();
9432
9433 output_asm_insn ("st\t%0,%1", op);
9434 output_asm_insn ("bras\t%2,%l6", op);
9435 output_asm_insn (".long\t%4", op);
9436 output_asm_insn (".long\t%3", op);
9437 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[6]));
9438 output_asm_insn ("l\t%0,0(%2)", op);
9439 output_asm_insn ("l\t%2,4(%2)", op);
9440 output_asm_insn ("basr\t%0,%0", op);
9441 output_asm_insn ("l\t%0,%1", op);
9442 }
9443 else
9444 {
9445 op[5] = gen_label_rtx ();
9446 op[6] = gen_label_rtx ();
9447
9448 output_asm_insn ("st\t%0,%1", op);
9449 output_asm_insn ("bras\t%2,%l6", op);
9450 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[5]));
9451 output_asm_insn (".long\t%4-%l5", op);
9452 output_asm_insn (".long\t%3-%l5", op);
9453 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[6]));
9454 output_asm_insn ("lr\t%0,%2", op);
9455 output_asm_insn ("a\t%0,0(%2)", op);
9456 output_asm_insn ("a\t%2,4(%2)", op);
9457 output_asm_insn ("basr\t%0,%0", op);
9458 output_asm_insn ("l\t%0,%1", op);
9459 }
9460 }
9461
9462 /* Encode symbol attributes (local vs. global, tls model) of a SYMBOL_REF
9463 into its SYMBOL_REF_FLAGS. */
9464
9465 static void
9466 s390_encode_section_info (tree decl, rtx rtl, int first)
9467 {
9468 default_encode_section_info (decl, rtl, first);
9469
9470 if (TREE_CODE (decl) == VAR_DECL)
9471 {
9472 /* If a variable has a forced alignment to < 2 bytes, mark it
9473 with SYMBOL_FLAG_ALIGN1 to prevent it from being used as LARL
9474 operand. */
9475 if (DECL_USER_ALIGN (decl) && DECL_ALIGN (decl) < 16)
9476 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_ALIGN1;
9477 if (!DECL_SIZE (decl)
9478 || !DECL_ALIGN (decl)
9479 || !host_integerp (DECL_SIZE (decl), 0)
9480 || (DECL_ALIGN (decl) <= 64
9481 && DECL_ALIGN (decl) != tree_low_cst (DECL_SIZE (decl), 0)))
9482 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_NOT_NATURALLY_ALIGNED;
9483 }
9484
9485 /* Literal pool references don't have a decl so they are handled
9486 differently here. We rely on the information in the MEM_ALIGN
9487 entry to decide upon natural alignment. */
9488 if (MEM_P (rtl)
9489 && GET_CODE (XEXP (rtl, 0)) == SYMBOL_REF
9490 && TREE_CONSTANT_POOL_ADDRESS_P (XEXP (rtl, 0))
9491 && (MEM_ALIGN (rtl) == 0
9492 || GET_MODE_BITSIZE (GET_MODE (rtl)) == 0
9493 || MEM_ALIGN (rtl) < GET_MODE_BITSIZE (GET_MODE (rtl))))
9494 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_NOT_NATURALLY_ALIGNED;
9495 }
9496
9497 /* Output thunk to FILE that implements a C++ virtual function call (with
9498 multiple inheritance) to FUNCTION. The thunk adjusts the this pointer
9499 by DELTA, and unless VCALL_OFFSET is zero, applies an additional adjustment
9500 stored at VCALL_OFFSET in the vtable whose address is located at offset 0
9501 relative to the resulting this pointer. */
9502
9503 static void
9504 s390_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
9505 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
9506 tree function)
9507 {
9508 rtx op[10];
9509 int nonlocal = 0;
9510
9511 /* Make sure unwind info is emitted for the thunk if needed. */
9512 final_start_function (emit_barrier (), file, 1);
9513
9514 /* Operand 0 is the target function. */
9515 op[0] = XEXP (DECL_RTL (function), 0);
9516 if (flag_pic && !SYMBOL_REF_LOCAL_P (op[0]))
9517 {
9518 nonlocal = 1;
9519 op[0] = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op[0]),
9520 TARGET_64BIT ? UNSPEC_PLT : UNSPEC_GOT);
9521 op[0] = gen_rtx_CONST (Pmode, op[0]);
9522 }
9523
9524 /* Operand 1 is the 'this' pointer. */
9525 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
9526 op[1] = gen_rtx_REG (Pmode, 3);
9527 else
9528 op[1] = gen_rtx_REG (Pmode, 2);
9529
9530 /* Operand 2 is the delta. */
9531 op[2] = GEN_INT (delta);
9532
9533 /* Operand 3 is the vcall_offset. */
9534 op[3] = GEN_INT (vcall_offset);
9535
9536 /* Operand 4 is the temporary register. */
9537 op[4] = gen_rtx_REG (Pmode, 1);
9538
9539 /* Operands 5 to 8 can be used as labels. */
9540 op[5] = NULL_RTX;
9541 op[6] = NULL_RTX;
9542 op[7] = NULL_RTX;
9543 op[8] = NULL_RTX;
9544
9545 /* Operand 9 can be used for temporary register. */
9546 op[9] = NULL_RTX;
9547
9548 /* Generate code. */
9549 if (TARGET_64BIT)
9550 {
9551 /* Setup literal pool pointer if required. */
9552 if ((!DISP_IN_RANGE (delta)
9553 && !CONST_OK_FOR_K (delta)
9554 && !CONST_OK_FOR_Os (delta))
9555 || (!DISP_IN_RANGE (vcall_offset)
9556 && !CONST_OK_FOR_K (vcall_offset)
9557 && !CONST_OK_FOR_Os (vcall_offset)))
9558 {
9559 op[5] = gen_label_rtx ();
9560 output_asm_insn ("larl\t%4,%5", op);
9561 }
9562
9563 /* Add DELTA to this pointer. */
9564 if (delta)
9565 {
9566 if (CONST_OK_FOR_J (delta))
9567 output_asm_insn ("la\t%1,%2(%1)", op);
9568 else if (DISP_IN_RANGE (delta))
9569 output_asm_insn ("lay\t%1,%2(%1)", op);
9570 else if (CONST_OK_FOR_K (delta))
9571 output_asm_insn ("aghi\t%1,%2", op);
9572 else if (CONST_OK_FOR_Os (delta))
9573 output_asm_insn ("agfi\t%1,%2", op);
9574 else
9575 {
9576 op[6] = gen_label_rtx ();
9577 output_asm_insn ("agf\t%1,%6-%5(%4)", op);
9578 }
9579 }
9580
9581 /* Perform vcall adjustment. */
9582 if (vcall_offset)
9583 {
9584 if (DISP_IN_RANGE (vcall_offset))
9585 {
9586 output_asm_insn ("lg\t%4,0(%1)", op);
9587 output_asm_insn ("ag\t%1,%3(%4)", op);
9588 }
9589 else if (CONST_OK_FOR_K (vcall_offset))
9590 {
9591 output_asm_insn ("lghi\t%4,%3", op);
9592 output_asm_insn ("ag\t%4,0(%1)", op);
9593 output_asm_insn ("ag\t%1,0(%4)", op);
9594 }
9595 else if (CONST_OK_FOR_Os (vcall_offset))
9596 {
9597 output_asm_insn ("lgfi\t%4,%3", op);
9598 output_asm_insn ("ag\t%4,0(%1)", op);
9599 output_asm_insn ("ag\t%1,0(%4)", op);
9600 }
9601 else
9602 {
9603 op[7] = gen_label_rtx ();
9604 output_asm_insn ("llgf\t%4,%7-%5(%4)", op);
9605 output_asm_insn ("ag\t%4,0(%1)", op);
9606 output_asm_insn ("ag\t%1,0(%4)", op);
9607 }
9608 }
9609
9610 /* Jump to target. */
9611 output_asm_insn ("jg\t%0", op);
9612
9613 /* Output literal pool if required. */
9614 if (op[5])
9615 {
9616 output_asm_insn (".align\t4", op);
9617 targetm.asm_out.internal_label (file, "L",
9618 CODE_LABEL_NUMBER (op[5]));
9619 }
9620 if (op[6])
9621 {
9622 targetm.asm_out.internal_label (file, "L",
9623 CODE_LABEL_NUMBER (op[6]));
9624 output_asm_insn (".long\t%2", op);
9625 }
9626 if (op[7])
9627 {
9628 targetm.asm_out.internal_label (file, "L",
9629 CODE_LABEL_NUMBER (op[7]));
9630 output_asm_insn (".long\t%3", op);
9631 }
9632 }
9633 else
9634 {
9635 /* Setup base pointer if required. */
9636 if (!vcall_offset
9637 || (!DISP_IN_RANGE (delta)
9638 && !CONST_OK_FOR_K (delta)
9639 && !CONST_OK_FOR_Os (delta))
9640 || (!DISP_IN_RANGE (delta)
9641 && !CONST_OK_FOR_K (vcall_offset)
9642 && !CONST_OK_FOR_Os (vcall_offset)))
9643 {
9644 op[5] = gen_label_rtx ();
9645 output_asm_insn ("basr\t%4,0", op);
9646 targetm.asm_out.internal_label (file, "L",
9647 CODE_LABEL_NUMBER (op[5]));
9648 }
9649
9650 /* Add DELTA to this pointer. */
9651 if (delta)
9652 {
9653 if (CONST_OK_FOR_J (delta))
9654 output_asm_insn ("la\t%1,%2(%1)", op);
9655 else if (DISP_IN_RANGE (delta))
9656 output_asm_insn ("lay\t%1,%2(%1)", op);
9657 else if (CONST_OK_FOR_K (delta))
9658 output_asm_insn ("ahi\t%1,%2", op);
9659 else if (CONST_OK_FOR_Os (delta))
9660 output_asm_insn ("afi\t%1,%2", op);
9661 else
9662 {
9663 op[6] = gen_label_rtx ();
9664 output_asm_insn ("a\t%1,%6-%5(%4)", op);
9665 }
9666 }
9667
9668 /* Perform vcall adjustment. */
9669 if (vcall_offset)
9670 {
9671 if (CONST_OK_FOR_J (vcall_offset))
9672 {
9673 output_asm_insn ("l\t%4,0(%1)", op);
9674 output_asm_insn ("a\t%1,%3(%4)", op);
9675 }
9676 else if (DISP_IN_RANGE (vcall_offset))
9677 {
9678 output_asm_insn ("l\t%4,0(%1)", op);
9679 output_asm_insn ("ay\t%1,%3(%4)", op);
9680 }
9681 else if (CONST_OK_FOR_K (vcall_offset))
9682 {
9683 output_asm_insn ("lhi\t%4,%3", op);
9684 output_asm_insn ("a\t%4,0(%1)", op);
9685 output_asm_insn ("a\t%1,0(%4)", op);
9686 }
9687 else if (CONST_OK_FOR_Os (vcall_offset))
9688 {
9689 output_asm_insn ("iilf\t%4,%3", op);
9690 output_asm_insn ("a\t%4,0(%1)", op);
9691 output_asm_insn ("a\t%1,0(%4)", op);
9692 }
9693 else
9694 {
9695 op[7] = gen_label_rtx ();
9696 output_asm_insn ("l\t%4,%7-%5(%4)", op);
9697 output_asm_insn ("a\t%4,0(%1)", op);
9698 output_asm_insn ("a\t%1,0(%4)", op);
9699 }
9700
9701 /* We had to clobber the base pointer register.
9702 Re-setup the base pointer (with a different base). */
9703 op[5] = gen_label_rtx ();
9704 output_asm_insn ("basr\t%4,0", op);
9705 targetm.asm_out.internal_label (file, "L",
9706 CODE_LABEL_NUMBER (op[5]));
9707 }
9708
9709 /* Jump to target. */
9710 op[8] = gen_label_rtx ();
9711
9712 if (!flag_pic)
9713 output_asm_insn ("l\t%4,%8-%5(%4)", op);
9714 else if (!nonlocal)
9715 output_asm_insn ("a\t%4,%8-%5(%4)", op);
9716 /* We cannot call through .plt, since .plt requires %r12 loaded. */
9717 else if (flag_pic == 1)
9718 {
9719 output_asm_insn ("a\t%4,%8-%5(%4)", op);
9720 output_asm_insn ("l\t%4,%0(%4)", op);
9721 }
9722 else if (flag_pic == 2)
9723 {
9724 op[9] = gen_rtx_REG (Pmode, 0);
9725 output_asm_insn ("l\t%9,%8-4-%5(%4)", op);
9726 output_asm_insn ("a\t%4,%8-%5(%4)", op);
9727 output_asm_insn ("ar\t%4,%9", op);
9728 output_asm_insn ("l\t%4,0(%4)", op);
9729 }
9730
9731 output_asm_insn ("br\t%4", op);
9732
9733 /* Output literal pool. */
9734 output_asm_insn (".align\t4", op);
9735
9736 if (nonlocal && flag_pic == 2)
9737 output_asm_insn (".long\t%0", op);
9738 if (nonlocal)
9739 {
9740 op[0] = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
9741 SYMBOL_REF_FLAGS (op[0]) = SYMBOL_FLAG_LOCAL;
9742 }
9743
9744 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[8]));
9745 if (!flag_pic)
9746 output_asm_insn (".long\t%0", op);
9747 else
9748 output_asm_insn (".long\t%0-%5", op);
9749
9750 if (op[6])
9751 {
9752 targetm.asm_out.internal_label (file, "L",
9753 CODE_LABEL_NUMBER (op[6]));
9754 output_asm_insn (".long\t%2", op);
9755 }
9756 if (op[7])
9757 {
9758 targetm.asm_out.internal_label (file, "L",
9759 CODE_LABEL_NUMBER (op[7]));
9760 output_asm_insn (".long\t%3", op);
9761 }
9762 }
9763 final_end_function ();
9764 }
9765
9766 static bool
9767 s390_valid_pointer_mode (enum machine_mode mode)
9768 {
9769 return (mode == SImode || (TARGET_64BIT && mode == DImode));
9770 }
9771
9772 /* Checks whether the given CALL_EXPR would use a caller
9773 saved register. This is used to decide whether sibling call
9774 optimization could be performed on the respective function
9775 call. */
9776
9777 static bool
9778 s390_call_saved_register_used (tree call_expr)
9779 {
9780 CUMULATIVE_ARGS cum_v;
9781 cumulative_args_t cum;
9782 tree parameter;
9783 enum machine_mode mode;
9784 tree type;
9785 rtx parm_rtx;
9786 int reg, i;
9787
9788 INIT_CUMULATIVE_ARGS (cum_v, NULL, NULL, 0, 0);
9789 cum = pack_cumulative_args (&cum_v);
9790
9791 for (i = 0; i < call_expr_nargs (call_expr); i++)
9792 {
9793 parameter = CALL_EXPR_ARG (call_expr, i);
9794 gcc_assert (parameter);
9795
9796 /* For an undeclared variable passed as parameter we will get
9797 an ERROR_MARK node here. */
9798 if (TREE_CODE (parameter) == ERROR_MARK)
9799 return true;
9800
9801 type = TREE_TYPE (parameter);
9802 gcc_assert (type);
9803
9804 mode = TYPE_MODE (type);
9805 gcc_assert (mode);
9806
9807 if (pass_by_reference (&cum_v, mode, type, true))
9808 {
9809 mode = Pmode;
9810 type = build_pointer_type (type);
9811 }
9812
9813 parm_rtx = s390_function_arg (cum, mode, type, 0);
9814
9815 s390_function_arg_advance (cum, mode, type, 0);
9816
9817 if (!parm_rtx)
9818 continue;
9819
9820 if (REG_P (parm_rtx))
9821 {
9822 for (reg = 0;
9823 reg < HARD_REGNO_NREGS (REGNO (parm_rtx), GET_MODE (parm_rtx));
9824 reg++)
9825 if (!call_used_regs[reg + REGNO (parm_rtx)])
9826 return true;
9827 }
9828
9829 if (GET_CODE (parm_rtx) == PARALLEL)
9830 {
9831 int i;
9832
9833 for (i = 0; i < XVECLEN (parm_rtx, 0); i++)
9834 {
9835 rtx r = XEXP (XVECEXP (parm_rtx, 0, i), 0);
9836
9837 gcc_assert (REG_P (r));
9838
9839 for (reg = 0;
9840 reg < HARD_REGNO_NREGS (REGNO (r), GET_MODE (r));
9841 reg++)
9842 if (!call_used_regs[reg + REGNO (r)])
9843 return true;
9844 }
9845 }
9846
9847 }
9848 return false;
9849 }
9850
9851 /* Return true if the given call expression can be
9852 turned into a sibling call.
9853 DECL holds the declaration of the function to be called whereas
9854 EXP is the call expression itself. */
9855
9856 static bool
9857 s390_function_ok_for_sibcall (tree decl, tree exp)
9858 {
9859 /* The TPF epilogue uses register 1. */
9860 if (TARGET_TPF_PROFILING)
9861 return false;
9862
9863 /* The 31 bit PLT code uses register 12 (GOT pointer - caller saved)
9864 which would have to be restored before the sibcall. */
9865 if (!TARGET_64BIT && flag_pic && decl && !targetm.binds_local_p (decl))
9866 return false;
9867
9868 /* Register 6 on s390 is available as an argument register but unfortunately
9869 "caller saved". This makes functions needing this register for arguments
9870 not suitable for sibcalls. */
9871 return !s390_call_saved_register_used (exp);
9872 }
9873
9874 /* Return the fixed registers used for condition codes. */
9875
9876 static bool
9877 s390_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
9878 {
9879 *p1 = CC_REGNUM;
9880 *p2 = INVALID_REGNUM;
9881
9882 return true;
9883 }
9884
9885 /* This function is used by the call expanders of the machine description.
9886 It emits the call insn itself together with the necessary operations
9887 to adjust the target address and returns the emitted insn.
9888 ADDR_LOCATION is the target address rtx
9889 TLS_CALL the location of the thread-local symbol
9890 RESULT_REG the register where the result of the call should be stored
9891 RETADDR_REG the register where the return address should be stored
9892 If this parameter is NULL_RTX the call is considered
9893 to be a sibling call. */
9894
9895 rtx
9896 s390_emit_call (rtx addr_location, rtx tls_call, rtx result_reg,
9897 rtx retaddr_reg)
9898 {
9899 bool plt_call = false;
9900 rtx insn;
9901 rtx call;
9902 rtx clobber;
9903 rtvec vec;
9904
9905 /* Direct function calls need special treatment. */
9906 if (GET_CODE (addr_location) == SYMBOL_REF)
9907 {
9908 /* When calling a global routine in PIC mode, we must
9909 replace the symbol itself with the PLT stub. */
9910 if (flag_pic && !SYMBOL_REF_LOCAL_P (addr_location))
9911 {
9912 if (retaddr_reg != NULL_RTX)
9913 {
9914 addr_location = gen_rtx_UNSPEC (Pmode,
9915 gen_rtvec (1, addr_location),
9916 UNSPEC_PLT);
9917 addr_location = gen_rtx_CONST (Pmode, addr_location);
9918 plt_call = true;
9919 }
9920 else
9921 /* For -fpic code the PLT entries might use r12 which is
9922 call-saved. Therefore we cannot do a sibcall when
9923 calling directly using a symbol ref. When reaching
9924 this point we decided (in s390_function_ok_for_sibcall)
9925 to do a sibcall for a function pointer but one of the
9926 optimizers was able to get rid of the function pointer
9927 by propagating the symbol ref into the call. This
9928 optimization is illegal for S/390 so we turn the direct
9929 call into a indirect call again. */
9930 addr_location = force_reg (Pmode, addr_location);
9931 }
9932
9933 /* Unless we can use the bras(l) insn, force the
9934 routine address into a register. */
9935 if (!TARGET_SMALL_EXEC && !TARGET_CPU_ZARCH)
9936 {
9937 if (flag_pic)
9938 addr_location = legitimize_pic_address (addr_location, 0);
9939 else
9940 addr_location = force_reg (Pmode, addr_location);
9941 }
9942 }
9943
9944 /* If it is already an indirect call or the code above moved the
9945 SYMBOL_REF to somewhere else make sure the address can be found in
9946 register 1. */
9947 if (retaddr_reg == NULL_RTX
9948 && GET_CODE (addr_location) != SYMBOL_REF
9949 && !plt_call)
9950 {
9951 emit_move_insn (gen_rtx_REG (Pmode, SIBCALL_REGNUM), addr_location);
9952 addr_location = gen_rtx_REG (Pmode, SIBCALL_REGNUM);
9953 }
9954
9955 addr_location = gen_rtx_MEM (QImode, addr_location);
9956 call = gen_rtx_CALL (VOIDmode, addr_location, const0_rtx);
9957
9958 if (result_reg != NULL_RTX)
9959 call = gen_rtx_SET (VOIDmode, result_reg, call);
9960
9961 if (retaddr_reg != NULL_RTX)
9962 {
9963 clobber = gen_rtx_CLOBBER (VOIDmode, retaddr_reg);
9964
9965 if (tls_call != NULL_RTX)
9966 vec = gen_rtvec (3, call, clobber,
9967 gen_rtx_USE (VOIDmode, tls_call));
9968 else
9969 vec = gen_rtvec (2, call, clobber);
9970
9971 call = gen_rtx_PARALLEL (VOIDmode, vec);
9972 }
9973
9974 insn = emit_call_insn (call);
9975
9976 /* 31-bit PLT stubs and tls calls use the GOT register implicitly. */
9977 if ((!TARGET_64BIT && plt_call) || tls_call != NULL_RTX)
9978 {
9979 /* s390_function_ok_for_sibcall should
9980 have denied sibcalls in this case. */
9981 gcc_assert (retaddr_reg != NULL_RTX);
9982 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), gen_rtx_REG (Pmode, 12));
9983 }
9984 return insn;
9985 }
9986
9987 /* Implement TARGET_CONDITIONAL_REGISTER_USAGE. */
9988
9989 static void
9990 s390_conditional_register_usage (void)
9991 {
9992 int i;
9993
9994 if (flag_pic)
9995 {
9996 fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
9997 call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
9998 }
9999 if (TARGET_CPU_ZARCH)
10000 {
10001 fixed_regs[BASE_REGNUM] = 0;
10002 call_used_regs[BASE_REGNUM] = 0;
10003 fixed_regs[RETURN_REGNUM] = 0;
10004 call_used_regs[RETURN_REGNUM] = 0;
10005 }
10006 if (TARGET_64BIT)
10007 {
10008 for (i = 24; i < 32; i++)
10009 call_used_regs[i] = call_really_used_regs[i] = 0;
10010 }
10011 else
10012 {
10013 for (i = 18; i < 20; i++)
10014 call_used_regs[i] = call_really_used_regs[i] = 0;
10015 }
10016
10017 if (TARGET_SOFT_FLOAT)
10018 {
10019 for (i = 16; i < 32; i++)
10020 call_used_regs[i] = fixed_regs[i] = 1;
10021 }
10022 }
10023
10024 /* Corresponding function to eh_return expander. */
10025
10026 static GTY(()) rtx s390_tpf_eh_return_symbol;
10027 void
10028 s390_emit_tpf_eh_return (rtx target)
10029 {
10030 rtx insn, reg;
10031
10032 if (!s390_tpf_eh_return_symbol)
10033 s390_tpf_eh_return_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tpf_eh_return");
10034
10035 reg = gen_rtx_REG (Pmode, 2);
10036
10037 emit_move_insn (reg, target);
10038 insn = s390_emit_call (s390_tpf_eh_return_symbol, NULL_RTX, reg,
10039 gen_rtx_REG (Pmode, RETURN_REGNUM));
10040 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), reg);
10041
10042 emit_move_insn (EH_RETURN_HANDLER_RTX, reg);
10043 }
10044
10045 /* Rework the prologue/epilogue to avoid saving/restoring
10046 registers unnecessarily. */
10047
10048 static void
10049 s390_optimize_prologue (void)
10050 {
10051 rtx insn, new_insn, next_insn;
10052
10053 /* Do a final recompute of the frame-related data. */
10054
10055 s390_update_frame_layout ();
10056
10057 /* If all special registers are in fact used, there's nothing we
10058 can do, so no point in walking the insn list. */
10059
10060 if (cfun_frame_layout.first_save_gpr <= BASE_REGNUM
10061 && cfun_frame_layout.last_save_gpr >= BASE_REGNUM
10062 && (TARGET_CPU_ZARCH
10063 || (cfun_frame_layout.first_save_gpr <= RETURN_REGNUM
10064 && cfun_frame_layout.last_save_gpr >= RETURN_REGNUM)))
10065 return;
10066
10067 /* Search for prologue/epilogue insns and replace them. */
10068
10069 for (insn = get_insns (); insn; insn = next_insn)
10070 {
10071 int first, last, off;
10072 rtx set, base, offset;
10073
10074 next_insn = NEXT_INSN (insn);
10075
10076 if (! NONJUMP_INSN_P (insn))
10077 continue;
10078
10079 if (GET_CODE (PATTERN (insn)) == PARALLEL
10080 && store_multiple_operation (PATTERN (insn), VOIDmode))
10081 {
10082 set = XVECEXP (PATTERN (insn), 0, 0);
10083 first = REGNO (SET_SRC (set));
10084 last = first + XVECLEN (PATTERN (insn), 0) - 1;
10085 offset = const0_rtx;
10086 base = eliminate_constant_term (XEXP (SET_DEST (set), 0), &offset);
10087 off = INTVAL (offset);
10088
10089 if (GET_CODE (base) != REG || off < 0)
10090 continue;
10091 if (cfun_frame_layout.first_save_gpr != -1
10092 && (cfun_frame_layout.first_save_gpr < first
10093 || cfun_frame_layout.last_save_gpr > last))
10094 continue;
10095 if (REGNO (base) != STACK_POINTER_REGNUM
10096 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
10097 continue;
10098 if (first > BASE_REGNUM || last < BASE_REGNUM)
10099 continue;
10100
10101 if (cfun_frame_layout.first_save_gpr != -1)
10102 {
10103 new_insn = save_gprs (base,
10104 off + (cfun_frame_layout.first_save_gpr
10105 - first) * UNITS_PER_LONG,
10106 cfun_frame_layout.first_save_gpr,
10107 cfun_frame_layout.last_save_gpr);
10108 new_insn = emit_insn_before (new_insn, insn);
10109 INSN_ADDRESSES_NEW (new_insn, -1);
10110 }
10111
10112 remove_insn (insn);
10113 continue;
10114 }
10115
10116 if (cfun_frame_layout.first_save_gpr == -1
10117 && GET_CODE (PATTERN (insn)) == SET
10118 && GET_CODE (SET_SRC (PATTERN (insn))) == REG
10119 && (REGNO (SET_SRC (PATTERN (insn))) == BASE_REGNUM
10120 || (!TARGET_CPU_ZARCH
10121 && REGNO (SET_SRC (PATTERN (insn))) == RETURN_REGNUM))
10122 && GET_CODE (SET_DEST (PATTERN (insn))) == MEM)
10123 {
10124 set = PATTERN (insn);
10125 first = REGNO (SET_SRC (set));
10126 offset = const0_rtx;
10127 base = eliminate_constant_term (XEXP (SET_DEST (set), 0), &offset);
10128 off = INTVAL (offset);
10129
10130 if (GET_CODE (base) != REG || off < 0)
10131 continue;
10132 if (REGNO (base) != STACK_POINTER_REGNUM
10133 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
10134 continue;
10135
10136 remove_insn (insn);
10137 continue;
10138 }
10139
10140 if (GET_CODE (PATTERN (insn)) == PARALLEL
10141 && load_multiple_operation (PATTERN (insn), VOIDmode))
10142 {
10143 set = XVECEXP (PATTERN (insn), 0, 0);
10144 first = REGNO (SET_DEST (set));
10145 last = first + XVECLEN (PATTERN (insn), 0) - 1;
10146 offset = const0_rtx;
10147 base = eliminate_constant_term (XEXP (SET_SRC (set), 0), &offset);
10148 off = INTVAL (offset);
10149
10150 if (GET_CODE (base) != REG || off < 0)
10151 continue;
10152 if (cfun_frame_layout.first_restore_gpr != -1
10153 && (cfun_frame_layout.first_restore_gpr < first
10154 || cfun_frame_layout.last_restore_gpr > last))
10155 continue;
10156 if (REGNO (base) != STACK_POINTER_REGNUM
10157 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
10158 continue;
10159 if (first > BASE_REGNUM || last < BASE_REGNUM)
10160 continue;
10161
10162 if (cfun_frame_layout.first_restore_gpr != -1)
10163 {
10164 new_insn = restore_gprs (base,
10165 off + (cfun_frame_layout.first_restore_gpr
10166 - first) * UNITS_PER_LONG,
10167 cfun_frame_layout.first_restore_gpr,
10168 cfun_frame_layout.last_restore_gpr);
10169 new_insn = emit_insn_before (new_insn, insn);
10170 INSN_ADDRESSES_NEW (new_insn, -1);
10171 }
10172
10173 remove_insn (insn);
10174 continue;
10175 }
10176
10177 if (cfun_frame_layout.first_restore_gpr == -1
10178 && GET_CODE (PATTERN (insn)) == SET
10179 && GET_CODE (SET_DEST (PATTERN (insn))) == REG
10180 && (REGNO (SET_DEST (PATTERN (insn))) == BASE_REGNUM
10181 || (!TARGET_CPU_ZARCH
10182 && REGNO (SET_DEST (PATTERN (insn))) == RETURN_REGNUM))
10183 && GET_CODE (SET_SRC (PATTERN (insn))) == MEM)
10184 {
10185 set = PATTERN (insn);
10186 first = REGNO (SET_DEST (set));
10187 offset = const0_rtx;
10188 base = eliminate_constant_term (XEXP (SET_SRC (set), 0), &offset);
10189 off = INTVAL (offset);
10190
10191 if (GET_CODE (base) != REG || off < 0)
10192 continue;
10193 if (REGNO (base) != STACK_POINTER_REGNUM
10194 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
10195 continue;
10196
10197 remove_insn (insn);
10198 continue;
10199 }
10200 }
10201 }
10202
10203 /* On z10 and later the dynamic branch prediction must see the
10204 backward jump within a certain windows. If not it falls back to
10205 the static prediction. This function rearranges the loop backward
10206 branch in a way which makes the static prediction always correct.
10207 The function returns true if it added an instruction. */
10208 static bool
10209 s390_fix_long_loop_prediction (rtx insn)
10210 {
10211 rtx set = single_set (insn);
10212 rtx code_label, label_ref, new_label;
10213 rtx uncond_jump;
10214 rtx cur_insn;
10215 rtx tmp;
10216 int distance;
10217
10218 /* This will exclude branch on count and branch on index patterns
10219 since these are correctly statically predicted. */
10220 if (!set
10221 || SET_DEST (set) != pc_rtx
10222 || GET_CODE (SET_SRC(set)) != IF_THEN_ELSE)
10223 return false;
10224
10225 label_ref = (GET_CODE (XEXP (SET_SRC (set), 1)) == LABEL_REF ?
10226 XEXP (SET_SRC (set), 1) : XEXP (SET_SRC (set), 2));
10227
10228 gcc_assert (GET_CODE (label_ref) == LABEL_REF);
10229
10230 code_label = XEXP (label_ref, 0);
10231
10232 if (INSN_ADDRESSES (INSN_UID (code_label)) == -1
10233 || INSN_ADDRESSES (INSN_UID (insn)) == -1
10234 || (INSN_ADDRESSES (INSN_UID (insn))
10235 - INSN_ADDRESSES (INSN_UID (code_label)) < PREDICT_DISTANCE))
10236 return false;
10237
10238 for (distance = 0, cur_insn = PREV_INSN (insn);
10239 distance < PREDICT_DISTANCE - 6;
10240 distance += get_attr_length (cur_insn), cur_insn = PREV_INSN (cur_insn))
10241 if (!cur_insn || JUMP_P (cur_insn) || LABEL_P (cur_insn))
10242 return false;
10243
10244 new_label = gen_label_rtx ();
10245 uncond_jump = emit_jump_insn_after (
10246 gen_rtx_SET (VOIDmode, pc_rtx,
10247 gen_rtx_LABEL_REF (VOIDmode, code_label)),
10248 insn);
10249 emit_label_after (new_label, uncond_jump);
10250
10251 tmp = XEXP (SET_SRC (set), 1);
10252 XEXP (SET_SRC (set), 1) = XEXP (SET_SRC (set), 2);
10253 XEXP (SET_SRC (set), 2) = tmp;
10254 INSN_CODE (insn) = -1;
10255
10256 XEXP (label_ref, 0) = new_label;
10257 JUMP_LABEL (insn) = new_label;
10258 JUMP_LABEL (uncond_jump) = code_label;
10259
10260 return true;
10261 }
10262
10263 /* Returns 1 if INSN reads the value of REG for purposes not related
10264 to addressing of memory, and 0 otherwise. */
10265 static int
10266 s390_non_addr_reg_read_p (rtx reg, rtx insn)
10267 {
10268 return reg_referenced_p (reg, PATTERN (insn))
10269 && !reg_used_in_mem_p (REGNO (reg), PATTERN (insn));
10270 }
10271
10272 /* Starting from INSN find_cond_jump looks downwards in the insn
10273 stream for a single jump insn which is the last user of the
10274 condition code set in INSN. */
10275 static rtx
10276 find_cond_jump (rtx insn)
10277 {
10278 for (; insn; insn = NEXT_INSN (insn))
10279 {
10280 rtx ite, cc;
10281
10282 if (LABEL_P (insn))
10283 break;
10284
10285 if (!JUMP_P (insn))
10286 {
10287 if (reg_mentioned_p (gen_rtx_REG (CCmode, CC_REGNUM), insn))
10288 break;
10289 continue;
10290 }
10291
10292 /* This will be triggered by a return. */
10293 if (GET_CODE (PATTERN (insn)) != SET)
10294 break;
10295
10296 gcc_assert (SET_DEST (PATTERN (insn)) == pc_rtx);
10297 ite = SET_SRC (PATTERN (insn));
10298
10299 if (GET_CODE (ite) != IF_THEN_ELSE)
10300 break;
10301
10302 cc = XEXP (XEXP (ite, 0), 0);
10303 if (!REG_P (cc) || !CC_REGNO_P (REGNO (cc)))
10304 break;
10305
10306 if (find_reg_note (insn, REG_DEAD, cc))
10307 return insn;
10308 break;
10309 }
10310
10311 return NULL_RTX;
10312 }
10313
10314 /* Swap the condition in COND and the operands in OP0 and OP1 so that
10315 the semantics does not change. If NULL_RTX is passed as COND the
10316 function tries to find the conditional jump starting with INSN. */
10317 static void
10318 s390_swap_cmp (rtx cond, rtx *op0, rtx *op1, rtx insn)
10319 {
10320 rtx tmp = *op0;
10321
10322 if (cond == NULL_RTX)
10323 {
10324 rtx jump = find_cond_jump (NEXT_INSN (insn));
10325 jump = jump ? single_set (jump) : NULL_RTX;
10326
10327 if (jump == NULL_RTX)
10328 return;
10329
10330 cond = XEXP (XEXP (jump, 1), 0);
10331 }
10332
10333 *op0 = *op1;
10334 *op1 = tmp;
10335 PUT_CODE (cond, swap_condition (GET_CODE (cond)));
10336 }
10337
10338 /* On z10, instructions of the compare-and-branch family have the
10339 property to access the register occurring as second operand with
10340 its bits complemented. If such a compare is grouped with a second
10341 instruction that accesses the same register non-complemented, and
10342 if that register's value is delivered via a bypass, then the
10343 pipeline recycles, thereby causing significant performance decline.
10344 This function locates such situations and exchanges the two
10345 operands of the compare. The function return true whenever it
10346 added an insn. */
10347 static bool
10348 s390_z10_optimize_cmp (rtx insn)
10349 {
10350 rtx prev_insn, next_insn;
10351 bool insn_added_p = false;
10352 rtx cond, *op0, *op1;
10353
10354 if (GET_CODE (PATTERN (insn)) == PARALLEL)
10355 {
10356 /* Handle compare and branch and branch on count
10357 instructions. */
10358 rtx pattern = single_set (insn);
10359
10360 if (!pattern
10361 || SET_DEST (pattern) != pc_rtx
10362 || GET_CODE (SET_SRC (pattern)) != IF_THEN_ELSE)
10363 return false;
10364
10365 cond = XEXP (SET_SRC (pattern), 0);
10366 op0 = &XEXP (cond, 0);
10367 op1 = &XEXP (cond, 1);
10368 }
10369 else if (GET_CODE (PATTERN (insn)) == SET)
10370 {
10371 rtx src, dest;
10372
10373 /* Handle normal compare instructions. */
10374 src = SET_SRC (PATTERN (insn));
10375 dest = SET_DEST (PATTERN (insn));
10376
10377 if (!REG_P (dest)
10378 || !CC_REGNO_P (REGNO (dest))
10379 || GET_CODE (src) != COMPARE)
10380 return false;
10381
10382 /* s390_swap_cmp will try to find the conditional
10383 jump when passing NULL_RTX as condition. */
10384 cond = NULL_RTX;
10385 op0 = &XEXP (src, 0);
10386 op1 = &XEXP (src, 1);
10387 }
10388 else
10389 return false;
10390
10391 if (!REG_P (*op0) || !REG_P (*op1))
10392 return false;
10393
10394 if (GET_MODE_CLASS (GET_MODE (*op0)) != MODE_INT)
10395 return false;
10396
10397 /* Swap the COMPARE arguments and its mask if there is a
10398 conflicting access in the previous insn. */
10399 prev_insn = prev_active_insn (insn);
10400 if (prev_insn != NULL_RTX && INSN_P (prev_insn)
10401 && reg_referenced_p (*op1, PATTERN (prev_insn)))
10402 s390_swap_cmp (cond, op0, op1, insn);
10403
10404 /* Check if there is a conflict with the next insn. If there
10405 was no conflict with the previous insn, then swap the
10406 COMPARE arguments and its mask. If we already swapped
10407 the operands, or if swapping them would cause a conflict
10408 with the previous insn, issue a NOP after the COMPARE in
10409 order to separate the two instuctions. */
10410 next_insn = next_active_insn (insn);
10411 if (next_insn != NULL_RTX && INSN_P (next_insn)
10412 && s390_non_addr_reg_read_p (*op1, next_insn))
10413 {
10414 if (prev_insn != NULL_RTX && INSN_P (prev_insn)
10415 && s390_non_addr_reg_read_p (*op0, prev_insn))
10416 {
10417 if (REGNO (*op1) == 0)
10418 emit_insn_after (gen_nop1 (), insn);
10419 else
10420 emit_insn_after (gen_nop (), insn);
10421 insn_added_p = true;
10422 }
10423 else
10424 s390_swap_cmp (cond, op0, op1, insn);
10425 }
10426 return insn_added_p;
10427 }
10428
10429 /* Perform machine-dependent processing. */
10430
10431 static void
10432 s390_reorg (void)
10433 {
10434 bool pool_overflow = false;
10435
10436 /* Make sure all splits have been performed; splits after
10437 machine_dependent_reorg might confuse insn length counts. */
10438 split_all_insns_noflow ();
10439
10440 /* Install the main literal pool and the associated base
10441 register load insns.
10442
10443 In addition, there are two problematic situations we need
10444 to correct:
10445
10446 - the literal pool might be > 4096 bytes in size, so that
10447 some of its elements cannot be directly accessed
10448
10449 - a branch target might be > 64K away from the branch, so that
10450 it is not possible to use a PC-relative instruction.
10451
10452 To fix those, we split the single literal pool into multiple
10453 pool chunks, reloading the pool base register at various
10454 points throughout the function to ensure it always points to
10455 the pool chunk the following code expects, and / or replace
10456 PC-relative branches by absolute branches.
10457
10458 However, the two problems are interdependent: splitting the
10459 literal pool can move a branch further away from its target,
10460 causing the 64K limit to overflow, and on the other hand,
10461 replacing a PC-relative branch by an absolute branch means
10462 we need to put the branch target address into the literal
10463 pool, possibly causing it to overflow.
10464
10465 So, we loop trying to fix up both problems until we manage
10466 to satisfy both conditions at the same time. Note that the
10467 loop is guaranteed to terminate as every pass of the loop
10468 strictly decreases the total number of PC-relative branches
10469 in the function. (This is not completely true as there
10470 might be branch-over-pool insns introduced by chunkify_start.
10471 Those never need to be split however.) */
10472
10473 for (;;)
10474 {
10475 struct constant_pool *pool = NULL;
10476
10477 /* Collect the literal pool. */
10478 if (!pool_overflow)
10479 {
10480 pool = s390_mainpool_start ();
10481 if (!pool)
10482 pool_overflow = true;
10483 }
10484
10485 /* If literal pool overflowed, start to chunkify it. */
10486 if (pool_overflow)
10487 pool = s390_chunkify_start ();
10488
10489 /* Split out-of-range branches. If this has created new
10490 literal pool entries, cancel current chunk list and
10491 recompute it. zSeries machines have large branch
10492 instructions, so we never need to split a branch. */
10493 if (!TARGET_CPU_ZARCH && s390_split_branches ())
10494 {
10495 if (pool_overflow)
10496 s390_chunkify_cancel (pool);
10497 else
10498 s390_mainpool_cancel (pool);
10499
10500 continue;
10501 }
10502
10503 /* If we made it up to here, both conditions are satisfied.
10504 Finish up literal pool related changes. */
10505 if (pool_overflow)
10506 s390_chunkify_finish (pool);
10507 else
10508 s390_mainpool_finish (pool);
10509
10510 /* We're done splitting branches. */
10511 cfun->machine->split_branches_pending_p = false;
10512 break;
10513 }
10514
10515 /* Generate out-of-pool execute target insns. */
10516 if (TARGET_CPU_ZARCH)
10517 {
10518 rtx insn, label, target;
10519
10520 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
10521 {
10522 label = s390_execute_label (insn);
10523 if (!label)
10524 continue;
10525
10526 gcc_assert (label != const0_rtx);
10527
10528 target = emit_label (XEXP (label, 0));
10529 INSN_ADDRESSES_NEW (target, -1);
10530
10531 target = emit_insn (s390_execute_target (insn));
10532 INSN_ADDRESSES_NEW (target, -1);
10533 }
10534 }
10535
10536 /* Try to optimize prologue and epilogue further. */
10537 s390_optimize_prologue ();
10538
10539 /* Walk over the insns and do some >=z10 specific changes. */
10540 if (s390_tune == PROCESSOR_2097_Z10
10541 || s390_tune == PROCESSOR_2817_Z196
10542 || s390_tune == PROCESSOR_2827_ZEC12)
10543 {
10544 rtx insn;
10545 bool insn_added_p = false;
10546
10547 /* The insn lengths and addresses have to be up to date for the
10548 following manipulations. */
10549 shorten_branches (get_insns ());
10550
10551 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
10552 {
10553 if (!INSN_P (insn) || INSN_CODE (insn) <= 0)
10554 continue;
10555
10556 if (JUMP_P (insn))
10557 insn_added_p |= s390_fix_long_loop_prediction (insn);
10558
10559 if ((GET_CODE (PATTERN (insn)) == PARALLEL
10560 || GET_CODE (PATTERN (insn)) == SET)
10561 && s390_tune == PROCESSOR_2097_Z10)
10562 insn_added_p |= s390_z10_optimize_cmp (insn);
10563 }
10564
10565 /* Adjust branches if we added new instructions. */
10566 if (insn_added_p)
10567 shorten_branches (get_insns ());
10568 }
10569 }
10570
10571 /* Return true if INSN is a fp load insn writing register REGNO. */
10572 static inline bool
10573 s390_fpload_toreg (rtx insn, unsigned int regno)
10574 {
10575 rtx set;
10576 enum attr_type flag = s390_safe_attr_type (insn);
10577
10578 if (flag != TYPE_FLOADSF && flag != TYPE_FLOADDF)
10579 return false;
10580
10581 set = single_set (insn);
10582
10583 if (set == NULL_RTX)
10584 return false;
10585
10586 if (!REG_P (SET_DEST (set)) || !MEM_P (SET_SRC (set)))
10587 return false;
10588
10589 if (REGNO (SET_DEST (set)) != regno)
10590 return false;
10591
10592 return true;
10593 }
10594
10595 /* This value describes the distance to be avoided between an
10596 aritmetic fp instruction and an fp load writing the same register.
10597 Z10_EARLYLOAD_DISTANCE - 1 as well as Z10_EARLYLOAD_DISTANCE + 1 is
10598 fine but the exact value has to be avoided. Otherwise the FP
10599 pipeline will throw an exception causing a major penalty. */
10600 #define Z10_EARLYLOAD_DISTANCE 7
10601
10602 /* Rearrange the ready list in order to avoid the situation described
10603 for Z10_EARLYLOAD_DISTANCE. A problematic load instruction is
10604 moved to the very end of the ready list. */
10605 static void
10606 s390_z10_prevent_earlyload_conflicts (rtx *ready, int *nready_p)
10607 {
10608 unsigned int regno;
10609 int nready = *nready_p;
10610 rtx tmp;
10611 int i;
10612 rtx insn;
10613 rtx set;
10614 enum attr_type flag;
10615 int distance;
10616
10617 /* Skip DISTANCE - 1 active insns. */
10618 for (insn = last_scheduled_insn, distance = Z10_EARLYLOAD_DISTANCE - 1;
10619 distance > 0 && insn != NULL_RTX;
10620 distance--, insn = prev_active_insn (insn))
10621 if (CALL_P (insn) || JUMP_P (insn))
10622 return;
10623
10624 if (insn == NULL_RTX)
10625 return;
10626
10627 set = single_set (insn);
10628
10629 if (set == NULL_RTX || !REG_P (SET_DEST (set))
10630 || GET_MODE_CLASS (GET_MODE (SET_DEST (set))) != MODE_FLOAT)
10631 return;
10632
10633 flag = s390_safe_attr_type (insn);
10634
10635 if (flag == TYPE_FLOADSF || flag == TYPE_FLOADDF)
10636 return;
10637
10638 regno = REGNO (SET_DEST (set));
10639 i = nready - 1;
10640
10641 while (!s390_fpload_toreg (ready[i], regno) && i > 0)
10642 i--;
10643
10644 if (!i)
10645 return;
10646
10647 tmp = ready[i];
10648 memmove (&ready[1], &ready[0], sizeof (rtx) * i);
10649 ready[0] = tmp;
10650 }
10651
10652
10653 /* The s390_sched_state variable tracks the state of the current or
10654 the last instruction group.
10655
10656 0,1,2 number of instructions scheduled in the current group
10657 3 the last group is complete - normal insns
10658 4 the last group was a cracked/expanded insn */
10659
10660 static int s390_sched_state;
10661
10662 #define S390_OOO_SCHED_STATE_NORMAL 3
10663 #define S390_OOO_SCHED_STATE_CRACKED 4
10664
10665 #define S390_OOO_SCHED_ATTR_MASK_CRACKED 0x1
10666 #define S390_OOO_SCHED_ATTR_MASK_EXPANDED 0x2
10667 #define S390_OOO_SCHED_ATTR_MASK_ENDGROUP 0x4
10668 #define S390_OOO_SCHED_ATTR_MASK_GROUPALONE 0x8
10669
10670 static unsigned int
10671 s390_get_sched_attrmask (rtx insn)
10672 {
10673 unsigned int mask = 0;
10674
10675 if (get_attr_ooo_cracked (insn))
10676 mask |= S390_OOO_SCHED_ATTR_MASK_CRACKED;
10677 if (get_attr_ooo_expanded (insn))
10678 mask |= S390_OOO_SCHED_ATTR_MASK_EXPANDED;
10679 if (get_attr_ooo_endgroup (insn))
10680 mask |= S390_OOO_SCHED_ATTR_MASK_ENDGROUP;
10681 if (get_attr_ooo_groupalone (insn))
10682 mask |= S390_OOO_SCHED_ATTR_MASK_GROUPALONE;
10683 return mask;
10684 }
10685
10686 /* Return the scheduling score for INSN. The higher the score the
10687 better. The score is calculated from the OOO scheduling attributes
10688 of INSN and the scheduling state s390_sched_state. */
10689 static int
10690 s390_sched_score (rtx insn)
10691 {
10692 unsigned int mask = s390_get_sched_attrmask (insn);
10693 int score = 0;
10694
10695 switch (s390_sched_state)
10696 {
10697 case 0:
10698 /* Try to put insns into the first slot which would otherwise
10699 break a group. */
10700 if ((mask & S390_OOO_SCHED_ATTR_MASK_CRACKED) != 0
10701 || (mask & S390_OOO_SCHED_ATTR_MASK_EXPANDED) != 0)
10702 score += 5;
10703 if ((mask & S390_OOO_SCHED_ATTR_MASK_GROUPALONE) != 0)
10704 score += 10;
10705 case 1:
10706 /* Prefer not cracked insns while trying to put together a
10707 group. */
10708 if ((mask & S390_OOO_SCHED_ATTR_MASK_CRACKED) == 0
10709 && (mask & S390_OOO_SCHED_ATTR_MASK_EXPANDED) == 0
10710 && (mask & S390_OOO_SCHED_ATTR_MASK_GROUPALONE) == 0)
10711 score += 10;
10712 if ((mask & S390_OOO_SCHED_ATTR_MASK_ENDGROUP) == 0)
10713 score += 5;
10714 break;
10715 case 2:
10716 /* Prefer not cracked insns while trying to put together a
10717 group. */
10718 if ((mask & S390_OOO_SCHED_ATTR_MASK_CRACKED) == 0
10719 && (mask & S390_OOO_SCHED_ATTR_MASK_EXPANDED) == 0
10720 && (mask & S390_OOO_SCHED_ATTR_MASK_GROUPALONE) == 0)
10721 score += 10;
10722 /* Prefer endgroup insns in the last slot. */
10723 if ((mask & S390_OOO_SCHED_ATTR_MASK_ENDGROUP) != 0)
10724 score += 10;
10725 break;
10726 case S390_OOO_SCHED_STATE_NORMAL:
10727 /* Prefer not cracked insns if the last was not cracked. */
10728 if ((mask & S390_OOO_SCHED_ATTR_MASK_CRACKED) == 0
10729 && (mask & S390_OOO_SCHED_ATTR_MASK_EXPANDED) == 0)
10730 score += 5;
10731 if ((mask & S390_OOO_SCHED_ATTR_MASK_GROUPALONE) != 0)
10732 score += 10;
10733 break;
10734 case S390_OOO_SCHED_STATE_CRACKED:
10735 /* Try to keep cracked insns together to prevent them from
10736 interrupting groups. */
10737 if ((mask & S390_OOO_SCHED_ATTR_MASK_CRACKED) != 0
10738 || (mask & S390_OOO_SCHED_ATTR_MASK_EXPANDED) != 0)
10739 score += 5;
10740 break;
10741 }
10742 return score;
10743 }
10744
10745 /* This function is called via hook TARGET_SCHED_REORDER before
10746 issueing one insn from list READY which contains *NREADYP entries.
10747 For target z10 it reorders load instructions to avoid early load
10748 conflicts in the floating point pipeline */
10749 static int
10750 s390_sched_reorder (FILE *file, int verbose,
10751 rtx *ready, int *nreadyp, int clock ATTRIBUTE_UNUSED)
10752 {
10753 if (s390_tune == PROCESSOR_2097_Z10)
10754 if (reload_completed && *nreadyp > 1)
10755 s390_z10_prevent_earlyload_conflicts (ready, nreadyp);
10756
10757 if (s390_tune == PROCESSOR_2827_ZEC12
10758 && reload_completed
10759 && *nreadyp > 1)
10760 {
10761 int i;
10762 int last_index = *nreadyp - 1;
10763 int max_index = -1;
10764 int max_score = -1;
10765 rtx tmp;
10766
10767 /* Just move the insn with the highest score to the top (the
10768 end) of the list. A full sort is not needed since a conflict
10769 in the hazard recognition cannot happen. So the top insn in
10770 the ready list will always be taken. */
10771 for (i = last_index; i >= 0; i--)
10772 {
10773 int score;
10774
10775 if (recog_memoized (ready[i]) < 0)
10776 continue;
10777
10778 score = s390_sched_score (ready[i]);
10779 if (score > max_score)
10780 {
10781 max_score = score;
10782 max_index = i;
10783 }
10784 }
10785
10786 if (max_index != -1)
10787 {
10788 if (max_index != last_index)
10789 {
10790 tmp = ready[max_index];
10791 ready[max_index] = ready[last_index];
10792 ready[last_index] = tmp;
10793
10794 if (verbose > 5)
10795 fprintf (file,
10796 "move insn %d to the top of list\n",
10797 INSN_UID (ready[last_index]));
10798 }
10799 else if (verbose > 5)
10800 fprintf (file,
10801 "best insn %d already on top\n",
10802 INSN_UID (ready[last_index]));
10803 }
10804
10805 if (verbose > 5)
10806 {
10807 fprintf (file, "ready list ooo attributes - sched state: %d\n",
10808 s390_sched_state);
10809
10810 for (i = last_index; i >= 0; i--)
10811 {
10812 if (recog_memoized (ready[i]) < 0)
10813 continue;
10814 fprintf (file, "insn %d score: %d: ", INSN_UID (ready[i]),
10815 s390_sched_score (ready[i]));
10816 #define PRINT_OOO_ATTR(ATTR) fprintf (file, "%s ", get_attr_##ATTR (ready[i]) ? #ATTR : "!" #ATTR);
10817 PRINT_OOO_ATTR (ooo_cracked);
10818 PRINT_OOO_ATTR (ooo_expanded);
10819 PRINT_OOO_ATTR (ooo_endgroup);
10820 PRINT_OOO_ATTR (ooo_groupalone);
10821 #undef PRINT_OOO_ATTR
10822 fprintf (file, "\n");
10823 }
10824 }
10825 }
10826
10827 return s390_issue_rate ();
10828 }
10829
10830
10831 /* This function is called via hook TARGET_SCHED_VARIABLE_ISSUE after
10832 the scheduler has issued INSN. It stores the last issued insn into
10833 last_scheduled_insn in order to make it available for
10834 s390_sched_reorder. */
10835 static int
10836 s390_sched_variable_issue (FILE *file, int verbose, rtx insn, int more)
10837 {
10838 last_scheduled_insn = insn;
10839
10840 if (s390_tune == PROCESSOR_2827_ZEC12
10841 && reload_completed
10842 && recog_memoized (insn) >= 0)
10843 {
10844 unsigned int mask = s390_get_sched_attrmask (insn);
10845
10846 if ((mask & S390_OOO_SCHED_ATTR_MASK_CRACKED) != 0
10847 || (mask & S390_OOO_SCHED_ATTR_MASK_EXPANDED) != 0)
10848 s390_sched_state = S390_OOO_SCHED_STATE_CRACKED;
10849 else if ((mask & S390_OOO_SCHED_ATTR_MASK_ENDGROUP) != 0
10850 || (mask & S390_OOO_SCHED_ATTR_MASK_GROUPALONE) != 0)
10851 s390_sched_state = S390_OOO_SCHED_STATE_NORMAL;
10852 else
10853 {
10854 /* Only normal insns are left (mask == 0). */
10855 switch (s390_sched_state)
10856 {
10857 case 0:
10858 case 1:
10859 case 2:
10860 case S390_OOO_SCHED_STATE_NORMAL:
10861 if (s390_sched_state == S390_OOO_SCHED_STATE_NORMAL)
10862 s390_sched_state = 1;
10863 else
10864 s390_sched_state++;
10865
10866 break;
10867 case S390_OOO_SCHED_STATE_CRACKED:
10868 s390_sched_state = S390_OOO_SCHED_STATE_NORMAL;
10869 break;
10870 }
10871 }
10872 if (verbose > 5)
10873 {
10874 fprintf (file, "insn %d: ", INSN_UID (insn));
10875 #define PRINT_OOO_ATTR(ATTR) \
10876 fprintf (file, "%s ", get_attr_##ATTR (insn) ? #ATTR : "");
10877 PRINT_OOO_ATTR (ooo_cracked);
10878 PRINT_OOO_ATTR (ooo_expanded);
10879 PRINT_OOO_ATTR (ooo_endgroup);
10880 PRINT_OOO_ATTR (ooo_groupalone);
10881 #undef PRINT_OOO_ATTR
10882 fprintf (file, "\n");
10883 fprintf (file, "sched state: %d\n", s390_sched_state);
10884 }
10885 }
10886
10887 if (GET_CODE (PATTERN (insn)) != USE
10888 && GET_CODE (PATTERN (insn)) != CLOBBER)
10889 return more - 1;
10890 else
10891 return more;
10892 }
10893
10894 static void
10895 s390_sched_init (FILE *file ATTRIBUTE_UNUSED,
10896 int verbose ATTRIBUTE_UNUSED,
10897 int max_ready ATTRIBUTE_UNUSED)
10898 {
10899 last_scheduled_insn = NULL_RTX;
10900 s390_sched_state = 0;
10901 }
10902
10903 /* This function checks the whole of insn X for memory references. The
10904 function always returns zero because the framework it is called
10905 from would stop recursively analyzing the insn upon a return value
10906 other than zero. The real result of this function is updating
10907 counter variable MEM_COUNT. */
10908 static int
10909 check_dpu (rtx *x, unsigned *mem_count)
10910 {
10911 if (*x != NULL_RTX && MEM_P (*x))
10912 (*mem_count)++;
10913 return 0;
10914 }
10915
10916 /* This target hook implementation for TARGET_LOOP_UNROLL_ADJUST calculates
10917 a new number struct loop *loop should be unrolled if tuned for cpus with
10918 a built-in stride prefetcher.
10919 The loop is analyzed for memory accesses by calling check_dpu for
10920 each rtx of the loop. Depending on the loop_depth and the amount of
10921 memory accesses a new number <=nunroll is returned to improve the
10922 behaviour of the hardware prefetch unit. */
10923 static unsigned
10924 s390_loop_unroll_adjust (unsigned nunroll, struct loop *loop)
10925 {
10926 basic_block *bbs;
10927 rtx insn;
10928 unsigned i;
10929 unsigned mem_count = 0;
10930
10931 if (s390_tune != PROCESSOR_2097_Z10
10932 && s390_tune != PROCESSOR_2817_Z196
10933 && s390_tune != PROCESSOR_2827_ZEC12)
10934 return nunroll;
10935
10936 /* Count the number of memory references within the loop body. */
10937 bbs = get_loop_body (loop);
10938 for (i = 0; i < loop->num_nodes; i++)
10939 {
10940 for (insn = BB_HEAD (bbs[i]); insn != BB_END (bbs[i]); insn = NEXT_INSN (insn))
10941 if (INSN_P (insn) && INSN_CODE (insn) != -1)
10942 for_each_rtx (&insn, (rtx_function) check_dpu, &mem_count);
10943 }
10944 free (bbs);
10945
10946 /* Prevent division by zero, and we do not need to adjust nunroll in this case. */
10947 if (mem_count == 0)
10948 return nunroll;
10949
10950 switch (loop_depth(loop))
10951 {
10952 case 1:
10953 return MIN (nunroll, 28 / mem_count);
10954 case 2:
10955 return MIN (nunroll, 22 / mem_count);
10956 default:
10957 return MIN (nunroll, 16 / mem_count);
10958 }
10959 }
10960
10961 /* Initialize GCC target structure. */
10962
10963 #undef TARGET_ASM_ALIGNED_HI_OP
10964 #define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
10965 #undef TARGET_ASM_ALIGNED_DI_OP
10966 #define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
10967 #undef TARGET_ASM_INTEGER
10968 #define TARGET_ASM_INTEGER s390_assemble_integer
10969
10970 #undef TARGET_ASM_OPEN_PAREN
10971 #define TARGET_ASM_OPEN_PAREN ""
10972
10973 #undef TARGET_ASM_CLOSE_PAREN
10974 #define TARGET_ASM_CLOSE_PAREN ""
10975
10976 #undef TARGET_OPTION_OVERRIDE
10977 #define TARGET_OPTION_OVERRIDE s390_option_override
10978
10979 #undef TARGET_ENCODE_SECTION_INFO
10980 #define TARGET_ENCODE_SECTION_INFO s390_encode_section_info
10981
10982 #undef TARGET_SCALAR_MODE_SUPPORTED_P
10983 #define TARGET_SCALAR_MODE_SUPPORTED_P s390_scalar_mode_supported_p
10984
10985 #ifdef HAVE_AS_TLS
10986 #undef TARGET_HAVE_TLS
10987 #define TARGET_HAVE_TLS true
10988 #endif
10989 #undef TARGET_CANNOT_FORCE_CONST_MEM
10990 #define TARGET_CANNOT_FORCE_CONST_MEM s390_cannot_force_const_mem
10991
10992 #undef TARGET_DELEGITIMIZE_ADDRESS
10993 #define TARGET_DELEGITIMIZE_ADDRESS s390_delegitimize_address
10994
10995 #undef TARGET_LEGITIMIZE_ADDRESS
10996 #define TARGET_LEGITIMIZE_ADDRESS s390_legitimize_address
10997
10998 #undef TARGET_RETURN_IN_MEMORY
10999 #define TARGET_RETURN_IN_MEMORY s390_return_in_memory
11000
11001 #undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
11002 #define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA s390_output_addr_const_extra
11003
11004 #undef TARGET_ASM_OUTPUT_MI_THUNK
11005 #define TARGET_ASM_OUTPUT_MI_THUNK s390_output_mi_thunk
11006 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
11007 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
11008
11009 #undef TARGET_SCHED_ADJUST_PRIORITY
11010 #define TARGET_SCHED_ADJUST_PRIORITY s390_adjust_priority
11011 #undef TARGET_SCHED_ISSUE_RATE
11012 #define TARGET_SCHED_ISSUE_RATE s390_issue_rate
11013 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
11014 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD s390_first_cycle_multipass_dfa_lookahead
11015
11016 #undef TARGET_SCHED_VARIABLE_ISSUE
11017 #define TARGET_SCHED_VARIABLE_ISSUE s390_sched_variable_issue
11018 #undef TARGET_SCHED_REORDER
11019 #define TARGET_SCHED_REORDER s390_sched_reorder
11020 #undef TARGET_SCHED_INIT
11021 #define TARGET_SCHED_INIT s390_sched_init
11022
11023 #undef TARGET_CANNOT_COPY_INSN_P
11024 #define TARGET_CANNOT_COPY_INSN_P s390_cannot_copy_insn_p
11025 #undef TARGET_RTX_COSTS
11026 #define TARGET_RTX_COSTS s390_rtx_costs
11027 #undef TARGET_ADDRESS_COST
11028 #define TARGET_ADDRESS_COST s390_address_cost
11029 #undef TARGET_REGISTER_MOVE_COST
11030 #define TARGET_REGISTER_MOVE_COST s390_register_move_cost
11031 #undef TARGET_MEMORY_MOVE_COST
11032 #define TARGET_MEMORY_MOVE_COST s390_memory_move_cost
11033
11034 #undef TARGET_MACHINE_DEPENDENT_REORG
11035 #define TARGET_MACHINE_DEPENDENT_REORG s390_reorg
11036
11037 #undef TARGET_VALID_POINTER_MODE
11038 #define TARGET_VALID_POINTER_MODE s390_valid_pointer_mode
11039
11040 #undef TARGET_BUILD_BUILTIN_VA_LIST
11041 #define TARGET_BUILD_BUILTIN_VA_LIST s390_build_builtin_va_list
11042 #undef TARGET_EXPAND_BUILTIN_VA_START
11043 #define TARGET_EXPAND_BUILTIN_VA_START s390_va_start
11044 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
11045 #define TARGET_GIMPLIFY_VA_ARG_EXPR s390_gimplify_va_arg
11046
11047 #undef TARGET_PROMOTE_FUNCTION_MODE
11048 #define TARGET_PROMOTE_FUNCTION_MODE s390_promote_function_mode
11049 #undef TARGET_PASS_BY_REFERENCE
11050 #define TARGET_PASS_BY_REFERENCE s390_pass_by_reference
11051
11052 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
11053 #define TARGET_FUNCTION_OK_FOR_SIBCALL s390_function_ok_for_sibcall
11054 #undef TARGET_FUNCTION_ARG
11055 #define TARGET_FUNCTION_ARG s390_function_arg
11056 #undef TARGET_FUNCTION_ARG_ADVANCE
11057 #define TARGET_FUNCTION_ARG_ADVANCE s390_function_arg_advance
11058 #undef TARGET_FUNCTION_VALUE
11059 #define TARGET_FUNCTION_VALUE s390_function_value
11060 #undef TARGET_LIBCALL_VALUE
11061 #define TARGET_LIBCALL_VALUE s390_libcall_value
11062
11063 #undef TARGET_FIXED_CONDITION_CODE_REGS
11064 #define TARGET_FIXED_CONDITION_CODE_REGS s390_fixed_condition_code_regs
11065
11066 #undef TARGET_CC_MODES_COMPATIBLE
11067 #define TARGET_CC_MODES_COMPATIBLE s390_cc_modes_compatible
11068
11069 #undef TARGET_INVALID_WITHIN_DOLOOP
11070 #define TARGET_INVALID_WITHIN_DOLOOP hook_constcharptr_const_rtx_null
11071
11072 #ifdef HAVE_AS_TLS
11073 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
11074 #define TARGET_ASM_OUTPUT_DWARF_DTPREL s390_output_dwarf_dtprel
11075 #endif
11076
11077 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
11078 #undef TARGET_MANGLE_TYPE
11079 #define TARGET_MANGLE_TYPE s390_mangle_type
11080 #endif
11081
11082 #undef TARGET_SCALAR_MODE_SUPPORTED_P
11083 #define TARGET_SCALAR_MODE_SUPPORTED_P s390_scalar_mode_supported_p
11084
11085 #undef TARGET_PREFERRED_RELOAD_CLASS
11086 #define TARGET_PREFERRED_RELOAD_CLASS s390_preferred_reload_class
11087
11088 #undef TARGET_SECONDARY_RELOAD
11089 #define TARGET_SECONDARY_RELOAD s390_secondary_reload
11090
11091 #undef TARGET_LIBGCC_CMP_RETURN_MODE
11092 #define TARGET_LIBGCC_CMP_RETURN_MODE s390_libgcc_cmp_return_mode
11093
11094 #undef TARGET_LIBGCC_SHIFT_COUNT_MODE
11095 #define TARGET_LIBGCC_SHIFT_COUNT_MODE s390_libgcc_shift_count_mode
11096
11097 #undef TARGET_LEGITIMATE_ADDRESS_P
11098 #define TARGET_LEGITIMATE_ADDRESS_P s390_legitimate_address_p
11099
11100 #undef TARGET_LEGITIMATE_CONSTANT_P
11101 #define TARGET_LEGITIMATE_CONSTANT_P s390_legitimate_constant_p
11102
11103 #undef TARGET_CAN_ELIMINATE
11104 #define TARGET_CAN_ELIMINATE s390_can_eliminate
11105
11106 #undef TARGET_CONDITIONAL_REGISTER_USAGE
11107 #define TARGET_CONDITIONAL_REGISTER_USAGE s390_conditional_register_usage
11108
11109 #undef TARGET_LOOP_UNROLL_ADJUST
11110 #define TARGET_LOOP_UNROLL_ADJUST s390_loop_unroll_adjust
11111
11112 #undef TARGET_ASM_TRAMPOLINE_TEMPLATE
11113 #define TARGET_ASM_TRAMPOLINE_TEMPLATE s390_asm_trampoline_template
11114 #undef TARGET_TRAMPOLINE_INIT
11115 #define TARGET_TRAMPOLINE_INIT s390_trampoline_init
11116
11117 #undef TARGET_UNWIND_WORD_MODE
11118 #define TARGET_UNWIND_WORD_MODE s390_unwind_word_mode
11119
11120 #undef TARGET_CANONICALIZE_COMPARISON
11121 #define TARGET_CANONICALIZE_COMPARISON s390_canonicalize_comparison
11122
11123 struct gcc_target targetm = TARGET_INITIALIZER;
11124
11125 #include "gt-s390.h"