1 /* Subroutines used for code generation on IBM S/390 and zSeries
2 Copyright (C) 1999-2013 Free Software Foundation, Inc.
3 Contributed by Hartmut Penner (hpenner@de.ibm.com) and
4 Ulrich Weigand (uweigand@de.ibm.com) and
5 Andreas Krebbel (Andreas.Krebbel@de.ibm.com).
6
7 This file is part of GCC.
8
9 GCC is free software; you can redistribute it and/or modify it under
10 the terms of the GNU General Public License as published by the Free
11 Software Foundation; either version 3, or (at your option) any later
12 version.
13
14 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
15 WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING3. If not see
21 <http://www.gnu.org/licenses/>. */
22
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "tm_p.h"
30 #include "regs.h"
31 #include "hard-reg-set.h"
32 #include "insn-config.h"
33 #include "conditions.h"
34 #include "output.h"
35 #include "insn-attr.h"
36 #include "flags.h"
37 #include "except.h"
38 #include "function.h"
39 #include "recog.h"
40 #include "expr.h"
41 #include "reload.h"
42 #include "diagnostic-core.h"
43 #include "basic-block.h"
44 #include "ggc.h"
45 #include "target.h"
46 #include "target-def.h"
47 #include "debug.h"
48 #include "langhooks.h"
49 #include "optabs.h"
50 #include "gimple.h"
51 #include "df.h"
52 #include "params.h"
53 #include "cfgloop.h"
54 #include "opts.h"
55
56 /* Define the specific costs for a given cpu. */
57
58 struct processor_costs
59 {
60 /* multiplication */
61 const int m; /* cost of an M instruction. */
62 const int mghi; /* cost of an MGHI instruction. */
63 const int mh; /* cost of an MH instruction. */
64 const int mhi; /* cost of an MHI instruction. */
65 const int ml; /* cost of an ML instruction. */
66 const int mr; /* cost of an MR instruction. */
67 const int ms; /* cost of an MS instruction. */
68 const int msg; /* cost of an MSG instruction. */
69 const int msgf; /* cost of an MSGF instruction. */
70 const int msgfr; /* cost of an MSGFR instruction. */
71 const int msgr; /* cost of an MSGR instruction. */
72 const int msr; /* cost of an MSR instruction. */
73 const int mult_df; /* cost of multiplication in DFmode. */
 74     const int mxbr;           /* cost of multiplication in TFmode.  */
75 /* square root */
76 const int sqxbr; /* cost of square root in TFmode. */
77 const int sqdbr; /* cost of square root in DFmode. */
78 const int sqebr; /* cost of square root in SFmode. */
79 /* multiply and add */
80 const int madbr; /* cost of multiply and add in DFmode. */
81 const int maebr; /* cost of multiply and add in SFmode. */
82 /* division */
 83     const int dxbr;           /* cost of division in TFmode.  */
 84     const int ddbr;           /* cost of division in DFmode.  */
 85     const int debr;           /* cost of division in SFmode.  */
 86     const int dlgr;           /* cost of a DLGR instruction.  */
 87     const int dlr;            /* cost of a DLR instruction.  */
 88     const int dr;             /* cost of a DR instruction.  */
 89     const int dsgfr;          /* cost of a DSGFR instruction.  */
 90     const int dsgr;           /* cost of a DSGR instruction.  */
91 };
92
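/* The cost table in effect for the current compilation.  It is set in
   s390_option_override according to the processor selected for tuning
   (see the switch on s390_tune below).  */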
93 const struct processor_costs *s390_cost;
94
95 static const
96 struct processor_costs z900_cost =
97 {
98 COSTS_N_INSNS (5), /* M */
99 COSTS_N_INSNS (10), /* MGHI */
100 COSTS_N_INSNS (5), /* MH */
101 COSTS_N_INSNS (4), /* MHI */
102 COSTS_N_INSNS (5), /* ML */
103 COSTS_N_INSNS (5), /* MR */
104 COSTS_N_INSNS (4), /* MS */
105 COSTS_N_INSNS (15), /* MSG */
106 COSTS_N_INSNS (7), /* MSGF */
107 COSTS_N_INSNS (7), /* MSGFR */
108 COSTS_N_INSNS (10), /* MSGR */
109 COSTS_N_INSNS (4), /* MSR */
110 COSTS_N_INSNS (7), /* multiplication in DFmode */
111 COSTS_N_INSNS (13), /* MXBR */
112 COSTS_N_INSNS (136), /* SQXBR */
113 COSTS_N_INSNS (44), /* SQDBR */
114 COSTS_N_INSNS (35), /* SQEBR */
115 COSTS_N_INSNS (18), /* MADBR */
116 COSTS_N_INSNS (13), /* MAEBR */
117 COSTS_N_INSNS (134), /* DXBR */
118 COSTS_N_INSNS (30), /* DDBR */
119 COSTS_N_INSNS (27), /* DEBR */
120 COSTS_N_INSNS (220), /* DLGR */
121 COSTS_N_INSNS (34), /* DLR */
122 COSTS_N_INSNS (34), /* DR */
123 COSTS_N_INSNS (32), /* DSGFR */
124 COSTS_N_INSNS (32), /* DSGR */
125 };
126
127 static const
128 struct processor_costs z990_cost =
129 {
130 COSTS_N_INSNS (4), /* M */
131 COSTS_N_INSNS (2), /* MGHI */
132 COSTS_N_INSNS (2), /* MH */
133 COSTS_N_INSNS (2), /* MHI */
134 COSTS_N_INSNS (4), /* ML */
135 COSTS_N_INSNS (4), /* MR */
136 COSTS_N_INSNS (5), /* MS */
137 COSTS_N_INSNS (6), /* MSG */
138 COSTS_N_INSNS (4), /* MSGF */
139 COSTS_N_INSNS (4), /* MSGFR */
140 COSTS_N_INSNS (4), /* MSGR */
141 COSTS_N_INSNS (4), /* MSR */
142 COSTS_N_INSNS (1), /* multiplication in DFmode */
143 COSTS_N_INSNS (28), /* MXBR */
144 COSTS_N_INSNS (130), /* SQXBR */
145 COSTS_N_INSNS (66), /* SQDBR */
146 COSTS_N_INSNS (38), /* SQEBR */
147 COSTS_N_INSNS (1), /* MADBR */
148 COSTS_N_INSNS (1), /* MAEBR */
149 COSTS_N_INSNS (60), /* DXBR */
150 COSTS_N_INSNS (40), /* DDBR */
151 COSTS_N_INSNS (26), /* DEBR */
152 COSTS_N_INSNS (176), /* DLGR */
153 COSTS_N_INSNS (31), /* DLR */
154 COSTS_N_INSNS (31), /* DR */
155 COSTS_N_INSNS (31), /* DSGFR */
156 COSTS_N_INSNS (31), /* DSGR */
157 };
158
159 static const
160 struct processor_costs z9_109_cost =
161 {
162 COSTS_N_INSNS (4), /* M */
163 COSTS_N_INSNS (2), /* MGHI */
164 COSTS_N_INSNS (2), /* MH */
165 COSTS_N_INSNS (2), /* MHI */
166 COSTS_N_INSNS (4), /* ML */
167 COSTS_N_INSNS (4), /* MR */
168 COSTS_N_INSNS (5), /* MS */
169 COSTS_N_INSNS (6), /* MSG */
170 COSTS_N_INSNS (4), /* MSGF */
171 COSTS_N_INSNS (4), /* MSGFR */
172 COSTS_N_INSNS (4), /* MSGR */
173 COSTS_N_INSNS (4), /* MSR */
174 COSTS_N_INSNS (1), /* multiplication in DFmode */
175 COSTS_N_INSNS (28), /* MXBR */
176 COSTS_N_INSNS (130), /* SQXBR */
177 COSTS_N_INSNS (66), /* SQDBR */
178 COSTS_N_INSNS (38), /* SQEBR */
179 COSTS_N_INSNS (1), /* MADBR */
180 COSTS_N_INSNS (1), /* MAEBR */
181 COSTS_N_INSNS (60), /* DXBR */
182 COSTS_N_INSNS (40), /* DDBR */
183 COSTS_N_INSNS (26), /* DEBR */
184 COSTS_N_INSNS (30), /* DLGR */
185 COSTS_N_INSNS (23), /* DLR */
186 COSTS_N_INSNS (23), /* DR */
187 COSTS_N_INSNS (24), /* DSGFR */
188 COSTS_N_INSNS (24), /* DSGR */
189 };
190
191 static const
192 struct processor_costs z10_cost =
193 {
194 COSTS_N_INSNS (10), /* M */
195 COSTS_N_INSNS (10), /* MGHI */
196 COSTS_N_INSNS (10), /* MH */
197 COSTS_N_INSNS (10), /* MHI */
198 COSTS_N_INSNS (10), /* ML */
199 COSTS_N_INSNS (10), /* MR */
200 COSTS_N_INSNS (10), /* MS */
201 COSTS_N_INSNS (10), /* MSG */
202 COSTS_N_INSNS (10), /* MSGF */
203 COSTS_N_INSNS (10), /* MSGFR */
204 COSTS_N_INSNS (10), /* MSGR */
205 COSTS_N_INSNS (10), /* MSR */
 206   COSTS_N_INSNS (1),     /* multiplication in DFmode */
207 COSTS_N_INSNS (50), /* MXBR */
208 COSTS_N_INSNS (120), /* SQXBR */
209 COSTS_N_INSNS (52), /* SQDBR */
210 COSTS_N_INSNS (38), /* SQEBR */
211 COSTS_N_INSNS (1), /* MADBR */
212 COSTS_N_INSNS (1), /* MAEBR */
213 COSTS_N_INSNS (111), /* DXBR */
214 COSTS_N_INSNS (39), /* DDBR */
215 COSTS_N_INSNS (32), /* DEBR */
216 COSTS_N_INSNS (160), /* DLGR */
217 COSTS_N_INSNS (71), /* DLR */
218 COSTS_N_INSNS (71), /* DR */
219 COSTS_N_INSNS (71), /* DSGFR */
220 COSTS_N_INSNS (71), /* DSGR */
221 };
222
223 static const
224 struct processor_costs z196_cost =
225 {
226 COSTS_N_INSNS (7), /* M */
227 COSTS_N_INSNS (5), /* MGHI */
228 COSTS_N_INSNS (5), /* MH */
229 COSTS_N_INSNS (5), /* MHI */
230 COSTS_N_INSNS (7), /* ML */
231 COSTS_N_INSNS (7), /* MR */
232 COSTS_N_INSNS (6), /* MS */
233 COSTS_N_INSNS (8), /* MSG */
234 COSTS_N_INSNS (6), /* MSGF */
235 COSTS_N_INSNS (6), /* MSGFR */
236 COSTS_N_INSNS (8), /* MSGR */
237 COSTS_N_INSNS (6), /* MSR */
 238   COSTS_N_INSNS (1),     /* multiplication in DFmode */
239 COSTS_N_INSNS (40), /* MXBR B+40 */
240 COSTS_N_INSNS (100), /* SQXBR B+100 */
241 COSTS_N_INSNS (42), /* SQDBR B+42 */
242 COSTS_N_INSNS (28), /* SQEBR B+28 */
243 COSTS_N_INSNS (1), /* MADBR B */
244 COSTS_N_INSNS (1), /* MAEBR B */
245 COSTS_N_INSNS (101), /* DXBR B+101 */
246 COSTS_N_INSNS (29), /* DDBR */
247 COSTS_N_INSNS (22), /* DEBR */
248 COSTS_N_INSNS (160), /* DLGR cracked */
249 COSTS_N_INSNS (160), /* DLR cracked */
250 COSTS_N_INSNS (160), /* DR expanded */
251 COSTS_N_INSNS (160), /* DSGFR cracked */
252 COSTS_N_INSNS (160), /* DSGR cracked */
253 };
254
255 static const
256 struct processor_costs zEC12_cost =
257 {
258 COSTS_N_INSNS (7), /* M */
259 COSTS_N_INSNS (5), /* MGHI */
260 COSTS_N_INSNS (5), /* MH */
261 COSTS_N_INSNS (5), /* MHI */
262 COSTS_N_INSNS (7), /* ML */
263 COSTS_N_INSNS (7), /* MR */
264 COSTS_N_INSNS (6), /* MS */
265 COSTS_N_INSNS (8), /* MSG */
266 COSTS_N_INSNS (6), /* MSGF */
267 COSTS_N_INSNS (6), /* MSGFR */
268 COSTS_N_INSNS (8), /* MSGR */
269 COSTS_N_INSNS (6), /* MSR */
 270   COSTS_N_INSNS (1),     /* multiplication in DFmode */
271 COSTS_N_INSNS (40), /* MXBR B+40 */
272 COSTS_N_INSNS (100), /* SQXBR B+100 */
273 COSTS_N_INSNS (42), /* SQDBR B+42 */
274 COSTS_N_INSNS (28), /* SQEBR B+28 */
275 COSTS_N_INSNS (1), /* MADBR B */
276 COSTS_N_INSNS (1), /* MAEBR B */
277 COSTS_N_INSNS (131), /* DXBR B+131 */
278 COSTS_N_INSNS (29), /* DDBR */
279 COSTS_N_INSNS (22), /* DEBR */
280 COSTS_N_INSNS (160), /* DLGR cracked */
281 COSTS_N_INSNS (160), /* DLR cracked */
282 COSTS_N_INSNS (160), /* DR expanded */
283 COSTS_N_INSNS (160), /* DSGFR cracked */
284 COSTS_N_INSNS (160), /* DSGR cracked */
285 };
286
287 extern int reload_completed;
288
289 /* Kept up to date using the SCHED_VARIABLE_ISSUE hook. */
290 static rtx last_scheduled_insn;
291
292 /* Structure used to hold the components of a S/390 memory
293 address. A legitimate address on S/390 is of the general
294 form
295 base + index + displacement
296 where any of the components is optional.
297
298 base and index are registers of the class ADDR_REGS,
 299      displacement is an unsigned 12-bit (or, with the long-displacement facility, signed 20-bit) immediate constant.  */
300
301 struct s390_address
302 {
303 rtx base;
304 rtx indx;
305 rtx disp;
306 bool pointer;
307 bool literal_pool;
308 };
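/* For illustration: the assembler operand 100(%r3,%r2) -- displacement
   (index, base) -- is represented here as base = %r2, indx = %r3 and
   disp = (const_int 100); pointer and literal_pool hold additional
   properties derived during s390_decompose_address.  */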
309
310 /* The following structure is embedded in the machine
311 specific part of struct function. */
312
313 struct GTY (()) s390_frame_layout
314 {
315 /* Offset within stack frame. */
316 HOST_WIDE_INT gprs_offset;
317 HOST_WIDE_INT f0_offset;
318 HOST_WIDE_INT f4_offset;
319 HOST_WIDE_INT f8_offset;
320 HOST_WIDE_INT backchain_offset;
321
 322   /* Numbers of the first and last gpr for which slots in the
 323      register save area are reserved.  */
324 int first_save_gpr_slot;
325 int last_save_gpr_slot;
326
327 /* Number of first and last gpr to be saved, restored. */
328 int first_save_gpr;
329 int first_restore_gpr;
330 int last_save_gpr;
331 int last_restore_gpr;
332
 333   /* Bits standing for floating point registers.  Set if the
334 respective register has to be saved. Starting with reg 16 (f0)
335 at the rightmost bit.
336 Bit 15 - 8 7 6 5 4 3 2 1 0
337 fpr 15 - 8 7 5 3 1 6 4 2 0
338 reg 31 - 24 23 22 21 20 19 18 17 16 */
339 unsigned int fpr_bitmap;
340
341 /* Number of floating point registers f8-f15 which must be saved. */
342 int high_fprs;
343
344 /* Set if return address needs to be saved.
345 This flag is set by s390_return_addr_rtx if it could not use
 346      the initial value of r14 and therefore depends on r14 being
 347      saved to the stack.  */
348 bool save_return_addr_p;
349
350 /* Size of stack frame. */
351 HOST_WIDE_INT frame_size;
352 };
353
354 /* Define the structure for the machine field in struct function. */
355
356 struct GTY(()) machine_function
357 {
358 struct s390_frame_layout frame_layout;
359
360 /* Literal pool base register. */
361 rtx base_reg;
362
363 /* True if we may need to perform branch splitting. */
364 bool split_branches_pending_p;
365
366 /* Some local-dynamic TLS symbol name. */
367 const char *some_ld_name;
368
369 bool has_landing_pad_p;
370 };
371
 372 /* A few accessor macros for struct cfun->machine->s390_frame_layout.  */
373
374 #define cfun_frame_layout (cfun->machine->frame_layout)
375 #define cfun_save_high_fprs_p (!!cfun_frame_layout.high_fprs)
376 #define cfun_gprs_save_area_size ((cfun_frame_layout.last_save_gpr_slot - \
377 cfun_frame_layout.first_save_gpr_slot + 1) * UNITS_PER_LONG)
378 #define cfun_set_fpr_bit(BITNUM) (cfun->machine->frame_layout.fpr_bitmap |= \
379 (1 << (BITNUM)))
380 #define cfun_fpr_bit_p(BITNUM) (!!(cfun->machine->frame_layout.fpr_bitmap & \
381 (1 << (BITNUM))))
382
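/* For example, per the fpr_bitmap layout documented above, bit 2
   corresponds to f4 (hard reg 18), so
     cfun_set_fpr_bit (2);
   marks f4 as needing a save slot and cfun_fpr_bit_p (2) tests it.  */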
383 /* Number of GPRs and FPRs used for argument passing. */
384 #define GP_ARG_NUM_REG 5
 385 #define FP_ARG_NUM_REG (TARGET_64BIT ? 4 : 2)
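/* In the s390 ABI the GPR arguments are passed in r2 through r6; the
   FPR arguments use f0, f2, f4 and f6 with the 64-bit ABI and only f0
   and f2 with the 31-bit ABI, which is what the two counts above
   reflect.  */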
386
387 /* A couple of shortcuts. */
388 #define CONST_OK_FOR_J(x) \
389 CONST_OK_FOR_CONSTRAINT_P((x), 'J', "J")
390 #define CONST_OK_FOR_K(x) \
391 CONST_OK_FOR_CONSTRAINT_P((x), 'K', "K")
392 #define CONST_OK_FOR_Os(x) \
393 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "Os")
394 #define CONST_OK_FOR_Op(x) \
395 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "Op")
396 #define CONST_OK_FOR_On(x) \
397 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "On")
398
399 #define REGNO_PAIR_OK(REGNO, MODE) \
400 (HARD_REGNO_NREGS ((REGNO), (MODE)) == 1 || !((REGNO) & 1))
401
 402 /* The read-ahead distance of the dynamic branch prediction unit,
 403    in bytes, on a z10 (or higher) CPU.  */
404 #define PREDICT_DISTANCE (TARGET_Z10 ? 384 : 2048)
405
406 /* Return the alignment for LABEL. We default to the -falign-labels
407 value except for the literal pool base label. */
408 int
409 s390_label_align (rtx label)
410 {
411 rtx prev_insn = prev_active_insn (label);
412
413 if (prev_insn == NULL_RTX)
414 goto old;
415
416 prev_insn = single_set (prev_insn);
417
418 if (prev_insn == NULL_RTX)
419 goto old;
420
421 prev_insn = SET_SRC (prev_insn);
422
423 /* Don't align literal pool base labels. */
424 if (GET_CODE (prev_insn) == UNSPEC
425 && XINT (prev_insn, 1) == UNSPEC_MAIN_BASE)
426 return 0;
427
428 old:
429 return align_labels_log;
430 }
431
432 static enum machine_mode
433 s390_libgcc_cmp_return_mode (void)
434 {
435 return TARGET_64BIT ? DImode : SImode;
436 }
437
438 static enum machine_mode
439 s390_libgcc_shift_count_mode (void)
440 {
441 return TARGET_64BIT ? DImode : SImode;
442 }
443
444 static enum machine_mode
445 s390_unwind_word_mode (void)
446 {
447 return TARGET_64BIT ? DImode : SImode;
448 }
449
450 /* Return true if the back end supports mode MODE. */
451 static bool
452 s390_scalar_mode_supported_p (enum machine_mode mode)
453 {
 454   /* In contrast to the default implementation, reject TImode on 31-bit
 455      TARGET_ZARCH for ABI compliance.  */
456 if (!TARGET_64BIT && TARGET_ZARCH && mode == TImode)
457 return false;
458
459 if (DECIMAL_FLOAT_MODE_P (mode))
460 return default_decimal_float_supported_p ();
461
462 return default_scalar_mode_supported_p (mode);
463 }
464
465 /* Set the has_landing_pad_p flag in struct machine_function to VALUE. */
466
467 void
468 s390_set_has_landing_pad_p (bool value)
469 {
470 cfun->machine->has_landing_pad_p = value;
471 }
472
473 /* If two condition code modes are compatible, return a condition code
474 mode which is compatible with both. Otherwise, return
475 VOIDmode. */
476
477 static enum machine_mode
478 s390_cc_modes_compatible (enum machine_mode m1, enum machine_mode m2)
479 {
480 if (m1 == m2)
481 return m1;
482
483 switch (m1)
484 {
485 case CCZmode:
486 if (m2 == CCUmode || m2 == CCTmode || m2 == CCZ1mode
487 || m2 == CCSmode || m2 == CCSRmode || m2 == CCURmode)
488 return m2;
489 return VOIDmode;
490
491 case CCSmode:
492 case CCUmode:
493 case CCTmode:
494 case CCSRmode:
495 case CCURmode:
496 case CCZ1mode:
497 if (m2 == CCZmode)
498 return m1;
499
500 return VOIDmode;
501
502 default:
503 return VOIDmode;
504 }
505 return VOIDmode;
506 }
507
508 /* Return true if SET either doesn't set the CC register, or else
509 the source and destination have matching CC modes and that
510 CC mode is at least as constrained as REQ_MODE. */
511
512 static bool
513 s390_match_ccmode_set (rtx set, enum machine_mode req_mode)
514 {
515 enum machine_mode set_mode;
516
517 gcc_assert (GET_CODE (set) == SET);
518
519 if (GET_CODE (SET_DEST (set)) != REG || !CC_REGNO_P (REGNO (SET_DEST (set))))
520 return 1;
521
522 set_mode = GET_MODE (SET_DEST (set));
523 switch (set_mode)
524 {
525 case CCSmode:
526 case CCSRmode:
527 case CCUmode:
528 case CCURmode:
529 case CCLmode:
530 case CCL1mode:
531 case CCL2mode:
532 case CCL3mode:
533 case CCT1mode:
534 case CCT2mode:
535 case CCT3mode:
536 if (req_mode != set_mode)
537 return 0;
538 break;
539
540 case CCZmode:
541 if (req_mode != CCSmode && req_mode != CCUmode && req_mode != CCTmode
542 && req_mode != CCSRmode && req_mode != CCURmode)
543 return 0;
544 break;
545
546 case CCAPmode:
547 case CCANmode:
548 if (req_mode != CCAmode)
549 return 0;
550 break;
551
552 default:
553 gcc_unreachable ();
554 }
555
556 return (GET_MODE (SET_SRC (set)) == set_mode);
557 }
558
559 /* Return true if every SET in INSN that sets the CC register
560 has source and destination with matching CC modes and that
561 CC mode is at least as constrained as REQ_MODE.
562 If REQ_MODE is VOIDmode, always return false. */
563
564 bool
565 s390_match_ccmode (rtx insn, enum machine_mode req_mode)
566 {
567 int i;
568
569 /* s390_tm_ccmode returns VOIDmode to indicate failure. */
570 if (req_mode == VOIDmode)
571 return false;
572
573 if (GET_CODE (PATTERN (insn)) == SET)
574 return s390_match_ccmode_set (PATTERN (insn), req_mode);
575
576 if (GET_CODE (PATTERN (insn)) == PARALLEL)
577 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
578 {
579 rtx set = XVECEXP (PATTERN (insn), 0, i);
580 if (GET_CODE (set) == SET)
581 if (!s390_match_ccmode_set (set, req_mode))
582 return false;
583 }
584
585 return true;
586 }
587
588 /* If a test-under-mask instruction can be used to implement
589 (compare (and ... OP1) OP2), return the CC mode required
590 to do that. Otherwise, return VOIDmode.
591 MIXED is true if the instruction can distinguish between
592 CC1 and CC2 for mixed selected bits (TMxx), it is false
593 if the instruction cannot (TM). */
594
595 enum machine_mode
596 s390_tm_ccmode (rtx op1, rtx op2, bool mixed)
597 {
598 int bit0, bit1;
599
600 /* ??? Fixme: should work on CONST_DOUBLE as well. */
601 if (GET_CODE (op1) != CONST_INT || GET_CODE (op2) != CONST_INT)
602 return VOIDmode;
603
604 /* Selected bits all zero: CC0.
605 e.g.: int a; if ((a & (16 + 128)) == 0) */
606 if (INTVAL (op2) == 0)
607 return CCTmode;
608
609 /* Selected bits all one: CC3.
610 e.g.: int a; if ((a & (16 + 128)) == 16 + 128) */
611 if (INTVAL (op2) == INTVAL (op1))
612 return CCT3mode;
613
614 /* Exactly two bits selected, mixed zeroes and ones: CC1 or CC2. e.g.:
615 int a;
616 if ((a & (16 + 128)) == 16) -> CCT1
617 if ((a & (16 + 128)) == 128) -> CCT2 */
618 if (mixed)
619 {
620 bit1 = exact_log2 (INTVAL (op2));
621 bit0 = exact_log2 (INTVAL (op1) ^ INTVAL (op2));
622 if (bit0 != -1 && bit1 != -1)
623 return bit0 > bit1 ? CCT1mode : CCT2mode;
624 }
625
626 return VOIDmode;
627 }
628
629 /* Given a comparison code OP (EQ, NE, etc.) and the operands
630 OP0 and OP1 of a COMPARE, return the mode to be used for the
631 comparison. */
632
633 enum machine_mode
634 s390_select_ccmode (enum rtx_code code, rtx op0, rtx op1)
635 {
636 switch (code)
637 {
638 case EQ:
639 case NE:
640 if ((GET_CODE (op0) == NEG || GET_CODE (op0) == ABS)
641 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
642 return CCAPmode;
643 if (GET_CODE (op0) == PLUS && GET_CODE (XEXP (op0, 1)) == CONST_INT
644 && CONST_OK_FOR_K (INTVAL (XEXP (op0, 1))))
645 return CCAPmode;
646 if ((GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
647 || GET_CODE (op1) == NEG)
648 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
649 return CCLmode;
650
651 if (GET_CODE (op0) == AND)
652 {
653 /* Check whether we can potentially do it via TM. */
654 enum machine_mode ccmode;
655 ccmode = s390_tm_ccmode (XEXP (op0, 1), op1, 1);
656 if (ccmode != VOIDmode)
657 {
658 /* Relax CCTmode to CCZmode to allow fall-back to AND
659 if that turns out to be beneficial. */
660 return ccmode == CCTmode ? CCZmode : ccmode;
661 }
662 }
663
664 if (register_operand (op0, HImode)
665 && GET_CODE (op1) == CONST_INT
666 && (INTVAL (op1) == -1 || INTVAL (op1) == 65535))
667 return CCT3mode;
668 if (register_operand (op0, QImode)
669 && GET_CODE (op1) == CONST_INT
670 && (INTVAL (op1) == -1 || INTVAL (op1) == 255))
671 return CCT3mode;
672
673 return CCZmode;
674
675 case LE:
676 case LT:
677 case GE:
678 case GT:
 679       /* The only overflow condition of NEG and ABS happens when the
 680 	 operand is INT_MIN: the mathematically positive result wraps
 681 	 around and stays negative.  Using CCAP mode the resulting cc
 682 	 can still be used for comparisons.  */
683 if ((GET_CODE (op0) == NEG || GET_CODE (op0) == ABS)
684 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
685 return CCAPmode;
686
 687       /* If a constant is involved in an add instruction, it is possible to use
 688 	 the resulting cc for comparisons with zero.  Knowing the sign of the
 689 	 constant, the overflow behavior becomes predictable.  e.g.:
 690 	   int a, b; if ((b = a + c) > 0)
 691 	 with c a constant value: c < 0 -> CCAN and c >= 0 -> CCAP  */
692 if (GET_CODE (op0) == PLUS && GET_CODE (XEXP (op0, 1)) == CONST_INT
693 && (CONST_OK_FOR_K (INTVAL (XEXP (op0, 1)))
694 || (CONST_OK_FOR_CONSTRAINT_P (INTVAL (XEXP (op0, 1)), 'O', "Os")
695 /* Avoid INT32_MIN on 32 bit. */
696 && (!TARGET_ZARCH || INTVAL (XEXP (op0, 1)) != -0x7fffffff - 1))))
697 {
698 if (INTVAL (XEXP((op0), 1)) < 0)
699 return CCANmode;
700 else
701 return CCAPmode;
702 }
703 /* Fall through. */
704 case UNORDERED:
705 case ORDERED:
706 case UNEQ:
707 case UNLE:
708 case UNLT:
709 case UNGE:
710 case UNGT:
711 case LTGT:
712 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
713 && GET_CODE (op1) != CONST_INT)
714 return CCSRmode;
715 return CCSmode;
716
717 case LTU:
718 case GEU:
719 if (GET_CODE (op0) == PLUS
720 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
721 return CCL1mode;
722
723 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
724 && GET_CODE (op1) != CONST_INT)
725 return CCURmode;
726 return CCUmode;
727
728 case LEU:
729 case GTU:
730 if (GET_CODE (op0) == MINUS
731 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
732 return CCL2mode;
733
734 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
735 && GET_CODE (op1) != CONST_INT)
736 return CCURmode;
737 return CCUmode;
738
739 default:
740 gcc_unreachable ();
741 }
742 }
743
744 /* Replace the comparison OP0 CODE OP1 by a semantically equivalent one
745 that we can implement more efficiently. */
746
747 static void
748 s390_canonicalize_comparison (int *code, rtx *op0, rtx *op1,
749 bool op0_preserve_value)
750 {
751 if (op0_preserve_value)
752 return;
753
754 /* Convert ZERO_EXTRACT back to AND to enable TM patterns. */
755 if ((*code == EQ || *code == NE)
756 && *op1 == const0_rtx
757 && GET_CODE (*op0) == ZERO_EXTRACT
758 && GET_CODE (XEXP (*op0, 1)) == CONST_INT
759 && GET_CODE (XEXP (*op0, 2)) == CONST_INT
760 && SCALAR_INT_MODE_P (GET_MODE (XEXP (*op0, 0))))
761 {
762 rtx inner = XEXP (*op0, 0);
763 HOST_WIDE_INT modesize = GET_MODE_BITSIZE (GET_MODE (inner));
764 HOST_WIDE_INT len = INTVAL (XEXP (*op0, 1));
765 HOST_WIDE_INT pos = INTVAL (XEXP (*op0, 2));
766
767 if (len > 0 && len < modesize
768 && pos >= 0 && pos + len <= modesize
769 && modesize <= HOST_BITS_PER_WIDE_INT)
770 {
771 unsigned HOST_WIDE_INT block;
772 block = ((unsigned HOST_WIDE_INT) 1 << len) - 1;
773 block <<= modesize - pos - len;
774
775 *op0 = gen_rtx_AND (GET_MODE (inner), inner,
776 gen_int_mode (block, GET_MODE (inner)));
777 }
778 }
779
780 /* Narrow AND of memory against immediate to enable TM. */
781 if ((*code == EQ || *code == NE)
782 && *op1 == const0_rtx
783 && GET_CODE (*op0) == AND
784 && GET_CODE (XEXP (*op0, 1)) == CONST_INT
785 && SCALAR_INT_MODE_P (GET_MODE (XEXP (*op0, 0))))
786 {
787 rtx inner = XEXP (*op0, 0);
788 rtx mask = XEXP (*op0, 1);
789
790 /* Ignore paradoxical SUBREGs if all extra bits are masked out. */
791 if (GET_CODE (inner) == SUBREG
792 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (inner)))
793 && (GET_MODE_SIZE (GET_MODE (inner))
794 >= GET_MODE_SIZE (GET_MODE (SUBREG_REG (inner))))
795 && ((INTVAL (mask)
796 & GET_MODE_MASK (GET_MODE (inner))
797 & ~GET_MODE_MASK (GET_MODE (SUBREG_REG (inner))))
798 == 0))
799 inner = SUBREG_REG (inner);
800
801 /* Do not change volatile MEMs. */
802 if (MEM_P (inner) && !MEM_VOLATILE_P (inner))
803 {
804 int part = s390_single_part (XEXP (*op0, 1),
805 GET_MODE (inner), QImode, 0);
806 if (part >= 0)
807 {
808 mask = gen_int_mode (s390_extract_part (mask, QImode, 0), QImode);
809 inner = adjust_address_nv (inner, QImode, part);
810 *op0 = gen_rtx_AND (QImode, inner, mask);
811 }
812 }
813 }
814
815 /* Narrow comparisons against 0xffff to HImode if possible. */
816 if ((*code == EQ || *code == NE)
817 && GET_CODE (*op1) == CONST_INT
818 && INTVAL (*op1) == 0xffff
819 && SCALAR_INT_MODE_P (GET_MODE (*op0))
820 && (nonzero_bits (*op0, GET_MODE (*op0))
821 & ~(unsigned HOST_WIDE_INT) 0xffff) == 0)
822 {
823 *op0 = gen_lowpart (HImode, *op0);
824 *op1 = constm1_rtx;
825 }
826
827 /* Remove redundant UNSPEC_CCU_TO_INT conversions if possible. */
828 if (GET_CODE (*op0) == UNSPEC
829 && XINT (*op0, 1) == UNSPEC_CCU_TO_INT
830 && XVECLEN (*op0, 0) == 1
831 && GET_MODE (XVECEXP (*op0, 0, 0)) == CCUmode
832 && GET_CODE (XVECEXP (*op0, 0, 0)) == REG
833 && REGNO (XVECEXP (*op0, 0, 0)) == CC_REGNUM
834 && *op1 == const0_rtx)
835 {
836 enum rtx_code new_code = UNKNOWN;
837 switch (*code)
838 {
839 case EQ: new_code = EQ; break;
840 case NE: new_code = NE; break;
841 case LT: new_code = GTU; break;
842 case GT: new_code = LTU; break;
843 case LE: new_code = GEU; break;
844 case GE: new_code = LEU; break;
845 default: break;
846 }
847
848 if (new_code != UNKNOWN)
849 {
850 *op0 = XVECEXP (*op0, 0, 0);
851 *code = new_code;
852 }
853 }
854
855 /* Remove redundant UNSPEC_CCZ_TO_INT conversions if possible. */
856 if (GET_CODE (*op0) == UNSPEC
857 && XINT (*op0, 1) == UNSPEC_CCZ_TO_INT
858 && XVECLEN (*op0, 0) == 1
859 && GET_MODE (XVECEXP (*op0, 0, 0)) == CCZmode
860 && GET_CODE (XVECEXP (*op0, 0, 0)) == REG
861 && REGNO (XVECEXP (*op0, 0, 0)) == CC_REGNUM
862 && *op1 == const0_rtx)
863 {
864 enum rtx_code new_code = UNKNOWN;
865 switch (*code)
866 {
867 case EQ: new_code = EQ; break;
868 case NE: new_code = NE; break;
869 default: break;
870 }
871
872 if (new_code != UNKNOWN)
873 {
874 *op0 = XVECEXP (*op0, 0, 0);
875 *code = new_code;
876 }
877 }
878
879 /* Simplify cascaded EQ, NE with const0_rtx. */
880 if ((*code == NE || *code == EQ)
881 && (GET_CODE (*op0) == EQ || GET_CODE (*op0) == NE)
882 && GET_MODE (*op0) == SImode
883 && GET_MODE (XEXP (*op0, 0)) == CCZ1mode
884 && REG_P (XEXP (*op0, 0))
885 && XEXP (*op0, 1) == const0_rtx
886 && *op1 == const0_rtx)
887 {
888 if ((*code == EQ && GET_CODE (*op0) == NE)
889 || (*code == NE && GET_CODE (*op0) == EQ))
890 *code = EQ;
891 else
892 *code = NE;
893 *op0 = XEXP (*op0, 0);
894 }
895
896 /* Prefer register over memory as first operand. */
897 if (MEM_P (*op0) && REG_P (*op1))
898 {
899 rtx tem = *op0; *op0 = *op1; *op1 = tem;
900 *code = (int)swap_condition ((enum rtx_code)*code);
901 }
902 }
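/* Worked example for the ZERO_EXTRACT conversion above: for
     (zero_extract:SI x (const_int 1) (const_int 7)) == 0
   we have len = 1 and pos = 7 (counted from the most significant bit),
   so block = 1 << (32 - 7 - 1) and the comparison is rewritten as
     (x & 0x01000000) == 0
   which the TM patterns can match.  */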
903
904 /* Emit a compare instruction suitable to implement the comparison
905 OP0 CODE OP1. Return the correct condition RTL to be placed in
906 the IF_THEN_ELSE of the conditional branch testing the result. */
907
908 rtx
909 s390_emit_compare (enum rtx_code code, rtx op0, rtx op1)
910 {
911 enum machine_mode mode = s390_select_ccmode (code, op0, op1);
912 rtx cc;
913
914 /* Do not output a redundant compare instruction if a compare_and_swap
915 pattern already computed the result and the machine modes are compatible. */
916 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
917 {
918 gcc_assert (s390_cc_modes_compatible (GET_MODE (op0), mode)
919 == GET_MODE (op0));
920 cc = op0;
921 }
922 else
923 {
924 cc = gen_rtx_REG (mode, CC_REGNUM);
925 emit_insn (gen_rtx_SET (VOIDmode, cc, gen_rtx_COMPARE (mode, op0, op1)));
926 }
927
928 return gen_rtx_fmt_ee (code, VOIDmode, cc, const0_rtx);
929 }
930
931 /* Emit a SImode compare and swap instruction setting MEM to NEW_RTX if OLD
932 matches CMP.
933 Return the correct condition RTL to be placed in the IF_THEN_ELSE of the
934 conditional branch testing the result. */
935
936 static rtx
937 s390_emit_compare_and_swap (enum rtx_code code, rtx old, rtx mem,
938 rtx cmp, rtx new_rtx)
939 {
940 emit_insn (gen_atomic_compare_and_swapsi_internal (old, mem, cmp, new_rtx));
941 return s390_emit_compare (code, gen_rtx_REG (CCZ1mode, CC_REGNUM),
942 const0_rtx);
943 }
944
945 /* Emit a jump instruction to TARGET. If COND is NULL_RTX, emit an
946 unconditional jump, else a conditional jump under condition COND. */
947
948 void
949 s390_emit_jump (rtx target, rtx cond)
950 {
951 rtx insn;
952
953 target = gen_rtx_LABEL_REF (VOIDmode, target);
954 if (cond)
955 target = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, target, pc_rtx);
956
957 insn = gen_rtx_SET (VOIDmode, pc_rtx, target);
958 emit_jump_insn (insn);
959 }
960
961 /* Return branch condition mask to implement a branch
962 specified by CODE. Return -1 for invalid comparisons. */
963
964 int
965 s390_branch_condition_mask (rtx code)
966 {
967 const int CC0 = 1 << 3;
968 const int CC1 = 1 << 2;
969 const int CC2 = 1 << 1;
970 const int CC3 = 1 << 0;
971
972 gcc_assert (GET_CODE (XEXP (code, 0)) == REG);
973 gcc_assert (REGNO (XEXP (code, 0)) == CC_REGNUM);
974 gcc_assert (XEXP (code, 1) == const0_rtx);
975
976 switch (GET_MODE (XEXP (code, 0)))
977 {
978 case CCZmode:
979 case CCZ1mode:
980 switch (GET_CODE (code))
981 {
982 case EQ: return CC0;
983 case NE: return CC1 | CC2 | CC3;
984 default: return -1;
985 }
986 break;
987
988 case CCT1mode:
989 switch (GET_CODE (code))
990 {
991 case EQ: return CC1;
992 case NE: return CC0 | CC2 | CC3;
993 default: return -1;
994 }
995 break;
996
997 case CCT2mode:
998 switch (GET_CODE (code))
999 {
1000 case EQ: return CC2;
1001 case NE: return CC0 | CC1 | CC3;
1002 default: return -1;
1003 }
1004 break;
1005
1006 case CCT3mode:
1007 switch (GET_CODE (code))
1008 {
1009 case EQ: return CC3;
1010 case NE: return CC0 | CC1 | CC2;
1011 default: return -1;
1012 }
1013 break;
1014
1015 case CCLmode:
1016 switch (GET_CODE (code))
1017 {
1018 case EQ: return CC0 | CC2;
1019 case NE: return CC1 | CC3;
1020 default: return -1;
1021 }
1022 break;
1023
1024 case CCL1mode:
1025 switch (GET_CODE (code))
1026 {
1027 case LTU: return CC2 | CC3; /* carry */
1028 case GEU: return CC0 | CC1; /* no carry */
1029 default: return -1;
1030 }
1031 break;
1032
1033 case CCL2mode:
1034 switch (GET_CODE (code))
1035 {
1036 case GTU: return CC0 | CC1; /* borrow */
1037 case LEU: return CC2 | CC3; /* no borrow */
1038 default: return -1;
1039 }
1040 break;
1041
1042 case CCL3mode:
1043 switch (GET_CODE (code))
1044 {
1045 case EQ: return CC0 | CC2;
1046 case NE: return CC1 | CC3;
1047 case LTU: return CC1;
1048 case GTU: return CC3;
1049 case LEU: return CC1 | CC2;
1050 case GEU: return CC2 | CC3;
1051 default: return -1;
1052 }
1053
1054 case CCUmode:
1055 switch (GET_CODE (code))
1056 {
1057 case EQ: return CC0;
1058 case NE: return CC1 | CC2 | CC3;
1059 case LTU: return CC1;
1060 case GTU: return CC2;
1061 case LEU: return CC0 | CC1;
1062 case GEU: return CC0 | CC2;
1063 default: return -1;
1064 }
1065 break;
1066
1067 case CCURmode:
1068 switch (GET_CODE (code))
1069 {
1070 case EQ: return CC0;
1071 case NE: return CC2 | CC1 | CC3;
1072 case LTU: return CC2;
1073 case GTU: return CC1;
1074 case LEU: return CC0 | CC2;
1075 case GEU: return CC0 | CC1;
1076 default: return -1;
1077 }
1078 break;
1079
1080 case CCAPmode:
1081 switch (GET_CODE (code))
1082 {
1083 case EQ: return CC0;
1084 case NE: return CC1 | CC2 | CC3;
1085 case LT: return CC1 | CC3;
1086 case GT: return CC2;
1087 case LE: return CC0 | CC1 | CC3;
1088 case GE: return CC0 | CC2;
1089 default: return -1;
1090 }
1091 break;
1092
1093 case CCANmode:
1094 switch (GET_CODE (code))
1095 {
1096 case EQ: return CC0;
1097 case NE: return CC1 | CC2 | CC3;
1098 case LT: return CC1;
1099 case GT: return CC2 | CC3;
1100 case LE: return CC0 | CC1;
1101 case GE: return CC0 | CC2 | CC3;
1102 default: return -1;
1103 }
1104 break;
1105
1106 case CCSmode:
1107 switch (GET_CODE (code))
1108 {
1109 case EQ: return CC0;
1110 case NE: return CC1 | CC2 | CC3;
1111 case LT: return CC1;
1112 case GT: return CC2;
1113 case LE: return CC0 | CC1;
1114 case GE: return CC0 | CC2;
1115 case UNORDERED: return CC3;
1116 case ORDERED: return CC0 | CC1 | CC2;
1117 case UNEQ: return CC0 | CC3;
1118 case UNLT: return CC1 | CC3;
1119 case UNGT: return CC2 | CC3;
1120 case UNLE: return CC0 | CC1 | CC3;
1121 case UNGE: return CC0 | CC2 | CC3;
1122 case LTGT: return CC1 | CC2;
1123 default: return -1;
1124 }
1125 break;
1126
1127 case CCSRmode:
1128 switch (GET_CODE (code))
1129 {
1130 case EQ: return CC0;
1131 case NE: return CC2 | CC1 | CC3;
1132 case LT: return CC2;
1133 case GT: return CC1;
1134 case LE: return CC0 | CC2;
1135 case GE: return CC0 | CC1;
1136 case UNORDERED: return CC3;
1137 case ORDERED: return CC0 | CC2 | CC1;
1138 case UNEQ: return CC0 | CC3;
1139 case UNLT: return CC2 | CC3;
1140 case UNGT: return CC1 | CC3;
1141 case UNLE: return CC0 | CC2 | CC3;
1142 case UNGE: return CC0 | CC1 | CC3;
1143 case LTGT: return CC2 | CC1;
1144 default: return -1;
1145 }
1146 break;
1147
1148 default:
1149 return -1;
1150 }
1151 }
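/* For example, in CCZmode an EQ branch tests only CC0, so the returned
   mask is 1 << 3 = 8 (binary 1000), while NE yields CC1 | CC2 | CC3 = 7
   (binary 0111).  The mask is later used to pick the branch mnemonic.  */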
1152
1153
1154 /* Return branch condition mask to implement a compare and branch
1155 specified by CODE. Return -1 for invalid comparisons. */
1156
1157 int
1158 s390_compare_and_branch_condition_mask (rtx code)
1159 {
1160 const int CC0 = 1 << 3;
1161 const int CC1 = 1 << 2;
1162 const int CC2 = 1 << 1;
1163
1164 switch (GET_CODE (code))
1165 {
1166 case EQ:
1167 return CC0;
1168 case NE:
1169 return CC1 | CC2;
1170 case LT:
1171 case LTU:
1172 return CC1;
1173 case GT:
1174 case GTU:
1175 return CC2;
1176 case LE:
1177 case LEU:
1178 return CC0 | CC1;
1179 case GE:
1180 case GEU:
1181 return CC0 | CC2;
1182 default:
1183 gcc_unreachable ();
1184 }
1185 return -1;
1186 }
1187
1188 /* If INV is false, return assembler mnemonic string to implement
1189 a branch specified by CODE. If INV is true, return mnemonic
1190 for the corresponding inverted branch. */
1191
1192 static const char *
1193 s390_branch_condition_mnemonic (rtx code, int inv)
1194 {
1195 int mask;
1196
1197 static const char *const mnemonic[16] =
1198 {
1199 NULL, "o", "h", "nle",
1200 "l", "nhe", "lh", "ne",
1201 "e", "nlh", "he", "nl",
1202 "le", "nh", "no", NULL
1203 };
1204
1205 if (GET_CODE (XEXP (code, 0)) == REG
1206 && REGNO (XEXP (code, 0)) == CC_REGNUM
1207 && XEXP (code, 1) == const0_rtx)
1208 mask = s390_branch_condition_mask (code);
1209 else
1210 mask = s390_compare_and_branch_condition_mask (code);
1211
1212 gcc_assert (mask >= 0);
1213
1214 if (inv)
1215 mask ^= 15;
1216
1217 gcc_assert (mask >= 1 && mask <= 14);
1218
1219 return mnemonic[mask];
1220 }
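/* E.g. for an EQ comparison in CCZmode the mask is 8, giving the "e"
   mnemonic; with INV set the mask becomes 8 ^ 15 = 7 and the inverted
   branch uses "ne".  */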
1221
 1222 /* Return the part of OP which has a value different from DEF.
 1223    The size of the part is determined by MODE.
 1224    Use this function only if you already know that OP really
 1225    contains such a part.  */
1226
1227 unsigned HOST_WIDE_INT
1228 s390_extract_part (rtx op, enum machine_mode mode, int def)
1229 {
1230 unsigned HOST_WIDE_INT value = 0;
1231 int max_parts = HOST_BITS_PER_WIDE_INT / GET_MODE_BITSIZE (mode);
1232 int part_bits = GET_MODE_BITSIZE (mode);
1233 unsigned HOST_WIDE_INT part_mask
1234 = ((unsigned HOST_WIDE_INT)1 << part_bits) - 1;
1235 int i;
1236
1237 for (i = 0; i < max_parts; i++)
1238 {
1239 if (i == 0)
1240 value = (unsigned HOST_WIDE_INT) INTVAL (op);
1241 else
1242 value >>= part_bits;
1243
1244 if ((value & part_mask) != (def & part_mask))
1245 return value & part_mask;
1246 }
1247
1248 gcc_unreachable ();
1249 }
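/* E.g. s390_extract_part (GEN_INT (0x12340000), HImode, 0) returns
   0x1234, the only 16-bit part of the constant that differs from the
   default value zero.  */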
1250
1251 /* If OP is an integer constant of mode MODE with exactly one
1252 part of mode PART_MODE unequal to DEF, return the number of that
1253 part. Otherwise, return -1. */
1254
1255 int
1256 s390_single_part (rtx op,
1257 enum machine_mode mode,
1258 enum machine_mode part_mode,
1259 int def)
1260 {
1261 unsigned HOST_WIDE_INT value = 0;
1262 int n_parts = GET_MODE_SIZE (mode) / GET_MODE_SIZE (part_mode);
1263 unsigned HOST_WIDE_INT part_mask
1264 = ((unsigned HOST_WIDE_INT)1 << GET_MODE_BITSIZE (part_mode)) - 1;
1265 int i, part = -1;
1266
1267 if (GET_CODE (op) != CONST_INT)
1268 return -1;
1269
1270 for (i = 0; i < n_parts; i++)
1271 {
1272 if (i == 0)
1273 value = (unsigned HOST_WIDE_INT) INTVAL (op);
1274 else
1275 value >>= GET_MODE_BITSIZE (part_mode);
1276
1277 if ((value & part_mask) != (def & part_mask))
1278 {
1279 if (part != -1)
1280 return -1;
1281 else
1282 part = i;
1283 }
1284 }
1285 return part == -1 ? -1 : n_parts - 1 - part;
1286 }
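/* E.g. for the SImode constant 0x00ff0000 with PART_MODE == HImode and
   DEF == 0 only the upper halfword differs from zero, so the function
   returns 0; parts are numbered starting from the most significant one.  */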
1287
1288 /* Return true if IN contains a contiguous bitfield in the lower SIZE
1289 bits and no other bits are set in IN. POS and LENGTH can be used
1290 to obtain the start position and the length of the bitfield.
1291
1292 POS gives the position of the first bit of the bitfield counting
1293 from the lowest order bit starting with zero. In order to use this
1294 value for S/390 instructions this has to be converted to "bits big
1295 endian" style. */
1296
1297 bool
1298 s390_contiguous_bitmask_p (unsigned HOST_WIDE_INT in, int size,
1299 int *pos, int *length)
1300 {
1301 int tmp_pos = 0;
1302 int tmp_length = 0;
1303 int i;
1304 unsigned HOST_WIDE_INT mask = 1ULL;
1305 bool contiguous = false;
1306
1307 for (i = 0; i < size; mask <<= 1, i++)
1308 {
1309 if (contiguous)
1310 {
1311 if (mask & in)
1312 tmp_length++;
1313 else
1314 break;
1315 }
1316 else
1317 {
1318 if (mask & in)
1319 {
1320 contiguous = true;
1321 tmp_length++;
1322 }
1323 else
1324 tmp_pos++;
1325 }
1326 }
1327
1328 if (!tmp_length)
1329 return false;
1330
1331 /* Calculate a mask for all bits beyond the contiguous bits. */
1332 mask = (-1LL & ~(((1ULL << (tmp_length + tmp_pos - 1)) << 1) - 1));
1333
1334 if (mask & in)
1335 return false;
1336
1337 if (tmp_length + tmp_pos - 1 > size)
1338 return false;
1339
1340 if (length)
1341 *length = tmp_length;
1342
1343 if (pos)
1344 *pos = tmp_pos;
1345
1346 return true;
1347 }
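/* E.g. 0x000ff000 with SIZE == 32 is accepted with *POS == 12 and
   *LENGTH == 8, whereas 0xff0000ff is rejected because its set bits do
   not form a single contiguous run.  */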
1348
1349 /* Check whether a rotate of ROTL followed by an AND of CONTIG is
1350 equivalent to a shift followed by the AND. In particular, CONTIG
1351 should not overlap the (rotated) bit 0/bit 63 gap. Negative values
1352 for ROTL indicate a rotate to the right. */
1353
1354 bool
1355 s390_extzv_shift_ok (int bitsize, int rotl, unsigned HOST_WIDE_INT contig)
1356 {
1357 int pos, len;
1358 bool ok;
1359
1360 ok = s390_contiguous_bitmask_p (contig, bitsize, &pos, &len);
1361 gcc_assert (ok);
1362
1363 return ((rotl >= 0 && rotl <= pos)
1364 || (rotl < 0 && -rotl <= bitsize - len - pos));
1365 }
1366
1367 /* Check whether we can (and want to) split a double-word
1368 move in mode MODE from SRC to DST into two single-word
1369 moves, moving the subword FIRST_SUBWORD first. */
1370
1371 bool
1372 s390_split_ok_p (rtx dst, rtx src, enum machine_mode mode, int first_subword)
1373 {
1374 /* Floating point registers cannot be split. */
1375 if (FP_REG_P (src) || FP_REG_P (dst))
1376 return false;
1377
1378 /* We don't need to split if operands are directly accessible. */
1379 if (s_operand (src, mode) || s_operand (dst, mode))
1380 return false;
1381
1382 /* Non-offsettable memory references cannot be split. */
1383 if ((GET_CODE (src) == MEM && !offsettable_memref_p (src))
1384 || (GET_CODE (dst) == MEM && !offsettable_memref_p (dst)))
1385 return false;
1386
1387 /* Moving the first subword must not clobber a register
1388 needed to move the second subword. */
1389 if (register_operand (dst, mode))
1390 {
1391 rtx subreg = operand_subword (dst, first_subword, 0, mode);
1392 if (reg_overlap_mentioned_p (subreg, src))
1393 return false;
1394 }
1395
1396 return true;
1397 }
1398
1399 /* Return true if it can be proven that [MEM1, MEM1 + SIZE]
1400 and [MEM2, MEM2 + SIZE] do overlap and false
1401 otherwise. */
1402
1403 bool
1404 s390_overlap_p (rtx mem1, rtx mem2, HOST_WIDE_INT size)
1405 {
1406 rtx addr1, addr2, addr_delta;
1407 HOST_WIDE_INT delta;
1408
1409 if (GET_CODE (mem1) != MEM || GET_CODE (mem2) != MEM)
1410 return true;
1411
1412 if (size == 0)
1413 return false;
1414
1415 addr1 = XEXP (mem1, 0);
1416 addr2 = XEXP (mem2, 0);
1417
1418 addr_delta = simplify_binary_operation (MINUS, Pmode, addr2, addr1);
1419
1420 /* This overlapping check is used by peepholes merging memory block operations.
1421 Overlapping operations would otherwise be recognized by the S/390 hardware
1422 and would fall back to a slower implementation. Allowing overlapping
1423 operations would lead to slow code but not to wrong code. Therefore we are
1424 somewhat optimistic if we cannot prove that the memory blocks are
1425 overlapping.
1426 That's why we return false here although this may accept operations on
1427 overlapping memory areas. */
1428 if (!addr_delta || GET_CODE (addr_delta) != CONST_INT)
1429 return false;
1430
1431 delta = INTVAL (addr_delta);
1432
1433 if (delta == 0
1434 || (delta > 0 && delta < size)
1435 || (delta < 0 && -delta < size))
1436 return true;
1437
1438 return false;
1439 }
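/* E.g. two 16-byte blocks whose start addresses provably differ by 8
   bytes are reported as overlapping; if the distance between the
   addresses cannot be determined at compile time the answer is the
   optimistic "false" described above.  */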
1440
1441 /* Check whether the address of memory reference MEM2 equals exactly
1442 the address of memory reference MEM1 plus DELTA. Return true if
1443 we can prove this to be the case, false otherwise. */
1444
1445 bool
1446 s390_offset_p (rtx mem1, rtx mem2, rtx delta)
1447 {
1448 rtx addr1, addr2, addr_delta;
1449
1450 if (GET_CODE (mem1) != MEM || GET_CODE (mem2) != MEM)
1451 return false;
1452
1453 addr1 = XEXP (mem1, 0);
1454 addr2 = XEXP (mem2, 0);
1455
1456 addr_delta = simplify_binary_operation (MINUS, Pmode, addr2, addr1);
1457 if (!addr_delta || !rtx_equal_p (addr_delta, delta))
1458 return false;
1459
1460 return true;
1461 }
1462
1463 /* Expand logical operator CODE in mode MODE with operands OPERANDS. */
1464
1465 void
1466 s390_expand_logical_operator (enum rtx_code code, enum machine_mode mode,
1467 rtx *operands)
1468 {
1469 enum machine_mode wmode = mode;
1470 rtx dst = operands[0];
1471 rtx src1 = operands[1];
1472 rtx src2 = operands[2];
1473 rtx op, clob, tem;
1474
1475 /* If we cannot handle the operation directly, use a temp register. */
1476 if (!s390_logical_operator_ok_p (operands))
1477 dst = gen_reg_rtx (mode);
1478
1479 /* QImode and HImode patterns make sense only if we have a destination
1480 in memory. Otherwise perform the operation in SImode. */
1481 if ((mode == QImode || mode == HImode) && GET_CODE (dst) != MEM)
1482 wmode = SImode;
1483
1484 /* Widen operands if required. */
1485 if (mode != wmode)
1486 {
1487 if (GET_CODE (dst) == SUBREG
1488 && (tem = simplify_subreg (wmode, dst, mode, 0)) != 0)
1489 dst = tem;
1490 else if (REG_P (dst))
1491 dst = gen_rtx_SUBREG (wmode, dst, 0);
1492 else
1493 dst = gen_reg_rtx (wmode);
1494
1495 if (GET_CODE (src1) == SUBREG
1496 && (tem = simplify_subreg (wmode, src1, mode, 0)) != 0)
1497 src1 = tem;
1498 else if (GET_MODE (src1) != VOIDmode)
1499 src1 = gen_rtx_SUBREG (wmode, force_reg (mode, src1), 0);
1500
1501 if (GET_CODE (src2) == SUBREG
1502 && (tem = simplify_subreg (wmode, src2, mode, 0)) != 0)
1503 src2 = tem;
1504 else if (GET_MODE (src2) != VOIDmode)
1505 src2 = gen_rtx_SUBREG (wmode, force_reg (mode, src2), 0);
1506 }
1507
1508 /* Emit the instruction. */
1509 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_ee (code, wmode, src1, src2));
1510 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
1511 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
1512
1513 /* Fix up the destination if needed. */
1514 if (dst != operands[0])
1515 emit_move_insn (operands[0], gen_lowpart (mode, dst));
1516 }
1517
1518 /* Check whether OPERANDS are OK for a logical operation (AND, IOR, XOR). */
1519
1520 bool
1521 s390_logical_operator_ok_p (rtx *operands)
1522 {
1523 /* If the destination operand is in memory, it needs to coincide
1524 with one of the source operands. After reload, it has to be
1525 the first source operand. */
1526 if (GET_CODE (operands[0]) == MEM)
1527 return rtx_equal_p (operands[0], operands[1])
1528 || (!reload_completed && rtx_equal_p (operands[0], operands[2]));
1529
1530 return true;
1531 }
1532
1533 /* Narrow logical operation CODE of memory operand MEMOP with immediate
1534 operand IMMOP to switch from SS to SI type instructions. */
1535
1536 void
1537 s390_narrow_logical_operator (enum rtx_code code, rtx *memop, rtx *immop)
1538 {
1539 int def = code == AND ? -1 : 0;
1540 HOST_WIDE_INT mask;
1541 int part;
1542
1543 gcc_assert (GET_CODE (*memop) == MEM);
1544 gcc_assert (!MEM_VOLATILE_P (*memop));
1545
1546 mask = s390_extract_part (*immop, QImode, def);
1547 part = s390_single_part (*immop, GET_MODE (*memop), QImode, def);
1548 gcc_assert (part >= 0);
1549
1550 *memop = adjust_address (*memop, QImode, part);
1551 *immop = gen_int_mode (mask, QImode);
1552 }
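/* E.g. an SImode AND of a memory operand with the immediate 0xffffff00
   only changes the least significant byte (big-endian offset 3), so the
   operation is narrowed to a QImode AND of that byte with 0x00, which
   can then be emitted as a single SI-type instruction (NI).  */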
1553
1554
1555 /* How to allocate a 'struct machine_function'. */
1556
1557 static struct machine_function *
1558 s390_init_machine_status (void)
1559 {
1560 return ggc_alloc_cleared_machine_function ();
1561 }
1562
1563 static void
1564 s390_option_override (void)
1565 {
1566 /* Set up function hooks. */
1567 init_machine_status = s390_init_machine_status;
1568
1569 /* Architecture mode defaults according to ABI. */
1570 if (!(target_flags_explicit & MASK_ZARCH))
1571 {
1572 if (TARGET_64BIT)
1573 target_flags |= MASK_ZARCH;
1574 else
1575 target_flags &= ~MASK_ZARCH;
1576 }
1577
 1578   /* Set the -march default in case it hasn't been specified on
 1579      the command line.  */
1580 if (s390_arch == PROCESSOR_max)
1581 {
 1582       s390_arch_string = TARGET_ZARCH ? "z900" : "g5";
1583 s390_arch = TARGET_ZARCH ? PROCESSOR_2064_Z900 : PROCESSOR_9672_G5;
1584 s390_arch_flags = processor_flags_table[(int)s390_arch];
1585 }
1586
1587 /* Determine processor to tune for. */
1588 if (s390_tune == PROCESSOR_max)
1589 {
1590 s390_tune = s390_arch;
1591 s390_tune_flags = s390_arch_flags;
1592 }
1593
1594 /* Sanity checks. */
1595 if (TARGET_ZARCH && !TARGET_CPU_ZARCH)
1596 error ("z/Architecture mode not supported on %s", s390_arch_string);
1597 if (TARGET_64BIT && !TARGET_ZARCH)
1598 error ("64-bit ABI not supported in ESA/390 mode");
1599
 1600   /* Use hardware DFP if available and not explicitly disabled by
 1601      the user.  E.g. with -m31 -march=z10 -mzarch.  */
1602 if (!(target_flags_explicit & MASK_HARD_DFP) && TARGET_DFP)
1603 target_flags |= MASK_HARD_DFP;
1604
1605 if (TARGET_HARD_DFP && !TARGET_DFP)
1606 {
1607 if (target_flags_explicit & MASK_HARD_DFP)
1608 {
1609 if (!TARGET_CPU_DFP)
1610 error ("hardware decimal floating point instructions"
1611 " not available on %s", s390_arch_string);
1612 if (!TARGET_ZARCH)
1613 error ("hardware decimal floating point instructions"
1614 " not available in ESA/390 mode");
1615 }
1616 else
1617 target_flags &= ~MASK_HARD_DFP;
1618 }
1619
1620 if ((target_flags_explicit & MASK_SOFT_FLOAT) && TARGET_SOFT_FLOAT)
1621 {
1622 if ((target_flags_explicit & MASK_HARD_DFP) && TARGET_HARD_DFP)
1623 error ("-mhard-dfp can%'t be used in conjunction with -msoft-float");
1624
1625 target_flags &= ~MASK_HARD_DFP;
1626 }
1627
1628 /* Set processor cost function. */
1629 switch (s390_tune)
1630 {
1631 case PROCESSOR_2084_Z990:
1632 s390_cost = &z990_cost;
1633 break;
1634 case PROCESSOR_2094_Z9_109:
1635 s390_cost = &z9_109_cost;
1636 break;
1637 case PROCESSOR_2097_Z10:
1638 s390_cost = &z10_cost;
1639 break;
1640 case PROCESSOR_2817_Z196:
1641 s390_cost = &z196_cost;
1642 break;
1643 case PROCESSOR_2827_ZEC12:
1644 s390_cost = &zEC12_cost;
1645 break;
1646 default:
1647 s390_cost = &z900_cost;
1648 }
1649
1650 if (TARGET_BACKCHAIN && TARGET_PACKED_STACK && TARGET_HARD_FLOAT)
1651 error ("-mbackchain -mpacked-stack -mhard-float are not supported "
1652 "in combination");
1653
1654 if (s390_stack_size)
1655 {
1656 if (s390_stack_guard >= s390_stack_size)
1657 error ("stack size must be greater than the stack guard value");
1658 else if (s390_stack_size > 1 << 16)
1659 error ("stack size must not be greater than 64k");
1660 }
1661 else if (s390_stack_guard)
1662 error ("-mstack-guard implies use of -mstack-size");
1663
1664 #ifdef TARGET_DEFAULT_LONG_DOUBLE_128
1665 if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
1666 target_flags |= MASK_LONG_DOUBLE_128;
1667 #endif
1668
1669 if (s390_tune == PROCESSOR_2097_Z10
1670 || s390_tune == PROCESSOR_2817_Z196
1671 || s390_tune == PROCESSOR_2827_ZEC12)
1672 {
1673 maybe_set_param_value (PARAM_MAX_UNROLLED_INSNS, 100,
1674 global_options.x_param_values,
1675 global_options_set.x_param_values);
1676 maybe_set_param_value (PARAM_MAX_UNROLL_TIMES, 32,
1677 global_options.x_param_values,
1678 global_options_set.x_param_values);
1679 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEELED_INSNS, 2000,
1680 global_options.x_param_values,
1681 global_options_set.x_param_values);
1682 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEEL_TIMES, 64,
1683 global_options.x_param_values,
1684 global_options_set.x_param_values);
1685 }
1686
1687 maybe_set_param_value (PARAM_MAX_PENDING_LIST_LENGTH, 256,
1688 global_options.x_param_values,
1689 global_options_set.x_param_values);
 1690   /* Values for loop prefetching.  */
1691 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE, 256,
1692 global_options.x_param_values,
1693 global_options_set.x_param_values);
1694 maybe_set_param_value (PARAM_L1_CACHE_SIZE, 128,
1695 global_options.x_param_values,
1696 global_options_set.x_param_values);
 1697   /* s390 has more than 2 cache levels and the sizes are much larger.
 1698      Since we are always running virtualized, assume that we only get
 1699      a small part of the caches above L1.  */
1700 maybe_set_param_value (PARAM_L2_CACHE_SIZE, 1500,
1701 global_options.x_param_values,
1702 global_options_set.x_param_values);
1703 maybe_set_param_value (PARAM_PREFETCH_MIN_INSN_TO_MEM_RATIO, 2,
1704 global_options.x_param_values,
1705 global_options_set.x_param_values);
1706 maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES, 6,
1707 global_options.x_param_values,
1708 global_options_set.x_param_values);
1709
1710 /* This cannot reside in s390_option_optimization_table since HAVE_prefetch
1711 requires the arch flags to be evaluated already. Since prefetching
1712 is beneficial on s390, we enable it if available. */
1713 if (flag_prefetch_loop_arrays < 0 && HAVE_prefetch && optimize >= 3)
1714 flag_prefetch_loop_arrays = 1;
1715
1716 /* Use the alternative scheduling-pressure algorithm by default. */
1717 maybe_set_param_value (PARAM_SCHED_PRESSURE_ALGORITHM, 2,
1718 global_options.x_param_values,
1719 global_options_set.x_param_values);
1720
1721 if (TARGET_TPF)
1722 {
1723 /* Don't emit DWARF3/4 unless specifically selected. The TPF
1724 debuggers do not yet support DWARF 3/4. */
1725 if (!global_options_set.x_dwarf_strict)
1726 dwarf_strict = 1;
1727 if (!global_options_set.x_dwarf_version)
1728 dwarf_version = 2;
1729 }
1730 }
1731
 1732 /* Map for the smallest class containing reg REGNO.  */
1733
1734 const enum reg_class regclass_map[FIRST_PSEUDO_REGISTER] =
1735 { GENERAL_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
1736 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
1737 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
1738 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
1739 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
1740 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
1741 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
1742 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
1743 ADDR_REGS, CC_REGS, ADDR_REGS, ADDR_REGS,
1744 ACCESS_REGS, ACCESS_REGS
1745 };
1746
 1747 /* Return the attribute type of INSN.  */
1748
1749 static enum attr_type
1750 s390_safe_attr_type (rtx insn)
1751 {
1752 if (recog_memoized (insn) >= 0)
1753 return get_attr_type (insn);
1754 else
1755 return TYPE_NONE;
1756 }
1757
1758 /* Return true if DISP is a valid short displacement. */
1759
1760 static bool
1761 s390_short_displacement (rtx disp)
1762 {
1763 /* No displacement is OK. */
1764 if (!disp)
1765 return true;
1766
1767 /* Without the long displacement facility we don't need to
 1768      distinguish between long and short displacements.  */
1769 if (!TARGET_LONG_DISPLACEMENT)
1770 return true;
1771
1772 /* Integer displacement in range. */
1773 if (GET_CODE (disp) == CONST_INT)
1774 return INTVAL (disp) >= 0 && INTVAL (disp) < 4096;
1775
1776 /* GOT offset is not OK, the GOT can be large. */
1777 if (GET_CODE (disp) == CONST
1778 && GET_CODE (XEXP (disp, 0)) == UNSPEC
1779 && (XINT (XEXP (disp, 0), 1) == UNSPEC_GOT
1780 || XINT (XEXP (disp, 0), 1) == UNSPEC_GOTNTPOFF))
1781 return false;
1782
1783 /* All other symbolic constants are literal pool references,
1784 which are OK as the literal pool must be small. */
1785 if (GET_CODE (disp) == CONST)
1786 return true;
1787
1788 return false;
1789 }
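/* E.g. a CONST_INT displacement of 4095 still fits the short (unsigned
   12-bit) form, while 4096 or any negative value requires one of the
   long-displacement instructions, which accept a signed 20-bit value.  */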
1790
 1791 /* Decompose an RTL expression ADDR for a memory address into
1792 its components, returned in OUT.
1793
1794 Returns false if ADDR is not a valid memory address, true
1795 otherwise. If OUT is NULL, don't return the components,
1796 but check for validity only.
1797
1798 Note: Only addresses in canonical form are recognized.
1799 LEGITIMIZE_ADDRESS should convert non-canonical forms to the
1800 canonical form so that they will be recognized. */
1801
1802 static int
1803 s390_decompose_address (rtx addr, struct s390_address *out)
1804 {
1805 HOST_WIDE_INT offset = 0;
1806 rtx base = NULL_RTX;
1807 rtx indx = NULL_RTX;
1808 rtx disp = NULL_RTX;
1809 rtx orig_disp;
1810 bool pointer = false;
1811 bool base_ptr = false;
1812 bool indx_ptr = false;
1813 bool literal_pool = false;
1814
1815 /* We may need to substitute the literal pool base register into the address
1816 below. However, at this point we do not know which register is going to
1817 be used as base, so we substitute the arg pointer register. This is going
1818 to be treated as holding a pointer below -- it shouldn't be used for any
1819 other purpose. */
1820 rtx fake_pool_base = gen_rtx_REG (Pmode, ARG_POINTER_REGNUM);
1821
1822 /* Decompose address into base + index + displacement. */
1823
1824 if (GET_CODE (addr) == REG || GET_CODE (addr) == UNSPEC)
1825 base = addr;
1826
1827 else if (GET_CODE (addr) == PLUS)
1828 {
1829 rtx op0 = XEXP (addr, 0);
1830 rtx op1 = XEXP (addr, 1);
1831 enum rtx_code code0 = GET_CODE (op0);
1832 enum rtx_code code1 = GET_CODE (op1);
1833
1834 if (code0 == REG || code0 == UNSPEC)
1835 {
1836 if (code1 == REG || code1 == UNSPEC)
1837 {
1838 indx = op0; /* index + base */
1839 base = op1;
1840 }
1841
1842 else
1843 {
1844 base = op0; /* base + displacement */
1845 disp = op1;
1846 }
1847 }
1848
1849 else if (code0 == PLUS)
1850 {
1851 indx = XEXP (op0, 0); /* index + base + disp */
1852 base = XEXP (op0, 1);
1853 disp = op1;
1854 }
1855
1856 else
1857 {
1858 return false;
1859 }
1860 }
1861
1862 else
1863 disp = addr; /* displacement */
1864
1865 /* Extract integer part of displacement. */
1866 orig_disp = disp;
1867 if (disp)
1868 {
1869 if (GET_CODE (disp) == CONST_INT)
1870 {
1871 offset = INTVAL (disp);
1872 disp = NULL_RTX;
1873 }
1874 else if (GET_CODE (disp) == CONST
1875 && GET_CODE (XEXP (disp, 0)) == PLUS
1876 && GET_CODE (XEXP (XEXP (disp, 0), 1)) == CONST_INT)
1877 {
1878 offset = INTVAL (XEXP (XEXP (disp, 0), 1));
1879 disp = XEXP (XEXP (disp, 0), 0);
1880 }
1881 }
1882
1883 /* Strip off CONST here to avoid special case tests later. */
1884 if (disp && GET_CODE (disp) == CONST)
1885 disp = XEXP (disp, 0);
1886
1887 /* We can convert literal pool addresses to
1888 displacements by basing them off the base register. */
1889 if (disp && GET_CODE (disp) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (disp))
1890 {
1891 /* Either base or index must be free to hold the base register. */
1892 if (!base)
1893 base = fake_pool_base, literal_pool = true;
1894 else if (!indx)
1895 indx = fake_pool_base, literal_pool = true;
1896 else
1897 return false;
1898
1899 /* Mark up the displacement. */
1900 disp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, disp),
1901 UNSPEC_LTREL_OFFSET);
1902 }
1903
1904 /* Validate base register. */
1905 if (base)
1906 {
1907 if (GET_CODE (base) == UNSPEC)
1908 switch (XINT (base, 1))
1909 {
1910 case UNSPEC_LTREF:
1911 if (!disp)
1912 disp = gen_rtx_UNSPEC (Pmode,
1913 gen_rtvec (1, XVECEXP (base, 0, 0)),
1914 UNSPEC_LTREL_OFFSET);
1915 else
1916 return false;
1917
1918 base = XVECEXP (base, 0, 1);
1919 break;
1920
1921 case UNSPEC_LTREL_BASE:
1922 if (XVECLEN (base, 0) == 1)
1923 base = fake_pool_base, literal_pool = true;
1924 else
1925 base = XVECEXP (base, 0, 1);
1926 break;
1927
1928 default:
1929 return false;
1930 }
1931
1932 if (!REG_P (base)
1933 || (GET_MODE (base) != SImode
1934 && GET_MODE (base) != Pmode))
1935 return false;
1936
1937 if (REGNO (base) == STACK_POINTER_REGNUM
1938 || REGNO (base) == FRAME_POINTER_REGNUM
1939 || ((reload_completed || reload_in_progress)
1940 && frame_pointer_needed
1941 && REGNO (base) == HARD_FRAME_POINTER_REGNUM)
1942 || REGNO (base) == ARG_POINTER_REGNUM
1943 || (flag_pic
1944 && REGNO (base) == PIC_OFFSET_TABLE_REGNUM))
1945 pointer = base_ptr = true;
1946
1947 if ((reload_completed || reload_in_progress)
1948 && base == cfun->machine->base_reg)
1949 pointer = base_ptr = literal_pool = true;
1950 }
1951
1952 /* Validate index register. */
1953 if (indx)
1954 {
1955 if (GET_CODE (indx) == UNSPEC)
1956 switch (XINT (indx, 1))
1957 {
1958 case UNSPEC_LTREF:
1959 if (!disp)
1960 disp = gen_rtx_UNSPEC (Pmode,
1961 gen_rtvec (1, XVECEXP (indx, 0, 0)),
1962 UNSPEC_LTREL_OFFSET);
1963 else
1964 return false;
1965
1966 indx = XVECEXP (indx, 0, 1);
1967 break;
1968
1969 case UNSPEC_LTREL_BASE:
1970 if (XVECLEN (indx, 0) == 1)
1971 indx = fake_pool_base, literal_pool = true;
1972 else
1973 indx = XVECEXP (indx, 0, 1);
1974 break;
1975
1976 default:
1977 return false;
1978 }
1979
1980 if (!REG_P (indx)
1981 || (GET_MODE (indx) != SImode
1982 && GET_MODE (indx) != Pmode))
1983 return false;
1984
1985 if (REGNO (indx) == STACK_POINTER_REGNUM
1986 || REGNO (indx) == FRAME_POINTER_REGNUM
1987 || ((reload_completed || reload_in_progress)
1988 && frame_pointer_needed
1989 && REGNO (indx) == HARD_FRAME_POINTER_REGNUM)
1990 || REGNO (indx) == ARG_POINTER_REGNUM
1991 || (flag_pic
1992 && REGNO (indx) == PIC_OFFSET_TABLE_REGNUM))
1993 pointer = indx_ptr = true;
1994
1995 if ((reload_completed || reload_in_progress)
1996 && indx == cfun->machine->base_reg)
1997 pointer = indx_ptr = literal_pool = true;
1998 }
1999
2000 /* Prefer to use pointer as base, not index. */
2001 if (base && indx && !base_ptr
2002 && (indx_ptr || (!REG_POINTER (base) && REG_POINTER (indx))))
2003 {
2004 rtx tmp = base;
2005 base = indx;
2006 indx = tmp;
2007 }
2008
2009 /* Validate displacement. */
2010 if (!disp)
2011 {
2012 /* If virtual registers are involved, the displacement will change later
2013 anyway as the virtual registers get eliminated. This could make a
2014 valid displacement invalid, but it is more likely to make an invalid
2015 displacement valid, because we sometimes access the register save area
2016 via negative offsets to one of those registers.
2017 Thus we don't check the displacement for validity here. If after
2018 elimination the displacement turns out to be invalid after all,
2019 this is fixed up by reload in any case. */
2020 if (base != arg_pointer_rtx
2021 && indx != arg_pointer_rtx
2022 && base != return_address_pointer_rtx
2023 && indx != return_address_pointer_rtx
2024 && base != frame_pointer_rtx
2025 && indx != frame_pointer_rtx
2026 && base != virtual_stack_vars_rtx
2027 && indx != virtual_stack_vars_rtx)
2028 if (!DISP_IN_RANGE (offset))
2029 return false;
2030 }
2031 else
2032 {
2033 /* All the special cases are pointers. */
2034 pointer = true;
2035
2036 /* In the small-PIC case, the linker converts @GOT
2037 and @GOTNTPOFF offsets to possible displacements. */
2038 if (GET_CODE (disp) == UNSPEC
2039 && (XINT (disp, 1) == UNSPEC_GOT
2040 || XINT (disp, 1) == UNSPEC_GOTNTPOFF)
2041 && flag_pic == 1)
2042 {
2043 ;
2044 }
2045
2046 /* Accept pool label offsets. */
2047 else if (GET_CODE (disp) == UNSPEC
2048 && XINT (disp, 1) == UNSPEC_POOL_OFFSET)
2049 ;
2050
2051 /* Accept literal pool references. */
2052 else if (GET_CODE (disp) == UNSPEC
2053 && XINT (disp, 1) == UNSPEC_LTREL_OFFSET)
2054 {
2055 /* In case CSE pulled a non-literal-pool reference out of
2056 the pool we have to reject the address. This is
2057 especially important when loading the GOT pointer on
2058 non-zarch CPUs. In this case the literal pool contains an
2059 lt-relative offset to the _GLOBAL_OFFSET_TABLE_ label which
2060 will most likely exceed the displacement range. */
2061 if (GET_CODE (XVECEXP (disp, 0, 0)) != SYMBOL_REF
2062 || !CONSTANT_POOL_ADDRESS_P (XVECEXP (disp, 0, 0)))
2063 return false;
2064
2065 orig_disp = gen_rtx_CONST (Pmode, disp);
2066 if (offset)
2067 {
2068 /* If we have an offset, make sure it does not
2069 exceed the size of the constant pool entry. */
2070 rtx sym = XVECEXP (disp, 0, 0);
2071 if (offset >= GET_MODE_SIZE (get_pool_mode (sym)))
2072 return false;
2073
2074 orig_disp = plus_constant (Pmode, orig_disp, offset);
2075 }
2076 }
2077
2078 else
2079 return false;
2080 }
2081
2082 if (!base && !indx)
2083 pointer = true;
2084
2085 if (out)
2086 {
2087 out->base = base;
2088 out->indx = indx;
2089 out->disp = orig_disp;
2090 out->pointer = pointer;
2091 out->literal_pool = literal_pool;
2092 }
2093
2094 return true;
2095 }
2096
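/* For illustration, a few address shapes the routine above accepts
   (register numbers are arbitrary):

     (reg 2)                                      base only
     (plus (reg 2) (const_int 4000))              base + displacement
     (plus (plus (reg 3) (reg 2)) (const_int 8))  index + base + displacement

   A SYMBOL_REF with CONSTANT_POOL_ADDRESS_P set is rewritten into an
   UNSPEC_LTREL_OFFSET displacement based off the literal pool base
   register.  */
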
2097 /* Decompose an RTL expression OP for a shift count into its components,
2098 and return the base register in BASE and the offset in OFFSET.
2099
2100 Return true if OP is a valid shift count, false if not. */
2101
2102 bool
2103 s390_decompose_shift_count (rtx op, rtx *base, HOST_WIDE_INT *offset)
2104 {
2105 HOST_WIDE_INT off = 0;
2106
2107 /* We can have an integer constant, an address register,
2108 or a sum of the two. */
2109 if (GET_CODE (op) == CONST_INT)
2110 {
2111 off = INTVAL (op);
2112 op = NULL_RTX;
2113 }
2114 if (op && GET_CODE (op) == PLUS && GET_CODE (XEXP (op, 1)) == CONST_INT)
2115 {
2116 off = INTVAL (XEXP (op, 1));
2117 op = XEXP (op, 0);
2118 }
2119 while (op && GET_CODE (op) == SUBREG)
2120 op = SUBREG_REG (op);
2121
2122 if (op && GET_CODE (op) != REG)
2123 return false;
2124
2125 if (offset)
2126 *offset = off;
2127 if (base)
2128 *base = op;
2129
2130 return true;
2131 }
2132
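/* By way of example, each of the following is a valid shift count for
   the routine above (register numbers are arbitrary):

     (const_int 5)                    offset 5, no base register
     (reg 1)                          base register, offset 0
     (plus (reg 1) (const_int 7))     base register, offset 7
     (subreg:SI (reg:DI 1) 4)         SUBREGs are stripped first  */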
2133
2134 /* Return true if OP is a memory operand whose address is valid and contains no index register. */
2135
2136 bool
2137 s390_legitimate_address_without_index_p (rtx op)
2138 {
2139 struct s390_address addr;
2140
2141 if (!s390_decompose_address (XEXP (op, 0), &addr))
2142 return false;
2143 if (addr.indx)
2144 return false;
2145
2146 return true;
2147 }
2148
2149
2150 /* Return TRUE if ADDR is an operand valid for a load/store relative
2151 instruction. Be aware that the alignment of the operand needs to
2152 be checked separately.
2153 Valid addresses are single references or a sum of a reference and a
2154 constant integer. Return these parts in SYMREF and ADDEND. You can
2155 pass NULL in SYMREF and/or ADDEND if you are not interested in these
2156 values. Literal pool references are *not* considered symbol
2157 references. */
2158
2159 static bool
2160 s390_loadrelative_operand_p (rtx addr, rtx *symref, HOST_WIDE_INT *addend)
2161 {
2162 HOST_WIDE_INT tmpaddend = 0;
2163
2164 if (GET_CODE (addr) == CONST)
2165 addr = XEXP (addr, 0);
2166
2167 if (GET_CODE (addr) == PLUS)
2168 {
2169 if (!CONST_INT_P (XEXP (addr, 1)))
2170 return false;
2171
2172 tmpaddend = INTVAL (XEXP (addr, 1));
2173 addr = XEXP (addr, 0);
2174 }
2175
2176 if ((GET_CODE (addr) == SYMBOL_REF && !CONSTANT_POOL_ADDRESS_P (addr))
2177 || (GET_CODE (addr) == UNSPEC
2178 && (XINT (addr, 1) == UNSPEC_GOTENT
2179 || (TARGET_CPU_ZARCH && XINT (addr, 1) == UNSPEC_PLT))))
2180 {
2181 if (symref)
2182 *symref = addr;
2183 if (addend)
2184 *addend = tmpaddend;
2185
2186 return true;
2187 }
2188 return false;
2189 }
2190
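/* Operands accepted by the routine above include, for example,

     (symbol_ref "foo")
     (const (plus (symbol_ref "foo") (const_int 8)))
     (const (unspec [(symbol_ref "foo")] UNSPEC_GOTENT))

   whereas a SYMBOL_REF with CONSTANT_POOL_ADDRESS_P set is rejected.  */
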
2191 /* Return true if the address in OP is valid for constraint letter C
2192 if wrapped in a MEM rtx. Set LIT_POOL_OK to true if literal
2193 pool MEMs should be accepted. Only the Q, R, S, T constraint
2194 letters are allowed for C. */
2195
2196 static int
2197 s390_check_qrst_address (char c, rtx op, bool lit_pool_ok)
2198 {
2199 struct s390_address addr;
2200 bool decomposed = false;
2201
2202 /* This check makes sure that no symbolic address (except literal
2203 pool references) is accepted by the R or T constraints. */
2204 if (s390_loadrelative_operand_p (op, NULL, NULL))
2205 return 0;
2206
2207 /* Ensure literal pool references are only accepted if LIT_POOL_OK. */
2208 if (!lit_pool_ok)
2209 {
2210 if (!s390_decompose_address (op, &addr))
2211 return 0;
2212 if (addr.literal_pool)
2213 return 0;
2214 decomposed = true;
2215 }
2216
2217 switch (c)
2218 {
2219 case 'Q': /* no index short displacement */
2220 if (!decomposed && !s390_decompose_address (op, &addr))
2221 return 0;
2222 if (addr.indx)
2223 return 0;
2224 if (!s390_short_displacement (addr.disp))
2225 return 0;
2226 break;
2227
2228 case 'R': /* with index short displacement */
2229 if (TARGET_LONG_DISPLACEMENT)
2230 {
2231 if (!decomposed && !s390_decompose_address (op, &addr))
2232 return 0;
2233 if (!s390_short_displacement (addr.disp))
2234 return 0;
2235 }
2236 /* Any invalid address here will be fixed up by reload,
2237 so accept it for the most generic constraint. */
2238 break;
2239
2240 case 'S': /* no index long displacement */
2241 if (!TARGET_LONG_DISPLACEMENT)
2242 return 0;
2243 if (!decomposed && !s390_decompose_address (op, &addr))
2244 return 0;
2245 if (addr.indx)
2246 return 0;
2247 if (s390_short_displacement (addr.disp))
2248 return 0;
2249 break;
2250
2251 case 'T': /* with index long displacement */
2252 if (!TARGET_LONG_DISPLACEMENT)
2253 return 0;
2254 /* Any invalid address here will be fixed up by reload,
2255 so accept it for the most generic constraint. */
2256 if ((decomposed || s390_decompose_address (op, &addr))
2257 && s390_short_displacement (addr.disp))
2258 return 0;
2259 break;
2260 default:
2261 return 0;
2262 }
2263 return 1;
2264 }
2265
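/* Summary of the Q/R/S/T letters handled above (the 12-bit unsigned
   form is the "short" displacement, the 20-bit signed form the "long"
   one):

     Q   no index register, short displacement
     R   index register allowed, short displacement
     S   no index register, displacement requiring the long form
     T   index register allowed, long displacement

   S and T are only available with TARGET_LONG_DISPLACEMENT.  */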
2266
2267 /* Evaluates constraint strings described by the regular expression
2268 ([A|B|Z](Q|R|S|T))|U|W|Y and returns 1 if OP is a valid operand for
2269 the constraint given in STR, and 0 otherwise. */
2270
2271 int
2272 s390_mem_constraint (const char *str, rtx op)
2273 {
2274 char c = str[0];
2275
2276 switch (c)
2277 {
2278 case 'A':
2279 /* Check for offsettable variants of memory constraints. */
2280 if (!MEM_P (op) || MEM_VOLATILE_P (op))
2281 return 0;
2282 if ((reload_completed || reload_in_progress)
2283 ? !offsettable_memref_p (op) : !offsettable_nonstrict_memref_p (op))
2284 return 0;
2285 return s390_check_qrst_address (str[1], XEXP (op, 0), true);
2286 case 'B':
2287 /* Check for non-literal-pool variants of memory constraints. */
2288 if (!MEM_P (op))
2289 return 0;
2290 return s390_check_qrst_address (str[1], XEXP (op, 0), false);
2291 case 'Q':
2292 case 'R':
2293 case 'S':
2294 case 'T':
2295 if (GET_CODE (op) != MEM)
2296 return 0;
2297 return s390_check_qrst_address (c, XEXP (op, 0), true);
2298 case 'U':
2299 return (s390_check_qrst_address ('Q', op, true)
2300 || s390_check_qrst_address ('R', op, true));
2301 case 'W':
2302 return (s390_check_qrst_address ('S', op, true)
2303 || s390_check_qrst_address ('T', op, true));
2304 case 'Y':
2305 /* Simply check for the basic form of a shift count. Reload will
2306 take care of making sure we have a proper base register. */
2307 if (!s390_decompose_shift_count (op, NULL, NULL))
2308 return 0;
2309 break;
2310 case 'Z':
2311 return s390_check_qrst_address (str[1], op, true);
2312 default:
2313 return 0;
2314 }
2315 return 1;
2316 }
2317
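/* Legend for the constraint letters dispatched above:

     A<x>  offsettable, non-volatile variant of Q/R/S/T (x in {Q,R,S,T})
     B<x>  variant of Q/R/S/T that rejects literal pool addresses
     Z<x>  like Q/R/S/T, but OP is the address itself rather than a MEM
     U     Q or R, applied to an address
     W     S or T, applied to an address
     Y     valid shift count  */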
2318
2319 /* Evaluates constraint strings starting with letter O. Input
2320 parameter C is the letter following the "O" in the constraint
2321 string. Returns 1 if VALUE meets the respective constraint and 0
2322 otherwise. */
2323
2324 int
2325 s390_O_constraint_str (const char c, HOST_WIDE_INT value)
2326 {
2327 if (!TARGET_EXTIMM)
2328 return 0;
2329
2330 switch (c)
2331 {
2332 case 's':
2333 return trunc_int_for_mode (value, SImode) == value;
2334
2335 case 'p':
2336 return value == 0
2337 || s390_single_part (GEN_INT (value), DImode, SImode, 0) == 1;
2338
2339 case 'n':
2340 return s390_single_part (GEN_INT (value - 1), DImode, SImode, -1) == 1;
2341
2342 default:
2343 gcc_unreachable ();
2344 }
2345 }
2346
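/* For example, the "Os" case above accepts exactly those values that
   survive truncation to SImode unchanged, i.e. values representable
   as a signed 32-bit immediate; all three variants are only meaningful
   with TARGET_EXTIMM, which provides the extended-immediate facility.  */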
2347
2348 /* Evaluates constraint strings starting with letter N. Parameter STR
2349 contains the letters following letter "N" in the constraint string.
2350 Returns true if VALUE matches the constraint. */
2351
2352 int
2353 s390_N_constraint_str (const char *str, HOST_WIDE_INT value)
2354 {
2355 enum machine_mode mode, part_mode;
2356 int def;
2357 int part, part_goal;
2358
2359
2360 if (str[0] == 'x')
2361 part_goal = -1;
2362 else
2363 part_goal = str[0] - '0';
2364
2365 switch (str[1])
2366 {
2367 case 'Q':
2368 part_mode = QImode;
2369 break;
2370 case 'H':
2371 part_mode = HImode;
2372 break;
2373 case 'S':
2374 part_mode = SImode;
2375 break;
2376 default:
2377 return 0;
2378 }
2379
2380 switch (str[2])
2381 {
2382 case 'H':
2383 mode = HImode;
2384 break;
2385 case 'S':
2386 mode = SImode;
2387 break;
2388 case 'D':
2389 mode = DImode;
2390 break;
2391 default:
2392 return 0;
2393 }
2394
2395 switch (str[3])
2396 {
2397 case '0':
2398 def = 0;
2399 break;
2400 case 'F':
2401 def = -1;
2402 break;
2403 default:
2404 return 0;
2405 }
2406
2407 if (GET_MODE_SIZE (mode) <= GET_MODE_SIZE (part_mode))
2408 return 0;
2409
2410 part = s390_single_part (GEN_INT (value), mode, part_mode, def);
2411 if (part < 0)
2412 return 0;
2413 if (part_goal != -1 && part_goal != part)
2414 return 0;
2415
2416 return 1;
2417 }
2418
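/* The STR suffix decoded above has the form <part><part_mode><mode><def>:
   a part number (or 'x' for "any part"), the mode of a single part, the
   mode of the whole value, and the value ('0' or 'F') the remaining
   parts must have.  A (hypothetical) suffix "xQD0", for instance, would
   match any DImode value in which exactly one QImode part is nonzero
   while all other parts are zero.  */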
2419
2420 /* Returns true if the input parameter VALUE is a float zero. */
2421
2422 int
2423 s390_float_const_zero_p (rtx value)
2424 {
2425 return (GET_MODE_CLASS (GET_MODE (value)) == MODE_FLOAT
2426 && value == CONST0_RTX (GET_MODE (value)));
2427 }
2428
2429 /* Implement TARGET_REGISTER_MOVE_COST. */
2430
2431 static int
2432 s390_register_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
2433 reg_class_t from, reg_class_t to)
2434 {
2435 /* On s390, copy between fprs and gprs is expensive. */
2436 if ((reg_classes_intersect_p (from, GENERAL_REGS)
2437 && reg_classes_intersect_p (to, FP_REGS))
2438 || (reg_classes_intersect_p (from, FP_REGS)
2439 && reg_classes_intersect_p (to, GENERAL_REGS)))
2440 return 10;
2441
2442 return 1;
2443 }
2444
2445 /* Implement TARGET_MEMORY_MOVE_COST. */
2446
2447 static int
2448 s390_memory_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
2449 reg_class_t rclass ATTRIBUTE_UNUSED,
2450 bool in ATTRIBUTE_UNUSED)
2451 {
2452 return 1;
2453 }
2454
2455 /* Compute a (partial) cost for rtx X. Return true if the complete
2456 cost has been computed, and false if subexpressions should be
2457 scanned. In either case, *TOTAL contains the cost result.
2458 CODE contains GET_CODE (x), OUTER_CODE contains the code
2459 of the superexpression of x. */
2460
2461 static bool
2462 s390_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
2463 int *total, bool speed ATTRIBUTE_UNUSED)
2464 {
2465 switch (code)
2466 {
2467 case CONST:
2468 case CONST_INT:
2469 case LABEL_REF:
2470 case SYMBOL_REF:
2471 case CONST_DOUBLE:
2472 case MEM:
2473 *total = 0;
2474 return true;
2475
2476 case ASHIFT:
2477 case ASHIFTRT:
2478 case LSHIFTRT:
2479 case ROTATE:
2480 case ROTATERT:
2481 case AND:
2482 case IOR:
2483 case XOR:
2484 case NEG:
2485 case NOT:
2486 *total = COSTS_N_INSNS (1);
2487 return false;
2488
2489 case PLUS:
2490 case MINUS:
2491 *total = COSTS_N_INSNS (1);
2492 return false;
2493
2494 case MULT:
2495 switch (GET_MODE (x))
2496 {
2497 case SImode:
2498 {
2499 rtx left = XEXP (x, 0);
2500 rtx right = XEXP (x, 1);
2501 if (GET_CODE (right) == CONST_INT
2502 && CONST_OK_FOR_K (INTVAL (right)))
2503 *total = s390_cost->mhi;
2504 else if (GET_CODE (left) == SIGN_EXTEND)
2505 *total = s390_cost->mh;
2506 else
2507 *total = s390_cost->ms; /* msr, ms, msy */
2508 break;
2509 }
2510 case DImode:
2511 {
2512 rtx left = XEXP (x, 0);
2513 rtx right = XEXP (x, 1);
2514 if (TARGET_ZARCH)
2515 {
2516 if (GET_CODE (right) == CONST_INT
2517 && CONST_OK_FOR_K (INTVAL (right)))
2518 *total = s390_cost->mghi;
2519 else if (GET_CODE (left) == SIGN_EXTEND)
2520 *total = s390_cost->msgf;
2521 else
2522 *total = s390_cost->msg; /* msgr, msg */
2523 }
2524 else /* TARGET_31BIT */
2525 {
2526 if (GET_CODE (left) == SIGN_EXTEND
2527 && GET_CODE (right) == SIGN_EXTEND)
2528 /* mulsidi case: mr, m */
2529 *total = s390_cost->m;
2530 else if (GET_CODE (left) == ZERO_EXTEND
2531 && GET_CODE (right) == ZERO_EXTEND
2532 && TARGET_CPU_ZARCH)
2533 /* umulsidi case: ml, mlr */
2534 *total = s390_cost->ml;
2535 else
2536 /* Complex calculation is required. */
2537 *total = COSTS_N_INSNS (40);
2538 }
2539 break;
2540 }
2541 case SFmode:
2542 case DFmode:
2543 *total = s390_cost->mult_df;
2544 break;
2545 case TFmode:
2546 *total = s390_cost->mxbr;
2547 break;
2548 default:
2549 return false;
2550 }
2551 return false;
2552
2553 case FMA:
2554 switch (GET_MODE (x))
2555 {
2556 case DFmode:
2557 *total = s390_cost->madbr;
2558 break;
2559 case SFmode:
2560 *total = s390_cost->maebr;
2561 break;
2562 default:
2563 return false;
2564 }
2565 /* A negated third argument is free: FMSUB. */
2566 if (GET_CODE (XEXP (x, 2)) == NEG)
2567 {
2568 *total += (rtx_cost (XEXP (x, 0), FMA, 0, speed)
2569 + rtx_cost (XEXP (x, 1), FMA, 1, speed)
2570 + rtx_cost (XEXP (XEXP (x, 2), 0), FMA, 2, speed));
2571 return true;
2572 }
2573 return false;
2574
2575 case UDIV:
2576 case UMOD:
2577 if (GET_MODE (x) == TImode) /* 128 bit division */
2578 *total = s390_cost->dlgr;
2579 else if (GET_MODE (x) == DImode)
2580 {
2581 rtx right = XEXP (x, 1);
2582 if (GET_CODE (right) == ZERO_EXTEND) /* 64 by 32 bit division */
2583 *total = s390_cost->dlr;
2584 else /* 64 by 64 bit division */
2585 *total = s390_cost->dlgr;
2586 }
2587 else if (GET_MODE (x) == SImode) /* 32 bit division */
2588 *total = s390_cost->dlr;
2589 return false;
2590
2591 case DIV:
2592 case MOD:
2593 if (GET_MODE (x) == DImode)
2594 {
2595 rtx right = XEXP (x, 1);
2596 if (GET_CODE (right) == ZERO_EXTEND) /* 64 by 32 bit division */
2597 if (TARGET_ZARCH)
2598 *total = s390_cost->dsgfr;
2599 else
2600 *total = s390_cost->dr;
2601 else /* 64 by 64 bit division */
2602 *total = s390_cost->dsgr;
2603 }
2604 else if (GET_MODE (x) == SImode) /* 32 bit division */
2605 *total = s390_cost->dlr;
2606 else if (GET_MODE (x) == SFmode)
2607 {
2608 *total = s390_cost->debr;
2609 }
2610 else if (GET_MODE (x) == DFmode)
2611 {
2612 *total = s390_cost->ddbr;
2613 }
2614 else if (GET_MODE (x) == TFmode)
2615 {
2616 *total = s390_cost->dxbr;
2617 }
2618 return false;
2619
2620 case SQRT:
2621 if (GET_MODE (x) == SFmode)
2622 *total = s390_cost->sqebr;
2623 else if (GET_MODE (x) == DFmode)
2624 *total = s390_cost->sqdbr;
2625 else /* TFmode */
2626 *total = s390_cost->sqxbr;
2627 return false;
2628
2629 case SIGN_EXTEND:
2630 case ZERO_EXTEND:
2631 if (outer_code == MULT || outer_code == DIV || outer_code == MOD
2632 || outer_code == PLUS || outer_code == MINUS
2633 || outer_code == COMPARE)
2634 *total = 0;
2635 return false;
2636
2637 case COMPARE:
2638 *total = COSTS_N_INSNS (1);
2639 if (GET_CODE (XEXP (x, 0)) == AND
2640 && GET_CODE (XEXP (x, 1)) == CONST_INT
2641 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
2642 {
2643 rtx op0 = XEXP (XEXP (x, 0), 0);
2644 rtx op1 = XEXP (XEXP (x, 0), 1);
2645 rtx op2 = XEXP (x, 1);
2646
2647 if (memory_operand (op0, GET_MODE (op0))
2648 && s390_tm_ccmode (op1, op2, 0) != VOIDmode)
2649 return true;
2650 if (register_operand (op0, GET_MODE (op0))
2651 && s390_tm_ccmode (op1, op2, 1) != VOIDmode)
2652 return true;
2653 }
2654 return false;
2655
2656 default:
2657 return false;
2658 }
2659 }
2660
2661 /* Return the cost of an address rtx ADDR. */
2662
2663 static int
2664 s390_address_cost (rtx addr, enum machine_mode mode ATTRIBUTE_UNUSED,
2665 addr_space_t as ATTRIBUTE_UNUSED,
2666 bool speed ATTRIBUTE_UNUSED)
2667 {
2668 struct s390_address ad;
2669 if (!s390_decompose_address (addr, &ad))
2670 return 1000;
2671
2672 return ad.indx ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (1);
2673 }
2674
2675 /* If OP is a SYMBOL_REF of a thread-local symbol, return its TLS mode,
2676 otherwise return 0. */
2677
2678 int
2679 tls_symbolic_operand (rtx op)
2680 {
2681 if (GET_CODE (op) != SYMBOL_REF)
2682 return 0;
2683 return SYMBOL_REF_TLS_MODEL (op);
2684 }
2685 \f
2686 /* Split DImode access register reference REG (on 64-bit) into its constituent
2687 low and high parts, and store them into LO and HI. Note that gen_lowpart/
2688 gen_highpart cannot be used as they assume all registers are word-sized,
2689 while our access registers have only half that size. */
2690
2691 void
2692 s390_split_access_reg (rtx reg, rtx *lo, rtx *hi)
2693 {
2694 gcc_assert (TARGET_64BIT);
2695 gcc_assert (ACCESS_REG_P (reg));
2696 gcc_assert (GET_MODE (reg) == DImode);
2697 gcc_assert (!(REGNO (reg) & 1));
2698
2699 *lo = gen_rtx_REG (SImode, REGNO (reg) + 1);
2700 *hi = gen_rtx_REG (SImode, REGNO (reg));
2701 }
2702
2703 /* Return true if OP contains a symbol reference. */
2704
2705 bool
2706 symbolic_reference_mentioned_p (rtx op)
2707 {
2708 const char *fmt;
2709 int i;
2710
2711 if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
2712 return 1;
2713
2714 fmt = GET_RTX_FORMAT (GET_CODE (op));
2715 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
2716 {
2717 if (fmt[i] == 'E')
2718 {
2719 int j;
2720
2721 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
2722 if (symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
2723 return 1;
2724 }
2725
2726 else if (fmt[i] == 'e' && symbolic_reference_mentioned_p (XEXP (op, i)))
2727 return 1;
2728 }
2729
2730 return 0;
2731 }
2732
2733 /* Return true if OP contains a reference to a thread-local symbol. */
2734
2735 bool
2736 tls_symbolic_reference_mentioned_p (rtx op)
2737 {
2738 const char *fmt;
2739 int i;
2740
2741 if (GET_CODE (op) == SYMBOL_REF)
2742 return tls_symbolic_operand (op);
2743
2744 fmt = GET_RTX_FORMAT (GET_CODE (op));
2745 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
2746 {
2747 if (fmt[i] == 'E')
2748 {
2749 int j;
2750
2751 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
2752 if (tls_symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
2753 return true;
2754 }
2755
2756 else if (fmt[i] == 'e' && tls_symbolic_reference_mentioned_p (XEXP (op, i)))
2757 return true;
2758 }
2759
2760 return false;
2761 }
2762
2763
2764 /* Return true if OP is a legitimate general operand when
2765 generating PIC code. It is given that flag_pic is on
2766 and that OP satisfies CONSTANT_P or is a CONST_DOUBLE. */
2767
2768 int
2769 legitimate_pic_operand_p (rtx op)
2770 {
2771 /* Accept all non-symbolic constants. */
2772 if (!SYMBOLIC_CONST (op))
2773 return 1;
2774
2775 /* Reject everything else; must be handled
2776 via emit_symbolic_move. */
2777 return 0;
2778 }
2779
2780 /* Returns true if the constant value OP is a legitimate general operand.
2781 It is given that OP satisfies CONSTANT_P or is a CONST_DOUBLE. */
2782
2783 static bool
2784 s390_legitimate_constant_p (enum machine_mode mode, rtx op)
2785 {
2786 /* Accept all non-symbolic constants. */
2787 if (!SYMBOLIC_CONST (op))
2788 return 1;
2789
2790 /* Accept immediate LARL operands. */
2791 if (TARGET_CPU_ZARCH && larl_operand (op, mode))
2792 return 1;
2793
2794 /* Thread-local symbols are never legal constants. This is
2795 so that emit_call knows that computing such addresses
2796 might require a function call. */
2797 if (TLS_SYMBOLIC_CONST (op))
2798 return 0;
2799
2800 /* In the PIC case, symbolic constants must *not* be
2801 forced into the literal pool. We accept them here,
2802 so that they will be handled by emit_symbolic_move. */
2803 if (flag_pic)
2804 return 1;
2805
2806 /* All remaining non-PIC symbolic constants are
2807 forced into the literal pool. */
2808 return 0;
2809 }
2810
2811 /* Determine if it's legal to put X into the constant pool. This
2812 is not possible if X contains the address of a symbol that is
2813 not constant (TLS) or not known at final link time (PIC). */
2814
2815 static bool
2816 s390_cannot_force_const_mem (enum machine_mode mode, rtx x)
2817 {
2818 switch (GET_CODE (x))
2819 {
2820 case CONST_INT:
2821 case CONST_DOUBLE:
2822 /* Accept all non-symbolic constants. */
2823 return false;
2824
2825 case LABEL_REF:
2826 /* Labels are OK iff we are non-PIC. */
2827 return flag_pic != 0;
2828
2829 case SYMBOL_REF:
2830 /* 'Naked' TLS symbol references are never OK,
2831 non-TLS symbols are OK iff we are non-PIC. */
2832 if (tls_symbolic_operand (x))
2833 return true;
2834 else
2835 return flag_pic != 0;
2836
2837 case CONST:
2838 return s390_cannot_force_const_mem (mode, XEXP (x, 0));
2839 case PLUS:
2840 case MINUS:
2841 return s390_cannot_force_const_mem (mode, XEXP (x, 0))
2842 || s390_cannot_force_const_mem (mode, XEXP (x, 1));
2843
2844 case UNSPEC:
2845 switch (XINT (x, 1))
2846 {
2847 /* Only lt-relative or GOT-relative UNSPECs are OK. */
2848 case UNSPEC_LTREL_OFFSET:
2849 case UNSPEC_GOT:
2850 case UNSPEC_GOTOFF:
2851 case UNSPEC_PLTOFF:
2852 case UNSPEC_TLSGD:
2853 case UNSPEC_TLSLDM:
2854 case UNSPEC_NTPOFF:
2855 case UNSPEC_DTPOFF:
2856 case UNSPEC_GOTNTPOFF:
2857 case UNSPEC_INDNTPOFF:
2858 return false;
2859
2860 /* If the literal pool shares the code section, execute template
2861 placeholders may be put into the pool as well. */
2862 case UNSPEC_INSN:
2863 return TARGET_CPU_ZARCH;
2864
2865 default:
2866 return true;
2867 }
2868 break;
2869
2870 default:
2871 gcc_unreachable ();
2872 }
2873 }
2874
2875 /* Returns true if the constant value OP is a legitimate general
2876 operand during and after reload. The difference from
2877 legitimate_constant_p is that this function will not accept
2878 a constant that would need to be forced into the literal pool
2879 before it can be used as an operand.
2880 This function accepts all constants which can be loaded directly
2881 into a GPR. */
2882
2883 bool
2884 legitimate_reload_constant_p (rtx op)
2885 {
2886 /* Accept la(y) operands. */
2887 if (GET_CODE (op) == CONST_INT
2888 && DISP_IN_RANGE (INTVAL (op)))
2889 return true;
2890
2891 /* Accept l(g)hi/l(g)fi operands. */
2892 if (GET_CODE (op) == CONST_INT
2893 && (CONST_OK_FOR_K (INTVAL (op)) || CONST_OK_FOR_Os (INTVAL (op))))
2894 return true;
2895
2896 /* Accept lliXX operands. */
2897 if (TARGET_ZARCH
2898 && GET_CODE (op) == CONST_INT
2899 && trunc_int_for_mode (INTVAL (op), word_mode) == INTVAL (op)
2900 && s390_single_part (op, word_mode, HImode, 0) >= 0)
2901 return true;
2902
2903 if (TARGET_EXTIMM
2904 && GET_CODE (op) == CONST_INT
2905 && trunc_int_for_mode (INTVAL (op), word_mode) == INTVAL (op)
2906 && s390_single_part (op, word_mode, SImode, 0) >= 0)
2907 return true;
2908
2909 /* Accept larl operands. */
2910 if (TARGET_CPU_ZARCH
2911 && larl_operand (op, VOIDmode))
2912 return true;
2913
2914 /* Accept floating-point zero operands that fit into a single GPR. */
2915 if (GET_CODE (op) == CONST_DOUBLE
2916 && s390_float_const_zero_p (op)
2917 && GET_MODE_SIZE (GET_MODE (op)) <= UNITS_PER_WORD)
2918 return true;
2919
2920 /* Accept double-word operands that can be split. */
2921 if (GET_CODE (op) == CONST_INT
2922 && trunc_int_for_mode (INTVAL (op), word_mode) != INTVAL (op))
2923 {
2924 enum machine_mode dword_mode = word_mode == SImode ? DImode : TImode;
2925 rtx hi = operand_subword (op, 0, 0, dword_mode);
2926 rtx lo = operand_subword (op, 1, 0, dword_mode);
2927 return legitimate_reload_constant_p (hi)
2928 && legitimate_reload_constant_p (lo);
2929 }
2930
2931 /* Everything else cannot be handled without reload. */
2932 return false;
2933 }
2934
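/* As an example of the double-word case above: on a 31-bit target
   (word_mode == SImode) the DImode constant 0x100000001 is split into
   the two SImode subwords 1 and 1, each of which satisfies the earlier
   la(y)/l(g)hi checks, so the constant as a whole is accepted.  */
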
2935 /* Returns true if the constant value OP is a legitimate fp operand
2936 during and after reload.
2937 This function accepts all constants which can be loaded directly
2938 into an FPR. */
2939
2940 static bool
2941 legitimate_reload_fp_constant_p (rtx op)
2942 {
2943 /* Accept floating-point zero operands if the load zero instruction
2944 can be used. Prior to z196 the load fp zero instruction caused a
2945 performance penalty if the result was used as a BFP number. */
2946 if (TARGET_Z196
2947 && GET_CODE (op) == CONST_DOUBLE
2948 && s390_float_const_zero_p (op))
2949 return true;
2950
2951 return false;
2952 }
2953
2954 /* Given an rtx OP being reloaded into a reg required to be in class RCLASS,
2955 return the class of reg to actually use. */
2956
2957 static reg_class_t
2958 s390_preferred_reload_class (rtx op, reg_class_t rclass)
2959 {
2960 switch (GET_CODE (op))
2961 {
2962 /* Constants we cannot reload into general registers
2963 must be forced into the literal pool. */
2964 case CONST_DOUBLE:
2965 case CONST_INT:
2966 if (reg_class_subset_p (GENERAL_REGS, rclass)
2967 && legitimate_reload_constant_p (op))
2968 return GENERAL_REGS;
2969 else if (reg_class_subset_p (ADDR_REGS, rclass)
2970 && legitimate_reload_constant_p (op))
2971 return ADDR_REGS;
2972 else if (reg_class_subset_p (FP_REGS, rclass)
2973 && legitimate_reload_fp_constant_p (op))
2974 return FP_REGS;
2975 return NO_REGS;
2976
2977 /* If a symbolic constant or a PLUS is reloaded,
2978 it is most likely being used as an address, so
2979 prefer ADDR_REGS. If RCLASS is not a superset
2980 of ADDR_REGS, e.g. FP_REGS, reject this reload. */
2981 case CONST:
2982 /* A larl operand with an odd addend will get fixed via secondary
2983 reload, so don't request it to be pushed into the literal
2984 pool. */
2985 if (TARGET_CPU_ZARCH
2986 && GET_CODE (XEXP (op, 0)) == PLUS
2987 && GET_CODE (XEXP (XEXP(op, 0), 0)) == SYMBOL_REF
2988 && GET_CODE (XEXP (XEXP(op, 0), 1)) == CONST_INT)
2989 {
2990 if (reg_class_subset_p (ADDR_REGS, rclass))
2991 return ADDR_REGS;
2992 else
2993 return NO_REGS;
2994 }
2995 /* fallthrough */
2996 case LABEL_REF:
2997 case SYMBOL_REF:
2998 if (!legitimate_reload_constant_p (op))
2999 return NO_REGS;
3000 /* fallthrough */
3001 case PLUS:
3002 /* load address will be used. */
3003 if (reg_class_subset_p (ADDR_REGS, rclass))
3004 return ADDR_REGS;
3005 else
3006 return NO_REGS;
3007
3008 default:
3009 break;
3010 }
3011
3012 return rclass;
3013 }
3014
3015 /* Return true if ADDR is SYMBOL_REF + addend with addend being a
3016 multiple of ALIGNMENT and the SYMBOL_REF being naturally
3017 aligned. */
3018
3019 bool
3020 s390_check_symref_alignment (rtx addr, HOST_WIDE_INT alignment)
3021 {
3022 HOST_WIDE_INT addend;
3023 rtx symref;
3024
3025 if (!s390_loadrelative_operand_p (addr, &symref, &addend))
3026 return false;
3027
3028 if (addend & (alignment - 1))
3029 return false;
3030
3031 if (GET_CODE (symref) == SYMBOL_REF
3032 && !SYMBOL_REF_NOT_NATURALLY_ALIGNED_P (symref))
3033 return true;
3034
3035 if (GET_CODE (symref) == UNSPEC
3036 && alignment <= UNITS_PER_LONG)
3037 return true;
3038
3039 return false;
3040 }
3041
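/* For example, with ALIGNMENT == 4 an address of the form
   (const (plus (symbol_ref "foo") (const_int 6))) is rejected because
   the addend is not a multiple of four, whereas
   (const (plus (symbol_ref "foo") (const_int 8))) is accepted provided
   "foo" itself is naturally aligned.  */
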
3042 /* ADDR is moved into REG using larl. If ADDR isn't a valid larl
3043 operand, SCRATCH is used to load the even part of the address,
3044 to which one is then added. */
3045
3046 void
3047 s390_reload_larl_operand (rtx reg, rtx addr, rtx scratch)
3048 {
3049 HOST_WIDE_INT addend;
3050 rtx symref;
3051
3052 if (!s390_loadrelative_operand_p (addr, &symref, &addend))
3053 gcc_unreachable ();
3054
3055 if (!(addend & 1))
3056 /* Easy case. The addend is even so larl will do fine. */
3057 emit_move_insn (reg, addr);
3058 else
3059 {
3060 /* We can leave the scratch register untouched if the target
3061 register is a valid base register. */
3062 if (REGNO (reg) < FIRST_PSEUDO_REGISTER
3063 && REGNO_REG_CLASS (REGNO (reg)) == ADDR_REGS)
3064 scratch = reg;
3065
3066 gcc_assert (REGNO (scratch) < FIRST_PSEUDO_REGISTER);
3067 gcc_assert (REGNO_REG_CLASS (REGNO (scratch)) == ADDR_REGS);
3068
3069 if (addend != 1)
3070 emit_move_insn (scratch,
3071 gen_rtx_CONST (Pmode,
3072 gen_rtx_PLUS (Pmode, symref,
3073 GEN_INT (addend - 1))));
3074 else
3075 emit_move_insn (scratch, symref);
3076
3077 /* Increment the address using la in order to avoid clobbering cc. */
3078 s390_load_address (reg, gen_rtx_PLUS (Pmode, scratch, const1_rtx));
3079 }
3080 }
3081
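/* For an odd addend the routine above effectively emits the pair

     larl  <scratch>, foo+(N-1)
     la    <reg>, 1(<scratch>)

   keeping the larl operand even and using la for the final increment
   so that the condition code is not clobbered ("foo" and N stand for
   the symbol and its odd addend).  */
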
3082 /* Generate what is necessary to move between REG and MEM using
3083 SCRATCH. The direction is given by TOMEM. */
3084
3085 void
3086 s390_reload_symref_address (rtx reg, rtx mem, rtx scratch, bool tomem)
3087 {
3088 /* Reload might have pulled a constant out of the literal pool.
3089 Force it back in. */
3090 if (CONST_INT_P (mem) || GET_CODE (mem) == CONST_DOUBLE
3091 || GET_CODE (mem) == CONST)
3092 mem = force_const_mem (GET_MODE (reg), mem);
3093
3094 gcc_assert (MEM_P (mem));
3095
3096 /* For a load from memory we can leave the scratch register
3097 untouched if the target register is a valid base register. */
3098 if (!tomem
3099 && REGNO (reg) < FIRST_PSEUDO_REGISTER
3100 && REGNO_REG_CLASS (REGNO (reg)) == ADDR_REGS
3101 && GET_MODE (reg) == GET_MODE (scratch))
3102 scratch = reg;
3103
3104 /* Load address into scratch register. Since we can't have a
3105 secondary reload for a secondary reload we have to cover the case
3106 where larl would need a secondary reload here as well. */
3107 s390_reload_larl_operand (scratch, XEXP (mem, 0), scratch);
3108
3109 /* Now we can use a standard load/store to do the move. */
3110 if (tomem)
3111 emit_move_insn (replace_equiv_address (mem, scratch), reg);
3112 else
3113 emit_move_insn (reg, replace_equiv_address (mem, scratch));
3114 }
3115
3116 /* Inform reload about cases where moving X with a mode MODE to a register in
3117 RCLASS requires an extra scratch or immediate register. Return the class
3118 needed for the immediate register. */
3119
3120 static reg_class_t
3121 s390_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
3122 enum machine_mode mode, secondary_reload_info *sri)
3123 {
3124 enum reg_class rclass = (enum reg_class) rclass_i;
3125
3126 /* Intermediate register needed. */
3127 if (reg_classes_intersect_p (CC_REGS, rclass))
3128 return GENERAL_REGS;
3129
3130 if (TARGET_Z10)
3131 {
3132 HOST_WIDE_INT offset;
3133 rtx symref;
3134
3135 /* On z10 several optimizer steps may generate larl operands with
3136 an odd addend. */
3137 if (in_p
3138 && s390_loadrelative_operand_p (x, &symref, &offset)
3139 && mode == Pmode
3140 && !SYMBOL_REF_ALIGN1_P (symref)
3141 && (offset & 1) == 1)
3142 sri->icode = ((mode == DImode) ? CODE_FOR_reloaddi_larl_odd_addend_z10
3143 : CODE_FOR_reloadsi_larl_odd_addend_z10);
3144
3145 /* On z10 we need a scratch register when moving QI, TI or floating
3146 point mode values from or to a memory location with a SYMBOL_REF
3147 or if the symref addend of a SI or DI move is not aligned to the
3148 width of the access. */
3149 if (MEM_P (x)
3150 && s390_loadrelative_operand_p (XEXP (x, 0), NULL, NULL)
3151 && (mode == QImode || mode == TImode || FLOAT_MODE_P (mode)
3152 || (!TARGET_ZARCH && mode == DImode)
3153 || ((mode == HImode || mode == SImode || mode == DImode)
3154 && (!s390_check_symref_alignment (XEXP (x, 0),
3155 GET_MODE_SIZE (mode))))))
3156 {
3157 #define __SECONDARY_RELOAD_CASE(M,m) \
3158 case M##mode: \
3159 if (TARGET_64BIT) \
3160 sri->icode = in_p ? CODE_FOR_reload##m##di_toreg_z10 : \
3161 CODE_FOR_reload##m##di_tomem_z10; \
3162 else \
3163 sri->icode = in_p ? CODE_FOR_reload##m##si_toreg_z10 : \
3164 CODE_FOR_reload##m##si_tomem_z10; \
3165 break;
3166
3167 switch (GET_MODE (x))
3168 {
3169 __SECONDARY_RELOAD_CASE (QI, qi);
3170 __SECONDARY_RELOAD_CASE (HI, hi);
3171 __SECONDARY_RELOAD_CASE (SI, si);
3172 __SECONDARY_RELOAD_CASE (DI, di);
3173 __SECONDARY_RELOAD_CASE (TI, ti);
3174 __SECONDARY_RELOAD_CASE (SF, sf);
3175 __SECONDARY_RELOAD_CASE (DF, df);
3176 __SECONDARY_RELOAD_CASE (TF, tf);
3177 __SECONDARY_RELOAD_CASE (SD, sd);
3178 __SECONDARY_RELOAD_CASE (DD, dd);
3179 __SECONDARY_RELOAD_CASE (TD, td);
3180
3181 default:
3182 gcc_unreachable ();
3183 }
3184 #undef __SECONDARY_RELOAD_CASE
3185 }
3186 }
3187
3188 /* We need a scratch register when loading a PLUS expression which
3189 is not a legitimate operand of the LOAD ADDRESS instruction. */
3190 if (in_p && s390_plus_operand (x, mode))
3191 sri->icode = (TARGET_64BIT ?
3192 CODE_FOR_reloaddi_plus : CODE_FOR_reloadsi_plus);
3193
3194 /* Performing a multiword move from or to memory we have to make sure the
3195 second chunk in memory is addressable without causing a displacement
3196 overflow. If that would be the case we calculate the address in
3197 a scratch register. */
3198 if (MEM_P (x)
3199 && GET_CODE (XEXP (x, 0)) == PLUS
3200 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
3201 && !DISP_IN_RANGE (INTVAL (XEXP (XEXP (x, 0), 1))
3202 + GET_MODE_SIZE (mode) - 1))
3203 {
3204 /* For GENERAL_REGS a displacement overflow is no problem if occurring
3205 in an s_operand address since we may fall back to lm/stm. So we only
3206 have to care about overflows in the b+i+d case. */
3207 if ((reg_classes_intersect_p (GENERAL_REGS, rclass)
3208 && s390_class_max_nregs (GENERAL_REGS, mode) > 1
3209 && GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS)
3210 /* For FP_REGS no lm/stm is available so this check is triggered
3211 for displacement overflows in b+i+d and b+d like addresses. */
3212 || (reg_classes_intersect_p (FP_REGS, rclass)
3213 && s390_class_max_nregs (FP_REGS, mode) > 1))
3214 {
3215 if (in_p)
3216 sri->icode = (TARGET_64BIT ?
3217 CODE_FOR_reloaddi_nonoffmem_in :
3218 CODE_FOR_reloadsi_nonoffmem_in);
3219 else
3220 sri->icode = (TARGET_64BIT ?
3221 CODE_FOR_reloaddi_nonoffmem_out :
3222 CODE_FOR_reloadsi_nonoffmem_out);
3223 }
3224 }
3225
3226 /* A scratch address register is needed when a symbolic constant is
3227 copied to r0 when compiling with -fPIC. In other cases the target
3228 register might be used as temporary (see legitimize_pic_address). */
3229 if (in_p && SYMBOLIC_CONST (x) && flag_pic == 2 && rclass != ADDR_REGS)
3230 sri->icode = (TARGET_64BIT ?
3231 CODE_FOR_reloaddi_PIC_addr :
3232 CODE_FOR_reloadsi_PIC_addr);
3233
3234 /* Either scratch or no register needed. */
3235 return NO_REGS;
3236 }
3237
3238 /* Generate code to load SRC, which is a PLUS that is not a
3239 legitimate operand for the LA instruction, into TARGET.
3240 SCRATCH may be used as scratch register. */
3241
3242 void
3243 s390_expand_plus_operand (rtx target, rtx src,
3244 rtx scratch)
3245 {
3246 rtx sum1, sum2;
3247 struct s390_address ad;
3248
3249 /* src must be a PLUS; get its two operands. */
3250 gcc_assert (GET_CODE (src) == PLUS);
3251 gcc_assert (GET_MODE (src) == Pmode);
3252
3253 /* Check if any of the two operands is already scheduled
3254 for replacement by reload. This can happen e.g. when
3255 float registers occur in an address. */
3256 sum1 = find_replacement (&XEXP (src, 0));
3257 sum2 = find_replacement (&XEXP (src, 1));
3258 src = gen_rtx_PLUS (Pmode, sum1, sum2);
3259
3260 /* If the address is already strictly valid, there's nothing to do. */
3261 if (!s390_decompose_address (src, &ad)
3262 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
3263 || (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx))))
3264 {
3265 /* Otherwise, one of the operands cannot be an address register;
3266 we reload its value into the scratch register. */
3267 if (true_regnum (sum1) < 1 || true_regnum (sum1) > 15)
3268 {
3269 emit_move_insn (scratch, sum1);
3270 sum1 = scratch;
3271 }
3272 if (true_regnum (sum2) < 1 || true_regnum (sum2) > 15)
3273 {
3274 emit_move_insn (scratch, sum2);
3275 sum2 = scratch;
3276 }
3277
3278 /* According to the way these invalid addresses are generated
3279 in reload.c, it should never happen (at least on s390) that
3280 *neither* of the PLUS components, after find_replacement
3281 was applied, is an address register. */
3282 if (sum1 == scratch && sum2 == scratch)
3283 {
3284 debug_rtx (src);
3285 gcc_unreachable ();
3286 }
3287
3288 src = gen_rtx_PLUS (Pmode, sum1, sum2);
3289 }
3290
3291 /* Emit the LOAD ADDRESS pattern. Note that reload of PLUS
3292 is only ever performed on addresses, so we can mark the
3293 sum as legitimate for LA in any case. */
3294 s390_load_address (target, src);
3295 }
3296
3297
3298 /* Return true if ADDR is a valid memory address.
3299 STRICT specifies whether strict register checking applies. */
3300
3301 static bool
3302 s390_legitimate_address_p (enum machine_mode mode, rtx addr, bool strict)
3303 {
3304 struct s390_address ad;
3305
3306 if (TARGET_Z10
3307 && larl_operand (addr, VOIDmode)
3308 && (mode == VOIDmode
3309 || s390_check_symref_alignment (addr, GET_MODE_SIZE (mode))))
3310 return true;
3311
3312 if (!s390_decompose_address (addr, &ad))
3313 return false;
3314
3315 if (strict)
3316 {
3317 if (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
3318 return false;
3319
3320 if (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx)))
3321 return false;
3322 }
3323 else
3324 {
3325 if (ad.base
3326 && !(REGNO (ad.base) >= FIRST_PSEUDO_REGISTER
3327 || REGNO_REG_CLASS (REGNO (ad.base)) == ADDR_REGS))
3328 return false;
3329
3330 if (ad.indx
3331 && !(REGNO (ad.indx) >= FIRST_PSEUDO_REGISTER
3332 || REGNO_REG_CLASS (REGNO (ad.indx)) == ADDR_REGS))
3333 return false;
3334 }
3335 return true;
3336 }
3337
3338 /* Return true if OP is a valid operand for the LA instruction.
3339 In 31-bit, we need to prove that the result is used as an
3340 address, as LA performs only a 31-bit addition. */
3341
3342 bool
3343 legitimate_la_operand_p (rtx op)
3344 {
3345 struct s390_address addr;
3346 if (!s390_decompose_address (op, &addr))
3347 return false;
3348
3349 return (TARGET_64BIT || addr.pointer);
3350 }
3351
3352 /* Return true if it is valid *and* preferable to use LA to
3353 compute the sum of OP1 and OP2. */
3354
3355 bool
3356 preferred_la_operand_p (rtx op1, rtx op2)
3357 {
3358 struct s390_address addr;
3359
3360 if (op2 != const0_rtx)
3361 op1 = gen_rtx_PLUS (Pmode, op1, op2);
3362
3363 if (!s390_decompose_address (op1, &addr))
3364 return false;
3365 if (addr.base && !REGNO_OK_FOR_BASE_P (REGNO (addr.base)))
3366 return false;
3367 if (addr.indx && !REGNO_OK_FOR_INDEX_P (REGNO (addr.indx)))
3368 return false;
3369
3370 /* Avoid LA instructions with index register on z196; it is
3371 preferable to use regular add instructions when possible.
3372 Starting with zEC12 the la with index register is "uncracked"
3373 again. */
3374 if (addr.indx && s390_tune == PROCESSOR_2817_Z196)
3375 return false;
3376
3377 if (!TARGET_64BIT && !addr.pointer)
3378 return false;
3379
3380 if (addr.pointer)
3381 return true;
3382
3383 if ((addr.base && REG_P (addr.base) && REG_POINTER (addr.base))
3384 || (addr.indx && REG_P (addr.indx) && REG_POINTER (addr.indx)))
3385 return true;
3386
3387 return false;
3388 }
3389
3390 /* Emit a forced load-address operation to load SRC into DST.
3391 This will use the LOAD ADDRESS instruction even in situations
3392 where legitimate_la_operand_p (SRC) returns false. */
3393
3394 void
3395 s390_load_address (rtx dst, rtx src)
3396 {
3397 if (TARGET_64BIT)
3398 emit_move_insn (dst, src);
3399 else
3400 emit_insn (gen_force_la_31 (dst, src));
3401 }
3402
3403 /* Return a legitimate reference for ORIG (an address) using the
3404 register REG. If REG is 0, a new pseudo is generated.
3405
3406 There are two types of references that must be handled:
3407
3408 1. Global data references must load the address from the GOT, via
3409 the PIC reg. An insn is emitted to do this load, and the reg is
3410 returned.
3411
3412 2. Static data references, constant pool addresses, and code labels
3413 compute the address as an offset from the GOT, whose base is in
3414 the PIC reg. Static data objects have SYMBOL_FLAG_LOCAL set to
3415 differentiate them from global data objects. The returned
3416 address is the PIC reg + an unspec constant.
3417
3418 TARGET_LEGITIMIZE_ADDRESS_P rejects symbolic references unless the PIC
3419 reg also appears in the address. */
3420
3421 rtx
3422 legitimize_pic_address (rtx orig, rtx reg)
3423 {
3424 rtx addr = orig;
3425 rtx addend = const0_rtx;
3426 rtx new_rtx = orig;
3427
3428 gcc_assert (!TLS_SYMBOLIC_CONST (addr));
3429
3430 if (GET_CODE (addr) == CONST)
3431 addr = XEXP (addr, 0);
3432
3433 if (GET_CODE (addr) == PLUS)
3434 {
3435 addend = XEXP (addr, 1);
3436 addr = XEXP (addr, 0);
3437 }
3438
3439 if ((GET_CODE (addr) == LABEL_REF
3440 || (GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_LOCAL_P (addr))
3441 || (GET_CODE (addr) == UNSPEC &&
3442 (XINT (addr, 1) == UNSPEC_GOTENT
3443 || (TARGET_CPU_ZARCH && XINT (addr, 1) == UNSPEC_PLT))))
3444 && GET_CODE (addend) == CONST_INT)
3445 {
3446 /* This can be locally addressed. */
3447
3448 /* larl_operand requires UNSPECs to be wrapped in a const rtx. */
3449 rtx const_addr = (GET_CODE (addr) == UNSPEC ?
3450 gen_rtx_CONST (Pmode, addr) : addr);
3451
3452 if (TARGET_CPU_ZARCH
3453 && larl_operand (const_addr, VOIDmode)
3454 && INTVAL (addend) < (HOST_WIDE_INT)1 << 31
3455 && INTVAL (addend) >= -((HOST_WIDE_INT)1 << 31))
3456 {
3457 if (INTVAL (addend) & 1)
3458 {
3459 /* LARL can't handle odd offsets, so emit a pair of LARL
3460 and LA. */
3461 rtx temp = reg? reg : gen_reg_rtx (Pmode);
3462
3463 if (!DISP_IN_RANGE (INTVAL (addend)))
3464 {
3465 HOST_WIDE_INT even = INTVAL (addend) - 1;
3466 addr = gen_rtx_PLUS (Pmode, addr, GEN_INT (even));
3467 addr = gen_rtx_CONST (Pmode, addr);
3468 addend = const1_rtx;
3469 }
3470
3471 emit_move_insn (temp, addr);
3472 new_rtx = gen_rtx_PLUS (Pmode, temp, addend);
3473
3474 if (reg != 0)
3475 {
3476 s390_load_address (reg, new_rtx);
3477 new_rtx = reg;
3478 }
3479 }
3480 else
3481 {
3482 /* If the offset is even, we can just use LARL. This
3483 will happen automatically. */
3484 }
3485 }
3486 else
3487 {
3488 /* No larl -- access local symbols relative to the GOT. */
3489
3490 rtx temp = reg? reg : gen_reg_rtx (Pmode);
3491
3492 if (reload_in_progress || reload_completed)
3493 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3494
3495 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
3496 if (addend != const0_rtx)
3497 addr = gen_rtx_PLUS (Pmode, addr, addend);
3498 addr = gen_rtx_CONST (Pmode, addr);
3499 addr = force_const_mem (Pmode, addr);
3500 emit_move_insn (temp, addr);
3501
3502 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3503 if (reg != 0)
3504 {
3505 s390_load_address (reg, new_rtx);
3506 new_rtx = reg;
3507 }
3508 }
3509 }
3510 else if (GET_CODE (addr) == SYMBOL_REF && addend == const0_rtx)
3511 {
3512 /* A non-local symbol reference without addend.
3513
3514 The symbol ref is wrapped into an UNSPEC to make sure the
3515 proper operand modifier (@GOT or @GOTENT) will be emitted.
3516 This will tell the linker to put the symbol into the GOT.
3517
3518 Additionally the code dereferencing the GOT slot is emitted here.
3519
3520 An addend to the symref needs to be added afterwards.
3521 legitimize_pic_address calls itself recursively to handle
3522 that case. So no need to do it here. */
3523
3524 if (reg == 0)
3525 reg = gen_reg_rtx (Pmode);
3526
3527 if (TARGET_Z10)
3528 {
3529 /* Use load relative if possible.
3530 lgrl <target>, sym@GOTENT */
3531 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTENT);
3532 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3533 new_rtx = gen_const_mem (GET_MODE (reg), new_rtx);
3534
3535 emit_move_insn (reg, new_rtx);
3536 new_rtx = reg;
3537 }
3538 else if (flag_pic == 1)
3539 {
3540 /* Assume GOT offset is a valid displacement operand (< 4k
3541 or < 512k with z990). This is handled the same way in
3542 both 31- and 64-bit code (@GOT).
3543 lg <target>, sym@GOT(r12) */
3544
3545 if (reload_in_progress || reload_completed)
3546 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3547
3548 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
3549 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3550 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
3551 new_rtx = gen_const_mem (Pmode, new_rtx);
3552 emit_move_insn (reg, new_rtx);
3553 new_rtx = reg;
3554 }
3555 else if (TARGET_CPU_ZARCH)
3556 {
3557 /* If the GOT offset might be >= 4k, we determine the position
3558 of the GOT entry via a PC-relative LARL (@GOTENT).
3559 larl temp, sym@GOTENT
3560 lg <target>, 0(temp) */
3561
3562 rtx temp = reg ? reg : gen_reg_rtx (Pmode);
3563
3564 gcc_assert (REGNO (temp) >= FIRST_PSEUDO_REGISTER
3565 || REGNO_REG_CLASS (REGNO (temp)) == ADDR_REGS);
3566
3567 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTENT);
3568 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3569 emit_move_insn (temp, new_rtx);
3570
3571 new_rtx = gen_const_mem (Pmode, temp);
3572 emit_move_insn (reg, new_rtx);
3573
3574 new_rtx = reg;
3575 }
3576 else
3577 {
3578 /* If the GOT offset might be >= 4k, we have to load it
3579 from the literal pool (@GOT).
3580
3581 lg temp, lit-litbase(r13)
3582 lg <target>, 0(temp)
3583 lit: .long sym@GOT */
3584
3585 rtx temp = reg ? reg : gen_reg_rtx (Pmode);
3586
3587 gcc_assert (REGNO (temp) >= FIRST_PSEUDO_REGISTER
3588 || REGNO_REG_CLASS (REGNO (temp)) == ADDR_REGS);
3589
3590 if (reload_in_progress || reload_completed)
3591 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3592
3593 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
3594 addr = gen_rtx_CONST (Pmode, addr);
3595 addr = force_const_mem (Pmode, addr);
3596 emit_move_insn (temp, addr);
3597
3598 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3599 new_rtx = gen_const_mem (Pmode, new_rtx);
3600 emit_move_insn (reg, new_rtx);
3601 new_rtx = reg;
3602 }
3603 }
3604 else if (GET_CODE (addr) == UNSPEC && GET_CODE (addend) == CONST_INT)
3605 {
3606 gcc_assert (XVECLEN (addr, 0) == 1);
3607 switch (XINT (addr, 1))
3608 {
3609 /* These are addresses of symbols (or PLT slots) relative to the
3610 GOT (not GOT slots!). In general this will exceed the
3611 displacement range, so these values belong in the literal
3612 pool. */
3613 case UNSPEC_GOTOFF:
3614 case UNSPEC_PLTOFF:
3615 new_rtx = force_const_mem (Pmode, orig);
3616 break;
3617
3618 /* For -fPIC the GOT size might exceed the displacement
3619 range so make sure the value is in the literal pool. */
3620 case UNSPEC_GOT:
3621 if (flag_pic == 2)
3622 new_rtx = force_const_mem (Pmode, orig);
3623 break;
3624
3625 /* For @GOTENT larl is used. This is handled like local
3626 symbol refs. */
3627 case UNSPEC_GOTENT:
3628 gcc_unreachable ();
3629 break;
3630
3631 /* @PLT is OK as is on 64-bit, must be converted to
3632 GOT-relative @PLTOFF on 31-bit. */
3633 case UNSPEC_PLT:
3634 if (!TARGET_CPU_ZARCH)
3635 {
3636 rtx temp = reg? reg : gen_reg_rtx (Pmode);
3637
3638 if (reload_in_progress || reload_completed)
3639 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3640
3641 addr = XVECEXP (addr, 0, 0);
3642 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr),
3643 UNSPEC_PLTOFF);
3644 if (addend != const0_rtx)
3645 addr = gen_rtx_PLUS (Pmode, addr, addend);
3646 addr = gen_rtx_CONST (Pmode, addr);
3647 addr = force_const_mem (Pmode, addr);
3648 emit_move_insn (temp, addr);
3649
3650 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3651 if (reg != 0)
3652 {
3653 s390_load_address (reg, new_rtx);
3654 new_rtx = reg;
3655 }
3656 }
3657 else
3658 /* On 64 bit larl can be used. This case is handled like
3659 local symbol refs. */
3660 gcc_unreachable ();
3661 break;
3662
3663 /* Everything else cannot happen. */
3664 default:
3665 gcc_unreachable ();
3666 }
3667 }
3668 else if (addend != const0_rtx)
3669 {
3670 /* Otherwise, compute the sum. */
3671
3672 rtx base = legitimize_pic_address (addr, reg);
3673 new_rtx = legitimize_pic_address (addend,
3674 base == reg ? NULL_RTX : reg);
3675 if (GET_CODE (new_rtx) == CONST_INT)
3676 new_rtx = plus_constant (Pmode, base, INTVAL (new_rtx));
3677 else
3678 {
3679 if (GET_CODE (new_rtx) == PLUS && CONSTANT_P (XEXP (new_rtx, 1)))
3680 {
3681 base = gen_rtx_PLUS (Pmode, base, XEXP (new_rtx, 0));
3682 new_rtx = XEXP (new_rtx, 1);
3683 }
3684 new_rtx = gen_rtx_PLUS (Pmode, base, new_rtx);
3685 }
3686
3687 if (GET_CODE (new_rtx) == CONST)
3688 new_rtx = XEXP (new_rtx, 0);
3689 new_rtx = force_operand (new_rtx, 0);
3690 }
3691
3692 return new_rtx;
3693 }
3694
3695 /* Load the thread pointer into a register. */
3696
3697 rtx
3698 s390_get_thread_pointer (void)
3699 {
3700 rtx tp = gen_reg_rtx (Pmode);
3701
3702 emit_move_insn (tp, gen_rtx_REG (Pmode, TP_REGNUM));
3703 mark_reg_pointer (tp, BITS_PER_WORD);
3704
3705 return tp;
3706 }
3707
3708 /* Emit a tls call insn. The call target is the SYMBOL_REF stored
3709 in s390_tls_symbol which always refers to __tls_get_offset.
3710 The returned offset is written to RESULT_REG and a USE rtx is
3711 generated for TLS_CALL. */
3712
3713 static GTY(()) rtx s390_tls_symbol;
3714
3715 static void
3716 s390_emit_tls_call_insn (rtx result_reg, rtx tls_call)
3717 {
3718 rtx insn;
3719
3720 if (!flag_pic)
3721 emit_insn (s390_load_got ());
3722
3723 if (!s390_tls_symbol)
3724 s390_tls_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tls_get_offset");
3725
3726 insn = s390_emit_call (s390_tls_symbol, tls_call, result_reg,
3727 gen_rtx_REG (Pmode, RETURN_REGNUM));
3728
3729 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), result_reg);
3730 RTL_CONST_CALL_P (insn) = 1;
3731 }
3732
3733 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
3734 this (thread-local) address. REG may be used as a temporary. */
3735
3736 static rtx
3737 legitimize_tls_address (rtx addr, rtx reg)
3738 {
3739 rtx new_rtx, tls_call, temp, base, r2, insn;
3740
3741 if (GET_CODE (addr) == SYMBOL_REF)
3742 switch (tls_symbolic_operand (addr))
3743 {
3744 case TLS_MODEL_GLOBAL_DYNAMIC:
3745 start_sequence ();
3746 r2 = gen_rtx_REG (Pmode, 2);
3747 tls_call = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_TLSGD);
3748 new_rtx = gen_rtx_CONST (Pmode, tls_call);
3749 new_rtx = force_const_mem (Pmode, new_rtx);
3750 emit_move_insn (r2, new_rtx);
3751 s390_emit_tls_call_insn (r2, tls_call);
3752 insn = get_insns ();
3753 end_sequence ();
3754
3755 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_NTPOFF);
3756 temp = gen_reg_rtx (Pmode);
3757 emit_libcall_block (insn, temp, r2, new_rtx);
3758
3759 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
3760 if (reg != 0)
3761 {
3762 s390_load_address (reg, new_rtx);
3763 new_rtx = reg;
3764 }
3765 break;
3766
3767 case TLS_MODEL_LOCAL_DYNAMIC:
3768 start_sequence ();
3769 r2 = gen_rtx_REG (Pmode, 2);
3770 tls_call = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TLSLDM);
3771 new_rtx = gen_rtx_CONST (Pmode, tls_call);
3772 new_rtx = force_const_mem (Pmode, new_rtx);
3773 emit_move_insn (r2, new_rtx);
3774 s390_emit_tls_call_insn (r2, tls_call);
3775 insn = get_insns ();
3776 end_sequence ();
3777
3778 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TLSLDM_NTPOFF);
3779 temp = gen_reg_rtx (Pmode);
3780 emit_libcall_block (insn, temp, r2, new_rtx);
3781
3782 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
3783 base = gen_reg_rtx (Pmode);
3784 s390_load_address (base, new_rtx);
3785
3786 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_DTPOFF);
3787 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3788 new_rtx = force_const_mem (Pmode, new_rtx);
3789 temp = gen_reg_rtx (Pmode);
3790 emit_move_insn (temp, new_rtx);
3791
3792 new_rtx = gen_rtx_PLUS (Pmode, base, temp);
3793 if (reg != 0)
3794 {
3795 s390_load_address (reg, new_rtx);
3796 new_rtx = reg;
3797 }
3798 break;
3799
3800 case TLS_MODEL_INITIAL_EXEC:
3801 if (flag_pic == 1)
3802 {
3803 /* Assume GOT offset < 4k. This is handled the same way
3804 in both 31- and 64-bit code. */
3805
3806 if (reload_in_progress || reload_completed)
3807 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3808
3809 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTNTPOFF);
3810 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3811 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
3812 new_rtx = gen_const_mem (Pmode, new_rtx);
3813 temp = gen_reg_rtx (Pmode);
3814 emit_move_insn (temp, new_rtx);
3815 }
3816 else if (TARGET_CPU_ZARCH)
3817 {
3818 /* If the GOT offset might be >= 4k, we determine the position
3819 of the GOT entry via a PC-relative LARL. */
3820
3821 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_INDNTPOFF);
3822 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3823 temp = gen_reg_rtx (Pmode);
3824 emit_move_insn (temp, new_rtx);
3825
3826 new_rtx = gen_const_mem (Pmode, temp);
3827 temp = gen_reg_rtx (Pmode);
3828 emit_move_insn (temp, new_rtx);
3829 }
3830 else if (flag_pic)
3831 {
3832 /* If the GOT offset might be >= 4k, we have to load it
3833 from the literal pool. */
3834
3835 if (reload_in_progress || reload_completed)
3836 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3837
3838 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTNTPOFF);
3839 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3840 new_rtx = force_const_mem (Pmode, new_rtx);
3841 temp = gen_reg_rtx (Pmode);
3842 emit_move_insn (temp, new_rtx);
3843
3844 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3845 new_rtx = gen_const_mem (Pmode, new_rtx);
3846
3847 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, new_rtx, addr), UNSPEC_TLS_LOAD);
3848 temp = gen_reg_rtx (Pmode);
3849 emit_insn (gen_rtx_SET (Pmode, temp, new_rtx));
3850 }
3851 else
3852 {
3853 /* In position-dependent code, load the absolute address of
3854 the GOT entry from the literal pool. */
3855
3856 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_INDNTPOFF);
3857 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3858 new_rtx = force_const_mem (Pmode, new_rtx);
3859 temp = gen_reg_rtx (Pmode);
3860 emit_move_insn (temp, new_rtx);
3861
3862 new_rtx = temp;
3863 new_rtx = gen_const_mem (Pmode, new_rtx);
3864 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, new_rtx, addr), UNSPEC_TLS_LOAD);
3865 temp = gen_reg_rtx (Pmode);
3866 emit_insn (gen_rtx_SET (Pmode, temp, new_rtx));
3867 }
3868
3869 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
3870 if (reg != 0)
3871 {
3872 s390_load_address (reg, new_rtx);
3873 new_rtx = reg;
3874 }
3875 break;
3876
3877 case TLS_MODEL_LOCAL_EXEC:
3878 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_NTPOFF);
3879 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3880 new_rtx = force_const_mem (Pmode, new_rtx);
3881 temp = gen_reg_rtx (Pmode);
3882 emit_move_insn (temp, new_rtx);
3883
3884 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
3885 if (reg != 0)
3886 {
3887 s390_load_address (reg, new_rtx);
3888 new_rtx = reg;
3889 }
3890 break;
3891
3892 default:
3893 gcc_unreachable ();
3894 }
3895
3896 else if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == UNSPEC)
3897 {
3898 switch (XINT (XEXP (addr, 0), 1))
3899 {
3900 case UNSPEC_INDNTPOFF:
3901 gcc_assert (TARGET_CPU_ZARCH);
3902 new_rtx = addr;
3903 break;
3904
3905 default:
3906 gcc_unreachable ();
3907 }
3908 }
3909
3910 else if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == PLUS
3911 && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
3912 {
3913 new_rtx = XEXP (XEXP (addr, 0), 0);
3914 if (GET_CODE (new_rtx) != SYMBOL_REF)
3915 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3916
3917 new_rtx = legitimize_tls_address (new_rtx, reg);
3918 new_rtx = plus_constant (Pmode, new_rtx,
3919 INTVAL (XEXP (XEXP (addr, 0), 1)));
3920 new_rtx = force_operand (new_rtx, 0);
3921 }
3922
3923 else
3924 gcc_unreachable (); /* for now ... */
3925
3926 return new_rtx;
3927 }
3928
3929 /* Emit insns making the address in operands[1] valid for a standard
3930 move to operands[0]. operands[1] is replaced by an address which
3931 should be used instead of the former RTX to emit the move
3932 pattern. */
3933
3934 void
3935 emit_symbolic_move (rtx *operands)
3936 {
3937 rtx temp = !can_create_pseudo_p () ? operands[0] : gen_reg_rtx (Pmode);
3938
3939 if (GET_CODE (operands[0]) == MEM)
3940 operands[1] = force_reg (Pmode, operands[1]);
3941 else if (TLS_SYMBOLIC_CONST (operands[1]))
3942 operands[1] = legitimize_tls_address (operands[1], temp);
3943 else if (flag_pic)
3944 operands[1] = legitimize_pic_address (operands[1], temp);
3945 }
3946
3947 /* Try machine-dependent ways of modifying an illegitimate address X
3948 to be legitimate. If we find one, return the new, valid address.
3949
3950 OLDX is the address as it was before break_out_memory_refs was called.
3951 In some cases it is useful to look at this to decide what needs to be done.
3952
3953 MODE is the mode of the operand pointed to by X.
3954
3955 When -fpic is used, special handling is needed for symbolic references.
3956 See comments by legitimize_pic_address for details. */
3957
3958 static rtx
3959 s390_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
3960 enum machine_mode mode ATTRIBUTE_UNUSED)
3961 {
3962 rtx constant_term = const0_rtx;
3963
3964 if (TLS_SYMBOLIC_CONST (x))
3965 {
3966 x = legitimize_tls_address (x, 0);
3967
3968 if (s390_legitimate_address_p (mode, x, FALSE))
3969 return x;
3970 }
3971 else if (GET_CODE (x) == PLUS
3972 && (TLS_SYMBOLIC_CONST (XEXP (x, 0))
3973 || TLS_SYMBOLIC_CONST (XEXP (x, 1))))
3974 {
3975 return x;
3976 }
3977 else if (flag_pic)
3978 {
3979 if (SYMBOLIC_CONST (x)
3980 || (GET_CODE (x) == PLUS
3981 && (SYMBOLIC_CONST (XEXP (x, 0))
3982 || SYMBOLIC_CONST (XEXP (x, 1)))))
3983 x = legitimize_pic_address (x, 0);
3984
3985 if (s390_legitimate_address_p (mode, x, FALSE))
3986 return x;
3987 }
3988
3989 x = eliminate_constant_term (x, &constant_term);
3990
3991 /* Optimize loading of large displacements by splitting them
3992 into the multiple of 4K and the rest; this allows the
3993 former to be CSE'd if possible.
3994
3995 Don't do this if the displacement is added to a register
3996 pointing into the stack frame, as the offsets will
3997 change later anyway. */
3998
3999 if (GET_CODE (constant_term) == CONST_INT
4000 && !TARGET_LONG_DISPLACEMENT
4001 && !DISP_IN_RANGE (INTVAL (constant_term))
4002 && !(REG_P (x) && REGNO_PTR_FRAME_P (REGNO (x))))
4003 {
4004 HOST_WIDE_INT lower = INTVAL (constant_term) & 0xfff;
4005 HOST_WIDE_INT upper = INTVAL (constant_term) ^ lower;
4006
4007 rtx temp = gen_reg_rtx (Pmode);
4008 rtx val = force_operand (GEN_INT (upper), temp);
4009 if (val != temp)
4010 emit_move_insn (temp, val);
4011
4012 x = gen_rtx_PLUS (Pmode, x, temp);
4013 constant_term = GEN_INT (lower);
4014 }
4015
4016 if (GET_CODE (x) == PLUS)
4017 {
4018 if (GET_CODE (XEXP (x, 0)) == REG)
4019 {
4020 rtx temp = gen_reg_rtx (Pmode);
4021 rtx val = force_operand (XEXP (x, 1), temp);
4022 if (val != temp)
4023 emit_move_insn (temp, val);
4024
4025 x = gen_rtx_PLUS (Pmode, XEXP (x, 0), temp);
4026 }
4027
4028 else if (GET_CODE (XEXP (x, 1)) == REG)
4029 {
4030 rtx temp = gen_reg_rtx (Pmode);
4031 rtx val = force_operand (XEXP (x, 0), temp);
4032 if (val != temp)
4033 emit_move_insn (temp, val);
4034
4035 x = gen_rtx_PLUS (Pmode, temp, XEXP (x, 1));
4036 }
4037 }
4038
4039 if (constant_term != const0_rtx)
4040 x = gen_rtx_PLUS (Pmode, x, constant_term);
4041
4042 return x;
4043 }
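/* A minimal standalone sketch of the 4K displacement split performed
   above in s390_legitimize_address; the helper name split_displacement
   is invented for illustration only.  The low twelve bits stay in the
   instruction's displacement field, while the remainder is a multiple
   of 4096 that can be loaded into a register and CSE'd.  */
#include <assert.h>
#include <stdint.h>

static void
split_displacement (int64_t disp)
{
  int64_t lower = disp & 0xfff;   /* fits the unsigned 12-bit D field */
  int64_t upper = disp ^ lower;   /* multiple of 4096, low bits clear */

  assert (lower >= 0 && lower <= 0xfff);
  assert ((upper & 0xfff) == 0);
  assert (upper + lower == disp); /* recombines to the original value */
}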
4044
4045 /* Try a machine-dependent way of reloading an illegitimate address AD
4046 operand. If we find one, push the reload and return the new address.
4047
4048 MODE is the mode of the enclosing MEM. OPNUM is the operand number
4049 and TYPE is the reload type of the current reload. */
4050
4051 rtx
4052 legitimize_reload_address (rtx ad, enum machine_mode mode ATTRIBUTE_UNUSED,
4053 int opnum, int type)
4054 {
4055 if (!optimize || TARGET_LONG_DISPLACEMENT)
4056 return NULL_RTX;
4057
4058 if (GET_CODE (ad) == PLUS)
4059 {
4060 rtx tem = simplify_binary_operation (PLUS, Pmode,
4061 XEXP (ad, 0), XEXP (ad, 1));
4062 if (tem)
4063 ad = tem;
4064 }
4065
4066 if (GET_CODE (ad) == PLUS
4067 && GET_CODE (XEXP (ad, 0)) == REG
4068 && GET_CODE (XEXP (ad, 1)) == CONST_INT
4069 && !DISP_IN_RANGE (INTVAL (XEXP (ad, 1))))
4070 {
4071 HOST_WIDE_INT lower = INTVAL (XEXP (ad, 1)) & 0xfff;
4072 HOST_WIDE_INT upper = INTVAL (XEXP (ad, 1)) ^ lower;
4073 rtx cst, tem, new_rtx;
4074
4075 cst = GEN_INT (upper);
4076 if (!legitimate_reload_constant_p (cst))
4077 cst = force_const_mem (Pmode, cst);
4078
4079 tem = gen_rtx_PLUS (Pmode, XEXP (ad, 0), cst);
4080 new_rtx = gen_rtx_PLUS (Pmode, tem, GEN_INT (lower));
4081
4082 push_reload (XEXP (tem, 1), 0, &XEXP (tem, 1), 0,
4083 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
4084 opnum, (enum reload_type) type);
4085 return new_rtx;
4086 }
4087
4088 return NULL_RTX;
4089 }
4090
4091 /* Emit code to move LEN bytes from SRC to DST. */
4092
4093 bool
4094 s390_expand_movmem (rtx dst, rtx src, rtx len)
4095 {
4096 /* When tuning for z10 or higher we rely on the Glibc functions to
4097 do the right thing. Inline code is generated only for constant
4098 lengths below 64k. */
4099 if (s390_tune >= PROCESSOR_2097_Z10
4100 && (GET_CODE (len) != CONST_INT || INTVAL (len) > (1<<16)))
4101 return false;
4102
4103 if (GET_CODE (len) == CONST_INT && INTVAL (len) >= 0 && INTVAL (len) <= 256)
4104 {
4105 if (INTVAL (len) > 0)
4106 emit_insn (gen_movmem_short (dst, src, GEN_INT (INTVAL (len) - 1)));
4107 }
4108
4109 else if (TARGET_MVCLE)
4110 {
4111 emit_insn (gen_movmem_long (dst, src, convert_to_mode (Pmode, len, 1)));
4112 }
4113
4114 else
4115 {
4116 rtx dst_addr, src_addr, count, blocks, temp;
4117 rtx loop_start_label = gen_label_rtx ();
4118 rtx loop_end_label = gen_label_rtx ();
4119 rtx end_label = gen_label_rtx ();
4120 enum machine_mode mode;
4121
4122 mode = GET_MODE (len);
4123 if (mode == VOIDmode)
4124 mode = Pmode;
4125
4126 dst_addr = gen_reg_rtx (Pmode);
4127 src_addr = gen_reg_rtx (Pmode);
4128 count = gen_reg_rtx (mode);
4129 blocks = gen_reg_rtx (mode);
4130
4131 convert_move (count, len, 1);
4132 emit_cmp_and_jump_insns (count, const0_rtx,
4133 EQ, NULL_RTX, mode, 1, end_label);
4134
4135 emit_move_insn (dst_addr, force_operand (XEXP (dst, 0), NULL_RTX));
4136 emit_move_insn (src_addr, force_operand (XEXP (src, 0), NULL_RTX));
4137 dst = change_address (dst, VOIDmode, dst_addr);
4138 src = change_address (src, VOIDmode, src_addr);
4139
4140 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
4141 OPTAB_DIRECT);
4142 if (temp != count)
4143 emit_move_insn (count, temp);
4144
4145 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
4146 OPTAB_DIRECT);
4147 if (temp != blocks)
4148 emit_move_insn (blocks, temp);
4149
4150 emit_cmp_and_jump_insns (blocks, const0_rtx,
4151 EQ, NULL_RTX, mode, 1, loop_end_label);
4152
4153 emit_label (loop_start_label);
4154
4155 if (TARGET_Z10
4156 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 768))
4157 {
4158 rtx prefetch;
4159
4160 /* Issue a read prefetch for the +3 cache line. */
4161 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, src_addr, GEN_INT (768)),
4162 const0_rtx, const0_rtx);
4163 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4164 emit_insn (prefetch);
4165
4166 /* Issue a write prefetch for the +3 cache line. */
4167 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (768)),
4168 const1_rtx, const0_rtx);
4169 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4170 emit_insn (prefetch);
4171 }
4172
4173 emit_insn (gen_movmem_short (dst, src, GEN_INT (255)));
4174 s390_load_address (dst_addr,
4175 gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (256)));
4176 s390_load_address (src_addr,
4177 gen_rtx_PLUS (Pmode, src_addr, GEN_INT (256)));
4178
4179 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
4180 OPTAB_DIRECT);
4181 if (temp != blocks)
4182 emit_move_insn (blocks, temp);
4183
4184 emit_cmp_and_jump_insns (blocks, const0_rtx,
4185 EQ, NULL_RTX, mode, 1, loop_end_label);
4186
4187 emit_jump (loop_start_label);
4188 emit_label (loop_end_label);
4189
4190 emit_insn (gen_movmem_short (dst, src,
4191 convert_to_mode (Pmode, count, 1)));
4192 emit_label (end_label);
4193 }
4194 return true;
4195 }
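/* A hedged C-level sketch of the block decomposition expanded above:
   the copy is split into full 256-byte chunks handled in the loop and
   a final chunk of ((LEN - 1) & 255) + 1 bytes, mirroring the fact
   that MVC encodes "length - 1" in eight bits.  The helper name is
   invented for illustration.  */
#include <stddef.h>
#include <string.h>

static void
copy_in_mvc_sized_blocks (char *dst, const char *src, size_t len)
{
  if (len == 0)
    return;

  size_t count = len - 1;
  size_t blocks = count >> 8;       /* number of maximal-length moves */

  while (blocks--)
    {
      memcpy (dst, src, 256);       /* one 256-byte MVC */
      dst += 256;
      src += 256;
    }

  memcpy (dst, src, (count & 0xff) + 1);  /* remaining 1..256 bytes */
}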
4196
4197 /* Emit code to set LEN bytes at DST to VAL.
4198 Make use of clrmem if VAL is zero. */
4199
4200 void
4201 s390_expand_setmem (rtx dst, rtx len, rtx val)
4202 {
4203 if (GET_CODE (len) == CONST_INT && INTVAL (len) == 0)
4204 return;
4205
4206 gcc_assert (GET_CODE (val) == CONST_INT || GET_MODE (val) == QImode);
4207
4208 if (GET_CODE (len) == CONST_INT && INTVAL (len) > 0 && INTVAL (len) <= 257)
4209 {
4210 if (val == const0_rtx && INTVAL (len) <= 256)
4211 emit_insn (gen_clrmem_short (dst, GEN_INT (INTVAL (len) - 1)));
4212 else
4213 {
4214 /* Initialize memory by storing the first byte. */
4215 emit_move_insn (adjust_address (dst, QImode, 0), val);
4216
4217 if (INTVAL (len) > 1)
4218 {
4219 /* Initiate 1 byte overlap move.
4220 The first byte of DST is propagated through DSTP1.
4221 Prepare a movmem for: DST+1 = DST (length = LEN - 1).
4222 DST is set to size 1 so the rest of the memory location
4223 does not count as source operand. */
4224 rtx dstp1 = adjust_address (dst, VOIDmode, 1);
4225 set_mem_size (dst, 1);
4226
4227 emit_insn (gen_movmem_short (dstp1, dst,
4228 GEN_INT (INTVAL (len) - 2)));
4229 }
4230 }
4231 }
4232
4233 else if (TARGET_MVCLE)
4234 {
4235 val = force_not_mem (convert_modes (Pmode, QImode, val, 1));
4236 emit_insn (gen_setmem_long (dst, convert_to_mode (Pmode, len, 1), val));
4237 }
4238
4239 else
4240 {
4241 rtx dst_addr, count, blocks, temp, dstp1 = NULL_RTX;
4242 rtx loop_start_label = gen_label_rtx ();
4243 rtx loop_end_label = gen_label_rtx ();
4244 rtx end_label = gen_label_rtx ();
4245 enum machine_mode mode;
4246
4247 mode = GET_MODE (len);
4248 if (mode == VOIDmode)
4249 mode = Pmode;
4250
4251 dst_addr = gen_reg_rtx (Pmode);
4252 count = gen_reg_rtx (mode);
4253 blocks = gen_reg_rtx (mode);
4254
4255 convert_move (count, len, 1);
4256 emit_cmp_and_jump_insns (count, const0_rtx,
4257 EQ, NULL_RTX, mode, 1, end_label);
4258
4259 emit_move_insn (dst_addr, force_operand (XEXP (dst, 0), NULL_RTX));
4260 dst = change_address (dst, VOIDmode, dst_addr);
4261
4262 if (val == const0_rtx)
4263 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
4264 OPTAB_DIRECT);
4265 else
4266 {
4267 dstp1 = adjust_address (dst, VOIDmode, 1);
4268 set_mem_size (dst, 1);
4269
4270 /* Initialize memory by storing the first byte. */
4271 emit_move_insn (adjust_address (dst, QImode, 0), val);
4272
4273 /* If count is 1 we are done. */
4274 emit_cmp_and_jump_insns (count, const1_rtx,
4275 EQ, NULL_RTX, mode, 1, end_label);
4276
4277 temp = expand_binop (mode, add_optab, count, GEN_INT (-2), count, 1,
4278 OPTAB_DIRECT);
4279 }
4280 if (temp != count)
4281 emit_move_insn (count, temp);
4282
4283 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
4284 OPTAB_DIRECT);
4285 if (temp != blocks)
4286 emit_move_insn (blocks, temp);
4287
4288 emit_cmp_and_jump_insns (blocks, const0_rtx,
4289 EQ, NULL_RTX, mode, 1, loop_end_label);
4290
4291 emit_label (loop_start_label);
4292
4293 if (TARGET_Z10
4294 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 1024))
4295 {
4296 /* Issue a write prefetch for the +4 cache line. */
4297 rtx prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, dst_addr,
4298 GEN_INT (1024)),
4299 const1_rtx, const0_rtx);
4300 emit_insn (prefetch);
4301 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4302 }
4303
4304 if (val == const0_rtx)
4305 emit_insn (gen_clrmem_short (dst, GEN_INT (255)));
4306 else
4307 emit_insn (gen_movmem_short (dstp1, dst, GEN_INT (255)));
4308 s390_load_address (dst_addr,
4309 gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (256)));
4310
4311 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
4312 OPTAB_DIRECT);
4313 if (temp != blocks)
4314 emit_move_insn (blocks, temp);
4315
4316 emit_cmp_and_jump_insns (blocks, const0_rtx,
4317 EQ, NULL_RTX, mode, 1, loop_end_label);
4318
4319 emit_jump (loop_start_label);
4320 emit_label (loop_end_label);
4321
4322 if (val == const0_rtx)
4323 emit_insn (gen_clrmem_short (dst, convert_to_mode (Pmode, count, 1)));
4324 else
4325 emit_insn (gen_movmem_short (dstp1, dst, convert_to_mode (Pmode, count, 1)));
4326 emit_label (end_label);
4327 }
4328 }
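/* A small C-level illustration of the one-byte-overlap trick used
   above: after the first byte is stored, an overlapping forward copy
   DST+1 <- DST of LEN-1 bytes propagates that byte through the whole
   block.  This depends on strictly ascending byte-by-byte copying, as
   MVC guarantees; memcpy/memmove would not be a valid substitute.
   The helper name is invented for illustration.  */
#include <stddef.h>

static void
set_memory_by_overlap (unsigned char *dst, unsigned char val, size_t len)
{
  if (len == 0)
    return;

  dst[0] = val;                      /* seed the first byte */
  for (size_t i = 1; i < len; i++)   /* overlapping DST+1 <- DST copy */
    dst[i] = dst[i - 1];
}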
4329
4330 /* Emit code to compare LEN bytes at OP0 with those at OP1,
4331 and return the result in TARGET. */
4332
4333 bool
4334 s390_expand_cmpmem (rtx target, rtx op0, rtx op1, rtx len)
4335 {
4336 rtx ccreg = gen_rtx_REG (CCUmode, CC_REGNUM);
4337 rtx tmp;
4338
4339 /* When tuning for z10 or higher we rely on the Glibc functions to
4340 do the right thing. Inline code is generated only for constant
4341 lengths below 64k. */
4342 if (s390_tune >= PROCESSOR_2097_Z10
4343 && (GET_CODE (len) != CONST_INT || INTVAL (len) > (1<<16)))
4344 return false;
4345
4346 /* As the result of CMPINT is inverted compared to what we need,
4347 we have to swap the operands. */
4348 tmp = op0; op0 = op1; op1 = tmp;
4349
4350 if (GET_CODE (len) == CONST_INT && INTVAL (len) >= 0 && INTVAL (len) <= 256)
4351 {
4352 if (INTVAL (len) > 0)
4353 {
4354 emit_insn (gen_cmpmem_short (op0, op1, GEN_INT (INTVAL (len) - 1)));
4355 emit_insn (gen_cmpint (target, ccreg));
4356 }
4357 else
4358 emit_move_insn (target, const0_rtx);
4359 }
4360 else if (TARGET_MVCLE)
4361 {
4362 emit_insn (gen_cmpmem_long (op0, op1, convert_to_mode (Pmode, len, 1)));
4363 emit_insn (gen_cmpint (target, ccreg));
4364 }
4365 else
4366 {
4367 rtx addr0, addr1, count, blocks, temp;
4368 rtx loop_start_label = gen_label_rtx ();
4369 rtx loop_end_label = gen_label_rtx ();
4370 rtx end_label = gen_label_rtx ();
4371 enum machine_mode mode;
4372
4373 mode = GET_MODE (len);
4374 if (mode == VOIDmode)
4375 mode = Pmode;
4376
4377 addr0 = gen_reg_rtx (Pmode);
4378 addr1 = gen_reg_rtx (Pmode);
4379 count = gen_reg_rtx (mode);
4380 blocks = gen_reg_rtx (mode);
4381
4382 convert_move (count, len, 1);
4383 emit_cmp_and_jump_insns (count, const0_rtx,
4384 EQ, NULL_RTX, mode, 1, end_label);
4385
4386 emit_move_insn (addr0, force_operand (XEXP (op0, 0), NULL_RTX));
4387 emit_move_insn (addr1, force_operand (XEXP (op1, 0), NULL_RTX));
4388 op0 = change_address (op0, VOIDmode, addr0);
4389 op1 = change_address (op1, VOIDmode, addr1);
4390
4391 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
4392 OPTAB_DIRECT);
4393 if (temp != count)
4394 emit_move_insn (count, temp);
4395
4396 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
4397 OPTAB_DIRECT);
4398 if (temp != blocks)
4399 emit_move_insn (blocks, temp);
4400
4401 emit_cmp_and_jump_insns (blocks, const0_rtx,
4402 EQ, NULL_RTX, mode, 1, loop_end_label);
4403
4404 emit_label (loop_start_label);
4405
4406 if (TARGET_Z10
4407 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 512))
4408 {
4409 rtx prefetch;
4410
4411 /* Issue a read prefetch for the +2 cache line of operand 1. */
4412 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, addr0, GEN_INT (512)),
4413 const0_rtx, const0_rtx);
4414 emit_insn (prefetch);
4415 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4416
4417 /* Issue a read prefetch for the +2 cache line of operand 2. */
4418 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, addr1, GEN_INT (512)),
4419 const0_rtx, const0_rtx);
4420 emit_insn (prefetch);
4421 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4422 }
4423
4424 emit_insn (gen_cmpmem_short (op0, op1, GEN_INT (255)));
4425 temp = gen_rtx_NE (VOIDmode, ccreg, const0_rtx);
4426 temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
4427 gen_rtx_LABEL_REF (VOIDmode, end_label), pc_rtx);
4428 temp = gen_rtx_SET (VOIDmode, pc_rtx, temp);
4429 emit_jump_insn (temp);
4430
4431 s390_load_address (addr0,
4432 gen_rtx_PLUS (Pmode, addr0, GEN_INT (256)));
4433 s390_load_address (addr1,
4434 gen_rtx_PLUS (Pmode, addr1, GEN_INT (256)));
4435
4436 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
4437 OPTAB_DIRECT);
4438 if (temp != blocks)
4439 emit_move_insn (blocks, temp);
4440
4441 emit_cmp_and_jump_insns (blocks, const0_rtx,
4442 EQ, NULL_RTX, mode, 1, loop_end_label);
4443
4444 emit_jump (loop_start_label);
4445 emit_label (loop_end_label);
4446
4447 emit_insn (gen_cmpmem_short (op0, op1,
4448 convert_to_mode (Pmode, count, 1)));
4449 emit_label (end_label);
4450
4451 emit_insn (gen_cmpint (target, ccreg));
4452 }
4453 return true;
4454 }
4455
4456
4457 /* Expand conditional increment or decrement using alc/slb instructions.
4458 Should generate code setting DST to either SRC or SRC + INCREMENT,
4459 depending on the result of the comparison CMP_OP0 CMP_CODE CMP_OP1.
4460 Returns true if successful, false otherwise.
4461
4462 That makes it possible to implement some if-constructs without jumps e.g.:
4463 (borrow = CC0 | CC1 and carry = CC2 | CC3)
4464 unsigned int a, b, c;
4465 if (a < b) c++; -> CCU b > a -> CC2; c += carry;
4466 if (a < b) c--; -> CCL3 a - b -> borrow; c -= borrow;
4467 if (a <= b) c++; -> CCL3 b - a -> borrow; c += carry;
4468 if (a <= b) c--; -> CCU a <= b -> borrow; c -= borrow;
4469
4470 Checks for EQ and NE with a nonzero value need an additional xor e.g.:
4471 if (a == b) c++; -> CCL3 a ^= b; 0 - a -> borrow; c += carry;
4472 if (a == b) c--; -> CCU a ^= b; a <= 0 -> CC0 | CC1; c -= borrow;
4473 if (a != b) c++; -> CCU a ^= b; a > 0 -> CC2; c += carry;
4474 if (a != b) c--; -> CCL3 a ^= b; 0 - a -> borrow; c -= borrow; */
4475
4476 bool
4477 s390_expand_addcc (enum rtx_code cmp_code, rtx cmp_op0, rtx cmp_op1,
4478 rtx dst, rtx src, rtx increment)
4479 {
4480 enum machine_mode cmp_mode;
4481 enum machine_mode cc_mode;
4482 rtx op_res;
4483 rtx insn;
4484 rtvec p;
4485 int ret;
4486
4487 if ((GET_MODE (cmp_op0) == SImode || GET_MODE (cmp_op0) == VOIDmode)
4488 && (GET_MODE (cmp_op1) == SImode || GET_MODE (cmp_op1) == VOIDmode))
4489 cmp_mode = SImode;
4490 else if ((GET_MODE (cmp_op0) == DImode || GET_MODE (cmp_op0) == VOIDmode)
4491 && (GET_MODE (cmp_op1) == DImode || GET_MODE (cmp_op1) == VOIDmode))
4492 cmp_mode = DImode;
4493 else
4494 return false;
4495
4496 /* Try ADD LOGICAL WITH CARRY. */
4497 if (increment == const1_rtx)
4498 {
4499 /* Determine CC mode to use. */
4500 if (cmp_code == EQ || cmp_code == NE)
4501 {
4502 if (cmp_op1 != const0_rtx)
4503 {
4504 cmp_op0 = expand_simple_binop (cmp_mode, XOR, cmp_op0, cmp_op1,
4505 NULL_RTX, 0, OPTAB_WIDEN);
4506 cmp_op1 = const0_rtx;
4507 }
4508
4509 cmp_code = cmp_code == EQ ? LEU : GTU;
4510 }
4511
4512 if (cmp_code == LTU || cmp_code == LEU)
4513 {
4514 rtx tem = cmp_op0;
4515 cmp_op0 = cmp_op1;
4516 cmp_op1 = tem;
4517 cmp_code = swap_condition (cmp_code);
4518 }
4519
4520 switch (cmp_code)
4521 {
4522 case GTU:
4523 cc_mode = CCUmode;
4524 break;
4525
4526 case GEU:
4527 cc_mode = CCL3mode;
4528 break;
4529
4530 default:
4531 return false;
4532 }
4533
4534 /* Emit comparison instruction pattern. */
4535 if (!register_operand (cmp_op0, cmp_mode))
4536 cmp_op0 = force_reg (cmp_mode, cmp_op0);
4537
4538 insn = gen_rtx_SET (VOIDmode, gen_rtx_REG (cc_mode, CC_REGNUM),
4539 gen_rtx_COMPARE (cc_mode, cmp_op0, cmp_op1));
4540 /* We use insn_invalid_p here to add clobbers if required. */
4541 ret = insn_invalid_p (emit_insn (insn), false);
4542 gcc_assert (!ret);
4543
4544 /* Emit ALC instruction pattern. */
4545 op_res = gen_rtx_fmt_ee (cmp_code, GET_MODE (dst),
4546 gen_rtx_REG (cc_mode, CC_REGNUM),
4547 const0_rtx);
4548
4549 if (src != const0_rtx)
4550 {
4551 if (!register_operand (src, GET_MODE (dst)))
4552 src = force_reg (GET_MODE (dst), src);
4553
4554 op_res = gen_rtx_PLUS (GET_MODE (dst), op_res, src);
4555 op_res = gen_rtx_PLUS (GET_MODE (dst), op_res, const0_rtx);
4556 }
4557
4558 p = rtvec_alloc (2);
4559 RTVEC_ELT (p, 0) =
4560 gen_rtx_SET (VOIDmode, dst, op_res);
4561 RTVEC_ELT (p, 1) =
4562 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
4563 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
4564
4565 return true;
4566 }
4567
4568 /* Try SUBTRACT LOGICAL WITH BORROW. */
4569 if (increment == constm1_rtx)
4570 {
4571 /* Determine CC mode to use. */
4572 if (cmp_code == EQ || cmp_code == NE)
4573 {
4574 if (cmp_op1 != const0_rtx)
4575 {
4576 cmp_op0 = expand_simple_binop (cmp_mode, XOR, cmp_op0, cmp_op1,
4577 NULL_RTX, 0, OPTAB_WIDEN);
4578 cmp_op1 = const0_rtx;
4579 }
4580
4581 cmp_code = cmp_code == EQ ? LEU : GTU;
4582 }
4583
4584 if (cmp_code == GTU || cmp_code == GEU)
4585 {
4586 rtx tem = cmp_op0;
4587 cmp_op0 = cmp_op1;
4588 cmp_op1 = tem;
4589 cmp_code = swap_condition (cmp_code);
4590 }
4591
4592 switch (cmp_code)
4593 {
4594 case LEU:
4595 cc_mode = CCUmode;
4596 break;
4597
4598 case LTU:
4599 cc_mode = CCL3mode;
4600 break;
4601
4602 default:
4603 return false;
4604 }
4605
4606 /* Emit comparison instruction pattern. */
4607 if (!register_operand (cmp_op0, cmp_mode))
4608 cmp_op0 = force_reg (cmp_mode, cmp_op0);
4609
4610 insn = gen_rtx_SET (VOIDmode, gen_rtx_REG (cc_mode, CC_REGNUM),
4611 gen_rtx_COMPARE (cc_mode, cmp_op0, cmp_op1));
4612 /* We use insn_invalid_p here to add clobbers if required. */
4613 ret = insn_invalid_p (emit_insn (insn), false);
4614 gcc_assert (!ret);
4615
4616 /* Emit SLB instruction pattern. */
4617 if (!register_operand (src, GET_MODE (dst)))
4618 src = force_reg (GET_MODE (dst), src);
4619
4620 op_res = gen_rtx_MINUS (GET_MODE (dst),
4621 gen_rtx_MINUS (GET_MODE (dst), src, const0_rtx),
4622 gen_rtx_fmt_ee (cmp_code, GET_MODE (dst),
4623 gen_rtx_REG (cc_mode, CC_REGNUM),
4624 const0_rtx));
4625 p = rtvec_alloc (2);
4626 RTVEC_ELT (p, 0) =
4627 gen_rtx_SET (VOIDmode, dst, op_res);
4628 RTVEC_ELT (p, 1) =
4629 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
4630 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
4631
4632 return true;
4633 }
4634
4635 return false;
4636 }
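/* A minimal C-level illustration of the branchless forms described in
   the comment above s390_expand_addcc: the 0/1 result of the unsigned
   comparison is folded into the addition or subtraction instead of
   taking a conditional jump.  The function names are invented for
   illustration.  */

static unsigned int
conditional_increment (unsigned int a, unsigned int b, unsigned int c)
{
  /* if (a < b) c++;  ->  add the carry produced by the compare.  */
  return c + (a < b);
}

static unsigned int
conditional_decrement (unsigned int a, unsigned int b, unsigned int c)
{
  /* if (a < b) c--;  ->  subtract the borrow produced by a - b.  */
  return c - (a < b);
}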
4637
4638 /* Expand code for the insv template. Return true if successful. */
4639
4640 bool
4641 s390_expand_insv (rtx dest, rtx op1, rtx op2, rtx src)
4642 {
4643 int bitsize = INTVAL (op1);
4644 int bitpos = INTVAL (op2);
4645 enum machine_mode mode = GET_MODE (dest);
4646 enum machine_mode smode;
4647 int smode_bsize, mode_bsize;
4648 rtx op, clobber;
4649
4650 /* Generate INSERT IMMEDIATE (IILL et al). */
4651 /* (set (ze (reg)) (const_int)). */
4652 if (TARGET_ZARCH
4653 && register_operand (dest, word_mode)
4654 && (bitpos % 16) == 0
4655 && (bitsize % 16) == 0
4656 && const_int_operand (src, VOIDmode))
4657 {
4658 HOST_WIDE_INT val = INTVAL (src);
4659 int regpos = bitpos + bitsize;
4660
4661 while (regpos > bitpos)
4662 {
4663 enum machine_mode putmode;
4664 int putsize;
4665
4666 if (TARGET_EXTIMM && (regpos % 32 == 0) && (regpos >= bitpos + 32))
4667 putmode = SImode;
4668 else
4669 putmode = HImode;
4670
4671 putsize = GET_MODE_BITSIZE (putmode);
4672 regpos -= putsize;
4673 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest,
4674 GEN_INT (putsize),
4675 GEN_INT (regpos)),
4676 gen_int_mode (val, putmode));
4677 val >>= putsize;
4678 }
4679 gcc_assert (regpos == bitpos);
4680 return true;
4681 }
4682
4683 smode = smallest_mode_for_size (bitsize, MODE_INT);
4684 smode_bsize = GET_MODE_BITSIZE (smode);
4685 mode_bsize = GET_MODE_BITSIZE (mode);
4686
4687 /* Generate STORE CHARACTERS UNDER MASK (STCM et al). */
4688 if (bitpos == 0
4689 && (bitsize % BITS_PER_UNIT) == 0
4690 && MEM_P (dest)
4691 && (register_operand (src, word_mode)
4692 || const_int_operand (src, VOIDmode)))
4693 {
4694 /* Emit standard pattern if possible. */
4695 if (smode_bsize == bitsize)
4696 {
4697 emit_move_insn (adjust_address (dest, smode, 0),
4698 gen_lowpart (smode, src));
4699 return true;
4700 }
4701
4702 /* (set (ze (mem)) (const_int)). */
4703 else if (const_int_operand (src, VOIDmode))
4704 {
4705 int size = bitsize / BITS_PER_UNIT;
4706 rtx src_mem = adjust_address (force_const_mem (word_mode, src),
4707 BLKmode,
4708 UNITS_PER_WORD - size);
4709
4710 dest = adjust_address (dest, BLKmode, 0);
4711 set_mem_size (dest, size);
4712 s390_expand_movmem (dest, src_mem, GEN_INT (size));
4713 return true;
4714 }
4715
4716 /* (set (ze (mem)) (reg)). */
4717 else if (register_operand (src, word_mode))
4718 {
4719 if (bitsize <= 32)
4720 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest, op1,
4721 const0_rtx), src);
4722 else
4723 {
4724 /* Emit st,stcmh sequence. */
4725 int stcmh_width = bitsize - 32;
4726 int size = stcmh_width / BITS_PER_UNIT;
4727
4728 emit_move_insn (adjust_address (dest, SImode, size),
4729 gen_lowpart (SImode, src));
4730 set_mem_size (dest, size);
4731 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest,
4732 GEN_INT (stcmh_width),
4733 const0_rtx),
4734 gen_rtx_LSHIFTRT (word_mode, src, GEN_INT (32)));
4735 }
4736 return true;
4737 }
4738 }
4739
4740 /* Generate INSERT CHARACTERS UNDER MASK (IC, ICM et al). */
4741 if ((bitpos % BITS_PER_UNIT) == 0
4742 && (bitsize % BITS_PER_UNIT) == 0
4743 && (bitpos & 32) == ((bitpos + bitsize - 1) & 32)
4744 && MEM_P (src)
4745 && (mode == DImode || mode == SImode)
4746 && register_operand (dest, mode))
4747 {
4748 /* Emit a strict_low_part pattern if possible. */
4749 if (smode_bsize == bitsize && bitpos == mode_bsize - smode_bsize)
4750 {
4751 op = gen_rtx_STRICT_LOW_PART (VOIDmode, gen_lowpart (smode, dest));
4752 op = gen_rtx_SET (VOIDmode, op, gen_lowpart (smode, src));
4753 clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
4754 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clobber)));
4755 return true;
4756 }
4757
4758 /* ??? There are more powerful versions of ICM that are not
4759 completely represented in the md file. */
4760 }
4761
4762 /* For z10, generate ROTATE THEN INSERT SELECTED BITS (RISBG et al). */
4763 if (TARGET_Z10 && (mode == DImode || mode == SImode))
4764 {
4765 enum machine_mode mode_s = GET_MODE (src);
4766
4767 if (mode_s == VOIDmode)
4768 {
4769 /* Assume const_int etc already in the proper mode. */
4770 src = force_reg (mode, src);
4771 }
4772 else if (mode_s != mode)
4773 {
4774 gcc_assert (GET_MODE_BITSIZE (mode_s) >= bitsize);
4775 src = force_reg (mode_s, src);
4776 src = gen_lowpart (mode, src);
4777 }
4778
4779 op = gen_rtx_ZERO_EXTRACT (mode, dest, op1, op2),
4780 op = gen_rtx_SET (VOIDmode, op, src);
4781
4782 if (!TARGET_ZEC12)
4783 {
4784 clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
4785 op = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clobber));
4786 }
4787 emit_insn (op);
4788
4789 return true;
4790 }
4791
4792 return false;
4793 }
4794
4795 /* A subroutine of s390_expand_cs_hqi and s390_expand_atomic which returns a
4796 register that holds VAL of mode MODE shifted by COUNT bits. */
4797
4798 static inline rtx
4799 s390_expand_mask_and_shift (rtx val, enum machine_mode mode, rtx count)
4800 {
4801 val = expand_simple_binop (SImode, AND, val, GEN_INT (GET_MODE_MASK (mode)),
4802 NULL_RTX, 1, OPTAB_DIRECT);
4803 return expand_simple_binop (SImode, ASHIFT, val, count,
4804 NULL_RTX, 1, OPTAB_DIRECT);
4805 }
4806
4807 /* Structure to hold the initial parameters for a compare_and_swap operation
4808 in HImode and QImode. */
4809
4810 struct alignment_context
4811 {
4812 rtx memsi; /* SI aligned memory location. */
4813 rtx shift; /* Bit offset with regard to lsb. */
4814 rtx modemask; /* Mask of the HQImode shifted by SHIFT bits. */
4815 rtx modemaski; /* ~modemask */
4816 bool aligned; /* True if memory is aligned, false otherwise. */
4817 };
4818
4819 /* A subroutine of s390_expand_cs_hqi and s390_expand_atomic to initialize
4820 structure AC so the operation can be simplified transparently when the
4821 memory alignment is known to be at least 32 bits. MEM is the memory
4822 location for the actual operation and MODE its mode. */
4823
4824 static void
4825 init_alignment_context (struct alignment_context *ac, rtx mem,
4826 enum machine_mode mode)
4827 {
4828 ac->shift = GEN_INT (GET_MODE_SIZE (SImode) - GET_MODE_SIZE (mode));
4829 ac->aligned = (MEM_ALIGN (mem) >= GET_MODE_BITSIZE (SImode));
4830
4831 if (ac->aligned)
4832 ac->memsi = adjust_address (mem, SImode, 0); /* Memory is aligned. */
4833 else
4834 {
4835 /* Alignment is unknown. */
4836 rtx byteoffset, addr, align;
4837
4838 /* Force the address into a register. */
4839 addr = force_reg (Pmode, XEXP (mem, 0));
4840
4841 /* Align it to SImode. */
4842 align = expand_simple_binop (Pmode, AND, addr,
4843 GEN_INT (-GET_MODE_SIZE (SImode)),
4844 NULL_RTX, 1, OPTAB_DIRECT);
4845 /* Generate MEM. */
4846 ac->memsi = gen_rtx_MEM (SImode, align);
4847 MEM_VOLATILE_P (ac->memsi) = MEM_VOLATILE_P (mem);
4848 set_mem_alias_set (ac->memsi, ALIAS_SET_MEMORY_BARRIER);
4849 set_mem_align (ac->memsi, GET_MODE_BITSIZE (SImode));
4850
4851 /* Calculate shiftcount. */
4852 byteoffset = expand_simple_binop (Pmode, AND, addr,
4853 GEN_INT (GET_MODE_SIZE (SImode) - 1),
4854 NULL_RTX, 1, OPTAB_DIRECT);
4855 /* As we already have some offset, evaluate the remaining distance. */
4856 ac->shift = expand_simple_binop (SImode, MINUS, ac->shift, byteoffset,
4857 NULL_RTX, 1, OPTAB_DIRECT);
4858 }
4859
4860 /* Shift is the byte count, but we need the bitcount. */
4861 ac->shift = expand_simple_binop (SImode, ASHIFT, ac->shift, GEN_INT (3),
4862 NULL_RTX, 1, OPTAB_DIRECT);
4863
4864 /* Calculate masks. */
4865 ac->modemask = expand_simple_binop (SImode, ASHIFT,
4866 GEN_INT (GET_MODE_MASK (mode)),
4867 ac->shift, NULL_RTX, 1, OPTAB_DIRECT);
4868 ac->modemaski = expand_simple_unop (SImode, NOT, ac->modemask,
4869 NULL_RTX, 1);
4870 }
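/* An illustrative sketch, assuming big-endian byte order as on S/390
   and a field that does not cross its word, of the shift and mask
   init_alignment_context computes for a sub-word access of SIZE bytes
   (1 or 2) at address ADDR.  The type and helper names are invented
   for illustration.  */
#include <stdint.h>

struct subword_field
{
  uintptr_t aligned_addr;   /* address of the containing SImode word */
  unsigned int shift;       /* bit offset of the field from the LSB */
  uint32_t mask;            /* mask of the field within the word */
};

static struct subword_field
locate_subword_field (uintptr_t addr, unsigned int size)
{
  struct subword_field f;
  unsigned int byteoffset = addr & 3;

  f.aligned_addr = addr & ~(uintptr_t) 3;
  f.shift = (4 - size - byteoffset) * 8;       /* big-endian placement */
  f.mask = (uint32_t) ((1u << (size * 8)) - 1) << f.shift;
  return f;
}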
4871
4872 /* A subroutine of s390_expand_cs_hqi. Insert INS into VAL. If possible,
4873 use a single insv insn into SEQ2. Otherwise, put prep insns in SEQ1 and
4874 perform the merge in SEQ2. */
4875
4876 static rtx
4877 s390_two_part_insv (struct alignment_context *ac, rtx *seq1, rtx *seq2,
4878 enum machine_mode mode, rtx val, rtx ins)
4879 {
4880 rtx tmp;
4881
4882 if (ac->aligned)
4883 {
4884 start_sequence ();
4885 tmp = copy_to_mode_reg (SImode, val);
4886 if (s390_expand_insv (tmp, GEN_INT (GET_MODE_BITSIZE (mode)),
4887 const0_rtx, ins))
4888 {
4889 *seq1 = NULL;
4890 *seq2 = get_insns ();
4891 end_sequence ();
4892 return tmp;
4893 }
4894 end_sequence ();
4895 }
4896
4897 /* Failed to use insv. Generate a two part shift and mask. */
4898 start_sequence ();
4899 tmp = s390_expand_mask_and_shift (ins, mode, ac->shift);
4900 *seq1 = get_insns ();
4901 end_sequence ();
4902
4903 start_sequence ();
4904 tmp = expand_simple_binop (SImode, IOR, tmp, val, NULL_RTX, 1, OPTAB_DIRECT);
4905 *seq2 = get_insns ();
4906 end_sequence ();
4907
4908 return tmp;
4909 }
4910
4911 /* Expand an atomic compare and swap operation for HImode and QImode. MEM is
4912 the memory location, CMP the old value to compare MEM with and NEW_RTX the
4913 value to set if CMP == MEM. */
4914
4915 void
4916 s390_expand_cs_hqi (enum machine_mode mode, rtx btarget, rtx vtarget, rtx mem,
4917 rtx cmp, rtx new_rtx, bool is_weak)
4918 {
4919 struct alignment_context ac;
4920 rtx cmpv, newv, val, cc, seq0, seq1, seq2, seq3;
4921 rtx res = gen_reg_rtx (SImode);
4922 rtx csloop = NULL, csend = NULL;
4923
4924 gcc_assert (MEM_P (mem));
4925
4926 init_alignment_context (&ac, mem, mode);
4927
4928 /* Load full word. Subsequent loads are performed by CS. */
4929 val = expand_simple_binop (SImode, AND, ac.memsi, ac.modemaski,
4930 NULL_RTX, 1, OPTAB_DIRECT);
4931
4932 /* Prepare insertions of cmp and new_rtx into the loaded value. When
4933 possible, we try to use insv to make this happen efficiently. If
4934 that fails we'll generate code both inside and outside the loop. */
4935 cmpv = s390_two_part_insv (&ac, &seq0, &seq2, mode, val, cmp);
4936 newv = s390_two_part_insv (&ac, &seq1, &seq3, mode, val, new_rtx);
4937
4938 if (seq0)
4939 emit_insn (seq0);
4940 if (seq1)
4941 emit_insn (seq1);
4942
4943 /* Start CS loop. */
4944 if (!is_weak)
4945 {
4946 /* Begin assuming success. */
4947 emit_move_insn (btarget, const1_rtx);
4948
4949 csloop = gen_label_rtx ();
4950 csend = gen_label_rtx ();
4951 emit_label (csloop);
4952 }
4953
4954 /* val = "<mem>00..0<mem>"
4955 * cmp = "00..0<cmp>00..0"
4956 * new = "00..0<new>00..0"
4957 */
4958
4959 emit_insn (seq2);
4960 emit_insn (seq3);
4961
4962 cc = s390_emit_compare_and_swap (EQ, res, ac.memsi, cmpv, newv);
4963 if (is_weak)
4964 emit_insn (gen_cstorecc4 (btarget, cc, XEXP (cc, 0), XEXP (cc, 1)));
4965 else
4966 {
4967 rtx tmp;
4968
4969 /* Jump to end if we're done (likely?). */
4970 s390_emit_jump (csend, cc);
4971
4972 /* Check for changes outside mode, and loop internally if so.
4973 Arrange the moves so that the compare is adjacent to the
4974 branch so that we can generate CRJ. */
4975 tmp = copy_to_reg (val);
4976 force_expand_binop (SImode, and_optab, res, ac.modemaski, val,
4977 1, OPTAB_DIRECT);
4978 cc = s390_emit_compare (NE, val, tmp);
4979 s390_emit_jump (csloop, cc);
4980
4981 /* Failed. */
4982 emit_move_insn (btarget, const0_rtx);
4983 emit_label (csend);
4984 }
4985
4986 /* Return the correct part of the bitfield. */
4987 convert_move (vtarget, expand_simple_binop (SImode, LSHIFTRT, res, ac.shift,
4988 NULL_RTX, 1, OPTAB_DIRECT), 1);
4989 }
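/* A hedged, standalone illustration of the technique expanded above:
   a byte-sized compare-and-swap emulated with a full-word CAS loop.
   Only the masking scheme mirrors the expander; the helper name and
   the use of the __atomic builtins are illustrative assumptions.  */
#include <stdbool.h>
#include <stdint.h>

static bool
byte_cas_via_word (uint32_t *word, unsigned int shift,
                   uint8_t expected, uint8_t desired)
{
  uint32_t mask = (uint32_t) 0xff << shift;

  for (;;)
    {
      uint32_t old = __atomic_load_n (word, __ATOMIC_RELAXED);

      if (((old & mask) >> shift) != expected)
        return false;                   /* the field itself differs */

      uint32_t new_word = (old & ~mask) | ((uint32_t) desired << shift);
      if (__atomic_compare_exchange_n (word, &old, new_word, false,
                                       __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
        return true;                    /* CS succeeded */

      /* Something in the word changed; reload and re-check.  */
    }
}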
4990
4991 /* Expand an atomic operation CODE of mode MODE. MEM is the memory location
4992 and VAL the value to play with. If AFTER is true then store the value
4993 MEM holds after the operation, if AFTER is false then store the value MEM
4994 holds before the operation. If TARGET is zero then discard that value, else
4995 store it to TARGET. */
4996
4997 void
4998 s390_expand_atomic (enum machine_mode mode, enum rtx_code code,
4999 rtx target, rtx mem, rtx val, bool after)
5000 {
5001 struct alignment_context ac;
5002 rtx cmp;
5003 rtx new_rtx = gen_reg_rtx (SImode);
5004 rtx orig = gen_reg_rtx (SImode);
5005 rtx csloop = gen_label_rtx ();
5006
5007 gcc_assert (!target || register_operand (target, VOIDmode));
5008 gcc_assert (MEM_P (mem));
5009
5010 init_alignment_context (&ac, mem, mode);
5011
5012 /* Shift val to the correct bit positions.
5013 Preserve "icm", but prevent "ex icm". */
5014 if (!(ac.aligned && code == SET && MEM_P (val)))
5015 val = s390_expand_mask_and_shift (val, mode, ac.shift);
5016
5017 /* Further preparation insns. */
5018 if (code == PLUS || code == MINUS)
5019 emit_move_insn (orig, val);
5020 else if (code == MULT || code == AND) /* val = "11..1<val>11..1" */
5021 val = expand_simple_binop (SImode, XOR, val, ac.modemaski,
5022 NULL_RTX, 1, OPTAB_DIRECT);
5023
5024 /* Load full word. Subsequent loads are performed by CS. */
5025 cmp = force_reg (SImode, ac.memsi);
5026
5027 /* Start CS loop. */
5028 emit_label (csloop);
5029 emit_move_insn (new_rtx, cmp);
5030
5031 /* Patch new with val at correct position. */
5032 switch (code)
5033 {
5034 case PLUS:
5035 case MINUS:
5036 val = expand_simple_binop (SImode, code, new_rtx, orig,
5037 NULL_RTX, 1, OPTAB_DIRECT);
5038 val = expand_simple_binop (SImode, AND, val, ac.modemask,
5039 NULL_RTX, 1, OPTAB_DIRECT);
5040 /* FALLTHRU */
5041 case SET:
5042 if (ac.aligned && MEM_P (val))
5043 store_bit_field (new_rtx, GET_MODE_BITSIZE (mode), 0,
5044 0, 0, SImode, val);
5045 else
5046 {
5047 new_rtx = expand_simple_binop (SImode, AND, new_rtx, ac.modemaski,
5048 NULL_RTX, 1, OPTAB_DIRECT);
5049 new_rtx = expand_simple_binop (SImode, IOR, new_rtx, val,
5050 NULL_RTX, 1, OPTAB_DIRECT);
5051 }
5052 break;
5053 case AND:
5054 case IOR:
5055 case XOR:
5056 new_rtx = expand_simple_binop (SImode, code, new_rtx, val,
5057 NULL_RTX, 1, OPTAB_DIRECT);
5058 break;
5059 case MULT: /* NAND */
5060 new_rtx = expand_simple_binop (SImode, AND, new_rtx, val,
5061 NULL_RTX, 1, OPTAB_DIRECT);
5062 new_rtx = expand_simple_binop (SImode, XOR, new_rtx, ac.modemask,
5063 NULL_RTX, 1, OPTAB_DIRECT);
5064 break;
5065 default:
5066 gcc_unreachable ();
5067 }
5068
5069 s390_emit_jump (csloop, s390_emit_compare_and_swap (NE, cmp,
5070 ac.memsi, cmp, new_rtx));
5071
5072 /* Return the correct part of the bitfield. */
5073 if (target)
5074 convert_move (target, expand_simple_binop (SImode, LSHIFTRT,
5075 after ? new_rtx : cmp, ac.shift,
5076 NULL_RTX, 1, OPTAB_DIRECT), 1);
5077 }
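/* A hedged C-level sketch of the loop shape expanded above, shown for
   a byte-sized atomic add: the containing word is retried through the
   compare-and-swap until it succeeds, and either the old or the new
   field value is returned depending on AFTER.  The helper name and
   the __atomic builtins are illustrative assumptions.  */
#include <stdbool.h>
#include <stdint.h>

static uint8_t
byte_atomic_add_via_word (uint32_t *word, unsigned int shift,
                          uint8_t val, bool after)
{
  uint32_t mask = (uint32_t) 0xff << shift;
  uint32_t cmp = __atomic_load_n (word, __ATOMIC_RELAXED);
  uint32_t new_word;

  do
    {
      /* Patch the updated field into a copy of the old word.  */
      uint32_t field = (cmp + ((uint32_t) val << shift)) & mask;
      new_word = (cmp & ~mask) | field;
    }
  while (!__atomic_compare_exchange_n (word, &cmp, new_word, false,
                                       __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST));

  return (uint8_t) (((after ? new_word : cmp) & mask) >> shift);
}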
5078
5079 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
5080 We need to emit DTP-relative relocations. */
5081
5082 static void s390_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
5083
5084 static void
5085 s390_output_dwarf_dtprel (FILE *file, int size, rtx x)
5086 {
5087 switch (size)
5088 {
5089 case 4:
5090 fputs ("\t.long\t", file);
5091 break;
5092 case 8:
5093 fputs ("\t.quad\t", file);
5094 break;
5095 default:
5096 gcc_unreachable ();
5097 }
5098 output_addr_const (file, x);
5099 fputs ("@DTPOFF", file);
5100 }
5101
5102 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
5103 /* Implement TARGET_MANGLE_TYPE. */
5104
5105 static const char *
5106 s390_mangle_type (const_tree type)
5107 {
5108 if (TYPE_MAIN_VARIANT (type) == long_double_type_node
5109 && TARGET_LONG_DOUBLE_128)
5110 return "g";
5111
5112 /* For all other types, use normal C++ mangling. */
5113 return NULL;
5114 }
5115 #endif
5116
5117 /* In the name of slightly smaller debug output, and to cater to
5118 general assembler lossage, recognize various UNSPEC sequences
5119 and turn them back into a direct symbol reference. */
5120
5121 static rtx
5122 s390_delegitimize_address (rtx orig_x)
5123 {
5124 rtx x, y;
5125
5126 orig_x = delegitimize_mem_from_attrs (orig_x);
5127 x = orig_x;
5128
5129 /* Extract the symbol ref from:
5130 (plus:SI (reg:SI 12 %r12)
5131 (const:SI (unspec:SI [(symbol_ref/f:SI ("*.LC0"))]
5132 UNSPEC_GOTOFF/PLTOFF)))
5133 and
5134 (plus:SI (reg:SI 12 %r12)
5135 (const:SI (plus:SI (unspec:SI [(symbol_ref:SI ("L"))]
5136 UNSPEC_GOTOFF/PLTOFF)
5137 (const_int 4 [0x4])))) */
5138 if (GET_CODE (x) == PLUS
5139 && REG_P (XEXP (x, 0))
5140 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM
5141 && GET_CODE (XEXP (x, 1)) == CONST)
5142 {
5143 HOST_WIDE_INT offset = 0;
5144
5145 /* The const operand. */
5146 y = XEXP (XEXP (x, 1), 0);
5147
5148 if (GET_CODE (y) == PLUS
5149 && GET_CODE (XEXP (y, 1)) == CONST_INT)
5150 {
5151 offset = INTVAL (XEXP (y, 1));
5152 y = XEXP (y, 0);
5153 }
5154
5155 if (GET_CODE (y) == UNSPEC
5156 && (XINT (y, 1) == UNSPEC_GOTOFF
5157 || XINT (y, 1) == UNSPEC_PLTOFF))
5158 return plus_constant (Pmode, XVECEXP (y, 0, 0), offset);
5159 }
5160
5161 if (GET_CODE (x) != MEM)
5162 return orig_x;
5163
5164 x = XEXP (x, 0);
5165 if (GET_CODE (x) == PLUS
5166 && GET_CODE (XEXP (x, 1)) == CONST
5167 && GET_CODE (XEXP (x, 0)) == REG
5168 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM)
5169 {
5170 y = XEXP (XEXP (x, 1), 0);
5171 if (GET_CODE (y) == UNSPEC
5172 && XINT (y, 1) == UNSPEC_GOT)
5173 y = XVECEXP (y, 0, 0);
5174 else
5175 return orig_x;
5176 }
5177 else if (GET_CODE (x) == CONST)
5178 {
5179 /* Extract the symbol ref from:
5180 (mem:QI (const:DI (unspec:DI [(symbol_ref:DI ("foo"))]
5181 UNSPEC_PLT/GOTENT))) */
5182
5183 y = XEXP (x, 0);
5184 if (GET_CODE (y) == UNSPEC
5185 && (XINT (y, 1) == UNSPEC_GOTENT
5186 || XINT (y, 1) == UNSPEC_PLT))
5187 y = XVECEXP (y, 0, 0);
5188 else
5189 return orig_x;
5190 }
5191 else
5192 return orig_x;
5193
5194 if (GET_MODE (orig_x) != Pmode)
5195 {
5196 if (GET_MODE (orig_x) == BLKmode)
5197 return orig_x;
5198 y = lowpart_subreg (GET_MODE (orig_x), y, Pmode);
5199 if (y == NULL_RTX)
5200 return orig_x;
5201 }
5202 return y;
5203 }
5204
5205 /* Output operand OP to stdio stream FILE.
5206 OP is an address (register + offset) which is not used to address data;
5207 instead the rightmost bits are interpreted as the value. */
5208
5209 static void
5210 print_shift_count_operand (FILE *file, rtx op)
5211 {
5212 HOST_WIDE_INT offset;
5213 rtx base;
5214
5215 /* Extract base register and offset. */
5216 if (!s390_decompose_shift_count (op, &base, &offset))
5217 gcc_unreachable ();
5218
5219 /* Sanity check. */
5220 if (base)
5221 {
5222 gcc_assert (GET_CODE (base) == REG);
5223 gcc_assert (REGNO (base) < FIRST_PSEUDO_REGISTER);
5224 gcc_assert (REGNO_REG_CLASS (REGNO (base)) == ADDR_REGS);
5225 }
5226
5227 /* Offsets are restricted to twelve bits. */
5228 fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset & ((1 << 12) - 1));
5229 if (base)
5230 fprintf (file, "(%s)", reg_names[REGNO (base)]);
5231 }
5232
5233 /* See 'get_some_local_dynamic_name'. */
5234
5235 static int
5236 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
5237 {
5238 rtx x = *px;
5239
5240 if (GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
5241 {
5242 x = get_pool_constant (x);
5243 return for_each_rtx (&x, get_some_local_dynamic_name_1, 0);
5244 }
5245
5246 if (GET_CODE (x) == SYMBOL_REF
5247 && tls_symbolic_operand (x) == TLS_MODEL_LOCAL_DYNAMIC)
5248 {
5249 cfun->machine->some_ld_name = XSTR (x, 0);
5250 return 1;
5251 }
5252
5253 return 0;
5254 }
5255
5256 /* Locate some local-dynamic symbol still in use by this function
5257 so that we can print its name in local-dynamic base patterns. */
5258
5259 static const char *
5260 get_some_local_dynamic_name (void)
5261 {
5262 rtx insn;
5263
5264 if (cfun->machine->some_ld_name)
5265 return cfun->machine->some_ld_name;
5266
5267 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
5268 if (INSN_P (insn)
5269 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
5270 return cfun->machine->some_ld_name;
5271
5272 gcc_unreachable ();
5273 }
5274
5275 /* Output machine-dependent UNSPECs occurring in address constant X
5276 in assembler syntax to stdio stream FILE. Returns true if the
5277 constant X could be recognized, false otherwise. */
5278
5279 static bool
5280 s390_output_addr_const_extra (FILE *file, rtx x)
5281 {
5282 if (GET_CODE (x) == UNSPEC && XVECLEN (x, 0) == 1)
5283 switch (XINT (x, 1))
5284 {
5285 case UNSPEC_GOTENT:
5286 output_addr_const (file, XVECEXP (x, 0, 0));
5287 fprintf (file, "@GOTENT");
5288 return true;
5289 case UNSPEC_GOT:
5290 output_addr_const (file, XVECEXP (x, 0, 0));
5291 fprintf (file, "@GOT");
5292 return true;
5293 case UNSPEC_GOTOFF:
5294 output_addr_const (file, XVECEXP (x, 0, 0));
5295 fprintf (file, "@GOTOFF");
5296 return true;
5297 case UNSPEC_PLT:
5298 output_addr_const (file, XVECEXP (x, 0, 0));
5299 fprintf (file, "@PLT");
5300 return true;
5301 case UNSPEC_PLTOFF:
5302 output_addr_const (file, XVECEXP (x, 0, 0));
5303 fprintf (file, "@PLTOFF");
5304 return true;
5305 case UNSPEC_TLSGD:
5306 output_addr_const (file, XVECEXP (x, 0, 0));
5307 fprintf (file, "@TLSGD");
5308 return true;
5309 case UNSPEC_TLSLDM:
5310 assemble_name (file, get_some_local_dynamic_name ());
5311 fprintf (file, "@TLSLDM");
5312 return true;
5313 case UNSPEC_DTPOFF:
5314 output_addr_const (file, XVECEXP (x, 0, 0));
5315 fprintf (file, "@DTPOFF");
5316 return true;
5317 case UNSPEC_NTPOFF:
5318 output_addr_const (file, XVECEXP (x, 0, 0));
5319 fprintf (file, "@NTPOFF");
5320 return true;
5321 case UNSPEC_GOTNTPOFF:
5322 output_addr_const (file, XVECEXP (x, 0, 0));
5323 fprintf (file, "@GOTNTPOFF");
5324 return true;
5325 case UNSPEC_INDNTPOFF:
5326 output_addr_const (file, XVECEXP (x, 0, 0));
5327 fprintf (file, "@INDNTPOFF");
5328 return true;
5329 }
5330
5331 if (GET_CODE (x) == UNSPEC && XVECLEN (x, 0) == 2)
5332 switch (XINT (x, 1))
5333 {
5334 case UNSPEC_POOL_OFFSET:
5335 x = gen_rtx_MINUS (GET_MODE (x), XVECEXP (x, 0, 0), XVECEXP (x, 0, 1));
5336 output_addr_const (file, x);
5337 return true;
5338 }
5339 return false;
5340 }
5341
5342 /* Output address operand ADDR in assembler syntax to
5343 stdio stream FILE. */
5344
5345 void
5346 print_operand_address (FILE *file, rtx addr)
5347 {
5348 struct s390_address ad;
5349
5350 if (s390_loadrelative_operand_p (addr, NULL, NULL))
5351 {
5352 if (!TARGET_Z10)
5353 {
5354 output_operand_lossage ("symbolic memory references are "
5355 "only supported on z10 or later");
5356 return;
5357 }
5358 output_addr_const (file, addr);
5359 return;
5360 }
5361
5362 if (!s390_decompose_address (addr, &ad)
5363 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
5364 || (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx))))
5365 output_operand_lossage ("cannot decompose address");
5366
5367 if (ad.disp)
5368 output_addr_const (file, ad.disp);
5369 else
5370 fprintf (file, "0");
5371
5372 if (ad.base && ad.indx)
5373 fprintf (file, "(%s,%s)", reg_names[REGNO (ad.indx)],
5374 reg_names[REGNO (ad.base)]);
5375 else if (ad.base)
5376 fprintf (file, "(%s)", reg_names[REGNO (ad.base)]);
5377 }
5378
5379 /* Output operand X in assembler syntax to stdio stream FILE.
5380 CODE specified the format flag. The following format flags
5381 are recognized:
5382
5383 'C': print opcode suffix for branch condition.
5384 'D': print opcode suffix for inverse branch condition.
5385 'E': print opcode suffix for branch on index instruction.
5386 'G': print the size of the operand in bytes.
5387 'J': print tls_load/tls_gdcall/tls_ldcall suffix
5388 'M': print the second word of a TImode operand.
5389 'N': print the second word of a DImode operand.
5390 'O': print only the displacement of a memory reference.
5391 'R': print only the base register of a memory reference.
5392 'S': print S-type memory reference (base+displacement).
5393 'Y': print shift count operand.
5394
5395 'b': print integer X as if it's an unsigned byte.
5396 'c': print integer X as if it's a signed byte.
5397 'e': "end" of DImode contiguous bitmask X.
5398 'f': "end" of SImode contiguous bitmask X.
5399 'h': print integer X as if it's a signed halfword.
5400 'i': print the first nonzero HImode part of X.
5401 'j': print the first HImode part unequal to -1 of X.
5402 'k': print the first nonzero SImode part of X.
5403 'm': print the first SImode part unequal to -1 of X.
5404 'o': print integer X as if it's an unsigned 32bit word.
5405 's': "start" of DImode contiguous bitmask X.
5406 't': "start" of SImode contiguous bitmask X.
5407 'x': print integer X as if it's an unsigned halfword.
5408 */
5409
5410 void
5411 print_operand (FILE *file, rtx x, int code)
5412 {
5413 HOST_WIDE_INT ival;
5414
5415 switch (code)
5416 {
5417 case 'C':
5418 fprintf (file, s390_branch_condition_mnemonic (x, FALSE));
5419 return;
5420
5421 case 'D':
5422 fprintf (file, s390_branch_condition_mnemonic (x, TRUE));
5423 return;
5424
5425 case 'E':
5426 if (GET_CODE (x) == LE)
5427 fprintf (file, "l");
5428 else if (GET_CODE (x) == GT)
5429 fprintf (file, "h");
5430 else
5431 output_operand_lossage ("invalid comparison operator "
5432 "for 'E' output modifier");
5433 return;
5434
5435 case 'J':
5436 if (GET_CODE (x) == SYMBOL_REF)
5437 {
5438 fprintf (file, "%s", ":tls_load:");
5439 output_addr_const (file, x);
5440 }
5441 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD)
5442 {
5443 fprintf (file, "%s", ":tls_gdcall:");
5444 output_addr_const (file, XVECEXP (x, 0, 0));
5445 }
5446 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSLDM)
5447 {
5448 fprintf (file, "%s", ":tls_ldcall:");
5449 assemble_name (file, get_some_local_dynamic_name ());
5450 }
5451 else
5452 output_operand_lossage ("invalid reference for 'J' output modifier");
5453 return;
5454
5455 case 'G':
5456 fprintf (file, "%u", GET_MODE_SIZE (GET_MODE (x)));
5457 return;
5458
5459 case 'O':
5460 {
5461 struct s390_address ad;
5462 int ret;
5463
5464 if (!MEM_P (x))
5465 {
5466 output_operand_lossage ("memory reference expected for "
5467 "'O' output modifier");
5468 return;
5469 }
5470
5471 ret = s390_decompose_address (XEXP (x, 0), &ad);
5472
5473 if (!ret
5474 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
5475 || ad.indx)
5476 {
5477 output_operand_lossage ("invalid address for 'O' output modifier");
5478 return;
5479 }
5480
5481 if (ad.disp)
5482 output_addr_const (file, ad.disp);
5483 else
5484 fprintf (file, "0");
5485 }
5486 return;
5487
5488 case 'R':
5489 {
5490 struct s390_address ad;
5491 int ret;
5492
5493 if (!MEM_P (x))
5494 {
5495 output_operand_lossage ("memory reference expected for "
5496 "'R' output modifier");
5497 return;
5498 }
5499
5500 ret = s390_decompose_address (XEXP (x, 0), &ad);
5501
5502 if (!ret
5503 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
5504 || ad.indx)
5505 {
5506 output_operand_lossage ("invalid address for 'R' output modifier");
5507 return;
5508 }
5509
5510 if (ad.base)
5511 fprintf (file, "%s", reg_names[REGNO (ad.base)]);
5512 else
5513 fprintf (file, "0");
5514 }
5515 return;
5516
5517 case 'S':
5518 {
5519 struct s390_address ad;
5520 int ret;
5521
5522 if (!MEM_P (x))
5523 {
5524 output_operand_lossage ("memory reference expected for "
5525 "'S' output modifier");
5526 return;
5527 }
5528 ret = s390_decompose_address (XEXP (x, 0), &ad);
5529
5530 if (!ret
5531 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
5532 || ad.indx)
5533 {
5534 output_operand_lossage ("invalid address for 'S' output modifier");
5535 return;
5536 }
5537
5538 if (ad.disp)
5539 output_addr_const (file, ad.disp);
5540 else
5541 fprintf (file, "0");
5542
5543 if (ad.base)
5544 fprintf (file, "(%s)", reg_names[REGNO (ad.base)]);
5545 }
5546 return;
5547
5548 case 'N':
5549 if (GET_CODE (x) == REG)
5550 x = gen_rtx_REG (GET_MODE (x), REGNO (x) + 1);
5551 else if (GET_CODE (x) == MEM)
5552 x = change_address (x, VOIDmode,
5553 plus_constant (Pmode, XEXP (x, 0), 4));
5554 else
5555 output_operand_lossage ("register or memory expression expected "
5556 "for 'N' output modifier");
5557 break;
5558
5559 case 'M':
5560 if (GET_CODE (x) == REG)
5561 x = gen_rtx_REG (GET_MODE (x), REGNO (x) + 1);
5562 else if (GET_CODE (x) == MEM)
5563 x = change_address (x, VOIDmode,
5564 plus_constant (Pmode, XEXP (x, 0), 8));
5565 else
5566 output_operand_lossage ("register or memory expression expected "
5567 "for 'M' output modifier");
5568 break;
5569
5570 case 'Y':
5571 print_shift_count_operand (file, x);
5572 return;
5573 }
5574
5575 switch (GET_CODE (x))
5576 {
5577 case REG:
5578 fprintf (file, "%s", reg_names[REGNO (x)]);
5579 break;
5580
5581 case MEM:
5582 output_address (XEXP (x, 0));
5583 break;
5584
5585 case CONST:
5586 case CODE_LABEL:
5587 case LABEL_REF:
5588 case SYMBOL_REF:
5589 output_addr_const (file, x);
5590 break;
5591
5592 case CONST_INT:
5593 ival = INTVAL (x);
5594 switch (code)
5595 {
5596 case 0:
5597 break;
5598 case 'b':
5599 ival &= 0xff;
5600 break;
5601 case 'c':
5602 ival = ((ival & 0xff) ^ 0x80) - 0x80;
5603 break;
5604 case 'x':
5605 ival &= 0xffff;
5606 break;
5607 case 'h':
5608 ival = ((ival & 0xffff) ^ 0x8000) - 0x8000;
5609 break;
5610 case 'i':
5611 ival = s390_extract_part (x, HImode, 0);
5612 break;
5613 case 'j':
5614 ival = s390_extract_part (x, HImode, -1);
5615 break;
5616 case 'k':
5617 ival = s390_extract_part (x, SImode, 0);
5618 break;
5619 case 'm':
5620 ival = s390_extract_part (x, SImode, -1);
5621 break;
5622 case 'o':
5623 ival &= 0xffffffff;
5624 break;
5625 case 'e': case 'f':
5626 case 's': case 't':
5627 {
5628 int pos, len;
5629 bool ok;
5630
5631 len = (code == 's' || code == 'e' ? 64 : 32);
5632 ok = s390_contiguous_bitmask_p (ival, len, &pos, &len);
5633 gcc_assert (ok);
5634 if (code == 's' || code == 't')
5635 ival = 64 - pos - len;
5636 else
5637 ival = 64 - 1 - pos;
5638 }
5639 break;
5640 default:
5641 output_operand_lossage ("invalid constant for output modifier '%c'", code);
5642 }
5643 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
5644 break;
5645
5646 case CONST_DOUBLE:
5647 gcc_assert (GET_MODE (x) == VOIDmode);
5648 if (code == 'b')
5649 fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x) & 0xff);
5650 else if (code == 'x')
5651 fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x) & 0xffff);
5652 else if (code == 'h')
5653 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5654 ((CONST_DOUBLE_LOW (x) & 0xffff) ^ 0x8000) - 0x8000);
5655 else
5656 {
5657 if (code == 0)
5658 output_operand_lossage ("invalid constant - try using "
5659 "an output modifier");
5660 else
5661 output_operand_lossage ("invalid constant for output modifier '%c'",
5662 code);
5663 }
5664 break;
5665
5666 default:
5667 if (code == 0)
5668 output_operand_lossage ("invalid expression - try using "
5669 "an output modifier");
5670 else
5671 output_operand_lossage ("invalid expression for output "
5672 "modifier '%c'", code);
5673 break;
5674 }
5675 }
5676
5677 /* Target hook for assembling integer objects. We need to define it
5678 here to work a round a bug in some versions of GAS, which couldn't
5679 handle values smaller than INT_MIN when printed in decimal. */
5680
5681 static bool
5682 s390_assemble_integer (rtx x, unsigned int size, int aligned_p)
5683 {
5684 if (size == 8 && aligned_p
5685 && GET_CODE (x) == CONST_INT && INTVAL (x) < INT_MIN)
5686 {
5687 fprintf (asm_out_file, "\t.quad\t" HOST_WIDE_INT_PRINT_HEX "\n",
5688 INTVAL (x));
5689 return true;
5690 }
5691 return default_assemble_integer (x, size, aligned_p);
5692 }
5693
5694 /* Returns true if register REGNO is used for forming
5695 a memory address in expression X. */
5696
5697 static bool
5698 reg_used_in_mem_p (int regno, rtx x)
5699 {
5700 enum rtx_code code = GET_CODE (x);
5701 int i, j;
5702 const char *fmt;
5703
5704 if (code == MEM)
5705 {
5706 if (refers_to_regno_p (regno, regno+1,
5707 XEXP (x, 0), 0))
5708 return true;
5709 }
5710 else if (code == SET
5711 && GET_CODE (SET_DEST (x)) == PC)
5712 {
5713 if (refers_to_regno_p (regno, regno+1,
5714 SET_SRC (x), 0))
5715 return true;
5716 }
5717
5718 fmt = GET_RTX_FORMAT (code);
5719 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
5720 {
5721 if (fmt[i] == 'e'
5722 && reg_used_in_mem_p (regno, XEXP (x, i)))
5723 return true;
5724
5725 else if (fmt[i] == 'E')
5726 for (j = 0; j < XVECLEN (x, i); j++)
5727 if (reg_used_in_mem_p (regno, XVECEXP (x, i, j)))
5728 return true;
5729 }
5730 return false;
5731 }
5732
5733 /* Returns true if expression DEP_RTX sets an address register
5734 used by instruction INSN to address memory. */
5735
5736 static bool
5737 addr_generation_dependency_p (rtx dep_rtx, rtx insn)
5738 {
5739 rtx target, pat;
5740
5741 if (GET_CODE (dep_rtx) == INSN)
5742 dep_rtx = PATTERN (dep_rtx);
5743
5744 if (GET_CODE (dep_rtx) == SET)
5745 {
5746 target = SET_DEST (dep_rtx);
5747 if (GET_CODE (target) == STRICT_LOW_PART)
5748 target = XEXP (target, 0);
5749 while (GET_CODE (target) == SUBREG)
5750 target = SUBREG_REG (target);
5751
5752 if (GET_CODE (target) == REG)
5753 {
5754 int regno = REGNO (target);
5755
5756 if (s390_safe_attr_type (insn) == TYPE_LA)
5757 {
5758 pat = PATTERN (insn);
5759 if (GET_CODE (pat) == PARALLEL)
5760 {
5761 gcc_assert (XVECLEN (pat, 0) == 2);
5762 pat = XVECEXP (pat, 0, 0);
5763 }
5764 gcc_assert (GET_CODE (pat) == SET);
5765 return refers_to_regno_p (regno, regno+1, SET_SRC (pat), 0);
5766 }
5767 else if (get_attr_atype (insn) == ATYPE_AGEN)
5768 return reg_used_in_mem_p (regno, PATTERN (insn));
5769 }
5770 }
5771 return false;
5772 }
5773
5774 /* Return 1 if DEP_INSN sets a register used by INSN in the agen unit. */
5775
5776 int
5777 s390_agen_dep_p (rtx dep_insn, rtx insn)
5778 {
5779 rtx dep_rtx = PATTERN (dep_insn);
5780 int i;
5781
5782 if (GET_CODE (dep_rtx) == SET
5783 && addr_generation_dependency_p (dep_rtx, insn))
5784 return 1;
5785 else if (GET_CODE (dep_rtx) == PARALLEL)
5786 {
5787 for (i = 0; i < XVECLEN (dep_rtx, 0); i++)
5788 {
5789 if (addr_generation_dependency_p (XVECEXP (dep_rtx, 0, i), insn))
5790 return 1;
5791 }
5792 }
5793 return 0;
5794 }
5795
5796
5797 /* A C statement (sans semicolon) to update the integer scheduling priority
5798 INSN_PRIORITY (INSN). Increase the priority to execute the INSN earlier;
5799 reduce the priority to execute INSN later. Do not define this macro if
5800 you do not need to adjust the scheduling priorities of insns.
5801
5802 A STD instruction should be scheduled earlier,
5803 in order to use the bypass. */
5804 static int
5805 s390_adjust_priority (rtx insn ATTRIBUTE_UNUSED, int priority)
5806 {
5807 if (! INSN_P (insn))
5808 return priority;
5809
5810 if (s390_tune != PROCESSOR_2084_Z990
5811 && s390_tune != PROCESSOR_2094_Z9_109
5812 && s390_tune != PROCESSOR_2097_Z10
5813 && s390_tune != PROCESSOR_2817_Z196
5814 && s390_tune != PROCESSOR_2827_ZEC12)
5815 return priority;
5816
5817 switch (s390_safe_attr_type (insn))
5818 {
5819 case TYPE_FSTOREDF:
5820 case TYPE_FSTORESF:
5821 priority = priority << 3;
5822 break;
5823 case TYPE_STORE:
5824 case TYPE_STM:
5825 priority = priority << 1;
5826 break;
5827 default:
5828 break;
5829 }
5830 return priority;
5831 }
5832
5833
5834 /* The number of instructions that can be issued per cycle. */
5835
5836 static int
5837 s390_issue_rate (void)
5838 {
5839 switch (s390_tune)
5840 {
5841 case PROCESSOR_2084_Z990:
5842 case PROCESSOR_2094_Z9_109:
5843 case PROCESSOR_2817_Z196:
5844 return 3;
5845 case PROCESSOR_2097_Z10:
5846 case PROCESSOR_2827_ZEC12:
5847 return 2;
5848 default:
5849 return 1;
5850 }
5851 }
5852
5853 static int
5854 s390_first_cycle_multipass_dfa_lookahead (void)
5855 {
5856 return 4;
5857 }
5858
5859 /* Annotate every literal pool reference in X by an UNSPEC_LTREF expression.
5860 Fix up MEMs as required. */
5861
5862 static void
5863 annotate_constant_pool_refs (rtx *x)
5864 {
5865 int i, j;
5866 const char *fmt;
5867
5868 gcc_assert (GET_CODE (*x) != SYMBOL_REF
5869 || !CONSTANT_POOL_ADDRESS_P (*x));
5870
5871 /* Literal pool references can only occur inside a MEM ... */
5872 if (GET_CODE (*x) == MEM)
5873 {
5874 rtx memref = XEXP (*x, 0);
5875
5876 if (GET_CODE (memref) == SYMBOL_REF
5877 && CONSTANT_POOL_ADDRESS_P (memref))
5878 {
5879 rtx base = cfun->machine->base_reg;
5880 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, memref, base),
5881 UNSPEC_LTREF);
5882
5883 *x = replace_equiv_address (*x, addr);
5884 return;
5885 }
5886
5887 if (GET_CODE (memref) == CONST
5888 && GET_CODE (XEXP (memref, 0)) == PLUS
5889 && GET_CODE (XEXP (XEXP (memref, 0), 1)) == CONST_INT
5890 && GET_CODE (XEXP (XEXP (memref, 0), 0)) == SYMBOL_REF
5891 && CONSTANT_POOL_ADDRESS_P (XEXP (XEXP (memref, 0), 0)))
5892 {
5893 HOST_WIDE_INT off = INTVAL (XEXP (XEXP (memref, 0), 1));
5894 rtx sym = XEXP (XEXP (memref, 0), 0);
5895 rtx base = cfun->machine->base_reg;
5896 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, sym, base),
5897 UNSPEC_LTREF);
5898
5899 *x = replace_equiv_address (*x, plus_constant (Pmode, addr, off));
5900 return;
5901 }
5902 }
5903
5904 /* ... or a load-address type pattern. */
5905 if (GET_CODE (*x) == SET)
5906 {
5907 rtx addrref = SET_SRC (*x);
5908
5909 if (GET_CODE (addrref) == SYMBOL_REF
5910 && CONSTANT_POOL_ADDRESS_P (addrref))
5911 {
5912 rtx base = cfun->machine->base_reg;
5913 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, addrref, base),
5914 UNSPEC_LTREF);
5915
5916 SET_SRC (*x) = addr;
5917 return;
5918 }
5919
5920 if (GET_CODE (addrref) == CONST
5921 && GET_CODE (XEXP (addrref, 0)) == PLUS
5922 && GET_CODE (XEXP (XEXP (addrref, 0), 1)) == CONST_INT
5923 && GET_CODE (XEXP (XEXP (addrref, 0), 0)) == SYMBOL_REF
5924 && CONSTANT_POOL_ADDRESS_P (XEXP (XEXP (addrref, 0), 0)))
5925 {
5926 HOST_WIDE_INT off = INTVAL (XEXP (XEXP (addrref, 0), 1));
5927 rtx sym = XEXP (XEXP (addrref, 0), 0);
5928 rtx base = cfun->machine->base_reg;
5929 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, sym, base),
5930 UNSPEC_LTREF);
5931
5932 SET_SRC (*x) = plus_constant (Pmode, addr, off);
5933 return;
5934 }
5935 }
5936
5937 /* Annotate LTREL_BASE as well. */
5938 if (GET_CODE (*x) == UNSPEC
5939 && XINT (*x, 1) == UNSPEC_LTREL_BASE)
5940 {
5941 rtx base = cfun->machine->base_reg;
5942 *x = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, XVECEXP (*x, 0, 0), base),
5943 UNSPEC_LTREL_BASE);
5944 return;
5945 }
5946
5947 fmt = GET_RTX_FORMAT (GET_CODE (*x));
5948 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
5949 {
5950 if (fmt[i] == 'e')
5951 {
5952 annotate_constant_pool_refs (&XEXP (*x, i));
5953 }
5954 else if (fmt[i] == 'E')
5955 {
5956 for (j = 0; j < XVECLEN (*x, i); j++)
5957 annotate_constant_pool_refs (&XVECEXP (*x, i, j));
5958 }
5959 }
5960 }
5961
5962 /* Split all branches that exceed the maximum distance.
5963 Returns true if this created a new literal pool entry. */
5964
5965 static int
5966 s390_split_branches (void)
5967 {
5968 rtx temp_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
5969 int new_literal = 0, ret;
5970 rtx insn, pat, tmp, target;
5971 rtx *label;
5972
5973 /* We need correct insn addresses. */
5974
5975 shorten_branches (get_insns ());
5976
5977 /* Find all branches that exceed 64KB, and split them. */
5978
5979 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
5980 {
5981 if (GET_CODE (insn) != JUMP_INSN)
5982 continue;
5983
5984 pat = PATTERN (insn);
5985 if (GET_CODE (pat) == PARALLEL && XVECLEN (pat, 0) > 2)
5986 pat = XVECEXP (pat, 0, 0);
5987 if (GET_CODE (pat) != SET || SET_DEST (pat) != pc_rtx)
5988 continue;
5989
5990 if (GET_CODE (SET_SRC (pat)) == LABEL_REF)
5991 {
5992 label = &SET_SRC (pat);
5993 }
5994 else if (GET_CODE (SET_SRC (pat)) == IF_THEN_ELSE)
5995 {
5996 if (GET_CODE (XEXP (SET_SRC (pat), 1)) == LABEL_REF)
5997 label = &XEXP (SET_SRC (pat), 1);
5998 else if (GET_CODE (XEXP (SET_SRC (pat), 2)) == LABEL_REF)
5999 label = &XEXP (SET_SRC (pat), 2);
6000 else
6001 continue;
6002 }
6003 else
6004 continue;
6005
6006 if (get_attr_length (insn) <= 4)
6007 continue;
6008
6009 /* We are going to use the return register as a scratch register;
6010 make sure it will be saved/restored by the prologue/epilogue. */
6011 cfun_frame_layout.save_return_addr_p = 1;
6012
6013 if (!flag_pic)
6014 {
6015 new_literal = 1;
6016 tmp = force_const_mem (Pmode, *label);
6017 tmp = emit_insn_before (gen_rtx_SET (Pmode, temp_reg, tmp), insn);
6018 INSN_ADDRESSES_NEW (tmp, -1);
6019 annotate_constant_pool_refs (&PATTERN (tmp));
6020
6021 target = temp_reg;
6022 }
6023 else
6024 {
6025 new_literal = 1;
6026 target = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, *label),
6027 UNSPEC_LTREL_OFFSET);
6028 target = gen_rtx_CONST (Pmode, target);
6029 target = force_const_mem (Pmode, target);
6030 tmp = emit_insn_before (gen_rtx_SET (Pmode, temp_reg, target), insn);
6031 INSN_ADDRESSES_NEW (tmp, -1);
6032 annotate_constant_pool_refs (&PATTERN (tmp));
6033
6034 target = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, XEXP (target, 0),
6035 cfun->machine->base_reg),
6036 UNSPEC_LTREL_BASE);
6037 target = gen_rtx_PLUS (Pmode, temp_reg, target);
6038 }
6039
6040 ret = validate_change (insn, label, target, 0);
6041 gcc_assert (ret);
6042 }
6043
6044 return new_literal;
6045 }
6046
6047
6048 /* Find an annotated literal pool symbol referenced in RTX X,
6049 and store it at REF. Will abort if X contains references to
6050 more than one such pool symbol; multiple references to the same
6051 symbol are allowed, however.
6052
6053 The rtx pointed to by REF must be initialized to NULL_RTX
6054 by the caller before calling this routine. */
6055
6056 static void
6057 find_constant_pool_ref (rtx x, rtx *ref)
6058 {
6059 int i, j;
6060 const char *fmt;
6061
6062 /* Ignore LTREL_BASE references. */
6063 if (GET_CODE (x) == UNSPEC
6064 && XINT (x, 1) == UNSPEC_LTREL_BASE)
6065 return;
6066 /* Likewise POOL_ENTRY insns. */
6067 if (GET_CODE (x) == UNSPEC_VOLATILE
6068 && XINT (x, 1) == UNSPECV_POOL_ENTRY)
6069 return;
6070
6071 gcc_assert (GET_CODE (x) != SYMBOL_REF
6072 || !CONSTANT_POOL_ADDRESS_P (x));
6073
6074 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_LTREF)
6075 {
6076 rtx sym = XVECEXP (x, 0, 0);
6077 gcc_assert (GET_CODE (sym) == SYMBOL_REF
6078 && CONSTANT_POOL_ADDRESS_P (sym));
6079
6080 if (*ref == NULL_RTX)
6081 *ref = sym;
6082 else
6083 gcc_assert (*ref == sym);
6084
6085 return;
6086 }
6087
6088 fmt = GET_RTX_FORMAT (GET_CODE (x));
6089 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
6090 {
6091 if (fmt[i] == 'e')
6092 {
6093 find_constant_pool_ref (XEXP (x, i), ref);
6094 }
6095 else if (fmt[i] == 'E')
6096 {
6097 for (j = 0; j < XVECLEN (x, i); j++)
6098 find_constant_pool_ref (XVECEXP (x, i, j), ref);
6099 }
6100 }
6101 }
6102
6103 /* Replace every reference to the annotated literal pool
6104 symbol REF in X by its base plus OFFSET. */
6105
6106 static void
6107 replace_constant_pool_ref (rtx *x, rtx ref, rtx offset)
6108 {
6109 int i, j;
6110 const char *fmt;
6111
6112 gcc_assert (*x != ref);
6113
6114 if (GET_CODE (*x) == UNSPEC
6115 && XINT (*x, 1) == UNSPEC_LTREF
6116 && XVECEXP (*x, 0, 0) == ref)
6117 {
6118 *x = gen_rtx_PLUS (Pmode, XVECEXP (*x, 0, 1), offset);
6119 return;
6120 }
6121
6122 if (GET_CODE (*x) == PLUS
6123 && GET_CODE (XEXP (*x, 1)) == CONST_INT
6124 && GET_CODE (XEXP (*x, 0)) == UNSPEC
6125 && XINT (XEXP (*x, 0), 1) == UNSPEC_LTREF
6126 && XVECEXP (XEXP (*x, 0), 0, 0) == ref)
6127 {
6128 rtx addr = gen_rtx_PLUS (Pmode, XVECEXP (XEXP (*x, 0), 0, 1), offset);
6129 *x = plus_constant (Pmode, addr, INTVAL (XEXP (*x, 1)));
6130 return;
6131 }
6132
6133 fmt = GET_RTX_FORMAT (GET_CODE (*x));
6134 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
6135 {
6136 if (fmt[i] == 'e')
6137 {
6138 replace_constant_pool_ref (&XEXP (*x, i), ref, offset);
6139 }
6140 else if (fmt[i] == 'E')
6141 {
6142 for (j = 0; j < XVECLEN (*x, i); j++)
6143 replace_constant_pool_ref (&XVECEXP (*x, i, j), ref, offset);
6144 }
6145 }
6146 }
6147
6148 /* Check whether X contains an UNSPEC_LTREL_BASE.
6149 Return its constant pool symbol if found, NULL_RTX otherwise. */
6150
6151 static rtx
6152 find_ltrel_base (rtx x)
6153 {
6154 int i, j;
6155 const char *fmt;
6156
6157 if (GET_CODE (x) == UNSPEC
6158 && XINT (x, 1) == UNSPEC_LTREL_BASE)
6159 return XVECEXP (x, 0, 0);
6160
6161 fmt = GET_RTX_FORMAT (GET_CODE (x));
6162 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
6163 {
6164 if (fmt[i] == 'e')
6165 {
6166 rtx fnd = find_ltrel_base (XEXP (x, i));
6167 if (fnd)
6168 return fnd;
6169 }
6170 else if (fmt[i] == 'E')
6171 {
6172 for (j = 0; j < XVECLEN (x, i); j++)
6173 {
6174 rtx fnd = find_ltrel_base (XVECEXP (x, i, j));
6175 if (fnd)
6176 return fnd;
6177 }
6178 }
6179 }
6180
6181 return NULL_RTX;
6182 }
6183
6184 /* Replace any occurrence of UNSPEC_LTREL_BASE in X with its base. */
6185
6186 static void
6187 replace_ltrel_base (rtx *x)
6188 {
6189 int i, j;
6190 const char *fmt;
6191
6192 if (GET_CODE (*x) == UNSPEC
6193 && XINT (*x, 1) == UNSPEC_LTREL_BASE)
6194 {
6195 *x = XVECEXP (*x, 0, 1);
6196 return;
6197 }
6198
6199 fmt = GET_RTX_FORMAT (GET_CODE (*x));
6200 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
6201 {
6202 if (fmt[i] == 'e')
6203 {
6204 replace_ltrel_base (&XEXP (*x, i));
6205 }
6206 else if (fmt[i] == 'E')
6207 {
6208 for (j = 0; j < XVECLEN (*x, i); j++)
6209 replace_ltrel_base (&XVECEXP (*x, i, j));
6210 }
6211 }
6212 }
6213
6214
6215 /* We keep a list of constants which we have to add to internal
6216 constant tables in the middle of large functions. */
6217
6218 #define NR_C_MODES 11
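/* The modes are listed from largest to smallest; s390_dump_pool relies on
   this ordering to emit pool entries with descending alignment requirements.  */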
6219 enum machine_mode constant_modes[NR_C_MODES] =
6220 {
6221 TFmode, TImode, TDmode,
6222 DFmode, DImode, DDmode,
6223 SFmode, SImode, SDmode,
6224 HImode,
6225 QImode
6226 };
6227
6228 struct constant
6229 {
6230 struct constant *next;
6231 rtx value;
6232 rtx label;
6233 };
6234
6235 struct constant_pool
6236 {
6237 struct constant_pool *next;
6238 rtx first_insn;
6239 rtx pool_insn;
6240 bitmap insns;
6241 rtx emit_pool_after;
6242
6243 struct constant *constants[NR_C_MODES];
6244 struct constant *execute;
6245 rtx label;
6246 int size;
6247 };
6248
6249 /* Allocate new constant_pool structure. */
6250
6251 static struct constant_pool *
6252 s390_alloc_pool (void)
6253 {
6254 struct constant_pool *pool;
6255 int i;
6256
6257 pool = (struct constant_pool *) xmalloc (sizeof *pool);
6258 pool->next = NULL;
6259 for (i = 0; i < NR_C_MODES; i++)
6260 pool->constants[i] = NULL;
6261
6262 pool->execute = NULL;
6263 pool->label = gen_label_rtx ();
6264 pool->first_insn = NULL_RTX;
6265 pool->pool_insn = NULL_RTX;
6266 pool->insns = BITMAP_ALLOC (NULL);
6267 pool->size = 0;
6268 pool->emit_pool_after = NULL_RTX;
6269
6270 return pool;
6271 }
6272
6273 /* Create new constant pool covering instructions starting at INSN
6274 and chain it to the end of POOL_LIST. */
6275
6276 static struct constant_pool *
6277 s390_start_pool (struct constant_pool **pool_list, rtx insn)
6278 {
6279 struct constant_pool *pool, **prev;
6280
6281 pool = s390_alloc_pool ();
6282 pool->first_insn = insn;
6283
6284 for (prev = pool_list; *prev; prev = &(*prev)->next)
6285 ;
6286 *prev = pool;
6287
6288 return pool;
6289 }
6290
6291 /* End range of instructions covered by POOL at INSN and emit
6292 placeholder insn representing the pool. */
6293
6294 static void
6295 s390_end_pool (struct constant_pool *pool, rtx insn)
6296 {
6297 rtx pool_size = GEN_INT (pool->size + 8 /* alignment slop */);
6298
6299 if (!insn)
6300 insn = get_last_insn ();
6301
6302 pool->pool_insn = emit_insn_after (gen_pool (pool_size), insn);
6303 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
6304 }
6305
6306 /* Add INSN to the list of insns covered by POOL. */
6307
6308 static void
6309 s390_add_pool_insn (struct constant_pool *pool, rtx insn)
6310 {
6311 bitmap_set_bit (pool->insns, INSN_UID (insn));
6312 }
6313
6314 /* Return pool out of POOL_LIST that covers INSN. */
6315
6316 static struct constant_pool *
6317 s390_find_pool (struct constant_pool *pool_list, rtx insn)
6318 {
6319 struct constant_pool *pool;
6320
6321 for (pool = pool_list; pool; pool = pool->next)
6322 if (bitmap_bit_p (pool->insns, INSN_UID (insn)))
6323 break;
6324
6325 return pool;
6326 }
6327
6328 /* Add constant VAL of mode MODE to the constant pool POOL. */
6329
6330 static void
6331 s390_add_constant (struct constant_pool *pool, rtx val, enum machine_mode mode)
6332 {
6333 struct constant *c;
6334 int i;
6335
6336 for (i = 0; i < NR_C_MODES; i++)
6337 if (constant_modes[i] == mode)
6338 break;
6339 gcc_assert (i != NR_C_MODES);
6340
6341 for (c = pool->constants[i]; c != NULL; c = c->next)
6342 if (rtx_equal_p (val, c->value))
6343 break;
6344
6345 if (c == NULL)
6346 {
6347 c = (struct constant *) xmalloc (sizeof *c);
6348 c->value = val;
6349 c->label = gen_label_rtx ();
6350 c->next = pool->constants[i];
6351 pool->constants[i] = c;
6352 pool->size += GET_MODE_SIZE (mode);
6353 }
6354 }
6355
6356 /* Return an rtx that represents the offset of X from the start of
6357 pool POOL. */
6358
6359 static rtx
6360 s390_pool_offset (struct constant_pool *pool, rtx x)
6361 {
6362 rtx label;
6363
6364 label = gen_rtx_LABEL_REF (GET_MODE (x), pool->label);
6365 x = gen_rtx_UNSPEC (GET_MODE (x), gen_rtvec (2, x, label),
6366 UNSPEC_POOL_OFFSET);
6367 return gen_rtx_CONST (GET_MODE (x), x);
6368 }
6369
6370 /* Find constant VAL of mode MODE in the constant pool POOL.
6371 Return an RTX describing the distance from the start of
6372 the pool to the location of the new constant. */
6373
6374 static rtx
6375 s390_find_constant (struct constant_pool *pool, rtx val,
6376 enum machine_mode mode)
6377 {
6378 struct constant *c;
6379 int i;
6380
6381 for (i = 0; i < NR_C_MODES; i++)
6382 if (constant_modes[i] == mode)
6383 break;
6384 gcc_assert (i != NR_C_MODES);
6385
6386 for (c = pool->constants[i]; c != NULL; c = c->next)
6387 if (rtx_equal_p (val, c->value))
6388 break;
6389
6390 gcc_assert (c);
6391
6392 return s390_pool_offset (pool, gen_rtx_LABEL_REF (Pmode, c->label));
6393 }
6394
6395 /* Check whether INSN is an execute. Return the label_ref to its
6396 execute target template if so, NULL_RTX otherwise. */
6397
6398 static rtx
6399 s390_execute_label (rtx insn)
6400 {
6401 if (GET_CODE (insn) == INSN
6402 && GET_CODE (PATTERN (insn)) == PARALLEL
6403 && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == UNSPEC
6404 && XINT (XVECEXP (PATTERN (insn), 0, 0), 1) == UNSPEC_EXECUTE)
6405 return XVECEXP (XVECEXP (PATTERN (insn), 0, 0), 0, 2);
6406
6407 return NULL_RTX;
6408 }
6409
6410 /* Add execute target for INSN to the constant pool POOL. */
6411
6412 static void
6413 s390_add_execute (struct constant_pool *pool, rtx insn)
6414 {
6415 struct constant *c;
6416
6417 for (c = pool->execute; c != NULL; c = c->next)
6418 if (INSN_UID (insn) == INSN_UID (c->value))
6419 break;
6420
6421 if (c == NULL)
6422 {
6423 c = (struct constant *) xmalloc (sizeof *c);
6424 c->value = insn;
6425 c->label = gen_label_rtx ();
6426 c->next = pool->execute;
6427 pool->execute = c;
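      /* An execute target template is a single insn of at most 6 bytes;
         reserve that much pool space for it.  */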
6428 pool->size += 6;
6429 }
6430 }
6431
6432 /* Find execute target for INSN in the constant pool POOL.
6433 Return an RTX describing the distance from the start of
6434 the pool to the location of the execute target. */
6435
6436 static rtx
6437 s390_find_execute (struct constant_pool *pool, rtx insn)
6438 {
6439 struct constant *c;
6440
6441 for (c = pool->execute; c != NULL; c = c->next)
6442 if (INSN_UID (insn) == INSN_UID (c->value))
6443 break;
6444
6445 gcc_assert (c);
6446
6447 return s390_pool_offset (pool, gen_rtx_LABEL_REF (Pmode, c->label));
6448 }
6449
6450 /* For an execute INSN, extract the execute target template. */
6451
6452 static rtx
6453 s390_execute_target (rtx insn)
6454 {
6455 rtx pattern = PATTERN (insn);
6456 gcc_assert (s390_execute_label (insn));
6457
6458 if (XVECLEN (pattern, 0) == 2)
6459 {
6460 pattern = copy_rtx (XVECEXP (pattern, 0, 1));
6461 }
6462 else
6463 {
6464 rtvec vec = rtvec_alloc (XVECLEN (pattern, 0) - 1);
6465 int i;
6466
6467 for (i = 0; i < XVECLEN (pattern, 0) - 1; i++)
6468 RTVEC_ELT (vec, i) = copy_rtx (XVECEXP (pattern, 0, i + 1));
6469
6470 pattern = gen_rtx_PARALLEL (VOIDmode, vec);
6471 }
6472
6473 return pattern;
6474 }
6475
6476 /* Indicate that INSN cannot be duplicated. This is the case for
6477 execute insns that carry a unique label. */
6478
6479 static bool
6480 s390_cannot_copy_insn_p (rtx insn)
6481 {
6482 rtx label = s390_execute_label (insn);
6483 return label && label != const0_rtx;
6484 }
6485
6486 /* Dump out the constants in POOL. If REMOTE_LABEL is true,
6487 do not emit the pool base label. */
6488
6489 static void
6490 s390_dump_pool (struct constant_pool *pool, bool remote_label)
6491 {
6492 struct constant *c;
6493 rtx insn = pool->pool_insn;
6494 int i;
6495
6496 /* Switch to rodata section. */
6497 if (TARGET_CPU_ZARCH)
6498 {
6499 insn = emit_insn_after (gen_pool_section_start (), insn);
6500 INSN_ADDRESSES_NEW (insn, -1);
6501 }
6502
6503 /* Ensure minimum pool alignment. */
6504 if (TARGET_CPU_ZARCH)
6505 insn = emit_insn_after (gen_pool_align (GEN_INT (8)), insn);
6506 else
6507 insn = emit_insn_after (gen_pool_align (GEN_INT (4)), insn);
6508 INSN_ADDRESSES_NEW (insn, -1);
6509
6510 /* Emit pool base label. */
6511 if (!remote_label)
6512 {
6513 insn = emit_label_after (pool->label, insn);
6514 INSN_ADDRESSES_NEW (insn, -1);
6515 }
6516
6517 /* Dump constants in descending alignment requirement order,
6518 ensuring proper alignment for every constant. */
6519 for (i = 0; i < NR_C_MODES; i++)
6520 for (c = pool->constants[i]; c; c = c->next)
6521 {
6522 /* Convert UNSPEC_LTREL_OFFSET unspecs to pool-relative references. */
6523 rtx value = copy_rtx (c->value);
6524 if (GET_CODE (value) == CONST
6525 && GET_CODE (XEXP (value, 0)) == UNSPEC
6526 && XINT (XEXP (value, 0), 1) == UNSPEC_LTREL_OFFSET
6527 && XVECLEN (XEXP (value, 0), 0) == 1)
6528 value = s390_pool_offset (pool, XVECEXP (XEXP (value, 0), 0, 0));
6529
6530 insn = emit_label_after (c->label, insn);
6531 INSN_ADDRESSES_NEW (insn, -1);
6532
6533 value = gen_rtx_UNSPEC_VOLATILE (constant_modes[i],
6534 gen_rtvec (1, value),
6535 UNSPECV_POOL_ENTRY);
6536 insn = emit_insn_after (value, insn);
6537 INSN_ADDRESSES_NEW (insn, -1);
6538 }
6539
6540 /* Ensure minimum alignment for instructions. */
6541 insn = emit_insn_after (gen_pool_align (GEN_INT (2)), insn);
6542 INSN_ADDRESSES_NEW (insn, -1);
6543
6544 /* Output in-pool execute template insns. */
6545 for (c = pool->execute; c; c = c->next)
6546 {
6547 insn = emit_label_after (c->label, insn);
6548 INSN_ADDRESSES_NEW (insn, -1);
6549
6550 insn = emit_insn_after (s390_execute_target (c->value), insn);
6551 INSN_ADDRESSES_NEW (insn, -1);
6552 }
6553
6554 /* Switch back to previous section. */
6555 if (TARGET_CPU_ZARCH)
6556 {
6557 insn = emit_insn_after (gen_pool_section_end (), insn);
6558 INSN_ADDRESSES_NEW (insn, -1);
6559 }
6560
6561 insn = emit_barrier_after (insn);
6562 INSN_ADDRESSES_NEW (insn, -1);
6563
6564 /* Remove placeholder insn. */
6565 remove_insn (pool->pool_insn);
6566 }
6567
6568 /* Free all memory used by POOL. */
6569
6570 static void
6571 s390_free_pool (struct constant_pool *pool)
6572 {
6573 struct constant *c, *next;
6574 int i;
6575
6576 for (i = 0; i < NR_C_MODES; i++)
6577 for (c = pool->constants[i]; c; c = next)
6578 {
6579 next = c->next;
6580 free (c);
6581 }
6582
6583 for (c = pool->execute; c; c = next)
6584 {
6585 next = c->next;
6586 free (c);
6587 }
6588
6589 BITMAP_FREE (pool->insns);
6590 free (pool);
6591 }
6592
6593
6594 /* Collect main literal pool. Return NULL on overflow. */
6595
6596 static struct constant_pool *
6597 s390_mainpool_start (void)
6598 {
6599 struct constant_pool *pool;
6600 rtx insn;
6601
6602 pool = s390_alloc_pool ();
6603
6604 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6605 {
6606 if (GET_CODE (insn) == INSN
6607 && GET_CODE (PATTERN (insn)) == SET
6608 && GET_CODE (SET_SRC (PATTERN (insn))) == UNSPEC_VOLATILE
6609 && XINT (SET_SRC (PATTERN (insn)), 1) == UNSPECV_MAIN_POOL)
6610 {
6611 gcc_assert (!pool->pool_insn);
6612 pool->pool_insn = insn;
6613 }
6614
6615 if (!TARGET_CPU_ZARCH && s390_execute_label (insn))
6616 {
6617 s390_add_execute (pool, insn);
6618 }
6619 else if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
6620 {
6621 rtx pool_ref = NULL_RTX;
6622 find_constant_pool_ref (PATTERN (insn), &pool_ref);
6623 if (pool_ref)
6624 {
6625 rtx constant = get_pool_constant (pool_ref);
6626 enum machine_mode mode = get_pool_mode (pool_ref);
6627 s390_add_constant (pool, constant, mode);
6628 }
6629 }
6630
6631 /* If hot/cold partitioning is enabled we have to make sure that
6632 the literal pool is emitted in the same section where the
6633 initialization of the literal pool base pointer takes place.
6634 emit_pool_after is only used in the non-overflow case on
6635 non-Z CPUs where we can emit the literal pool at the end of the
6636 function body within the text section. */
6637 if (NOTE_P (insn)
6638 && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS
6639 && !pool->emit_pool_after)
6640 pool->emit_pool_after = PREV_INSN (insn);
6641 }
6642
6643 gcc_assert (pool->pool_insn || pool->size == 0);
6644
6645 if (pool->size >= 4096)
6646 {
6647 /* We're going to chunkify the pool, so remove the main
6648 pool placeholder insn. */
6649 remove_insn (pool->pool_insn);
6650
6651 s390_free_pool (pool);
6652 pool = NULL;
6653 }
6654
6655 /* If the function ends with the section where the literal pool
6656 should be emitted, set the marker to its end. */
6657 if (pool && !pool->emit_pool_after)
6658 pool->emit_pool_after = get_last_insn ();
6659
6660 return pool;
6661 }
6662
6663 /* POOL holds the main literal pool as collected by s390_mainpool_start.
6664 Modify the current function to output the pool constants as well as
6665 the pool register setup instruction. */
6666
6667 static void
6668 s390_mainpool_finish (struct constant_pool *pool)
6669 {
6670 rtx base_reg = cfun->machine->base_reg;
6671 rtx insn;
6672
6673 /* If the pool is empty, we're done. */
6674 if (pool->size == 0)
6675 {
6676 /* We don't actually need a base register after all. */
6677 cfun->machine->base_reg = NULL_RTX;
6678
6679 if (pool->pool_insn)
6680 remove_insn (pool->pool_insn);
6681 s390_free_pool (pool);
6682 return;
6683 }
6684
6685 /* We need correct insn addresses. */
6686 shorten_branches (get_insns ());
6687
6688 /* On zSeries, we use a LARL to load the pool register. The pool is
6689 located in the .rodata section, so we emit it after the function. */
6690 if (TARGET_CPU_ZARCH)
6691 {
6692 insn = gen_main_base_64 (base_reg, pool->label);
6693 insn = emit_insn_after (insn, pool->pool_insn);
6694 INSN_ADDRESSES_NEW (insn, -1);
6695 remove_insn (pool->pool_insn);
6696
6697 insn = get_last_insn ();
6698 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
6699 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
6700
6701 s390_dump_pool (pool, 0);
6702 }
6703
6704 /* On S/390, if the total size of the function's code plus literal pool
6705 does not exceed 4096 bytes, we use BASR to set up a function base
6706 pointer, and emit the literal pool at the end of the function. */
6707 else if (INSN_ADDRESSES (INSN_UID (pool->emit_pool_after))
6708 + pool->size + 8 /* alignment slop */ < 4096)
6709 {
6710 insn = gen_main_base_31_small (base_reg, pool->label);
6711 insn = emit_insn_after (insn, pool->pool_insn);
6712 INSN_ADDRESSES_NEW (insn, -1);
6713 remove_insn (pool->pool_insn);
6714
6715 insn = emit_label_after (pool->label, insn);
6716 INSN_ADDRESSES_NEW (insn, -1);
6717
6718 /* emit_pool_after will be set by s390_mainpool_start to the
6719 last insn of the section where the literal pool should be
6720 emitted. */
6721 insn = pool->emit_pool_after;
6722
6723 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
6724 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
6725
6726 s390_dump_pool (pool, 1);
6727 }
6728
6729 /* Otherwise, we emit an inline literal pool and use BASR to branch
6730 over it, setting up the pool register at the same time. */
6731 else
6732 {
6733 rtx pool_end = gen_label_rtx ();
6734
6735 insn = gen_main_base_31_large (base_reg, pool->label, pool_end);
6736 insn = emit_jump_insn_after (insn, pool->pool_insn);
6737 JUMP_LABEL (insn) = pool_end;
6738 INSN_ADDRESSES_NEW (insn, -1);
6739 remove_insn (pool->pool_insn);
6740
6741 insn = emit_label_after (pool->label, insn);
6742 INSN_ADDRESSES_NEW (insn, -1);
6743
6744 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
6745 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
6746
6747 insn = emit_label_after (pool_end, pool->pool_insn);
6748 INSN_ADDRESSES_NEW (insn, -1);
6749
6750 s390_dump_pool (pool, 1);
6751 }
6752
6753
6754 /* Replace all literal pool references. */
6755
6756 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6757 {
6758 if (INSN_P (insn))
6759 replace_ltrel_base (&PATTERN (insn));
6760
6761 if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
6762 {
6763 rtx addr, pool_ref = NULL_RTX;
6764 find_constant_pool_ref (PATTERN (insn), &pool_ref);
6765 if (pool_ref)
6766 {
6767 if (s390_execute_label (insn))
6768 addr = s390_find_execute (pool, insn);
6769 else
6770 addr = s390_find_constant (pool, get_pool_constant (pool_ref),
6771 get_pool_mode (pool_ref));
6772
6773 replace_constant_pool_ref (&PATTERN (insn), pool_ref, addr);
6774 INSN_CODE (insn) = -1;
6775 }
6776 }
6777 }
6778
6779
6780 /* Free the pool. */
6781 s390_free_pool (pool);
6782 }
6783
6784 /* POOL holds the main literal pool as collected by s390_mainpool_start.
6785 We have decided we cannot use this pool, so revert all changes
6786 to the current function that were done by s390_mainpool_start. */
6787 static void
6788 s390_mainpool_cancel (struct constant_pool *pool)
6789 {
6790 /* We didn't actually change the instruction stream, so simply
6791 free the pool memory. */
6792 s390_free_pool (pool);
6793 }
6794
6795
6796 /* Chunkify the literal pool. */
6797
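/* Chunk size limits are kept well below the 4096-byte reach of a 12-bit
   displacement, leaving headroom for alignment padding and for the base
   register reload insns inserted between chunks.  */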
6798 #define S390_POOL_CHUNK_MIN 0xc00
6799 #define S390_POOL_CHUNK_MAX 0xe00
6800
6801 static struct constant_pool *
6802 s390_chunkify_start (void)
6803 {
6804 struct constant_pool *curr_pool = NULL, *pool_list = NULL;
6805 int extra_size = 0;
6806 bitmap far_labels;
6807 rtx pending_ltrel = NULL_RTX;
6808 rtx insn;
6809
6810 rtx (*gen_reload_base) (rtx, rtx) =
6811 TARGET_CPU_ZARCH? gen_reload_base_64 : gen_reload_base_31;
6812
6813
6814 /* We need correct insn addresses. */
6815
6816 shorten_branches (get_insns ());
6817
6818 /* Scan all insns and move literals to pool chunks. */
6819
6820 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6821 {
6822 bool section_switch_p = false;
6823
6824 /* Check for pending LTREL_BASE. */
6825 if (INSN_P (insn))
6826 {
6827 rtx ltrel_base = find_ltrel_base (PATTERN (insn));
6828 if (ltrel_base)
6829 {
6830 gcc_assert (ltrel_base == pending_ltrel);
6831 pending_ltrel = NULL_RTX;
6832 }
6833 }
6834
6835 if (!TARGET_CPU_ZARCH && s390_execute_label (insn))
6836 {
6837 if (!curr_pool)
6838 curr_pool = s390_start_pool (&pool_list, insn);
6839
6840 s390_add_execute (curr_pool, insn);
6841 s390_add_pool_insn (curr_pool, insn);
6842 }
6843 else if (GET_CODE (insn) == INSN || CALL_P (insn))
6844 {
6845 rtx pool_ref = NULL_RTX;
6846 find_constant_pool_ref (PATTERN (insn), &pool_ref);
6847 if (pool_ref)
6848 {
6849 rtx constant = get_pool_constant (pool_ref);
6850 enum machine_mode mode = get_pool_mode (pool_ref);
6851
6852 if (!curr_pool)
6853 curr_pool = s390_start_pool (&pool_list, insn);
6854
6855 s390_add_constant (curr_pool, constant, mode);
6856 s390_add_pool_insn (curr_pool, insn);
6857
6858 /* Don't split the pool chunk between a LTREL_OFFSET load
6859 and the corresponding LTREL_BASE. */
6860 if (GET_CODE (constant) == CONST
6861 && GET_CODE (XEXP (constant, 0)) == UNSPEC
6862 && XINT (XEXP (constant, 0), 1) == UNSPEC_LTREL_OFFSET)
6863 {
6864 gcc_assert (!pending_ltrel);
6865 pending_ltrel = pool_ref;
6866 }
6867 }
6868 }
6869
6870 if (GET_CODE (insn) == JUMP_INSN || GET_CODE (insn) == CODE_LABEL)
6871 {
6872 if (curr_pool)
6873 s390_add_pool_insn (curr_pool, insn);
6874 /* An LTREL_BASE must follow within the same basic block. */
6875 gcc_assert (!pending_ltrel);
6876 }
6877
6878 if (NOTE_P (insn))
6879 switch (NOTE_KIND (insn))
6880 {
6881 case NOTE_INSN_SWITCH_TEXT_SECTIONS:
6882 section_switch_p = true;
6883 break;
6884 case NOTE_INSN_VAR_LOCATION:
6885 case NOTE_INSN_CALL_ARG_LOCATION:
6886 continue;
6887 default:
6888 break;
6889 }
6890
6891 if (!curr_pool
6892 || INSN_ADDRESSES_SIZE () <= (size_t) INSN_UID (insn)
6893 || INSN_ADDRESSES (INSN_UID (insn)) == -1)
6894 continue;
6895
6896 if (TARGET_CPU_ZARCH)
6897 {
6898 if (curr_pool->size < S390_POOL_CHUNK_MAX)
6899 continue;
6900
6901 s390_end_pool (curr_pool, NULL_RTX);
6902 curr_pool = NULL;
6903 }
6904 else
6905 {
6906 int chunk_size = INSN_ADDRESSES (INSN_UID (insn))
6907 - INSN_ADDRESSES (INSN_UID (curr_pool->first_insn))
6908 + extra_size;
6909
6910 /* We will later have to insert base register reload insns.
6911 Those will have an effect on code size, which we need to
6912 consider here. This calculation makes rather pessimistic
6913 worst-case assumptions. */
6914 if (GET_CODE (insn) == CODE_LABEL)
6915 extra_size += 6;
6916
6917 if (chunk_size < S390_POOL_CHUNK_MIN
6918 && curr_pool->size < S390_POOL_CHUNK_MIN
6919 && !section_switch_p)
6920 continue;
6921
6922 /* Pool chunks can only be inserted after BARRIERs ... */
6923 if (GET_CODE (insn) == BARRIER)
6924 {
6925 s390_end_pool (curr_pool, insn);
6926 curr_pool = NULL;
6927 extra_size = 0;
6928 }
6929
6930 /* ... so if we don't find one in time, create one. */
6931 else if (chunk_size > S390_POOL_CHUNK_MAX
6932 || curr_pool->size > S390_POOL_CHUNK_MAX
6933 || section_switch_p)
6934 {
6935 rtx label, jump, barrier, next, prev;
6936
6937 if (!section_switch_p)
6938 {
6939 /* We can insert the barrier only after a 'real' insn. */
6940 if (GET_CODE (insn) != INSN && GET_CODE (insn) != CALL_INSN)
6941 continue;
6942 if (get_attr_length (insn) == 0)
6943 continue;
6944 /* Don't separate LTREL_BASE from the corresponding
6945 LTREL_OFFSET load. */
6946 if (pending_ltrel)
6947 continue;
6948 next = insn;
6949 do
6950 {
6951 insn = next;
6952 next = NEXT_INSN (insn);
6953 }
6954 while (next
6955 && NOTE_P (next)
6956 && (NOTE_KIND (next) == NOTE_INSN_VAR_LOCATION
6957 || NOTE_KIND (next) == NOTE_INSN_CALL_ARG_LOCATION));
6958 }
6959 else
6960 {
6961 gcc_assert (!pending_ltrel);
6962
6963 /* The old pool has to end before the section switch
6964 note in order to make it part of the current
6965 section. */
6966 insn = PREV_INSN (insn);
6967 }
6968
6969 label = gen_label_rtx ();
6970 prev = insn;
6971 if (prev && NOTE_P (prev))
6972 prev = prev_nonnote_insn (prev);
6973 if (prev)
6974 jump = emit_jump_insn_after_setloc (gen_jump (label), insn,
6975 INSN_LOCATION (prev));
6976 else
6977 jump = emit_jump_insn_after_noloc (gen_jump (label), insn);
6978 barrier = emit_barrier_after (jump);
6979 insn = emit_label_after (label, barrier);
6980 JUMP_LABEL (jump) = label;
6981 LABEL_NUSES (label) = 1;
6982
6983 INSN_ADDRESSES_NEW (jump, -1);
6984 INSN_ADDRESSES_NEW (barrier, -1);
6985 INSN_ADDRESSES_NEW (insn, -1);
6986
6987 s390_end_pool (curr_pool, barrier);
6988 curr_pool = NULL;
6989 extra_size = 0;
6990 }
6991 }
6992 }
6993
6994 if (curr_pool)
6995 s390_end_pool (curr_pool, NULL_RTX);
6996 gcc_assert (!pending_ltrel);
6997
6998 /* Find all labels that are branched into
6999 from an insn belonging to a different chunk. */
7000
7001 far_labels = BITMAP_ALLOC (NULL);
7002
7003 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
7004 {
7005 /* Labels marked with LABEL_PRESERVE_P can be the target
7006 of non-local jumps, so we have to mark them.
7007 The same holds for named labels.
7008
7009 Don't do that, however, if it is the label before
7010 a jump table. */
7011
7012 if (GET_CODE (insn) == CODE_LABEL
7013 && (LABEL_PRESERVE_P (insn) || LABEL_NAME (insn)))
7014 {
7015 rtx vec_insn = next_real_insn (insn);
7016 rtx vec_pat = vec_insn && GET_CODE (vec_insn) == JUMP_INSN ?
7017 PATTERN (vec_insn) : NULL_RTX;
7018 if (!vec_pat
7019 || !(GET_CODE (vec_pat) == ADDR_VEC
7020 || GET_CODE (vec_pat) == ADDR_DIFF_VEC))
7021 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (insn));
7022 }
7023
7024 /* If we have a direct jump (conditional or unconditional)
7025 or a casesi jump, check all potential targets. */
7026 else if (GET_CODE (insn) == JUMP_INSN)
7027 {
7028 rtx pat = PATTERN (insn);
7029 if (GET_CODE (pat) == PARALLEL && XVECLEN (pat, 0) > 2)
7030 pat = XVECEXP (pat, 0, 0);
7031
7032 if (GET_CODE (pat) == SET)
7033 {
7034 rtx label = JUMP_LABEL (insn);
7035 if (label)
7036 {
7037 if (s390_find_pool (pool_list, label)
7038 != s390_find_pool (pool_list, insn))
7039 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (label));
7040 }
7041 }
7042 else if (GET_CODE (pat) == PARALLEL
7043 && XVECLEN (pat, 0) == 2
7044 && GET_CODE (XVECEXP (pat, 0, 0)) == SET
7045 && GET_CODE (XVECEXP (pat, 0, 1)) == USE
7046 && GET_CODE (XEXP (XVECEXP (pat, 0, 1), 0)) == LABEL_REF)
7047 {
7048 /* Find the jump table used by this casesi jump. */
7049 rtx vec_label = XEXP (XEXP (XVECEXP (pat, 0, 1), 0), 0);
7050 rtx vec_insn = next_real_insn (vec_label);
7051 rtx vec_pat = vec_insn && GET_CODE (vec_insn) == JUMP_INSN ?
7052 PATTERN (vec_insn) : NULL_RTX;
7053 if (vec_pat
7054 && (GET_CODE (vec_pat) == ADDR_VEC
7055 || GET_CODE (vec_pat) == ADDR_DIFF_VEC))
7056 {
7057 int i, diff_p = GET_CODE (vec_pat) == ADDR_DIFF_VEC;
7058
7059 for (i = 0; i < XVECLEN (vec_pat, diff_p); i++)
7060 {
7061 rtx label = XEXP (XVECEXP (vec_pat, diff_p, i), 0);
7062
7063 if (s390_find_pool (pool_list, label)
7064 != s390_find_pool (pool_list, insn))
7065 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (label));
7066 }
7067 }
7068 }
7069 }
7070 }
7071
7072 /* Insert base register reload insns before every pool. */
7073
7074 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
7075 {
7076 rtx new_insn = gen_reload_base (cfun->machine->base_reg,
7077 curr_pool->label);
7078 rtx insn = curr_pool->first_insn;
7079 INSN_ADDRESSES_NEW (emit_insn_before (new_insn, insn), -1);
7080 }
7081
7082 /* Insert base register reload insns at every far label. */
7083
7084 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
7085 if (GET_CODE (insn) == CODE_LABEL
7086 && bitmap_bit_p (far_labels, CODE_LABEL_NUMBER (insn)))
7087 {
7088 struct constant_pool *pool = s390_find_pool (pool_list, insn);
7089 if (pool)
7090 {
7091 rtx new_insn = gen_reload_base (cfun->machine->base_reg,
7092 pool->label);
7093 INSN_ADDRESSES_NEW (emit_insn_after (new_insn, insn), -1);
7094 }
7095 }
7096
7097
7098 BITMAP_FREE (far_labels);
7099
7100
7101 /* Recompute insn addresses. */
7102
7103 init_insn_lengths ();
7104 shorten_branches (get_insns ());
7105
7106 return pool_list;
7107 }
7108
7109 /* POOL_LIST is a chunk list as prepared by s390_chunkify_start.
7110 After we have decided to use this list, finish implementing
7111 all changes to the current function as required. */
7112
7113 static void
7114 s390_chunkify_finish (struct constant_pool *pool_list)
7115 {
7116 struct constant_pool *curr_pool = NULL;
7117 rtx insn;
7118
7119
7120 /* Replace all literal pool references. */
7121
7122 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
7123 {
7124 if (INSN_P (insn))
7125 replace_ltrel_base (&PATTERN (insn));
7126
7127 curr_pool = s390_find_pool (pool_list, insn);
7128 if (!curr_pool)
7129 continue;
7130
7131 if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
7132 {
7133 rtx addr, pool_ref = NULL_RTX;
7134 find_constant_pool_ref (PATTERN (insn), &pool_ref);
7135 if (pool_ref)
7136 {
7137 if (s390_execute_label (insn))
7138 addr = s390_find_execute (curr_pool, insn);
7139 else
7140 addr = s390_find_constant (curr_pool,
7141 get_pool_constant (pool_ref),
7142 get_pool_mode (pool_ref));
7143
7144 replace_constant_pool_ref (&PATTERN (insn), pool_ref, addr);
7145 INSN_CODE (insn) = -1;
7146 }
7147 }
7148 }
7149
7150 /* Dump out all literal pools. */
7151
7152 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
7153 s390_dump_pool (curr_pool, 0);
7154
7155 /* Free pool list. */
7156
7157 while (pool_list)
7158 {
7159 struct constant_pool *next = pool_list->next;
7160 s390_free_pool (pool_list);
7161 pool_list = next;
7162 }
7163 }
7164
7165 /* POOL_LIST is a chunk list as prepared by s390_chunkify_start.
7166 We have decided we cannot use this list, so revert all changes
7167 to the current function that were done by s390_chunkify_start. */
7168
7169 static void
7170 s390_chunkify_cancel (struct constant_pool *pool_list)
7171 {
7172 struct constant_pool *curr_pool = NULL;
7173 rtx insn;
7174
7175 /* Remove all pool placeholder insns. */
7176
7177 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
7178 {
7179 /* Did we insert an extra barrier? Remove it. */
7180 rtx barrier = PREV_INSN (curr_pool->pool_insn);
7181 rtx jump = barrier? PREV_INSN (barrier) : NULL_RTX;
7182 rtx label = NEXT_INSN (curr_pool->pool_insn);
7183
7184 if (jump && GET_CODE (jump) == JUMP_INSN
7185 && barrier && GET_CODE (barrier) == BARRIER
7186 && label && GET_CODE (label) == CODE_LABEL
7187 && GET_CODE (PATTERN (jump)) == SET
7188 && SET_DEST (PATTERN (jump)) == pc_rtx
7189 && GET_CODE (SET_SRC (PATTERN (jump))) == LABEL_REF
7190 && XEXP (SET_SRC (PATTERN (jump)), 0) == label)
7191 {
7192 remove_insn (jump);
7193 remove_insn (barrier);
7194 remove_insn (label);
7195 }
7196
7197 remove_insn (curr_pool->pool_insn);
7198 }
7199
7200 /* Remove all base register reload insns. */
7201
7202 for (insn = get_insns (); insn; )
7203 {
7204 rtx next_insn = NEXT_INSN (insn);
7205
7206 if (GET_CODE (insn) == INSN
7207 && GET_CODE (PATTERN (insn)) == SET
7208 && GET_CODE (SET_SRC (PATTERN (insn))) == UNSPEC
7209 && XINT (SET_SRC (PATTERN (insn)), 1) == UNSPEC_RELOAD_BASE)
7210 remove_insn (insn);
7211
7212 insn = next_insn;
7213 }
7214
7215 /* Free pool list. */
7216
7217 while (pool_list)
7218 {
7219 struct constant_pool *next = pool_list->next;
7220 s390_free_pool (pool_list);
7221 pool_list = next;
7222 }
7223 }
7224
7225 /* Output the constant pool entry EXP in mode MODE with alignment ALIGN. */
7226
7227 void
7228 s390_output_pool_entry (rtx exp, enum machine_mode mode, unsigned int align)
7229 {
7230 REAL_VALUE_TYPE r;
7231
7232 switch (GET_MODE_CLASS (mode))
7233 {
7234 case MODE_FLOAT:
7235 case MODE_DECIMAL_FLOAT:
7236 gcc_assert (GET_CODE (exp) == CONST_DOUBLE);
7237
7238 REAL_VALUE_FROM_CONST_DOUBLE (r, exp);
7239 assemble_real (r, mode, align);
7240 break;
7241
7242 case MODE_INT:
7243 assemble_integer (exp, GET_MODE_SIZE (mode), align, 1);
7244 mark_symbol_refs_as_used (exp);
7245 break;
7246
7247 default:
7248 gcc_unreachable ();
7249 }
7250 }
7251
7252
7253 /* Return an RTL expression representing the value of the return address
7254 for the frame COUNT steps up from the current frame. FRAME is the
7255 frame pointer of that frame. */
7256
7257 rtx
7258 s390_return_addr_rtx (int count, rtx frame ATTRIBUTE_UNUSED)
7259 {
7260 int offset;
7261 rtx addr;
7262
7263 /* Without backchain, we fail for all but the current frame. */
7264
7265 if (!TARGET_BACKCHAIN && count > 0)
7266 return NULL_RTX;
7267
7268 /* For the current frame, we need to make sure the initial
7269 value of RETURN_REGNUM is actually saved. */
7270
7271 if (count == 0)
7272 {
7273 /* On non-z architectures branch splitting could overwrite r14. */
7274 if (TARGET_CPU_ZARCH)
7275 return get_hard_reg_initial_val (Pmode, RETURN_REGNUM);
7276 else
7277 {
7278 cfun_frame_layout.save_return_addr_p = true;
7279 return gen_rtx_MEM (Pmode, return_address_pointer_rtx);
7280 }
7281 }
7282
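  /* For frames further up the call chain, the return address is loaded
     from its save slot relative to FRAME; the slot offset depends on
     whether the packed stack layout is in use.  */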
7283 if (TARGET_PACKED_STACK)
7284 offset = -2 * UNITS_PER_LONG;
7285 else
7286 offset = RETURN_REGNUM * UNITS_PER_LONG;
7287
7288 addr = plus_constant (Pmode, frame, offset);
7289 addr = memory_address (Pmode, addr);
7290 return gen_rtx_MEM (Pmode, addr);
7291 }
7292
7293 /* Return an RTL expression representing the back chain stored in
7294 the current stack frame. */
7295
7296 rtx
7297 s390_back_chain_rtx (void)
7298 {
7299 rtx chain;
7300
7301 gcc_assert (TARGET_BACKCHAIN);
7302
7303 if (TARGET_PACKED_STACK)
7304 chain = plus_constant (Pmode, stack_pointer_rtx,
7305 STACK_POINTER_OFFSET - UNITS_PER_LONG);
7306 else
7307 chain = stack_pointer_rtx;
7308
7309 chain = gen_rtx_MEM (Pmode, chain);
7310 return chain;
7311 }
7312
7313 /* Find the first call-clobbered register unused in the current function.
7314 This could be used as a base register in a leaf function
7315 or for holding the return address before the epilogue. */
7316
7317 static int
7318 find_unused_clobbered_reg (void)
7319 {
7320 int i;
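  /* Scan only the call-clobbered GPRs 0-5; if all of them are live,
     register 0 is returned as a fallback.  */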
7321 for (i = 0; i < 6; i++)
7322 if (!df_regs_ever_live_p (i))
7323 return i;
7324 return 0;
7325 }
7326
7327
7328 /* Helper function for s390_regs_ever_clobbered. Sets the fields in DATA for all
7329 clobbered hard regs in SETREG. */
7330
7331 static void
7332 s390_reg_clobbered_rtx (rtx setreg, const_rtx set_insn ATTRIBUTE_UNUSED, void *data)
7333 {
7334 int *regs_ever_clobbered = (int *)data;
7335 unsigned int i, regno;
7336 enum machine_mode mode = GET_MODE (setreg);
7337
7338 if (GET_CODE (setreg) == SUBREG)
7339 {
7340 rtx inner = SUBREG_REG (setreg);
7341 if (!GENERAL_REG_P (inner))
7342 return;
7343 regno = subreg_regno (setreg);
7344 }
7345 else if (GENERAL_REG_P (setreg))
7346 regno = REGNO (setreg);
7347 else
7348 return;
7349
7350 for (i = regno;
7351 i < regno + HARD_REGNO_NREGS (regno, mode);
7352 i++)
7353 regs_ever_clobbered[i] = 1;
7354 }
7355
7356 /* Walks through all basic blocks of the current function looking
7357 for clobbered hard regs using s390_reg_clobbered_rtx. The fields
7358 of the passed integer array REGS_EVER_CLOBBERED are set to one for
7359 each of those regs. */
7360
7361 static void
7362 s390_regs_ever_clobbered (int *regs_ever_clobbered)
7363 {
7364 basic_block cur_bb;
7365 rtx cur_insn;
7366 unsigned int i;
7367
7368 memset (regs_ever_clobbered, 0, 16 * sizeof (int));
7369
7370 /* For non-leaf functions we have to consider all call clobbered regs to be
7371 clobbered. */
7372 if (!crtl->is_leaf)
7373 {
7374 for (i = 0; i < 16; i++)
7375 regs_ever_clobbered[i] = call_really_used_regs[i];
7376 }
7377
7378 /* Make the "magic" eh_return registers live if necessary. For regs_ever_live
7379 this work is done by liveness analysis (mark_regs_live_at_end).
7380 Special care is needed for functions containing landing pads. Landing pads
7381 may use the eh registers, but the code which sets these registers is not
7382 contained in that function. Hence s390_regs_ever_clobbered is not able to
7383 deal with this automatically. */
7384 if (crtl->calls_eh_return || cfun->machine->has_landing_pad_p)
7385 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM ; i++)
7386 if (crtl->calls_eh_return
7387 || (cfun->machine->has_landing_pad_p
7388 && df_regs_ever_live_p (EH_RETURN_DATA_REGNO (i))))
7389 regs_ever_clobbered[EH_RETURN_DATA_REGNO (i)] = 1;
7390
7391 /* For nonlocal gotos all call-saved registers have to be saved.
7392 This flag is also set for the unwinding code in libgcc.
7393 See expand_builtin_unwind_init. For regs_ever_live this is done by
7394 reload. */
7395 if (cfun->has_nonlocal_label)
7396 for (i = 0; i < 16; i++)
7397 if (!call_really_used_regs[i])
7398 regs_ever_clobbered[i] = 1;
7399
7400 FOR_EACH_BB (cur_bb)
7401 {
7402 FOR_BB_INSNS (cur_bb, cur_insn)
7403 {
7404 if (INSN_P (cur_insn))
7405 note_stores (PATTERN (cur_insn),
7406 s390_reg_clobbered_rtx,
7407 regs_ever_clobbered);
7408 }
7409 }
7410 }
7411
7412 /* Determine the frame area which actually has to be accessed
7413 in the function epilogue. The values are stored at the
7414 given pointers AREA_BOTTOM (address of the lowest used stack
7415 address) and AREA_TOP (address of the first item which does
7416 not belong to the stack frame). */
7417
7418 static void
7419 s390_frame_area (int *area_bottom, int *area_top)
7420 {
7421 int b, t;
7422 int i;
7423
7424 b = INT_MAX;
7425 t = INT_MIN;
7426
7427 if (cfun_frame_layout.first_restore_gpr != -1)
7428 {
7429 b = (cfun_frame_layout.gprs_offset
7430 + cfun_frame_layout.first_restore_gpr * UNITS_PER_LONG);
7431 t = b + (cfun_frame_layout.last_restore_gpr
7432 - cfun_frame_layout.first_restore_gpr + 1) * UNITS_PER_LONG;
7433 }
7434
7435 if (TARGET_64BIT && cfun_save_high_fprs_p)
7436 {
7437 b = MIN (b, cfun_frame_layout.f8_offset);
7438 t = MAX (t, (cfun_frame_layout.f8_offset
7439 + cfun_frame_layout.high_fprs * 8));
7440 }
7441
7442 if (!TARGET_64BIT)
7443 for (i = 2; i < 4; i++)
7444 if (cfun_fpr_bit_p (i))
7445 {
7446 b = MIN (b, cfun_frame_layout.f4_offset + (i - 2) * 8);
7447 t = MAX (t, cfun_frame_layout.f4_offset + (i - 1) * 8);
7448 }
7449
7450 *area_bottom = b;
7451 *area_top = t;
7452 }
7453
7454 /* Fill cfun->machine with info about register usage of current function.
7455 Return in CLOBBERED_REGS which GPRs are currently considered set. */
7456
7457 static void
7458 s390_register_info (int clobbered_regs[])
7459 {
7460 int i, j;
7461
7462 /* FPRs 8-15 are call-saved in the 64-bit ABI. */
7463 cfun_frame_layout.fpr_bitmap = 0;
7464 cfun_frame_layout.high_fprs = 0;
7465 if (TARGET_64BIT)
7466 for (i = 24; i < 32; i++)
7467 if (df_regs_ever_live_p (i) && !global_regs[i])
7468 {
7469 cfun_set_fpr_bit (i - 16);
7470 cfun_frame_layout.high_fprs++;
7471 }
7472
7473 /* Find first and last gpr to be saved. We trust regs_ever_live
7474 data, except that we don't save and restore global registers.
7475
7476 Also, all registers with special meaning to the compiler need
7477 extra handling. */
7478
7479 s390_regs_ever_clobbered (clobbered_regs);
7480
7481 for (i = 0; i < 16; i++)
7482 clobbered_regs[i] = clobbered_regs[i] && !global_regs[i] && !fixed_regs[i];
7483
7484 if (frame_pointer_needed)
7485 clobbered_regs[HARD_FRAME_POINTER_REGNUM] = 1;
7486
7487 if (flag_pic)
7488 clobbered_regs[PIC_OFFSET_TABLE_REGNUM]
7489 |= df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM);
7490
7491 clobbered_regs[BASE_REGNUM]
7492 |= (cfun->machine->base_reg
7493 && REGNO (cfun->machine->base_reg) == BASE_REGNUM);
7494
7495 clobbered_regs[RETURN_REGNUM]
7496 |= (!crtl->is_leaf
7497 || TARGET_TPF_PROFILING
7498 || cfun->machine->split_branches_pending_p
7499 || cfun_frame_layout.save_return_addr_p
7500 || crtl->calls_eh_return
7501 || cfun->stdarg);
7502
7503 clobbered_regs[STACK_POINTER_REGNUM]
7504 |= (!crtl->is_leaf
7505 || TARGET_TPF_PROFILING
7506 || cfun_save_high_fprs_p
7507 || get_frame_size () > 0
7508 || cfun->calls_alloca
7509 || cfun->stdarg);
7510
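  /* Determine the range of GPRs 6-15 that actually needs save slots;
     slots for the argument registers of varargs functions are added
     further below.  */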
7511 for (i = 6; i < 16; i++)
7512 if (df_regs_ever_live_p (i) || clobbered_regs[i])
7513 break;
7514 for (j = 15; j > i; j--)
7515 if (df_regs_ever_live_p (j) || clobbered_regs[j])
7516 break;
7517
7518 if (i == 16)
7519 {
7520 /* Nothing to save/restore. */
7521 cfun_frame_layout.first_save_gpr_slot = -1;
7522 cfun_frame_layout.last_save_gpr_slot = -1;
7523 cfun_frame_layout.first_save_gpr = -1;
7524 cfun_frame_layout.first_restore_gpr = -1;
7525 cfun_frame_layout.last_save_gpr = -1;
7526 cfun_frame_layout.last_restore_gpr = -1;
7527 }
7528 else
7529 {
7530 /* Save slots for gprs from i to j. */
7531 cfun_frame_layout.first_save_gpr_slot = i;
7532 cfun_frame_layout.last_save_gpr_slot = j;
7533
7534 for (i = cfun_frame_layout.first_save_gpr_slot;
7535 i < cfun_frame_layout.last_save_gpr_slot + 1;
7536 i++)
7537 if (clobbered_regs[i])
7538 break;
7539
7540 for (j = cfun_frame_layout.last_save_gpr_slot; j > i; j--)
7541 if (clobbered_regs[j])
7542 break;
7543
7544 if (i == cfun_frame_layout.last_save_gpr_slot + 1)
7545 {
7546 /* Nothing to save/restore. */
7547 cfun_frame_layout.first_save_gpr = -1;
7548 cfun_frame_layout.first_restore_gpr = -1;
7549 cfun_frame_layout.last_save_gpr = -1;
7550 cfun_frame_layout.last_restore_gpr = -1;
7551 }
7552 else
7553 {
7554 /* Save / Restore from gpr i to j. */
7555 cfun_frame_layout.first_save_gpr = i;
7556 cfun_frame_layout.first_restore_gpr = i;
7557 cfun_frame_layout.last_save_gpr = j;
7558 cfun_frame_layout.last_restore_gpr = j;
7559 }
7560 }
7561
7562 if (cfun->stdarg)
7563 {
7564 /* Varargs functions need to save gprs 2 to 6. */
7565 if (cfun->va_list_gpr_size
7566 && crtl->args.info.gprs < GP_ARG_NUM_REG)
7567 {
7568 int min_gpr = crtl->args.info.gprs;
7569 int max_gpr = min_gpr + cfun->va_list_gpr_size;
7570 if (max_gpr > GP_ARG_NUM_REG)
7571 max_gpr = GP_ARG_NUM_REG;
7572
7573 if (cfun_frame_layout.first_save_gpr == -1
7574 || cfun_frame_layout.first_save_gpr > 2 + min_gpr)
7575 {
7576 cfun_frame_layout.first_save_gpr = 2 + min_gpr;
7577 cfun_frame_layout.first_save_gpr_slot = 2 + min_gpr;
7578 }
7579
7580 if (cfun_frame_layout.last_save_gpr == -1
7581 || cfun_frame_layout.last_save_gpr < 2 + max_gpr - 1)
7582 {
7583 cfun_frame_layout.last_save_gpr = 2 + max_gpr - 1;
7584 cfun_frame_layout.last_save_gpr_slot = 2 + max_gpr - 1;
7585 }
7586 }
7587
7588 /* Mark f0, f2 for 31 bit and f0-f4 for 64 bit to be saved. */
7589 if (TARGET_HARD_FLOAT && cfun->va_list_fpr_size
7590 && crtl->args.info.fprs < FP_ARG_NUM_REG)
7591 {
7592 int min_fpr = crtl->args.info.fprs;
7593 int max_fpr = min_fpr + cfun->va_list_fpr_size;
7594 if (max_fpr > FP_ARG_NUM_REG)
7595 max_fpr = FP_ARG_NUM_REG;
7596
7597 /* ??? This is currently required to ensure proper location
7598 of the fpr save slots within the va_list save area. */
7599 if (TARGET_PACKED_STACK)
7600 min_fpr = 0;
7601
7602 for (i = min_fpr; i < max_fpr; i++)
7603 cfun_set_fpr_bit (i);
7604 }
7605 }
7606
7607 if (!TARGET_64BIT)
7608 for (i = 2; i < 4; i++)
7609 if (df_regs_ever_live_p (i + 16) && !global_regs[i + 16])
7610 cfun_set_fpr_bit (i);
7611 }
7612
7613 /* Fill cfun->machine with info about frame of current function. */
7614
7615 static void
7616 s390_frame_info (void)
7617 {
7618 int i;
7619
7620 cfun_frame_layout.frame_size = get_frame_size ();
7621 if (!TARGET_64BIT && cfun_frame_layout.frame_size > 0x7fff0000)
7622 fatal_error ("total size of local variables exceeds architecture limit");
7623
7624 if (!TARGET_PACKED_STACK)
7625 {
7626 cfun_frame_layout.backchain_offset = 0;
7627 cfun_frame_layout.f0_offset = 16 * UNITS_PER_LONG;
7628 cfun_frame_layout.f4_offset = cfun_frame_layout.f0_offset + 2 * 8;
7629 cfun_frame_layout.f8_offset = -cfun_frame_layout.high_fprs * 8;
7630 cfun_frame_layout.gprs_offset = (cfun_frame_layout.first_save_gpr_slot
7631 * UNITS_PER_LONG);
7632 }
7633 else if (TARGET_BACKCHAIN) /* kernel stack layout */
7634 {
7635 cfun_frame_layout.backchain_offset = (STACK_POINTER_OFFSET
7636 - UNITS_PER_LONG);
7637 cfun_frame_layout.gprs_offset
7638 = (cfun_frame_layout.backchain_offset
7639 - (STACK_POINTER_REGNUM - cfun_frame_layout.first_save_gpr_slot + 1)
7640 * UNITS_PER_LONG);
7641
7642 if (TARGET_64BIT)
7643 {
7644 cfun_frame_layout.f4_offset
7645 = (cfun_frame_layout.gprs_offset
7646 - 8 * (cfun_fpr_bit_p (2) + cfun_fpr_bit_p (3)));
7647
7648 cfun_frame_layout.f0_offset
7649 = (cfun_frame_layout.f4_offset
7650 - 8 * (cfun_fpr_bit_p (0) + cfun_fpr_bit_p (1)));
7651 }
7652 else
7653 {
7654 /* On 31 bit we have to take care of the alignment of the
7655 floating-point regs to provide the fastest access. */
7656 cfun_frame_layout.f0_offset
7657 = ((cfun_frame_layout.gprs_offset
7658 & ~(STACK_BOUNDARY / BITS_PER_UNIT - 1))
7659 - 8 * (cfun_fpr_bit_p (0) + cfun_fpr_bit_p (1)));
7660
7661 cfun_frame_layout.f4_offset
7662 = (cfun_frame_layout.f0_offset
7663 - 8 * (cfun_fpr_bit_p (2) + cfun_fpr_bit_p (3)));
7664 }
7665 }
7666 else /* no backchain */
7667 {
7668 cfun_frame_layout.f4_offset
7669 = (STACK_POINTER_OFFSET
7670 - 8 * (cfun_fpr_bit_p (2) + cfun_fpr_bit_p (3)));
7671
7672 cfun_frame_layout.f0_offset
7673 = (cfun_frame_layout.f4_offset
7674 - 8 * (cfun_fpr_bit_p (0) + cfun_fpr_bit_p (1)));
7675
7676 cfun_frame_layout.gprs_offset
7677 = cfun_frame_layout.f0_offset - cfun_gprs_save_area_size;
7678 }
7679
7680 if (crtl->is_leaf
7681 && !TARGET_TPF_PROFILING
7682 && cfun_frame_layout.frame_size == 0
7683 && !cfun_save_high_fprs_p
7684 && !cfun->calls_alloca
7685 && !cfun->stdarg)
7686 return;
7687
7688 if (!TARGET_PACKED_STACK)
7689 cfun_frame_layout.frame_size += (STACK_POINTER_OFFSET
7690 + crtl->outgoing_args_size
7691 + cfun_frame_layout.high_fprs * 8);
7692 else
7693 {
7694 if (TARGET_BACKCHAIN)
7695 cfun_frame_layout.frame_size += UNITS_PER_LONG;
7696
7697 /* No alignment trouble here because f8-f15 are only saved under
7698 64 bit. */
7699 cfun_frame_layout.f8_offset = (MIN (MIN (cfun_frame_layout.f0_offset,
7700 cfun_frame_layout.f4_offset),
7701 cfun_frame_layout.gprs_offset)
7702 - cfun_frame_layout.high_fprs * 8);
7703
7704 cfun_frame_layout.frame_size += cfun_frame_layout.high_fprs * 8;
7705
7706 for (i = 0; i < 8; i++)
7707 if (cfun_fpr_bit_p (i))
7708 cfun_frame_layout.frame_size += 8;
7709
7710 cfun_frame_layout.frame_size += cfun_gprs_save_area_size;
7711
7712 /* If under 31 bit an odd number of GPRs has to be saved, we have to adjust
7713 the frame size to sustain 8-byte alignment of stack frames. */
7714 cfun_frame_layout.frame_size = ((cfun_frame_layout.frame_size +
7715 STACK_BOUNDARY / BITS_PER_UNIT - 1)
7716 & ~(STACK_BOUNDARY / BITS_PER_UNIT - 1));
7717
7718 cfun_frame_layout.frame_size += crtl->outgoing_args_size;
7719 }
7720 }
7721
7722 /* Generate frame layout. Fills in register and frame data for the current
7723 function in cfun->machine. This routine can be called multiple times;
7724 it will re-do the complete frame layout every time. */
7725
7726 static void
7727 s390_init_frame_layout (void)
7728 {
7729 HOST_WIDE_INT frame_size;
7730 int base_used;
7731 int clobbered_regs[16];
7732
7733 /* On S/390 machines, we may need to perform branch splitting, which
7734 will require both base and return address register. We have no
7735 choice but to assume we're going to need them until right at the
7736 end of the machine dependent reorg phase. */
7737 if (!TARGET_CPU_ZARCH)
7738 cfun->machine->split_branches_pending_p = true;
7739
7740 do
7741 {
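      /* Iterate until the frame size reaches a fixed point: the choice of
         base register influences the frame layout, which in turn can change
         whether the base register is needed at all.  */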
7742 frame_size = cfun_frame_layout.frame_size;
7743
7744 /* Try to predict whether we'll need the base register. */
7745 base_used = cfun->machine->split_branches_pending_p
7746 || crtl->uses_const_pool
7747 || (!DISP_IN_RANGE (frame_size)
7748 && !CONST_OK_FOR_K (frame_size));
7749
7750 /* Decide which register to use as literal pool base. In small
7751 leaf functions, try to use an unused call-clobbered register
7752 as base register to avoid save/restore overhead. */
7753 if (!base_used)
7754 cfun->machine->base_reg = NULL_RTX;
7755 else if (crtl->is_leaf && !df_regs_ever_live_p (5))
7756 cfun->machine->base_reg = gen_rtx_REG (Pmode, 5);
7757 else
7758 cfun->machine->base_reg = gen_rtx_REG (Pmode, BASE_REGNUM);
7759
7760 s390_register_info (clobbered_regs);
7761 s390_frame_info ();
7762 }
7763 while (frame_size != cfun_frame_layout.frame_size);
7764 }
7765
7766 /* Update frame layout. Recompute actual register save data based on
7767 current info and update regs_ever_live for the special registers.
7768 May be called multiple times, but may never cause *more* registers
7769 to be saved than s390_init_frame_layout allocated room for. */
7770
7771 static void
7772 s390_update_frame_layout (void)
7773 {
7774 int clobbered_regs[16];
7775
7776 s390_register_info (clobbered_regs);
7777
7778 df_set_regs_ever_live (BASE_REGNUM,
7779 clobbered_regs[BASE_REGNUM] ? true : false);
7780 df_set_regs_ever_live (RETURN_REGNUM,
7781 clobbered_regs[RETURN_REGNUM] ? true : false);
7782 df_set_regs_ever_live (STACK_POINTER_REGNUM,
7783 clobbered_regs[STACK_POINTER_REGNUM] ? true : false);
7784
7785 if (cfun->machine->base_reg)
7786 df_set_regs_ever_live (REGNO (cfun->machine->base_reg), true);
7787 }
7788
7789 /* Return true if it is legal to put a value with MODE into REGNO. */
7790
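/* A few illustrative consequences of the rules below (not exhaustive):
   an SImode, DImode or DFmode value may be put into a floating point
   register, a TFmode value fits into general registers only with
   TARGET_ZARCH, and the condition code registers accept only MODE_CC
   values (all of this additionally subject to the REGNO_PAIR_OK check).  */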
7791 bool
7792 s390_hard_regno_mode_ok (unsigned int regno, enum machine_mode mode)
7793 {
7794 switch (REGNO_REG_CLASS (regno))
7795 {
7796 case FP_REGS:
7797 if (REGNO_PAIR_OK (regno, mode))
7798 {
7799 if (mode == SImode || mode == DImode)
7800 return true;
7801
7802 if (FLOAT_MODE_P (mode) && GET_MODE_CLASS (mode) != MODE_VECTOR_FLOAT)
7803 return true;
7804 }
7805 break;
7806 case ADDR_REGS:
7807 if (FRAME_REGNO_P (regno) && mode == Pmode)
7808 return true;
7809
7810 /* fallthrough */
7811 case GENERAL_REGS:
7812 if (REGNO_PAIR_OK (regno, mode))
7813 {
7814 if (TARGET_ZARCH
7815 || (mode != TFmode && mode != TCmode && mode != TDmode))
7816 return true;
7817 }
7818 break;
7819 case CC_REGS:
7820 if (GET_MODE_CLASS (mode) == MODE_CC)
7821 return true;
7822 break;
7823 case ACCESS_REGS:
7824 if (REGNO_PAIR_OK (regno, mode))
7825 {
7826 if (mode == SImode || mode == Pmode)
7827 return true;
7828 }
7829 break;
7830 default:
7831 return false;
7832 }
7833
7834 return false;
7835 }
7836
7837 /* Return true if register OLD_REG can be renamed to register NEW_REG. */
7838
7839 bool
7840 s390_hard_regno_rename_ok (unsigned int old_reg, unsigned int new_reg)
7841 {
7842 /* Once we've decided upon a register to use as base register, it must
7843 no longer be used for any other purpose. */
7844 if (cfun->machine->base_reg)
7845 if (REGNO (cfun->machine->base_reg) == old_reg
7846 || REGNO (cfun->machine->base_reg) == new_reg)
7847 return false;
7848
7849 return true;
7850 }
7851
7852 /* Return the maximum number of registers needed to represent a value
7853 of mode MODE in registers of class RCLASS. */
7854
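/* Worked examples for the formulas below (illustrative only): a TFmode
   value of 16 bytes needs (16 + 8 - 1) / 8 == 2 FP_REGS, and a TCmode
   complex value of 32 bytes needs 2 * ((32 / 2 + 8 - 1) / 8) == 4.  */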
7855 int
7856 s390_class_max_nregs (enum reg_class rclass, enum machine_mode mode)
7857 {
7858 switch (rclass)
7859 {
7860 case FP_REGS:
7861 if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
7862 return 2 * ((GET_MODE_SIZE (mode) / 2 + 8 - 1) / 8);
7863 else
7864 return (GET_MODE_SIZE (mode) + 8 - 1) / 8;
7865 case ACCESS_REGS:
7866 return (GET_MODE_SIZE (mode) + 4 - 1) / 4;
7867 default:
7868 break;
7869 }
7870 return (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
7871 }
7872
7873 /* Return true if register FROM can be eliminated via register TO. */
7874
7875 static bool
7876 s390_can_eliminate (const int from, const int to)
7877 {
7878 /* On zSeries machines, we have not marked the base register as fixed.
7879 Instead, we have an elimination rule BASE_REGNUM -> BASE_REGNUM.
7880 If a function requires the base register, we say here that this
7881 elimination cannot be performed. This will cause reload to free
7882 up the base register (as if it were fixed). On the other hand,
7883 if the current function does *not* require the base register, we
7884 say here the elimination succeeds, which in turn allows reload
7885 to allocate the base register for any other purpose. */
7886 if (from == BASE_REGNUM && to == BASE_REGNUM)
7887 {
7888 if (TARGET_CPU_ZARCH)
7889 {
7890 s390_init_frame_layout ();
7891 return cfun->machine->base_reg == NULL_RTX;
7892 }
7893
7894 return false;
7895 }
7896
7897 /* Everything else must point into the stack frame. */
7898 gcc_assert (to == STACK_POINTER_REGNUM
7899 || to == HARD_FRAME_POINTER_REGNUM);
7900
7901 gcc_assert (from == FRAME_POINTER_REGNUM
7902 || from == ARG_POINTER_REGNUM
7903 || from == RETURN_ADDRESS_POINTER_REGNUM);
7904
7905 /* Make sure we actually saved the return address. */
7906 if (from == RETURN_ADDRESS_POINTER_REGNUM)
7907 if (!crtl->calls_eh_return
7908 && !cfun->stdarg
7909 && !cfun_frame_layout.save_return_addr_p)
7910 return false;
7911
7912 return true;
7913 }
7914
7915 /* Return the offset between registers FROM and TO initially after the prologue. */
7916
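/* Purely illustrative example for the RETURN_ADDRESS_POINTER_REGNUM case
   below: assuming a frame size of 160, a gprs_offset of 24, a
   first_save_gpr_slot of 6 and UNITS_PER_LONG == 8, the resulting offset
   is 160 + 24 + (14 - 6) * 8 == 248 (r14 being the return register).  */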
7917 HOST_WIDE_INT
7918 s390_initial_elimination_offset (int from, int to)
7919 {
7920 HOST_WIDE_INT offset;
7921 int index;
7922
7923 /* ??? Why are we called for non-eliminable pairs? */
7924 if (!s390_can_eliminate (from, to))
7925 return 0;
7926
7927 switch (from)
7928 {
7929 case FRAME_POINTER_REGNUM:
7930 offset = (get_frame_size()
7931 + STACK_POINTER_OFFSET
7932 + crtl->outgoing_args_size);
7933 break;
7934
7935 case ARG_POINTER_REGNUM:
7936 s390_init_frame_layout ();
7937 offset = cfun_frame_layout.frame_size + STACK_POINTER_OFFSET;
7938 break;
7939
7940 case RETURN_ADDRESS_POINTER_REGNUM:
7941 s390_init_frame_layout ();
7942 index = RETURN_REGNUM - cfun_frame_layout.first_save_gpr_slot;
7943 gcc_assert (index >= 0);
7944 offset = cfun_frame_layout.frame_size + cfun_frame_layout.gprs_offset;
7945 offset += index * UNITS_PER_LONG;
7946 break;
7947
7948 case BASE_REGNUM:
7949 offset = 0;
7950 break;
7951
7952 default:
7953 gcc_unreachable ();
7954 }
7955
7956 return offset;
7957 }
7958
7959 /* Emit insn to save fpr REGNUM at offset OFFSET relative
7960 to register BASE. Return generated insn. */
7961
7962 static rtx
7963 save_fpr (rtx base, int offset, int regnum)
7964 {
7965 rtx addr;
7966 addr = gen_rtx_MEM (DFmode, plus_constant (Pmode, base, offset));
7967
7968 if (regnum >= 16 && regnum <= (16 + FP_ARG_NUM_REG))
7969 set_mem_alias_set (addr, get_varargs_alias_set ());
7970 else
7971 set_mem_alias_set (addr, get_frame_alias_set ());
7972
7973 return emit_move_insn (addr, gen_rtx_REG (DFmode, regnum));
7974 }
7975
7976 /* Emit insn to restore fpr REGNUM from offset OFFSET relative
7977 to register BASE. Return generated insn. */
7978
7979 static rtx
7980 restore_fpr (rtx base, int offset, int regnum)
7981 {
7982 rtx addr;
7983 addr = gen_rtx_MEM (DFmode, plus_constant (Pmode, base, offset));
7984 set_mem_alias_set (addr, get_frame_alias_set ());
7985
7986 return emit_move_insn (gen_rtx_REG (DFmode, regnum), addr);
7987 }
7988
7989 /* Return true if REGNO is a global register, but not one
7990 of the special ones that need to be saved/restored anyway. */
7991
7992 static inline bool
7993 global_not_special_regno_p (int regno)
7994 {
7995 return (global_regs[regno]
7996 /* These registers are special and need to be
7997 restored in any case. */
7998 && !(regno == STACK_POINTER_REGNUM
7999 || regno == RETURN_REGNUM
8000 || regno == BASE_REGNUM
8001 || (flag_pic && regno == (int)PIC_OFFSET_TABLE_REGNUM)));
8002 }
8003
8004 /* Generate insn to save registers FIRST to LAST into
8005 the register save area located at offset OFFSET
8006 relative to register BASE. */
8007
8008 static rtx
8009 save_gprs (rtx base, int offset, int first, int last)
8010 {
8011 rtx addr, insn, note;
8012 int i;
8013
8014 addr = plus_constant (Pmode, base, offset);
8015 addr = gen_rtx_MEM (Pmode, addr);
8016
8017 set_mem_alias_set (addr, get_frame_alias_set ());
8018
8019 /* Special-case single register. */
8020 if (first == last)
8021 {
8022 if (TARGET_64BIT)
8023 insn = gen_movdi (addr, gen_rtx_REG (Pmode, first));
8024 else
8025 insn = gen_movsi (addr, gen_rtx_REG (Pmode, first));
8026
8027 if (!global_not_special_regno_p (first))
8028 RTX_FRAME_RELATED_P (insn) = 1;
8029 return insn;
8030 }
8031
8032
8033 insn = gen_store_multiple (addr,
8034 gen_rtx_REG (Pmode, first),
8035 GEN_INT (last - first + 1));
8036
8037 if (first <= 6 && cfun->stdarg)
8038 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
8039 {
8040 rtx mem = XEXP (XVECEXP (PATTERN (insn), 0, i), 0);
8041
8042 if (first + i <= 6)
8043 set_mem_alias_set (mem, get_varargs_alias_set ());
8044 }
8045
8046 /* We need to set the FRAME_RELATED flag on all SETs
8047 inside the store-multiple pattern.
8048
8049 However, we must not emit DWARF records for registers 2..5
8050 if they are stored for use by variable arguments ...
8051
8052 ??? Unfortunately, it is not enough to simply not set the
8053 FRAME_RELATED flags for those SETs, because the first SET
8054 of the PARALLEL is always treated as if it had the flag
8055 set, even if it does not. Therefore we emit a new pattern
8056 without those registers as a REG_FRAME_RELATED_EXPR note. */
8057
8058 if (first >= 6 && !global_not_special_regno_p (first))
8059 {
8060 rtx pat = PATTERN (insn);
8061
8062 for (i = 0; i < XVECLEN (pat, 0); i++)
8063 if (GET_CODE (XVECEXP (pat, 0, i)) == SET
8064 && !global_not_special_regno_p (REGNO (SET_SRC (XVECEXP (pat,
8065 0, i)))))
8066 RTX_FRAME_RELATED_P (XVECEXP (pat, 0, i)) = 1;
8067
8068 RTX_FRAME_RELATED_P (insn) = 1;
8069 }
8070 else if (last >= 6)
8071 {
8072 int start;
8073
8074 for (start = first >= 6 ? first : 6; start <= last; start++)
8075 if (!global_not_special_regno_p (start))
8076 break;
8077
8078 if (start > last)
8079 return insn;
8080
8081 addr = plus_constant (Pmode, base,
8082 offset + (start - first) * UNITS_PER_LONG);
8083 note = gen_store_multiple (gen_rtx_MEM (Pmode, addr),
8084 gen_rtx_REG (Pmode, start),
8085 GEN_INT (last - start + 1));
8086 note = PATTERN (note);
8087
8088 add_reg_note (insn, REG_FRAME_RELATED_EXPR, note);
8089
8090 for (i = 0; i < XVECLEN (note, 0); i++)
8091 if (GET_CODE (XVECEXP (note, 0, i)) == SET
8092 && !global_not_special_regno_p (REGNO (SET_SRC (XVECEXP (note,
8093 0, i)))))
8094 RTX_FRAME_RELATED_P (XVECEXP (note, 0, i)) = 1;
8095
8096 RTX_FRAME_RELATED_P (insn) = 1;
8097 }
8098
8099 return insn;
8100 }
8101
8102 /* Generate insn to restore registers FIRST to LAST from
8103 the register save area located at offset OFFSET
8104 relative to register BASE. */
8105
8106 static rtx
8107 restore_gprs (rtx base, int offset, int first, int last)
8108 {
8109 rtx addr, insn;
8110
8111 addr = plus_constant (Pmode, base, offset);
8112 addr = gen_rtx_MEM (Pmode, addr);
8113 set_mem_alias_set (addr, get_frame_alias_set ());
8114
8115 /* Special-case single register. */
8116 if (first == last)
8117 {
8118 if (TARGET_64BIT)
8119 insn = gen_movdi (gen_rtx_REG (Pmode, first), addr);
8120 else
8121 insn = gen_movsi (gen_rtx_REG (Pmode, first), addr);
8122
8123 return insn;
8124 }
8125
8126 insn = gen_load_multiple (gen_rtx_REG (Pmode, first),
8127 addr,
8128 GEN_INT (last - first + 1));
8129 return insn;
8130 }
8131
8132 /* Return insn sequence to load the GOT register. */
8133
8134 static GTY(()) rtx got_symbol;
8135 rtx
8136 s390_load_got (void)
8137 {
8138 rtx insns;
8139
8140 /* We cannot use pic_offset_table_rtx here since this function is
8141 also used for non-pic code when __tls_get_offset is called; in
8142 that case neither PIC_OFFSET_TABLE_REGNUM nor pic_offset_table_rtx
8143 is usable. */
8144 rtx got_rtx = gen_rtx_REG (Pmode, 12);
8145
8146 if (!got_symbol)
8147 {
8148 got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
8149 SYMBOL_REF_FLAGS (got_symbol) = SYMBOL_FLAG_LOCAL;
8150 }
8151
8152 start_sequence ();
8153
8154 if (TARGET_CPU_ZARCH)
8155 {
8156 emit_move_insn (got_rtx, got_symbol);
8157 }
8158 else
8159 {
8160 rtx offset;
8161
8162 offset = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, got_symbol),
8163 UNSPEC_LTREL_OFFSET);
8164 offset = gen_rtx_CONST (Pmode, offset);
8165 offset = force_const_mem (Pmode, offset);
8166
8167 emit_move_insn (got_rtx, offset);
8168
8169 offset = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (offset, 0)),
8170 UNSPEC_LTREL_BASE);
8171 offset = gen_rtx_PLUS (Pmode, got_rtx, offset);
8172
8173 emit_move_insn (got_rtx, offset);
8174 }
8175
8176 insns = get_insns ();
8177 end_sequence ();
8178 return insns;
8179 }
8180
8181 /* This ties together stack memory (MEM with an alias set of frame_alias_set)
8182 and the change to the stack pointer. */
8183
8184 static void
8185 s390_emit_stack_tie (void)
8186 {
8187 rtx mem = gen_frame_mem (BLKmode,
8188 gen_rtx_REG (Pmode, STACK_POINTER_REGNUM));
8189
8190 emit_insn (gen_stack_tie (mem));
8191 }
8192
8193 /* Expand the prologue into a bunch of separate insns. */
8194
8195 void
8196 s390_emit_prologue (void)
8197 {
8198 rtx insn, addr;
8199 rtx temp_reg;
8200 int i;
8201 int offset;
8202 int next_fpr = 0;
8203
8204 /* Complete frame layout. */
8205
8206 s390_update_frame_layout ();
8207
8208 /* Annotate all constant pool references to let the scheduler know
8209 they implicitly use the base register. */
8210
8211 push_topmost_sequence ();
8212
8213 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8214 if (INSN_P (insn))
8215 {
8216 annotate_constant_pool_refs (&PATTERN (insn));
8217 df_insn_rescan (insn);
8218 }
8219
8220 pop_topmost_sequence ();
8221
8222 /* Choose the best register to use for temporary use within the prologue.
8223 See below for why TPF must use register 1. */
8224
8225 if (!has_hard_reg_initial_val (Pmode, RETURN_REGNUM)
8226 && !crtl->is_leaf
8227 && !TARGET_TPF_PROFILING)
8228 temp_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
8229 else
8230 temp_reg = gen_rtx_REG (Pmode, 1);
8231
8232 /* Save call saved gprs. */
8233 if (cfun_frame_layout.first_save_gpr != -1)
8234 {
8235 insn = save_gprs (stack_pointer_rtx,
8236 cfun_frame_layout.gprs_offset +
8237 UNITS_PER_LONG * (cfun_frame_layout.first_save_gpr
8238 - cfun_frame_layout.first_save_gpr_slot),
8239 cfun_frame_layout.first_save_gpr,
8240 cfun_frame_layout.last_save_gpr);
8241 emit_insn (insn);
8242 }
8243
8244 /* Dummy insn to mark literal pool slot. */
8245
8246 if (cfun->machine->base_reg)
8247 emit_insn (gen_main_pool (cfun->machine->base_reg));
8248
8249 offset = cfun_frame_layout.f0_offset;
8250
8251 /* Save f0 and f2. */
8252 for (i = 0; i < 2; i++)
8253 {
8254 if (cfun_fpr_bit_p (i))
8255 {
8256 save_fpr (stack_pointer_rtx, offset, i + 16);
8257 offset += 8;
8258 }
8259 else if (!TARGET_PACKED_STACK)
8260 offset += 8;
8261 }
8262
8263 /* Save f4 and f6. */
8264 offset = cfun_frame_layout.f4_offset;
8265 for (i = 2; i < 4; i++)
8266 {
8267 if (cfun_fpr_bit_p (i))
8268 {
8269 insn = save_fpr (stack_pointer_rtx, offset, i + 16);
8270 offset += 8;
8271
8272 /* If f4 and f6 are call clobbered, they are saved due to stdarg
8273 and are therefore not frame related. */
8274 if (!call_really_used_regs[i + 16])
8275 RTX_FRAME_RELATED_P (insn) = 1;
8276 }
8277 else if (!TARGET_PACKED_STACK)
8278 offset += 8;
8279 }
8280
8281 if (TARGET_PACKED_STACK
8282 && cfun_save_high_fprs_p
8283 && cfun_frame_layout.f8_offset + cfun_frame_layout.high_fprs * 8 > 0)
8284 {
8285 offset = (cfun_frame_layout.f8_offset
8286 + (cfun_frame_layout.high_fprs - 1) * 8);
8287
8288 for (i = 15; i > 7 && offset >= 0; i--)
8289 if (cfun_fpr_bit_p (i))
8290 {
8291 insn = save_fpr (stack_pointer_rtx, offset, i + 16);
8292
8293 RTX_FRAME_RELATED_P (insn) = 1;
8294 offset -= 8;
8295 }
8296 if (offset >= cfun_frame_layout.f8_offset)
8297 next_fpr = i + 16;
8298 }
8299
8300 if (!TARGET_PACKED_STACK)
8301 next_fpr = cfun_save_high_fprs_p ? 31 : 0;
8302
8303 if (flag_stack_usage_info)
8304 current_function_static_stack_size = cfun_frame_layout.frame_size;
8305
8306 /* Decrement stack pointer. */
8307
8308 if (cfun_frame_layout.frame_size > 0)
8309 {
8310 rtx frame_off = GEN_INT (-cfun_frame_layout.frame_size);
8311 rtx real_frame_off;
8312
8313 if (s390_stack_size)
8314 {
8315 HOST_WIDE_INT stack_guard;
8316
8317 if (s390_stack_guard)
8318 stack_guard = s390_stack_guard;
8319 else
8320 {
8321 /* If no value for the stack guard is provided, the smallest power of 2
8322 not smaller than the current frame size is chosen. */
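/* E.g. (illustrative) a frame size of 5000 bytes yields a stack
   guard of 8192, the smallest power of 2 that is not smaller than
   the frame size.  */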
8323 stack_guard = 1;
8324 while (stack_guard < cfun_frame_layout.frame_size)
8325 stack_guard <<= 1;
8326 }
8327
8328 if (cfun_frame_layout.frame_size >= s390_stack_size)
8329 {
8330 warning (0, "frame size of function %qs is %wd"
8331 " bytes exceeding user provided stack limit of "
8332 "%d bytes. "
8333 "An unconditional trap is added.",
8334 current_function_name(), cfun_frame_layout.frame_size,
8335 s390_stack_size);
8336 emit_insn (gen_trap ());
8337 }
8338 else
8339 {
8340 /* stack_guard has to be smaller than s390_stack_size.
8341 Otherwise we would emit an AND with zero which would
8342 not match the test under mask pattern. */
8343 if (stack_guard >= s390_stack_size)
8344 {
8345 warning (0, "frame size of function %qs is %wd"
8346 " bytes which is more than half the stack size. "
8347 "The dynamic check would not be reliable. "
8348 "No check emitted for this function.",
8349 current_function_name(),
8350 cfun_frame_layout.frame_size);
8351 }
8352 else
8353 {
8354 HOST_WIDE_INT stack_check_mask = ((s390_stack_size - 1)
8355 & ~(stack_guard - 1));
8356
8357 rtx t = gen_rtx_AND (Pmode, stack_pointer_rtx,
8358 GEN_INT (stack_check_mask));
8359 if (TARGET_64BIT)
8360 emit_insn (gen_ctrapdi4 (gen_rtx_EQ (VOIDmode,
8361 t, const0_rtx),
8362 t, const0_rtx, const0_rtx));
8363 else
8364 emit_insn (gen_ctrapsi4 (gen_rtx_EQ (VOIDmode,
8365 t, const0_rtx),
8366 t, const0_rtx, const0_rtx));
8367 }
8368 }
8369 }
8370
8371 if (s390_warn_framesize > 0
8372 && cfun_frame_layout.frame_size >= s390_warn_framesize)
8373 warning (0, "frame size of %qs is %wd bytes",
8374 current_function_name (), cfun_frame_layout.frame_size);
8375
8376 if (s390_warn_dynamicstack_p && cfun->calls_alloca)
8377 warning (0, "%qs uses dynamic stack allocation", current_function_name ());
8378
8379 /* Save incoming stack pointer into temp reg. */
8380 if (TARGET_BACKCHAIN || next_fpr)
8381 insn = emit_insn (gen_move_insn (temp_reg, stack_pointer_rtx));
8382
8383 /* Subtract frame size from stack pointer. */
8384
8385 if (DISP_IN_RANGE (INTVAL (frame_off)))
8386 {
8387 insn = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
8388 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
8389 frame_off));
8390 insn = emit_insn (insn);
8391 }
8392 else
8393 {
8394 if (!CONST_OK_FOR_K (INTVAL (frame_off)))
8395 frame_off = force_const_mem (Pmode, frame_off);
8396
8397 insn = emit_insn (gen_add2_insn (stack_pointer_rtx, frame_off));
8398 annotate_constant_pool_refs (&PATTERN (insn));
8399 }
8400
8401 RTX_FRAME_RELATED_P (insn) = 1;
8402 real_frame_off = GEN_INT (-cfun_frame_layout.frame_size);
8403 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
8404 gen_rtx_SET (VOIDmode, stack_pointer_rtx,
8405 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
8406 real_frame_off)));
8407
8408 /* Set backchain. */
8409
8410 if (TARGET_BACKCHAIN)
8411 {
8412 if (cfun_frame_layout.backchain_offset)
8413 addr = gen_rtx_MEM (Pmode,
8414 plus_constant (Pmode, stack_pointer_rtx,
8415 cfun_frame_layout.backchain_offset));
8416 else
8417 addr = gen_rtx_MEM (Pmode, stack_pointer_rtx);
8418 set_mem_alias_set (addr, get_frame_alias_set ());
8419 insn = emit_insn (gen_move_insn (addr, temp_reg));
8420 }
8421
8422 /* If we support non-call exceptions (e.g. for Java),
8423 we need to make sure the backchain pointer is set up
8424 before any possibly trapping memory access. */
8425 if (TARGET_BACKCHAIN && cfun->can_throw_non_call_exceptions)
8426 {
8427 addr = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (VOIDmode));
8428 emit_clobber (addr);
8429 }
8430 }
8431
8432 /* Save fprs 8 - 15 (64 bit ABI). */
8433
8434 if (cfun_save_high_fprs_p && next_fpr)
8435 {
8436 /* If the stack might be accessed through a different register
8437 we have to make sure that the stack pointer decrement is not
8438 moved below the use of the stack slots. */
8439 s390_emit_stack_tie ();
8440
8441 insn = emit_insn (gen_add2_insn (temp_reg,
8442 GEN_INT (cfun_frame_layout.f8_offset)));
8443
8444 offset = 0;
8445
8446 for (i = 24; i <= next_fpr; i++)
8447 if (cfun_fpr_bit_p (i - 16))
8448 {
8449 rtx addr = plus_constant (Pmode, stack_pointer_rtx,
8450 cfun_frame_layout.frame_size
8451 + cfun_frame_layout.f8_offset
8452 + offset);
8453
8454 insn = save_fpr (temp_reg, offset, i);
8455 offset += 8;
8456 RTX_FRAME_RELATED_P (insn) = 1;
8457 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
8458 gen_rtx_SET (VOIDmode,
8459 gen_rtx_MEM (DFmode, addr),
8460 gen_rtx_REG (DFmode, i)));
8461 }
8462 }
8463
8464 /* Set frame pointer, if needed. */
8465
8466 if (frame_pointer_needed)
8467 {
8468 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
8469 RTX_FRAME_RELATED_P (insn) = 1;
8470 }
8471
8472 /* Set up got pointer, if needed. */
8473
8474 if (flag_pic && df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM))
8475 {
8476 rtx insns = s390_load_got ();
8477
8478 for (insn = insns; insn; insn = NEXT_INSN (insn))
8479 annotate_constant_pool_refs (&PATTERN (insn));
8480
8481 emit_insn (insns);
8482 }
8483
8484 if (TARGET_TPF_PROFILING)
8485 {
8486 /* Generate a BAS instruction to serve as a function
8487 entry intercept to facilitate the use of tracing
8488 algorithms located at the branch target. */
8489 emit_insn (gen_prologue_tpf ());
8490
8491 /* Emit a blockage here so that all code
8492 lies between the profiling mechanisms. */
8493 emit_insn (gen_blockage ());
8494 }
8495 }
8496
8497 /* Expand the epilogue into a bunch of separate insns. */
8498
8499 void
8500 s390_emit_epilogue (bool sibcall)
8501 {
8502 rtx frame_pointer, return_reg, cfa_restores = NULL_RTX;
8503 int area_bottom, area_top, offset = 0;
8504 int next_offset;
8505 rtvec p;
8506 int i;
8507
8508 if (TARGET_TPF_PROFILING)
8509 {
8510
8511 /* Generate a BAS instruction to serve as a function
8512 entry intercept to facilitate the use of tracing
8513 algorithms located at the branch target. */
8514
8515 /* Emit a blockage here so that all code
8516 lies between the profiling mechanisms. */
8517 emit_insn (gen_blockage ());
8518
8519 emit_insn (gen_epilogue_tpf ());
8520 }
8521
8522 /* Check whether to use frame or stack pointer for restore. */
8523
8524 frame_pointer = (frame_pointer_needed
8525 ? hard_frame_pointer_rtx : stack_pointer_rtx);
8526
8527 s390_frame_area (&area_bottom, &area_top);
8528
8529 /* Check whether we can access the register save area.
8530 If not, increment the frame pointer as required. */
8531
8532 if (area_top <= area_bottom)
8533 {
8534 /* Nothing to restore. */
8535 }
8536 else if (DISP_IN_RANGE (cfun_frame_layout.frame_size + area_bottom)
8537 && DISP_IN_RANGE (cfun_frame_layout.frame_size + area_top - 1))
8538 {
8539 /* Area is in range. */
8540 offset = cfun_frame_layout.frame_size;
8541 }
8542 else
8543 {
8544 rtx insn, frame_off, cfa;
8545
8546 offset = area_bottom < 0 ? -area_bottom : 0;
8547 frame_off = GEN_INT (cfun_frame_layout.frame_size - offset);
8548
8549 cfa = gen_rtx_SET (VOIDmode, frame_pointer,
8550 gen_rtx_PLUS (Pmode, frame_pointer, frame_off));
8551 if (DISP_IN_RANGE (INTVAL (frame_off)))
8552 {
8553 insn = gen_rtx_SET (VOIDmode, frame_pointer,
8554 gen_rtx_PLUS (Pmode, frame_pointer, frame_off));
8555 insn = emit_insn (insn);
8556 }
8557 else
8558 {
8559 if (!CONST_OK_FOR_K (INTVAL (frame_off)))
8560 frame_off = force_const_mem (Pmode, frame_off);
8561
8562 insn = emit_insn (gen_add2_insn (frame_pointer, frame_off));
8563 annotate_constant_pool_refs (&PATTERN (insn));
8564 }
8565 add_reg_note (insn, REG_CFA_ADJUST_CFA, cfa);
8566 RTX_FRAME_RELATED_P (insn) = 1;
8567 }
8568
8569 /* Restore call saved fprs. */
8570
8571 if (TARGET_64BIT)
8572 {
8573 if (cfun_save_high_fprs_p)
8574 {
8575 next_offset = cfun_frame_layout.f8_offset;
8576 for (i = 24; i < 32; i++)
8577 {
8578 if (cfun_fpr_bit_p (i - 16))
8579 {
8580 restore_fpr (frame_pointer,
8581 offset + next_offset, i);
8582 cfa_restores
8583 = alloc_reg_note (REG_CFA_RESTORE,
8584 gen_rtx_REG (DFmode, i), cfa_restores);
8585 next_offset += 8;
8586 }
8587 }
8588 }
8589
8590 }
8591 else
8592 {
8593 next_offset = cfun_frame_layout.f4_offset;
8594 for (i = 18; i < 20; i++)
8595 {
8596 if (cfun_fpr_bit_p (i - 16))
8597 {
8598 restore_fpr (frame_pointer,
8599 offset + next_offset, i);
8600 cfa_restores
8601 = alloc_reg_note (REG_CFA_RESTORE,
8602 gen_rtx_REG (DFmode, i), cfa_restores);
8603 next_offset += 8;
8604 }
8605 else if (!TARGET_PACKED_STACK)
8606 next_offset += 8;
8607 }
8608
8609 }
8610
8611 /* Return register. */
8612
8613 return_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
8614
8615 /* Restore call saved gprs. */
8616
8617 if (cfun_frame_layout.first_restore_gpr != -1)
8618 {
8619 rtx insn, addr;
8620 int i;
8621
8622 /* Check for global registers and save them
8623 to the stack locations from which they get restored. */
8624
8625 for (i = cfun_frame_layout.first_restore_gpr;
8626 i <= cfun_frame_layout.last_restore_gpr;
8627 i++)
8628 {
8629 if (global_not_special_regno_p (i))
8630 {
8631 addr = plus_constant (Pmode, frame_pointer,
8632 offset + cfun_frame_layout.gprs_offset
8633 + (i - cfun_frame_layout.first_save_gpr_slot)
8634 * UNITS_PER_LONG);
8635 addr = gen_rtx_MEM (Pmode, addr);
8636 set_mem_alias_set (addr, get_frame_alias_set ());
8637 emit_move_insn (addr, gen_rtx_REG (Pmode, i));
8638 }
8639 else
8640 cfa_restores
8641 = alloc_reg_note (REG_CFA_RESTORE,
8642 gen_rtx_REG (Pmode, i), cfa_restores);
8643 }
8644
8645 if (! sibcall)
8646 {
8647 /* Fetch the return address from the stack before the load multiple;
8648 this is good for scheduling. */
8649
8650 if (cfun_frame_layout.save_return_addr_p
8651 || (cfun_frame_layout.first_restore_gpr < BASE_REGNUM
8652 && cfun_frame_layout.last_restore_gpr > RETURN_REGNUM))
8653 {
8654 int return_regnum = find_unused_clobbered_reg();
8655 if (!return_regnum)
8656 return_regnum = 4;
8657 return_reg = gen_rtx_REG (Pmode, return_regnum);
8658
8659 addr = plus_constant (Pmode, frame_pointer,
8660 offset + cfun_frame_layout.gprs_offset
8661 + (RETURN_REGNUM
8662 - cfun_frame_layout.first_save_gpr_slot)
8663 * UNITS_PER_LONG);
8664 addr = gen_rtx_MEM (Pmode, addr);
8665 set_mem_alias_set (addr, get_frame_alias_set ());
8666 emit_move_insn (return_reg, addr);
8667 }
8668 }
8669
8670 insn = restore_gprs (frame_pointer,
8671 offset + cfun_frame_layout.gprs_offset
8672 + (cfun_frame_layout.first_restore_gpr
8673 - cfun_frame_layout.first_save_gpr_slot)
8674 * UNITS_PER_LONG,
8675 cfun_frame_layout.first_restore_gpr,
8676 cfun_frame_layout.last_restore_gpr);
8677 insn = emit_insn (insn);
8678 REG_NOTES (insn) = cfa_restores;
8679 add_reg_note (insn, REG_CFA_DEF_CFA,
8680 plus_constant (Pmode, stack_pointer_rtx,
8681 STACK_POINTER_OFFSET));
8682 RTX_FRAME_RELATED_P (insn) = 1;
8683 }
8684
8685 if (! sibcall)
8686 {
8687
8688 /* Return to caller. */
8689
8690 p = rtvec_alloc (2);
8691
8692 RTVEC_ELT (p, 0) = ret_rtx;
8693 RTVEC_ELT (p, 1) = gen_rtx_USE (VOIDmode, return_reg);
8694 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
8695 }
8696 }
8697
8698
8699 /* Return the size in bytes of a function argument of
8700 type TYPE and/or mode MODE. At least one of TYPE or
8701 MODE must be specified. */
8702
8703 static int
8704 s390_function_arg_size (enum machine_mode mode, const_tree type)
8705 {
8706 if (type)
8707 return int_size_in_bytes (type);
8708
8709 /* No type info available for some library calls ... */
8710 if (mode != BLKmode)
8711 return GET_MODE_SIZE (mode);
8712
8713 /* If we have neither type nor mode, abort. */
8714 gcc_unreachable ();
8715 }
8716
8717 /* Return true if a function argument of type TYPE and mode MODE
8718 is to be passed in a floating-point register, if available. */
8719
8720 static bool
8721 s390_function_arg_float (enum machine_mode mode, const_tree type)
8722 {
8723 int size = s390_function_arg_size (mode, type);
8724 if (size > 8)
8725 return false;
8726
8727 /* Soft-float changes the ABI: no floating-point registers are used. */
8728 if (TARGET_SOFT_FLOAT)
8729 return false;
8730
8731 /* No type info available for some library calls ... */
8732 if (!type)
8733 return mode == SFmode || mode == DFmode || mode == SDmode || mode == DDmode;
8734
8735 /* The ABI says that record types with a single member are treated
8736 just like that member would be. */
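/* For instance (illustrative):

	struct wrap { double d; };

   is passed exactly like a plain double, whereas a record with more than
   one member is rejected here and falls back to the integer or stack
   conventions instead.  */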
8737 while (TREE_CODE (type) == RECORD_TYPE)
8738 {
8739 tree field, single = NULL_TREE;
8740
8741 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
8742 {
8743 if (TREE_CODE (field) != FIELD_DECL)
8744 continue;
8745
8746 if (single == NULL_TREE)
8747 single = TREE_TYPE (field);
8748 else
8749 return false;
8750 }
8751
8752 if (single == NULL_TREE)
8753 return false;
8754 else
8755 type = single;
8756 }
8757
8758 return TREE_CODE (type) == REAL_TYPE;
8759 }
8760
8761 /* Return true if a function argument of type TYPE and mode MODE
8762 is to be passed in an integer register, or a pair of integer
8763 registers, if available. */
8764
8765 static bool
8766 s390_function_arg_integer (enum machine_mode mode, const_tree type)
8767 {
8768 int size = s390_function_arg_size (mode, type);
8769 if (size > 8)
8770 return false;
8771
8772 /* No type info available for some library calls ... */
8773 if (!type)
8774 return GET_MODE_CLASS (mode) == MODE_INT
8775 || (TARGET_SOFT_FLOAT && SCALAR_FLOAT_MODE_P (mode));
8776
8777 /* We accept small integral (and similar) types. */
8778 if (INTEGRAL_TYPE_P (type)
8779 || POINTER_TYPE_P (type)
8780 || TREE_CODE (type) == NULLPTR_TYPE
8781 || TREE_CODE (type) == OFFSET_TYPE
8782 || (TARGET_SOFT_FLOAT && TREE_CODE (type) == REAL_TYPE))
8783 return true;
8784
8785 /* We also accept structs of size 1, 2, 4, 8 that are not
8786 passed in floating-point registers. */
8787 if (AGGREGATE_TYPE_P (type)
8788 && exact_log2 (size) >= 0
8789 && !s390_function_arg_float (mode, type))
8790 return true;
8791
8792 return false;
8793 }
8794
8795 /* Return true if a function argument of type TYPE and mode MODE
8796 is to be passed by reference. The ABI specifies that only
8797 structures of size 1, 2, 4, or 8 bytes are passed by value,
8798 all other structures (and complex numbers) are passed by
8799 reference. */
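/* Illustrative consequences of this rule: struct { char c[3]; } (size 3,
   not a power of two) is passed by reference, while struct { char c[4]; }
   and struct { double d; } (sizes 4 and 8) are passed by value.  */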
8800
8801 static bool
8802 s390_pass_by_reference (cumulative_args_t ca ATTRIBUTE_UNUSED,
8803 enum machine_mode mode, const_tree type,
8804 bool named ATTRIBUTE_UNUSED)
8805 {
8806 int size = s390_function_arg_size (mode, type);
8807 if (size > 8)
8808 return true;
8809
8810 if (type)
8811 {
8812 if (AGGREGATE_TYPE_P (type) && exact_log2 (size) < 0)
8813 return true;
8814
8815 if (TREE_CODE (type) == COMPLEX_TYPE
8816 || TREE_CODE (type) == VECTOR_TYPE)
8817 return true;
8818 }
8819
8820 return false;
8821 }
8822
8823 /* Update the data in CUM to advance over an argument of mode MODE and
8824 data type TYPE. (TYPE is null for libcalls where that information
8825 may not be available.) The boolean NAMED specifies whether the
8826 argument is a named argument (as opposed to an unnamed argument
8827 matching an ellipsis). */
8828
8829 static void
8830 s390_function_arg_advance (cumulative_args_t cum_v, enum machine_mode mode,
8831 const_tree type, bool named ATTRIBUTE_UNUSED)
8832 {
8833 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
8834
8835 if (s390_function_arg_float (mode, type))
8836 {
8837 cum->fprs += 1;
8838 }
8839 else if (s390_function_arg_integer (mode, type))
8840 {
8841 int size = s390_function_arg_size (mode, type);
8842 cum->gprs += ((size + UNITS_PER_LONG - 1) / UNITS_PER_LONG);
8843 }
8844 else
8845 gcc_unreachable ();
8846 }
8847
8848 /* Define where to put the arguments to a function.
8849 Value is zero to push the argument on the stack,
8850 or a hard register in which to store the argument.
8851
8852 MODE is the argument's machine mode.
8853 TYPE is the data type of the argument (as a tree).
8854 This is null for libcalls where that information may
8855 not be available.
8856 CUM is a variable of type CUMULATIVE_ARGS which gives info about
8857 the preceding args and about the function being called.
8858 NAMED is nonzero if this argument is a named parameter
8859 (otherwise it is an extra parameter matching an ellipsis).
8860
8861 On S/390, we use general purpose registers 2 through 6 to
8862 pass integer, pointer, and certain structure arguments, and
8863 floating point registers 0 and 2 (0, 2, 4, and 6 on 64-bit)
8864 to pass floating point arguments. All remaining arguments
8865 are pushed to the stack. */
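/* Worked example (illustrative): with -m31 -mzarch a DImode argument
   occupies two 4-byte GPRs. If cum->gprs is 2, the code below describes
   it as a PARALLEL of two SImode registers, r4 at byte offset 0 and r5 at
   byte offset 4, matching the 32-bit register pair required by the ABI.  */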
8866
8867 static rtx
8868 s390_function_arg (cumulative_args_t cum_v, enum machine_mode mode,
8869 const_tree type, bool named ATTRIBUTE_UNUSED)
8870 {
8871 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
8872
8873 if (s390_function_arg_float (mode, type))
8874 {
8875 if (cum->fprs + 1 > FP_ARG_NUM_REG)
8876 return 0;
8877 else
8878 return gen_rtx_REG (mode, cum->fprs + 16);
8879 }
8880 else if (s390_function_arg_integer (mode, type))
8881 {
8882 int size = s390_function_arg_size (mode, type);
8883 int n_gprs = (size + UNITS_PER_LONG - 1) / UNITS_PER_LONG;
8884
8885 if (cum->gprs + n_gprs > GP_ARG_NUM_REG)
8886 return 0;
8887 else if (n_gprs == 1 || UNITS_PER_WORD == UNITS_PER_LONG)
8888 return gen_rtx_REG (mode, cum->gprs + 2);
8889 else if (n_gprs == 2)
8890 {
8891 rtvec p = rtvec_alloc (2);
8892
8893 RTVEC_ELT (p, 0)
8894 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, cum->gprs + 2),
8895 const0_rtx);
8896 RTVEC_ELT (p, 1)
8897 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, cum->gprs + 3),
8898 GEN_INT (4));
8899
8900 return gen_rtx_PARALLEL (mode, p);
8901 }
8902 }
8903
8904 /* After the real arguments, expand_call calls us once again
8905 with a void_type_node type. Whatever we return here is
8906 passed as operand 2 to the call expanders.
8907
8908 We don't need this feature ... */
8909 else if (type == void_type_node)
8910 return const0_rtx;
8911
8912 gcc_unreachable ();
8913 }
8914
8915 /* Return true if return values of type TYPE should be returned
8916 in a memory buffer whose address is passed by the caller as
8917 hidden first argument. */
8918
8919 static bool
8920 s390_return_in_memory (const_tree type, const_tree fundecl ATTRIBUTE_UNUSED)
8921 {
8922 /* We accept small integral (and similar) types. */
8923 if (INTEGRAL_TYPE_P (type)
8924 || POINTER_TYPE_P (type)
8925 || TREE_CODE (type) == OFFSET_TYPE
8926 || TREE_CODE (type) == REAL_TYPE)
8927 return int_size_in_bytes (type) > 8;
8928
8929 /* Aggregates and similar constructs are always returned
8930 in memory. */
8931 if (AGGREGATE_TYPE_P (type)
8932 || TREE_CODE (type) == COMPLEX_TYPE
8933 || TREE_CODE (type) == VECTOR_TYPE)
8934 return true;
8935
8936 /* ??? We get called on all sorts of random stuff from
8937 aggregate_value_p. We can't abort, but it's not clear
8938 what's safe to return. Pretend it's a struct I guess. */
8939 return true;
8940 }
8941
8942 /* Function arguments and return values are promoted to word size. */
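/* E.g. (illustrative) a QImode or HImode argument is widened to Pmode,
   i.e. SImode on 31 bit and DImode on 64 bit; modes of at least
   UNITS_PER_LONG bytes are left untouched.  */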
8943
8944 static enum machine_mode
8945 s390_promote_function_mode (const_tree type, enum machine_mode mode,
8946 int *punsignedp,
8947 const_tree fntype ATTRIBUTE_UNUSED,
8948 int for_return ATTRIBUTE_UNUSED)
8949 {
8950 if (INTEGRAL_MODE_P (mode)
8951 && GET_MODE_SIZE (mode) < UNITS_PER_LONG)
8952 {
8953 if (type != NULL_TREE && POINTER_TYPE_P (type))
8954 *punsignedp = POINTERS_EXTEND_UNSIGNED;
8955 return Pmode;
8956 }
8957
8958 return mode;
8959 }
8960
8961 /* Define where to return a (scalar) value of type RET_TYPE.
8962 If RET_TYPE is null, define where to return a (scalar)
8963 value of mode MODE from a libcall. */
8964
8965 static rtx
8966 s390_function_and_libcall_value (enum machine_mode mode,
8967 const_tree ret_type,
8968 const_tree fntype_or_decl,
8969 bool outgoing ATTRIBUTE_UNUSED)
8970 {
8971 /* For normal functions perform the promotion as
8972 promote_function_mode would do. */
8973 if (ret_type)
8974 {
8975 int unsignedp = TYPE_UNSIGNED (ret_type);
8976 mode = promote_function_mode (ret_type, mode, &unsignedp,
8977 fntype_or_decl, 1);
8978 }
8979
8980 gcc_assert (GET_MODE_CLASS (mode) == MODE_INT || SCALAR_FLOAT_MODE_P (mode));
8981 gcc_assert (GET_MODE_SIZE (mode) <= 8);
8982
8983 if (TARGET_HARD_FLOAT && SCALAR_FLOAT_MODE_P (mode))
8984 return gen_rtx_REG (mode, 16);
8985 else if (GET_MODE_SIZE (mode) <= UNITS_PER_LONG
8986 || UNITS_PER_LONG == UNITS_PER_WORD)
8987 return gen_rtx_REG (mode, 2);
8988 else if (GET_MODE_SIZE (mode) == 2 * UNITS_PER_LONG)
8989 {
8990 /* This case is triggered when returning a 64 bit value with
8991 -m31 -mzarch. Although the value would fit into a single
8992 register, it has to be forced into a 32 bit register pair in
8993 order to match the ABI. */
8994 rtvec p = rtvec_alloc (2);
8995
8996 RTVEC_ELT (p, 0)
8997 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, 2), const0_rtx);
8998 RTVEC_ELT (p, 1)
8999 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, 3), GEN_INT (4));
9000
9001 return gen_rtx_PARALLEL (mode, p);
9002 }
9003
9004 gcc_unreachable ();
9005 }
9006
9007 /* Define where to return a scalar return value of type RET_TYPE. */
9008
9009 static rtx
9010 s390_function_value (const_tree ret_type, const_tree fn_decl_or_type,
9011 bool outgoing)
9012 {
9013 return s390_function_and_libcall_value (TYPE_MODE (ret_type), ret_type,
9014 fn_decl_or_type, outgoing);
9015 }
9016
9017 /* Define where to return a scalar libcall return value of mode
9018 MODE. */
9019
9020 static rtx
9021 s390_libcall_value (enum machine_mode mode, const_rtx fun ATTRIBUTE_UNUSED)
9022 {
9023 return s390_function_and_libcall_value (mode, NULL_TREE,
9024 NULL_TREE, true);
9025 }
9026
9027
9028 /* Create and return the va_list datatype.
9029
9030 On S/390, va_list is an array type equivalent to
9031
9032 typedef struct __va_list_tag
9033 {
9034 long __gpr;
9035 long __fpr;
9036 void *__overflow_arg_area;
9037 void *__reg_save_area;
9038 } va_list[1];
9039
9040 where __gpr and __fpr hold the number of general purpose
9041 or floating point arguments used up to now, respectively,
9042 __overflow_arg_area points to the stack location of the
9043 next argument passed on the stack, and __reg_save_area
9044 always points to the start of the register area in the
9045 call frame of the current function. The function prologue
9046 saves all registers used for argument passing into this
9047 area if the function uses variable arguments. */
9048
9049 static tree
9050 s390_build_builtin_va_list (void)
9051 {
9052 tree f_gpr, f_fpr, f_ovf, f_sav, record, type_decl;
9053
9054 record = lang_hooks.types.make_type (RECORD_TYPE);
9055
9056 type_decl =
9057 build_decl (BUILTINS_LOCATION,
9058 TYPE_DECL, get_identifier ("__va_list_tag"), record);
9059
9060 f_gpr = build_decl (BUILTINS_LOCATION,
9061 FIELD_DECL, get_identifier ("__gpr"),
9062 long_integer_type_node);
9063 f_fpr = build_decl (BUILTINS_LOCATION,
9064 FIELD_DECL, get_identifier ("__fpr"),
9065 long_integer_type_node);
9066 f_ovf = build_decl (BUILTINS_LOCATION,
9067 FIELD_DECL, get_identifier ("__overflow_arg_area"),
9068 ptr_type_node);
9069 f_sav = build_decl (BUILTINS_LOCATION,
9070 FIELD_DECL, get_identifier ("__reg_save_area"),
9071 ptr_type_node);
9072
9073 va_list_gpr_counter_field = f_gpr;
9074 va_list_fpr_counter_field = f_fpr;
9075
9076 DECL_FIELD_CONTEXT (f_gpr) = record;
9077 DECL_FIELD_CONTEXT (f_fpr) = record;
9078 DECL_FIELD_CONTEXT (f_ovf) = record;
9079 DECL_FIELD_CONTEXT (f_sav) = record;
9080
9081 TYPE_STUB_DECL (record) = type_decl;
9082 TYPE_NAME (record) = type_decl;
9083 TYPE_FIELDS (record) = f_gpr;
9084 DECL_CHAIN (f_gpr) = f_fpr;
9085 DECL_CHAIN (f_fpr) = f_ovf;
9086 DECL_CHAIN (f_ovf) = f_sav;
9087
9088 layout_type (record);
9089
9090 /* The correct type is an array type of one element. */
9091 return build_array_type (record, build_index_type (size_zero_node));
9092 }
9093
9094 /* Implement va_start by filling the va_list structure VALIST.
9095 STDARG_P is always true, and ignored.
9096 NEXTARG points to the first anonymous stack argument.
9097
9098 The following global variables are used to initialize
9099 the va_list structure:
9100
9101 crtl->args.info:
9102 holds number of gprs and fprs used for named arguments.
9103 crtl->args.arg_offset_rtx:
9104 holds the offset of the first anonymous stack argument
9105 (relative to the virtual arg pointer). */
9106
9107 static void
9108 s390_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
9109 {
9110 HOST_WIDE_INT n_gpr, n_fpr;
9111 int off;
9112 tree f_gpr, f_fpr, f_ovf, f_sav;
9113 tree gpr, fpr, ovf, sav, t;
9114
9115 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
9116 f_fpr = DECL_CHAIN (f_gpr);
9117 f_ovf = DECL_CHAIN (f_fpr);
9118 f_sav = DECL_CHAIN (f_ovf);
9119
9120 valist = build_simple_mem_ref (valist);
9121 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
9122 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
9123 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
9124 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
9125
9126 /* Count number of gp and fp argument registers used. */
9127
9128 n_gpr = crtl->args.info.gprs;
9129 n_fpr = crtl->args.info.fprs;
9130
9131 if (cfun->va_list_gpr_size)
9132 {
9133 t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
9134 build_int_cst (NULL_TREE, n_gpr));
9135 TREE_SIDE_EFFECTS (t) = 1;
9136 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
9137 }
9138
9139 if (cfun->va_list_fpr_size)
9140 {
9141 t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
9142 build_int_cst (NULL_TREE, n_fpr));
9143 TREE_SIDE_EFFECTS (t) = 1;
9144 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
9145 }
9146
9147 /* Find the overflow area. */
9148 if (n_gpr + cfun->va_list_gpr_size > GP_ARG_NUM_REG
9149 || n_fpr + cfun->va_list_fpr_size > FP_ARG_NUM_REG)
9150 {
9151 t = make_tree (TREE_TYPE (ovf), virtual_incoming_args_rtx);
9152
9153 off = INTVAL (crtl->args.arg_offset_rtx);
9154 off = off < 0 ? 0 : off;
9155 if (TARGET_DEBUG_ARG)
9156 fprintf (stderr, "va_start: n_gpr = %d, n_fpr = %d off %d\n",
9157 (int)n_gpr, (int)n_fpr, off);
9158
9159 t = fold_build_pointer_plus_hwi (t, off);
9160
9161 t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
9162 TREE_SIDE_EFFECTS (t) = 1;
9163 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
9164 }
9165
9166 /* Find the register save area. */
9167 if ((cfun->va_list_gpr_size && n_gpr < GP_ARG_NUM_REG)
9168 || (cfun->va_list_fpr_size && n_fpr < FP_ARG_NUM_REG))
9169 {
9170 t = make_tree (TREE_TYPE (sav), return_address_pointer_rtx);
9171 t = fold_build_pointer_plus_hwi (t, -RETURN_REGNUM * UNITS_PER_LONG);
9172
9173 t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
9174 TREE_SIDE_EFFECTS (t) = 1;
9175 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
9176 }
9177 }
9178
9179 /* Implement va_arg by updating the va_list structure
9180 VALIST as required to retrieve an argument of type
9181 TYPE, and returning that argument.
9182
9183 Generates code equivalent to:
9184
9185 if (integral value) {
9186 if (size <= 4 && args.gpr < 5 ||
9187 size > 4 && args.gpr < 4 )
9188 ret = args.reg_save_area[args.gpr+8]
9189 else
9190 ret = *args.overflow_arg_area++;
9191 } else if (float value) {
9192 if (args.fpr < 2)
9193 ret = args.reg_save_area[args.fpr+64]
9194 else
9195 ret = *args.overflow_arg_area++;
9196 } else if (aggregate value) {
9197 if (args.gpr < 5)
9198 ret = *args.reg_save_area[args.gpr]
9199 else
9200 ret = **args.overflow_arg_area++;
9201 } */
9202
9203 static tree
9204 s390_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
9205 gimple_seq *post_p ATTRIBUTE_UNUSED)
9206 {
9207 tree f_gpr, f_fpr, f_ovf, f_sav;
9208 tree gpr, fpr, ovf, sav, reg, t, u;
9209 int indirect_p, size, n_reg, sav_ofs, sav_scale, max_reg;
9210 tree lab_false, lab_over, addr;
9211
9212 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
9213 f_fpr = DECL_CHAIN (f_gpr);
9214 f_ovf = DECL_CHAIN (f_fpr);
9215 f_sav = DECL_CHAIN (f_ovf);
9216
9217 valist = build_va_arg_indirect_ref (valist);
9218 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
9219 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
9220 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
9221
9222 /* The tree for args* cannot be shared between gpr/fpr and ovf since
9223 both appear on a lhs. */
9224 valist = unshare_expr (valist);
9225 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
9226
9227 size = int_size_in_bytes (type);
9228
9229 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
9230 {
9231 if (TARGET_DEBUG_ARG)
9232 {
9233 fprintf (stderr, "va_arg: aggregate type");
9234 debug_tree (type);
9235 }
9236
9237 /* Aggregates are passed by reference. */
9238 indirect_p = 1;
9239 reg = gpr;
9240 n_reg = 1;
9241
9242 /* Kernel stack layout on 31 bit: it is assumed here that no padding
9243 will be added by s390_frame_info because for va_args an even
9244 number of GPRs always has to be saved (r15-r2 = 14 regs). */
9245 sav_ofs = 2 * UNITS_PER_LONG;
9246 sav_scale = UNITS_PER_LONG;
9247 size = UNITS_PER_LONG;
9248 max_reg = GP_ARG_NUM_REG - n_reg;
9249 }
9250 else if (s390_function_arg_float (TYPE_MODE (type), type))
9251 {
9252 if (TARGET_DEBUG_ARG)
9253 {
9254 fprintf (stderr, "va_arg: float type");
9255 debug_tree (type);
9256 }
9257
9258 /* FP args go in FP registers, if present. */
9259 indirect_p = 0;
9260 reg = fpr;
9261 n_reg = 1;
9262 sav_ofs = 16 * UNITS_PER_LONG;
9263 sav_scale = 8;
9264 max_reg = FP_ARG_NUM_REG - n_reg;
9265 }
9266 else
9267 {
9268 if (TARGET_DEBUG_ARG)
9269 {
9270 fprintf (stderr, "va_arg: other type");
9271 debug_tree (type);
9272 }
9273
9274 /* Otherwise into GP registers. */
9275 indirect_p = 0;
9276 reg = gpr;
9277 n_reg = (size + UNITS_PER_LONG - 1) / UNITS_PER_LONG;
9278
9279 /* Kernel stack layout on 31 bit: it is assumed here that no padding
9280 will be added by s390_frame_info because for va_args an even
9281 number of GPRs always has to be saved (r15-r2 = 14 regs). */
9282 sav_ofs = 2 * UNITS_PER_LONG;
9283
9284 if (size < UNITS_PER_LONG)
9285 sav_ofs += UNITS_PER_LONG - size;
9286
9287 sav_scale = UNITS_PER_LONG;
9288 max_reg = GP_ARG_NUM_REG - n_reg;
9289 }
9290
9291 /* Pull the value out of the saved registers ... */
9292
9293 lab_false = create_artificial_label (UNKNOWN_LOCATION);
9294 lab_over = create_artificial_label (UNKNOWN_LOCATION);
9295 addr = create_tmp_var (ptr_type_node, "addr");
9296
9297 t = fold_convert (TREE_TYPE (reg), size_int (max_reg));
9298 t = build2 (GT_EXPR, boolean_type_node, reg, t);
9299 u = build1 (GOTO_EXPR, void_type_node, lab_false);
9300 t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
9301 gimplify_and_add (t, pre_p);
9302
9303 t = fold_build_pointer_plus_hwi (sav, sav_ofs);
9304 u = build2 (MULT_EXPR, TREE_TYPE (reg), reg,
9305 fold_convert (TREE_TYPE (reg), size_int (sav_scale)));
9306 t = fold_build_pointer_plus (t, u);
9307
9308 gimplify_assign (addr, t, pre_p);
9309
9310 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
9311
9312 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_false));
9313
9314
9315 /* ... Otherwise out of the overflow area. */
9316
9317 t = ovf;
9318 if (size < UNITS_PER_LONG)
9319 t = fold_build_pointer_plus_hwi (t, UNITS_PER_LONG - size);
9320
9321 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
9322
9323 gimplify_assign (addr, t, pre_p);
9324
9325 t = fold_build_pointer_plus_hwi (t, size);
9326 gimplify_assign (ovf, t, pre_p);
9327
9328 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_over));
9329
9330
9331 /* Increment register save count. */
9332
9333 u = build2 (PREINCREMENT_EXPR, TREE_TYPE (reg), reg,
9334 fold_convert (TREE_TYPE (reg), size_int (n_reg)));
9335 gimplify_and_add (u, pre_p);
9336
9337 if (indirect_p)
9338 {
9339 t = build_pointer_type_for_mode (build_pointer_type (type),
9340 ptr_mode, true);
9341 addr = fold_convert (t, addr);
9342 addr = build_va_arg_indirect_ref (addr);
9343 }
9344 else
9345 {
9346 t = build_pointer_type_for_mode (type, ptr_mode, true);
9347 addr = fold_convert (t, addr);
9348 }
9349
9350 return build_va_arg_indirect_ref (addr);
9351 }
9352
9353 /* Output assembly code for the trampoline template to
9354 stdio stream FILE.
9355
9356 On S/390, we use gpr 1 internally in the trampoline code;
9357 gpr 0 is used to hold the static chain. */
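/* Illustrative 64-bit layout resulting from this template together with
   s390_trampoline_init below (byte offsets, assuming UNITS_PER_LONG == 8):

	0:  basr %r1,0            %r1 now points to offset 2
	2:  lmg  %r0,%r1,14(%r1)  loads the two values at offsets 16 and 24
	8:  br   %r1              branches to the target function
	16: static chain value    ends up in %r0
	24: target function addr  ends up in %r1  */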
9358
9359 static void
9360 s390_asm_trampoline_template (FILE *file)
9361 {
9362 rtx op[2];
9363 op[0] = gen_rtx_REG (Pmode, 0);
9364 op[1] = gen_rtx_REG (Pmode, 1);
9365
9366 if (TARGET_64BIT)
9367 {
9368 output_asm_insn ("basr\t%1,0", op); /* 2 byte */
9369 output_asm_insn ("lmg\t%0,%1,14(%1)", op); /* 6 byte */
9370 output_asm_insn ("br\t%1", op); /* 2 byte */
9371 ASM_OUTPUT_SKIP (file, (HOST_WIDE_INT)(TRAMPOLINE_SIZE - 10));
9372 }
9373 else
9374 {
9375 output_asm_insn ("basr\t%1,0", op); /* 2 byte */
9376 output_asm_insn ("lm\t%0,%1,6(%1)", op); /* 4 byte */
9377 output_asm_insn ("br\t%1", op); /* 2 byte */
9378 ASM_OUTPUT_SKIP (file, (HOST_WIDE_INT)(TRAMPOLINE_SIZE - 8));
9379 }
9380 }
9381
9382 /* Emit RTL insns to initialize the variable parts of a trampoline.
9383 FNADDR is an RTX for the address of the function's pure code.
9384 CXT is an RTX for the static chain value for the function. */
9385
9386 static void
9387 s390_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
9388 {
9389 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
9390 rtx mem;
9391
9392 emit_block_move (m_tramp, assemble_trampoline_template (),
9393 GEN_INT (2 * UNITS_PER_LONG), BLOCK_OP_NORMAL);
9394
9395 mem = adjust_address (m_tramp, Pmode, 2 * UNITS_PER_LONG);
9396 emit_move_insn (mem, cxt);
9397 mem = adjust_address (m_tramp, Pmode, 3 * UNITS_PER_LONG);
9398 emit_move_insn (mem, fnaddr);
9399 }
9400
9401 /* Output assembler code to FILE to increment profiler label # LABELNO
9402 for profiling a function entry. */
9403
9404 void
9405 s390_function_profiler (FILE *file, int labelno)
9406 {
9407 rtx op[7];
9408
9409 char label[128];
9410 ASM_GENERATE_INTERNAL_LABEL (label, "LP", labelno);
9411
9412 fprintf (file, "# function profiler \n");
9413
9414 op[0] = gen_rtx_REG (Pmode, RETURN_REGNUM);
9415 op[1] = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
9416 op[1] = gen_rtx_MEM (Pmode, plus_constant (Pmode, op[1], UNITS_PER_LONG));
9417
9418 op[2] = gen_rtx_REG (Pmode, 1);
9419 op[3] = gen_rtx_SYMBOL_REF (Pmode, label);
9420 SYMBOL_REF_FLAGS (op[3]) = SYMBOL_FLAG_LOCAL;
9421
9422 op[4] = gen_rtx_SYMBOL_REF (Pmode, "_mcount");
9423 if (flag_pic)
9424 {
9425 op[4] = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op[4]), UNSPEC_PLT);
9426 op[4] = gen_rtx_CONST (Pmode, op[4]);
9427 }
9428
9429 if (TARGET_64BIT)
9430 {
9431 output_asm_insn ("stg\t%0,%1", op);
9432 output_asm_insn ("larl\t%2,%3", op);
9433 output_asm_insn ("brasl\t%0,%4", op);
9434 output_asm_insn ("lg\t%0,%1", op);
9435 }
9436 else if (!flag_pic)
9437 {
9438 op[6] = gen_label_rtx ();
9439
9440 output_asm_insn ("st\t%0,%1", op);
9441 output_asm_insn ("bras\t%2,%l6", op);
9442 output_asm_insn (".long\t%4", op);
9443 output_asm_insn (".long\t%3", op);
9444 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[6]));
9445 output_asm_insn ("l\t%0,0(%2)", op);
9446 output_asm_insn ("l\t%2,4(%2)", op);
9447 output_asm_insn ("basr\t%0,%0", op);
9448 output_asm_insn ("l\t%0,%1", op);
9449 }
9450 else
9451 {
9452 op[5] = gen_label_rtx ();
9453 op[6] = gen_label_rtx ();
9454
9455 output_asm_insn ("st\t%0,%1", op);
9456 output_asm_insn ("bras\t%2,%l6", op);
9457 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[5]));
9458 output_asm_insn (".long\t%4-%l5", op);
9459 output_asm_insn (".long\t%3-%l5", op);
9460 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[6]));
9461 output_asm_insn ("lr\t%0,%2", op);
9462 output_asm_insn ("a\t%0,0(%2)", op);
9463 output_asm_insn ("a\t%2,4(%2)", op);
9464 output_asm_insn ("basr\t%0,%0", op);
9465 output_asm_insn ("l\t%0,%1", op);
9466 }
9467 }
9468
9469 /* Encode symbol attributes (local vs. global, tls model) of a SYMBOL_REF
9470 into its SYMBOL_REF_FLAGS. */
9471
9472 static void
9473 s390_encode_section_info (tree decl, rtx rtl, int first)
9474 {
9475 default_encode_section_info (decl, rtl, first);
9476
9477 if (TREE_CODE (decl) == VAR_DECL)
9478 {
9479 /* If a variable has a forced alignment of less than 2 bytes, mark it
9480 with SYMBOL_FLAG_ALIGN1 to prevent it from being used as a LARL
9481 operand. */
9482 if (DECL_USER_ALIGN (decl) && DECL_ALIGN (decl) < 16)
9483 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_ALIGN1;
9484 if (!DECL_SIZE (decl)
9485 || !DECL_ALIGN (decl)
9486 || !host_integerp (DECL_SIZE (decl), 0)
9487 || (DECL_ALIGN (decl) <= 64
9488 && DECL_ALIGN (decl) != tree_low_cst (DECL_SIZE (decl), 0)))
9489 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_NOT_NATURALLY_ALIGNED;
9490 }
9491
9492 /* Literal pool references don't have a decl so they are handled
9493 differently here. We rely on the information in the MEM_ALIGN
9494 entry to decide upon natural alignment. */
9495 if (MEM_P (rtl)
9496 && GET_CODE (XEXP (rtl, 0)) == SYMBOL_REF
9497 && TREE_CONSTANT_POOL_ADDRESS_P (XEXP (rtl, 0))
9498 && (MEM_ALIGN (rtl) == 0
9499 || GET_MODE_BITSIZE (GET_MODE (rtl)) == 0
9500 || MEM_ALIGN (rtl) < GET_MODE_BITSIZE (GET_MODE (rtl))))
9501 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_NOT_NATURALLY_ALIGNED;
9502 }
9503
9504 /* Output thunk to FILE that implements a C++ virtual function call (with
9505 multiple inheritance) to FUNCTION. The thunk adjusts the this pointer
9506 by DELTA, and unless VCALL_OFFSET is zero, applies an additional adjustment
9507 stored at VCALL_OFFSET in the vtable whose address is located at offset 0
9508 relative to the resulting this pointer. */
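/* Small illustrative case: on 64 bit, with DELTA == 8 and VCALL_OFFSET == 0,
   the code below merely adds 8 to the this pointer in %r2 (or %r3 if the
   result is returned in memory) with a single la/lay and then emits
   "jg" to FUNCTION.  */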
9509
9510 static void
9511 s390_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
9512 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
9513 tree function)
9514 {
9515 rtx op[10];
9516 int nonlocal = 0;
9517
9518 /* Make sure unwind info is emitted for the thunk if needed. */
9519 final_start_function (emit_barrier (), file, 1);
9520
9521 /* Operand 0 is the target function. */
9522 op[0] = XEXP (DECL_RTL (function), 0);
9523 if (flag_pic && !SYMBOL_REF_LOCAL_P (op[0]))
9524 {
9525 nonlocal = 1;
9526 op[0] = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op[0]),
9527 TARGET_64BIT ? UNSPEC_PLT : UNSPEC_GOT);
9528 op[0] = gen_rtx_CONST (Pmode, op[0]);
9529 }
9530
9531 /* Operand 1 is the 'this' pointer. */
9532 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
9533 op[1] = gen_rtx_REG (Pmode, 3);
9534 else
9535 op[1] = gen_rtx_REG (Pmode, 2);
9536
9537 /* Operand 2 is the delta. */
9538 op[2] = GEN_INT (delta);
9539
9540 /* Operand 3 is the vcall_offset. */
9541 op[3] = GEN_INT (vcall_offset);
9542
9543 /* Operand 4 is the temporary register. */
9544 op[4] = gen_rtx_REG (Pmode, 1);
9545
9546 /* Operands 5 to 8 can be used as labels. */
9547 op[5] = NULL_RTX;
9548 op[6] = NULL_RTX;
9549 op[7] = NULL_RTX;
9550 op[8] = NULL_RTX;
9551
9552 /* Operand 9 can be used for temporary register. */
9553 op[9] = NULL_RTX;
9554
9555 /* Generate code. */
9556 if (TARGET_64BIT)
9557 {
9558 /* Set up the literal pool pointer if required. */
9559 if ((!DISP_IN_RANGE (delta)
9560 && !CONST_OK_FOR_K (delta)
9561 && !CONST_OK_FOR_Os (delta))
9562 || (!DISP_IN_RANGE (vcall_offset)
9563 && !CONST_OK_FOR_K (vcall_offset)
9564 && !CONST_OK_FOR_Os (vcall_offset)))
9565 {
9566 op[5] = gen_label_rtx ();
9567 output_asm_insn ("larl\t%4,%5", op);
9568 }
9569
9570 /* Add DELTA to this pointer. */
9571 if (delta)
9572 {
9573 if (CONST_OK_FOR_J (delta))
9574 output_asm_insn ("la\t%1,%2(%1)", op);
9575 else if (DISP_IN_RANGE (delta))
9576 output_asm_insn ("lay\t%1,%2(%1)", op);
9577 else if (CONST_OK_FOR_K (delta))
9578 output_asm_insn ("aghi\t%1,%2", op);
9579 else if (CONST_OK_FOR_Os (delta))
9580 output_asm_insn ("agfi\t%1,%2", op);
9581 else
9582 {
9583 op[6] = gen_label_rtx ();
9584 output_asm_insn ("agf\t%1,%6-%5(%4)", op);
9585 }
9586 }
9587
9588 /* Perform vcall adjustment. */
9589 if (vcall_offset)
9590 {
9591 if (DISP_IN_RANGE (vcall_offset))
9592 {
9593 output_asm_insn ("lg\t%4,0(%1)", op);
9594 output_asm_insn ("ag\t%1,%3(%4)", op);
9595 }
9596 else if (CONST_OK_FOR_K (vcall_offset))
9597 {
9598 output_asm_insn ("lghi\t%4,%3", op);
9599 output_asm_insn ("ag\t%4,0(%1)", op);
9600 output_asm_insn ("ag\t%1,0(%4)", op);
9601 }
9602 else if (CONST_OK_FOR_Os (vcall_offset))
9603 {
9604 output_asm_insn ("lgfi\t%4,%3", op);
9605 output_asm_insn ("ag\t%4,0(%1)", op);
9606 output_asm_insn ("ag\t%1,0(%4)", op);
9607 }
9608 else
9609 {
9610 op[7] = gen_label_rtx ();
9611 output_asm_insn ("llgf\t%4,%7-%5(%4)", op);
9612 output_asm_insn ("ag\t%4,0(%1)", op);
9613 output_asm_insn ("ag\t%1,0(%4)", op);
9614 }
9615 }
9616
9617 /* Jump to target. */
9618 output_asm_insn ("jg\t%0", op);
9619
9620 /* Output literal pool if required. */
9621 if (op[5])
9622 {
9623 output_asm_insn (".align\t4", op);
9624 targetm.asm_out.internal_label (file, "L",
9625 CODE_LABEL_NUMBER (op[5]));
9626 }
9627 if (op[6])
9628 {
9629 targetm.asm_out.internal_label (file, "L",
9630 CODE_LABEL_NUMBER (op[6]));
9631 output_asm_insn (".long\t%2", op);
9632 }
9633 if (op[7])
9634 {
9635 targetm.asm_out.internal_label (file, "L",
9636 CODE_LABEL_NUMBER (op[7]));
9637 output_asm_insn (".long\t%3", op);
9638 }
9639 }
9640 else
9641 {
9642 /* Setup base pointer if required. */
9643 if (!vcall_offset
9644 || (!DISP_IN_RANGE (delta)
9645 && !CONST_OK_FOR_K (delta)
9646 && !CONST_OK_FOR_Os (delta))
9647 || (!DISP_IN_RANGE (delta)
9648 && !CONST_OK_FOR_K (vcall_offset)
9649 && !CONST_OK_FOR_Os (vcall_offset)))
9650 {
9651 op[5] = gen_label_rtx ();
9652 output_asm_insn ("basr\t%4,0", op);
9653 targetm.asm_out.internal_label (file, "L",
9654 CODE_LABEL_NUMBER (op[5]));
9655 }
9656
9657 /* Add DELTA to this pointer. */
9658 if (delta)
9659 {
9660 if (CONST_OK_FOR_J (delta))
9661 output_asm_insn ("la\t%1,%2(%1)", op);
9662 else if (DISP_IN_RANGE (delta))
9663 output_asm_insn ("lay\t%1,%2(%1)", op);
9664 else if (CONST_OK_FOR_K (delta))
9665 output_asm_insn ("ahi\t%1,%2", op);
9666 else if (CONST_OK_FOR_Os (delta))
9667 output_asm_insn ("afi\t%1,%2", op);
9668 else
9669 {
9670 op[6] = gen_label_rtx ();
9671 output_asm_insn ("a\t%1,%6-%5(%4)", op);
9672 }
9673 }
9674
9675 /* Perform vcall adjustment. */
9676 if (vcall_offset)
9677 {
9678 if (CONST_OK_FOR_J (vcall_offset))
9679 {
9680 output_asm_insn ("l\t%4,0(%1)", op);
9681 output_asm_insn ("a\t%1,%3(%4)", op);
9682 }
9683 else if (DISP_IN_RANGE (vcall_offset))
9684 {
9685 output_asm_insn ("l\t%4,0(%1)", op);
9686 output_asm_insn ("ay\t%1,%3(%4)", op);
9687 }
9688 else if (CONST_OK_FOR_K (vcall_offset))
9689 {
9690 output_asm_insn ("lhi\t%4,%3", op);
9691 output_asm_insn ("a\t%4,0(%1)", op);
9692 output_asm_insn ("a\t%1,0(%4)", op);
9693 }
9694 else if (CONST_OK_FOR_Os (vcall_offset))
9695 {
9696 output_asm_insn ("iilf\t%4,%3", op);
9697 output_asm_insn ("a\t%4,0(%1)", op);
9698 output_asm_insn ("a\t%1,0(%4)", op);
9699 }
9700 else
9701 {
9702 op[7] = gen_label_rtx ();
9703 output_asm_insn ("l\t%4,%7-%5(%4)", op);
9704 output_asm_insn ("a\t%4,0(%1)", op);
9705 output_asm_insn ("a\t%1,0(%4)", op);
9706 }
9707
9708 /* We had to clobber the base pointer register.
9709 Re-setup the base pointer (with a different base). */
9710 op[5] = gen_label_rtx ();
9711 output_asm_insn ("basr\t%4,0", op);
9712 targetm.asm_out.internal_label (file, "L",
9713 CODE_LABEL_NUMBER (op[5]));
9714 }
9715
9716 /* Jump to target. */
9717 op[8] = gen_label_rtx ();
9718
9719 if (!flag_pic)
9720 output_asm_insn ("l\t%4,%8-%5(%4)", op);
9721 else if (!nonlocal)
9722 output_asm_insn ("a\t%4,%8-%5(%4)", op);
9723 /* We cannot call through .plt, since .plt requires %r12 loaded. */
9724 else if (flag_pic == 1)
9725 {
9726 output_asm_insn ("a\t%4,%8-%5(%4)", op);
9727 output_asm_insn ("l\t%4,%0(%4)", op);
9728 }
9729 else if (flag_pic == 2)
9730 {
9731 op[9] = gen_rtx_REG (Pmode, 0);
9732 output_asm_insn ("l\t%9,%8-4-%5(%4)", op);
9733 output_asm_insn ("a\t%4,%8-%5(%4)", op);
9734 output_asm_insn ("ar\t%4,%9", op);
9735 output_asm_insn ("l\t%4,0(%4)", op);
9736 }
9737
9738 output_asm_insn ("br\t%4", op);
9739
9740 /* Output literal pool. */
9741 output_asm_insn (".align\t4", op);
9742
9743 if (nonlocal && flag_pic == 2)
9744 output_asm_insn (".long\t%0", op);
9745 if (nonlocal)
9746 {
9747 op[0] = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
9748 SYMBOL_REF_FLAGS (op[0]) = SYMBOL_FLAG_LOCAL;
9749 }
9750
9751 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[8]));
9752 if (!flag_pic)
9753 output_asm_insn (".long\t%0", op);
9754 else
9755 output_asm_insn (".long\t%0-%5", op);
9756
9757 if (op[6])
9758 {
9759 targetm.asm_out.internal_label (file, "L",
9760 CODE_LABEL_NUMBER (op[6]));
9761 output_asm_insn (".long\t%2", op);
9762 }
9763 if (op[7])
9764 {
9765 targetm.asm_out.internal_label (file, "L",
9766 CODE_LABEL_NUMBER (op[7]));
9767 output_asm_insn (".long\t%3", op);
9768 }
9769 }
9770 final_end_function ();
9771 }
9772
9773 static bool
9774 s390_valid_pointer_mode (enum machine_mode mode)
9775 {
9776 return (mode == SImode || (TARGET_64BIT && mode == DImode));
9777 }
9778
9779 /* Checks whether the given CALL_EXPR would use a call-saved
9780 register. This is used to decide whether sibling call
9781 optimization could be performed on the respective function
9782 call. */
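/* Example (editor's illustration, not from the original sources): a
call whose fifth integer argument lands in %r6 - a register that is
call-saved on s390 - makes this predicate return true and thereby
blocks the sibling call optimization below.  */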
9783
9784 static bool
9785 s390_call_saved_register_used (tree call_expr)
9786 {
9787 CUMULATIVE_ARGS cum_v;
9788 cumulative_args_t cum;
9789 tree parameter;
9790 enum machine_mode mode;
9791 tree type;
9792 rtx parm_rtx;
9793 int reg, i;
9794
9795 INIT_CUMULATIVE_ARGS (cum_v, NULL, NULL, 0, 0);
9796 cum = pack_cumulative_args (&cum_v);
9797
9798 for (i = 0; i < call_expr_nargs (call_expr); i++)
9799 {
9800 parameter = CALL_EXPR_ARG (call_expr, i);
9801 gcc_assert (parameter);
9802
9803 /* For an undeclared variable passed as parameter we will get
9804 an ERROR_MARK node here. */
9805 if (TREE_CODE (parameter) == ERROR_MARK)
9806 return true;
9807
9808 type = TREE_TYPE (parameter);
9809 gcc_assert (type);
9810
9811 mode = TYPE_MODE (type);
9812 gcc_assert (mode);
9813
9814 if (pass_by_reference (&cum_v, mode, type, true))
9815 {
9816 mode = Pmode;
9817 type = build_pointer_type (type);
9818 }
9819
9820 parm_rtx = s390_function_arg (cum, mode, type, 0);
9821
9822 s390_function_arg_advance (cum, mode, type, 0);
9823
9824 if (!parm_rtx)
9825 continue;
9826
9827 if (REG_P (parm_rtx))
9828 {
9829 for (reg = 0;
9830 reg < HARD_REGNO_NREGS (REGNO (parm_rtx), GET_MODE (parm_rtx));
9831 reg++)
9832 if (!call_used_regs[reg + REGNO (parm_rtx)])
9833 return true;
9834 }
9835
9836 if (GET_CODE (parm_rtx) == PARALLEL)
9837 {
9838 int i;
9839
9840 for (i = 0; i < XVECLEN (parm_rtx, 0); i++)
9841 {
9842 rtx r = XEXP (XVECEXP (parm_rtx, 0, i), 0);
9843
9844 gcc_assert (REG_P (r));
9845
9846 for (reg = 0;
9847 reg < HARD_REGNO_NREGS (REGNO (r), GET_MODE (r));
9848 reg++)
9849 if (!call_used_regs[reg + REGNO (r)])
9850 return true;
9851 }
9852 }
9853
9854 }
9855 return false;
9856 }
9857
9858 /* Return true if the given call expression can be
9859 turned into a sibling call.
9860 DECL holds the declaration of the function to be called whereas
9861 EXP is the call expression itself. */
9862
9863 static bool
9864 s390_function_ok_for_sibcall (tree decl, tree exp)
9865 {
9866 /* The TPF epilogue uses register 1. */
9867 if (TARGET_TPF_PROFILING)
9868 return false;
9869
9870 /* The 31 bit PLT code uses register 12 (GOT pointer - call-saved)
9871 which would have to be restored before the sibcall. */
9872 if (!TARGET_64BIT && flag_pic && decl && !targetm.binds_local_p (decl))
9873 return false;
9874
9875 /* Register 6 on s390 is available as an argument register but is
9876 unfortunately call-saved. This makes functions needing this register
9877 for arguments not suitable for sibcalls. */
9878 return !s390_call_saved_register_used (exp);
9879 }
9880
9881 /* Return the fixed registers used for condition codes. */
9882
9883 static bool
9884 s390_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
9885 {
9886 *p1 = CC_REGNUM;
9887 *p2 = INVALID_REGNUM;
9888
9889 return true;
9890 }
9891
9892 /* This function is used by the call expanders of the machine description.
9893 It emits the call insn itself together with the necessary operations
9894 to adjust the target address and returns the emitted insn.
9895 ADDR_LOCATION is the target address rtx
9896 TLS_CALL the location of the thread-local symbol
9897 RESULT_REG the register where the result of the call should be stored
9898 RETADDR_REG the register where the return address should be stored
9899 If this parameter is NULL_RTX the call is considered
9900 to be a sibling call. */
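/* Illustrative use (editor's sketch of how the call expanders are
assumed to invoke this helper, not a quote from s390.md): a normal
call passes the return-address register, e.g.

s390_emit_call (addr, NULL_RTX, result_reg,
                gen_rtx_REG (Pmode, RETURN_REGNUM));

whereas a sibling call passes NULL_RTX as RETADDR_REG.  */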
9901
9902 rtx
9903 s390_emit_call (rtx addr_location, rtx tls_call, rtx result_reg,
9904 rtx retaddr_reg)
9905 {
9906 bool plt_call = false;
9907 rtx insn;
9908 rtx call;
9909 rtx clobber;
9910 rtvec vec;
9911
9912 /* Direct function calls need special treatment. */
9913 if (GET_CODE (addr_location) == SYMBOL_REF)
9914 {
9915 /* When calling a global routine in PIC mode, we must
9916 replace the symbol itself with the PLT stub. */
9917 if (flag_pic && !SYMBOL_REF_LOCAL_P (addr_location))
9918 {
9919 if (retaddr_reg != NULL_RTX)
9920 {
9921 addr_location = gen_rtx_UNSPEC (Pmode,
9922 gen_rtvec (1, addr_location),
9923 UNSPEC_PLT);
9924 addr_location = gen_rtx_CONST (Pmode, addr_location);
9925 plt_call = true;
9926 }
9927 else
9928 /* For -fpic code the PLT entries might use r12 which is
9929 call-saved. Therefore we cannot do a sibcall when
9930 calling directly using a symbol ref. When reaching
9931 this point we decided (in s390_function_ok_for_sibcall)
9932 to do a sibcall for a function pointer but one of the
9933 optimizers was able to get rid of the function pointer
9934 by propagating the symbol ref into the call. This
9935 optimization is illegal for S/390 so we turn the direct
9936 call into an indirect call again. */
9937 addr_location = force_reg (Pmode, addr_location);
9938 }
9939
9940 /* Unless we can use the bras(l) insn, force the
9941 routine address into a register. */
9942 if (!TARGET_SMALL_EXEC && !TARGET_CPU_ZARCH)
9943 {
9944 if (flag_pic)
9945 addr_location = legitimize_pic_address (addr_location, 0);
9946 else
9947 addr_location = force_reg (Pmode, addr_location);
9948 }
9949 }
9950
9951 /* If it is already an indirect call or the code above moved the
9952 SYMBOL_REF to somewhere else make sure the address can be found in
9953 register 1. */
9954 if (retaddr_reg == NULL_RTX
9955 && GET_CODE (addr_location) != SYMBOL_REF
9956 && !plt_call)
9957 {
9958 emit_move_insn (gen_rtx_REG (Pmode, SIBCALL_REGNUM), addr_location);
9959 addr_location = gen_rtx_REG (Pmode, SIBCALL_REGNUM);
9960 }
9961
9962 addr_location = gen_rtx_MEM (QImode, addr_location);
9963 call = gen_rtx_CALL (VOIDmode, addr_location, const0_rtx);
9964
9965 if (result_reg != NULL_RTX)
9966 call = gen_rtx_SET (VOIDmode, result_reg, call);
9967
9968 if (retaddr_reg != NULL_RTX)
9969 {
9970 clobber = gen_rtx_CLOBBER (VOIDmode, retaddr_reg);
9971
9972 if (tls_call != NULL_RTX)
9973 vec = gen_rtvec (3, call, clobber,
9974 gen_rtx_USE (VOIDmode, tls_call));
9975 else
9976 vec = gen_rtvec (2, call, clobber);
9977
9978 call = gen_rtx_PARALLEL (VOIDmode, vec);
9979 }
9980
9981 insn = emit_call_insn (call);
9982
9983 /* 31-bit PLT stubs and tls calls use the GOT register implicitly. */
9984 if ((!TARGET_64BIT && plt_call) || tls_call != NULL_RTX)
9985 {
9986 /* s390_function_ok_for_sibcall should
9987 have denied sibcalls in this case. */
9988 gcc_assert (retaddr_reg != NULL_RTX);
9989 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), gen_rtx_REG (Pmode, 12));
9990 }
9991 return insn;
9992 }
9993
9994 /* Implement TARGET_CONDITIONAL_REGISTER_USAGE. */
9995
9996 static void
9997 s390_conditional_register_usage (void)
9998 {
9999 int i;
10000
10001 if (flag_pic)
10002 {
10003 fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
10004 call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
10005 }
10006 if (TARGET_CPU_ZARCH)
10007 {
10008 fixed_regs[BASE_REGNUM] = 0;
10009 call_used_regs[BASE_REGNUM] = 0;
10010 fixed_regs[RETURN_REGNUM] = 0;
10011 call_used_regs[RETURN_REGNUM] = 0;
10012 }
10013 if (TARGET_64BIT)
10014 {
10015 for (i = 24; i < 32; i++)
10016 call_used_regs[i] = call_really_used_regs[i] = 0;
10017 }
10018 else
10019 {
10020 for (i = 18; i < 20; i++)
10021 call_used_regs[i] = call_really_used_regs[i] = 0;
10022 }
10023
10024 if (TARGET_SOFT_FLOAT)
10025 {
10026 for (i = 16; i < 32; i++)
10027 call_used_regs[i] = fixed_regs[i] = 1;
10028 }
10029 }
10030
10031 /* Corresponding function to eh_return expander. */
10032
10033 static GTY(()) rtx s390_tpf_eh_return_symbol;
10034 void
10035 s390_emit_tpf_eh_return (rtx target)
10036 {
10037 rtx insn, reg;
10038
10039 if (!s390_tpf_eh_return_symbol)
10040 s390_tpf_eh_return_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tpf_eh_return");
10041
10042 reg = gen_rtx_REG (Pmode, 2);
10043
10044 emit_move_insn (reg, target);
10045 insn = s390_emit_call (s390_tpf_eh_return_symbol, NULL_RTX, reg,
10046 gen_rtx_REG (Pmode, RETURN_REGNUM));
10047 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), reg);
10048
10049 emit_move_insn (EH_RETURN_HANDLER_RTX, reg);
10050 }
10051
10052 /* Rework the prologue/epilogue to avoid saving/restoring
10053 registers unnecessarily. */
10054
10055 static void
10056 s390_optimize_prologue (void)
10057 {
10058 rtx insn, new_insn, next_insn;
10059
10060 /* Do a final recompute of the frame-related data. */
10061
10062 s390_update_frame_layout ();
10063
10064 /* If all special registers are in fact used, there's nothing we
10065 can do, so no point in walking the insn list. */
10066
10067 if (cfun_frame_layout.first_save_gpr <= BASE_REGNUM
10068 && cfun_frame_layout.last_save_gpr >= BASE_REGNUM
10069 && (TARGET_CPU_ZARCH
10070 || (cfun_frame_layout.first_save_gpr <= RETURN_REGNUM
10071 && cfun_frame_layout.last_save_gpr >= RETURN_REGNUM)))
10072 return;
10073
10074 /* Search for prologue/epilogue insns and replace them. */
10075
10076 for (insn = get_insns (); insn; insn = next_insn)
10077 {
10078 int first, last, off;
10079 rtx set, base, offset;
10080
10081 next_insn = NEXT_INSN (insn);
10082
10083 if (GET_CODE (insn) != INSN)
10084 continue;
10085
10086 if (GET_CODE (PATTERN (insn)) == PARALLEL
10087 && store_multiple_operation (PATTERN (insn), VOIDmode))
10088 {
10089 set = XVECEXP (PATTERN (insn), 0, 0);
10090 first = REGNO (SET_SRC (set));
10091 last = first + XVECLEN (PATTERN (insn), 0) - 1;
10092 offset = const0_rtx;
10093 base = eliminate_constant_term (XEXP (SET_DEST (set), 0), &offset);
10094 off = INTVAL (offset);
10095
10096 if (GET_CODE (base) != REG || off < 0)
10097 continue;
10098 if (cfun_frame_layout.first_save_gpr != -1
10099 && (cfun_frame_layout.first_save_gpr < first
10100 || cfun_frame_layout.last_save_gpr > last))
10101 continue;
10102 if (REGNO (base) != STACK_POINTER_REGNUM
10103 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
10104 continue;
10105 if (first > BASE_REGNUM || last < BASE_REGNUM)
10106 continue;
10107
10108 if (cfun_frame_layout.first_save_gpr != -1)
10109 {
10110 new_insn = save_gprs (base,
10111 off + (cfun_frame_layout.first_save_gpr
10112 - first) * UNITS_PER_LONG,
10113 cfun_frame_layout.first_save_gpr,
10114 cfun_frame_layout.last_save_gpr);
10115 new_insn = emit_insn_before (new_insn, insn);
10116 INSN_ADDRESSES_NEW (new_insn, -1);
10117 }
10118
10119 remove_insn (insn);
10120 continue;
10121 }
10122
10123 if (cfun_frame_layout.first_save_gpr == -1
10124 && GET_CODE (PATTERN (insn)) == SET
10125 && GET_CODE (SET_SRC (PATTERN (insn))) == REG
10126 && (REGNO (SET_SRC (PATTERN (insn))) == BASE_REGNUM
10127 || (!TARGET_CPU_ZARCH
10128 && REGNO (SET_SRC (PATTERN (insn))) == RETURN_REGNUM))
10129 && GET_CODE (SET_DEST (PATTERN (insn))) == MEM)
10130 {
10131 set = PATTERN (insn);
10132 first = REGNO (SET_SRC (set));
10133 offset = const0_rtx;
10134 base = eliminate_constant_term (XEXP (SET_DEST (set), 0), &offset);
10135 off = INTVAL (offset);
10136
10137 if (GET_CODE (base) != REG || off < 0)
10138 continue;
10139 if (REGNO (base) != STACK_POINTER_REGNUM
10140 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
10141 continue;
10142
10143 remove_insn (insn);
10144 continue;
10145 }
10146
10147 if (GET_CODE (PATTERN (insn)) == PARALLEL
10148 && load_multiple_operation (PATTERN (insn), VOIDmode))
10149 {
10150 set = XVECEXP (PATTERN (insn), 0, 0);
10151 first = REGNO (SET_DEST (set));
10152 last = first + XVECLEN (PATTERN (insn), 0) - 1;
10153 offset = const0_rtx;
10154 base = eliminate_constant_term (XEXP (SET_SRC (set), 0), &offset);
10155 off = INTVAL (offset);
10156
10157 if (GET_CODE (base) != REG || off < 0)
10158 continue;
10159 if (cfun_frame_layout.first_restore_gpr != -1
10160 && (cfun_frame_layout.first_restore_gpr < first
10161 || cfun_frame_layout.last_restore_gpr > last))
10162 continue;
10163 if (REGNO (base) != STACK_POINTER_REGNUM
10164 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
10165 continue;
10166 if (first > BASE_REGNUM || last < BASE_REGNUM)
10167 continue;
10168
10169 if (cfun_frame_layout.first_restore_gpr != -1)
10170 {
10171 new_insn = restore_gprs (base,
10172 off + (cfun_frame_layout.first_restore_gpr
10173 - first) * UNITS_PER_LONG,
10174 cfun_frame_layout.first_restore_gpr,
10175 cfun_frame_layout.last_restore_gpr);
10176 new_insn = emit_insn_before (new_insn, insn);
10177 INSN_ADDRESSES_NEW (new_insn, -1);
10178 }
10179
10180 remove_insn (insn);
10181 continue;
10182 }
10183
10184 if (cfun_frame_layout.first_restore_gpr == -1
10185 && GET_CODE (PATTERN (insn)) == SET
10186 && GET_CODE (SET_DEST (PATTERN (insn))) == REG
10187 && (REGNO (SET_DEST (PATTERN (insn))) == BASE_REGNUM
10188 || (!TARGET_CPU_ZARCH
10189 && REGNO (SET_DEST (PATTERN (insn))) == RETURN_REGNUM))
10190 && GET_CODE (SET_SRC (PATTERN (insn))) == MEM)
10191 {
10192 set = PATTERN (insn);
10193 first = REGNO (SET_DEST (set));
10194 offset = const0_rtx;
10195 base = eliminate_constant_term (XEXP (SET_SRC (set), 0), &offset);
10196 off = INTVAL (offset);
10197
10198 if (GET_CODE (base) != REG || off < 0)
10199 continue;
10200 if (REGNO (base) != STACK_POINTER_REGNUM
10201 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
10202 continue;
10203
10204 remove_insn (insn);
10205 continue;
10206 }
10207 }
10208 }
10209
10210 /* On z10 and later the dynamic branch prediction must see the
10211 backward jump within a certain window. If not, it falls back to
10212 the static prediction. This function rearranges the loop backward
10213 branch in a way which makes the static prediction always correct.
10214 The function returns true if it added an instruction. */
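/* Sketch of the rewrite performed below (editor's illustration, not
from the original sources): a conditional backward branch whose target
lies further away than PREDICT_DISTANCE

jne .Lloop_head

is rewritten as an inverted branch over a new unconditional jump

je  .Lskip
jg  .Lloop_head
.Lskip:

so the backward jump itself becomes unconditional and is therefore
always predicted correctly by the static predictor.  */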
10215 static bool
10216 s390_fix_long_loop_prediction (rtx insn)
10217 {
10218 rtx set = single_set (insn);
10219 rtx code_label, label_ref, new_label;
10220 rtx uncond_jump;
10221 rtx cur_insn;
10222 rtx tmp;
10223 int distance;
10224
10225 /* This will exclude branch on count and branch on index patterns
10226 since these are correctly statically predicted. */
10227 if (!set
10228 || SET_DEST (set) != pc_rtx
10229 || GET_CODE (SET_SRC(set)) != IF_THEN_ELSE)
10230 return false;
10231
10232 label_ref = (GET_CODE (XEXP (SET_SRC (set), 1)) == LABEL_REF ?
10233 XEXP (SET_SRC (set), 1) : XEXP (SET_SRC (set), 2));
10234
10235 gcc_assert (GET_CODE (label_ref) == LABEL_REF);
10236
10237 code_label = XEXP (label_ref, 0);
10238
10239 if (INSN_ADDRESSES (INSN_UID (code_label)) == -1
10240 || INSN_ADDRESSES (INSN_UID (insn)) == -1
10241 || (INSN_ADDRESSES (INSN_UID (insn))
10242 - INSN_ADDRESSES (INSN_UID (code_label)) < PREDICT_DISTANCE))
10243 return false;
10244
10245 for (distance = 0, cur_insn = PREV_INSN (insn);
10246 distance < PREDICT_DISTANCE - 6;
10247 distance += get_attr_length (cur_insn), cur_insn = PREV_INSN (cur_insn))
10248 if (!cur_insn || JUMP_P (cur_insn) || LABEL_P (cur_insn))
10249 return false;
10250
10251 new_label = gen_label_rtx ();
10252 uncond_jump = emit_jump_insn_after (
10253 gen_rtx_SET (VOIDmode, pc_rtx,
10254 gen_rtx_LABEL_REF (VOIDmode, code_label)),
10255 insn);
10256 emit_label_after (new_label, uncond_jump);
10257
10258 tmp = XEXP (SET_SRC (set), 1);
10259 XEXP (SET_SRC (set), 1) = XEXP (SET_SRC (set), 2);
10260 XEXP (SET_SRC (set), 2) = tmp;
10261 INSN_CODE (insn) = -1;
10262
10263 XEXP (label_ref, 0) = new_label;
10264 JUMP_LABEL (insn) = new_label;
10265 JUMP_LABEL (uncond_jump) = code_label;
10266
10267 return true;
10268 }
10269
10270 /* Returns 1 if INSN reads the value of REG for purposes not related
10271 to addressing of memory, and 0 otherwise. */
10272 static int
10273 s390_non_addr_reg_read_p (rtx reg, rtx insn)
10274 {
10275 return reg_referenced_p (reg, PATTERN (insn))
10276 && !reg_used_in_mem_p (REGNO (reg), PATTERN (insn));
10277 }
10278
10279 /* Starting from INSN find_cond_jump looks downwards in the insn
10280 stream for a single jump insn which is the last user of the
10281 condition code set in INSN. */
10282 static rtx
10283 find_cond_jump (rtx insn)
10284 {
10285 for (; insn; insn = NEXT_INSN (insn))
10286 {
10287 rtx ite, cc;
10288
10289 if (LABEL_P (insn))
10290 break;
10291
10292 if (!JUMP_P (insn))
10293 {
10294 if (reg_mentioned_p (gen_rtx_REG (CCmode, CC_REGNUM), insn))
10295 break;
10296 continue;
10297 }
10298
10299 /* This will be triggered by a return. */
10300 if (GET_CODE (PATTERN (insn)) != SET)
10301 break;
10302
10303 gcc_assert (SET_DEST (PATTERN (insn)) == pc_rtx);
10304 ite = SET_SRC (PATTERN (insn));
10305
10306 if (GET_CODE (ite) != IF_THEN_ELSE)
10307 break;
10308
10309 cc = XEXP (XEXP (ite, 0), 0);
10310 if (!REG_P (cc) || !CC_REGNO_P (REGNO (cc)))
10311 break;
10312
10313 if (find_reg_note (insn, REG_DEAD, cc))
10314 return insn;
10315 break;
10316 }
10317
10318 return NULL_RTX;
10319 }
10320
10321 /* Swap the condition in COND and the operands in OP0 and OP1 so that
10322 the semantics does not change. If NULL_RTX is passed as COND the
10323 function tries to find the conditional jump starting with INSN. */
10324 static void
10325 s390_swap_cmp (rtx cond, rtx *op0, rtx *op1, rtx insn)
10326 {
10327 rtx tmp = *op0;
10328
10329 if (cond == NULL_RTX)
10330 {
10331 rtx jump = find_cond_jump (NEXT_INSN (insn));
10332 jump = jump ? single_set (jump) : NULL_RTX;
10333
10334 if (jump == NULL_RTX)
10335 return;
10336
10337 cond = XEXP (XEXP (jump, 1), 0);
10338 }
10339
10340 *op0 = *op1;
10341 *op1 = tmp;
10342 PUT_CODE (cond, swap_condition (GET_CODE (cond)));
10343 }
10344
10345 /* On z10, instructions of the compare-and-branch family have the
10346 property to access the register occurring as second operand with
10347 its bits complemented. If such a compare is grouped with a second
10348 instruction that accesses the same register non-complemented, and
10349 if that register's value is delivered via a bypass, then the
10350 pipeline recycles, thereby causing significant performance decline.
10351 This function locates such situations and exchanges the two
10352 operands of the compare. The function returns true whenever it
10353 added an insn. */
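/* Example (editor's illustration): if the insn preceding a compare
such as

cr %r1,%r2

already reads %r2, the operands are exchanged (and the condition of
the consuming branch is inverted accordingly via s390_swap_cmp); if
swapping is impossible on both sides, a NOP is emitted after the
compare to separate the two conflicting instructions.  */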
10354 static bool
10355 s390_z10_optimize_cmp (rtx insn)
10356 {
10357 rtx prev_insn, next_insn;
10358 bool insn_added_p = false;
10359 rtx cond, *op0, *op1;
10360
10361 if (GET_CODE (PATTERN (insn)) == PARALLEL)
10362 {
10363 /* Handle compare and branch and branch on count
10364 instructions. */
10365 rtx pattern = single_set (insn);
10366
10367 if (!pattern
10368 || SET_DEST (pattern) != pc_rtx
10369 || GET_CODE (SET_SRC (pattern)) != IF_THEN_ELSE)
10370 return false;
10371
10372 cond = XEXP (SET_SRC (pattern), 0);
10373 op0 = &XEXP (cond, 0);
10374 op1 = &XEXP (cond, 1);
10375 }
10376 else if (GET_CODE (PATTERN (insn)) == SET)
10377 {
10378 rtx src, dest;
10379
10380 /* Handle normal compare instructions. */
10381 src = SET_SRC (PATTERN (insn));
10382 dest = SET_DEST (PATTERN (insn));
10383
10384 if (!REG_P (dest)
10385 || !CC_REGNO_P (REGNO (dest))
10386 || GET_CODE (src) != COMPARE)
10387 return false;
10388
10389 /* s390_swap_cmp will try to find the conditional
10390 jump when passing NULL_RTX as condition. */
10391 cond = NULL_RTX;
10392 op0 = &XEXP (src, 0);
10393 op1 = &XEXP (src, 1);
10394 }
10395 else
10396 return false;
10397
10398 if (!REG_P (*op0) || !REG_P (*op1))
10399 return false;
10400
10401 if (GET_MODE_CLASS (GET_MODE (*op0)) != MODE_INT)
10402 return false;
10403
10404 /* Swap the COMPARE arguments and its mask if there is a
10405 conflicting access in the previous insn. */
10406 prev_insn = prev_active_insn (insn);
10407 if (prev_insn != NULL_RTX && INSN_P (prev_insn)
10408 && reg_referenced_p (*op1, PATTERN (prev_insn)))
10409 s390_swap_cmp (cond, op0, op1, insn);
10410
10411 /* Check if there is a conflict with the next insn. If there
10412 was no conflict with the previous insn, then swap the
10413 COMPARE arguments and its mask. If we already swapped
10414 the operands, or if swapping them would cause a conflict
10415 with the previous insn, issue a NOP after the COMPARE in
10416 order to separate the two instructions. */
10417 next_insn = next_active_insn (insn);
10418 if (next_insn != NULL_RTX && INSN_P (next_insn)
10419 && s390_non_addr_reg_read_p (*op1, next_insn))
10420 {
10421 if (prev_insn != NULL_RTX && INSN_P (prev_insn)
10422 && s390_non_addr_reg_read_p (*op0, prev_insn))
10423 {
10424 if (REGNO (*op1) == 0)
10425 emit_insn_after (gen_nop1 (), insn);
10426 else
10427 emit_insn_after (gen_nop (), insn);
10428 insn_added_p = true;
10429 }
10430 else
10431 s390_swap_cmp (cond, op0, op1, insn);
10432 }
10433 return insn_added_p;
10434 }
10435
10436 /* Perform machine-dependent processing. */
10437
10438 static void
10439 s390_reorg (void)
10440 {
10441 bool pool_overflow = false;
10442
10443 /* Make sure all splits have been performed; splits after
10444 machine_dependent_reorg might confuse insn length counts. */
10445 split_all_insns_noflow ();
10446
10447 /* Install the main literal pool and the associated base
10448 register load insns.
10449
10450 In addition, there are two problematic situations we need
10451 to correct:
10452
10453 - the literal pool might be > 4096 bytes in size, so that
10454 some of its elements cannot be directly accessed
10455
10456 - a branch target might be > 64K away from the branch, so that
10457 it is not possible to use a PC-relative instruction.
10458
10459 To fix those, we split the single literal pool into multiple
10460 pool chunks, reloading the pool base register at various
10461 points throughout the function to ensure it always points to
10462 the pool chunk the following code expects, and / or replace
10463 PC-relative branches by absolute branches.
10464
10465 However, the two problems are interdependent: splitting the
10466 literal pool can move a branch further away from its target,
10467 causing the 64K limit to overflow, and on the other hand,
10468 replacing a PC-relative branch by an absolute branch means
10469 we need to put the branch target address into the literal
10470 pool, possibly causing it to overflow.
10471
10472 So, we loop trying to fix up both problems until we manage
10473 to satisfy both conditions at the same time. Note that the
10474 loop is guaranteed to terminate as every pass of the loop
10475 strictly decreases the total number of PC-relative branches
10476 in the function. (This is not completely true as there
10477 might be branch-over-pool insns introduced by chunkify_start.
10478 Those never need to be split however.) */
10479
10480 for (;;)
10481 {
10482 struct constant_pool *pool = NULL;
10483
10484 /* Collect the literal pool. */
10485 if (!pool_overflow)
10486 {
10487 pool = s390_mainpool_start ();
10488 if (!pool)
10489 pool_overflow = true;
10490 }
10491
10492 /* If literal pool overflowed, start to chunkify it. */
10493 if (pool_overflow)
10494 pool = s390_chunkify_start ();
10495
10496 /* Split out-of-range branches. If this has created new
10497 literal pool entries, cancel current chunk list and
10498 recompute it. zSeries machines have large branch
10499 instructions, so we never need to split a branch. */
10500 if (!TARGET_CPU_ZARCH && s390_split_branches ())
10501 {
10502 if (pool_overflow)
10503 s390_chunkify_cancel (pool);
10504 else
10505 s390_mainpool_cancel (pool);
10506
10507 continue;
10508 }
10509
10510 /* If we made it up to here, both conditions are satisfied.
10511 Finish up literal pool related changes. */
10512 if (pool_overflow)
10513 s390_chunkify_finish (pool);
10514 else
10515 s390_mainpool_finish (pool);
10516
10517 /* We're done splitting branches. */
10518 cfun->machine->split_branches_pending_p = false;
10519 break;
10520 }
10521
10522 /* Generate out-of-pool execute target insns. */
10523 if (TARGET_CPU_ZARCH)
10524 {
10525 rtx insn, label, target;
10526
10527 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
10528 {
10529 label = s390_execute_label (insn);
10530 if (!label)
10531 continue;
10532
10533 gcc_assert (label != const0_rtx);
10534
10535 target = emit_label (XEXP (label, 0));
10536 INSN_ADDRESSES_NEW (target, -1);
10537
10538 target = emit_insn (s390_execute_target (insn));
10539 INSN_ADDRESSES_NEW (target, -1);
10540 }
10541 }
10542
10543 /* Try to optimize prologue and epilogue further. */
10544 s390_optimize_prologue ();
10545
10546 /* Walk over the insns and do some >=z10 specific changes. */
10547 if (s390_tune == PROCESSOR_2097_Z10
10548 || s390_tune == PROCESSOR_2817_Z196
10549 || s390_tune == PROCESSOR_2827_ZEC12)
10550 {
10551 rtx insn;
10552 bool insn_added_p = false;
10553
10554 /* The insn lengths and addresses have to be up to date for the
10555 following manipulations. */
10556 shorten_branches (get_insns ());
10557
10558 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
10559 {
10560 if (!INSN_P (insn) || INSN_CODE (insn) <= 0)
10561 continue;
10562
10563 if (JUMP_P (insn))
10564 insn_added_p |= s390_fix_long_loop_prediction (insn);
10565
10566 if ((GET_CODE (PATTERN (insn)) == PARALLEL
10567 || GET_CODE (PATTERN (insn)) == SET)
10568 && s390_tune == PROCESSOR_2097_Z10)
10569 insn_added_p |= s390_z10_optimize_cmp (insn);
10570 }
10571
10572 /* Adjust branches if we added new instructions. */
10573 if (insn_added_p)
10574 shorten_branches (get_insns ());
10575 }
10576 }
10577
10578 /* Return true if INSN is a fp load insn writing register REGNO. */
10579 static inline bool
10580 s390_fpload_toreg (rtx insn, unsigned int regno)
10581 {
10582 rtx set;
10583 enum attr_type flag = s390_safe_attr_type (insn);
10584
10585 if (flag != TYPE_FLOADSF && flag != TYPE_FLOADDF)
10586 return false;
10587
10588 set = single_set (insn);
10589
10590 if (set == NULL_RTX)
10591 return false;
10592
10593 if (!REG_P (SET_DEST (set)) || !MEM_P (SET_SRC (set)))
10594 return false;
10595
10596 if (REGNO (SET_DEST (set)) != regno)
10597 return false;
10598
10599 return true;
10600 }
10601
10602 /* This value describes the distance to be avoided between an
10603 arithmetic fp instruction and an fp load writing the same register.
10604 Z10_EARLYLOAD_DISTANCE - 1 as well as Z10_EARLYLOAD_DISTANCE + 1 are
10605 fine, but the exact value has to be avoided. Otherwise the FP
10606 pipeline will throw an exception causing a major penalty. */
10607 #define Z10_EARLYLOAD_DISTANCE 7
10608
10609 /* Rearrange the ready list in order to avoid the situation described
10610 for Z10_EARLYLOAD_DISTANCE. A problematic load instruction is
10611 moved to the very end of the ready list. */
10612 static void
10613 s390_z10_prevent_earlyload_conflicts (rtx *ready, int *nready_p)
10614 {
10615 unsigned int regno;
10616 int nready = *nready_p;
10617 rtx tmp;
10618 int i;
10619 rtx insn;
10620 rtx set;
10621 enum attr_type flag;
10622 int distance;
10623
10624 /* Skip DISTANCE - 1 active insns. */
10625 for (insn = last_scheduled_insn, distance = Z10_EARLYLOAD_DISTANCE - 1;
10626 distance > 0 && insn != NULL_RTX;
10627 distance--, insn = prev_active_insn (insn))
10628 if (CALL_P (insn) || JUMP_P (insn))
10629 return;
10630
10631 if (insn == NULL_RTX)
10632 return;
10633
10634 set = single_set (insn);
10635
10636 if (set == NULL_RTX || !REG_P (SET_DEST (set))
10637 || GET_MODE_CLASS (GET_MODE (SET_DEST (set))) != MODE_FLOAT)
10638 return;
10639
10640 flag = s390_safe_attr_type (insn);
10641
10642 if (flag == TYPE_FLOADSF || flag == TYPE_FLOADDF)
10643 return;
10644
10645 regno = REGNO (SET_DEST (set));
10646 i = nready - 1;
10647
10648 while (!s390_fpload_toreg (ready[i], regno) && i > 0)
10649 i--;
10650
10651 if (!i)
10652 return;
10653
10654 tmp = ready[i];
10655 memmove (&ready[1], &ready[0], sizeof (rtx) * i);
10656 ready[0] = tmp;
10657 }
10658
10659
10660 /* The s390_sched_state variable tracks the state of the current or
10661 the last instruction group.
10662
10663 0,1,2 number of instructions scheduled in the current group
10664 3 the last group is complete - normal insns
10665 4 the last group was a cracked/expanded insn */
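/* Illustrative transition (editor's note, derived from
s390_sched_variable_issue below): issuing a cracked or expanded insn
moves the state to S390_OOO_SCHED_STATE_CRACKED; the next normal insn
moves it back to S390_OOO_SCHED_STATE_NORMAL, while normal insns in
states 0-2 simply advance the in-group counter.  */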
10666
10667 static int s390_sched_state;
10668
10669 #define S390_OOO_SCHED_STATE_NORMAL 3
10670 #define S390_OOO_SCHED_STATE_CRACKED 4
10671
10672 #define S390_OOO_SCHED_ATTR_MASK_CRACKED 0x1
10673 #define S390_OOO_SCHED_ATTR_MASK_EXPANDED 0x2
10674 #define S390_OOO_SCHED_ATTR_MASK_ENDGROUP 0x4
10675 #define S390_OOO_SCHED_ATTR_MASK_GROUPALONE 0x8
10676
10677 static unsigned int
10678 s390_get_sched_attrmask (rtx insn)
10679 {
10680 unsigned int mask = 0;
10681
10682 if (get_attr_ooo_cracked (insn))
10683 mask |= S390_OOO_SCHED_ATTR_MASK_CRACKED;
10684 if (get_attr_ooo_expanded (insn))
10685 mask |= S390_OOO_SCHED_ATTR_MASK_EXPANDED;
10686 if (get_attr_ooo_endgroup (insn))
10687 mask |= S390_OOO_SCHED_ATTR_MASK_ENDGROUP;
10688 if (get_attr_ooo_groupalone (insn))
10689 mask |= S390_OOO_SCHED_ATTR_MASK_GROUPALONE;
10690 return mask;
10691 }
10692
10693 /* Return the scheduling score for INSN. The higher the score the
10694 better. The score is calculated from the OOO scheduling attributes
10695 of INSN and the scheduling state s390_sched_state. */
10696 static int
10697 s390_sched_score (rtx insn)
10698 {
10699 unsigned int mask = s390_get_sched_attrmask (insn);
10700 int score = 0;
10701
10702 switch (s390_sched_state)
10703 {
10704 case 0:
10705 /* Try to put insns into the first slot which would otherwise
10706 break a group. */
10707 if ((mask & S390_OOO_SCHED_ATTR_MASK_CRACKED) != 0
10708 || (mask & S390_OOO_SCHED_ATTR_MASK_EXPANDED) != 0)
10709 score += 5;
10710 if ((mask & S390_OOO_SCHED_ATTR_MASK_GROUPALONE) != 0)
10711 score += 10;
10712 case 1:
10713 /* Prefer not cracked insns while trying to put together a
10714 group. */
10715 if ((mask & S390_OOO_SCHED_ATTR_MASK_CRACKED) == 0
10716 && (mask & S390_OOO_SCHED_ATTR_MASK_EXPANDED) == 0
10717 && (mask & S390_OOO_SCHED_ATTR_MASK_GROUPALONE) == 0)
10718 score += 10;
10719 if ((mask & S390_OOO_SCHED_ATTR_MASK_ENDGROUP) == 0)
10720 score += 5;
10721 break;
10722 case 2:
10723 /* Prefer not cracked insns while trying to put together a
10724 group. */
10725 if ((mask & S390_OOO_SCHED_ATTR_MASK_CRACKED) == 0
10726 && (mask & S390_OOO_SCHED_ATTR_MASK_EXPANDED) == 0
10727 && (mask & S390_OOO_SCHED_ATTR_MASK_GROUPALONE) == 0)
10728 score += 10;
10729 /* Prefer endgroup insns in the last slot. */
10730 if ((mask & S390_OOO_SCHED_ATTR_MASK_ENDGROUP) != 0)
10731 score += 10;
10732 break;
10733 case S390_OOO_SCHED_STATE_NORMAL:
10734 /* Prefer not cracked insns if the last was not cracked. */
10735 if ((mask & S390_OOO_SCHED_ATTR_MASK_CRACKED) == 0
10736 && (mask & S390_OOO_SCHED_ATTR_MASK_EXPANDED) == 0)
10737 score += 5;
10738 if ((mask & S390_OOO_SCHED_ATTR_MASK_GROUPALONE) != 0)
10739 score += 10;
10740 break;
10741 case S390_OOO_SCHED_STATE_CRACKED:
10742 /* Try to keep cracked insns together to prevent them from
10743 interrupting groups. */
10744 if ((mask & S390_OOO_SCHED_ATTR_MASK_CRACKED) != 0
10745 || (mask & S390_OOO_SCHED_ATTR_MASK_EXPANDED) != 0)
10746 score += 5;
10747 break;
10748 }
10749 return score;
10750 }
10751
10752 /* This function is called via hook TARGET_SCHED_REORDER before
10753 issuing one insn from list READY which contains *NREADYP entries.
10754 For target z10 it reorders load instructions to avoid early load
10755 conflicts in the floating point pipeline. */
10756 static int
10757 s390_sched_reorder (FILE *file, int verbose,
10758 rtx *ready, int *nreadyp, int clock ATTRIBUTE_UNUSED)
10759 {
10760 if (s390_tune == PROCESSOR_2097_Z10)
10761 if (reload_completed && *nreadyp > 1)
10762 s390_z10_prevent_earlyload_conflicts (ready, nreadyp);
10763
10764 if (s390_tune == PROCESSOR_2827_ZEC12
10765 && reload_completed
10766 && *nreadyp > 1)
10767 {
10768 int i;
10769 int last_index = *nreadyp - 1;
10770 int max_index = -1;
10771 int max_score = -1;
10772 rtx tmp;
10773
10774 /* Just move the insn with the highest score to the top (the
10775 end) of the list. A full sort is not needed since a conflict
10776 in the hazard recognition cannot happen. So the top insn in
10777 the ready list will always be taken. */
10778 for (i = last_index; i >= 0; i--)
10779 {
10780 int score;
10781
10782 if (recog_memoized (ready[i]) < 0)
10783 continue;
10784
10785 score = s390_sched_score (ready[i]);
10786 if (score > max_score)
10787 {
10788 max_score = score;
10789 max_index = i;
10790 }
10791 }
10792
10793 if (max_index != -1)
10794 {
10795 if (max_index != last_index)
10796 {
10797 tmp = ready[max_index];
10798 ready[max_index] = ready[last_index];
10799 ready[last_index] = tmp;
10800
10801 if (verbose > 5)
10802 fprintf (file,
10803 "move insn %d to the top of list\n",
10804 INSN_UID (ready[last_index]));
10805 }
10806 else if (verbose > 5)
10807 fprintf (file,
10808 "best insn %d already on top\n",
10809 INSN_UID (ready[last_index]));
10810 }
10811
10812 if (verbose > 5)
10813 {
10814 fprintf (file, "ready list ooo attributes - sched state: %d\n",
10815 s390_sched_state);
10816
10817 for (i = last_index; i >= 0; i--)
10818 {
10819 if (recog_memoized (ready[i]) < 0)
10820 continue;
10821 fprintf (file, "insn %d score: %d: ", INSN_UID (ready[i]),
10822 s390_sched_score (ready[i]));
10823 #define PRINT_OOO_ATTR(ATTR) fprintf (file, "%s ", get_attr_##ATTR (ready[i]) ? #ATTR : "!" #ATTR);
10824 PRINT_OOO_ATTR (ooo_cracked);
10825 PRINT_OOO_ATTR (ooo_expanded);
10826 PRINT_OOO_ATTR (ooo_endgroup);
10827 PRINT_OOO_ATTR (ooo_groupalone);
10828 #undef PRINT_OOO_ATTR
10829 fprintf (file, "\n");
10830 }
10831 }
10832 }
10833
10834 return s390_issue_rate ();
10835 }
10836
10837
10838 /* This function is called via hook TARGET_SCHED_VARIABLE_ISSUE after
10839 the scheduler has issued INSN. It stores the last issued insn into
10840 last_scheduled_insn in order to make it available for
10841 s390_sched_reorder. */
10842 static int
10843 s390_sched_variable_issue (FILE *file, int verbose, rtx insn, int more)
10844 {
10845 last_scheduled_insn = insn;
10846
10847 if (s390_tune == PROCESSOR_2827_ZEC12
10848 && reload_completed
10849 && recog_memoized (insn) >= 0)
10850 {
10851 unsigned int mask = s390_get_sched_attrmask (insn);
10852
10853 if ((mask & S390_OOO_SCHED_ATTR_MASK_CRACKED) != 0
10854 || (mask & S390_OOO_SCHED_ATTR_MASK_EXPANDED) != 0)
10855 s390_sched_state = S390_OOO_SCHED_STATE_CRACKED;
10856 else if ((mask & S390_OOO_SCHED_ATTR_MASK_ENDGROUP) != 0
10857 || (mask & S390_OOO_SCHED_ATTR_MASK_GROUPALONE) != 0)
10858 s390_sched_state = S390_OOO_SCHED_STATE_NORMAL;
10859 else
10860 {
10861 /* Only normal insns are left (mask == 0). */
10862 switch (s390_sched_state)
10863 {
10864 case 0:
10865 case 1:
10866 case 2:
10867 case S390_OOO_SCHED_STATE_NORMAL:
10868 if (s390_sched_state == S390_OOO_SCHED_STATE_NORMAL)
10869 s390_sched_state = 1;
10870 else
10871 s390_sched_state++;
10872
10873 break;
10874 case S390_OOO_SCHED_STATE_CRACKED:
10875 s390_sched_state = S390_OOO_SCHED_STATE_NORMAL;
10876 break;
10877 }
10878 }
10879 if (verbose > 5)
10880 {
10881 fprintf (file, "insn %d: ", INSN_UID (insn));
10882 #define PRINT_OOO_ATTR(ATTR) \
10883 fprintf (file, "%s ", get_attr_##ATTR (insn) ? #ATTR : "");
10884 PRINT_OOO_ATTR (ooo_cracked);
10885 PRINT_OOO_ATTR (ooo_expanded);
10886 PRINT_OOO_ATTR (ooo_endgroup);
10887 PRINT_OOO_ATTR (ooo_groupalone);
10888 #undef PRINT_OOO_ATTR
10889 fprintf (file, "\n");
10890 fprintf (file, "sched state: %d\n", s390_sched_state);
10891 }
10892 }
10893
10894 if (GET_CODE (PATTERN (insn)) != USE
10895 && GET_CODE (PATTERN (insn)) != CLOBBER)
10896 return more - 1;
10897 else
10898 return more;
10899 }
10900
10901 static void
10902 s390_sched_init (FILE *file ATTRIBUTE_UNUSED,
10903 int verbose ATTRIBUTE_UNUSED,
10904 int max_ready ATTRIBUTE_UNUSED)
10905 {
10906 last_scheduled_insn = NULL_RTX;
10907 s390_sched_state = 0;
10908 }
10909
10910 /* This function checks the whole of insn X for memory references. The
10911 function always returns zero because the framework it is called
10912 from would stop recursively analyzing the insn upon a return value
10913 other than zero. The real result of this function is updating
10914 counter variable MEM_COUNT. */
10915 static int
10916 check_dpu (rtx *x, unsigned *mem_count)
10917 {
10918 if (*x != NULL_RTX && MEM_P (*x))
10919 (*mem_count)++;
10920 return 0;
10921 }
10922
10923 /* This target hook implementation for TARGET_LOOP_UNROLL_ADJUST calculates
10924 a new unroll count for struct loop *loop when tuning for CPUs with
10925 a built-in stride prefetcher.
10926 The loop is analyzed for memory accesses by calling check_dpu for
10927 each rtx of the loop. Depending on the loop_depth and the number of
10928 memory accesses a new count <= nunroll is returned to improve the
10929 behaviour of the hardware prefetch unit. */
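/* Worked example (editor's illustration): a loop at depth 1 containing
7 memory references yields MIN (nunroll, 28 / 7), i.e. the unroll
factor is capped at 4; the same loop nested one level deeper would be
capped at 22 / 7 = 3.  */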
10930 static unsigned
10931 s390_loop_unroll_adjust (unsigned nunroll, struct loop *loop)
10932 {
10933 basic_block *bbs;
10934 rtx insn;
10935 unsigned i;
10936 unsigned mem_count = 0;
10937
10938 if (s390_tune != PROCESSOR_2097_Z10
10939 && s390_tune != PROCESSOR_2817_Z196
10940 && s390_tune != PROCESSOR_2827_ZEC12)
10941 return nunroll;
10942
10943 /* Count the number of memory references within the loop body. */
10944 bbs = get_loop_body (loop);
10945 for (i = 0; i < loop->num_nodes; i++)
10946 {
10947 for (insn = BB_HEAD (bbs[i]); insn != BB_END (bbs[i]); insn = NEXT_INSN (insn))
10948 if (INSN_P (insn) && INSN_CODE (insn) != -1)
10949 for_each_rtx (&insn, (rtx_function) check_dpu, &mem_count);
10950 }
10951 free (bbs);
10952
10953 /* Prevent division by zero, and we do not need to adjust nunroll in this case. */
10954 if (mem_count == 0)
10955 return nunroll;
10956
10957 switch (loop_depth(loop))
10958 {
10959 case 1:
10960 return MIN (nunroll, 28 / mem_count);
10961 case 2:
10962 return MIN (nunroll, 22 / mem_count);
10963 default:
10964 return MIN (nunroll, 16 / mem_count);
10965 }
10966 }
10967
10968 /* Initialize GCC target structure. */
10969
10970 #undef TARGET_ASM_ALIGNED_HI_OP
10971 #define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
10972 #undef TARGET_ASM_ALIGNED_DI_OP
10973 #define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
10974 #undef TARGET_ASM_INTEGER
10975 #define TARGET_ASM_INTEGER s390_assemble_integer
10976
10977 #undef TARGET_ASM_OPEN_PAREN
10978 #define TARGET_ASM_OPEN_PAREN ""
10979
10980 #undef TARGET_ASM_CLOSE_PAREN
10981 #define TARGET_ASM_CLOSE_PAREN ""
10982
10983 #undef TARGET_OPTION_OVERRIDE
10984 #define TARGET_OPTION_OVERRIDE s390_option_override
10985
10986 #undef TARGET_ENCODE_SECTION_INFO
10987 #define TARGET_ENCODE_SECTION_INFO s390_encode_section_info
10988
10989 #undef TARGET_SCALAR_MODE_SUPPORTED_P
10990 #define TARGET_SCALAR_MODE_SUPPORTED_P s390_scalar_mode_supported_p
10991
10992 #ifdef HAVE_AS_TLS
10993 #undef TARGET_HAVE_TLS
10994 #define TARGET_HAVE_TLS true
10995 #endif
10996 #undef TARGET_CANNOT_FORCE_CONST_MEM
10997 #define TARGET_CANNOT_FORCE_CONST_MEM s390_cannot_force_const_mem
10998
10999 #undef TARGET_DELEGITIMIZE_ADDRESS
11000 #define TARGET_DELEGITIMIZE_ADDRESS s390_delegitimize_address
11001
11002 #undef TARGET_LEGITIMIZE_ADDRESS
11003 #define TARGET_LEGITIMIZE_ADDRESS s390_legitimize_address
11004
11005 #undef TARGET_RETURN_IN_MEMORY
11006 #define TARGET_RETURN_IN_MEMORY s390_return_in_memory
11007
11008 #undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
11009 #define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA s390_output_addr_const_extra
11010
11011 #undef TARGET_ASM_OUTPUT_MI_THUNK
11012 #define TARGET_ASM_OUTPUT_MI_THUNK s390_output_mi_thunk
11013 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
11014 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
11015
11016 #undef TARGET_SCHED_ADJUST_PRIORITY
11017 #define TARGET_SCHED_ADJUST_PRIORITY s390_adjust_priority
11018 #undef TARGET_SCHED_ISSUE_RATE
11019 #define TARGET_SCHED_ISSUE_RATE s390_issue_rate
11020 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
11021 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD s390_first_cycle_multipass_dfa_lookahead
11022
11023 #undef TARGET_SCHED_VARIABLE_ISSUE
11024 #define TARGET_SCHED_VARIABLE_ISSUE s390_sched_variable_issue
11025 #undef TARGET_SCHED_REORDER
11026 #define TARGET_SCHED_REORDER s390_sched_reorder
11027 #undef TARGET_SCHED_INIT
11028 #define TARGET_SCHED_INIT s390_sched_init
11029
11030 #undef TARGET_CANNOT_COPY_INSN_P
11031 #define TARGET_CANNOT_COPY_INSN_P s390_cannot_copy_insn_p
11032 #undef TARGET_RTX_COSTS
11033 #define TARGET_RTX_COSTS s390_rtx_costs
11034 #undef TARGET_ADDRESS_COST
11035 #define TARGET_ADDRESS_COST s390_address_cost
11036 #undef TARGET_REGISTER_MOVE_COST
11037 #define TARGET_REGISTER_MOVE_COST s390_register_move_cost
11038 #undef TARGET_MEMORY_MOVE_COST
11039 #define TARGET_MEMORY_MOVE_COST s390_memory_move_cost
11040
11041 #undef TARGET_MACHINE_DEPENDENT_REORG
11042 #define TARGET_MACHINE_DEPENDENT_REORG s390_reorg
11043
11044 #undef TARGET_VALID_POINTER_MODE
11045 #define TARGET_VALID_POINTER_MODE s390_valid_pointer_mode
11046
11047 #undef TARGET_BUILD_BUILTIN_VA_LIST
11048 #define TARGET_BUILD_BUILTIN_VA_LIST s390_build_builtin_va_list
11049 #undef TARGET_EXPAND_BUILTIN_VA_START
11050 #define TARGET_EXPAND_BUILTIN_VA_START s390_va_start
11051 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
11052 #define TARGET_GIMPLIFY_VA_ARG_EXPR s390_gimplify_va_arg
11053
11054 #undef TARGET_PROMOTE_FUNCTION_MODE
11055 #define TARGET_PROMOTE_FUNCTION_MODE s390_promote_function_mode
11056 #undef TARGET_PASS_BY_REFERENCE
11057 #define TARGET_PASS_BY_REFERENCE s390_pass_by_reference
11058
11059 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
11060 #define TARGET_FUNCTION_OK_FOR_SIBCALL s390_function_ok_for_sibcall
11061 #undef TARGET_FUNCTION_ARG
11062 #define TARGET_FUNCTION_ARG s390_function_arg
11063 #undef TARGET_FUNCTION_ARG_ADVANCE
11064 #define TARGET_FUNCTION_ARG_ADVANCE s390_function_arg_advance
11065 #undef TARGET_FUNCTION_VALUE
11066 #define TARGET_FUNCTION_VALUE s390_function_value
11067 #undef TARGET_LIBCALL_VALUE
11068 #define TARGET_LIBCALL_VALUE s390_libcall_value
11069
11070 #undef TARGET_FIXED_CONDITION_CODE_REGS
11071 #define TARGET_FIXED_CONDITION_CODE_REGS s390_fixed_condition_code_regs
11072
11073 #undef TARGET_CC_MODES_COMPATIBLE
11074 #define TARGET_CC_MODES_COMPATIBLE s390_cc_modes_compatible
11075
11076 #undef TARGET_INVALID_WITHIN_DOLOOP
11077 #define TARGET_INVALID_WITHIN_DOLOOP hook_constcharptr_const_rtx_null
11078
11079 #ifdef HAVE_AS_TLS
11080 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
11081 #define TARGET_ASM_OUTPUT_DWARF_DTPREL s390_output_dwarf_dtprel
11082 #endif
11083
11084 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
11085 #undef TARGET_MANGLE_TYPE
11086 #define TARGET_MANGLE_TYPE s390_mangle_type
11087 #endif
11088
11089 #undef TARGET_SCALAR_MODE_SUPPORTED_P
11090 #define TARGET_SCALAR_MODE_SUPPORTED_P s390_scalar_mode_supported_p
11091
11092 #undef TARGET_PREFERRED_RELOAD_CLASS
11093 #define TARGET_PREFERRED_RELOAD_CLASS s390_preferred_reload_class
11094
11095 #undef TARGET_SECONDARY_RELOAD
11096 #define TARGET_SECONDARY_RELOAD s390_secondary_reload
11097
11098 #undef TARGET_LIBGCC_CMP_RETURN_MODE
11099 #define TARGET_LIBGCC_CMP_RETURN_MODE s390_libgcc_cmp_return_mode
11100
11101 #undef TARGET_LIBGCC_SHIFT_COUNT_MODE
11102 #define TARGET_LIBGCC_SHIFT_COUNT_MODE s390_libgcc_shift_count_mode
11103
11104 #undef TARGET_LEGITIMATE_ADDRESS_P
11105 #define TARGET_LEGITIMATE_ADDRESS_P s390_legitimate_address_p
11106
11107 #undef TARGET_LEGITIMATE_CONSTANT_P
11108 #define TARGET_LEGITIMATE_CONSTANT_P s390_legitimate_constant_p
11109
11110 #undef TARGET_CAN_ELIMINATE
11111 #define TARGET_CAN_ELIMINATE s390_can_eliminate
11112
11113 #undef TARGET_CONDITIONAL_REGISTER_USAGE
11114 #define TARGET_CONDITIONAL_REGISTER_USAGE s390_conditional_register_usage
11115
11116 #undef TARGET_LOOP_UNROLL_ADJUST
11117 #define TARGET_LOOP_UNROLL_ADJUST s390_loop_unroll_adjust
11118
11119 #undef TARGET_ASM_TRAMPOLINE_TEMPLATE
11120 #define TARGET_ASM_TRAMPOLINE_TEMPLATE s390_asm_trampoline_template
11121 #undef TARGET_TRAMPOLINE_INIT
11122 #define TARGET_TRAMPOLINE_INIT s390_trampoline_init
11123
11124 #undef TARGET_UNWIND_WORD_MODE
11125 #define TARGET_UNWIND_WORD_MODE s390_unwind_word_mode
11126
11127 #undef TARGET_CANONICALIZE_COMPARISON
11128 #define TARGET_CANONICALIZE_COMPARISON s390_canonicalize_comparison
11129
11130 struct gcc_target targetm = TARGET_INITIALIZER;
11131
11132 #include "gt-s390.h"