re PR bootstrap/57609 (S/390 ESA mode bootstrap failure since r197266)
[gcc.git] / gcc / config / s390 / s390.c
1 /* Subroutines used for code generation on IBM S/390 and zSeries
2 Copyright (C) 1999-2013 Free Software Foundation, Inc.
3 Contributed by Hartmut Penner (hpenner@de.ibm.com) and
4 Ulrich Weigand (uweigand@de.ibm.com) and
5 Andreas Krebbel (Andreas.Krebbel@de.ibm.com).
6
7 This file is part of GCC.
8
9 GCC is free software; you can redistribute it and/or modify it under
10 the terms of the GNU General Public License as published by the Free
11 Software Foundation; either version 3, or (at your option) any later
12 version.
13
14 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
15 WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING3. If not see
21 <http://www.gnu.org/licenses/>. */
22
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "tm_p.h"
30 #include "regs.h"
31 #include "hard-reg-set.h"
32 #include "insn-config.h"
33 #include "conditions.h"
34 #include "output.h"
35 #include "insn-attr.h"
36 #include "flags.h"
37 #include "except.h"
38 #include "function.h"
39 #include "recog.h"
40 #include "expr.h"
41 #include "reload.h"
42 #include "diagnostic-core.h"
43 #include "basic-block.h"
44 #include "ggc.h"
45 #include "target.h"
46 #include "target-def.h"
47 #include "debug.h"
48 #include "langhooks.h"
49 #include "optabs.h"
50 #include "gimple.h"
51 #include "df.h"
52 #include "params.h"
53 #include "cfgloop.h"
54 #include "opts.h"
55
56 /* Define the specific costs for a given cpu. */
57
58 struct processor_costs
59 {
60 /* multiplication */
61 const int m; /* cost of an M instruction. */
62 const int mghi; /* cost of an MGHI instruction. */
63 const int mh; /* cost of an MH instruction. */
64 const int mhi; /* cost of an MHI instruction. */
65 const int ml; /* cost of an ML instruction. */
66 const int mr; /* cost of an MR instruction. */
67 const int ms; /* cost of an MS instruction. */
68 const int msg; /* cost of an MSG instruction. */
69 const int msgf; /* cost of an MSGF instruction. */
70 const int msgfr; /* cost of an MSGFR instruction. */
71 const int msgr; /* cost of an MSGR instruction. */
72 const int msr; /* cost of an MSR instruction. */
73 const int mult_df; /* cost of multiplication in DFmode. */
74 const int mxbr;
75 /* square root */
76 const int sqxbr; /* cost of square root in TFmode. */
77 const int sqdbr; /* cost of square root in DFmode. */
78 const int sqebr; /* cost of square root in SFmode. */
79 /* multiply and add */
80 const int madbr; /* cost of multiply and add in DFmode. */
81 const int maebr; /* cost of multiply and add in SFmode. */
82 /* division */
83 const int dxbr;
84 const int ddbr;
85 const int debr;
86 const int dlgr;
87 const int dlr;
88 const int dr;
89 const int dsgfr;
90 const int dsgr;
91 };
92
93 const struct processor_costs *s390_cost;
94
95 static const
96 struct processor_costs z900_cost =
97 {
98 COSTS_N_INSNS (5), /* M */
99 COSTS_N_INSNS (10), /* MGHI */
100 COSTS_N_INSNS (5), /* MH */
101 COSTS_N_INSNS (4), /* MHI */
102 COSTS_N_INSNS (5), /* ML */
103 COSTS_N_INSNS (5), /* MR */
104 COSTS_N_INSNS (4), /* MS */
105 COSTS_N_INSNS (15), /* MSG */
106 COSTS_N_INSNS (7), /* MSGF */
107 COSTS_N_INSNS (7), /* MSGFR */
108 COSTS_N_INSNS (10), /* MSGR */
109 COSTS_N_INSNS (4), /* MSR */
110 COSTS_N_INSNS (7), /* multiplication in DFmode */
111 COSTS_N_INSNS (13), /* MXBR */
112 COSTS_N_INSNS (136), /* SQXBR */
113 COSTS_N_INSNS (44), /* SQDBR */
114 COSTS_N_INSNS (35), /* SQEBR */
115 COSTS_N_INSNS (18), /* MADBR */
116 COSTS_N_INSNS (13), /* MAEBR */
117 COSTS_N_INSNS (134), /* DXBR */
118 COSTS_N_INSNS (30), /* DDBR */
119 COSTS_N_INSNS (27), /* DEBR */
120 COSTS_N_INSNS (220), /* DLGR */
121 COSTS_N_INSNS (34), /* DLR */
122 COSTS_N_INSNS (34), /* DR */
123 COSTS_N_INSNS (32), /* DSGFR */
124 COSTS_N_INSNS (32), /* DSGR */
125 };
126
127 static const
128 struct processor_costs z990_cost =
129 {
130 COSTS_N_INSNS (4), /* M */
131 COSTS_N_INSNS (2), /* MGHI */
132 COSTS_N_INSNS (2), /* MH */
133 COSTS_N_INSNS (2), /* MHI */
134 COSTS_N_INSNS (4), /* ML */
135 COSTS_N_INSNS (4), /* MR */
136 COSTS_N_INSNS (5), /* MS */
137 COSTS_N_INSNS (6), /* MSG */
138 COSTS_N_INSNS (4), /* MSGF */
139 COSTS_N_INSNS (4), /* MSGFR */
140 COSTS_N_INSNS (4), /* MSGR */
141 COSTS_N_INSNS (4), /* MSR */
142 COSTS_N_INSNS (1), /* multiplication in DFmode */
143 COSTS_N_INSNS (28), /* MXBR */
144 COSTS_N_INSNS (130), /* SQXBR */
145 COSTS_N_INSNS (66), /* SQDBR */
146 COSTS_N_INSNS (38), /* SQEBR */
147 COSTS_N_INSNS (1), /* MADBR */
148 COSTS_N_INSNS (1), /* MAEBR */
149 COSTS_N_INSNS (60), /* DXBR */
150 COSTS_N_INSNS (40), /* DDBR */
151 COSTS_N_INSNS (26), /* DEBR */
152 COSTS_N_INSNS (176), /* DLGR */
153 COSTS_N_INSNS (31), /* DLR */
154 COSTS_N_INSNS (31), /* DR */
155 COSTS_N_INSNS (31), /* DSGFR */
156 COSTS_N_INSNS (31), /* DSGR */
157 };
158
159 static const
160 struct processor_costs z9_109_cost =
161 {
162 COSTS_N_INSNS (4), /* M */
163 COSTS_N_INSNS (2), /* MGHI */
164 COSTS_N_INSNS (2), /* MH */
165 COSTS_N_INSNS (2), /* MHI */
166 COSTS_N_INSNS (4), /* ML */
167 COSTS_N_INSNS (4), /* MR */
168 COSTS_N_INSNS (5), /* MS */
169 COSTS_N_INSNS (6), /* MSG */
170 COSTS_N_INSNS (4), /* MSGF */
171 COSTS_N_INSNS (4), /* MSGFR */
172 COSTS_N_INSNS (4), /* MSGR */
173 COSTS_N_INSNS (4), /* MSR */
174 COSTS_N_INSNS (1), /* multiplication in DFmode */
175 COSTS_N_INSNS (28), /* MXBR */
176 COSTS_N_INSNS (130), /* SQXBR */
177 COSTS_N_INSNS (66), /* SQDBR */
178 COSTS_N_INSNS (38), /* SQEBR */
179 COSTS_N_INSNS (1), /* MADBR */
180 COSTS_N_INSNS (1), /* MAEBR */
181 COSTS_N_INSNS (60), /* DXBR */
182 COSTS_N_INSNS (40), /* DDBR */
183 COSTS_N_INSNS (26), /* DEBR */
184 COSTS_N_INSNS (30), /* DLGR */
185 COSTS_N_INSNS (23), /* DLR */
186 COSTS_N_INSNS (23), /* DR */
187 COSTS_N_INSNS (24), /* DSGFR */
188 COSTS_N_INSNS (24), /* DSGR */
189 };
190
191 static const
192 struct processor_costs z10_cost =
193 {
194 COSTS_N_INSNS (10), /* M */
195 COSTS_N_INSNS (10), /* MGHI */
196 COSTS_N_INSNS (10), /* MH */
197 COSTS_N_INSNS (10), /* MHI */
198 COSTS_N_INSNS (10), /* ML */
199 COSTS_N_INSNS (10), /* MR */
200 COSTS_N_INSNS (10), /* MS */
201 COSTS_N_INSNS (10), /* MSG */
202 COSTS_N_INSNS (10), /* MSGF */
203 COSTS_N_INSNS (10), /* MSGFR */
204 COSTS_N_INSNS (10), /* MSGR */
205 COSTS_N_INSNS (10), /* MSR */
206 COSTS_N_INSNS (1) , /* multiplication in DFmode */
207 COSTS_N_INSNS (50), /* MXBR */
208 COSTS_N_INSNS (120), /* SQXBR */
209 COSTS_N_INSNS (52), /* SQDBR */
210 COSTS_N_INSNS (38), /* SQEBR */
211 COSTS_N_INSNS (1), /* MADBR */
212 COSTS_N_INSNS (1), /* MAEBR */
213 COSTS_N_INSNS (111), /* DXBR */
214 COSTS_N_INSNS (39), /* DDBR */
215 COSTS_N_INSNS (32), /* DEBR */
216 COSTS_N_INSNS (160), /* DLGR */
217 COSTS_N_INSNS (71), /* DLR */
218 COSTS_N_INSNS (71), /* DR */
219 COSTS_N_INSNS (71), /* DSGFR */
220 COSTS_N_INSNS (71), /* DSGR */
221 };
222
223 static const
224 struct processor_costs z196_cost =
225 {
226 COSTS_N_INSNS (7), /* M */
227 COSTS_N_INSNS (5), /* MGHI */
228 COSTS_N_INSNS (5), /* MH */
229 COSTS_N_INSNS (5), /* MHI */
230 COSTS_N_INSNS (7), /* ML */
231 COSTS_N_INSNS (7), /* MR */
232 COSTS_N_INSNS (6), /* MS */
233 COSTS_N_INSNS (8), /* MSG */
234 COSTS_N_INSNS (6), /* MSGF */
235 COSTS_N_INSNS (6), /* MSGFR */
236 COSTS_N_INSNS (8), /* MSGR */
237 COSTS_N_INSNS (6), /* MSR */
238 COSTS_N_INSNS (1) , /* multiplication in DFmode */
239 COSTS_N_INSNS (40), /* MXBR B+40 */
240 COSTS_N_INSNS (100), /* SQXBR B+100 */
241 COSTS_N_INSNS (42), /* SQDBR B+42 */
242 COSTS_N_INSNS (28), /* SQEBR B+28 */
243 COSTS_N_INSNS (1), /* MADBR B */
244 COSTS_N_INSNS (1), /* MAEBR B */
245 COSTS_N_INSNS (101), /* DXBR B+101 */
246 COSTS_N_INSNS (29), /* DDBR */
247 COSTS_N_INSNS (22), /* DEBR */
248 COSTS_N_INSNS (160), /* DLGR cracked */
249 COSTS_N_INSNS (160), /* DLR cracked */
250 COSTS_N_INSNS (160), /* DR expanded */
251 COSTS_N_INSNS (160), /* DSGFR cracked */
252 COSTS_N_INSNS (160), /* DSGR cracked */
253 };
254
255 static const
256 struct processor_costs zEC12_cost =
257 {
258 COSTS_N_INSNS (7), /* M */
259 COSTS_N_INSNS (5), /* MGHI */
260 COSTS_N_INSNS (5), /* MH */
261 COSTS_N_INSNS (5), /* MHI */
262 COSTS_N_INSNS (7), /* ML */
263 COSTS_N_INSNS (7), /* MR */
264 COSTS_N_INSNS (6), /* MS */
265 COSTS_N_INSNS (8), /* MSG */
266 COSTS_N_INSNS (6), /* MSGF */
267 COSTS_N_INSNS (6), /* MSGFR */
268 COSTS_N_INSNS (8), /* MSGR */
269 COSTS_N_INSNS (6), /* MSR */
270 COSTS_N_INSNS (1) , /* multiplication in DFmode */
271 COSTS_N_INSNS (40), /* MXBR B+40 */
272 COSTS_N_INSNS (100), /* SQXBR B+100 */
273 COSTS_N_INSNS (42), /* SQDBR B+42 */
274 COSTS_N_INSNS (28), /* SQEBR B+28 */
275 COSTS_N_INSNS (1), /* MADBR B */
276 COSTS_N_INSNS (1), /* MAEBR B */
277 COSTS_N_INSNS (131), /* DXBR B+131 */
278 COSTS_N_INSNS (29), /* DDBR */
279 COSTS_N_INSNS (22), /* DEBR */
280 COSTS_N_INSNS (160), /* DLGR cracked */
281 COSTS_N_INSNS (160), /* DLR cracked */
282 COSTS_N_INSNS (160), /* DR expanded */
283 COSTS_N_INSNS (160), /* DSGFR cracked */
284 COSTS_N_INSNS (160), /* DSGR cracked */
285 };
286
287 extern int reload_completed;
288
289 /* Kept up to date using the SCHED_VARIABLE_ISSUE hook. */
290 static rtx last_scheduled_insn;
291
292 /* Structure used to hold the components of a S/390 memory
293 address. A legitimate address on S/390 is of the general
294 form
295 base + index + displacement
296 where any of the components is optional.
297
298 base and index are registers of the class ADDR_REGS,
299 displacement is an unsigned 12-bit immediate constant. */
300
301 struct s390_address
302 {
303 rtx base;
304 rtx indx;
305 rtx disp;
306 bool pointer;
307 bool literal_pool;
308 };
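/* Illustrative sketch (not part of the original source): for the typical
   assembler operand 40(%r2,%r3) -- displacement(index,base) -- a
   successful run of the decomposition routine below would yield
   indx = %r2, base = %r3 and an integer displacement of 40.  Guarded
   out, never compiled; the rtx helpers used are the usual ones.  */
#if 0
  struct s390_address ad;
  rtx addr = gen_rtx_PLUS (Pmode,
                           gen_rtx_PLUS (Pmode,
                                         gen_rtx_REG (Pmode, 2),   /* index %r2 */
                                         gen_rtx_REG (Pmode, 3)),  /* base  %r3 */
                           GEN_INT (40));                          /* displacement */
  if (s390_decompose_address (addr, &ad))
    /* ad.indx is %r2, ad.base is %r3, and the displacement is 40.  */;
#endif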
309
310 /* The following structure is embedded in the machine
311 specific part of struct function. */
312
313 struct GTY (()) s390_frame_layout
314 {
315 /* Offset within stack frame. */
316 HOST_WIDE_INT gprs_offset;
317 HOST_WIDE_INT f0_offset;
318 HOST_WIDE_INT f4_offset;
319 HOST_WIDE_INT f8_offset;
320 HOST_WIDE_INT backchain_offset;
321
322 /* Numbers of the first and last gpr for which slots in the
323 register save area are reserved. */
324 int first_save_gpr_slot;
325 int last_save_gpr_slot;
326
327 /* Numbers of the first and last gpr to be saved and restored. */
328 int first_save_gpr;
329 int first_restore_gpr;
330 int last_save_gpr;
331 int last_restore_gpr;
332
333 /* Bits standing for floating point registers. Set, if the
334 respective register has to be saved. Starting with reg 16 (f0)
335 at the rightmost bit.
336 Bit 15 - 8 7 6 5 4 3 2 1 0
337 fpr 15 - 8 7 5 3 1 6 4 2 0
338 reg 31 - 24 23 22 21 20 19 18 17 16 */
339 unsigned int fpr_bitmap;
340
341 /* Number of floating point registers f8-f15 which must be saved. */
342 int high_fprs;
343
344 /* Set if return address needs to be saved.
345 This flag is set by s390_return_addr_rtx if it could not use
346 the initial value of r14 and therefore depends on r14 saved
347 to the stack. */
348 bool save_return_addr_p;
349
350 /* Size of stack frame. */
351 HOST_WIDE_INT frame_size;
352 };
353
354 /* Define the structure for the machine field in struct function. */
355
356 struct GTY(()) machine_function
357 {
358 struct s390_frame_layout frame_layout;
359
360 /* Literal pool base register. */
361 rtx base_reg;
362
363 /* True if we may need to perform branch splitting. */
364 bool split_branches_pending_p;
365
366 /* Some local-dynamic TLS symbol name. */
367 const char *some_ld_name;
368
369 bool has_landing_pad_p;
370 };
371
372 /* A few accessor macros for struct cfun->machine->s390_frame_layout. */
373
374 #define cfun_frame_layout (cfun->machine->frame_layout)
375 #define cfun_save_high_fprs_p (!!cfun_frame_layout.high_fprs)
376 #define cfun_gprs_save_area_size ((cfun_frame_layout.last_save_gpr_slot - \
377 cfun_frame_layout.first_save_gpr_slot + 1) * UNITS_PER_LONG)
378 #define cfun_set_fpr_bit(BITNUM) (cfun->machine->frame_layout.fpr_bitmap |= \
379 (1 << (BITNUM)))
380 #define cfun_fpr_bit_p(BITNUM) (!!(cfun->machine->frame_layout.fpr_bitmap & \
381 (1 << (BITNUM))))
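/* Illustrative sketch (not part of the original source): per the
   fpr_bitmap layout documented in struct s390_frame_layout above,
   bit 1 stands for f2 (hard reg 17).  Guarded out, never compiled.  */
#if 0
  cfun_set_fpr_bit (1);              /* request a save slot for f2 (reg 17) */
  gcc_assert (cfun_fpr_bit_p (1));   /* the query now succeeds */
#endif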
382
383 /* Number of GPRs and FPRs used for argument passing. */
384 #define GP_ARG_NUM_REG 5
385 #define FP_ARG_NUM_REG (TARGET_64BIT? 4 : 2)
386
387 /* A couple of shortcuts. */
388 #define CONST_OK_FOR_J(x) \
389 CONST_OK_FOR_CONSTRAINT_P((x), 'J', "J")
390 #define CONST_OK_FOR_K(x) \
391 CONST_OK_FOR_CONSTRAINT_P((x), 'K', "K")
392 #define CONST_OK_FOR_Os(x) \
393 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "Os")
394 #define CONST_OK_FOR_Op(x) \
395 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "Op")
396 #define CONST_OK_FOR_On(x) \
397 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "On")
398
399 #define REGNO_PAIR_OK(REGNO, MODE) \
400 (HARD_REGNO_NREGS ((REGNO), (MODE)) == 1 || !((REGNO) & 1))
401
402 /* That's the read ahead of the dynamic branch prediction unit in
403 bytes on a z10 (or higher) CPU. */
404 #define PREDICT_DISTANCE (TARGET_Z10 ? 384 : 2048)
405
406 /* Return the alignment for LABEL. We default to the -falign-labels
407 value except for the literal pool base label. */
408 int
409 s390_label_align (rtx label)
410 {
411 rtx prev_insn = prev_active_insn (label);
412
413 if (prev_insn == NULL_RTX)
414 goto old;
415
416 prev_insn = single_set (prev_insn);
417
418 if (prev_insn == NULL_RTX)
419 goto old;
420
421 prev_insn = SET_SRC (prev_insn);
422
423 /* Don't align literal pool base labels. */
424 if (GET_CODE (prev_insn) == UNSPEC
425 && XINT (prev_insn, 1) == UNSPEC_MAIN_BASE)
426 return 0;
427
428 old:
429 return align_labels_log;
430 }
431
432 static enum machine_mode
433 s390_libgcc_cmp_return_mode (void)
434 {
435 return TARGET_64BIT ? DImode : SImode;
436 }
437
438 static enum machine_mode
439 s390_libgcc_shift_count_mode (void)
440 {
441 return TARGET_64BIT ? DImode : SImode;
442 }
443
444 static enum machine_mode
445 s390_unwind_word_mode (void)
446 {
447 return TARGET_64BIT ? DImode : SImode;
448 }
449
450 /* Return true if the back end supports mode MODE. */
451 static bool
452 s390_scalar_mode_supported_p (enum machine_mode mode)
453 {
454 /* In contrast to the default implementation, reject TImode on 31-bit
455 TARGET_ZARCH for ABI compliance. */
456 if (!TARGET_64BIT && TARGET_ZARCH && mode == TImode)
457 return false;
458
459 if (DECIMAL_FLOAT_MODE_P (mode))
460 return default_decimal_float_supported_p ();
461
462 return default_scalar_mode_supported_p (mode);
463 }
464
465 /* Set the has_landing_pad_p flag in struct machine_function to VALUE. */
466
467 void
468 s390_set_has_landing_pad_p (bool value)
469 {
470 cfun->machine->has_landing_pad_p = value;
471 }
472
473 /* If two condition code modes are compatible, return a condition code
474 mode which is compatible with both. Otherwise, return
475 VOIDmode. */
476
477 static enum machine_mode
478 s390_cc_modes_compatible (enum machine_mode m1, enum machine_mode m2)
479 {
480 if (m1 == m2)
481 return m1;
482
483 switch (m1)
484 {
485 case CCZmode:
486 if (m2 == CCUmode || m2 == CCTmode || m2 == CCZ1mode
487 || m2 == CCSmode || m2 == CCSRmode || m2 == CCURmode)
488 return m2;
489 return VOIDmode;
490
491 case CCSmode:
492 case CCUmode:
493 case CCTmode:
494 case CCSRmode:
495 case CCURmode:
496 case CCZ1mode:
497 if (m2 == CCZmode)
498 return m1;
499
500 return VOIDmode;
501
502 default:
503 return VOIDmode;
504 }
505 return VOIDmode;
506 }
507
508 /* Return true if SET either doesn't set the CC register, or else
509 the source and destination have matching CC modes and that
510 CC mode is at least as constrained as REQ_MODE. */
511
512 static bool
513 s390_match_ccmode_set (rtx set, enum machine_mode req_mode)
514 {
515 enum machine_mode set_mode;
516
517 gcc_assert (GET_CODE (set) == SET);
518
519 if (GET_CODE (SET_DEST (set)) != REG || !CC_REGNO_P (REGNO (SET_DEST (set))))
520 return 1;
521
522 set_mode = GET_MODE (SET_DEST (set));
523 switch (set_mode)
524 {
525 case CCSmode:
526 case CCSRmode:
527 case CCUmode:
528 case CCURmode:
529 case CCLmode:
530 case CCL1mode:
531 case CCL2mode:
532 case CCL3mode:
533 case CCT1mode:
534 case CCT2mode:
535 case CCT3mode:
536 if (req_mode != set_mode)
537 return 0;
538 break;
539
540 case CCZmode:
541 if (req_mode != CCSmode && req_mode != CCUmode && req_mode != CCTmode
542 && req_mode != CCSRmode && req_mode != CCURmode)
543 return 0;
544 break;
545
546 case CCAPmode:
547 case CCANmode:
548 if (req_mode != CCAmode)
549 return 0;
550 break;
551
552 default:
553 gcc_unreachable ();
554 }
555
556 return (GET_MODE (SET_SRC (set)) == set_mode);
557 }
558
559 /* Return true if every SET in INSN that sets the CC register
560 has source and destination with matching CC modes and that
561 CC mode is at least as constrained as REQ_MODE.
562 If REQ_MODE is VOIDmode, always return false. */
563
564 bool
565 s390_match_ccmode (rtx insn, enum machine_mode req_mode)
566 {
567 int i;
568
569 /* s390_tm_ccmode returns VOIDmode to indicate failure. */
570 if (req_mode == VOIDmode)
571 return false;
572
573 if (GET_CODE (PATTERN (insn)) == SET)
574 return s390_match_ccmode_set (PATTERN (insn), req_mode);
575
576 if (GET_CODE (PATTERN (insn)) == PARALLEL)
577 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
578 {
579 rtx set = XVECEXP (PATTERN (insn), 0, i);
580 if (GET_CODE (set) == SET)
581 if (!s390_match_ccmode_set (set, req_mode))
582 return false;
583 }
584
585 return true;
586 }
587
588 /* If a test-under-mask instruction can be used to implement
589 (compare (and ... OP1) OP2), return the CC mode required
590 to do that. Otherwise, return VOIDmode.
591 MIXED is true if the instruction can distinguish between
592 CC1 and CC2 for mixed selected bits (TMxx), it is false
593 if the instruction cannot (TM). */
594
595 enum machine_mode
596 s390_tm_ccmode (rtx op1, rtx op2, bool mixed)
597 {
598 int bit0, bit1;
599
600 /* ??? Fixme: should work on CONST_DOUBLE as well. */
601 if (GET_CODE (op1) != CONST_INT || GET_CODE (op2) != CONST_INT)
602 return VOIDmode;
603
604 /* Selected bits all zero: CC0.
605 e.g.: int a; if ((a & (16 + 128)) == 0) */
606 if (INTVAL (op2) == 0)
607 return CCTmode;
608
609 /* Selected bits all one: CC3.
610 e.g.: int a; if ((a & (16 + 128)) == 16 + 128) */
611 if (INTVAL (op2) == INTVAL (op1))
612 return CCT3mode;
613
614 /* Exactly two bits selected, mixed zeroes and ones: CC1 or CC2. e.g.:
615 int a;
616 if ((a & (16 + 128)) == 16) -> CCT1
617 if ((a & (16 + 128)) == 128) -> CCT2 */
618 if (mixed)
619 {
620 bit1 = exact_log2 (INTVAL (op2));
621 bit0 = exact_log2 (INTVAL (op1) ^ INTVAL (op2));
622 if (bit0 != -1 && bit1 != -1)
623 return bit0 > bit1 ? CCT1mode : CCT2mode;
624 }
625
626 return VOIDmode;
627 }
628
629 /* Given a comparison code OP (EQ, NE, etc.) and the operands
630 OP0 and OP1 of a COMPARE, return the mode to be used for the
631 comparison. */
632
633 enum machine_mode
634 s390_select_ccmode (enum rtx_code code, rtx op0, rtx op1)
635 {
636 switch (code)
637 {
638 case EQ:
639 case NE:
640 if ((GET_CODE (op0) == NEG || GET_CODE (op0) == ABS)
641 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
642 return CCAPmode;
643 if (GET_CODE (op0) == PLUS && GET_CODE (XEXP (op0, 1)) == CONST_INT
644 && CONST_OK_FOR_K (INTVAL (XEXP (op0, 1))))
645 return CCAPmode;
646 if ((GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
647 || GET_CODE (op1) == NEG)
648 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
649 return CCLmode;
650
651 if (GET_CODE (op0) == AND)
652 {
653 /* Check whether we can potentially do it via TM. */
654 enum machine_mode ccmode;
655 ccmode = s390_tm_ccmode (XEXP (op0, 1), op1, 1);
656 if (ccmode != VOIDmode)
657 {
658 /* Relax CCTmode to CCZmode to allow fall-back to AND
659 if that turns out to be beneficial. */
660 return ccmode == CCTmode ? CCZmode : ccmode;
661 }
662 }
663
664 if (register_operand (op0, HImode)
665 && GET_CODE (op1) == CONST_INT
666 && (INTVAL (op1) == -1 || INTVAL (op1) == 65535))
667 return CCT3mode;
668 if (register_operand (op0, QImode)
669 && GET_CODE (op1) == CONST_INT
670 && (INTVAL (op1) == -1 || INTVAL (op1) == 255))
671 return CCT3mode;
672
673 return CCZmode;
674
675 case LE:
676 case LT:
677 case GE:
678 case GT:
679 /* The only overflow condition of NEG and ABS happens when the
680 operand is INT_MIN, whose result stays negative. So the
681 mathematically positive result overflows to a negative value.
682 Using CCAP mode the resulting cc can be used for comparisons. */
683 if ((GET_CODE (op0) == NEG || GET_CODE (op0) == ABS)
684 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
685 return CCAPmode;
686
687 /* If constants are involved in an add instruction it is possible to use
688 the resulting cc for comparisons with zero. Knowing the sign of the
689 constant the overflow behavior gets predictable. e.g.:
690 int a, b; if ((b = a + c) > 0)
691 with c as a constant value: c < 0 -> CCAN and c >= 0 -> CCAP */
692 if (GET_CODE (op0) == PLUS && GET_CODE (XEXP (op0, 1)) == CONST_INT
693 && (CONST_OK_FOR_K (INTVAL (XEXP (op0, 1)))
694 || (CONST_OK_FOR_CONSTRAINT_P (INTVAL (XEXP (op0, 1)), 'O', "Os")
695 /* Avoid INT32_MIN on 32 bit. */
696 && (!TARGET_ZARCH || INTVAL (XEXP (op0, 1)) != -0x7fffffff - 1))))
697 {
698 if (INTVAL (XEXP((op0), 1)) < 0)
699 return CCANmode;
700 else
701 return CCAPmode;
702 }
703 /* Fall through. */
704 case UNORDERED:
705 case ORDERED:
706 case UNEQ:
707 case UNLE:
708 case UNLT:
709 case UNGE:
710 case UNGT:
711 case LTGT:
712 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
713 && GET_CODE (op1) != CONST_INT)
714 return CCSRmode;
715 return CCSmode;
716
717 case LTU:
718 case GEU:
719 if (GET_CODE (op0) == PLUS
720 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
721 return CCL1mode;
722
723 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
724 && GET_CODE (op1) != CONST_INT)
725 return CCURmode;
726 return CCUmode;
727
728 case LEU:
729 case GTU:
730 if (GET_CODE (op0) == MINUS
731 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
732 return CCL2mode;
733
734 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
735 && GET_CODE (op1) != CONST_INT)
736 return CCURmode;
737 return CCUmode;
738
739 default:
740 gcc_unreachable ();
741 }
742 }
743
744 /* Replace the comparison OP0 CODE OP1 by a semantically equivalent one
745 that we can implement more efficiently. */
746
747 static void
748 s390_canonicalize_comparison (int *code, rtx *op0, rtx *op1,
749 bool op0_preserve_value)
750 {
751 if (op0_preserve_value)
752 return;
753
754 /* Convert ZERO_EXTRACT back to AND to enable TM patterns. */
755 if ((*code == EQ || *code == NE)
756 && *op1 == const0_rtx
757 && GET_CODE (*op0) == ZERO_EXTRACT
758 && GET_CODE (XEXP (*op0, 1)) == CONST_INT
759 && GET_CODE (XEXP (*op0, 2)) == CONST_INT
760 && SCALAR_INT_MODE_P (GET_MODE (XEXP (*op0, 0))))
761 {
762 rtx inner = XEXP (*op0, 0);
763 HOST_WIDE_INT modesize = GET_MODE_BITSIZE (GET_MODE (inner));
764 HOST_WIDE_INT len = INTVAL (XEXP (*op0, 1));
765 HOST_WIDE_INT pos = INTVAL (XEXP (*op0, 2));
766
767 if (len > 0 && len < modesize
768 && pos >= 0 && pos + len <= modesize
769 && modesize <= HOST_BITS_PER_WIDE_INT)
770 {
771 unsigned HOST_WIDE_INT block;
772 block = ((unsigned HOST_WIDE_INT) 1 << len) - 1;
773 block <<= modesize - pos - len;
774
775 *op0 = gen_rtx_AND (GET_MODE (inner), inner,
776 gen_int_mode (block, GET_MODE (inner)));
777 }
778 }
779
780 /* Narrow AND of memory against immediate to enable TM. */
781 if ((*code == EQ || *code == NE)
782 && *op1 == const0_rtx
783 && GET_CODE (*op0) == AND
784 && GET_CODE (XEXP (*op0, 1)) == CONST_INT
785 && SCALAR_INT_MODE_P (GET_MODE (XEXP (*op0, 0))))
786 {
787 rtx inner = XEXP (*op0, 0);
788 rtx mask = XEXP (*op0, 1);
789
790 /* Ignore paradoxical SUBREGs if all extra bits are masked out. */
791 if (GET_CODE (inner) == SUBREG
792 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (inner)))
793 && (GET_MODE_SIZE (GET_MODE (inner))
794 >= GET_MODE_SIZE (GET_MODE (SUBREG_REG (inner))))
795 && ((INTVAL (mask)
796 & GET_MODE_MASK (GET_MODE (inner))
797 & ~GET_MODE_MASK (GET_MODE (SUBREG_REG (inner))))
798 == 0))
799 inner = SUBREG_REG (inner);
800
801 /* Do not change volatile MEMs. */
802 if (MEM_P (inner) && !MEM_VOLATILE_P (inner))
803 {
804 int part = s390_single_part (XEXP (*op0, 1),
805 GET_MODE (inner), QImode, 0);
806 if (part >= 0)
807 {
808 mask = gen_int_mode (s390_extract_part (mask, QImode, 0), QImode);
809 inner = adjust_address_nv (inner, QImode, part);
810 *op0 = gen_rtx_AND (QImode, inner, mask);
811 }
812 }
813 }
814
815 /* Narrow comparisons against 0xffff to HImode if possible. */
816 if ((*code == EQ || *code == NE)
817 && GET_CODE (*op1) == CONST_INT
818 && INTVAL (*op1) == 0xffff
819 && SCALAR_INT_MODE_P (GET_MODE (*op0))
820 && (nonzero_bits (*op0, GET_MODE (*op0))
821 & ~(unsigned HOST_WIDE_INT) 0xffff) == 0)
822 {
823 *op0 = gen_lowpart (HImode, *op0);
824 *op1 = constm1_rtx;
825 }
826
827 /* Remove redundant UNSPEC_CCU_TO_INT conversions if possible. */
828 if (GET_CODE (*op0) == UNSPEC
829 && XINT (*op0, 1) == UNSPEC_CCU_TO_INT
830 && XVECLEN (*op0, 0) == 1
831 && GET_MODE (XVECEXP (*op0, 0, 0)) == CCUmode
832 && GET_CODE (XVECEXP (*op0, 0, 0)) == REG
833 && REGNO (XVECEXP (*op0, 0, 0)) == CC_REGNUM
834 && *op1 == const0_rtx)
835 {
836 enum rtx_code new_code = UNKNOWN;
837 switch (*code)
838 {
839 case EQ: new_code = EQ; break;
840 case NE: new_code = NE; break;
841 case LT: new_code = GTU; break;
842 case GT: new_code = LTU; break;
843 case LE: new_code = GEU; break;
844 case GE: new_code = LEU; break;
845 default: break;
846 }
847
848 if (new_code != UNKNOWN)
849 {
850 *op0 = XVECEXP (*op0, 0, 0);
851 *code = new_code;
852 }
853 }
854
855 /* Remove redundant UNSPEC_CCZ_TO_INT conversions if possible. */
856 if (GET_CODE (*op0) == UNSPEC
857 && XINT (*op0, 1) == UNSPEC_CCZ_TO_INT
858 && XVECLEN (*op0, 0) == 1
859 && GET_MODE (XVECEXP (*op0, 0, 0)) == CCZmode
860 && GET_CODE (XVECEXP (*op0, 0, 0)) == REG
861 && REGNO (XVECEXP (*op0, 0, 0)) == CC_REGNUM
862 && *op1 == const0_rtx)
863 {
864 enum rtx_code new_code = UNKNOWN;
865 switch (*code)
866 {
867 case EQ: new_code = EQ; break;
868 case NE: new_code = NE; break;
869 default: break;
870 }
871
872 if (new_code != UNKNOWN)
873 {
874 *op0 = XVECEXP (*op0, 0, 0);
875 *code = new_code;
876 }
877 }
878
879 /* Simplify cascaded EQ, NE with const0_rtx. */
880 if ((*code == NE || *code == EQ)
881 && (GET_CODE (*op0) == EQ || GET_CODE (*op0) == NE)
882 && GET_MODE (*op0) == SImode
883 && GET_MODE (XEXP (*op0, 0)) == CCZ1mode
884 && REG_P (XEXP (*op0, 0))
885 && XEXP (*op0, 1) == const0_rtx
886 && *op1 == const0_rtx)
887 {
888 if ((*code == EQ && GET_CODE (*op0) == NE)
889 || (*code == NE && GET_CODE (*op0) == EQ))
890 *code = EQ;
891 else
892 *code = NE;
893 *op0 = XEXP (*op0, 0);
894 }
895
896 /* Prefer register over memory as first operand. */
897 if (MEM_P (*op0) && REG_P (*op1))
898 {
899 rtx tem = *op0; *op0 = *op1; *op1 = tem;
900 *code = (int)swap_condition ((enum rtx_code)*code);
901 }
902 }
903
904 /* Emit a compare instruction suitable to implement the comparison
905 OP0 CODE OP1. Return the correct condition RTL to be placed in
906 the IF_THEN_ELSE of the conditional branch testing the result. */
907
908 rtx
909 s390_emit_compare (enum rtx_code code, rtx op0, rtx op1)
910 {
911 enum machine_mode mode = s390_select_ccmode (code, op0, op1);
912 rtx cc;
913
914 /* Do not output a redundant compare instruction if a compare_and_swap
915 pattern already computed the result and the machine modes are compatible. */
916 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
917 {
918 gcc_assert (s390_cc_modes_compatible (GET_MODE (op0), mode)
919 == GET_MODE (op0));
920 cc = op0;
921 }
922 else
923 {
924 cc = gen_rtx_REG (mode, CC_REGNUM);
925 emit_insn (gen_rtx_SET (VOIDmode, cc, gen_rtx_COMPARE (mode, op0, op1)));
926 }
927
928 return gen_rtx_fmt_ee (code, VOIDmode, cc, const0_rtx);
929 }
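/* A minimal usage sketch (illustrative only, op0/op1/label are
   placeholders): the condition RTL returned above is meant to be placed
   in the IF_THEN_ELSE of a conditional branch, e.g. via s390_emit_jump
   below.  Guarded out, never compiled.  */
#if 0
  rtx cond = s390_emit_compare (GT, op0, op1);   /* emits the compare insn */
  s390_emit_jump (label, cond);                  /* branches if op0 > op1 */
#endif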
930
931 /* Emit a SImode compare and swap instruction setting MEM to NEW_RTX if OLD
932 matches CMP.
933 Return the correct condition RTL to be placed in the IF_THEN_ELSE of the
934 conditional branch testing the result. */
935
936 static rtx
937 s390_emit_compare_and_swap (enum rtx_code code, rtx old, rtx mem,
938 rtx cmp, rtx new_rtx)
939 {
940 emit_insn (gen_atomic_compare_and_swapsi_internal (old, mem, cmp, new_rtx));
941 return s390_emit_compare (code, gen_rtx_REG (CCZ1mode, CC_REGNUM),
942 const0_rtx);
943 }
944
945 /* Emit a jump instruction to TARGET. If COND is NULL_RTX, emit an
946 unconditional jump, else a conditional jump under condition COND. */
947
948 void
949 s390_emit_jump (rtx target, rtx cond)
950 {
951 rtx insn;
952
953 target = gen_rtx_LABEL_REF (VOIDmode, target);
954 if (cond)
955 target = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, target, pc_rtx);
956
957 insn = gen_rtx_SET (VOIDmode, pc_rtx, target);
958 emit_jump_insn (insn);
959 }
960
961 /* Return branch condition mask to implement a branch
962 specified by CODE. Return -1 for invalid comparisons. */
963
964 int
965 s390_branch_condition_mask (rtx code)
966 {
967 const int CC0 = 1 << 3;
968 const int CC1 = 1 << 2;
969 const int CC2 = 1 << 1;
970 const int CC3 = 1 << 0;
971
972 gcc_assert (GET_CODE (XEXP (code, 0)) == REG);
973 gcc_assert (REGNO (XEXP (code, 0)) == CC_REGNUM);
974 gcc_assert (XEXP (code, 1) == const0_rtx);
975
976 switch (GET_MODE (XEXP (code, 0)))
977 {
978 case CCZmode:
979 case CCZ1mode:
980 switch (GET_CODE (code))
981 {
982 case EQ: return CC0;
983 case NE: return CC1 | CC2 | CC3;
984 default: return -1;
985 }
986 break;
987
988 case CCT1mode:
989 switch (GET_CODE (code))
990 {
991 case EQ: return CC1;
992 case NE: return CC0 | CC2 | CC3;
993 default: return -1;
994 }
995 break;
996
997 case CCT2mode:
998 switch (GET_CODE (code))
999 {
1000 case EQ: return CC2;
1001 case NE: return CC0 | CC1 | CC3;
1002 default: return -1;
1003 }
1004 break;
1005
1006 case CCT3mode:
1007 switch (GET_CODE (code))
1008 {
1009 case EQ: return CC3;
1010 case NE: return CC0 | CC1 | CC2;
1011 default: return -1;
1012 }
1013 break;
1014
1015 case CCLmode:
1016 switch (GET_CODE (code))
1017 {
1018 case EQ: return CC0 | CC2;
1019 case NE: return CC1 | CC3;
1020 default: return -1;
1021 }
1022 break;
1023
1024 case CCL1mode:
1025 switch (GET_CODE (code))
1026 {
1027 case LTU: return CC2 | CC3; /* carry */
1028 case GEU: return CC0 | CC1; /* no carry */
1029 default: return -1;
1030 }
1031 break;
1032
1033 case CCL2mode:
1034 switch (GET_CODE (code))
1035 {
1036 case GTU: return CC0 | CC1; /* borrow */
1037 case LEU: return CC2 | CC3; /* no borrow */
1038 default: return -1;
1039 }
1040 break;
1041
1042 case CCL3mode:
1043 switch (GET_CODE (code))
1044 {
1045 case EQ: return CC0 | CC2;
1046 case NE: return CC1 | CC3;
1047 case LTU: return CC1;
1048 case GTU: return CC3;
1049 case LEU: return CC1 | CC2;
1050 case GEU: return CC2 | CC3;
1051 default: return -1;
1052 }
1053
1054 case CCUmode:
1055 switch (GET_CODE (code))
1056 {
1057 case EQ: return CC0;
1058 case NE: return CC1 | CC2 | CC3;
1059 case LTU: return CC1;
1060 case GTU: return CC2;
1061 case LEU: return CC0 | CC1;
1062 case GEU: return CC0 | CC2;
1063 default: return -1;
1064 }
1065 break;
1066
1067 case CCURmode:
1068 switch (GET_CODE (code))
1069 {
1070 case EQ: return CC0;
1071 case NE: return CC2 | CC1 | CC3;
1072 case LTU: return CC2;
1073 case GTU: return CC1;
1074 case LEU: return CC0 | CC2;
1075 case GEU: return CC0 | CC1;
1076 default: return -1;
1077 }
1078 break;
1079
1080 case CCAPmode:
1081 switch (GET_CODE (code))
1082 {
1083 case EQ: return CC0;
1084 case NE: return CC1 | CC2 | CC3;
1085 case LT: return CC1 | CC3;
1086 case GT: return CC2;
1087 case LE: return CC0 | CC1 | CC3;
1088 case GE: return CC0 | CC2;
1089 default: return -1;
1090 }
1091 break;
1092
1093 case CCANmode:
1094 switch (GET_CODE (code))
1095 {
1096 case EQ: return CC0;
1097 case NE: return CC1 | CC2 | CC3;
1098 case LT: return CC1;
1099 case GT: return CC2 | CC3;
1100 case LE: return CC0 | CC1;
1101 case GE: return CC0 | CC2 | CC3;
1102 default: return -1;
1103 }
1104 break;
1105
1106 case CCSmode:
1107 switch (GET_CODE (code))
1108 {
1109 case EQ: return CC0;
1110 case NE: return CC1 | CC2 | CC3;
1111 case LT: return CC1;
1112 case GT: return CC2;
1113 case LE: return CC0 | CC1;
1114 case GE: return CC0 | CC2;
1115 case UNORDERED: return CC3;
1116 case ORDERED: return CC0 | CC1 | CC2;
1117 case UNEQ: return CC0 | CC3;
1118 case UNLT: return CC1 | CC3;
1119 case UNGT: return CC2 | CC3;
1120 case UNLE: return CC0 | CC1 | CC3;
1121 case UNGE: return CC0 | CC2 | CC3;
1122 case LTGT: return CC1 | CC2;
1123 default: return -1;
1124 }
1125 break;
1126
1127 case CCSRmode:
1128 switch (GET_CODE (code))
1129 {
1130 case EQ: return CC0;
1131 case NE: return CC2 | CC1 | CC3;
1132 case LT: return CC2;
1133 case GT: return CC1;
1134 case LE: return CC0 | CC2;
1135 case GE: return CC0 | CC1;
1136 case UNORDERED: return CC3;
1137 case ORDERED: return CC0 | CC2 | CC1;
1138 case UNEQ: return CC0 | CC3;
1139 case UNLT: return CC2 | CC3;
1140 case UNGT: return CC1 | CC3;
1141 case UNLE: return CC0 | CC2 | CC3;
1142 case UNGE: return CC0 | CC1 | CC3;
1143 case LTGT: return CC2 | CC1;
1144 default: return -1;
1145 }
1146 break;
1147
1148 default:
1149 return -1;
1150 }
1151 }
1152
1153
1154 /* Return branch condition mask to implement a compare and branch
1155 specified by CODE. Return -1 for invalid comparisons. */
1156
1157 int
1158 s390_compare_and_branch_condition_mask (rtx code)
1159 {
1160 const int CC0 = 1 << 3;
1161 const int CC1 = 1 << 2;
1162 const int CC2 = 1 << 1;
1163
1164 switch (GET_CODE (code))
1165 {
1166 case EQ:
1167 return CC0;
1168 case NE:
1169 return CC1 | CC2;
1170 case LT:
1171 case LTU:
1172 return CC1;
1173 case GT:
1174 case GTU:
1175 return CC2;
1176 case LE:
1177 case LEU:
1178 return CC0 | CC1;
1179 case GE:
1180 case GEU:
1181 return CC0 | CC2;
1182 default:
1183 gcc_unreachable ();
1184 }
1185 return -1;
1186 }
1187
1188 /* If INV is false, return assembler mnemonic string to implement
1189 a branch specified by CODE. If INV is true, return mnemonic
1190 for the corresponding inverted branch. */
1191
1192 static const char *
1193 s390_branch_condition_mnemonic (rtx code, int inv)
1194 {
1195 int mask;
1196
1197 static const char *const mnemonic[16] =
1198 {
1199 NULL, "o", "h", "nle",
1200 "l", "nhe", "lh", "ne",
1201 "e", "nlh", "he", "nl",
1202 "le", "nh", "no", NULL
1203 };
1204
1205 if (GET_CODE (XEXP (code, 0)) == REG
1206 && REGNO (XEXP (code, 0)) == CC_REGNUM
1207 && XEXP (code, 1) == const0_rtx)
1208 mask = s390_branch_condition_mask (code);
1209 else
1210 mask = s390_compare_and_branch_condition_mask (code);
1211
1212 gcc_assert (mask >= 0);
1213
1214 if (inv)
1215 mask ^= 15;
1216
1217 gcc_assert (mask >= 1 && mask <= 14);
1218
1219 return mnemonic[mask];
1220 }
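/* Worked example (illustrative): for an LT comparison in CCSmode,
   s390_branch_condition_mask returns CC1 == 1 << 2 == 4, so the
   mnemonic is "l" ("branch on low"); for the inverted branch the mask
   becomes 4 ^ 15 == 11, which selects "nl".  */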
1221
1222 /* Return the part of op which has a value different from def.
1223 The size of the part is determined by mode.
1224 Use this function only if you already know that op really
1225 contains such a part. */
1226
1227 unsigned HOST_WIDE_INT
1228 s390_extract_part (rtx op, enum machine_mode mode, int def)
1229 {
1230 unsigned HOST_WIDE_INT value = 0;
1231 int max_parts = HOST_BITS_PER_WIDE_INT / GET_MODE_BITSIZE (mode);
1232 int part_bits = GET_MODE_BITSIZE (mode);
1233 unsigned HOST_WIDE_INT part_mask
1234 = ((unsigned HOST_WIDE_INT)1 << part_bits) - 1;
1235 int i;
1236
1237 for (i = 0; i < max_parts; i++)
1238 {
1239 if (i == 0)
1240 value = (unsigned HOST_WIDE_INT) INTVAL (op);
1241 else
1242 value >>= part_bits;
1243
1244 if ((value & part_mask) != (def & part_mask))
1245 return value & part_mask;
1246 }
1247
1248 gcc_unreachable ();
1249 }
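/* Worked example (illustrative, assuming a 64-bit HOST_WIDE_INT):
   only the second-lowest halfword of 0x12340000 differs from 0, so its
   value is returned.  Guarded out, never compiled.  */
#if 0
  gcc_assert (s390_extract_part (GEN_INT (0x12340000), HImode, 0)
              == 0x1234);
#endif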
1250
1251 /* If OP is an integer constant of mode MODE with exactly one
1252 part of mode PART_MODE unequal to DEF, return the number of that
1253 part. Otherwise, return -1. */
1254
1255 int
1256 s390_single_part (rtx op,
1257 enum machine_mode mode,
1258 enum machine_mode part_mode,
1259 int def)
1260 {
1261 unsigned HOST_WIDE_INT value = 0;
1262 int n_parts = GET_MODE_SIZE (mode) / GET_MODE_SIZE (part_mode);
1263 unsigned HOST_WIDE_INT part_mask
1264 = ((unsigned HOST_WIDE_INT)1 << GET_MODE_BITSIZE (part_mode)) - 1;
1265 int i, part = -1;
1266
1267 if (GET_CODE (op) != CONST_INT)
1268 return -1;
1269
1270 for (i = 0; i < n_parts; i++)
1271 {
1272 if (i == 0)
1273 value = (unsigned HOST_WIDE_INT) INTVAL (op);
1274 else
1275 value >>= GET_MODE_BITSIZE (part_mode);
1276
1277 if ((value & part_mask) != (def & part_mask))
1278 {
1279 if (part != -1)
1280 return -1;
1281 else
1282 part = i;
1283 }
1284 }
1285 return part == -1 ? -1 : n_parts - 1 - part;
1286 }
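/* Worked example (illustrative): 0x00ff0000 in SImode has exactly one
   QImode part different from 0; counting parts from the most
   significant byte, that is part 1.  Guarded out, never compiled.  */
#if 0
  gcc_assert (s390_single_part (GEN_INT (0x00ff0000), SImode, QImode, 0)
              == 1);
#endif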
1287
1288 /* Return true if IN contains a contiguous bitfield in the lower SIZE
1289 bits and no other bits are set in IN. POS and LENGTH can be used
1290 to obtain the start position and the length of the bitfield.
1291
1292 POS gives the position of the first bit of the bitfield counting
1293 from the lowest order bit starting with zero. In order to use this
1294 value for S/390 instructions this has to be converted to "bits big
1295 endian" style. */
1296
1297 bool
1298 s390_contiguous_bitmask_p (unsigned HOST_WIDE_INT in, int size,
1299 int *pos, int *length)
1300 {
1301 int tmp_pos = 0;
1302 int tmp_length = 0;
1303 int i;
1304 unsigned HOST_WIDE_INT mask = 1ULL;
1305 bool contiguous = false;
1306
1307 for (i = 0; i < size; mask <<= 1, i++)
1308 {
1309 if (contiguous)
1310 {
1311 if (mask & in)
1312 tmp_length++;
1313 else
1314 break;
1315 }
1316 else
1317 {
1318 if (mask & in)
1319 {
1320 contiguous = true;
1321 tmp_length++;
1322 }
1323 else
1324 tmp_pos++;
1325 }
1326 }
1327
1328 if (!tmp_length)
1329 return false;
1330
1331 /* Calculate a mask for all bits beyond the contiguous bits. */
1332 mask = (-1LL & ~(((1ULL << (tmp_length + tmp_pos - 1)) << 1) - 1));
1333
1334 if (mask & in)
1335 return false;
1336
1337 if (tmp_length + tmp_pos - 1 > size)
1338 return false;
1339
1340 if (length)
1341 *length = tmp_length;
1342
1343 if (pos)
1344 *pos = tmp_pos;
1345
1346 return true;
1347 }
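/* Worked examples (illustrative): 0xf0 is a single contiguous group of
   four bits starting at bit 4, whereas 0x0f0f contains two separate
   groups and is rejected.  Guarded out, never compiled.  */
#if 0
  int pos, len;
  gcc_assert (s390_contiguous_bitmask_p (0xf0, 32, &pos, &len)
              && pos == 4 && len == 4);
  gcc_assert (!s390_contiguous_bitmask_p (0x0f0f, 32, &pos, &len));
#endif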
1348
1349 /* Check whether a rotate of ROTL followed by an AND of CONTIG is
1350 equivalent to a shift followed by the AND. In particular, CONTIG
1351 should not overlap the (rotated) bit 0/bit 63 gap. Negative values
1352 for ROTL indicate a rotate to the right. */
1353
1354 bool
1355 s390_extzv_shift_ok (int bitsize, int rotl, unsigned HOST_WIDE_INT contig)
1356 {
1357 int pos, len;
1358 bool ok;
1359
1360 ok = s390_contiguous_bitmask_p (contig, bitsize, &pos, &len);
1361 gcc_assert (ok);
1362
1363 return ((rotl >= 0 && rotl <= pos)
1364 || (rotl < 0 && -rotl <= bitsize - len - pos));
1365 }
1366
1367 /* Check whether we can (and want to) split a double-word
1368 move in mode MODE from SRC to DST into two single-word
1369 moves, moving the subword FIRST_SUBWORD first. */
1370
1371 bool
1372 s390_split_ok_p (rtx dst, rtx src, enum machine_mode mode, int first_subword)
1373 {
1374 /* Floating point registers cannot be split. */
1375 if (FP_REG_P (src) || FP_REG_P (dst))
1376 return false;
1377
1378 /* We don't need to split if operands are directly accessible. */
1379 if (s_operand (src, mode) || s_operand (dst, mode))
1380 return false;
1381
1382 /* Non-offsettable memory references cannot be split. */
1383 if ((GET_CODE (src) == MEM && !offsettable_memref_p (src))
1384 || (GET_CODE (dst) == MEM && !offsettable_memref_p (dst)))
1385 return false;
1386
1387 /* Moving the first subword must not clobber a register
1388 needed to move the second subword. */
1389 if (register_operand (dst, mode))
1390 {
1391 rtx subreg = operand_subword (dst, first_subword, 0, mode);
1392 if (reg_overlap_mentioned_p (subreg, src))
1393 return false;
1394 }
1395
1396 return true;
1397 }
1398
1399 /* Return true if it can be proven that [MEM1, MEM1 + SIZE]
1400 and [MEM2, MEM2 + SIZE] do overlap and false
1401 otherwise. */
1402
1403 bool
1404 s390_overlap_p (rtx mem1, rtx mem2, HOST_WIDE_INT size)
1405 {
1406 rtx addr1, addr2, addr_delta;
1407 HOST_WIDE_INT delta;
1408
1409 if (GET_CODE (mem1) != MEM || GET_CODE (mem2) != MEM)
1410 return true;
1411
1412 if (size == 0)
1413 return false;
1414
1415 addr1 = XEXP (mem1, 0);
1416 addr2 = XEXP (mem2, 0);
1417
1418 addr_delta = simplify_binary_operation (MINUS, Pmode, addr2, addr1);
1419
1420 /* This overlapping check is used by peepholes merging memory block operations.
1421 Overlapping operations would otherwise be recognized by the S/390 hardware
1422 and would fall back to a slower implementation. Allowing overlapping
1423 operations would lead to slow code but not to wrong code. Therefore we are
1424 somewhat optimistic if we cannot prove that the memory blocks are
1425 overlapping.
1426 That's why we return false here although this may accept operations on
1427 overlapping memory areas. */
1428 if (!addr_delta || GET_CODE (addr_delta) != CONST_INT)
1429 return false;
1430
1431 delta = INTVAL (addr_delta);
1432
1433 if (delta == 0
1434 || (delta > 0 && delta < size)
1435 || (delta < 0 && -delta < size))
1436 return true;
1437
1438 return false;
1439 }
1440
1441 /* Check whether the address of memory reference MEM2 equals exactly
1442 the address of memory reference MEM1 plus DELTA. Return true if
1443 we can prove this to be the case, false otherwise. */
1444
1445 bool
1446 s390_offset_p (rtx mem1, rtx mem2, rtx delta)
1447 {
1448 rtx addr1, addr2, addr_delta;
1449
1450 if (GET_CODE (mem1) != MEM || GET_CODE (mem2) != MEM)
1451 return false;
1452
1453 addr1 = XEXP (mem1, 0);
1454 addr2 = XEXP (mem2, 0);
1455
1456 addr_delta = simplify_binary_operation (MINUS, Pmode, addr2, addr1);
1457 if (!addr_delta || !rtx_equal_p (addr_delta, delta))
1458 return false;
1459
1460 return true;
1461 }
1462
1463 /* Expand logical operator CODE in mode MODE with operands OPERANDS. */
1464
1465 void
1466 s390_expand_logical_operator (enum rtx_code code, enum machine_mode mode,
1467 rtx *operands)
1468 {
1469 enum machine_mode wmode = mode;
1470 rtx dst = operands[0];
1471 rtx src1 = operands[1];
1472 rtx src2 = operands[2];
1473 rtx op, clob, tem;
1474
1475 /* If we cannot handle the operation directly, use a temp register. */
1476 if (!s390_logical_operator_ok_p (operands))
1477 dst = gen_reg_rtx (mode);
1478
1479 /* QImode and HImode patterns make sense only if we have a destination
1480 in memory. Otherwise perform the operation in SImode. */
1481 if ((mode == QImode || mode == HImode) && GET_CODE (dst) != MEM)
1482 wmode = SImode;
1483
1484 /* Widen operands if required. */
1485 if (mode != wmode)
1486 {
1487 if (GET_CODE (dst) == SUBREG
1488 && (tem = simplify_subreg (wmode, dst, mode, 0)) != 0)
1489 dst = tem;
1490 else if (REG_P (dst))
1491 dst = gen_rtx_SUBREG (wmode, dst, 0);
1492 else
1493 dst = gen_reg_rtx (wmode);
1494
1495 if (GET_CODE (src1) == SUBREG
1496 && (tem = simplify_subreg (wmode, src1, mode, 0)) != 0)
1497 src1 = tem;
1498 else if (GET_MODE (src1) != VOIDmode)
1499 src1 = gen_rtx_SUBREG (wmode, force_reg (mode, src1), 0);
1500
1501 if (GET_CODE (src2) == SUBREG
1502 && (tem = simplify_subreg (wmode, src2, mode, 0)) != 0)
1503 src2 = tem;
1504 else if (GET_MODE (src2) != VOIDmode)
1505 src2 = gen_rtx_SUBREG (wmode, force_reg (mode, src2), 0);
1506 }
1507
1508 /* Emit the instruction. */
1509 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_ee (code, wmode, src1, src2));
1510 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
1511 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
1512
1513 /* Fix up the destination if needed. */
1514 if (dst != operands[0])
1515 emit_move_insn (operands[0], gen_lowpart (mode, dst));
1516 }
1517
1518 /* Check whether OPERANDS are OK for a logical operation (AND, IOR, XOR). */
1519
1520 bool
1521 s390_logical_operator_ok_p (rtx *operands)
1522 {
1523 /* If the destination operand is in memory, it needs to coincide
1524 with one of the source operands. After reload, it has to be
1525 the first source operand. */
1526 if (GET_CODE (operands[0]) == MEM)
1527 return rtx_equal_p (operands[0], operands[1])
1528 || (!reload_completed && rtx_equal_p (operands[0], operands[2]));
1529
1530 return true;
1531 }
1532
1533 /* Narrow logical operation CODE of memory operand MEMOP with immediate
1534 operand IMMOP to switch from SS to SI type instructions. */
1535
1536 void
1537 s390_narrow_logical_operator (enum rtx_code code, rtx *memop, rtx *immop)
1538 {
1539 int def = code == AND ? -1 : 0;
1540 HOST_WIDE_INT mask;
1541 int part;
1542
1543 gcc_assert (GET_CODE (*memop) == MEM);
1544 gcc_assert (!MEM_VOLATILE_P (*memop));
1545
1546 mask = s390_extract_part (*immop, QImode, def);
1547 part = s390_single_part (*immop, GET_MODE (*memop), QImode, def);
1548 gcc_assert (part >= 0);
1549
1550 *memop = adjust_address (*memop, QImode, part);
1551 *immop = gen_int_mode (mask, QImode);
1552 }
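/* Worked example (illustrative): an SImode AND of a memory operand with
   0xffffff00 differs from the all-ones default only in its last byte,
   so it can be narrowed to a single-byte (SI-type) operation on byte 3
   with mask 0x00.  The helpers above compute exactly that.  Guarded
   out, never compiled.  */
#if 0
  gcc_assert (s390_single_part (gen_int_mode (0xffffff00, SImode),
                                SImode, QImode, -1) == 3);
  gcc_assert (s390_extract_part (gen_int_mode (0xffffff00, SImode),
                                 QImode, -1) == 0);
#endif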
1553
1554
1555 /* How to allocate a 'struct machine_function'. */
1556
1557 static struct machine_function *
1558 s390_init_machine_status (void)
1559 {
1560 return ggc_alloc_cleared_machine_function ();
1561 }
1562
1563 static void
1564 s390_option_override (void)
1565 {
1566 /* Set up function hooks. */
1567 init_machine_status = s390_init_machine_status;
1568
1569 /* Architecture mode defaults according to ABI. */
1570 if (!(target_flags_explicit & MASK_ZARCH))
1571 {
1572 if (TARGET_64BIT)
1573 target_flags |= MASK_ZARCH;
1574 else
1575 target_flags &= ~MASK_ZARCH;
1576 }
1577
1578 /* Set the march default in case it hasn't been specified on
1579 cmdline. */
1580 if (s390_arch == PROCESSOR_max)
1581 {
1582 s390_arch_string = TARGET_ZARCH? "z900" : "g5";
1583 s390_arch = TARGET_ZARCH ? PROCESSOR_2064_Z900 : PROCESSOR_9672_G5;
1584 s390_arch_flags = processor_flags_table[(int)s390_arch];
1585 }
1586
1587 /* Determine processor to tune for. */
1588 if (s390_tune == PROCESSOR_max)
1589 {
1590 s390_tune = s390_arch;
1591 s390_tune_flags = s390_arch_flags;
1592 }
1593
1594 /* Sanity checks. */
1595 if (TARGET_ZARCH && !TARGET_CPU_ZARCH)
1596 error ("z/Architecture mode not supported on %s", s390_arch_string);
1597 if (TARGET_64BIT && !TARGET_ZARCH)
1598 error ("64-bit ABI not supported in ESA/390 mode");
1599
1600 /* Use hardware DFP if available and not explicitly disabled by
1601 user. E.g. with -m31 -march=z10 -mzarch */
1602 if (!(target_flags_explicit & MASK_HARD_DFP) && TARGET_DFP)
1603 target_flags |= MASK_HARD_DFP;
1604
1605 if (TARGET_HARD_DFP && !TARGET_DFP)
1606 {
1607 if (target_flags_explicit & MASK_HARD_DFP)
1608 {
1609 if (!TARGET_CPU_DFP)
1610 error ("hardware decimal floating point instructions"
1611 " not available on %s", s390_arch_string);
1612 if (!TARGET_ZARCH)
1613 error ("hardware decimal floating point instructions"
1614 " not available in ESA/390 mode");
1615 }
1616 else
1617 target_flags &= ~MASK_HARD_DFP;
1618 }
1619
1620 if ((target_flags_explicit & MASK_SOFT_FLOAT) && TARGET_SOFT_FLOAT)
1621 {
1622 if ((target_flags_explicit & MASK_HARD_DFP) && TARGET_HARD_DFP)
1623 error ("-mhard-dfp can%'t be used in conjunction with -msoft-float");
1624
1625 target_flags &= ~MASK_HARD_DFP;
1626 }
1627
1628 /* Set processor cost function. */
1629 switch (s390_tune)
1630 {
1631 case PROCESSOR_2084_Z990:
1632 s390_cost = &z990_cost;
1633 break;
1634 case PROCESSOR_2094_Z9_109:
1635 s390_cost = &z9_109_cost;
1636 break;
1637 case PROCESSOR_2097_Z10:
1638 s390_cost = &z10_cost;
1639 break;
1640 case PROCESSOR_2817_Z196:
1641 s390_cost = &z196_cost;
1642 break;
1643 case PROCESSOR_2827_ZEC12:
1644 s390_cost = &zEC12_cost;
1645 break;
1646 default:
1647 s390_cost = &z900_cost;
1648 }
1649
1650 if (TARGET_BACKCHAIN && TARGET_PACKED_STACK && TARGET_HARD_FLOAT)
1651 error ("-mbackchain -mpacked-stack -mhard-float are not supported "
1652 "in combination");
1653
1654 if (s390_stack_size)
1655 {
1656 if (s390_stack_guard >= s390_stack_size)
1657 error ("stack size must be greater than the stack guard value");
1658 else if (s390_stack_size > 1 << 16)
1659 error ("stack size must not be greater than 64k");
1660 }
1661 else if (s390_stack_guard)
1662 error ("-mstack-guard implies use of -mstack-size");
1663
1664 #ifdef TARGET_DEFAULT_LONG_DOUBLE_128
1665 if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
1666 target_flags |= MASK_LONG_DOUBLE_128;
1667 #endif
1668
1669 if (s390_tune == PROCESSOR_2097_Z10
1670 || s390_tune == PROCESSOR_2817_Z196
1671 || s390_tune == PROCESSOR_2827_ZEC12)
1672 {
1673 maybe_set_param_value (PARAM_MAX_UNROLLED_INSNS, 100,
1674 global_options.x_param_values,
1675 global_options_set.x_param_values);
1676 maybe_set_param_value (PARAM_MAX_UNROLL_TIMES, 32,
1677 global_options.x_param_values,
1678 global_options_set.x_param_values);
1679 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEELED_INSNS, 2000,
1680 global_options.x_param_values,
1681 global_options_set.x_param_values);
1682 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEEL_TIMES, 64,
1683 global_options.x_param_values,
1684 global_options_set.x_param_values);
1685 }
1686
1687 maybe_set_param_value (PARAM_MAX_PENDING_LIST_LENGTH, 256,
1688 global_options.x_param_values,
1689 global_options_set.x_param_values);
1690 /* Values for loop prefetching. */
1691 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE, 256,
1692 global_options.x_param_values,
1693 global_options_set.x_param_values);
1694 maybe_set_param_value (PARAM_L1_CACHE_SIZE, 128,
1695 global_options.x_param_values,
1696 global_options_set.x_param_values);
1697 /* s390 has more than 2 cache levels and the sizes are much larger. Since
1698 we are always running virtualized, assume that we only get a small
1699 part of the caches above L1. */
1700 maybe_set_param_value (PARAM_L2_CACHE_SIZE, 1500,
1701 global_options.x_param_values,
1702 global_options_set.x_param_values);
1703 maybe_set_param_value (PARAM_PREFETCH_MIN_INSN_TO_MEM_RATIO, 2,
1704 global_options.x_param_values,
1705 global_options_set.x_param_values);
1706 maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES, 6,
1707 global_options.x_param_values,
1708 global_options_set.x_param_values);
1709
1710 /* This cannot reside in s390_option_optimization_table since HAVE_prefetch
1711 requires the arch flags to be evaluated already. Since prefetching
1712 is beneficial on s390, we enable it if available. */
1713 if (flag_prefetch_loop_arrays < 0 && HAVE_prefetch && optimize >= 3)
1714 flag_prefetch_loop_arrays = 1;
1715
1716 /* Use the alternative scheduling-pressure algorithm by default. */
1717 maybe_set_param_value (PARAM_SCHED_PRESSURE_ALGORITHM, 2,
1718 global_options.x_param_values,
1719 global_options_set.x_param_values);
1720
1721 if (TARGET_TPF)
1722 {
1723 /* Don't emit DWARF3/4 unless specifically selected. The TPF
1724 debuggers do not yet support DWARF 3/4. */
1725 if (!global_options_set.x_dwarf_strict)
1726 dwarf_strict = 1;
1727 if (!global_options_set.x_dwarf_version)
1728 dwarf_version = 2;
1729 }
1730 }
1731
1732 /* Map for smallest class containing reg regno. */
1733
1734 const enum reg_class regclass_map[FIRST_PSEUDO_REGISTER] =
1735 { GENERAL_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
1736 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
1737 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
1738 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
1739 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
1740 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
1741 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
1742 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
1743 ADDR_REGS, CC_REGS, ADDR_REGS, ADDR_REGS,
1744 ACCESS_REGS, ACCESS_REGS
1745 };
1746
1747 /* Return attribute type of insn. */
1748
1749 static enum attr_type
1750 s390_safe_attr_type (rtx insn)
1751 {
1752 if (recog_memoized (insn) >= 0)
1753 return get_attr_type (insn);
1754 else
1755 return TYPE_NONE;
1756 }
1757
1758 /* Return true if DISP is a valid short displacement. */
1759
1760 static bool
1761 s390_short_displacement (rtx disp)
1762 {
1763 /* No displacement is OK. */
1764 if (!disp)
1765 return true;
1766
1767 /* Without the long displacement facility we don't need to
1768 distinguish between long and short displacement. */
1769 if (!TARGET_LONG_DISPLACEMENT)
1770 return true;
1771
1772 /* Integer displacement in range. */
1773 if (GET_CODE (disp) == CONST_INT)
1774 return INTVAL (disp) >= 0 && INTVAL (disp) < 4096;
1775
1776 /* GOT offset is not OK, the GOT can be large. */
1777 if (GET_CODE (disp) == CONST
1778 && GET_CODE (XEXP (disp, 0)) == UNSPEC
1779 && (XINT (XEXP (disp, 0), 1) == UNSPEC_GOT
1780 || XINT (XEXP (disp, 0), 1) == UNSPEC_GOTNTPOFF))
1781 return false;
1782
1783 /* All other symbolic constants are literal pool references,
1784 which are OK as the literal pool must be small. */
1785 if (GET_CODE (disp) == CONST)
1786 return true;
1787
1788 return false;
1789 }
1790
1791 /* Decompose a RTL expression ADDR for a memory address into
1792 its components, returned in OUT.
1793
1794 Returns false if ADDR is not a valid memory address, true
1795 otherwise. If OUT is NULL, don't return the components,
1796 but check for validity only.
1797
1798 Note: Only addresses in canonical form are recognized.
1799 LEGITIMIZE_ADDRESS should convert non-canonical forms to the
1800 canonical form so that they will be recognized. */
1801
1802 static int
1803 s390_decompose_address (rtx addr, struct s390_address *out)
1804 {
1805 HOST_WIDE_INT offset = 0;
1806 rtx base = NULL_RTX;
1807 rtx indx = NULL_RTX;
1808 rtx disp = NULL_RTX;
1809 rtx orig_disp;
1810 bool pointer = false;
1811 bool base_ptr = false;
1812 bool indx_ptr = false;
1813 bool literal_pool = false;
1814
1815 /* We may need to substitute the literal pool base register into the address
1816 below. However, at this point we do not know which register is going to
1817 be used as base, so we substitute the arg pointer register. This is going
1818 to be treated as holding a pointer below -- it shouldn't be used for any
1819 other purpose. */
1820 rtx fake_pool_base = gen_rtx_REG (Pmode, ARG_POINTER_REGNUM);
1821
1822 /* Decompose address into base + index + displacement. */
1823
1824 if (GET_CODE (addr) == REG || GET_CODE (addr) == UNSPEC)
1825 base = addr;
1826
1827 else if (GET_CODE (addr) == PLUS)
1828 {
1829 rtx op0 = XEXP (addr, 0);
1830 rtx op1 = XEXP (addr, 1);
1831 enum rtx_code code0 = GET_CODE (op0);
1832 enum rtx_code code1 = GET_CODE (op1);
1833
1834 if (code0 == REG || code0 == UNSPEC)
1835 {
1836 if (code1 == REG || code1 == UNSPEC)
1837 {
1838 indx = op0; /* index + base */
1839 base = op1;
1840 }
1841
1842 else
1843 {
1844 base = op0; /* base + displacement */
1845 disp = op1;
1846 }
1847 }
1848
1849 else if (code0 == PLUS)
1850 {
1851 indx = XEXP (op0, 0); /* index + base + disp */
1852 base = XEXP (op0, 1);
1853 disp = op1;
1854 }
1855
1856 else
1857 {
1858 return false;
1859 }
1860 }
1861
1862 else
1863 disp = addr; /* displacement */
1864
1865 /* Extract integer part of displacement. */
1866 orig_disp = disp;
1867 if (disp)
1868 {
1869 if (GET_CODE (disp) == CONST_INT)
1870 {
1871 offset = INTVAL (disp);
1872 disp = NULL_RTX;
1873 }
1874 else if (GET_CODE (disp) == CONST
1875 && GET_CODE (XEXP (disp, 0)) == PLUS
1876 && GET_CODE (XEXP (XEXP (disp, 0), 1)) == CONST_INT)
1877 {
1878 offset = INTVAL (XEXP (XEXP (disp, 0), 1));
1879 disp = XEXP (XEXP (disp, 0), 0);
1880 }
1881 }
1882
1883 /* Strip off CONST here to avoid special case tests later. */
1884 if (disp && GET_CODE (disp) == CONST)
1885 disp = XEXP (disp, 0);
1886
1887 /* We can convert literal pool addresses to
1888 displacements by basing them off the base register. */
1889 if (disp && GET_CODE (disp) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (disp))
1890 {
1891 /* Either base or index must be free to hold the base register. */
1892 if (!base)
1893 base = fake_pool_base, literal_pool = true;
1894 else if (!indx)
1895 indx = fake_pool_base, literal_pool = true;
1896 else
1897 return false;
1898
1899 /* Mark up the displacement. */
1900 disp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, disp),
1901 UNSPEC_LTREL_OFFSET);
1902 }
1903
1904 /* Validate base register. */
1905 if (base)
1906 {
1907 if (GET_CODE (base) == UNSPEC)
1908 switch (XINT (base, 1))
1909 {
1910 case UNSPEC_LTREF:
1911 if (!disp)
1912 disp = gen_rtx_UNSPEC (Pmode,
1913 gen_rtvec (1, XVECEXP (base, 0, 0)),
1914 UNSPEC_LTREL_OFFSET);
1915 else
1916 return false;
1917
1918 base = XVECEXP (base, 0, 1);
1919 break;
1920
1921 case UNSPEC_LTREL_BASE:
1922 if (XVECLEN (base, 0) == 1)
1923 base = fake_pool_base, literal_pool = true;
1924 else
1925 base = XVECEXP (base, 0, 1);
1926 break;
1927
1928 default:
1929 return false;
1930 }
1931
1932 if (!REG_P (base)
1933 || (GET_MODE (base) != SImode
1934 && GET_MODE (base) != Pmode))
1935 return false;
1936
1937 if (REGNO (base) == STACK_POINTER_REGNUM
1938 || REGNO (base) == FRAME_POINTER_REGNUM
1939 || ((reload_completed || reload_in_progress)
1940 && frame_pointer_needed
1941 && REGNO (base) == HARD_FRAME_POINTER_REGNUM)
1942 || REGNO (base) == ARG_POINTER_REGNUM
1943 || (flag_pic
1944 && REGNO (base) == PIC_OFFSET_TABLE_REGNUM))
1945 pointer = base_ptr = true;
1946
1947 if ((reload_completed || reload_in_progress)
1948 && base == cfun->machine->base_reg)
1949 pointer = base_ptr = literal_pool = true;
1950 }
1951
1952 /* Validate index register. */
1953 if (indx)
1954 {
1955 if (GET_CODE (indx) == UNSPEC)
1956 switch (XINT (indx, 1))
1957 {
1958 case UNSPEC_LTREF:
1959 if (!disp)
1960 disp = gen_rtx_UNSPEC (Pmode,
1961 gen_rtvec (1, XVECEXP (indx, 0, 0)),
1962 UNSPEC_LTREL_OFFSET);
1963 else
1964 return false;
1965
1966 indx = XVECEXP (indx, 0, 1);
1967 break;
1968
1969 case UNSPEC_LTREL_BASE:
1970 if (XVECLEN (indx, 0) == 1)
1971 indx = fake_pool_base, literal_pool = true;
1972 else
1973 indx = XVECEXP (indx, 0, 1);
1974 break;
1975
1976 default:
1977 return false;
1978 }
1979
1980 if (!REG_P (indx)
1981 || (GET_MODE (indx) != SImode
1982 && GET_MODE (indx) != Pmode))
1983 return false;
1984
1985 if (REGNO (indx) == STACK_POINTER_REGNUM
1986 || REGNO (indx) == FRAME_POINTER_REGNUM
1987 || ((reload_completed || reload_in_progress)
1988 && frame_pointer_needed
1989 && REGNO (indx) == HARD_FRAME_POINTER_REGNUM)
1990 || REGNO (indx) == ARG_POINTER_REGNUM
1991 || (flag_pic
1992 && REGNO (indx) == PIC_OFFSET_TABLE_REGNUM))
1993 pointer = indx_ptr = true;
1994
1995 if ((reload_completed || reload_in_progress)
1996 && indx == cfun->machine->base_reg)
1997 pointer = indx_ptr = literal_pool = true;
1998 }
1999
2000 /* Prefer to use pointer as base, not index. */
2001 if (base && indx && !base_ptr
2002 && (indx_ptr || (!REG_POINTER (base) && REG_POINTER (indx))))
2003 {
2004 rtx tmp = base;
2005 base = indx;
2006 indx = tmp;
2007 }
2008
2009 /* Validate displacement. */
2010 if (!disp)
2011 {
2012 /* If virtual registers are involved, the displacement will change later
2013 anyway as the virtual registers get eliminated. This could make a
2014 valid displacement invalid, but it is more likely to make an invalid
2015 displacement valid, because we sometimes access the register save area
2016 via negative offsets to one of those registers.
2017 Thus we don't check the displacement for validity here. If after
2018 elimination the displacement turns out to be invalid after all,
2019 this is fixed up by reload in any case. */
2020 /* LRA always keeps displacements up to date, and we need to know
2021 that the displacement is correct throughout LRA, not only at the
2022 final elimination. */
2023 if (lra_in_progress
2024 || (base != arg_pointer_rtx
2025 && indx != arg_pointer_rtx
2026 && base != return_address_pointer_rtx
2027 && indx != return_address_pointer_rtx
2028 && base != frame_pointer_rtx
2029 && indx != frame_pointer_rtx
2030 && base != virtual_stack_vars_rtx
2031 && indx != virtual_stack_vars_rtx))
2032 if (!DISP_IN_RANGE (offset))
2033 return false;
2034 }
2035 else
2036 {
2037 /* All the special cases are pointers. */
2038 pointer = true;
2039
2040 /* In the small-PIC case, the linker converts @GOT
2041 and @GOTNTPOFF offsets to possible displacements. */
2042 if (GET_CODE (disp) == UNSPEC
2043 && (XINT (disp, 1) == UNSPEC_GOT
2044 || XINT (disp, 1) == UNSPEC_GOTNTPOFF)
2045 && flag_pic == 1)
2046 {
2047 ;
2048 }
2049
2050 /* Accept pool label offsets. */
2051 else if (GET_CODE (disp) == UNSPEC
2052 && XINT (disp, 1) == UNSPEC_POOL_OFFSET)
2053 ;
2054
2055 /* Accept literal pool references. */
2056 else if (GET_CODE (disp) == UNSPEC
2057 && XINT (disp, 1) == UNSPEC_LTREL_OFFSET)
2058 {
2059 /* In case CSE pulled a non-literal-pool reference out of
2060 the pool, we have to reject the address. This is
2061 especially important when loading the GOT pointer on
2062 non-zarch CPUs. In this case the literal pool contains an
2063 lt-relative offset to the _GLOBAL_OFFSET_TABLE_ label which
2064 will most likely exceed the displacement. */
2065 if (GET_CODE (XVECEXP (disp, 0, 0)) != SYMBOL_REF
2066 || !CONSTANT_POOL_ADDRESS_P (XVECEXP (disp, 0, 0)))
2067 return false;
2068
2069 orig_disp = gen_rtx_CONST (Pmode, disp);
2070 if (offset)
2071 {
2072 /* If we have an offset, make sure it does not
2073 exceed the size of the constant pool entry. */
2074 rtx sym = XVECEXP (disp, 0, 0);
2075 if (offset >= GET_MODE_SIZE (get_pool_mode (sym)))
2076 return false;
2077
2078 orig_disp = plus_constant (Pmode, orig_disp, offset);
2079 }
2080 }
2081
2082 else
2083 return false;
2084 }
2085
2086 if (!base && !indx)
2087 pointer = true;
2088
2089 if (out)
2090 {
2091 out->base = base;
2092 out->indx = indx;
2093 out->disp = orig_disp;
2094 out->pointer = pointer;
2095 out->literal_pool = literal_pool;
2096 }
2097
2098 return true;
2099 }
2100
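/* For illustration only (a sketch, not exercised by the surrounding code):
   among others, the decomposition above accepts the canonical b+i+d address
   shapes, which look like this in RTL (A, X address registers, D a constant):

     (reg A)                                      base only
     (plus (reg A) (const_int D))                 base + displacement
     (plus (reg X) (reg A))                       index + base
     (plus (plus (reg X) (reg A)) (const_int D))  index + base + displacement

   A typical caller inspects just the parts it needs, e.g.

     struct s390_address ad;
     if (s390_decompose_address (XEXP (mem, 0), &ad) && !ad.indx)
       ... use ad.base and ad.disp ...  */
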
2101 /* Decompose a RTL expression OP for a shift count into its components,
2102 and return the base register in BASE and the offset in OFFSET.
2103
2104 Return true if OP is a valid shift count, false if not. */
2105
2106 bool
2107 s390_decompose_shift_count (rtx op, rtx *base, HOST_WIDE_INT *offset)
2108 {
2109 HOST_WIDE_INT off = 0;
2110
2111 /* We can have an integer constant, an address register,
2112 or a sum of the two. */
2113 if (GET_CODE (op) == CONST_INT)
2114 {
2115 off = INTVAL (op);
2116 op = NULL_RTX;
2117 }
2118 if (op && GET_CODE (op) == PLUS && GET_CODE (XEXP (op, 1)) == CONST_INT)
2119 {
2120 off = INTVAL (XEXP (op, 1));
2121 op = XEXP (op, 0);
2122 }
2123 while (op && GET_CODE (op) == SUBREG)
2124 op = SUBREG_REG (op);
2125
2126 if (op && GET_CODE (op) != REG)
2127 return false;
2128
2129 if (offset)
2130 *offset = off;
2131 if (base)
2132 *base = op;
2133
2134 return true;
2135 }
2136
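/* For illustration only (a sketch): shift count operands accepted by the
   decomposition above include, for example:

     (const_int 3)                    constant shift amount, no base
     (reg A)                          base register, offset 0
     (plus (reg A) (const_int 15))    base register plus constant
     (subreg:SI (reg:DI A) 4)         SUBREGs around the register are stripped  */
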
2137
2138 /* Return true if OP is a valid address without index. */
2139
2140 bool
2141 s390_legitimate_address_without_index_p (rtx op)
2142 {
2143 struct s390_address addr;
2144
2145 if (!s390_decompose_address (XEXP (op, 0), &addr))
2146 return false;
2147 if (addr.indx)
2148 return false;
2149
2150 return true;
2151 }
2152
2153
2154 /* Return TRUE if ADDR is an operand valid for a load/store relative
2155 instruction. Be aware that the alignment of the operand needs to
2156 be checked separately.
2157 Valid addresses are single references or a sum of a reference and a
2158 constant integer. Return these parts in SYMREF and ADDEND. You can
2159 pass NULL in SYMREF and/or ADDEND if you are not interested in these
2160 values. Literal pool references are *not* considered symbol
2161 references. */
2162
2163 static bool
2164 s390_loadrelative_operand_p (rtx addr, rtx *symref, HOST_WIDE_INT *addend)
2165 {
2166 HOST_WIDE_INT tmpaddend = 0;
2167
2168 if (GET_CODE (addr) == CONST)
2169 addr = XEXP (addr, 0);
2170
2171 if (GET_CODE (addr) == PLUS)
2172 {
2173 if (!CONST_INT_P (XEXP (addr, 1)))
2174 return false;
2175
2176 tmpaddend = INTVAL (XEXP (addr, 1));
2177 addr = XEXP (addr, 0);
2178 }
2179
2180 if ((GET_CODE (addr) == SYMBOL_REF && !CONSTANT_POOL_ADDRESS_P (addr))
2181 || (GET_CODE (addr) == UNSPEC
2182 && (XINT (addr, 1) == UNSPEC_GOTENT
2183 || (TARGET_CPU_ZARCH && XINT (addr, 1) == UNSPEC_PLT))))
2184 {
2185 if (symref)
2186 *symref = addr;
2187 if (addend)
2188 *addend = tmpaddend;
2189
2190 return true;
2191 }
2192 return false;
2193 }
2194
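/* For illustration only (a sketch): operands accepted by
   s390_loadrelative_operand_p include, for example:

     (symbol_ref "foo")                                  addend 0
     (const (plus (symbol_ref "foo") (const_int 16)))    addend 16

   together with UNSPEC_GOTENT references (and UNSPEC_PLT on zarch);
   literal pool SYMBOL_REFs are rejected.  */
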
2195 /* Return true if the address in OP is valid for constraint letter C
2196 if wrapped in a MEM rtx. LIT_POOL_OK specifies whether literal
2197 pool MEMs should be accepted. Only the Q, R, S, T constraint
2198 letters are allowed for C. */
2199
2200 static int
2201 s390_check_qrst_address (char c, rtx op, bool lit_pool_ok)
2202 {
2203 struct s390_address addr;
2204 bool decomposed = false;
2205
2206 /* This check makes sure that no symbolic addresses (except literal
2207 pool references) are accepted by the R or T constraints. */
2208 if (s390_loadrelative_operand_p (op, NULL, NULL))
2209 return 0;
2210
2211 /* Ensure literal pool references are only accepted if LIT_POOL_OK. */
2212 if (!lit_pool_ok)
2213 {
2214 if (!s390_decompose_address (op, &addr))
2215 return 0;
2216 if (addr.literal_pool)
2217 return 0;
2218 decomposed = true;
2219 }
2220
2221 switch (c)
2222 {
2223 case 'Q': /* no index short displacement */
2224 if (!decomposed && !s390_decompose_address (op, &addr))
2225 return 0;
2226 if (addr.indx)
2227 return 0;
2228 if (!s390_short_displacement (addr.disp))
2229 return 0;
2230 break;
2231
2232 case 'R': /* with index short displacement */
2233 if (TARGET_LONG_DISPLACEMENT)
2234 {
2235 if (!decomposed && !s390_decompose_address (op, &addr))
2236 return 0;
2237 if (!s390_short_displacement (addr.disp))
2238 return 0;
2239 }
2240 /* Any invalid address here will be fixed up by reload,
2241 so accept it for the most generic constraint. */
2242 break;
2243
2244 case 'S': /* no index long displacement */
2245 if (!TARGET_LONG_DISPLACEMENT)
2246 return 0;
2247 if (!decomposed && !s390_decompose_address (op, &addr))
2248 return 0;
2249 if (addr.indx)
2250 return 0;
2251 if (s390_short_displacement (addr.disp))
2252 return 0;
2253 break;
2254
2255 case 'T': /* with index long displacement */
2256 if (!TARGET_LONG_DISPLACEMENT)
2257 return 0;
2258 /* Any invalid address here will be fixed up by reload,
2259 so accept it for the most generic constraint. */
2260 if ((decomposed || s390_decompose_address (op, &addr))
2261 && s390_short_displacement (addr.disp))
2262 return 0;
2263 break;
2264 default:
2265 return 0;
2266 }
2267 return 1;
2268 }
2269
2270
2271 /* Evaluates constraint strings described by the regular expression
2272 ([A|B|Z](Q|R|S|T))|U|W|Y and returns 1 if OP is a valid operand for
2273 the constraint given in STR, and 0 otherwise. */
2274
2275 int
2276 s390_mem_constraint (const char *str, rtx op)
2277 {
2278 char c = str[0];
2279
2280 switch (c)
2281 {
2282 case 'A':
2283 /* Check for offsettable variants of memory constraints. */
2284 if (!MEM_P (op) || MEM_VOLATILE_P (op))
2285 return 0;
2286 if ((reload_completed || reload_in_progress)
2287 ? !offsettable_memref_p (op) : !offsettable_nonstrict_memref_p (op))
2288 return 0;
2289 return s390_check_qrst_address (str[1], XEXP (op, 0), true);
2290 case 'B':
2291 /* Check for non-literal-pool variants of memory constraints. */
2292 if (!MEM_P (op))
2293 return 0;
2294 return s390_check_qrst_address (str[1], XEXP (op, 0), false);
2295 case 'Q':
2296 case 'R':
2297 case 'S':
2298 case 'T':
2299 if (GET_CODE (op) != MEM)
2300 return 0;
2301 return s390_check_qrst_address (c, XEXP (op, 0), true);
2302 case 'U':
2303 return (s390_check_qrst_address ('Q', op, true)
2304 || s390_check_qrst_address ('R', op, true));
2305 case 'W':
2306 return (s390_check_qrst_address ('S', op, true)
2307 || s390_check_qrst_address ('T', op, true));
2308 case 'Y':
2309 /* Simply check for the basic form of a shift count. Reload will
2310 take care of making sure we have a proper base register. */
2311 if (!s390_decompose_shift_count (op, NULL, NULL))
2312 return 0;
2313 break;
2314 case 'Z':
2315 return s390_check_qrst_address (str[1], op, true);
2316 default:
2317 return 0;
2318 }
2319 return 1;
2320 }
2321
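/* For illustration only -- a short summary of how the constraint letters
   handled above combine (see the switch in s390_mem_constraint):

     Q/R/S/T   memory operand whose address is checked as that letter
     A<l>      offsettable memory, address then checked as letter <l>
     B<l>      memory outside the literal pool, checked as letter <l>
     Z<l>      a bare address (no MEM) checked as letter <l>
     U         bare address valid as Q or R
     W         bare address valid as S or T
     Y         shift count operand  */
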
2322
2323 /* Evaluates constraint strings starting with letter O. Input
2324 parameter C is the second letter following the "O" in the constraint
2325 string. Returns 1 if VALUE meets the respective constraint and 0
2326 otherwise. */
2327
2328 int
2329 s390_O_constraint_str (const char c, HOST_WIDE_INT value)
2330 {
2331 if (!TARGET_EXTIMM)
2332 return 0;
2333
2334 switch (c)
2335 {
2336 case 's':
2337 return trunc_int_for_mode (value, SImode) == value;
2338
2339 case 'p':
2340 return value == 0
2341 || s390_single_part (GEN_INT (value), DImode, SImode, 0) == 1;
2342
2343 case 'n':
2344 return s390_single_part (GEN_INT (value - 1), DImode, SImode, -1) == 1;
2345
2346 default:
2347 gcc_unreachable ();
2348 }
2349 }
2350
2351
2352 /* Evaluates constraint strings starting with letter N. Parameter STR
2353 contains the letters following letter "N" in the constraint string.
2354 Returns true if VALUE matches the constraint. */
2355
2356 int
2357 s390_N_constraint_str (const char *str, HOST_WIDE_INT value)
2358 {
2359 enum machine_mode mode, part_mode;
2360 int def;
2361 int part, part_goal;
2362
2363
2364 if (str[0] == 'x')
2365 part_goal = -1;
2366 else
2367 part_goal = str[0] - '0';
2368
2369 switch (str[1])
2370 {
2371 case 'Q':
2372 part_mode = QImode;
2373 break;
2374 case 'H':
2375 part_mode = HImode;
2376 break;
2377 case 'S':
2378 part_mode = SImode;
2379 break;
2380 default:
2381 return 0;
2382 }
2383
2384 switch (str[2])
2385 {
2386 case 'H':
2387 mode = HImode;
2388 break;
2389 case 'S':
2390 mode = SImode;
2391 break;
2392 case 'D':
2393 mode = DImode;
2394 break;
2395 default:
2396 return 0;
2397 }
2398
2399 switch (str[3])
2400 {
2401 case '0':
2402 def = 0;
2403 break;
2404 case 'F':
2405 def = -1;
2406 break;
2407 default:
2408 return 0;
2409 }
2410
2411 if (GET_MODE_SIZE (mode) <= GET_MODE_SIZE (part_mode))
2412 return 0;
2413
2414 part = s390_single_part (GEN_INT (value), mode, part_mode, def);
2415 if (part < 0)
2416 return 0;
2417 if (part_goal != -1 && part_goal != part)
2418 return 0;
2419
2420 return 1;
2421 }
2422
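/* For illustration only (a sketch): decoding of an N constraint string by
   the function above.  The string "3HD0" (the part after the "N") means
   part number 3 (str[0]), part mode HImode (str[1]), full mode DImode
   (str[2]), and all remaining parts zero (str[3] == '0'); an 'x' as the
   first character accepts any single part.  */
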
2423
2424 /* Returns true if the input parameter VALUE is a float zero. */
2425
2426 int
2427 s390_float_const_zero_p (rtx value)
2428 {
2429 return (GET_MODE_CLASS (GET_MODE (value)) == MODE_FLOAT
2430 && value == CONST0_RTX (GET_MODE (value)));
2431 }
2432
2433 /* Implement TARGET_REGISTER_MOVE_COST. */
2434
2435 static int
2436 s390_register_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
2437 reg_class_t from, reg_class_t to)
2438 {
2439 /* On s390, copy between fprs and gprs is expensive as long as no
2440 ldgr/lgdr can be used. */
2441 if ((!TARGET_Z10 || GET_MODE_SIZE (mode) != 8)
2442 && ((reg_classes_intersect_p (from, GENERAL_REGS)
2443 && reg_classes_intersect_p (to, FP_REGS))
2444 || (reg_classes_intersect_p (from, FP_REGS)
2445 && reg_classes_intersect_p (to, GENERAL_REGS))))
2446 return 10;
2447
2448 return 1;
2449 }
2450
2451 /* Implement TARGET_MEMORY_MOVE_COST. */
2452
2453 static int
2454 s390_memory_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
2455 reg_class_t rclass ATTRIBUTE_UNUSED,
2456 bool in ATTRIBUTE_UNUSED)
2457 {
2458 return 1;
2459 }
2460
2461 /* Compute a (partial) cost for rtx X. Return true if the complete
2462 cost has been computed, and false if subexpressions should be
2463 scanned. In either case, *TOTAL contains the cost result.
2464 CODE contains GET_CODE (x), OUTER_CODE contains the code
2465 of the superexpression of x. */
2466
2467 static bool
2468 s390_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
2469 int *total, bool speed ATTRIBUTE_UNUSED)
2470 {
2471 switch (code)
2472 {
2473 case CONST:
2474 case CONST_INT:
2475 case LABEL_REF:
2476 case SYMBOL_REF:
2477 case CONST_DOUBLE:
2478 case MEM:
2479 *total = 0;
2480 return true;
2481
2482 case ASHIFT:
2483 case ASHIFTRT:
2484 case LSHIFTRT:
2485 case ROTATE:
2486 case ROTATERT:
2487 case AND:
2488 case IOR:
2489 case XOR:
2490 case NEG:
2491 case NOT:
2492 *total = COSTS_N_INSNS (1);
2493 return false;
2494
2495 case PLUS:
2496 case MINUS:
2497 *total = COSTS_N_INSNS (1);
2498 return false;
2499
2500 case MULT:
2501 switch (GET_MODE (x))
2502 {
2503 case SImode:
2504 {
2505 rtx left = XEXP (x, 0);
2506 rtx right = XEXP (x, 1);
2507 if (GET_CODE (right) == CONST_INT
2508 && CONST_OK_FOR_K (INTVAL (right)))
2509 *total = s390_cost->mhi;
2510 else if (GET_CODE (left) == SIGN_EXTEND)
2511 *total = s390_cost->mh;
2512 else
2513 *total = s390_cost->ms; /* msr, ms, msy */
2514 break;
2515 }
2516 case DImode:
2517 {
2518 rtx left = XEXP (x, 0);
2519 rtx right = XEXP (x, 1);
2520 if (TARGET_ZARCH)
2521 {
2522 if (GET_CODE (right) == CONST_INT
2523 && CONST_OK_FOR_K (INTVAL (right)))
2524 *total = s390_cost->mghi;
2525 else if (GET_CODE (left) == SIGN_EXTEND)
2526 *total = s390_cost->msgf;
2527 else
2528 *total = s390_cost->msg; /* msgr, msg */
2529 }
2530 else /* TARGET_31BIT */
2531 {
2532 if (GET_CODE (left) == SIGN_EXTEND
2533 && GET_CODE (right) == SIGN_EXTEND)
2534 /* mulsidi case: mr, m */
2535 *total = s390_cost->m;
2536 else if (GET_CODE (left) == ZERO_EXTEND
2537 && GET_CODE (right) == ZERO_EXTEND
2538 && TARGET_CPU_ZARCH)
2539 /* umulsidi case: ml, mlr */
2540 *total = s390_cost->ml;
2541 else
2542 /* Complex calculation is required. */
2543 *total = COSTS_N_INSNS (40);
2544 }
2545 break;
2546 }
2547 case SFmode:
2548 case DFmode:
2549 *total = s390_cost->mult_df;
2550 break;
2551 case TFmode:
2552 *total = s390_cost->mxbr;
2553 break;
2554 default:
2555 return false;
2556 }
2557 return false;
2558
2559 case FMA:
2560 switch (GET_MODE (x))
2561 {
2562 case DFmode:
2563 *total = s390_cost->madbr;
2564 break;
2565 case SFmode:
2566 *total = s390_cost->maebr;
2567 break;
2568 default:
2569 return false;
2570 }
2571 /* Negate in the third argument is free: FMSUB. */
2572 if (GET_CODE (XEXP (x, 2)) == NEG)
2573 {
2574 *total += (rtx_cost (XEXP (x, 0), FMA, 0, speed)
2575 + rtx_cost (XEXP (x, 1), FMA, 1, speed)
2576 + rtx_cost (XEXP (XEXP (x, 2), 0), FMA, 2, speed));
2577 return true;
2578 }
2579 return false;
2580
2581 case UDIV:
2582 case UMOD:
2583 if (GET_MODE (x) == TImode) /* 128 bit division */
2584 *total = s390_cost->dlgr;
2585 else if (GET_MODE (x) == DImode)
2586 {
2587 rtx right = XEXP (x, 1);
2588 if (GET_CODE (right) == ZERO_EXTEND) /* 64 by 32 bit division */
2589 *total = s390_cost->dlr;
2590 else /* 64 by 64 bit division */
2591 *total = s390_cost->dlgr;
2592 }
2593 else if (GET_MODE (x) == SImode) /* 32 bit division */
2594 *total = s390_cost->dlr;
2595 return false;
2596
2597 case DIV:
2598 case MOD:
2599 if (GET_MODE (x) == DImode)
2600 {
2601 rtx right = XEXP (x, 1);
2602 if (GET_CODE (right) == ZERO_EXTEND) /* 64 by 32 bit division */
2603 if (TARGET_ZARCH)
2604 *total = s390_cost->dsgfr;
2605 else
2606 *total = s390_cost->dr;
2607 else /* 64 by 64 bit division */
2608 *total = s390_cost->dsgr;
2609 }
2610 else if (GET_MODE (x) == SImode) /* 32 bit division */
2611 *total = s390_cost->dlr;
2612 else if (GET_MODE (x) == SFmode)
2613 {
2614 *total = s390_cost->debr;
2615 }
2616 else if (GET_MODE (x) == DFmode)
2617 {
2618 *total = s390_cost->ddbr;
2619 }
2620 else if (GET_MODE (x) == TFmode)
2621 {
2622 *total = s390_cost->dxbr;
2623 }
2624 return false;
2625
2626 case SQRT:
2627 if (GET_MODE (x) == SFmode)
2628 *total = s390_cost->sqebr;
2629 else if (GET_MODE (x) == DFmode)
2630 *total = s390_cost->sqdbr;
2631 else /* TFmode */
2632 *total = s390_cost->sqxbr;
2633 return false;
2634
2635 case SIGN_EXTEND:
2636 case ZERO_EXTEND:
2637 if (outer_code == MULT || outer_code == DIV || outer_code == MOD
2638 || outer_code == PLUS || outer_code == MINUS
2639 || outer_code == COMPARE)
2640 *total = 0;
2641 return false;
2642
2643 case COMPARE:
2644 *total = COSTS_N_INSNS (1);
2645 if (GET_CODE (XEXP (x, 0)) == AND
2646 && GET_CODE (XEXP (x, 1)) == CONST_INT
2647 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
2648 {
2649 rtx op0 = XEXP (XEXP (x, 0), 0);
2650 rtx op1 = XEXP (XEXP (x, 0), 1);
2651 rtx op2 = XEXP (x, 1);
2652
2653 if (memory_operand (op0, GET_MODE (op0))
2654 && s390_tm_ccmode (op1, op2, 0) != VOIDmode)
2655 return true;
2656 if (register_operand (op0, GET_MODE (op0))
2657 && s390_tm_ccmode (op1, op2, 1) != VOIDmode)
2658 return true;
2659 }
2660 return false;
2661
2662 default:
2663 return false;
2664 }
2665 }
2666
2667 /* Return the cost of an address rtx ADDR. */
2668
2669 static int
2670 s390_address_cost (rtx addr, enum machine_mode mode ATTRIBUTE_UNUSED,
2671 addr_space_t as ATTRIBUTE_UNUSED,
2672 bool speed ATTRIBUTE_UNUSED)
2673 {
2674 struct s390_address ad;
2675 if (!s390_decompose_address (addr, &ad))
2676 return 1000;
2677
2678 return ad.indx? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (1);
2679 }
2680
2681 /* If OP is a SYMBOL_REF of a thread-local symbol, return its TLS mode,
2682 otherwise return 0. */
2683
2684 int
2685 tls_symbolic_operand (rtx op)
2686 {
2687 if (GET_CODE (op) != SYMBOL_REF)
2688 return 0;
2689 return SYMBOL_REF_TLS_MODEL (op);
2690 }
2691 \f
2692 /* Split DImode access register reference REG (on 64-bit) into its constituent
2693 low and high parts, and store them into LO and HI. Note that gen_lowpart/
2694 gen_highpart cannot be used as they assume all registers are word-sized,
2695 while our access registers have only half that size. */
2696
2697 void
2698 s390_split_access_reg (rtx reg, rtx *lo, rtx *hi)
2699 {
2700 gcc_assert (TARGET_64BIT);
2701 gcc_assert (ACCESS_REG_P (reg));
2702 gcc_assert (GET_MODE (reg) == DImode);
2703 gcc_assert (!(REGNO (reg) & 1));
2704
2705 *lo = gen_rtx_REG (SImode, REGNO (reg) + 1);
2706 *hi = gen_rtx_REG (SImode, REGNO (reg));
2707 }
2708
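/* For illustration only (a sketch): for a DImode access register pair
   starting at an even register number N, the split above yields

     *hi = (reg:SI N)        high-order half
     *lo = (reg:SI N + 1)    low-order half

   i.e. the two access registers hold the big-endian halves of the
   DImode value.  */
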
2709 /* Return true if OP contains a symbol reference */
2710
2711 bool
2712 symbolic_reference_mentioned_p (rtx op)
2713 {
2714 const char *fmt;
2715 int i;
2716
2717 if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
2718 return 1;
2719
2720 fmt = GET_RTX_FORMAT (GET_CODE (op));
2721 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
2722 {
2723 if (fmt[i] == 'E')
2724 {
2725 int j;
2726
2727 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
2728 if (symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
2729 return 1;
2730 }
2731
2732 else if (fmt[i] == 'e' && symbolic_reference_mentioned_p (XEXP (op, i)))
2733 return 1;
2734 }
2735
2736 return 0;
2737 }
2738
2739 /* Return true if OP contains a reference to a thread-local symbol. */
2740
2741 bool
2742 tls_symbolic_reference_mentioned_p (rtx op)
2743 {
2744 const char *fmt;
2745 int i;
2746
2747 if (GET_CODE (op) == SYMBOL_REF)
2748 return tls_symbolic_operand (op);
2749
2750 fmt = GET_RTX_FORMAT (GET_CODE (op));
2751 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
2752 {
2753 if (fmt[i] == 'E')
2754 {
2755 int j;
2756
2757 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
2758 if (tls_symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
2759 return true;
2760 }
2761
2762 else if (fmt[i] == 'e' && tls_symbolic_reference_mentioned_p (XEXP (op, i)))
2763 return true;
2764 }
2765
2766 return false;
2767 }
2768
2769
2770 /* Return true if OP is a legitimate general operand when
2771 generating PIC code. It is given that flag_pic is on
2772 and that OP satisfies CONSTANT_P or is a CONST_DOUBLE. */
2773
2774 int
2775 legitimate_pic_operand_p (rtx op)
2776 {
2777 /* Accept all non-symbolic constants. */
2778 if (!SYMBOLIC_CONST (op))
2779 return 1;
2780
2781 /* Reject everything else; must be handled
2782 via emit_symbolic_move. */
2783 return 0;
2784 }
2785
2786 /* Returns true if the constant value OP is a legitimate general operand.
2787 It is given that OP satisfies CONSTANT_P or is a CONST_DOUBLE. */
2788
2789 static bool
2790 s390_legitimate_constant_p (enum machine_mode mode, rtx op)
2791 {
2792 /* Accept all non-symbolic constants. */
2793 if (!SYMBOLIC_CONST (op))
2794 return 1;
2795
2796 /* Accept immediate LARL operands. */
2797 if (TARGET_CPU_ZARCH && larl_operand (op, mode))
2798 return 1;
2799
2800 /* Thread-local symbols are never legal constants. This is
2801 so that emit_call knows that computing such addresses
2802 might require a function call. */
2803 if (TLS_SYMBOLIC_CONST (op))
2804 return 0;
2805
2806 /* In the PIC case, symbolic constants must *not* be
2807 forced into the literal pool. We accept them here,
2808 so that they will be handled by emit_symbolic_move. */
2809 if (flag_pic)
2810 return 1;
2811
2812 /* All remaining non-PIC symbolic constants are
2813 forced into the literal pool. */
2814 return 0;
2815 }
2816
2817 /* Determine if it's legal to put X into the constant pool. This
2818 is not possible if X contains the address of a symbol that is
2819 not constant (TLS) or not known at final link time (PIC). */
2820
2821 static bool
2822 s390_cannot_force_const_mem (enum machine_mode mode, rtx x)
2823 {
2824 switch (GET_CODE (x))
2825 {
2826 case CONST_INT:
2827 case CONST_DOUBLE:
2828 /* Accept all non-symbolic constants. */
2829 return false;
2830
2831 case LABEL_REF:
2832 /* Labels are OK iff we are non-PIC. */
2833 return flag_pic != 0;
2834
2835 case SYMBOL_REF:
2836 /* 'Naked' TLS symbol references are never OK,
2837 non-TLS symbols are OK iff we are non-PIC. */
2838 if (tls_symbolic_operand (x))
2839 return true;
2840 else
2841 return flag_pic != 0;
2842
2843 case CONST:
2844 return s390_cannot_force_const_mem (mode, XEXP (x, 0));
2845 case PLUS:
2846 case MINUS:
2847 return s390_cannot_force_const_mem (mode, XEXP (x, 0))
2848 || s390_cannot_force_const_mem (mode, XEXP (x, 1));
2849
2850 case UNSPEC:
2851 switch (XINT (x, 1))
2852 {
2853 /* Only lt-relative or GOT-relative UNSPECs are OK. */
2854 case UNSPEC_LTREL_OFFSET:
2855 case UNSPEC_GOT:
2856 case UNSPEC_GOTOFF:
2857 case UNSPEC_PLTOFF:
2858 case UNSPEC_TLSGD:
2859 case UNSPEC_TLSLDM:
2860 case UNSPEC_NTPOFF:
2861 case UNSPEC_DTPOFF:
2862 case UNSPEC_GOTNTPOFF:
2863 case UNSPEC_INDNTPOFF:
2864 return false;
2865
2866 /* If the literal pool shares the code section, we put
2867 execute template placeholders into the pool as well. */
2868 case UNSPEC_INSN:
2869 return TARGET_CPU_ZARCH;
2870
2871 default:
2872 return true;
2873 }
2874 break;
2875
2876 default:
2877 gcc_unreachable ();
2878 }
2879 }
2880
2881 /* Returns true if the constant value OP is a legitimate general
2882 operand during and after reload. The difference to
2883 legitimate_constant_p is that this function will not accept
2884 a constant that would need to be forced to the literal pool
2885 before it can be used as operand.
2886 This function accepts all constants which can be loaded directly
2887 into a GPR. */
2888
2889 bool
2890 legitimate_reload_constant_p (rtx op)
2891 {
2892 /* Accept la(y) operands. */
2893 if (GET_CODE (op) == CONST_INT
2894 && DISP_IN_RANGE (INTVAL (op)))
2895 return true;
2896
2897 /* Accept l(g)hi/l(g)fi operands. */
2898 if (GET_CODE (op) == CONST_INT
2899 && (CONST_OK_FOR_K (INTVAL (op)) || CONST_OK_FOR_Os (INTVAL (op))))
2900 return true;
2901
2902 /* Accept lliXX operands. */
2903 if (TARGET_ZARCH
2904 && GET_CODE (op) == CONST_INT
2905 && trunc_int_for_mode (INTVAL (op), word_mode) == INTVAL (op)
2906 && s390_single_part (op, word_mode, HImode, 0) >= 0)
2907 return true;
2908
2909 if (TARGET_EXTIMM
2910 && GET_CODE (op) == CONST_INT
2911 && trunc_int_for_mode (INTVAL (op), word_mode) == INTVAL (op)
2912 && s390_single_part (op, word_mode, SImode, 0) >= 0)
2913 return true;
2914
2915 /* Accept larl operands. */
2916 if (TARGET_CPU_ZARCH
2917 && larl_operand (op, VOIDmode))
2918 return true;
2919
2920 /* Accept floating-point zero operands that fit into a single GPR. */
2921 if (GET_CODE (op) == CONST_DOUBLE
2922 && s390_float_const_zero_p (op)
2923 && GET_MODE_SIZE (GET_MODE (op)) <= UNITS_PER_WORD)
2924 return true;
2925
2926 /* Accept double-word operands that can be split. */
2927 if (GET_CODE (op) == CONST_INT
2928 && trunc_int_for_mode (INTVAL (op), word_mode) != INTVAL (op))
2929 {
2930 enum machine_mode dword_mode = word_mode == SImode ? DImode : TImode;
2931 rtx hi = operand_subword (op, 0, 0, dword_mode);
2932 rtx lo = operand_subword (op, 1, 0, dword_mode);
2933 return legitimate_reload_constant_p (hi)
2934 && legitimate_reload_constant_p (lo);
2935 }
2936
2937 /* Everything else cannot be handled without reload. */
2938 return false;
2939 }
2940
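/* For illustration only (a sketch): examples of constants the function
   above accepts without forcing them into the literal pool, assuming a
   zarch target:

     (const_int 2048)          within the la/lay displacement range
     (const_int -30000)        loadable via lhi/lghi
     (const_int 0x3fff0000)    a single non-zero 16-bit part, loadable
                               via one of the lliXX instructions  */
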
2941 /* Returns true if the constant value OP is a legitimate fp operand
2942 during and after reload.
2943 This function accepts all constants which can be loaded directly
2944 into an FPR. */
2945
2946 static bool
2947 legitimate_reload_fp_constant_p (rtx op)
2948 {
2949 /* Accept floating-point zero operands if the load zero instruction
2950 can be used. Prior to z196 the load fp zero instruction caused a
2951 performance penalty if the result is used as BFP number. */
2952 if (TARGET_Z196
2953 && GET_CODE (op) == CONST_DOUBLE
2954 && s390_float_const_zero_p (op))
2955 return true;
2956
2957 return false;
2958 }
2959
2960 /* Given an rtx OP being reloaded into a reg required to be in class RCLASS,
2961 return the class of reg to actually use. */
2962
2963 static reg_class_t
2964 s390_preferred_reload_class (rtx op, reg_class_t rclass)
2965 {
2966 switch (GET_CODE (op))
2967 {
2968 /* Constants we cannot reload into general registers
2969 must be forced into the literal pool. */
2970 case CONST_DOUBLE:
2971 case CONST_INT:
2972 if (reg_class_subset_p (GENERAL_REGS, rclass)
2973 && legitimate_reload_constant_p (op))
2974 return GENERAL_REGS;
2975 else if (reg_class_subset_p (ADDR_REGS, rclass)
2976 && legitimate_reload_constant_p (op))
2977 return ADDR_REGS;
2978 else if (reg_class_subset_p (FP_REGS, rclass)
2979 && legitimate_reload_fp_constant_p (op))
2980 return FP_REGS;
2981 return NO_REGS;
2982
2983 /* If a symbolic constant or a PLUS is reloaded,
2984 it is most likely being used as an address, so
2985 prefer ADDR_REGS. If 'class' is not a superset
2986 of ADDR_REGS, e.g. FP_REGS, reject this reload. */
2987 case CONST:
2988 /* A larl operand with odd addend will get fixed via secondary
2989 reload. So don't request it to be pushed into literal
2990 pool. */
2991 if (TARGET_CPU_ZARCH
2992 && GET_CODE (XEXP (op, 0)) == PLUS
2993 && GET_CODE (XEXP (XEXP(op, 0), 0)) == SYMBOL_REF
2994 && GET_CODE (XEXP (XEXP(op, 0), 1)) == CONST_INT)
2995 {
2996 if (reg_class_subset_p (ADDR_REGS, rclass))
2997 return ADDR_REGS;
2998 else
2999 return NO_REGS;
3000 }
3001 /* fallthrough */
3002 case LABEL_REF:
3003 case SYMBOL_REF:
3004 if (!legitimate_reload_constant_p (op))
3005 return NO_REGS;
3006 /* fallthrough */
3007 case PLUS:
3008 /* load address will be used. */
3009 if (reg_class_subset_p (ADDR_REGS, rclass))
3010 return ADDR_REGS;
3011 else
3012 return NO_REGS;
3013
3014 default:
3015 break;
3016 }
3017
3018 return rclass;
3019 }
3020
3021 /* Return true if ADDR is SYMBOL_REF + addend with addend being a
3022 multiple of ALIGNMENT and the SYMBOL_REF being naturally
3023 aligned. */
3024
3025 bool
3026 s390_check_symref_alignment (rtx addr, HOST_WIDE_INT alignment)
3027 {
3028 HOST_WIDE_INT addend;
3029 rtx symref;
3030
3031 if (!s390_loadrelative_operand_p (addr, &symref, &addend))
3032 return false;
3033
3034 if (addend & (alignment - 1))
3035 return false;
3036
3037 if (GET_CODE (symref) == SYMBOL_REF
3038 && !SYMBOL_REF_NOT_NATURALLY_ALIGNED_P (symref))
3039 return true;
3040
3041 if (GET_CODE (symref) == UNSPEC
3042 && alignment <= UNITS_PER_LONG)
3043 return true;
3044
3045 return false;
3046 }
3047
3048 /* ADDR is moved into REG using larl. If ADDR isn't a valid larl
3049 operand, SCRATCH is used to load the even part of the address, and
3050 one is added afterwards. */
3051
3052 void
3053 s390_reload_larl_operand (rtx reg, rtx addr, rtx scratch)
3054 {
3055 HOST_WIDE_INT addend;
3056 rtx symref;
3057
3058 if (!s390_loadrelative_operand_p (addr, &symref, &addend))
3059 gcc_unreachable ();
3060
3061 if (!(addend & 1))
3062 /* Easy case. The addend is even so larl will do fine. */
3063 emit_move_insn (reg, addr);
3064 else
3065 {
3066 /* We can leave the scratch register untouched if the target
3067 register is a valid base register. */
3068 if (REGNO (reg) < FIRST_PSEUDO_REGISTER
3069 && REGNO_REG_CLASS (REGNO (reg)) == ADDR_REGS)
3070 scratch = reg;
3071
3072 gcc_assert (REGNO (scratch) < FIRST_PSEUDO_REGISTER);
3073 gcc_assert (REGNO_REG_CLASS (REGNO (scratch)) == ADDR_REGS);
3074
3075 if (addend != 1)
3076 emit_move_insn (scratch,
3077 gen_rtx_CONST (Pmode,
3078 gen_rtx_PLUS (Pmode, symref,
3079 GEN_INT (addend - 1))));
3080 else
3081 emit_move_insn (scratch, symref);
3082
3083 /* Increment the address using la in order to avoid clobbering cc. */
3084 s390_load_address (reg, gen_rtx_PLUS (Pmode, scratch, const1_rtx));
3085 }
3086 }
3087
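/* For illustration only (a sketch): for an odd addend the function above
   splits the address, e.g. for sym+5 with REG a valid address register:

     larl  reg, sym+4      ; even part, valid for larl
     la    reg, 1(reg)     ; add the remaining 1 without clobbering cc  */
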
3088 /* Generate what is necessary to move between REG and MEM using
3089 SCRATCH. The direction is given by TOMEM. */
3090
3091 void
3092 s390_reload_symref_address (rtx reg, rtx mem, rtx scratch, bool tomem)
3093 {
3094 /* Reload might have pulled a constant out of the literal pool.
3095 Force it back in. */
3096 if (CONST_INT_P (mem) || GET_CODE (mem) == CONST_DOUBLE
3097 || GET_CODE (mem) == CONST)
3098 mem = force_const_mem (GET_MODE (reg), mem);
3099
3100 gcc_assert (MEM_P (mem));
3101
3102 /* For a load from memory we can leave the scratch register
3103 untouched if the target register is a valid base register. */
3104 if (!tomem
3105 && REGNO (reg) < FIRST_PSEUDO_REGISTER
3106 && REGNO_REG_CLASS (REGNO (reg)) == ADDR_REGS
3107 && GET_MODE (reg) == GET_MODE (scratch))
3108 scratch = reg;
3109
3110 /* Load address into scratch register. Since we can't have a
3111 secondary reload for a secondary reload we have to cover the case
3112 where larl would need a secondary reload here as well. */
3113 s390_reload_larl_operand (scratch, XEXP (mem, 0), scratch);
3114
3115 /* Now we can use a standard load/store to do the move. */
3116 if (tomem)
3117 emit_move_insn (replace_equiv_address (mem, scratch), reg);
3118 else
3119 emit_move_insn (reg, replace_equiv_address (mem, scratch));
3120 }
3121
3122 /* Inform reload about cases where moving X with a mode MODE to a register in
3123 RCLASS requires an extra scratch or immediate register. Return the class
3124 needed for the immediate register. */
3125
3126 static reg_class_t
3127 s390_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
3128 enum machine_mode mode, secondary_reload_info *sri)
3129 {
3130 enum reg_class rclass = (enum reg_class) rclass_i;
3131
3132 /* Intermediate register needed. */
3133 if (reg_classes_intersect_p (CC_REGS, rclass))
3134 return GENERAL_REGS;
3135
3136 if (TARGET_Z10)
3137 {
3138 HOST_WIDE_INT offset;
3139 rtx symref;
3140
3141 /* On z10 several optimizer steps may generate larl operands with
3142 an odd addend. */
3143 if (in_p
3144 && s390_loadrelative_operand_p (x, &symref, &offset)
3145 && mode == Pmode
3146 && !SYMBOL_REF_ALIGN1_P (symref)
3147 && (offset & 1) == 1)
3148 sri->icode = ((mode == DImode) ? CODE_FOR_reloaddi_larl_odd_addend_z10
3149 : CODE_FOR_reloadsi_larl_odd_addend_z10);
3150
3151 /* On z10 we need a scratch register when moving QI, TI or floating
3152 point mode values from or to a memory location with a SYMBOL_REF
3153 or if the symref addend of a SI or DI move is not aligned to the
3154 width of the access. */
3155 if (MEM_P (x)
3156 && s390_loadrelative_operand_p (XEXP (x, 0), NULL, NULL)
3157 && (mode == QImode || mode == TImode || FLOAT_MODE_P (mode)
3158 || (!TARGET_ZARCH && mode == DImode)
3159 || ((mode == HImode || mode == SImode || mode == DImode)
3160 && (!s390_check_symref_alignment (XEXP (x, 0),
3161 GET_MODE_SIZE (mode))))))
3162 {
3163 #define __SECONDARY_RELOAD_CASE(M,m) \
3164 case M##mode: \
3165 if (TARGET_64BIT) \
3166 sri->icode = in_p ? CODE_FOR_reload##m##di_toreg_z10 : \
3167 CODE_FOR_reload##m##di_tomem_z10; \
3168 else \
3169 sri->icode = in_p ? CODE_FOR_reload##m##si_toreg_z10 : \
3170 CODE_FOR_reload##m##si_tomem_z10; \
3171 break;
3172
3173 switch (GET_MODE (x))
3174 {
3175 __SECONDARY_RELOAD_CASE (QI, qi);
3176 __SECONDARY_RELOAD_CASE (HI, hi);
3177 __SECONDARY_RELOAD_CASE (SI, si);
3178 __SECONDARY_RELOAD_CASE (DI, di);
3179 __SECONDARY_RELOAD_CASE (TI, ti);
3180 __SECONDARY_RELOAD_CASE (SF, sf);
3181 __SECONDARY_RELOAD_CASE (DF, df);
3182 __SECONDARY_RELOAD_CASE (TF, tf);
3183 __SECONDARY_RELOAD_CASE (SD, sd);
3184 __SECONDARY_RELOAD_CASE (DD, dd);
3185 __SECONDARY_RELOAD_CASE (TD, td);
3186
3187 default:
3188 gcc_unreachable ();
3189 }
3190 #undef __SECONDARY_RELOAD_CASE
3191 }
3192 }
3193
3194 /* We need a scratch register when loading a PLUS expression which
3195 is not a legitimate operand of the LOAD ADDRESS instruction. */
3196 /* LRA handles the transformation of a PLUS operand very well, so
3197 we don't need to prompt LRA in this case. */
3198 if (! lra_in_progress && in_p && s390_plus_operand (x, mode))
3199 sri->icode = (TARGET_64BIT ?
3200 CODE_FOR_reloaddi_plus : CODE_FOR_reloadsi_plus);
3201
3202 /* When performing a multiword move from or to memory, we have to make
3203 sure the second chunk in memory is addressable without causing a
3204 displacement overflow. If that would be the case, we calculate the
3205 address in a scratch register. */
3206 if (MEM_P (x)
3207 && GET_CODE (XEXP (x, 0)) == PLUS
3208 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
3209 && !DISP_IN_RANGE (INTVAL (XEXP (XEXP (x, 0), 1))
3210 + GET_MODE_SIZE (mode) - 1))
3211 {
3212 /* For GENERAL_REGS a displacement overflow is no problem if it occurs
3213 in an s_operand address, since we may fall back to lm/stm. So we only
3214 have to care about overflows in the b+i+d case. */
3215 if ((reg_classes_intersect_p (GENERAL_REGS, rclass)
3216 && s390_class_max_nregs (GENERAL_REGS, mode) > 1
3217 && GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS)
3218 /* For FP_REGS no lm/stm is available so this check is triggered
3219 for displacement overflows in b+i+d and b+d like addresses. */
3220 || (reg_classes_intersect_p (FP_REGS, rclass)
3221 && s390_class_max_nregs (FP_REGS, mode) > 1))
3222 {
3223 if (in_p)
3224 sri->icode = (TARGET_64BIT ?
3225 CODE_FOR_reloaddi_nonoffmem_in :
3226 CODE_FOR_reloadsi_nonoffmem_in);
3227 else
3228 sri->icode = (TARGET_64BIT ?
3229 CODE_FOR_reloaddi_nonoffmem_out :
3230 CODE_FOR_reloadsi_nonoffmem_out);
3231 }
3232 }
3233
3234 /* A scratch address register is needed when a symbolic constant is
3235 copied to r0 while compiling with -fPIC. In other cases the target
3236 register might be used as a temporary (see legitimize_pic_address). */
3237 if (in_p && SYMBOLIC_CONST (x) && flag_pic == 2 && rclass != ADDR_REGS)
3238 sri->icode = (TARGET_64BIT ?
3239 CODE_FOR_reloaddi_PIC_addr :
3240 CODE_FOR_reloadsi_PIC_addr);
3241
3242 /* Either scratch or no register needed. */
3243 return NO_REGS;
3244 }
3245
3246 /* Generate code to load SRC, which is PLUS that is not a
3247 legitimate operand for the LA instruction, into TARGET.
3248 SCRATCH may be used as scratch register. */
3249
3250 void
3251 s390_expand_plus_operand (rtx target, rtx src,
3252 rtx scratch)
3253 {
3254 rtx sum1, sum2;
3255 struct s390_address ad;
3256
3257 /* src must be a PLUS; get its two operands. */
3258 gcc_assert (GET_CODE (src) == PLUS);
3259 gcc_assert (GET_MODE (src) == Pmode);
3260
3261 /* Check if any of the two operands is already scheduled
3262 for replacement by reload. This can happen e.g. when
3263 float registers occur in an address. */
3264 sum1 = find_replacement (&XEXP (src, 0));
3265 sum2 = find_replacement (&XEXP (src, 1));
3266 src = gen_rtx_PLUS (Pmode, sum1, sum2);
3267
3268 /* If the address is already strictly valid, there's nothing to do. */
3269 if (!s390_decompose_address (src, &ad)
3270 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
3271 || (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx))))
3272 {
3273 /* Otherwise, one of the operands cannot be an address register;
3274 we reload its value into the scratch register. */
3275 if (true_regnum (sum1) < 1 || true_regnum (sum1) > 15)
3276 {
3277 emit_move_insn (scratch, sum1);
3278 sum1 = scratch;
3279 }
3280 if (true_regnum (sum2) < 1 || true_regnum (sum2) > 15)
3281 {
3282 emit_move_insn (scratch, sum2);
3283 sum2 = scratch;
3284 }
3285
3286 /* According to the way these invalid addresses are generated
3287 in reload.c, it should never happen (at least on s390) that
3288 *neither* of the PLUS components, after find_replacements
3289 was applied, is an address register. */
3290 if (sum1 == scratch && sum2 == scratch)
3291 {
3292 debug_rtx (src);
3293 gcc_unreachable ();
3294 }
3295
3296 src = gen_rtx_PLUS (Pmode, sum1, sum2);
3297 }
3298
3299 /* Emit the LOAD ADDRESS pattern. Note that reload of PLUS
3300 is only ever performed on addresses, so we can mark the
3301 sum as legitimate for LA in any case. */
3302 s390_load_address (target, src);
3303 }
3304
3305
3306 /* Return true if ADDR is a valid memory address.
3307 STRICT specifies whether strict register checking applies. */
3308
3309 static bool
3310 s390_legitimate_address_p (enum machine_mode mode, rtx addr, bool strict)
3311 {
3312 struct s390_address ad;
3313
3314 if (TARGET_Z10
3315 && larl_operand (addr, VOIDmode)
3316 && (mode == VOIDmode
3317 || s390_check_symref_alignment (addr, GET_MODE_SIZE (mode))))
3318 return true;
3319
3320 if (!s390_decompose_address (addr, &ad))
3321 return false;
3322
3323 if (strict)
3324 {
3325 if (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
3326 return false;
3327
3328 if (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx)))
3329 return false;
3330 }
3331 else
3332 {
3333 if (ad.base
3334 && !(REGNO (ad.base) >= FIRST_PSEUDO_REGISTER
3335 || REGNO_REG_CLASS (REGNO (ad.base)) == ADDR_REGS))
3336 return false;
3337
3338 if (ad.indx
3339 && !(REGNO (ad.indx) >= FIRST_PSEUDO_REGISTER
3340 || REGNO_REG_CLASS (REGNO (ad.indx)) == ADDR_REGS))
3341 return false;
3342 }
3343 return true;
3344 }
3345
3346 /* Return true if OP is a valid operand for the LA instruction.
3347 In 31-bit, we need to prove that the result is used as an
3348 address, as LA performs only a 31-bit addition. */
3349
3350 bool
3351 legitimate_la_operand_p (rtx op)
3352 {
3353 struct s390_address addr;
3354 if (!s390_decompose_address (op, &addr))
3355 return false;
3356
3357 return (TARGET_64BIT || addr.pointer);
3358 }
3359
3360 /* Return true if it is valid *and* preferable to use LA to
3361 compute the sum of OP1 and OP2. */
3362
3363 bool
3364 preferred_la_operand_p (rtx op1, rtx op2)
3365 {
3366 struct s390_address addr;
3367
3368 if (op2 != const0_rtx)
3369 op1 = gen_rtx_PLUS (Pmode, op1, op2);
3370
3371 if (!s390_decompose_address (op1, &addr))
3372 return false;
3373 if (addr.base && !REGNO_OK_FOR_BASE_P (REGNO (addr.base)))
3374 return false;
3375 if (addr.indx && !REGNO_OK_FOR_INDEX_P (REGNO (addr.indx)))
3376 return false;
3377
3378 /* Avoid LA instructions with index register on z196; it is
3379 preferable to use regular add instructions when possible.
3380 Starting with zEC12 the la with index register is "uncracked"
3381 again. */
3382 if (addr.indx && s390_tune == PROCESSOR_2817_Z196)
3383 return false;
3384
3385 if (!TARGET_64BIT && !addr.pointer)
3386 return false;
3387
3388 if (addr.pointer)
3389 return true;
3390
3391 if ((addr.base && REG_P (addr.base) && REG_POINTER (addr.base))
3392 || (addr.indx && REG_P (addr.indx) && REG_POINTER (addr.indx)))
3393 return true;
3394
3395 return false;
3396 }
3397
3398 /* Emit a forced load-address operation to load SRC into DST.
3399 This will use the LOAD ADDRESS instruction even in situations
3400 where legitimate_la_operand_p (SRC) returns false. */
3401
3402 void
3403 s390_load_address (rtx dst, rtx src)
3404 {
3405 if (TARGET_64BIT)
3406 emit_move_insn (dst, src);
3407 else
3408 emit_insn (gen_force_la_31 (dst, src));
3409 }
3410
3411 /* Return a legitimate reference for ORIG (an address) using the
3412 register REG. If REG is 0, a new pseudo is generated.
3413
3414 There are two types of references that must be handled:
3415
3416 1. Global data references must load the address from the GOT, via
3417 the PIC reg. An insn is emitted to do this load, and the reg is
3418 returned.
3419
3420 2. Static data references, constant pool addresses, and code labels
3421 compute the address as an offset from the GOT, whose base is in
3422 the PIC reg. Static data objects have SYMBOL_FLAG_LOCAL set to
3423 differentiate them from global data objects. The returned
3424 address is the PIC reg + an unspec constant.
3425
3426 TARGET_LEGITIMIZE_ADDRESS_P rejects symbolic references unless the PIC
3427 reg also appears in the address. */
3428
3429 rtx
3430 legitimize_pic_address (rtx orig, rtx reg)
3431 {
3432 rtx addr = orig;
3433 rtx addend = const0_rtx;
3434 rtx new_rtx = orig;
3435
3436 gcc_assert (!TLS_SYMBOLIC_CONST (addr));
3437
3438 if (GET_CODE (addr) == CONST)
3439 addr = XEXP (addr, 0);
3440
3441 if (GET_CODE (addr) == PLUS)
3442 {
3443 addend = XEXP (addr, 1);
3444 addr = XEXP (addr, 0);
3445 }
3446
3447 if ((GET_CODE (addr) == LABEL_REF
3448 || (GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_LOCAL_P (addr))
3449 || (GET_CODE (addr) == UNSPEC &&
3450 (XINT (addr, 1) == UNSPEC_GOTENT
3451 || (TARGET_CPU_ZARCH && XINT (addr, 1) == UNSPEC_PLT))))
3452 && GET_CODE (addend) == CONST_INT)
3453 {
3454 /* This can be locally addressed. */
3455
3456 /* larl_operand requires UNSPECs to be wrapped in a const rtx. */
3457 rtx const_addr = (GET_CODE (addr) == UNSPEC ?
3458 gen_rtx_CONST (Pmode, addr) : addr);
3459
3460 if (TARGET_CPU_ZARCH
3461 && larl_operand (const_addr, VOIDmode)
3462 && INTVAL (addend) < (HOST_WIDE_INT)1 << 31
3463 && INTVAL (addend) >= -((HOST_WIDE_INT)1 << 31))
3464 {
3465 if (INTVAL (addend) & 1)
3466 {
3467 /* LARL can't handle odd offsets, so emit a pair of LARL
3468 and LA. */
3469 rtx temp = reg? reg : gen_reg_rtx (Pmode);
3470
3471 if (!DISP_IN_RANGE (INTVAL (addend)))
3472 {
3473 HOST_WIDE_INT even = INTVAL (addend) - 1;
3474 addr = gen_rtx_PLUS (Pmode, addr, GEN_INT (even));
3475 addr = gen_rtx_CONST (Pmode, addr);
3476 addend = const1_rtx;
3477 }
3478
3479 emit_move_insn (temp, addr);
3480 new_rtx = gen_rtx_PLUS (Pmode, temp, addend);
3481
3482 if (reg != 0)
3483 {
3484 s390_load_address (reg, new_rtx);
3485 new_rtx = reg;
3486 }
3487 }
3488 else
3489 {
3490 /* If the offset is even, we can just use LARL. This
3491 will happen automatically. */
3492 }
3493 }
3494 else
3495 {
3496 /* No larl - Access local symbols relative to the GOT. */
3497
3498 rtx temp = reg? reg : gen_reg_rtx (Pmode);
3499
3500 if (reload_in_progress || reload_completed)
3501 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3502
3503 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
3504 if (addend != const0_rtx)
3505 addr = gen_rtx_PLUS (Pmode, addr, addend);
3506 addr = gen_rtx_CONST (Pmode, addr);
3507 addr = force_const_mem (Pmode, addr);
3508 emit_move_insn (temp, addr);
3509
3510 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3511 if (reg != 0)
3512 {
3513 s390_load_address (reg, new_rtx);
3514 new_rtx = reg;
3515 }
3516 }
3517 }
3518 else if (GET_CODE (addr) == SYMBOL_REF && addend == const0_rtx)
3519 {
3520 /* A non-local symbol reference without addend.
3521
3522 The symbol ref is wrapped into an UNSPEC to make sure the
3523 proper operand modifier (@GOT or @GOTENT) will be emitted.
3524 This will tell the linker to put the symbol into the GOT.
3525
3526 Additionally the code dereferencing the GOT slot is emitted here.
3527
3528 An addend to the symref needs to be added afterwards.
3529 legitimize_pic_address calls itself recursively to handle
3530 that case. So no need to do it here. */
3531
3532 if (reg == 0)
3533 reg = gen_reg_rtx (Pmode);
3534
3535 if (TARGET_Z10)
3536 {
3537 /* Use load relative if possible.
3538 lgrl <target>, sym@GOTENT */
3539 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTENT);
3540 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3541 new_rtx = gen_const_mem (GET_MODE (reg), new_rtx);
3542
3543 emit_move_insn (reg, new_rtx);
3544 new_rtx = reg;
3545 }
3546 else if (flag_pic == 1)
3547 {
3548 /* Assume GOT offset is a valid displacement operand (< 4k
3549 or < 512k with z990). This is handled the same way in
3550 both 31- and 64-bit code (@GOT).
3551 lg <target>, sym@GOT(r12) */
3552
3553 if (reload_in_progress || reload_completed)
3554 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3555
3556 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
3557 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3558 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
3559 new_rtx = gen_const_mem (Pmode, new_rtx);
3560 emit_move_insn (reg, new_rtx);
3561 new_rtx = reg;
3562 }
3563 else if (TARGET_CPU_ZARCH)
3564 {
3565 /* If the GOT offset might be >= 4k, we determine the position
3566 of the GOT entry via a PC-relative LARL (@GOTENT).
3567 larl temp, sym@GOTENT
3568 lg <target>, 0(temp) */
3569
3570 rtx temp = reg ? reg : gen_reg_rtx (Pmode);
3571
3572 gcc_assert (REGNO (temp) >= FIRST_PSEUDO_REGISTER
3573 || REGNO_REG_CLASS (REGNO (temp)) == ADDR_REGS);
3574
3575 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTENT);
3576 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3577 emit_move_insn (temp, new_rtx);
3578
3579 new_rtx = gen_const_mem (Pmode, temp);
3580 emit_move_insn (reg, new_rtx);
3581
3582 new_rtx = reg;
3583 }
3584 else
3585 {
3586 /* If the GOT offset might be >= 4k, we have to load it
3587 from the literal pool (@GOT).
3588
3589 lg temp, lit-litbase(r13)
3590 lg <target>, 0(temp)
3591 lit: .long sym@GOT */
3592
3593 rtx temp = reg ? reg : gen_reg_rtx (Pmode);
3594
3595 gcc_assert (REGNO (temp) >= FIRST_PSEUDO_REGISTER
3596 || REGNO_REG_CLASS (REGNO (temp)) == ADDR_REGS);
3597
3598 if (reload_in_progress || reload_completed)
3599 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3600
3601 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
3602 addr = gen_rtx_CONST (Pmode, addr);
3603 addr = force_const_mem (Pmode, addr);
3604 emit_move_insn (temp, addr);
3605
3606 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3607 new_rtx = gen_const_mem (Pmode, new_rtx);
3608 emit_move_insn (reg, new_rtx);
3609 new_rtx = reg;
3610 }
3611 }
3612 else if (GET_CODE (addr) == UNSPEC && GET_CODE (addend) == CONST_INT)
3613 {
3614 gcc_assert (XVECLEN (addr, 0) == 1);
3615 switch (XINT (addr, 1))
3616 {
3617 /* These UNSPECs address symbols (or PLT slots) relative to the
3618 GOT (not GOT slots!). In general this will exceed the
3619 displacement range, so these values belong in the literal
3620 pool. */
3621 case UNSPEC_GOTOFF:
3622 case UNSPEC_PLTOFF:
3623 new_rtx = force_const_mem (Pmode, orig);
3624 break;
3625
3626 /* For -fPIC the GOT size might exceed the displacement
3627 range so make sure the value is in the literal pool. */
3628 case UNSPEC_GOT:
3629 if (flag_pic == 2)
3630 new_rtx = force_const_mem (Pmode, orig);
3631 break;
3632
3633 /* For @GOTENT larl is used. This is handled like local
3634 symbol refs. */
3635 case UNSPEC_GOTENT:
3636 gcc_unreachable ();
3637 break;
3638
3639 /* @PLT is OK as is on 64-bit, must be converted to
3640 GOT-relative @PLTOFF on 31-bit. */
3641 case UNSPEC_PLT:
3642 if (!TARGET_CPU_ZARCH)
3643 {
3644 rtx temp = reg? reg : gen_reg_rtx (Pmode);
3645
3646 if (reload_in_progress || reload_completed)
3647 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3648
3649 addr = XVECEXP (addr, 0, 0);
3650 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr),
3651 UNSPEC_PLTOFF);
3652 if (addend != const0_rtx)
3653 addr = gen_rtx_PLUS (Pmode, addr, addend);
3654 addr = gen_rtx_CONST (Pmode, addr);
3655 addr = force_const_mem (Pmode, addr);
3656 emit_move_insn (temp, addr);
3657
3658 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3659 if (reg != 0)
3660 {
3661 s390_load_address (reg, new_rtx);
3662 new_rtx = reg;
3663 }
3664 }
3665 else
3666 /* On 64 bit larl can be used. This case is handled like
3667 local symbol refs. */
3668 gcc_unreachable ();
3669 break;
3670
3671 /* Everything else cannot happen. */
3672 default:
3673 gcc_unreachable ();
3674 }
3675 }
3676 else if (addend != const0_rtx)
3677 {
3678 /* Otherwise, compute the sum. */
3679
3680 rtx base = legitimize_pic_address (addr, reg);
3681 new_rtx = legitimize_pic_address (addend,
3682 base == reg ? NULL_RTX : reg);
3683 if (GET_CODE (new_rtx) == CONST_INT)
3684 new_rtx = plus_constant (Pmode, base, INTVAL (new_rtx));
3685 else
3686 {
3687 if (GET_CODE (new_rtx) == PLUS && CONSTANT_P (XEXP (new_rtx, 1)))
3688 {
3689 base = gen_rtx_PLUS (Pmode, base, XEXP (new_rtx, 0));
3690 new_rtx = XEXP (new_rtx, 1);
3691 }
3692 new_rtx = gen_rtx_PLUS (Pmode, base, new_rtx);
3693 }
3694
3695 if (GET_CODE (new_rtx) == CONST)
3696 new_rtx = XEXP (new_rtx, 0);
3697 new_rtx = force_operand (new_rtx, 0);
3698 }
3699
3700 return new_rtx;
3701 }
3702
3703 /* Load the thread pointer into a register. */
3704
3705 rtx
3706 s390_get_thread_pointer (void)
3707 {
3708 rtx tp = gen_reg_rtx (Pmode);
3709
3710 emit_move_insn (tp, gen_rtx_REG (Pmode, TP_REGNUM));
3711 mark_reg_pointer (tp, BITS_PER_WORD);
3712
3713 return tp;
3714 }
3715
3716 /* Emit a TLS call insn. The call target is the SYMBOL_REF stored
3717 in s390_tls_symbol, which always refers to __tls_get_offset.
3718 The returned offset is written to RESULT_REG and a USE rtx is
3719 generated for TLS_CALL. */
3720
3721 static GTY(()) rtx s390_tls_symbol;
3722
3723 static void
3724 s390_emit_tls_call_insn (rtx result_reg, rtx tls_call)
3725 {
3726 rtx insn;
3727
3728 if (!flag_pic)
3729 emit_insn (s390_load_got ());
3730
3731 if (!s390_tls_symbol)
3732 s390_tls_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tls_get_offset");
3733
3734 insn = s390_emit_call (s390_tls_symbol, tls_call, result_reg,
3735 gen_rtx_REG (Pmode, RETURN_REGNUM));
3736
3737 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), result_reg);
3738 RTL_CONST_CALL_P (insn) = 1;
3739 }
3740
3741 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
3742 this (thread-local) address. REG may be used as temporary. */
3743
3744 static rtx
3745 legitimize_tls_address (rtx addr, rtx reg)
3746 {
3747 rtx new_rtx, tls_call, temp, base, r2, insn;
3748
3749 if (GET_CODE (addr) == SYMBOL_REF)
3750 switch (tls_symbolic_operand (addr))
3751 {
3752 case TLS_MODEL_GLOBAL_DYNAMIC:
3753 start_sequence ();
3754 r2 = gen_rtx_REG (Pmode, 2);
3755 tls_call = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_TLSGD);
3756 new_rtx = gen_rtx_CONST (Pmode, tls_call);
3757 new_rtx = force_const_mem (Pmode, new_rtx);
3758 emit_move_insn (r2, new_rtx);
3759 s390_emit_tls_call_insn (r2, tls_call);
3760 insn = get_insns ();
3761 end_sequence ();
3762
3763 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_NTPOFF);
3764 temp = gen_reg_rtx (Pmode);
3765 emit_libcall_block (insn, temp, r2, new_rtx);
3766
3767 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
3768 if (reg != 0)
3769 {
3770 s390_load_address (reg, new_rtx);
3771 new_rtx = reg;
3772 }
3773 break;
3774
3775 case TLS_MODEL_LOCAL_DYNAMIC:
3776 start_sequence ();
3777 r2 = gen_rtx_REG (Pmode, 2);
3778 tls_call = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TLSLDM);
3779 new_rtx = gen_rtx_CONST (Pmode, tls_call);
3780 new_rtx = force_const_mem (Pmode, new_rtx);
3781 emit_move_insn (r2, new_rtx);
3782 s390_emit_tls_call_insn (r2, tls_call);
3783 insn = get_insns ();
3784 end_sequence ();
3785
3786 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TLSLDM_NTPOFF);
3787 temp = gen_reg_rtx (Pmode);
3788 emit_libcall_block (insn, temp, r2, new_rtx);
3789
3790 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
3791 base = gen_reg_rtx (Pmode);
3792 s390_load_address (base, new_rtx);
3793
3794 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_DTPOFF);
3795 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3796 new_rtx = force_const_mem (Pmode, new_rtx);
3797 temp = gen_reg_rtx (Pmode);
3798 emit_move_insn (temp, new_rtx);
3799
3800 new_rtx = gen_rtx_PLUS (Pmode, base, temp);
3801 if (reg != 0)
3802 {
3803 s390_load_address (reg, new_rtx);
3804 new_rtx = reg;
3805 }
3806 break;
3807
3808 case TLS_MODEL_INITIAL_EXEC:
3809 if (flag_pic == 1)
3810 {
3811 /* Assume GOT offset < 4k. This is handled the same way
3812 in both 31- and 64-bit code. */
3813
3814 if (reload_in_progress || reload_completed)
3815 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3816
3817 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTNTPOFF);
3818 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3819 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
3820 new_rtx = gen_const_mem (Pmode, new_rtx);
3821 temp = gen_reg_rtx (Pmode);
3822 emit_move_insn (temp, new_rtx);
3823 }
3824 else if (TARGET_CPU_ZARCH)
3825 {
3826 /* If the GOT offset might be >= 4k, we determine the position
3827 of the GOT entry via a PC-relative LARL. */
3828
3829 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_INDNTPOFF);
3830 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3831 temp = gen_reg_rtx (Pmode);
3832 emit_move_insn (temp, new_rtx);
3833
3834 new_rtx = gen_const_mem (Pmode, temp);
3835 temp = gen_reg_rtx (Pmode);
3836 emit_move_insn (temp, new_rtx);
3837 }
3838 else if (flag_pic)
3839 {
3840 /* If the GOT offset might be >= 4k, we have to load it
3841 from the literal pool. */
3842
3843 if (reload_in_progress || reload_completed)
3844 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3845
3846 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTNTPOFF);
3847 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3848 new_rtx = force_const_mem (Pmode, new_rtx);
3849 temp = gen_reg_rtx (Pmode);
3850 emit_move_insn (temp, new_rtx);
3851
3852 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3853 new_rtx = gen_const_mem (Pmode, new_rtx);
3854
3855 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, new_rtx, addr), UNSPEC_TLS_LOAD);
3856 temp = gen_reg_rtx (Pmode);
3857 emit_insn (gen_rtx_SET (Pmode, temp, new_rtx));
3858 }
3859 else
3860 {
3861 /* In position-dependent code, load the absolute address of
3862 the GOT entry from the literal pool. */
3863
3864 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_INDNTPOFF);
3865 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3866 new_rtx = force_const_mem (Pmode, new_rtx);
3867 temp = gen_reg_rtx (Pmode);
3868 emit_move_insn (temp, new_rtx);
3869
3870 new_rtx = temp;
3871 new_rtx = gen_const_mem (Pmode, new_rtx);
3872 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, new_rtx, addr), UNSPEC_TLS_LOAD);
3873 temp = gen_reg_rtx (Pmode);
3874 emit_insn (gen_rtx_SET (Pmode, temp, new_rtx));
3875 }
3876
3877 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
3878 if (reg != 0)
3879 {
3880 s390_load_address (reg, new_rtx);
3881 new_rtx = reg;
3882 }
3883 break;
3884
3885 case TLS_MODEL_LOCAL_EXEC:
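/* Local exec: the offset of ADDR from the thread pointer is a
link-time constant (NTPOFF) loaded from the literal pool. */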
3886 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_NTPOFF);
3887 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3888 new_rtx = force_const_mem (Pmode, new_rtx);
3889 temp = gen_reg_rtx (Pmode);
3890 emit_move_insn (temp, new_rtx);
3891
3892 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
3893 if (reg != 0)
3894 {
3895 s390_load_address (reg, new_rtx);
3896 new_rtx = reg;
3897 }
3898 break;
3899
3900 default:
3901 gcc_unreachable ();
3902 }
3903
3904 else if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == UNSPEC)
3905 {
3906 switch (XINT (XEXP (addr, 0), 1))
3907 {
3908 case UNSPEC_INDNTPOFF:
3909 gcc_assert (TARGET_CPU_ZARCH);
3910 new_rtx = addr;
3911 break;
3912
3913 default:
3914 gcc_unreachable ();
3915 }
3916 }
3917
3918 else if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == PLUS
3919 && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
3920 {
3921 new_rtx = XEXP (XEXP (addr, 0), 0);
3922 if (GET_CODE (new_rtx) != SYMBOL_REF)
3923 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3924
3925 new_rtx = legitimize_tls_address (new_rtx, reg);
3926 new_rtx = plus_constant (Pmode, new_rtx,
3927 INTVAL (XEXP (XEXP (addr, 0), 1)));
3928 new_rtx = force_operand (new_rtx, 0);
3929 }
3930
3931 else
3932 gcc_unreachable (); /* for now ... */
3933
3934 return new_rtx;
3935 }
3936
3937 /* Emit insns making the address in operands[1] valid for a standard
3938 move to operands[0]. operands[1] is replaced by an address which
3939 should be used instead of the former RTX to emit the move
3940 pattern. */
3941
3942 void
3943 emit_symbolic_move (rtx *operands)
3944 {
3945 rtx temp = !can_create_pseudo_p () ? operands[0] : gen_reg_rtx (Pmode);
3946
3947 if (GET_CODE (operands[0]) == MEM)
3948 operands[1] = force_reg (Pmode, operands[1]);
3949 else if (TLS_SYMBOLIC_CONST (operands[1]))
3950 operands[1] = legitimize_tls_address (operands[1], temp);
3951 else if (flag_pic)
3952 operands[1] = legitimize_pic_address (operands[1], temp);
3953 }
3954
3955 /* Try machine-dependent ways of modifying an illegitimate address X
3956 to be legitimate. If we find one, return the new, valid address.
3957
3958 OLDX is the address as it was before break_out_memory_refs was called.
3959 In some cases it is useful to look at this to decide what needs to be done.
3960
3961 MODE is the mode of the operand pointed to by X.
3962
3963 When -fpic is used, special handling is needed for symbolic references.
3964 See comments by legitimize_pic_address for details. */
3965
3966 static rtx
3967 s390_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
3968 enum machine_mode mode ATTRIBUTE_UNUSED)
3969 {
3970 rtx constant_term = const0_rtx;
3971
3972 if (TLS_SYMBOLIC_CONST (x))
3973 {
3974 x = legitimize_tls_address (x, 0);
3975
3976 if (s390_legitimate_address_p (mode, x, FALSE))
3977 return x;
3978 }
3979 else if (GET_CODE (x) == PLUS
3980 && (TLS_SYMBOLIC_CONST (XEXP (x, 0))
3981 || TLS_SYMBOLIC_CONST (XEXP (x, 1))))
3982 {
3983 return x;
3984 }
3985 else if (flag_pic)
3986 {
3987 if (SYMBOLIC_CONST (x)
3988 || (GET_CODE (x) == PLUS
3989 && (SYMBOLIC_CONST (XEXP (x, 0))
3990 || SYMBOLIC_CONST (XEXP (x, 1)))))
3991 x = legitimize_pic_address (x, 0);
3992
3993 if (s390_legitimate_address_p (mode, x, FALSE))
3994 return x;
3995 }
3996
3997 x = eliminate_constant_term (x, &constant_term);
3998
3999 /* Optimize loading of large displacements by splitting them
4000 into the multiple of 4K and the rest; this allows the
4001 former to be CSE'd if possible.
4002
4003 Don't do this if the displacement is added to a register
4004 pointing into the stack frame, as the offsets will
4005 change later anyway. */
4006
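/* For example, a displacement of 0x12345 is split into upper = 0x12000,
which is loaded into a register (and can be CSE'd), and lower = 0x345,
which fits the 12-bit displacement field. */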
4007 if (GET_CODE (constant_term) == CONST_INT
4008 && !TARGET_LONG_DISPLACEMENT
4009 && !DISP_IN_RANGE (INTVAL (constant_term))
4010 && !(REG_P (x) && REGNO_PTR_FRAME_P (REGNO (x))))
4011 {
4012 HOST_WIDE_INT lower = INTVAL (constant_term) & 0xfff;
4013 HOST_WIDE_INT upper = INTVAL (constant_term) ^ lower;
4014
4015 rtx temp = gen_reg_rtx (Pmode);
4016 rtx val = force_operand (GEN_INT (upper), temp);
4017 if (val != temp)
4018 emit_move_insn (temp, val);
4019
4020 x = gen_rtx_PLUS (Pmode, x, temp);
4021 constant_term = GEN_INT (lower);
4022 }
4023
4024 if (GET_CODE (x) == PLUS)
4025 {
4026 if (GET_CODE (XEXP (x, 0)) == REG)
4027 {
4028 rtx temp = gen_reg_rtx (Pmode);
4029 rtx val = force_operand (XEXP (x, 1), temp);
4030 if (val != temp)
4031 emit_move_insn (temp, val);
4032
4033 x = gen_rtx_PLUS (Pmode, XEXP (x, 0), temp);
4034 }
4035
4036 else if (GET_CODE (XEXP (x, 1)) == REG)
4037 {
4038 rtx temp = gen_reg_rtx (Pmode);
4039 rtx val = force_operand (XEXP (x, 0), temp);
4040 if (val != temp)
4041 emit_move_insn (temp, val);
4042
4043 x = gen_rtx_PLUS (Pmode, temp, XEXP (x, 1));
4044 }
4045 }
4046
4047 if (constant_term != const0_rtx)
4048 x = gen_rtx_PLUS (Pmode, x, constant_term);
4049
4050 return x;
4051 }
4052
4053 /* Try a machine-dependent way of reloading an illegitimate address
4054 operand AD. If we find one, push the reload and return the new address.
4055
4056 MODE is the mode of the enclosing MEM. OPNUM is the operand number
4057 and TYPE is the reload type of the current reload. */
4058
4059 rtx
4060 legitimize_reload_address (rtx ad, enum machine_mode mode ATTRIBUTE_UNUSED,
4061 int opnum, int type)
4062 {
4063 if (!optimize || TARGET_LONG_DISPLACEMENT)
4064 return NULL_RTX;
4065
4066 if (GET_CODE (ad) == PLUS)
4067 {
4068 rtx tem = simplify_binary_operation (PLUS, Pmode,
4069 XEXP (ad, 0), XEXP (ad, 1));
4070 if (tem)
4071 ad = tem;
4072 }
4073
4074 if (GET_CODE (ad) == PLUS
4075 && GET_CODE (XEXP (ad, 0)) == REG
4076 && GET_CODE (XEXP (ad, 1)) == CONST_INT
4077 && !DISP_IN_RANGE (INTVAL (XEXP (ad, 1))))
4078 {
4079 HOST_WIDE_INT lower = INTVAL (XEXP (ad, 1)) & 0xfff;
4080 HOST_WIDE_INT upper = INTVAL (XEXP (ad, 1)) ^ lower;
4081 rtx cst, tem, new_rtx;
4082
4083 cst = GEN_INT (upper);
4084 if (!legitimate_reload_constant_p (cst))
4085 cst = force_const_mem (Pmode, cst);
4086
4087 tem = gen_rtx_PLUS (Pmode, XEXP (ad, 0), cst);
4088 new_rtx = gen_rtx_PLUS (Pmode, tem, GEN_INT (lower));
4089
4090 push_reload (XEXP (tem, 1), 0, &XEXP (tem, 1), 0,
4091 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
4092 opnum, (enum reload_type) type);
4093 return new_rtx;
4094 }
4095
4096 return NULL_RTX;
4097 }
4098
4099 /* Emit code to move LEN bytes from SRC to DST. */
4100
4101 bool
4102 s390_expand_movmem (rtx dst, rtx src, rtx len)
4103 {
4104 /* When tuning for z10 or higher we rely on the Glibc functions to
4105 do the right thing. Inline code is only generated for constant
4106 lengths of at most 64k. */
4107 if (s390_tune >= PROCESSOR_2097_Z10
4108 && (GET_CODE (len) != CONST_INT || INTVAL (len) > (1<<16)))
4109 return false;
4110
4111 if (GET_CODE (len) == CONST_INT && INTVAL (len) >= 0 && INTVAL (len) <= 256)
4112 {
4113 if (INTVAL (len) > 0)
4114 emit_insn (gen_movmem_short (dst, src, GEN_INT (INTVAL (len) - 1)));
4115 }
4116
4117 else if (TARGET_MVCLE)
4118 {
4119 emit_insn (gen_movmem_long (dst, src, convert_to_mode (Pmode, len, 1)));
4120 }
4121
4122 else
4123 {
4124 rtx dst_addr, src_addr, count, blocks, temp;
4125 rtx loop_start_label = gen_label_rtx ();
4126 rtx loop_end_label = gen_label_rtx ();
4127 rtx end_label = gen_label_rtx ();
4128 enum machine_mode mode;
4129
4130 mode = GET_MODE (len);
4131 if (mode == VOIDmode)
4132 mode = Pmode;
4133
4134 dst_addr = gen_reg_rtx (Pmode);
4135 src_addr = gen_reg_rtx (Pmode);
4136 count = gen_reg_rtx (mode);
4137 blocks = gen_reg_rtx (mode);
4138
4139 convert_move (count, len, 1);
4140 emit_cmp_and_jump_insns (count, const0_rtx,
4141 EQ, NULL_RTX, mode, 1, end_label);
4142
4143 emit_move_insn (dst_addr, force_operand (XEXP (dst, 0), NULL_RTX));
4144 emit_move_insn (src_addr, force_operand (XEXP (src, 0), NULL_RTX));
4145 dst = change_address (dst, VOIDmode, dst_addr);
4146 src = change_address (src, VOIDmode, src_addr);
4147
4148 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
4149 OPTAB_DIRECT);
4150 if (temp != count)
4151 emit_move_insn (count, temp);
4152
4153 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
4154 OPTAB_DIRECT);
4155 if (temp != blocks)
4156 emit_move_insn (blocks, temp);
4157
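/* COUNT holds LEN - 1. The loop below moves BLOCKS = COUNT >> 8 full
256-byte chunks; whatever is left is handled by the final movmem_short
after LOOP_END_LABEL. */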
4158 emit_cmp_and_jump_insns (blocks, const0_rtx,
4159 EQ, NULL_RTX, mode, 1, loop_end_label);
4160
4161 emit_label (loop_start_label);
4162
4163 if (TARGET_Z10
4164 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 768))
4165 {
4166 rtx prefetch;
4167
4168 /* Issue a read prefetch for the +3 cache line. */
4169 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, src_addr, GEN_INT (768)),
4170 const0_rtx, const0_rtx);
4171 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4172 emit_insn (prefetch);
4173
4174 /* Issue a write prefetch for the +3 cache line. */
4175 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (768)),
4176 const1_rtx, const0_rtx);
4177 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4178 emit_insn (prefetch);
4179 }
4180
4181 emit_insn (gen_movmem_short (dst, src, GEN_INT (255)));
4182 s390_load_address (dst_addr,
4183 gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (256)));
4184 s390_load_address (src_addr,
4185 gen_rtx_PLUS (Pmode, src_addr, GEN_INT (256)));
4186
4187 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
4188 OPTAB_DIRECT);
4189 if (temp != blocks)
4190 emit_move_insn (blocks, temp);
4191
4192 emit_cmp_and_jump_insns (blocks, const0_rtx,
4193 EQ, NULL_RTX, mode, 1, loop_end_label);
4194
4195 emit_jump (loop_start_label);
4196 emit_label (loop_end_label);
4197
4198 emit_insn (gen_movmem_short (dst, src,
4199 convert_to_mode (Pmode, count, 1)));
4200 emit_label (end_label);
4201 }
4202 return true;
4203 }
4204
4205 /* Emit code to set LEN bytes at DST to VAL.
4206 Make use of clrmem if VAL is zero. */
4207
4208 void
4209 s390_expand_setmem (rtx dst, rtx len, rtx val)
4210 {
4211 if (GET_CODE (len) == CONST_INT && INTVAL (len) == 0)
4212 return;
4213
4214 gcc_assert (GET_CODE (val) == CONST_INT || GET_MODE (val) == QImode);
4215
4216 if (GET_CODE (len) == CONST_INT && INTVAL (len) > 0 && INTVAL (len) <= 257)
4217 {
4218 if (val == const0_rtx && INTVAL (len) <= 256)
4219 emit_insn (gen_clrmem_short (dst, GEN_INT (INTVAL (len) - 1)));
4220 else
4221 {
4222 /* Initialize memory by storing the first byte. */
4223 emit_move_insn (adjust_address (dst, QImode, 0), val);
4224
4225 if (INTVAL (len) > 1)
4226 {
4227 /* Initiate 1 byte overlap move.
4228 The first byte of DST is propagated through DSTP1.
4229 Prepare a movmem for: DST+1 = DST (length = LEN - 1).
4230 DST is set to size 1 so the rest of the memory location
4231 does not count as a source operand. */
4232 rtx dstp1 = adjust_address (dst, VOIDmode, 1);
4233 set_mem_size (dst, 1);
4234
4235 emit_insn (gen_movmem_short (dstp1, dst,
4236 GEN_INT (INTVAL (len) - 2)));
4237 }
4238 }
4239 }
4240
4241 else if (TARGET_MVCLE)
4242 {
4243 val = force_not_mem (convert_modes (Pmode, QImode, val, 1));
4244 emit_insn (gen_setmem_long (dst, convert_to_mode (Pmode, len, 1), val));
4245 }
4246
4247 else
4248 {
4249 rtx dst_addr, count, blocks, temp, dstp1 = NULL_RTX;
4250 rtx loop_start_label = gen_label_rtx ();
4251 rtx loop_end_label = gen_label_rtx ();
4252 rtx end_label = gen_label_rtx ();
4253 enum machine_mode mode;
4254
4255 mode = GET_MODE (len);
4256 if (mode == VOIDmode)
4257 mode = Pmode;
4258
4259 dst_addr = gen_reg_rtx (Pmode);
4260 count = gen_reg_rtx (mode);
4261 blocks = gen_reg_rtx (mode);
4262
4263 convert_move (count, len, 1);
4264 emit_cmp_and_jump_insns (count, const0_rtx,
4265 EQ, NULL_RTX, mode, 1, end_label);
4266
4267 emit_move_insn (dst_addr, force_operand (XEXP (dst, 0), NULL_RTX));
4268 dst = change_address (dst, VOIDmode, dst_addr);
4269
4270 if (val == const0_rtx)
4271 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
4272 OPTAB_DIRECT);
4273 else
4274 {
4275 dstp1 = adjust_address (dst, VOIDmode, 1);
4276 set_mem_size (dst, 1);
4277
4278 /* Initialize memory by storing the first byte. */
4279 emit_move_insn (adjust_address (dst, QImode, 0), val);
4280
4281 /* If count is 1 we are done. */
4282 emit_cmp_and_jump_insns (count, const1_rtx,
4283 EQ, NULL_RTX, mode, 1, end_label);
4284
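/* One byte has already been stored, so LEN - 1 bytes remain to be
propagated; subtracting 2 yields the length-1 operand for the
overlapping move. */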
4285 temp = expand_binop (mode, add_optab, count, GEN_INT (-2), count, 1,
4286 OPTAB_DIRECT);
4287 }
4288 if (temp != count)
4289 emit_move_insn (count, temp);
4290
4291 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
4292 OPTAB_DIRECT);
4293 if (temp != blocks)
4294 emit_move_insn (blocks, temp);
4295
4296 emit_cmp_and_jump_insns (blocks, const0_rtx,
4297 EQ, NULL_RTX, mode, 1, loop_end_label);
4298
4299 emit_label (loop_start_label);
4300
4301 if (TARGET_Z10
4302 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 1024))
4303 {
4304 /* Issue a write prefetch for the +4 cache line. */
4305 rtx prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, dst_addr,
4306 GEN_INT (1024)),
4307 const1_rtx, const0_rtx);
4308 emit_insn (prefetch);
4309 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4310 }
4311
4312 if (val == const0_rtx)
4313 emit_insn (gen_clrmem_short (dst, GEN_INT (255)));
4314 else
4315 emit_insn (gen_movmem_short (dstp1, dst, GEN_INT (255)));
4316 s390_load_address (dst_addr,
4317 gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (256)));
4318
4319 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
4320 OPTAB_DIRECT);
4321 if (temp != blocks)
4322 emit_move_insn (blocks, temp);
4323
4324 emit_cmp_and_jump_insns (blocks, const0_rtx,
4325 EQ, NULL_RTX, mode, 1, loop_end_label);
4326
4327 emit_jump (loop_start_label);
4328 emit_label (loop_end_label);
4329
4330 if (val == const0_rtx)
4331 emit_insn (gen_clrmem_short (dst, convert_to_mode (Pmode, count, 1)));
4332 else
4333 emit_insn (gen_movmem_short (dstp1, dst, convert_to_mode (Pmode, count, 1)));
4334 emit_label (end_label);
4335 }
4336 }
4337
4338 /* Emit code to compare LEN bytes at OP0 with those at OP1,
4339 and return the result in TARGET. */
4340
4341 bool
4342 s390_expand_cmpmem (rtx target, rtx op0, rtx op1, rtx len)
4343 {
4344 rtx ccreg = gen_rtx_REG (CCUmode, CC_REGNUM);
4345 rtx tmp;
4346
4347 /* When tuning for z10 or higher we rely on the Glibc functions to
4348 do the right thing. Inline code is only generated for constant
4349 lengths of at most 64k. */
4350 if (s390_tune >= PROCESSOR_2097_Z10
4351 && (GET_CODE (len) != CONST_INT || INTVAL (len) > (1<<16)))
4352 return false;
4353
4354 /* As the result of CMPINT is inverted compared to what we need,
4355 we have to swap the operands. */
4356 tmp = op0; op0 = op1; op1 = tmp;
4357
4358 if (GET_CODE (len) == CONST_INT && INTVAL (len) >= 0 && INTVAL (len) <= 256)
4359 {
4360 if (INTVAL (len) > 0)
4361 {
4362 emit_insn (gen_cmpmem_short (op0, op1, GEN_INT (INTVAL (len) - 1)));
4363 emit_insn (gen_cmpint (target, ccreg));
4364 }
4365 else
4366 emit_move_insn (target, const0_rtx);
4367 }
4368 else if (TARGET_MVCLE)
4369 {
4370 emit_insn (gen_cmpmem_long (op0, op1, convert_to_mode (Pmode, len, 1)));
4371 emit_insn (gen_cmpint (target, ccreg));
4372 }
4373 else
4374 {
4375 rtx addr0, addr1, count, blocks, temp;
4376 rtx loop_start_label = gen_label_rtx ();
4377 rtx loop_end_label = gen_label_rtx ();
4378 rtx end_label = gen_label_rtx ();
4379 enum machine_mode mode;
4380
4381 mode = GET_MODE (len);
4382 if (mode == VOIDmode)
4383 mode = Pmode;
4384
4385 addr0 = gen_reg_rtx (Pmode);
4386 addr1 = gen_reg_rtx (Pmode);
4387 count = gen_reg_rtx (mode);
4388 blocks = gen_reg_rtx (mode);
4389
4390 convert_move (count, len, 1);
4391 emit_cmp_and_jump_insns (count, const0_rtx,
4392 EQ, NULL_RTX, mode, 1, end_label);
4393
4394 emit_move_insn (addr0, force_operand (XEXP (op0, 0), NULL_RTX));
4395 emit_move_insn (addr1, force_operand (XEXP (op1, 0), NULL_RTX));
4396 op0 = change_address (op0, VOIDmode, addr0);
4397 op1 = change_address (op1, VOIDmode, addr1);
4398
4399 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
4400 OPTAB_DIRECT);
4401 if (temp != count)
4402 emit_move_insn (count, temp);
4403
4404 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
4405 OPTAB_DIRECT);
4406 if (temp != blocks)
4407 emit_move_insn (blocks, temp);
4408
4409 emit_cmp_and_jump_insns (blocks, const0_rtx,
4410 EQ, NULL_RTX, mode, 1, loop_end_label);
4411
4412 emit_label (loop_start_label);
4413
4414 if (TARGET_Z10
4415 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 512))
4416 {
4417 rtx prefetch;
4418
4419 /* Issue a read prefetch for the +2 cache line of operand 1. */
4420 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, addr0, GEN_INT (512)),
4421 const0_rtx, const0_rtx);
4422 emit_insn (prefetch);
4423 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4424
4425 /* Issue a read prefetch for the +2 cache line of operand 2. */
4426 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, addr1, GEN_INT (512)),
4427 const0_rtx, const0_rtx);
4428 emit_insn (prefetch);
4429 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4430 }
4431
4432 emit_insn (gen_cmpmem_short (op0, op1, GEN_INT (255)));
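/* Leave the loop as soon as a 256-byte chunk differs (CC != 0); the
CMPINT after END_LABEL turns the condition code into the result. */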
4433 temp = gen_rtx_NE (VOIDmode, ccreg, const0_rtx);
4434 temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
4435 gen_rtx_LABEL_REF (VOIDmode, end_label), pc_rtx);
4436 temp = gen_rtx_SET (VOIDmode, pc_rtx, temp);
4437 emit_jump_insn (temp);
4438
4439 s390_load_address (addr0,
4440 gen_rtx_PLUS (Pmode, addr0, GEN_INT (256)));
4441 s390_load_address (addr1,
4442 gen_rtx_PLUS (Pmode, addr1, GEN_INT (256)));
4443
4444 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
4445 OPTAB_DIRECT);
4446 if (temp != blocks)
4447 emit_move_insn (blocks, temp);
4448
4449 emit_cmp_and_jump_insns (blocks, const0_rtx,
4450 EQ, NULL_RTX, mode, 1, loop_end_label);
4451
4452 emit_jump (loop_start_label);
4453 emit_label (loop_end_label);
4454
4455 emit_insn (gen_cmpmem_short (op0, op1,
4456 convert_to_mode (Pmode, count, 1)));
4457 emit_label (end_label);
4458
4459 emit_insn (gen_cmpint (target, ccreg));
4460 }
4461 return true;
4462 }
4463
4464
4465 /* Expand conditional increment or decrement using alc/slb instructions.
4466 Should generate code setting DST to either SRC or SRC + INCREMENT,
4467 depending on the result of the comparison CMP_OP0 CMP_CODE CMP_OP1.
4468 Returns true if successful, false otherwise.
4469
4470 That makes it possible to implement some if-constructs without jumps, e.g.:
4471 (borrow = CC0 | CC1 and carry = CC2 | CC3)
4472 unsigned int a, b, c;
4473 if (a < b) c++; -> CCU b > a -> CC2; c += carry;
4474 if (a < b) c--; -> CCL3 a - b -> borrow; c -= borrow;
4475 if (a <= b) c++; -> CCL3 b - a -> borrow; c += carry;
4476 if (a <= b) c--; -> CCU a <= b -> borrow; c -= borrow;
4477
4478 Checks for EQ and NE with a nonzero value need an additional xor, e.g.:
4479 if (a == b) c++; -> CCL3 a ^= b; 0 - a -> borrow; c += carry;
4480 if (a == b) c--; -> CCU a ^= b; a <= 0 -> CC0 | CC1; c -= borrow;
4481 if (a != b) c++; -> CCU a ^= b; a > 0 -> CC2; c += carry;
4482 if (a != b) c--; -> CCL3 a ^= b; 0 - a -> borrow; c -= borrow; */
4483
4484 bool
4485 s390_expand_addcc (enum rtx_code cmp_code, rtx cmp_op0, rtx cmp_op1,
4486 rtx dst, rtx src, rtx increment)
4487 {
4488 enum machine_mode cmp_mode;
4489 enum machine_mode cc_mode;
4490 rtx op_res;
4491 rtx insn;
4492 rtvec p;
4493 int ret;
4494
4495 if ((GET_MODE (cmp_op0) == SImode || GET_MODE (cmp_op0) == VOIDmode)
4496 && (GET_MODE (cmp_op1) == SImode || GET_MODE (cmp_op1) == VOIDmode))
4497 cmp_mode = SImode;
4498 else if ((GET_MODE (cmp_op0) == DImode || GET_MODE (cmp_op0) == VOIDmode)
4499 && (GET_MODE (cmp_op1) == DImode || GET_MODE (cmp_op1) == VOIDmode))
4500 cmp_mode = DImode;
4501 else
4502 return false;
4503
4504 /* Try ADD LOGICAL WITH CARRY. */
4505 if (increment == const1_rtx)
4506 {
4507 /* Determine CC mode to use. */
4508 if (cmp_code == EQ || cmp_code == NE)
4509 {
4510 if (cmp_op1 != const0_rtx)
4511 {
4512 cmp_op0 = expand_simple_binop (cmp_mode, XOR, cmp_op0, cmp_op1,
4513 NULL_RTX, 0, OPTAB_WIDEN);
4514 cmp_op1 = const0_rtx;
4515 }
4516
4517 cmp_code = cmp_code == EQ ? LEU : GTU;
4518 }
4519
4520 if (cmp_code == LTU || cmp_code == LEU)
4521 {
4522 rtx tem = cmp_op0;
4523 cmp_op0 = cmp_op1;
4524 cmp_op1 = tem;
4525 cmp_code = swap_condition (cmp_code);
4526 }
4527
4528 switch (cmp_code)
4529 {
4530 case GTU:
4531 cc_mode = CCUmode;
4532 break;
4533
4534 case GEU:
4535 cc_mode = CCL3mode;
4536 break;
4537
4538 default:
4539 return false;
4540 }
4541
4542 /* Emit comparison instruction pattern. */
4543 if (!register_operand (cmp_op0, cmp_mode))
4544 cmp_op0 = force_reg (cmp_mode, cmp_op0);
4545
4546 insn = gen_rtx_SET (VOIDmode, gen_rtx_REG (cc_mode, CC_REGNUM),
4547 gen_rtx_COMPARE (cc_mode, cmp_op0, cmp_op1));
4548 /* We use insn_invalid_p here to add clobbers if required. */
4549 ret = insn_invalid_p (emit_insn (insn), false);
4550 gcc_assert (!ret);
4551
4552 /* Emit ALC instruction pattern. */
4553 op_res = gen_rtx_fmt_ee (cmp_code, GET_MODE (dst),
4554 gen_rtx_REG (cc_mode, CC_REGNUM),
4555 const0_rtx);
4556
4557 if (src != const0_rtx)
4558 {
4559 if (!register_operand (src, GET_MODE (dst)))
4560 src = force_reg (GET_MODE (dst), src);
4561
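/* The alc patterns in s390.md match a nested (plus (plus ...) ...)
shape; add an explicit zero addend so that shape is preserved even
though it contributes nothing to the sum. */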
4562 op_res = gen_rtx_PLUS (GET_MODE (dst), op_res, src);
4563 op_res = gen_rtx_PLUS (GET_MODE (dst), op_res, const0_rtx);
4564 }
4565
4566 p = rtvec_alloc (2);
4567 RTVEC_ELT (p, 0) =
4568 gen_rtx_SET (VOIDmode, dst, op_res);
4569 RTVEC_ELT (p, 1) =
4570 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
4571 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
4572
4573 return true;
4574 }
4575
4576 /* Try SUBTRACT LOGICAL WITH BORROW. */
4577 if (increment == constm1_rtx)
4578 {
4579 /* Determine CC mode to use. */
4580 if (cmp_code == EQ || cmp_code == NE)
4581 {
4582 if (cmp_op1 != const0_rtx)
4583 {
4584 cmp_op0 = expand_simple_binop (cmp_mode, XOR, cmp_op0, cmp_op1,
4585 NULL_RTX, 0, OPTAB_WIDEN);
4586 cmp_op1 = const0_rtx;
4587 }
4588
4589 cmp_code = cmp_code == EQ ? LEU : GTU;
4590 }
4591
4592 if (cmp_code == GTU || cmp_code == GEU)
4593 {
4594 rtx tem = cmp_op0;
4595 cmp_op0 = cmp_op1;
4596 cmp_op1 = tem;
4597 cmp_code = swap_condition (cmp_code);
4598 }
4599
4600 switch (cmp_code)
4601 {
4602 case LEU:
4603 cc_mode = CCUmode;
4604 break;
4605
4606 case LTU:
4607 cc_mode = CCL3mode;
4608 break;
4609
4610 default:
4611 return false;
4612 }
4613
4614 /* Emit comparison instruction pattern. */
4615 if (!register_operand (cmp_op0, cmp_mode))
4616 cmp_op0 = force_reg (cmp_mode, cmp_op0);
4617
4618 insn = gen_rtx_SET (VOIDmode, gen_rtx_REG (cc_mode, CC_REGNUM),
4619 gen_rtx_COMPARE (cc_mode, cmp_op0, cmp_op1));
4620 /* We use insn_invalid_p here to add clobbers if required. */
4621 ret = insn_invalid_p (emit_insn (insn), false);
4622 gcc_assert (!ret);
4623
4624 /* Emit SLB instruction pattern. */
4625 if (!register_operand (src, GET_MODE (dst)))
4626 src = force_reg (GET_MODE (dst), src);
4627
4628 op_res = gen_rtx_MINUS (GET_MODE (dst),
4629 gen_rtx_MINUS (GET_MODE (dst), src, const0_rtx),
4630 gen_rtx_fmt_ee (cmp_code, GET_MODE (dst),
4631 gen_rtx_REG (cc_mode, CC_REGNUM),
4632 const0_rtx));
4633 p = rtvec_alloc (2);
4634 RTVEC_ELT (p, 0) =
4635 gen_rtx_SET (VOIDmode, dst, op_res);
4636 RTVEC_ELT (p, 1) =
4637 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
4638 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
4639
4640 return true;
4641 }
4642
4643 return false;
4644 }
4645
4646 /* Expand code for the insv template. Return true if successful. */
4647
4648 bool
4649 s390_expand_insv (rtx dest, rtx op1, rtx op2, rtx src)
4650 {
4651 int bitsize = INTVAL (op1);
4652 int bitpos = INTVAL (op2);
4653 enum machine_mode mode = GET_MODE (dest);
4654 enum machine_mode smode;
4655 int smode_bsize, mode_bsize;
4656 rtx op, clobber;
4657
4658 if (bitsize + bitpos > GET_MODE_BITSIZE (mode))
4659 return false;
4660
4661 /* Generate INSERT IMMEDIATE (IILL et al). */
4662 /* (set (ze (reg)) (const_int)). */
4663 if (TARGET_ZARCH
4664 && register_operand (dest, word_mode)
4665 && (bitpos % 16) == 0
4666 && (bitsize % 16) == 0
4667 && const_int_operand (src, VOIDmode))
4668 {
4669 HOST_WIDE_INT val = INTVAL (src);
4670 int regpos = bitpos + bitsize;
4671
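/* Insert VAL piecewise, 16 bits (or 32 bits with the extended-immediate
facility) at a time, starting with the least significant part of the
field. */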
4672 while (regpos > bitpos)
4673 {
4674 enum machine_mode putmode;
4675 int putsize;
4676
4677 if (TARGET_EXTIMM && (regpos % 32 == 0) && (regpos >= bitpos + 32))
4678 putmode = SImode;
4679 else
4680 putmode = HImode;
4681
4682 putsize = GET_MODE_BITSIZE (putmode);
4683 regpos -= putsize;
4684 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest,
4685 GEN_INT (putsize),
4686 GEN_INT (regpos)),
4687 gen_int_mode (val, putmode));
4688 val >>= putsize;
4689 }
4690 gcc_assert (regpos == bitpos);
4691 return true;
4692 }
4693
4694 smode = smallest_mode_for_size (bitsize, MODE_INT);
4695 smode_bsize = GET_MODE_BITSIZE (smode);
4696 mode_bsize = GET_MODE_BITSIZE (mode);
4697
4698 /* Generate STORE CHARACTERS UNDER MASK (STCM et al). */
4699 if (bitpos == 0
4700 && (bitsize % BITS_PER_UNIT) == 0
4701 && MEM_P (dest)
4702 && (register_operand (src, word_mode)
4703 || const_int_operand (src, VOIDmode)))
4704 {
4705 /* Emit standard pattern if possible. */
4706 if (smode_bsize == bitsize)
4707 {
4708 emit_move_insn (adjust_address (dest, smode, 0),
4709 gen_lowpart (smode, src));
4710 return true;
4711 }
4712
4713 /* (set (ze (mem)) (const_int)). */
4714 else if (const_int_operand (src, VOIDmode))
4715 {
4716 int size = bitsize / BITS_PER_UNIT;
4717 rtx src_mem = adjust_address (force_const_mem (word_mode, src),
4718 BLKmode,
4719 UNITS_PER_WORD - size);
4720
4721 dest = adjust_address (dest, BLKmode, 0);
4722 set_mem_size (dest, size);
4723 s390_expand_movmem (dest, src_mem, GEN_INT (size));
4724 return true;
4725 }
4726
4727 /* (set (ze (mem)) (reg)). */
4728 else if (register_operand (src, word_mode))
4729 {
4730 if (bitsize <= 32)
4731 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest, op1,
4732 const0_rtx), src);
4733 else
4734 {
4735 /* Emit st,stcmh sequence. */
4736 int stcmh_width = bitsize - 32;
4737 int size = stcmh_width / BITS_PER_UNIT;
4738
4739 emit_move_insn (adjust_address (dest, SImode, size),
4740 gen_lowpart (SImode, src));
4741 set_mem_size (dest, size);
4742 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest,
4743 GEN_INT (stcmh_width),
4744 const0_rtx),
4745 gen_rtx_LSHIFTRT (word_mode, src, GEN_INT (32)));
4746 }
4747 return true;
4748 }
4749 }
4750
4751 /* Generate INSERT CHARACTERS UNDER MASK (IC, ICM et al). */
4752 if ((bitpos % BITS_PER_UNIT) == 0
4753 && (bitsize % BITS_PER_UNIT) == 0
4754 && (bitpos & 32) == ((bitpos + bitsize - 1) & 32)
4755 && MEM_P (src)
4756 && (mode == DImode || mode == SImode)
4757 && register_operand (dest, mode))
4758 {
4759 /* Emit a strict_low_part pattern if possible. */
4760 if (smode_bsize == bitsize && bitpos == mode_bsize - smode_bsize)
4761 {
4762 op = gen_rtx_STRICT_LOW_PART (VOIDmode, gen_lowpart (smode, dest));
4763 op = gen_rtx_SET (VOIDmode, op, gen_lowpart (smode, src));
4764 clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
4765 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clobber)));
4766 return true;
4767 }
4768
4769 /* ??? There are more powerful versions of ICM that are not
4770 completely represented in the md file. */
4771 }
4772
4773 /* For z10, generate ROTATE THEN INSERT SELECTED BITS (RISBG et al). */
4774 if (TARGET_Z10 && (mode == DImode || mode == SImode))
4775 {
4776 enum machine_mode mode_s = GET_MODE (src);
4777
4778 if (mode_s == VOIDmode)
4779 {
4780 /* Assume const_int etc already in the proper mode. */
4781 src = force_reg (mode, src);
4782 }
4783 else if (mode_s != mode)
4784 {
4785 gcc_assert (GET_MODE_BITSIZE (mode_s) >= bitsize);
4786 src = force_reg (mode_s, src);
4787 src = gen_lowpart (mode, src);
4788 }
4789
4790 op = gen_rtx_ZERO_EXTRACT (mode, dest, op1, op2);
4791 op = gen_rtx_SET (VOIDmode, op, src);
4792
4793 if (!TARGET_ZEC12)
4794 {
4795 clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
4796 op = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clobber));
4797 }
4798 emit_insn (op);
4799
4800 return true;
4801 }
4802
4803 return false;
4804 }
4805
4806 /* A subroutine of s390_expand_cs_hqi and s390_expand_atomic which returns a
4807 register that holds VAL of mode MODE shifted by COUNT bits. */
4808
4809 static inline rtx
4810 s390_expand_mask_and_shift (rtx val, enum machine_mode mode, rtx count)
4811 {
4812 val = expand_simple_binop (SImode, AND, val, GEN_INT (GET_MODE_MASK (mode)),
4813 NULL_RTX, 1, OPTAB_DIRECT);
4814 return expand_simple_binop (SImode, ASHIFT, val, count,
4815 NULL_RTX, 1, OPTAB_DIRECT);
4816 }
4817
4818 /* Structure to hold the initial parameters for a compare_and_swap operation
4819 in HImode and QImode. */
4820
4821 struct alignment_context
4822 {
4823 rtx memsi; /* SI aligned memory location. */
4824 rtx shift; /* Bit offset with regard to lsb. */
4825 rtx modemask; /* Mask of the HQImode shifted by SHIFT bits. */
4826 rtx modemaski; /* ~modemask */
4827 bool aligned; /* True if memory is aligned, false else. */
4828 };
4829
4830 /* A subroutine of s390_expand_cs_hqi and s390_expand_atomic to initialize
4831 structure AC for transparent simplification if the memory alignment is known
4832 to be at least 32bit. MEM is the memory location for the actual operation
4833 and MODE its mode. */
4834
4835 static void
4836 init_alignment_context (struct alignment_context *ac, rtx mem,
4837 enum machine_mode mode)
4838 {
4839 ac->shift = GEN_INT (GET_MODE_SIZE (SImode) - GET_MODE_SIZE (mode));
4840 ac->aligned = (MEM_ALIGN (mem) >= GET_MODE_BITSIZE (SImode));
4841
4842 if (ac->aligned)
4843 ac->memsi = adjust_address (mem, SImode, 0); /* Memory is aligned. */
4844 else
4845 {
4846 /* Alignment is unknown. */
4847 rtx byteoffset, addr, align;
4848
4849 /* Force the address into a register. */
4850 addr = force_reg (Pmode, XEXP (mem, 0));
4851
4852 /* Align it to SImode. */
4853 align = expand_simple_binop (Pmode, AND, addr,
4854 GEN_INT (-GET_MODE_SIZE (SImode)),
4855 NULL_RTX, 1, OPTAB_DIRECT);
4856 /* Generate MEM. */
4857 ac->memsi = gen_rtx_MEM (SImode, align);
4858 MEM_VOLATILE_P (ac->memsi) = MEM_VOLATILE_P (mem);
4859 set_mem_alias_set (ac->memsi, ALIAS_SET_MEMORY_BARRIER);
4860 set_mem_align (ac->memsi, GET_MODE_BITSIZE (SImode));
4861
4862 /* Calculate shiftcount. */
4863 byteoffset = expand_simple_binop (Pmode, AND, addr,
4864 GEN_INT (GET_MODE_SIZE (SImode) - 1),
4865 NULL_RTX, 1, OPTAB_DIRECT);
4866 /* As we already have some offset, evaluate the remaining distance. */
4867 ac->shift = expand_simple_binop (SImode, MINUS, ac->shift, byteoffset,
4868 NULL_RTX, 1, OPTAB_DIRECT);
4869 }
4870
4871 /* Shift is the byte count, but we need the bitcount. */
4872 ac->shift = expand_simple_binop (SImode, ASHIFT, ac->shift, GEN_INT (3),
4873 NULL_RTX, 1, OPTAB_DIRECT);
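/* Example: a QImode operand at address A with A % 4 == 1 is accessed
through the SImode word at A - 1; SHIFT becomes (3 - 1) * 8 = 16, the
distance of the byte from the least significant bit of that word. */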
4874
4875 /* Calculate masks. */
4876 ac->modemask = expand_simple_binop (SImode, ASHIFT,
4877 GEN_INT (GET_MODE_MASK (mode)),
4878 ac->shift, NULL_RTX, 1, OPTAB_DIRECT);
4879 ac->modemaski = expand_simple_unop (SImode, NOT, ac->modemask,
4880 NULL_RTX, 1);
4881 }
4882
4883 /* A subroutine of s390_expand_cs_hqi. Insert INS into VAL. If possible,
4884 use a single insv insn into SEQ2. Otherwise, put prep insns in SEQ1 and
4885 perform the merge in SEQ2. */
4886
4887 static rtx
4888 s390_two_part_insv (struct alignment_context *ac, rtx *seq1, rtx *seq2,
4889 enum machine_mode mode, rtx val, rtx ins)
4890 {
4891 rtx tmp;
4892
4893 if (ac->aligned)
4894 {
4895 start_sequence ();
4896 tmp = copy_to_mode_reg (SImode, val);
4897 if (s390_expand_insv (tmp, GEN_INT (GET_MODE_BITSIZE (mode)),
4898 const0_rtx, ins))
4899 {
4900 *seq1 = NULL;
4901 *seq2 = get_insns ();
4902 end_sequence ();
4903 return tmp;
4904 }
4905 end_sequence ();
4906 }
4907
4908 /* Failed to use insv. Generate a two part shift and mask. */
4909 start_sequence ();
4910 tmp = s390_expand_mask_and_shift (ins, mode, ac->shift);
4911 *seq1 = get_insns ();
4912 end_sequence ();
4913
4914 start_sequence ();
4915 tmp = expand_simple_binop (SImode, IOR, tmp, val, NULL_RTX, 1, OPTAB_DIRECT);
4916 *seq2 = get_insns ();
4917 end_sequence ();
4918
4919 return tmp;
4920 }
4921
4922 /* Expand an atomic compare and swap operation for HImode and QImode. MEM is
4923 the memory location, CMP the old value to compare MEM with and NEW_RTX the
4924 value to set if CMP == MEM. */
4925
4926 void
4927 s390_expand_cs_hqi (enum machine_mode mode, rtx btarget, rtx vtarget, rtx mem,
4928 rtx cmp, rtx new_rtx, bool is_weak)
4929 {
4930 struct alignment_context ac;
4931 rtx cmpv, newv, val, cc, seq0, seq1, seq2, seq3;
4932 rtx res = gen_reg_rtx (SImode);
4933 rtx csloop = NULL, csend = NULL;
4934
4935 gcc_assert (MEM_P (mem));
4936
4937 init_alignment_context (&ac, mem, mode);
4938
4939 /* Load full word. Subsequent loads are performed by CS. */
4940 val = expand_simple_binop (SImode, AND, ac.memsi, ac.modemaski,
4941 NULL_RTX, 1, OPTAB_DIRECT);
4942
4943 /* Prepare insertions of cmp and new_rtx into the loaded value. When
4944 possible, we try to use insv to make this happen efficiently. If
4945 that fails we'll generate code both inside and outside the loop. */
4946 cmpv = s390_two_part_insv (&ac, &seq0, &seq2, mode, val, cmp);
4947 newv = s390_two_part_insv (&ac, &seq1, &seq3, mode, val, new_rtx);
4948
4949 if (seq0)
4950 emit_insn (seq0);
4951 if (seq1)
4952 emit_insn (seq1);
4953
4954 /* Start CS loop. */
4955 if (!is_weak)
4956 {
4957 /* Begin assuming success. */
4958 emit_move_insn (btarget, const1_rtx);
4959
4960 csloop = gen_label_rtx ();
4961 csend = gen_label_rtx ();
4962 emit_label (csloop);
4963 }
4964
4965 /* val = "<mem>00..0<mem>"
4966 * cmp = "00..0<cmp>00..0"
4967 * new = "00..0<new>00..0"
4968 */
4969
4970 emit_insn (seq2);
4971 emit_insn (seq3);
4972
4973 cc = s390_emit_compare_and_swap (EQ, res, ac.memsi, cmpv, newv);
4974 if (is_weak)
4975 emit_insn (gen_cstorecc4 (btarget, cc, XEXP (cc, 0), XEXP (cc, 1)));
4976 else
4977 {
4978 rtx tmp;
4979
4980 /* Jump to end if we're done (likely?). */
4981 s390_emit_jump (csend, cc);
4982
4983 /* Check for changes outside the mode, and loop back if so.
4984 Arrange the moves so that the compare is adjacent to the
4985 branch so that we can generate CRJ. */
4986 tmp = copy_to_reg (val);
4987 force_expand_binop (SImode, and_optab, res, ac.modemaski, val,
4988 1, OPTAB_DIRECT);
4989 cc = s390_emit_compare (NE, val, tmp);
4990 s390_emit_jump (csloop, cc);
4991
4992 /* Failed. */
4993 emit_move_insn (btarget, const0_rtx);
4994 emit_label (csend);
4995 }
4996
4997 /* Return the correct part of the bitfield. */
4998 convert_move (vtarget, expand_simple_binop (SImode, LSHIFTRT, res, ac.shift,
4999 NULL_RTX, 1, OPTAB_DIRECT), 1);
5000 }
5001
5002 /* Expand an atomic operation CODE of mode MODE. MEM is the memory location
5003 and VAL the value to play with. If AFTER is true then store the value
5004 MEM holds after the operation, if AFTER is false then store the value MEM
5005 holds before the operation. If TARGET is zero then discard that value, else
5006 store it to TARGET. */
5007
5008 void
5009 s390_expand_atomic (enum machine_mode mode, enum rtx_code code,
5010 rtx target, rtx mem, rtx val, bool after)
5011 {
5012 struct alignment_context ac;
5013 rtx cmp;
5014 rtx new_rtx = gen_reg_rtx (SImode);
5015 rtx orig = gen_reg_rtx (SImode);
5016 rtx csloop = gen_label_rtx ();
5017
5018 gcc_assert (!target || register_operand (target, VOIDmode));
5019 gcc_assert (MEM_P (mem));
5020
5021 init_alignment_context (&ac, mem, mode);
5022
5023 /* Shift val to the correct bit positions.
5024 Preserve "icm", but prevent "ex icm". */
5025 if (!(ac.aligned && code == SET && MEM_P (val)))
5026 val = s390_expand_mask_and_shift (val, mode, ac.shift);
5027
5028 /* Further preparation insns. */
5029 if (code == PLUS || code == MINUS)
5030 emit_move_insn (orig, val);
5031 else if (code == MULT || code == AND) /* val = "11..1<val>11..1" */
5032 val = expand_simple_binop (SImode, XOR, val, ac.modemaski,
5033 NULL_RTX, 1, OPTAB_DIRECT);
5034
5035 /* Load full word. Subsequent loads are performed by CS. */
5036 cmp = force_reg (SImode, ac.memsi);
5037
5038 /* Start CS loop. */
5039 emit_label (csloop);
5040 emit_move_insn (new_rtx, cmp);
5041
5042 /* Patch new with val at correct position. */
5043 switch (code)
5044 {
5045 case PLUS:
5046 case MINUS:
5047 val = expand_simple_binop (SImode, code, new_rtx, orig,
5048 NULL_RTX, 1, OPTAB_DIRECT);
5049 val = expand_simple_binop (SImode, AND, val, ac.modemask,
5050 NULL_RTX, 1, OPTAB_DIRECT);
5051 /* FALLTHRU */
5052 case SET:
5053 if (ac.aligned && MEM_P (val))
5054 store_bit_field (new_rtx, GET_MODE_BITSIZE (mode), 0,
5055 0, 0, SImode, val);
5056 else
5057 {
5058 new_rtx = expand_simple_binop (SImode, AND, new_rtx, ac.modemaski,
5059 NULL_RTX, 1, OPTAB_DIRECT);
5060 new_rtx = expand_simple_binop (SImode, IOR, new_rtx, val,
5061 NULL_RTX, 1, OPTAB_DIRECT);
5062 }
5063 break;
5064 case AND:
5065 case IOR:
5066 case XOR:
5067 new_rtx = expand_simple_binop (SImode, code, new_rtx, val,
5068 NULL_RTX, 1, OPTAB_DIRECT);
5069 break;
5070 case MULT: /* NAND */
5071 new_rtx = expand_simple_binop (SImode, AND, new_rtx, val,
5072 NULL_RTX, 1, OPTAB_DIRECT);
5073 new_rtx = expand_simple_binop (SImode, XOR, new_rtx, ac.modemask,
5074 NULL_RTX, 1, OPTAB_DIRECT);
5075 break;
5076 default:
5077 gcc_unreachable ();
5078 }
5079
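/* If the CS fails, CMP is updated with the current memory word and we
loop back to recompute NEW_RTX from it. */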
5080 s390_emit_jump (csloop, s390_emit_compare_and_swap (NE, cmp,
5081 ac.memsi, cmp, new_rtx));
5082
5083 /* Return the correct part of the bitfield. */
5084 if (target)
5085 convert_move (target, expand_simple_binop (SImode, LSHIFTRT,
5086 after ? new_rtx : cmp, ac.shift,
5087 NULL_RTX, 1, OPTAB_DIRECT), 1);
5088 }
5089
5090 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
5091 We need to emit DTP-relative relocations. */
5092
5093 static void s390_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
5094
5095 static void
5096 s390_output_dwarf_dtprel (FILE *file, int size, rtx x)
5097 {
5098 switch (size)
5099 {
5100 case 4:
5101 fputs ("\t.long\t", file);
5102 break;
5103 case 8:
5104 fputs ("\t.quad\t", file);
5105 break;
5106 default:
5107 gcc_unreachable ();
5108 }
5109 output_addr_const (file, x);
5110 fputs ("@DTPOFF", file);
5111 }
5112
5113 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
5114 /* Implement TARGET_MANGLE_TYPE. */
5115
5116 static const char *
5117 s390_mangle_type (const_tree type)
5118 {
5119 if (TYPE_MAIN_VARIANT (type) == long_double_type_node
5120 && TARGET_LONG_DOUBLE_128)
5121 return "g";
5122
5123 /* For all other types, use normal C++ mangling. */
5124 return NULL;
5125 }
5126 #endif
5127
5128 /* In the name of slightly smaller debug output, and to cater to
5129 general assembler lossage, recognize various UNSPEC sequences
5130 and turn them back into a direct symbol reference. */
5131
5132 static rtx
5133 s390_delegitimize_address (rtx orig_x)
5134 {
5135 rtx x, y;
5136
5137 orig_x = delegitimize_mem_from_attrs (orig_x);
5138 x = orig_x;
5139
5140 /* Extract the symbol ref from:
5141 (plus:SI (reg:SI 12 %r12)
5142 (const:SI (unspec:SI [(symbol_ref/f:SI ("*.LC0"))]
5143 UNSPEC_GOTOFF/PLTOFF)))
5144 and
5145 (plus:SI (reg:SI 12 %r12)
5146 (const:SI (plus:SI (unspec:SI [(symbol_ref:SI ("L"))]
5147 UNSPEC_GOTOFF/PLTOFF)
5148 (const_int 4 [0x4])))) */
5149 if (GET_CODE (x) == PLUS
5150 && REG_P (XEXP (x, 0))
5151 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM
5152 && GET_CODE (XEXP (x, 1)) == CONST)
5153 {
5154 HOST_WIDE_INT offset = 0;
5155
5156 /* The const operand. */
5157 y = XEXP (XEXP (x, 1), 0);
5158
5159 if (GET_CODE (y) == PLUS
5160 && GET_CODE (XEXP (y, 1)) == CONST_INT)
5161 {
5162 offset = INTVAL (XEXP (y, 1));
5163 y = XEXP (y, 0);
5164 }
5165
5166 if (GET_CODE (y) == UNSPEC
5167 && (XINT (y, 1) == UNSPEC_GOTOFF
5168 || XINT (y, 1) == UNSPEC_PLTOFF))
5169 return plus_constant (Pmode, XVECEXP (y, 0, 0), offset);
5170 }
5171
5172 if (GET_CODE (x) != MEM)
5173 return orig_x;
5174
5175 x = XEXP (x, 0);
5176 if (GET_CODE (x) == PLUS
5177 && GET_CODE (XEXP (x, 1)) == CONST
5178 && GET_CODE (XEXP (x, 0)) == REG
5179 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM)
5180 {
5181 y = XEXP (XEXP (x, 1), 0);
5182 if (GET_CODE (y) == UNSPEC
5183 && XINT (y, 1) == UNSPEC_GOT)
5184 y = XVECEXP (y, 0, 0);
5185 else
5186 return orig_x;
5187 }
5188 else if (GET_CODE (x) == CONST)
5189 {
5190 /* Extract the symbol ref from:
5191 (mem:QI (const:DI (unspec:DI [(symbol_ref:DI ("foo"))]
5192 UNSPEC_PLT/GOTENT))) */
5193
5194 y = XEXP (x, 0);
5195 if (GET_CODE (y) == UNSPEC
5196 && (XINT (y, 1) == UNSPEC_GOTENT
5197 || XINT (y, 1) == UNSPEC_PLT))
5198 y = XVECEXP (y, 0, 0);
5199 else
5200 return orig_x;
5201 }
5202 else
5203 return orig_x;
5204
5205 if (GET_MODE (orig_x) != Pmode)
5206 {
5207 if (GET_MODE (orig_x) == BLKmode)
5208 return orig_x;
5209 y = lowpart_subreg (GET_MODE (orig_x), y, Pmode);
5210 if (y == NULL_RTX)
5211 return orig_x;
5212 }
5213 return y;
5214 }
5215
5216 /* Output operand OP to stdio stream FILE.
5217 OP is an address (register + offset) which is not used to address data;
5218 instead the rightmost bits are interpreted as the value. */
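/* For instance, a shift count of (plus (reg 1) (const_int 7)) is
printed as "7(%r1)". */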
5219
5220 static void
5221 print_shift_count_operand (FILE *file, rtx op)
5222 {
5223 HOST_WIDE_INT offset;
5224 rtx base;
5225
5226 /* Extract base register and offset. */
5227 if (!s390_decompose_shift_count (op, &base, &offset))
5228 gcc_unreachable ();
5229
5230 /* Sanity check. */
5231 if (base)
5232 {
5233 gcc_assert (GET_CODE (base) == REG);
5234 gcc_assert (REGNO (base) < FIRST_PSEUDO_REGISTER);
5235 gcc_assert (REGNO_REG_CLASS (REGNO (base)) == ADDR_REGS);
5236 }
5237
5238 /* Offsets are restricted to twelve bits. */
5239 fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset & ((1 << 12) - 1));
5240 if (base)
5241 fprintf (file, "(%s)", reg_names[REGNO (base)]);
5242 }
5243
5244 /* See 'get_some_local_dynamic_name'. */
5245
5246 static int
5247 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
5248 {
5249 rtx x = *px;
5250
5251 if (GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
5252 {
5253 x = get_pool_constant (x);
5254 return for_each_rtx (&x, get_some_local_dynamic_name_1, 0);
5255 }
5256
5257 if (GET_CODE (x) == SYMBOL_REF
5258 && tls_symbolic_operand (x) == TLS_MODEL_LOCAL_DYNAMIC)
5259 {
5260 cfun->machine->some_ld_name = XSTR (x, 0);
5261 return 1;
5262 }
5263
5264 return 0;
5265 }
5266
5267 /* Locate some local-dynamic symbol still in use by this function
5268 so that we can print its name in local-dynamic base patterns. */
5269
5270 static const char *
5271 get_some_local_dynamic_name (void)
5272 {
5273 rtx insn;
5274
5275 if (cfun->machine->some_ld_name)
5276 return cfun->machine->some_ld_name;
5277
5278 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
5279 if (INSN_P (insn)
5280 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
5281 return cfun->machine->some_ld_name;
5282
5283 gcc_unreachable ();
5284 }
5285
5286 /* Output machine-dependent UNSPECs occurring in address constant X
5287 in assembler syntax to stdio stream FILE. Returns true if the
5288 constant X could be recognized, false otherwise. */
5289
5290 static bool
5291 s390_output_addr_const_extra (FILE *file, rtx x)
5292 {
5293 if (GET_CODE (x) == UNSPEC && XVECLEN (x, 0) == 1)
5294 switch (XINT (x, 1))
5295 {
5296 case UNSPEC_GOTENT:
5297 output_addr_const (file, XVECEXP (x, 0, 0));
5298 fprintf (file, "@GOTENT");
5299 return true;
5300 case UNSPEC_GOT:
5301 output_addr_const (file, XVECEXP (x, 0, 0));
5302 fprintf (file, "@GOT");
5303 return true;
5304 case UNSPEC_GOTOFF:
5305 output_addr_const (file, XVECEXP (x, 0, 0));
5306 fprintf (file, "@GOTOFF");
5307 return true;
5308 case UNSPEC_PLT:
5309 output_addr_const (file, XVECEXP (x, 0, 0));
5310 fprintf (file, "@PLT");
5311 return true;
5312 case UNSPEC_PLTOFF:
5313 output_addr_const (file, XVECEXP (x, 0, 0));
5314 fprintf (file, "@PLTOFF");
5315 return true;
5316 case UNSPEC_TLSGD:
5317 output_addr_const (file, XVECEXP (x, 0, 0));
5318 fprintf (file, "@TLSGD");
5319 return true;
5320 case UNSPEC_TLSLDM:
5321 assemble_name (file, get_some_local_dynamic_name ());
5322 fprintf (file, "@TLSLDM");
5323 return true;
5324 case UNSPEC_DTPOFF:
5325 output_addr_const (file, XVECEXP (x, 0, 0));
5326 fprintf (file, "@DTPOFF");
5327 return true;
5328 case UNSPEC_NTPOFF:
5329 output_addr_const (file, XVECEXP (x, 0, 0));
5330 fprintf (file, "@NTPOFF");
5331 return true;
5332 case UNSPEC_GOTNTPOFF:
5333 output_addr_const (file, XVECEXP (x, 0, 0));
5334 fprintf (file, "@GOTNTPOFF");
5335 return true;
5336 case UNSPEC_INDNTPOFF:
5337 output_addr_const (file, XVECEXP (x, 0, 0));
5338 fprintf (file, "@INDNTPOFF");
5339 return true;
5340 }
5341
5342 if (GET_CODE (x) == UNSPEC && XVECLEN (x, 0) == 2)
5343 switch (XINT (x, 1))
5344 {
5345 case UNSPEC_POOL_OFFSET:
5346 x = gen_rtx_MINUS (GET_MODE (x), XVECEXP (x, 0, 0), XVECEXP (x, 0, 1));
5347 output_addr_const (file, x);
5348 return true;
5349 }
5350 return false;
5351 }
5352
5353 /* Output address operand ADDR in assembler syntax to
5354 stdio stream FILE. */
5355
5356 void
5357 print_operand_address (FILE *file, rtx addr)
5358 {
5359 struct s390_address ad;
5360
5361 if (s390_loadrelative_operand_p (addr, NULL, NULL))
5362 {
5363 if (!TARGET_Z10)
5364 {
5365 output_operand_lossage ("symbolic memory references are "
5366 "only supported on z10 or later");
5367 return;
5368 }
5369 output_addr_const (file, addr);
5370 return;
5371 }
5372
5373 if (!s390_decompose_address (addr, &ad)
5374 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
5375 || (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx))))
5376 output_operand_lossage ("cannot decompose address");
5377
5378 if (ad.disp)
5379 output_addr_const (file, ad.disp);
5380 else
5381 fprintf (file, "0");
5382
5383 if (ad.base && ad.indx)
5384 fprintf (file, "(%s,%s)", reg_names[REGNO (ad.indx)],
5385 reg_names[REGNO (ad.base)]);
5386 else if (ad.base)
5387 fprintf (file, "(%s)", reg_names[REGNO (ad.base)]);
5388 }
5389
5390 /* Output operand X in assembler syntax to stdio stream FILE.
5391 CODE specified the format flag. The following format flags
5392 are recognized:
5393
5394 'C': print opcode suffix for branch condition.
5395 'D': print opcode suffix for inverse branch condition.
5396 'E': print opcode suffix for branch on index instruction.
5397 'G': print the size of the operand in bytes.
5398 'J': print tls_load/tls_gdcall/tls_ldcall suffix.
5399 'M': print the second word of a TImode operand.
5400 'N': print the second word of a DImode operand.
5401 'O': print only the displacement of a memory reference.
5402 'R': print only the base register of a memory reference.
5403 'S': print S-type memory reference (base+displacement).
5404 'Y': print shift count operand.
5405
5406 'b': print integer X as if it's an unsigned byte.
5407 'c': print integer X as if it's a signed byte.
5408 'e': "end" of DImode contiguous bitmask X.
5409 'f': "end" of SImode contiguous bitmask X.
5410 'h': print integer X as if it's a signed halfword.
5411 'i': print the first nonzero HImode part of X.
5412 'j': print the first HImode part unequal to -1 of X.
5413 'k': print the first nonzero SImode part of X.
5414 'm': print the first SImode part unequal to -1 of X.
5415 'o': print integer X as if it's an unsigned 32bit word.
5416 's': "start" of DImode contiguous bitmask X.
5417 't': "start" of SImode contiguous bitmask X.
5418 'x': print integer X as if it's an unsigned halfword.
5419 */
5420
5421 void
5422 print_operand (FILE *file, rtx x, int code)
5423 {
5424 HOST_WIDE_INT ival;
5425
5426 switch (code)
5427 {
5428 case 'C':
5429 fprintf (file, s390_branch_condition_mnemonic (x, FALSE));
5430 return;
5431
5432 case 'D':
5433 fprintf (file, s390_branch_condition_mnemonic (x, TRUE));
5434 return;
5435
5436 case 'E':
5437 if (GET_CODE (x) == LE)
5438 fprintf (file, "l");
5439 else if (GET_CODE (x) == GT)
5440 fprintf (file, "h");
5441 else
5442 output_operand_lossage ("invalid comparison operator "
5443 "for 'E' output modifier");
5444 return;
5445
5446 case 'J':
5447 if (GET_CODE (x) == SYMBOL_REF)
5448 {
5449 fprintf (file, "%s", ":tls_load:");
5450 output_addr_const (file, x);
5451 }
5452 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD)
5453 {
5454 fprintf (file, "%s", ":tls_gdcall:");
5455 output_addr_const (file, XVECEXP (x, 0, 0));
5456 }
5457 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSLDM)
5458 {
5459 fprintf (file, "%s", ":tls_ldcall:");
5460 assemble_name (file, get_some_local_dynamic_name ());
5461 }
5462 else
5463 output_operand_lossage ("invalid reference for 'J' output modifier");
5464 return;
5465
5466 case 'G':
5467 fprintf (file, "%u", GET_MODE_SIZE (GET_MODE (x)));
5468 return;
5469
5470 case 'O':
5471 {
5472 struct s390_address ad;
5473 int ret;
5474
5475 if (!MEM_P (x))
5476 {
5477 output_operand_lossage ("memory reference expected for "
5478 "'O' output modifier");
5479 return;
5480 }
5481
5482 ret = s390_decompose_address (XEXP (x, 0), &ad);
5483
5484 if (!ret
5485 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
5486 || ad.indx)
5487 {
5488 output_operand_lossage ("invalid address for 'O' output modifier");
5489 return;
5490 }
5491
5492 if (ad.disp)
5493 output_addr_const (file, ad.disp);
5494 else
5495 fprintf (file, "0");
5496 }
5497 return;
5498
5499 case 'R':
5500 {
5501 struct s390_address ad;
5502 int ret;
5503
5504 if (!MEM_P (x))
5505 {
5506 output_operand_lossage ("memory reference expected for "
5507 "'R' output modifier");
5508 return;
5509 }
5510
5511 ret = s390_decompose_address (XEXP (x, 0), &ad);
5512
5513 if (!ret
5514 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
5515 || ad.indx)
5516 {
5517 output_operand_lossage ("invalid address for 'R' output modifier");
5518 return;
5519 }
5520
5521 if (ad.base)
5522 fprintf (file, "%s", reg_names[REGNO (ad.base)]);
5523 else
5524 fprintf (file, "0");
5525 }
5526 return;
5527
5528 case 'S':
5529 {
5530 struct s390_address ad;
5531 int ret;
5532
5533 if (!MEM_P (x))
5534 {
5535 output_operand_lossage ("memory reference expected for "
5536 "'S' output modifier");
5537 return;
5538 }
5539 ret = s390_decompose_address (XEXP (x, 0), &ad);
5540
5541 if (!ret
5542 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
5543 || ad.indx)
5544 {
5545 output_operand_lossage ("invalid address for 'S' output modifier");
5546 return;
5547 }
5548
5549 if (ad.disp)
5550 output_addr_const (file, ad.disp);
5551 else
5552 fprintf (file, "0");
5553
5554 if (ad.base)
5555 fprintf (file, "(%s)", reg_names[REGNO (ad.base)]);
5556 }
5557 return;
5558
5559 case 'N':
5560 if (GET_CODE (x) == REG)
5561 x = gen_rtx_REG (GET_MODE (x), REGNO (x) + 1);
5562 else if (GET_CODE (x) == MEM)
5563 x = change_address (x, VOIDmode,
5564 plus_constant (Pmode, XEXP (x, 0), 4));
5565 else
5566 output_operand_lossage ("register or memory expression expected "
5567 "for 'N' output modifier");
5568 break;
5569
5570 case 'M':
5571 if (GET_CODE (x) == REG)
5572 x = gen_rtx_REG (GET_MODE (x), REGNO (x) + 1);
5573 else if (GET_CODE (x) == MEM)
5574 x = change_address (x, VOIDmode,
5575 plus_constant (Pmode, XEXP (x, 0), 8));
5576 else
5577 output_operand_lossage ("register or memory expression expected "
5578 "for 'M' output modifier");
5579 break;
5580
5581 case 'Y':
5582 print_shift_count_operand (file, x);
5583 return;
5584 }
5585
5586 switch (GET_CODE (x))
5587 {
5588 case REG:
5589 fprintf (file, "%s", reg_names[REGNO (x)]);
5590 break;
5591
5592 case MEM:
5593 output_address (XEXP (x, 0));
5594 break;
5595
5596 case CONST:
5597 case CODE_LABEL:
5598 case LABEL_REF:
5599 case SYMBOL_REF:
5600 output_addr_const (file, x);
5601 break;
5602
5603 case CONST_INT:
5604 ival = INTVAL (x);
5605 switch (code)
5606 {
5607 case 0:
5608 break;
5609 case 'b':
5610 ival &= 0xff;
5611 break;
5612 case 'c':
5613 ival = ((ival & 0xff) ^ 0x80) - 0x80;
5614 break;
5615 case 'x':
5616 ival &= 0xffff;
5617 break;
5618 case 'h':
5619 ival = ((ival & 0xffff) ^ 0x8000) - 0x8000;
5620 break;
5621 case 'i':
5622 ival = s390_extract_part (x, HImode, 0);
5623 break;
5624 case 'j':
5625 ival = s390_extract_part (x, HImode, -1);
5626 break;
5627 case 'k':
5628 ival = s390_extract_part (x, SImode, 0);
5629 break;
5630 case 'm':
5631 ival = s390_extract_part (x, SImode, -1);
5632 break;
5633 case 'o':
5634 ival &= 0xffffffff;
5635 break;
5636 case 'e': case 'f':
5637 case 's': case 't':
5638 {
5639 int pos, len;
5640 bool ok;
5641
5642 len = (code == 's' || code == 'e' ? 64 : 32);
5643 ok = s390_contiguous_bitmask_p (ival, len, &pos, &len);
5644 gcc_assert (ok);
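/* Convert the LSB-relative POS/LEN pair into the MSB-relative bit
numbering used by the instruction set (e.g. the risbg start/end
operands): 's'/'t' yield the start bit, 'e'/'f' the end bit. */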
5645 if (code == 's' || code == 't')
5646 ival = 64 - pos - len;
5647 else
5648 ival = 64 - 1 - pos;
5649 }
5650 break;
5651 default:
5652 output_operand_lossage ("invalid constant for output modifier '%c'", code);
5653 }
5654 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
5655 break;
5656
5657 case CONST_DOUBLE:
5658 gcc_assert (GET_MODE (x) == VOIDmode);
5659 if (code == 'b')
5660 fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x) & 0xff);
5661 else if (code == 'x')
5662 fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x) & 0xffff);
5663 else if (code == 'h')
5664 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5665 ((CONST_DOUBLE_LOW (x) & 0xffff) ^ 0x8000) - 0x8000);
5666 else
5667 {
5668 if (code == 0)
5669 output_operand_lossage ("invalid constant - try using "
5670 "an output modifier");
5671 else
5672 output_operand_lossage ("invalid constant for output modifier '%c'",
5673 code);
5674 }
5675 break;
5676
5677 default:
5678 if (code == 0)
5679 output_operand_lossage ("invalid expression - try using "
5680 "an output modifier");
5681 else
5682 output_operand_lossage ("invalid expression for output "
5683 "modifier '%c'", code);
5684 break;
5685 }
5686 }
5687
5688 /* Target hook for assembling integer objects. We need to define it
5689 here to work around a bug in some versions of GAS, which couldn't
5690 handle values smaller than INT_MIN when printed in decimal. */
5691
5692 static bool
5693 s390_assemble_integer (rtx x, unsigned int size, int aligned_p)
5694 {
5695 if (size == 8 && aligned_p
5696 && GET_CODE (x) == CONST_INT && INTVAL (x) < INT_MIN)
5697 {
5698 fprintf (asm_out_file, "\t.quad\t" HOST_WIDE_INT_PRINT_HEX "\n",
5699 INTVAL (x));
5700 return true;
5701 }
5702 return default_assemble_integer (x, size, aligned_p);
5703 }
5704
5705 /* Returns true if register REGNO is used for forming
5706 a memory address in expression X. */
5707
5708 static bool
5709 reg_used_in_mem_p (int regno, rtx x)
5710 {
5711 enum rtx_code code = GET_CODE (x);
5712 int i, j;
5713 const char *fmt;
5714
5715 if (code == MEM)
5716 {
5717 if (refers_to_regno_p (regno, regno+1,
5718 XEXP (x, 0), 0))
5719 return true;
5720 }
5721 else if (code == SET
5722 && GET_CODE (SET_DEST (x)) == PC)
5723 {
5724 if (refers_to_regno_p (regno, regno+1,
5725 SET_SRC (x), 0))
5726 return true;
5727 }
5728
5729 fmt = GET_RTX_FORMAT (code);
5730 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
5731 {
5732 if (fmt[i] == 'e'
5733 && reg_used_in_mem_p (regno, XEXP (x, i)))
5734 return true;
5735
5736 else if (fmt[i] == 'E')
5737 for (j = 0; j < XVECLEN (x, i); j++)
5738 if (reg_used_in_mem_p (regno, XVECEXP (x, i, j)))
5739 return true;
5740 }
5741 return false;
5742 }
5743
5744 /* Returns true if expression DEP_RTX sets an address register
5745 used by instruction INSN to address memory. */
5746
5747 static bool
5748 addr_generation_dependency_p (rtx dep_rtx, rtx insn)
5749 {
5750 rtx target, pat;
5751
5752 if (NONJUMP_INSN_P (dep_rtx))
5753 dep_rtx = PATTERN (dep_rtx);
5754
5755 if (GET_CODE (dep_rtx) == SET)
5756 {
5757 target = SET_DEST (dep_rtx);
5758 if (GET_CODE (target) == STRICT_LOW_PART)
5759 target = XEXP (target, 0);
5760 while (GET_CODE (target) == SUBREG)
5761 target = SUBREG_REG (target);
5762
5763 if (GET_CODE (target) == REG)
5764 {
5765 int regno = REGNO (target);
5766
5767 if (s390_safe_attr_type (insn) == TYPE_LA)
5768 {
5769 pat = PATTERN (insn);
5770 if (GET_CODE (pat) == PARALLEL)
5771 {
5772 gcc_assert (XVECLEN (pat, 0) == 2);
5773 pat = XVECEXP (pat, 0, 0);
5774 }
5775 gcc_assert (GET_CODE (pat) == SET);
5776 return refers_to_regno_p (regno, regno+1, SET_SRC (pat), 0);
5777 }
5778 else if (get_attr_atype (insn) == ATYPE_AGEN)
5779 return reg_used_in_mem_p (regno, PATTERN (insn));
5780 }
5781 }
5782 return false;
5783 }
5784
5785 /* Return 1 if DEP_INSN sets a register that is used by INSN in the agen unit. */
5786
5787 int
5788 s390_agen_dep_p (rtx dep_insn, rtx insn)
5789 {
5790 rtx dep_rtx = PATTERN (dep_insn);
5791 int i;
5792
5793 if (GET_CODE (dep_rtx) == SET
5794 && addr_generation_dependency_p (dep_rtx, insn))
5795 return 1;
5796 else if (GET_CODE (dep_rtx) == PARALLEL)
5797 {
5798 for (i = 0; i < XVECLEN (dep_rtx, 0); i++)
5799 {
5800 if (addr_generation_dependency_p (XVECEXP (dep_rtx, 0, i), insn))
5801 return 1;
5802 }
5803 }
5804 return 0;
5805 }
5806
5807
5808 /* A C statement (sans semicolon) to update the integer scheduling priority
5809 INSN_PRIORITY (INSN). Increase the priority to execute INSN earlier,
5810 reduce the priority to execute INSN later. Do not define this hook if
5811 you do not need to adjust the scheduling priorities of insns.
5812
5813 A STD (floating-point store) instruction should be scheduled earlier,
5814 in order to use the bypass. */
5815 static int
5816 s390_adjust_priority (rtx insn ATTRIBUTE_UNUSED, int priority)
5817 {
5818 if (! INSN_P (insn))
5819 return priority;
5820
5821 if (s390_tune != PROCESSOR_2084_Z990
5822 && s390_tune != PROCESSOR_2094_Z9_109
5823 && s390_tune != PROCESSOR_2097_Z10
5824 && s390_tune != PROCESSOR_2817_Z196
5825 && s390_tune != PROCESSOR_2827_ZEC12)
5826 return priority;
5827
5828 switch (s390_safe_attr_type (insn))
5829 {
5830 case TYPE_FSTOREDF:
5831 case TYPE_FSTORESF:
5832 priority = priority << 3;
5833 break;
5834 case TYPE_STORE:
5835 case TYPE_STM:
5836 priority = priority << 1;
5837 break;
5838 default:
5839 break;
5840 }
5841 return priority;
5842 }
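
/* Illustrative note: for the tuning targets listed above the hook scales
   priorities rather than adding to them.  An FP store (TYPE_FSTOREDF or
   TYPE_FSTORESF) with priority 4 ends up with priority 32 (<< 3), a plain
   STORE or STM with priority 4 ends up with 8 (<< 1); all other insn types
   keep their original priority.  */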
5843
5844
5845 /* The number of instructions that can be issued per cycle. */
5846
5847 static int
5848 s390_issue_rate (void)
5849 {
5850 switch (s390_tune)
5851 {
5852 case PROCESSOR_2084_Z990:
5853 case PROCESSOR_2094_Z9_109:
5854 case PROCESSOR_2817_Z196:
5855 return 3;
5856 case PROCESSOR_2097_Z10:
5857 case PROCESSOR_2827_ZEC12:
5858 return 2;
5859 default:
5860 return 1;
5861 }
5862 }
5863
5864 static int
5865 s390_first_cycle_multipass_dfa_lookahead (void)
5866 {
5867 return 4;
5868 }
5869
5870 /* Annotate every literal pool reference in X by an UNSPEC_LTREF expression.
5871 Fix up MEMs as required. */
5872
5873 static void
5874 annotate_constant_pool_refs (rtx *x)
5875 {
5876 int i, j;
5877 const char *fmt;
5878
5879 gcc_assert (GET_CODE (*x) != SYMBOL_REF
5880 || !CONSTANT_POOL_ADDRESS_P (*x));
5881
5882 /* Literal pool references can only occur inside a MEM ... */
5883 if (GET_CODE (*x) == MEM)
5884 {
5885 rtx memref = XEXP (*x, 0);
5886
5887 if (GET_CODE (memref) == SYMBOL_REF
5888 && CONSTANT_POOL_ADDRESS_P (memref))
5889 {
5890 rtx base = cfun->machine->base_reg;
5891 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, memref, base),
5892 UNSPEC_LTREF);
5893
5894 *x = replace_equiv_address (*x, addr);
5895 return;
5896 }
5897
5898 if (GET_CODE (memref) == CONST
5899 && GET_CODE (XEXP (memref, 0)) == PLUS
5900 && GET_CODE (XEXP (XEXP (memref, 0), 1)) == CONST_INT
5901 && GET_CODE (XEXP (XEXP (memref, 0), 0)) == SYMBOL_REF
5902 && CONSTANT_POOL_ADDRESS_P (XEXP (XEXP (memref, 0), 0)))
5903 {
5904 HOST_WIDE_INT off = INTVAL (XEXP (XEXP (memref, 0), 1));
5905 rtx sym = XEXP (XEXP (memref, 0), 0);
5906 rtx base = cfun->machine->base_reg;
5907 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, sym, base),
5908 UNSPEC_LTREF);
5909
5910 *x = replace_equiv_address (*x, plus_constant (Pmode, addr, off));
5911 return;
5912 }
5913 }
5914
5915 /* ... or a load-address type pattern. */
5916 if (GET_CODE (*x) == SET)
5917 {
5918 rtx addrref = SET_SRC (*x);
5919
5920 if (GET_CODE (addrref) == SYMBOL_REF
5921 && CONSTANT_POOL_ADDRESS_P (addrref))
5922 {
5923 rtx base = cfun->machine->base_reg;
5924 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, addrref, base),
5925 UNSPEC_LTREF);
5926
5927 SET_SRC (*x) = addr;
5928 return;
5929 }
5930
5931 if (GET_CODE (addrref) == CONST
5932 && GET_CODE (XEXP (addrref, 0)) == PLUS
5933 && GET_CODE (XEXP (XEXP (addrref, 0), 1)) == CONST_INT
5934 && GET_CODE (XEXP (XEXP (addrref, 0), 0)) == SYMBOL_REF
5935 && CONSTANT_POOL_ADDRESS_P (XEXP (XEXP (addrref, 0), 0)))
5936 {
5937 HOST_WIDE_INT off = INTVAL (XEXP (XEXP (addrref, 0), 1));
5938 rtx sym = XEXP (XEXP (addrref, 0), 0);
5939 rtx base = cfun->machine->base_reg;
5940 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, sym, base),
5941 UNSPEC_LTREF);
5942
5943 SET_SRC (*x) = plus_constant (Pmode, addr, off);
5944 return;
5945 }
5946 }
5947
5948 /* Annotate LTREL_BASE as well. */
5949 if (GET_CODE (*x) == UNSPEC
5950 && XINT (*x, 1) == UNSPEC_LTREL_BASE)
5951 {
5952 rtx base = cfun->machine->base_reg;
5953 *x = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, XVECEXP (*x, 0, 0), base),
5954 UNSPEC_LTREL_BASE);
5955 return;
5956 }
5957
5958 fmt = GET_RTX_FORMAT (GET_CODE (*x));
5959 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
5960 {
5961 if (fmt[i] == 'e')
5962 {
5963 annotate_constant_pool_refs (&XEXP (*x, i));
5964 }
5965 else if (fmt[i] == 'E')
5966 {
5967 for (j = 0; j < XVECLEN (*x, i); j++)
5968 annotate_constant_pool_refs (&XVECEXP (*x, i, j));
5969 }
5970 }
5971 }
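
/* Sketch of the rewrite performed above (illustrative only).  A direct
   literal pool reference such as

       (mem (symbol_ref <pool constant>))

   becomes

       (mem (unspec [(symbol_ref <pool constant>) (reg <base>)] UNSPEC_LTREF))

   and a reference with displacement, (mem (const (plus (symbol_ref ...)
   (const_int OFF)))), becomes the same UNSPEC with OFF re-added on top.
   Load-address SETs and UNSPEC_LTREL_BASE expressions are annotated with
   the base register in the same way, so the dependence on the literal pool
   base register is explicit in the RTL.  */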
5972
5973 /* Split all branches that exceed the maximum distance.
5974 Returns true if this created a new literal pool entry. */
5975
5976 static int
5977 s390_split_branches (void)
5978 {
5979 rtx temp_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
5980 int new_literal = 0, ret;
5981 rtx insn, pat, tmp, target;
5982 rtx *label;
5983
5984 /* We need correct insn addresses. */
5985
5986 shorten_branches (get_insns ());
5987
5988 /* Find all branches that exceed 64KB, and split them. */
5989
5990 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
5991 {
5992 if (! JUMP_P (insn))
5993 continue;
5994
5995 pat = PATTERN (insn);
5996 if (GET_CODE (pat) == PARALLEL && XVECLEN (pat, 0) > 2)
5997 pat = XVECEXP (pat, 0, 0);
5998 if (GET_CODE (pat) != SET || SET_DEST (pat) != pc_rtx)
5999 continue;
6000
6001 if (GET_CODE (SET_SRC (pat)) == LABEL_REF)
6002 {
6003 label = &SET_SRC (pat);
6004 }
6005 else if (GET_CODE (SET_SRC (pat)) == IF_THEN_ELSE)
6006 {
6007 if (GET_CODE (XEXP (SET_SRC (pat), 1)) == LABEL_REF)
6008 label = &XEXP (SET_SRC (pat), 1);
6009 else if (GET_CODE (XEXP (SET_SRC (pat), 2)) == LABEL_REF)
6010 label = &XEXP (SET_SRC (pat), 2);
6011 else
6012 continue;
6013 }
6014 else
6015 continue;
6016
6017 if (get_attr_length (insn) <= 4)
6018 continue;
6019
6020 /* We are going to use the return register as scratch register,
6021 make sure it will be saved/restored by the prologue/epilogue. */
6022 cfun_frame_layout.save_return_addr_p = 1;
6023
6024 if (!flag_pic)
6025 {
6026 new_literal = 1;
6027 tmp = force_const_mem (Pmode, *label);
6028 tmp = emit_insn_before (gen_rtx_SET (Pmode, temp_reg, tmp), insn);
6029 INSN_ADDRESSES_NEW (tmp, -1);
6030 annotate_constant_pool_refs (&PATTERN (tmp));
6031
6032 target = temp_reg;
6033 }
6034 else
6035 {
6036 new_literal = 1;
6037 target = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, *label),
6038 UNSPEC_LTREL_OFFSET);
6039 target = gen_rtx_CONST (Pmode, target);
6040 target = force_const_mem (Pmode, target);
6041 tmp = emit_insn_before (gen_rtx_SET (Pmode, temp_reg, target), insn);
6042 INSN_ADDRESSES_NEW (tmp, -1);
6043 annotate_constant_pool_refs (&PATTERN (tmp));
6044
6045 target = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, XEXP (target, 0),
6046 cfun->machine->base_reg),
6047 UNSPEC_LTREL_BASE);
6048 target = gen_rtx_PLUS (Pmode, temp_reg, target);
6049 }
6050
6051 ret = validate_change (insn, label, target, 0);
6052 gcc_assert (ret);
6053 }
6054
6055 return new_literal;
6056 }
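
/* Sketch of the splitting done above (illustrative only).  A jump whose
   encoding is longer than 4 bytes -- i.e. whose target is out of range of
   the 64KB-relative branch instructions -- has its label operand replaced:
   without -fpic the label's address is put into the literal pool and
   loaded into r14 (RETURN_REGNUM), and the branch goes through r14; with
   -fpic an UNSPEC_LTREL_OFFSET pool entry is loaded into r14 and the
   branch target becomes r14 plus the corresponding UNSPEC_LTREL_BASE.
   The return value tells the caller that new pool entries may have been
   created.  */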
6057
6058
6059 /* Find an annotated literal pool symbol referenced in RTX X,
6060 and store it at REF. Will abort if X contains references to
6061 more than one such pool symbol; multiple references to the same
6062 symbol are allowed, however.
6063
6064 The rtx pointed to by REF must be initialized to NULL_RTX
6065 by the caller before calling this routine. */
6066
6067 static void
6068 find_constant_pool_ref (rtx x, rtx *ref)
6069 {
6070 int i, j;
6071 const char *fmt;
6072
6073 /* Ignore LTREL_BASE references. */
6074 if (GET_CODE (x) == UNSPEC
6075 && XINT (x, 1) == UNSPEC_LTREL_BASE)
6076 return;
6077 /* Likewise POOL_ENTRY insns. */
6078 if (GET_CODE (x) == UNSPEC_VOLATILE
6079 && XINT (x, 1) == UNSPECV_POOL_ENTRY)
6080 return;
6081
6082 gcc_assert (GET_CODE (x) != SYMBOL_REF
6083 || !CONSTANT_POOL_ADDRESS_P (x));
6084
6085 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_LTREF)
6086 {
6087 rtx sym = XVECEXP (x, 0, 0);
6088 gcc_assert (GET_CODE (sym) == SYMBOL_REF
6089 && CONSTANT_POOL_ADDRESS_P (sym));
6090
6091 if (*ref == NULL_RTX)
6092 *ref = sym;
6093 else
6094 gcc_assert (*ref == sym);
6095
6096 return;
6097 }
6098
6099 fmt = GET_RTX_FORMAT (GET_CODE (x));
6100 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
6101 {
6102 if (fmt[i] == 'e')
6103 {
6104 find_constant_pool_ref (XEXP (x, i), ref);
6105 }
6106 else if (fmt[i] == 'E')
6107 {
6108 for (j = 0; j < XVECLEN (x, i); j++)
6109 find_constant_pool_ref (XVECEXP (x, i, j), ref);
6110 }
6111 }
6112 }
6113
6114 /* Replace every reference to the annotated literal pool
6115 symbol REF in X by its base plus OFFSET. */
6116
6117 static void
6118 replace_constant_pool_ref (rtx *x, rtx ref, rtx offset)
6119 {
6120 int i, j;
6121 const char *fmt;
6122
6123 gcc_assert (*x != ref);
6124
6125 if (GET_CODE (*x) == UNSPEC
6126 && XINT (*x, 1) == UNSPEC_LTREF
6127 && XVECEXP (*x, 0, 0) == ref)
6128 {
6129 *x = gen_rtx_PLUS (Pmode, XVECEXP (*x, 0, 1), offset);
6130 return;
6131 }
6132
6133 if (GET_CODE (*x) == PLUS
6134 && GET_CODE (XEXP (*x, 1)) == CONST_INT
6135 && GET_CODE (XEXP (*x, 0)) == UNSPEC
6136 && XINT (XEXP (*x, 0), 1) == UNSPEC_LTREF
6137 && XVECEXP (XEXP (*x, 0), 0, 0) == ref)
6138 {
6139 rtx addr = gen_rtx_PLUS (Pmode, XVECEXP (XEXP (*x, 0), 0, 1), offset);
6140 *x = plus_constant (Pmode, addr, INTVAL (XEXP (*x, 1)));
6141 return;
6142 }
6143
6144 fmt = GET_RTX_FORMAT (GET_CODE (*x));
6145 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
6146 {
6147 if (fmt[i] == 'e')
6148 {
6149 replace_constant_pool_ref (&XEXP (*x, i), ref, offset);
6150 }
6151 else if (fmt[i] == 'E')
6152 {
6153 for (j = 0; j < XVECLEN (*x, i); j++)
6154 replace_constant_pool_ref (&XVECEXP (*x, i, j), ref, offset);
6155 }
6156 }
6157 }
6158
6159 /* Check whether X contains an UNSPEC_LTREL_BASE.
6160 Return its constant pool symbol if found, NULL_RTX otherwise. */
6161
6162 static rtx
6163 find_ltrel_base (rtx x)
6164 {
6165 int i, j;
6166 const char *fmt;
6167
6168 if (GET_CODE (x) == UNSPEC
6169 && XINT (x, 1) == UNSPEC_LTREL_BASE)
6170 return XVECEXP (x, 0, 0);
6171
6172 fmt = GET_RTX_FORMAT (GET_CODE (x));
6173 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
6174 {
6175 if (fmt[i] == 'e')
6176 {
6177 rtx fnd = find_ltrel_base (XEXP (x, i));
6178 if (fnd)
6179 return fnd;
6180 }
6181 else if (fmt[i] == 'E')
6182 {
6183 for (j = 0; j < XVECLEN (x, i); j++)
6184 {
6185 rtx fnd = find_ltrel_base (XVECEXP (x, i, j));
6186 if (fnd)
6187 return fnd;
6188 }
6189 }
6190 }
6191
6192 return NULL_RTX;
6193 }
6194
6195 /* Replace any occurrence of UNSPEC_LTREL_BASE in X with its base. */
6196
6197 static void
6198 replace_ltrel_base (rtx *x)
6199 {
6200 int i, j;
6201 const char *fmt;
6202
6203 if (GET_CODE (*x) == UNSPEC
6204 && XINT (*x, 1) == UNSPEC_LTREL_BASE)
6205 {
6206 *x = XVECEXP (*x, 0, 1);
6207 return;
6208 }
6209
6210 fmt = GET_RTX_FORMAT (GET_CODE (*x));
6211 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
6212 {
6213 if (fmt[i] == 'e')
6214 {
6215 replace_ltrel_base (&XEXP (*x, i));
6216 }
6217 else if (fmt[i] == 'E')
6218 {
6219 for (j = 0; j < XVECLEN (*x, i); j++)
6220 replace_ltrel_base (&XVECEXP (*x, i, j));
6221 }
6222 }
6223 }
6224
6225
6226 /* We keep a list of constants which we have to add to internal
6227 constant tables in the middle of large functions. */
6228
6229 #define NR_C_MODES 11
6230 enum machine_mode constant_modes[NR_C_MODES] =
6231 {
6232 TFmode, TImode, TDmode,
6233 DFmode, DImode, DDmode,
6234 SFmode, SImode, SDmode,
6235 HImode,
6236 QImode
6237 };
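
/* Note: the modes above are ordered by decreasing size (16, 8, 4, 2 and 1
   bytes).  s390_dump_pool walks this array front to back, so the constants
   are emitted in descending alignment requirement order and each entry is
   naturally aligned without extra padding.  */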
6238
6239 struct constant
6240 {
6241 struct constant *next;
6242 rtx value;
6243 rtx label;
6244 };
6245
6246 struct constant_pool
6247 {
6248 struct constant_pool *next;
6249 rtx first_insn;
6250 rtx pool_insn;
6251 bitmap insns;
6252 rtx emit_pool_after;
6253
6254 struct constant *constants[NR_C_MODES];
6255 struct constant *execute;
6256 rtx label;
6257 int size;
6258 };
6259
6260 /* Allocate new constant_pool structure. */
6261
6262 static struct constant_pool *
6263 s390_alloc_pool (void)
6264 {
6265 struct constant_pool *pool;
6266 int i;
6267
6268 pool = (struct constant_pool *) xmalloc (sizeof *pool);
6269 pool->next = NULL;
6270 for (i = 0; i < NR_C_MODES; i++)
6271 pool->constants[i] = NULL;
6272
6273 pool->execute = NULL;
6274 pool->label = gen_label_rtx ();
6275 pool->first_insn = NULL_RTX;
6276 pool->pool_insn = NULL_RTX;
6277 pool->insns = BITMAP_ALLOC (NULL);
6278 pool->size = 0;
6279 pool->emit_pool_after = NULL_RTX;
6280
6281 return pool;
6282 }
6283
6284 /* Create new constant pool covering instructions starting at INSN
6285 and chain it to the end of POOL_LIST. */
6286
6287 static struct constant_pool *
6288 s390_start_pool (struct constant_pool **pool_list, rtx insn)
6289 {
6290 struct constant_pool *pool, **prev;
6291
6292 pool = s390_alloc_pool ();
6293 pool->first_insn = insn;
6294
6295 for (prev = pool_list; *prev; prev = &(*prev)->next)
6296 ;
6297 *prev = pool;
6298
6299 return pool;
6300 }
6301
6302 /* End range of instructions covered by POOL at INSN and emit
6303 placeholder insn representing the pool. */
6304
6305 static void
6306 s390_end_pool (struct constant_pool *pool, rtx insn)
6307 {
6308 rtx pool_size = GEN_INT (pool->size + 8 /* alignment slop */);
6309
6310 if (!insn)
6311 insn = get_last_insn ();
6312
6313 pool->pool_insn = emit_insn_after (gen_pool (pool_size), insn);
6314 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
6315 }
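
/* Note (assumption): the 8 bytes of "alignment slop" added above are meant
   to cover the padding that gen_pool_align may emit in front of the pool
   when it is dumped (see s390_dump_pool).  */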
6316
6317 /* Add INSN to the list of insns covered by POOL. */
6318
6319 static void
6320 s390_add_pool_insn (struct constant_pool *pool, rtx insn)
6321 {
6322 bitmap_set_bit (pool->insns, INSN_UID (insn));
6323 }
6324
6325 /* Return pool out of POOL_LIST that covers INSN. */
6326
6327 static struct constant_pool *
6328 s390_find_pool (struct constant_pool *pool_list, rtx insn)
6329 {
6330 struct constant_pool *pool;
6331
6332 for (pool = pool_list; pool; pool = pool->next)
6333 if (bitmap_bit_p (pool->insns, INSN_UID (insn)))
6334 break;
6335
6336 return pool;
6337 }
6338
6339 /* Add constant VAL of mode MODE to the constant pool POOL. */
6340
6341 static void
6342 s390_add_constant (struct constant_pool *pool, rtx val, enum machine_mode mode)
6343 {
6344 struct constant *c;
6345 int i;
6346
6347 for (i = 0; i < NR_C_MODES; i++)
6348 if (constant_modes[i] == mode)
6349 break;
6350 gcc_assert (i != NR_C_MODES);
6351
6352 for (c = pool->constants[i]; c != NULL; c = c->next)
6353 if (rtx_equal_p (val, c->value))
6354 break;
6355
6356 if (c == NULL)
6357 {
6358 c = (struct constant *) xmalloc (sizeof *c);
6359 c->value = val;
6360 c->label = gen_label_rtx ();
6361 c->next = pool->constants[i];
6362 pool->constants[i] = c;
6363 pool->size += GET_MODE_SIZE (mode);
6364 }
6365 }
6366
6367 /* Return an rtx that represents the offset of X from the start of
6368 pool POOL. */
6369
6370 static rtx
6371 s390_pool_offset (struct constant_pool *pool, rtx x)
6372 {
6373 rtx label;
6374
6375 label = gen_rtx_LABEL_REF (GET_MODE (x), pool->label);
6376 x = gen_rtx_UNSPEC (GET_MODE (x), gen_rtvec (2, x, label),
6377 UNSPEC_POOL_OFFSET);
6378 return gen_rtx_CONST (GET_MODE (x), x);
6379 }
6380
6381 /* Find constant VAL of mode MODE in the constant pool POOL.
6382 Return an RTX describing the distance from the start of
6383 the pool to the location of the new constant. */
6384
6385 static rtx
6386 s390_find_constant (struct constant_pool *pool, rtx val,
6387 enum machine_mode mode)
6388 {
6389 struct constant *c;
6390 int i;
6391
6392 for (i = 0; i < NR_C_MODES; i++)
6393 if (constant_modes[i] == mode)
6394 break;
6395 gcc_assert (i != NR_C_MODES);
6396
6397 for (c = pool->constants[i]; c != NULL; c = c->next)
6398 if (rtx_equal_p (val, c->value))
6399 break;
6400
6401 gcc_assert (c);
6402
6403 return s390_pool_offset (pool, gen_rtx_LABEL_REF (Pmode, c->label));
6404 }
6405
6406 /* Check whether INSN is an execute. Return the label_ref to its
6407 execute target template if so, NULL_RTX otherwise. */
6408
6409 static rtx
6410 s390_execute_label (rtx insn)
6411 {
6412 if (NONJUMP_INSN_P (insn)
6413 && GET_CODE (PATTERN (insn)) == PARALLEL
6414 && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == UNSPEC
6415 && XINT (XVECEXP (PATTERN (insn), 0, 0), 1) == UNSPEC_EXECUTE)
6416 return XVECEXP (XVECEXP (PATTERN (insn), 0, 0), 0, 2);
6417
6418 return NULL_RTX;
6419 }
6420
6421 /* Add execute target for INSN to the constant pool POOL. */
6422
6423 static void
6424 s390_add_execute (struct constant_pool *pool, rtx insn)
6425 {
6426 struct constant *c;
6427
6428 for (c = pool->execute; c != NULL; c = c->next)
6429 if (INSN_UID (insn) == INSN_UID (c->value))
6430 break;
6431
6432 if (c == NULL)
6433 {
6434 c = (struct constant *) xmalloc (sizeof *c);
6435 c->value = insn;
6436 c->label = gen_label_rtx ();
6437 c->next = pool->execute;
6438 pool->execute = c;
6439 pool->size += 6;
6440 }
6441 }
6442
6443 /* Find execute target for INSN in the constant pool POOL.
6444 Return an RTX describing the distance from the start of
6445 the pool to the location of the execute target. */
6446
6447 static rtx
6448 s390_find_execute (struct constant_pool *pool, rtx insn)
6449 {
6450 struct constant *c;
6451
6452 for (c = pool->execute; c != NULL; c = c->next)
6453 if (INSN_UID (insn) == INSN_UID (c->value))
6454 break;
6455
6456 gcc_assert (c);
6457
6458 return s390_pool_offset (pool, gen_rtx_LABEL_REF (Pmode, c->label));
6459 }
6460
6461 /* For an execute INSN, extract the execute target template. */
6462
6463 static rtx
6464 s390_execute_target (rtx insn)
6465 {
6466 rtx pattern = PATTERN (insn);
6467 gcc_assert (s390_execute_label (insn));
6468
6469 if (XVECLEN (pattern, 0) == 2)
6470 {
6471 pattern = copy_rtx (XVECEXP (pattern, 0, 1));
6472 }
6473 else
6474 {
6475 rtvec vec = rtvec_alloc (XVECLEN (pattern, 0) - 1);
6476 int i;
6477
6478 for (i = 0; i < XVECLEN (pattern, 0) - 1; i++)
6479 RTVEC_ELT (vec, i) = copy_rtx (XVECEXP (pattern, 0, i + 1));
6480
6481 pattern = gen_rtx_PARALLEL (VOIDmode, vec);
6482 }
6483
6484 return pattern;
6485 }
6486
6487 /* Indicate that INSN cannot be duplicated. This is the case for
6488 execute insns that carry a unique label. */
6489
6490 static bool
6491 s390_cannot_copy_insn_p (rtx insn)
6492 {
6493 rtx label = s390_execute_label (insn);
6494 return label && label != const0_rtx;
6495 }
6496
6497 /* Dump out the constants in POOL. If REMOTE_LABEL is true,
6498 do not emit the pool base label. */
6499
6500 static void
6501 s390_dump_pool (struct constant_pool *pool, bool remote_label)
6502 {
6503 struct constant *c;
6504 rtx insn = pool->pool_insn;
6505 int i;
6506
6507 /* Switch to rodata section. */
6508 if (TARGET_CPU_ZARCH)
6509 {
6510 insn = emit_insn_after (gen_pool_section_start (), insn);
6511 INSN_ADDRESSES_NEW (insn, -1);
6512 }
6513
6514 /* Ensure minimum pool alignment. */
6515 if (TARGET_CPU_ZARCH)
6516 insn = emit_insn_after (gen_pool_align (GEN_INT (8)), insn);
6517 else
6518 insn = emit_insn_after (gen_pool_align (GEN_INT (4)), insn);
6519 INSN_ADDRESSES_NEW (insn, -1);
6520
6521 /* Emit pool base label. */
6522 if (!remote_label)
6523 {
6524 insn = emit_label_after (pool->label, insn);
6525 INSN_ADDRESSES_NEW (insn, -1);
6526 }
6527
6528 /* Dump constants in descending alignment requirement order,
6529 ensuring proper alignment for every constant. */
6530 for (i = 0; i < NR_C_MODES; i++)
6531 for (c = pool->constants[i]; c; c = c->next)
6532 {
6533 /* Convert UNSPEC_LTREL_OFFSET unspecs to pool-relative references. */
6534 rtx value = copy_rtx (c->value);
6535 if (GET_CODE (value) == CONST
6536 && GET_CODE (XEXP (value, 0)) == UNSPEC
6537 && XINT (XEXP (value, 0), 1) == UNSPEC_LTREL_OFFSET
6538 && XVECLEN (XEXP (value, 0), 0) == 1)
6539 value = s390_pool_offset (pool, XVECEXP (XEXP (value, 0), 0, 0));
6540
6541 insn = emit_label_after (c->label, insn);
6542 INSN_ADDRESSES_NEW (insn, -1);
6543
6544 value = gen_rtx_UNSPEC_VOLATILE (constant_modes[i],
6545 gen_rtvec (1, value),
6546 UNSPECV_POOL_ENTRY);
6547 insn = emit_insn_after (value, insn);
6548 INSN_ADDRESSES_NEW (insn, -1);
6549 }
6550
6551 /* Ensure minimum alignment for instructions. */
6552 insn = emit_insn_after (gen_pool_align (GEN_INT (2)), insn);
6553 INSN_ADDRESSES_NEW (insn, -1);
6554
6555 /* Output in-pool execute template insns. */
6556 for (c = pool->execute; c; c = c->next)
6557 {
6558 insn = emit_label_after (c->label, insn);
6559 INSN_ADDRESSES_NEW (insn, -1);
6560
6561 insn = emit_insn_after (s390_execute_target (c->value), insn);
6562 INSN_ADDRESSES_NEW (insn, -1);
6563 }
6564
6565 /* Switch back to previous section. */
6566 if (TARGET_CPU_ZARCH)
6567 {
6568 insn = emit_insn_after (gen_pool_section_end (), insn);
6569 INSN_ADDRESSES_NEW (insn, -1);
6570 }
6571
6572 insn = emit_barrier_after (insn);
6573 INSN_ADDRESSES_NEW (insn, -1);
6574
6575 /* Remove placeholder insn. */
6576 remove_insn (pool->pool_insn);
6577 }
6578
6579 /* Free all memory used by POOL. */
6580
6581 static void
6582 s390_free_pool (struct constant_pool *pool)
6583 {
6584 struct constant *c, *next;
6585 int i;
6586
6587 for (i = 0; i < NR_C_MODES; i++)
6588 for (c = pool->constants[i]; c; c = next)
6589 {
6590 next = c->next;
6591 free (c);
6592 }
6593
6594 for (c = pool->execute; c; c = next)
6595 {
6596 next = c->next;
6597 free (c);
6598 }
6599
6600 BITMAP_FREE (pool->insns);
6601 free (pool);
6602 }
6603
6604
6605 /* Collect main literal pool. Return NULL on overflow. */
6606
6607 static struct constant_pool *
6608 s390_mainpool_start (void)
6609 {
6610 struct constant_pool *pool;
6611 rtx insn;
6612
6613 pool = s390_alloc_pool ();
6614
6615 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6616 {
6617 if (NONJUMP_INSN_P (insn)
6618 && GET_CODE (PATTERN (insn)) == SET
6619 && GET_CODE (SET_SRC (PATTERN (insn))) == UNSPEC_VOLATILE
6620 && XINT (SET_SRC (PATTERN (insn)), 1) == UNSPECV_MAIN_POOL)
6621 {
6622 gcc_assert (!pool->pool_insn);
6623 pool->pool_insn = insn;
6624 }
6625
6626 if (!TARGET_CPU_ZARCH && s390_execute_label (insn))
6627 {
6628 s390_add_execute (pool, insn);
6629 }
6630 else if (NONJUMP_INSN_P (insn) || CALL_P (insn))
6631 {
6632 rtx pool_ref = NULL_RTX;
6633 find_constant_pool_ref (PATTERN (insn), &pool_ref);
6634 if (pool_ref)
6635 {
6636 rtx constant = get_pool_constant (pool_ref);
6637 enum machine_mode mode = get_pool_mode (pool_ref);
6638 s390_add_constant (pool, constant, mode);
6639 }
6640 }
6641
6642 /* If hot/cold partitioning is enabled we have to make sure that
6643 the literal pool is emitted in the same section where the
6644 initialization of the literal pool base pointer takes place.
6645 emit_pool_after is only used in the non-overflow case on
6646 non-zSeries CPUs where we can emit the literal pool at the end of the
6647 function body within the text section. */
6648 if (NOTE_P (insn)
6649 && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS
6650 && !pool->emit_pool_after)
6651 pool->emit_pool_after = PREV_INSN (insn);
6652 }
6653
6654 gcc_assert (pool->pool_insn || pool->size == 0);
6655
6656 if (pool->size >= 4096)
6657 {
6658 /* We're going to chunkify the pool, so remove the main
6659 pool placeholder insn. */
6660 remove_insn (pool->pool_insn);
6661
6662 s390_free_pool (pool);
6663 pool = NULL;
6664 }
6665
6666 /* If the function ends with the section where the literal pool
6667 should be emitted, set the marker to its end. */
6668 if (pool && !pool->emit_pool_after)
6669 pool->emit_pool_after = get_last_insn ();
6670
6671 return pool;
6672 }
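
/* Note on the 4096-byte limit checked above: literal pool entries are
   addressed via the base register with a 12-bit unsigned displacement,
   which reaches at most 4096 bytes.  Pools that would exceed this are
   handled by s390_chunkify_start / s390_chunkify_finish instead.  */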
6673
6674 /* POOL holds the main literal pool as collected by s390_mainpool_start.
6675 Modify the current function to output the pool constants as well as
6676 the pool register setup instruction. */
6677
6678 static void
6679 s390_mainpool_finish (struct constant_pool *pool)
6680 {
6681 rtx base_reg = cfun->machine->base_reg;
6682 rtx insn;
6683
6684 /* If the pool is empty, we're done. */
6685 if (pool->size == 0)
6686 {
6687 /* We don't actually need a base register after all. */
6688 cfun->machine->base_reg = NULL_RTX;
6689
6690 if (pool->pool_insn)
6691 remove_insn (pool->pool_insn);
6692 s390_free_pool (pool);
6693 return;
6694 }
6695
6696 /* We need correct insn addresses. */
6697 shorten_branches (get_insns ());
6698
6699 /* On zSeries, we use a LARL to load the pool register. The pool is
6700 located in the .rodata section, so we emit it after the function. */
6701 if (TARGET_CPU_ZARCH)
6702 {
6703 insn = gen_main_base_64 (base_reg, pool->label);
6704 insn = emit_insn_after (insn, pool->pool_insn);
6705 INSN_ADDRESSES_NEW (insn, -1);
6706 remove_insn (pool->pool_insn);
6707
6708 insn = get_last_insn ();
6709 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
6710 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
6711
6712 s390_dump_pool (pool, 0);
6713 }
6714
6715 /* On S/390, if the total size of the function's code plus literal pool
6716 does not exceed 4096 bytes, we use BASR to set up a function base
6717 pointer, and emit the literal pool at the end of the function. */
6718 else if (INSN_ADDRESSES (INSN_UID (pool->emit_pool_after))
6719 + pool->size + 8 /* alignment slop */ < 4096)
6720 {
6721 insn = gen_main_base_31_small (base_reg, pool->label);
6722 insn = emit_insn_after (insn, pool->pool_insn);
6723 INSN_ADDRESSES_NEW (insn, -1);
6724 remove_insn (pool->pool_insn);
6725
6726 insn = emit_label_after (pool->label, insn);
6727 INSN_ADDRESSES_NEW (insn, -1);
6728
6729 /* emit_pool_after will be set by s390_mainpool_start to the
6730 last insn of the section where the literal pool should be
6731 emitted. */
6732 insn = pool->emit_pool_after;
6733
6734 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
6735 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
6736
6737 s390_dump_pool (pool, 1);
6738 }
6739
6740 /* Otherwise, we emit an inline literal pool and use BASR to branch
6741 over it, setting up the pool register at the same time. */
6742 else
6743 {
6744 rtx pool_end = gen_label_rtx ();
6745
6746 insn = gen_main_base_31_large (base_reg, pool->label, pool_end);
6747 insn = emit_jump_insn_after (insn, pool->pool_insn);
6748 JUMP_LABEL (insn) = pool_end;
6749 INSN_ADDRESSES_NEW (insn, -1);
6750 remove_insn (pool->pool_insn);
6751
6752 insn = emit_label_after (pool->label, insn);
6753 INSN_ADDRESSES_NEW (insn, -1);
6754
6755 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
6756 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
6757
6758 insn = emit_label_after (pool_end, pool->pool_insn);
6759 INSN_ADDRESSES_NEW (insn, -1);
6760
6761 s390_dump_pool (pool, 1);
6762 }
6763
6764
6765 /* Replace all literal pool references. */
6766
6767 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6768 {
6769 if (INSN_P (insn))
6770 replace_ltrel_base (&PATTERN (insn));
6771
6772 if (NONJUMP_INSN_P (insn) || CALL_P (insn))
6773 {
6774 rtx addr, pool_ref = NULL_RTX;
6775 find_constant_pool_ref (PATTERN (insn), &pool_ref);
6776 if (pool_ref)
6777 {
6778 if (s390_execute_label (insn))
6779 addr = s390_find_execute (pool, insn);
6780 else
6781 addr = s390_find_constant (pool, get_pool_constant (pool_ref),
6782 get_pool_mode (pool_ref));
6783
6784 replace_constant_pool_ref (&PATTERN (insn), pool_ref, addr);
6785 INSN_CODE (insn) = -1;
6786 }
6787 }
6788 }
6789
6790
6791 /* Free the pool. */
6792 s390_free_pool (pool);
6793 }
6794
6795 /* POOL holds the main literal pool as collected by s390_mainpool_start.
6796 We have decided we cannot use this pool, so revert all changes
6797 to the current function that were done by s390_mainpool_start. */
6798 static void
6799 s390_mainpool_cancel (struct constant_pool *pool)
6800 {
6801 /* We didn't actually change the instruction stream, so simply
6802 free the pool memory. */
6803 s390_free_pool (pool);
6804 }
6805
6806
6807 /* Chunkify the literal pool. */
6808
6809 #define S390_POOL_CHUNK_MIN 0xc00
6810 #define S390_POOL_CHUNK_MAX 0xe00
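
/* Note (assumption, not stated in the sources): both limits stay well
   below the 4096-byte displacement range of the base register.  A chunk
   may be ended early at a convenient BARRIER once it reaches
   S390_POOL_CHUNK_MIN (0xc00 = 3072 bytes) and is forcibly terminated
   around S390_POOL_CHUNK_MAX (0xe00 = 3584 bytes), leaving slack for
   alignment padding, execute templates and base reload insns.  */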
6811
6812 static struct constant_pool *
6813 s390_chunkify_start (void)
6814 {
6815 struct constant_pool *curr_pool = NULL, *pool_list = NULL;
6816 int extra_size = 0;
6817 bitmap far_labels;
6818 rtx pending_ltrel = NULL_RTX;
6819 rtx insn;
6820
6821 rtx (*gen_reload_base) (rtx, rtx) =
6822 TARGET_CPU_ZARCH? gen_reload_base_64 : gen_reload_base_31;
6823
6824
6825 /* We need correct insn addresses. */
6826
6827 shorten_branches (get_insns ());
6828
6829 /* Scan all insns and move literals to pool chunks. */
6830
6831 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6832 {
6833 bool section_switch_p = false;
6834
6835 /* Check for pending LTREL_BASE. */
6836 if (INSN_P (insn))
6837 {
6838 rtx ltrel_base = find_ltrel_base (PATTERN (insn));
6839 if (ltrel_base)
6840 {
6841 gcc_assert (ltrel_base == pending_ltrel);
6842 pending_ltrel = NULL_RTX;
6843 }
6844 }
6845
6846 if (!TARGET_CPU_ZARCH && s390_execute_label (insn))
6847 {
6848 if (!curr_pool)
6849 curr_pool = s390_start_pool (&pool_list, insn);
6850
6851 s390_add_execute (curr_pool, insn);
6852 s390_add_pool_insn (curr_pool, insn);
6853 }
6854 else if (NONJUMP_INSN_P (insn) || CALL_P (insn))
6855 {
6856 rtx pool_ref = NULL_RTX;
6857 find_constant_pool_ref (PATTERN (insn), &pool_ref);
6858 if (pool_ref)
6859 {
6860 rtx constant = get_pool_constant (pool_ref);
6861 enum machine_mode mode = get_pool_mode (pool_ref);
6862
6863 if (!curr_pool)
6864 curr_pool = s390_start_pool (&pool_list, insn);
6865
6866 s390_add_constant (curr_pool, constant, mode);
6867 s390_add_pool_insn (curr_pool, insn);
6868
6869 /* Don't split the pool chunk between a LTREL_OFFSET load
6870 and the corresponding LTREL_BASE. */
6871 if (GET_CODE (constant) == CONST
6872 && GET_CODE (XEXP (constant, 0)) == UNSPEC
6873 && XINT (XEXP (constant, 0), 1) == UNSPEC_LTREL_OFFSET)
6874 {
6875 gcc_assert (!pending_ltrel);
6876 pending_ltrel = pool_ref;
6877 }
6878 }
6879 }
6880
6881 if (JUMP_P (insn) || JUMP_TABLE_DATA_P (insn) || LABEL_P (insn))
6882 {
6883 if (curr_pool)
6884 s390_add_pool_insn (curr_pool, insn);
6885 /* An LTREL_BASE must follow within the same basic block. */
6886 gcc_assert (!pending_ltrel);
6887 }
6888
6889 if (NOTE_P (insn))
6890 switch (NOTE_KIND (insn))
6891 {
6892 case NOTE_INSN_SWITCH_TEXT_SECTIONS:
6893 section_switch_p = true;
6894 break;
6895 case NOTE_INSN_VAR_LOCATION:
6896 case NOTE_INSN_CALL_ARG_LOCATION:
6897 continue;
6898 default:
6899 break;
6900 }
6901
6902 if (!curr_pool
6903 || INSN_ADDRESSES_SIZE () <= (size_t) INSN_UID (insn)
6904 || INSN_ADDRESSES (INSN_UID (insn)) == -1)
6905 continue;
6906
6907 if (TARGET_CPU_ZARCH)
6908 {
6909 if (curr_pool->size < S390_POOL_CHUNK_MAX)
6910 continue;
6911
6912 s390_end_pool (curr_pool, NULL_RTX);
6913 curr_pool = NULL;
6914 }
6915 else
6916 {
6917 int chunk_size = INSN_ADDRESSES (INSN_UID (insn))
6918 - INSN_ADDRESSES (INSN_UID (curr_pool->first_insn))
6919 + extra_size;
6920
6921 /* We will later have to insert base register reload insns.
6922 Those will have an effect on code size, which we need to
6923 consider here. This calculation makes rather pessimistic
6924 worst-case assumptions. */
6925 if (LABEL_P (insn))
6926 extra_size += 6;
6927
6928 if (chunk_size < S390_POOL_CHUNK_MIN
6929 && curr_pool->size < S390_POOL_CHUNK_MIN
6930 && !section_switch_p)
6931 continue;
6932
6933 /* Pool chunks can only be inserted after BARRIERs ... */
6934 if (BARRIER_P (insn))
6935 {
6936 s390_end_pool (curr_pool, insn);
6937 curr_pool = NULL;
6938 extra_size = 0;
6939 }
6940
6941 /* ... so if we don't find one in time, create one. */
6942 else if (chunk_size > S390_POOL_CHUNK_MAX
6943 || curr_pool->size > S390_POOL_CHUNK_MAX
6944 || section_switch_p)
6945 {
6946 rtx label, jump, barrier, next, prev;
6947
6948 if (!section_switch_p)
6949 {
6950 /* We can insert the barrier only after a 'real' insn. */
6951 if (! NONJUMP_INSN_P (insn) && ! CALL_P (insn))
6952 continue;
6953 if (get_attr_length (insn) == 0)
6954 continue;
6955 /* Don't separate LTREL_BASE from the corresponding
6956 LTREL_OFFSET load. */
6957 if (pending_ltrel)
6958 continue;
6959 next = insn;
6960 do
6961 {
6962 insn = next;
6963 next = NEXT_INSN (insn);
6964 }
6965 while (next
6966 && NOTE_P (next)
6967 && (NOTE_KIND (next) == NOTE_INSN_VAR_LOCATION
6968 || NOTE_KIND (next) == NOTE_INSN_CALL_ARG_LOCATION));
6969 }
6970 else
6971 {
6972 gcc_assert (!pending_ltrel);
6973
6974 /* The old pool has to end before the section switch
6975 note in order to make it part of the current
6976 section. */
6977 insn = PREV_INSN (insn);
6978 }
6979
6980 label = gen_label_rtx ();
6981 prev = insn;
6982 if (prev && NOTE_P (prev))
6983 prev = prev_nonnote_insn (prev);
6984 if (prev)
6985 jump = emit_jump_insn_after_setloc (gen_jump (label), insn,
6986 INSN_LOCATION (prev));
6987 else
6988 jump = emit_jump_insn_after_noloc (gen_jump (label), insn);
6989 barrier = emit_barrier_after (jump);
6990 insn = emit_label_after (label, barrier);
6991 JUMP_LABEL (jump) = label;
6992 LABEL_NUSES (label) = 1;
6993
6994 INSN_ADDRESSES_NEW (jump, -1);
6995 INSN_ADDRESSES_NEW (barrier, -1);
6996 INSN_ADDRESSES_NEW (insn, -1);
6997
6998 s390_end_pool (curr_pool, barrier);
6999 curr_pool = NULL;
7000 extra_size = 0;
7001 }
7002 }
7003 }
7004
7005 if (curr_pool)
7006 s390_end_pool (curr_pool, NULL_RTX);
7007 gcc_assert (!pending_ltrel);
7008
7009 /* Find all labels that are branched into
7010 from an insn belonging to a different chunk. */
7011
7012 far_labels = BITMAP_ALLOC (NULL);
7013
7014 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
7015 {
7016 /* Labels marked with LABEL_PRESERVE_P can be target
7017 of non-local jumps, so we have to mark them.
7018 The same holds for named labels.
7019
7020 Don't do that, however, if it is the label before
7021 a jump table. */
7022
7023 if (LABEL_P (insn)
7024 && (LABEL_PRESERVE_P (insn) || LABEL_NAME (insn)))
7025 {
7026 rtx vec_insn = next_active_insn (insn);
7027 if (! vec_insn || ! JUMP_TABLE_DATA_P (vec_insn))
7028 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (insn));
7029 }
7030
7031 /* If we have a direct jump (conditional or unconditional)
7032 or a casesi jump, check all potential targets. */
7033 else if (JUMP_P (insn))
7034 {
7035 rtx pat = PATTERN (insn);
7036 if (GET_CODE (pat) == PARALLEL && XVECLEN (pat, 0) > 2)
7037 pat = XVECEXP (pat, 0, 0);
7038
7039 if (GET_CODE (pat) == SET)
7040 {
7041 rtx label = JUMP_LABEL (insn);
7042 if (label)
7043 {
7044 if (s390_find_pool (pool_list, label)
7045 != s390_find_pool (pool_list, insn))
7046 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (label));
7047 }
7048 }
7049 else if (GET_CODE (pat) == PARALLEL
7050 && XVECLEN (pat, 0) == 2
7051 && GET_CODE (XVECEXP (pat, 0, 0)) == SET
7052 && GET_CODE (XVECEXP (pat, 0, 1)) == USE
7053 && GET_CODE (XEXP (XVECEXP (pat, 0, 1), 0)) == LABEL_REF)
7054 {
7055 /* Find the jump table used by this casesi jump. */
7056 rtx vec_label = XEXP (XEXP (XVECEXP (pat, 0, 1), 0), 0);
7057 rtx vec_insn = next_active_insn (vec_label);
7058 if (vec_insn && JUMP_TABLE_DATA_P (vec_insn))
7059 {
7060 rtx vec_pat = PATTERN (vec_insn);
7061 int i, diff_p = GET_CODE (vec_pat) == ADDR_DIFF_VEC;
7062
7063 for (i = 0; i < XVECLEN (vec_pat, diff_p); i++)
7064 {
7065 rtx label = XEXP (XVECEXP (vec_pat, diff_p, i), 0);
7066
7067 if (s390_find_pool (pool_list, label)
7068 != s390_find_pool (pool_list, insn))
7069 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (label));
7070 }
7071 }
7072 }
7073 }
7074 }
7075
7076 /* Insert base register reload insns before every pool. */
7077
7078 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
7079 {
7080 rtx new_insn = gen_reload_base (cfun->machine->base_reg,
7081 curr_pool->label);
7082 rtx insn = curr_pool->first_insn;
7083 INSN_ADDRESSES_NEW (emit_insn_before (new_insn, insn), -1);
7084 }
7085
7086 /* Insert base register reload insns at every far label. */
7087
7088 if (dump_file)
7089 {
7090 fprintf (dump_file, "Function: %s\n", current_function_name ());
7091 fprintf (dump_file, "far labels:\n");
7092 }
7093
7094 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
7095 if (LABEL_P (insn)
7096 && bitmap_bit_p (far_labels, CODE_LABEL_NUMBER (insn)))
7097 {
7098 struct constant_pool *pool = s390_find_pool (pool_list, insn);
7099 if (dump_file)
7100 print_rtx (insn);
7101
7102 if (pool)
7103 {
7104 rtx new_insn = gen_reload_base (cfun->machine->base_reg,
7105 pool->label);
7106 INSN_ADDRESSES_NEW (emit_insn_after (new_insn, insn), -1);
7107 }
7108 }
7109
7110
7111 BITMAP_FREE (far_labels);
7112
7113
7114 /* Recompute insn addresses. */
7115
7116 init_insn_lengths ();
7117 shorten_branches (get_insns ());
7118
7119 return pool_list;
7120 }
7121
7122 /* POOL_LIST is a chunk list as prepared by s390_chunkify_start.
7123 After we have decided to use this list, finish implementing
7124 all changes to the current function as required. */
7125
7126 static void
7127 s390_chunkify_finish (struct constant_pool *pool_list)
7128 {
7129 struct constant_pool *curr_pool = NULL;
7130 rtx insn;
7131
7132
7133 /* Replace all literal pool references. */
7134
7135 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
7136 {
7137 if (INSN_P (insn))
7138 replace_ltrel_base (&PATTERN (insn));
7139
7140 curr_pool = s390_find_pool (pool_list, insn);
7141 if (!curr_pool)
7142 continue;
7143
7144 if (NONJUMP_INSN_P (insn) || CALL_P (insn))
7145 {
7146 rtx addr, pool_ref = NULL_RTX;
7147 find_constant_pool_ref (PATTERN (insn), &pool_ref);
7148 if (pool_ref)
7149 {
7150 if (s390_execute_label (insn))
7151 addr = s390_find_execute (curr_pool, insn);
7152 else
7153 addr = s390_find_constant (curr_pool,
7154 get_pool_constant (pool_ref),
7155 get_pool_mode (pool_ref));
7156
7157 replace_constant_pool_ref (&PATTERN (insn), pool_ref, addr);
7158 INSN_CODE (insn) = -1;
7159 }
7160 }
7161 }
7162
7163 /* Dump out all literal pools. */
7164
7165 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
7166 s390_dump_pool (curr_pool, 0);
7167
7168 /* Free pool list. */
7169
7170 while (pool_list)
7171 {
7172 struct constant_pool *next = pool_list->next;
7173 s390_free_pool (pool_list);
7174 pool_list = next;
7175 }
7176 }
7177
7178 /* POOL_LIST is a chunk list as prepared by s390_chunkify_start.
7179 We have decided we cannot use this list, so revert all changes
7180 to the current function that were done by s390_chunkify_start. */
7181
7182 static void
7183 s390_chunkify_cancel (struct constant_pool *pool_list)
7184 {
7185 struct constant_pool *curr_pool = NULL;
7186 rtx insn;
7187
7188 /* Remove all pool placeholder insns. */
7189
7190 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
7191 {
7192 /* Did we insert an extra barrier? Remove it. */
7193 rtx barrier = PREV_INSN (curr_pool->pool_insn);
7194 rtx jump = barrier? PREV_INSN (barrier) : NULL_RTX;
7195 rtx label = NEXT_INSN (curr_pool->pool_insn);
7196
7197 if (jump && JUMP_P (jump)
7198 && barrier && BARRIER_P (barrier)
7199 && label && LABEL_P (label)
7200 && GET_CODE (PATTERN (jump)) == SET
7201 && SET_DEST (PATTERN (jump)) == pc_rtx
7202 && GET_CODE (SET_SRC (PATTERN (jump))) == LABEL_REF
7203 && XEXP (SET_SRC (PATTERN (jump)), 0) == label)
7204 {
7205 remove_insn (jump);
7206 remove_insn (barrier);
7207 remove_insn (label);
7208 }
7209
7210 remove_insn (curr_pool->pool_insn);
7211 }
7212
7213 /* Remove all base register reload insns. */
7214
7215 for (insn = get_insns (); insn; )
7216 {
7217 rtx next_insn = NEXT_INSN (insn);
7218
7219 if (NONJUMP_INSN_P (insn)
7220 && GET_CODE (PATTERN (insn)) == SET
7221 && GET_CODE (SET_SRC (PATTERN (insn))) == UNSPEC
7222 && XINT (SET_SRC (PATTERN (insn)), 1) == UNSPEC_RELOAD_BASE)
7223 remove_insn (insn);
7224
7225 insn = next_insn;
7226 }
7227
7228 /* Free pool list. */
7229
7230 while (pool_list)
7231 {
7232 struct constant_pool *next = pool_list->next;
7233 s390_free_pool (pool_list);
7234 pool_list = next;
7235 }
7236 }
7237
7238 /* Output the constant pool entry EXP in mode MODE with alignment ALIGN. */
7239
7240 void
7241 s390_output_pool_entry (rtx exp, enum machine_mode mode, unsigned int align)
7242 {
7243 REAL_VALUE_TYPE r;
7244
7245 switch (GET_MODE_CLASS (mode))
7246 {
7247 case MODE_FLOAT:
7248 case MODE_DECIMAL_FLOAT:
7249 gcc_assert (GET_CODE (exp) == CONST_DOUBLE);
7250
7251 REAL_VALUE_FROM_CONST_DOUBLE (r, exp);
7252 assemble_real (r, mode, align);
7253 break;
7254
7255 case MODE_INT:
7256 assemble_integer (exp, GET_MODE_SIZE (mode), align, 1);
7257 mark_symbol_refs_as_used (exp);
7258 break;
7259
7260 default:
7261 gcc_unreachable ();
7262 }
7263 }
7264
7265
7266 /* Return an RTL expression representing the value of the return address
7267 for the frame COUNT steps up from the current frame. FRAME is the
7268 frame pointer of that frame. */
7269
7270 rtx
7271 s390_return_addr_rtx (int count, rtx frame ATTRIBUTE_UNUSED)
7272 {
7273 int offset;
7274 rtx addr;
7275
7276 /* Without backchain, we fail for all but the current frame. */
7277
7278 if (!TARGET_BACKCHAIN && count > 0)
7279 return NULL_RTX;
7280
7281 /* For the current frame, we need to make sure the initial
7282 value of RETURN_REGNUM is actually saved. */
7283
7284 if (count == 0)
7285 {
7286 /* On non-z architectures branch splitting could overwrite r14. */
7287 if (TARGET_CPU_ZARCH)
7288 return get_hard_reg_initial_val (Pmode, RETURN_REGNUM);
7289 else
7290 {
7291 cfun_frame_layout.save_return_addr_p = true;
7292 return gen_rtx_MEM (Pmode, return_address_pointer_rtx);
7293 }
7294 }
7295
7296 if (TARGET_PACKED_STACK)
7297 offset = -2 * UNITS_PER_LONG;
7298 else
7299 offset = RETURN_REGNUM * UNITS_PER_LONG;
7300
7301 addr = plus_constant (Pmode, frame, offset);
7302 addr = memory_address (Pmode, addr);
7303 return gen_rtx_MEM (Pmode, addr);
7304 }
7305
7306 /* Return an RTL expression representing the back chain stored in
7307 the current stack frame. */
7308
7309 rtx
7310 s390_back_chain_rtx (void)
7311 {
7312 rtx chain;
7313
7314 gcc_assert (TARGET_BACKCHAIN);
7315
7316 if (TARGET_PACKED_STACK)
7317 chain = plus_constant (Pmode, stack_pointer_rtx,
7318 STACK_POINTER_OFFSET - UNITS_PER_LONG);
7319 else
7320 chain = stack_pointer_rtx;
7321
7322 chain = gen_rtx_MEM (Pmode, chain);
7323 return chain;
7324 }
7325
7326 /* Find the first call-clobbered register that is unused in the function.
7327 It could be used as the base register in a leaf function
7328 or for holding the return address before the epilogue. */
7329
7330 static int
7331 find_unused_clobbered_reg (void)
7332 {
7333 int i;
7334 for (i = 0; i < 6; i++)
7335 if (!df_regs_ever_live_p (i))
7336 return i;
7337 return 0;
7338 }
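
/* Note: only GPRs 0-5 are scanned above, and 0 is also returned when all
   of them are live, so a return value of 0 does not by itself guarantee
   that r0 is actually unused.  */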
7339
7340
7341 /* Helper function for s390_regs_ever_clobbered. Sets the fields in DATA for all
7342 clobbered hard regs in SETREG. */
7343
7344 static void
7345 s390_reg_clobbered_rtx (rtx setreg, const_rtx set_insn ATTRIBUTE_UNUSED, void *data)
7346 {
7347 int *regs_ever_clobbered = (int *)data;
7348 unsigned int i, regno;
7349 enum machine_mode mode = GET_MODE (setreg);
7350
7351 if (GET_CODE (setreg) == SUBREG)
7352 {
7353 rtx inner = SUBREG_REG (setreg);
7354 if (!GENERAL_REG_P (inner))
7355 return;
7356 regno = subreg_regno (setreg);
7357 }
7358 else if (GENERAL_REG_P (setreg))
7359 regno = REGNO (setreg);
7360 else
7361 return;
7362
7363 for (i = regno;
7364 i < regno + HARD_REGNO_NREGS (regno, mode);
7365 i++)
7366 regs_ever_clobbered[i] = 1;
7367 }
7368
7369 /* Walks through all basic blocks of the current function looking
7370 for clobbered hard regs using s390_reg_clobbered_rtx. The fields
7371 of the passed integer array REGS_EVER_CLOBBERED are set to one for
7372 each of those regs. */
7373
7374 static void
7375 s390_regs_ever_clobbered (int *regs_ever_clobbered)
7376 {
7377 basic_block cur_bb;
7378 rtx cur_insn;
7379 unsigned int i;
7380
7381 memset (regs_ever_clobbered, 0, 16 * sizeof (int));
7382
7383 /* For non-leaf functions we have to consider all call clobbered regs to be
7384 clobbered. */
7385 if (!crtl->is_leaf)
7386 {
7387 for (i = 0; i < 16; i++)
7388 regs_ever_clobbered[i] = call_really_used_regs[i];
7389 }
7390
7391 /* Make the "magic" eh_return registers live if necessary. For regs_ever_live
7392 this work is done by liveness analysis (mark_regs_live_at_end).
7393 Special care is needed for functions containing landing pads. Landing pads
7394 may use the eh registers, but the code which sets these registers is not
7395 contained in that function. Hence s390_regs_ever_clobbered is not able to
7396 deal with this automatically. */
7397 if (crtl->calls_eh_return || cfun->machine->has_landing_pad_p)
7398 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM ; i++)
7399 if (crtl->calls_eh_return
7400 || (cfun->machine->has_landing_pad_p
7401 && df_regs_ever_live_p (EH_RETURN_DATA_REGNO (i))))
7402 regs_ever_clobbered[EH_RETURN_DATA_REGNO (i)] = 1;
7403
7404 /* For nonlocal gotos all call-saved registers have to be saved.
7405 This flag is also set for the unwinding code in libgcc.
7406 See expand_builtin_unwind_init. For regs_ever_live this is done by
7407 reload. */
7408 if (cfun->has_nonlocal_label)
7409 for (i = 0; i < 16; i++)
7410 if (!call_really_used_regs[i])
7411 regs_ever_clobbered[i] = 1;
7412
7413 FOR_EACH_BB (cur_bb)
7414 {
7415 FOR_BB_INSNS (cur_bb, cur_insn)
7416 {
7417 if (INSN_P (cur_insn))
7418 note_stores (PATTERN (cur_insn),
7419 s390_reg_clobbered_rtx,
7420 regs_ever_clobbered);
7421 }
7422 }
7423 }
7424
7425 /* Determine the frame area which actually has to be accessed
7426 in the function epilogue. The values are stored at the
7427 given pointers AREA_BOTTOM (address of the lowest used stack
7428 address) and AREA_TOP (address of the first item which does
7429 not belong to the stack frame). */
7430
7431 static void
7432 s390_frame_area (int *area_bottom, int *area_top)
7433 {
7434 int b, t;
7435 int i;
7436
7437 b = INT_MAX;
7438 t = INT_MIN;
7439
7440 if (cfun_frame_layout.first_restore_gpr != -1)
7441 {
7442 b = (cfun_frame_layout.gprs_offset
7443 + cfun_frame_layout.first_restore_gpr * UNITS_PER_LONG);
7444 t = b + (cfun_frame_layout.last_restore_gpr
7445 - cfun_frame_layout.first_restore_gpr + 1) * UNITS_PER_LONG;
7446 }
7447
7448 if (TARGET_64BIT && cfun_save_high_fprs_p)
7449 {
7450 b = MIN (b, cfun_frame_layout.f8_offset);
7451 t = MAX (t, (cfun_frame_layout.f8_offset
7452 + cfun_frame_layout.high_fprs * 8));
7453 }
7454
7455 if (!TARGET_64BIT)
7456 for (i = 2; i < 4; i++)
7457 if (cfun_fpr_bit_p (i))
7458 {
7459 b = MIN (b, cfun_frame_layout.f4_offset + (i - 2) * 8);
7460 t = MAX (t, cfun_frame_layout.f4_offset + (i - 1) * 8);
7461 }
7462
7463 *area_bottom = b;
7464 *area_top = t;
7465 }
7466
7467 /* Fill cfun->machine with info about register usage of current function.
7468 Return in CLOBBERED_REGS which GPRs are currently considered set. */
7469
7470 static void
7471 s390_register_info (int clobbered_regs[])
7472 {
7473 int i, j;
7474
7475 /* FPRs 8-15 are call-saved in the 64-bit ABI. */
7476 cfun_frame_layout.fpr_bitmap = 0;
7477 cfun_frame_layout.high_fprs = 0;
7478 if (TARGET_64BIT)
7479 for (i = 24; i < 32; i++)
7480 if (df_regs_ever_live_p (i) && !global_regs[i])
7481 {
7482 cfun_set_fpr_bit (i - 16);
7483 cfun_frame_layout.high_fprs++;
7484 }
7485
7486 /* Find first and last gpr to be saved. We trust regs_ever_live
7487 data, except that we don't save and restore global registers.
7488
7489 Also, all registers with special meaning to the compiler need
7490 extra handling. */
7491
7492 s390_regs_ever_clobbered (clobbered_regs);
7493
7494 for (i = 0; i < 16; i++)
7495 clobbered_regs[i] = clobbered_regs[i] && !global_regs[i] && !fixed_regs[i];
7496
7497 if (frame_pointer_needed)
7498 clobbered_regs[HARD_FRAME_POINTER_REGNUM] = 1;
7499
7500 if (flag_pic)
7501 clobbered_regs[PIC_OFFSET_TABLE_REGNUM]
7502 |= df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM);
7503
7504 clobbered_regs[BASE_REGNUM]
7505 |= (cfun->machine->base_reg
7506 && REGNO (cfun->machine->base_reg) == BASE_REGNUM);
7507
7508 clobbered_regs[RETURN_REGNUM]
7509 |= (!crtl->is_leaf
7510 || TARGET_TPF_PROFILING
7511 || cfun->machine->split_branches_pending_p
7512 || cfun_frame_layout.save_return_addr_p
7513 || crtl->calls_eh_return
7514 || cfun->stdarg);
7515
7516 clobbered_regs[STACK_POINTER_REGNUM]
7517 |= (!crtl->is_leaf
7518 || TARGET_TPF_PROFILING
7519 || cfun_save_high_fprs_p
7520 || get_frame_size () > 0
7521 || cfun->calls_alloca
7522 || cfun->stdarg);
7523
7524 for (i = 6; i < 16; i++)
7525 if (df_regs_ever_live_p (i) || clobbered_regs[i])
7526 break;
7527 for (j = 15; j > i; j--)
7528 if (df_regs_ever_live_p (j) || clobbered_regs[j])
7529 break;
7530
7531 if (i == 16)
7532 {
7533 /* Nothing to save/restore. */
7534 cfun_frame_layout.first_save_gpr_slot = -1;
7535 cfun_frame_layout.last_save_gpr_slot = -1;
7536 cfun_frame_layout.first_save_gpr = -1;
7537 cfun_frame_layout.first_restore_gpr = -1;
7538 cfun_frame_layout.last_save_gpr = -1;
7539 cfun_frame_layout.last_restore_gpr = -1;
7540 }
7541 else
7542 {
7543 /* Save slots for gprs from i to j. */
7544 cfun_frame_layout.first_save_gpr_slot = i;
7545 cfun_frame_layout.last_save_gpr_slot = j;
7546
7547 for (i = cfun_frame_layout.first_save_gpr_slot;
7548 i < cfun_frame_layout.last_save_gpr_slot + 1;
7549 i++)
7550 if (clobbered_regs[i])
7551 break;
7552
7553 for (j = cfun_frame_layout.last_save_gpr_slot; j > i; j--)
7554 if (clobbered_regs[j])
7555 break;
7556
7557 if (i == cfun_frame_layout.last_save_gpr_slot + 1)
7558 {
7559 /* Nothing to save/restore. */
7560 cfun_frame_layout.first_save_gpr = -1;
7561 cfun_frame_layout.first_restore_gpr = -1;
7562 cfun_frame_layout.last_save_gpr = -1;
7563 cfun_frame_layout.last_restore_gpr = -1;
7564 }
7565 else
7566 {
7567 /* Save / Restore from gpr i to j. */
7568 cfun_frame_layout.first_save_gpr = i;
7569 cfun_frame_layout.first_restore_gpr = i;
7570 cfun_frame_layout.last_save_gpr = j;
7571 cfun_frame_layout.last_restore_gpr = j;
7572 }
7573 }
7574
7575 if (cfun->stdarg)
7576 {
7577 /* Varargs functions need to save gprs 2 to 6. */
7578 if (cfun->va_list_gpr_size
7579 && crtl->args.info.gprs < GP_ARG_NUM_REG)
7580 {
7581 int min_gpr = crtl->args.info.gprs;
7582 int max_gpr = min_gpr + cfun->va_list_gpr_size;
7583 if (max_gpr > GP_ARG_NUM_REG)
7584 max_gpr = GP_ARG_NUM_REG;
7585
7586 if (cfun_frame_layout.first_save_gpr == -1
7587 || cfun_frame_layout.first_save_gpr > 2 + min_gpr)
7588 {
7589 cfun_frame_layout.first_save_gpr = 2 + min_gpr;
7590 cfun_frame_layout.first_save_gpr_slot = 2 + min_gpr;
7591 }
7592
7593 if (cfun_frame_layout.last_save_gpr == -1
7594 || cfun_frame_layout.last_save_gpr < 2 + max_gpr - 1)
7595 {
7596 cfun_frame_layout.last_save_gpr = 2 + max_gpr - 1;
7597 cfun_frame_layout.last_save_gpr_slot = 2 + max_gpr - 1;
7598 }
7599 }
7600
7601 /* Mark f0, f2 for 31 bit and f0-f4 for 64 bit to be saved. */
7602 if (TARGET_HARD_FLOAT && cfun->va_list_fpr_size
7603 && crtl->args.info.fprs < FP_ARG_NUM_REG)
7604 {
7605 int min_fpr = crtl->args.info.fprs;
7606 int max_fpr = min_fpr + cfun->va_list_fpr_size;
7607 if (max_fpr > FP_ARG_NUM_REG)
7608 max_fpr = FP_ARG_NUM_REG;
7609
7610 /* ??? This is currently required to ensure proper location
7611 of the fpr save slots within the va_list save area. */
7612 if (TARGET_PACKED_STACK)
7613 min_fpr = 0;
7614
7615 for (i = min_fpr; i < max_fpr; i++)
7616 cfun_set_fpr_bit (i);
7617 }
7618 }
7619
7620 if (!TARGET_64BIT)
7621 for (i = 2; i < 4; i++)
7622 if (df_regs_ever_live_p (i + 16) && !global_regs[i + 16])
7623 cfun_set_fpr_bit (i);
7624 }
7625
7626 /* Fill cfun->machine with info about frame of current function. */
7627
7628 static void
7629 s390_frame_info (void)
7630 {
7631 int i;
7632
7633 cfun_frame_layout.frame_size = get_frame_size ();
7634 if (!TARGET_64BIT && cfun_frame_layout.frame_size > 0x7fff0000)
7635 fatal_error ("total size of local variables exceeds architecture limit");
7636
7637 if (!TARGET_PACKED_STACK)
7638 {
7639 cfun_frame_layout.backchain_offset = 0;
7640 cfun_frame_layout.f0_offset = 16 * UNITS_PER_LONG;
7641 cfun_frame_layout.f4_offset = cfun_frame_layout.f0_offset + 2 * 8;
7642 cfun_frame_layout.f8_offset = -cfun_frame_layout.high_fprs * 8;
7643 cfun_frame_layout.gprs_offset = (cfun_frame_layout.first_save_gpr_slot
7644 * UNITS_PER_LONG);
7645 }
7646 else if (TARGET_BACKCHAIN) /* kernel stack layout */
7647 {
7648 cfun_frame_layout.backchain_offset = (STACK_POINTER_OFFSET
7649 - UNITS_PER_LONG);
7650 cfun_frame_layout.gprs_offset
7651 = (cfun_frame_layout.backchain_offset
7652 - (STACK_POINTER_REGNUM - cfun_frame_layout.first_save_gpr_slot + 1)
7653 * UNITS_PER_LONG);
7654
7655 if (TARGET_64BIT)
7656 {
7657 cfun_frame_layout.f4_offset
7658 = (cfun_frame_layout.gprs_offset
7659 - 8 * (cfun_fpr_bit_p (2) + cfun_fpr_bit_p (3)));
7660
7661 cfun_frame_layout.f0_offset
7662 = (cfun_frame_layout.f4_offset
7663 - 8 * (cfun_fpr_bit_p (0) + cfun_fpr_bit_p (1)));
7664 }
7665 else
7666 {
7667 /* On 31 bit we have to care about alignment of the
7668 floating point regs to provide fastest access. */
7669 cfun_frame_layout.f0_offset
7670 = ((cfun_frame_layout.gprs_offset
7671 & ~(STACK_BOUNDARY / BITS_PER_UNIT - 1))
7672 - 8 * (cfun_fpr_bit_p (0) + cfun_fpr_bit_p (1)));
7673
7674 cfun_frame_layout.f4_offset
7675 = (cfun_frame_layout.f0_offset
7676 - 8 * (cfun_fpr_bit_p (2) + cfun_fpr_bit_p (3)));
7677 }
7678 }
7679 else /* no backchain */
7680 {
7681 cfun_frame_layout.f4_offset
7682 = (STACK_POINTER_OFFSET
7683 - 8 * (cfun_fpr_bit_p (2) + cfun_fpr_bit_p (3)));
7684
7685 cfun_frame_layout.f0_offset
7686 = (cfun_frame_layout.f4_offset
7687 - 8 * (cfun_fpr_bit_p (0) + cfun_fpr_bit_p (1)));
7688
7689 cfun_frame_layout.gprs_offset
7690 = cfun_frame_layout.f0_offset - cfun_gprs_save_area_size;
7691 }
7692
7693 if (crtl->is_leaf
7694 && !TARGET_TPF_PROFILING
7695 && cfun_frame_layout.frame_size == 0
7696 && !cfun_save_high_fprs_p
7697 && !cfun->calls_alloca
7698 && !cfun->stdarg)
7699 return;
7700
7701 if (!TARGET_PACKED_STACK)
7702 cfun_frame_layout.frame_size += (STACK_POINTER_OFFSET
7703 + crtl->outgoing_args_size
7704 + cfun_frame_layout.high_fprs * 8);
7705 else
7706 {
7707 if (TARGET_BACKCHAIN)
7708 cfun_frame_layout.frame_size += UNITS_PER_LONG;
7709
7710 /* No alignment trouble here because f8-f15 are only saved under
7711 64 bit. */
7712 cfun_frame_layout.f8_offset = (MIN (MIN (cfun_frame_layout.f0_offset,
7713 cfun_frame_layout.f4_offset),
7714 cfun_frame_layout.gprs_offset)
7715 - cfun_frame_layout.high_fprs * 8);
7716
7717 cfun_frame_layout.frame_size += cfun_frame_layout.high_fprs * 8;
7718
7719 for (i = 0; i < 8; i++)
7720 if (cfun_fpr_bit_p (i))
7721 cfun_frame_layout.frame_size += 8;
7722
7723 cfun_frame_layout.frame_size += cfun_gprs_save_area_size;
7724
7725 /* If, on 31 bit, an odd number of GPRs has to be saved, we have to adjust
7726 the frame size to maintain the 8-byte alignment of stack frames. */
7727 cfun_frame_layout.frame_size = ((cfun_frame_layout.frame_size +
7728 STACK_BOUNDARY / BITS_PER_UNIT - 1)
7729 & ~(STACK_BOUNDARY / BITS_PER_UNIT - 1));
7730
7731 cfun_frame_layout.frame_size += crtl->outgoing_args_size;
7732 }
7733 }
7734
7735 /* Generate frame layout. Fills in register and frame data for the current
7736 function in cfun->machine. This routine can be called multiple times;
7737 it will re-do the complete frame layout every time. */
7738
7739 static void
7740 s390_init_frame_layout (void)
7741 {
7742 HOST_WIDE_INT frame_size;
7743 int base_used;
7744 int clobbered_regs[16];
7745
7746 /* On S/390 machines, we may need to perform branch splitting, which
7747 will require both base and return address register. We have no
7748 choice but to assume we're going to need them until right at the
7749 end of the machine dependent reorg phase. */
7750 if (!TARGET_CPU_ZARCH)
7751 cfun->machine->split_branches_pending_p = true;
7752
7753 do
7754 {
7755 frame_size = cfun_frame_layout.frame_size;
7756
7757 /* Try to predict whether we'll need the base register. */
7758 base_used = cfun->machine->split_branches_pending_p
7759 || crtl->uses_const_pool
7760 || (!DISP_IN_RANGE (frame_size)
7761 && !CONST_OK_FOR_K (frame_size));
7762
7763 /* Decide which register to use as literal pool base. In small
7764 leaf functions, try to use an unused call-clobbered register
7765 as base register to avoid save/restore overhead. */
7766 if (!base_used)
7767 cfun->machine->base_reg = NULL_RTX;
7768 else if (crtl->is_leaf && !df_regs_ever_live_p (5))
7769 cfun->machine->base_reg = gen_rtx_REG (Pmode, 5);
7770 else
7771 cfun->machine->base_reg = gen_rtx_REG (Pmode, BASE_REGNUM);
7772
7773 s390_register_info (clobbered_regs);
7774 s390_frame_info ();
7775 }
7776 while (frame_size != cfun_frame_layout.frame_size);
7777 }
7778
7779 /* Update frame layout. Recompute actual register save data based on
7780 current info and update regs_ever_live for the special registers.
7781 May be called multiple times, but may never cause *more* registers
7782 to be saved than s390_init_frame_layout allocated room for. */
7783
7784 static void
7785 s390_update_frame_layout (void)
7786 {
7787 int clobbered_regs[16];
7788
7789 s390_register_info (clobbered_regs);
7790
7791 df_set_regs_ever_live (BASE_REGNUM,
7792 clobbered_regs[BASE_REGNUM] ? true : false);
7793 df_set_regs_ever_live (RETURN_REGNUM,
7794 clobbered_regs[RETURN_REGNUM] ? true : false);
7795 df_set_regs_ever_live (STACK_POINTER_REGNUM,
7796 clobbered_regs[STACK_POINTER_REGNUM] ? true : false);
7797
7798 if (cfun->machine->base_reg)
7799 df_set_regs_ever_live (REGNO (cfun->machine->base_reg), true);
7800 }
7801
7802 /* Return true if it is legal to put a value with MODE into REGNO. */
7803
7804 bool
7805 s390_hard_regno_mode_ok (unsigned int regno, enum machine_mode mode)
7806 {
7807 switch (REGNO_REG_CLASS (regno))
7808 {
7809 case FP_REGS:
7810 if (REGNO_PAIR_OK (regno, mode))
7811 {
7812 if (mode == SImode || mode == DImode)
7813 return true;
7814
7815 if (FLOAT_MODE_P (mode) && GET_MODE_CLASS (mode) != MODE_VECTOR_FLOAT)
7816 return true;
7817 }
7818 break;
7819 case ADDR_REGS:
7820 if (FRAME_REGNO_P (regno) && mode == Pmode)
7821 return true;
7822
7823 /* fallthrough */
7824 case GENERAL_REGS:
7825 if (REGNO_PAIR_OK (regno, mode))
7826 {
7827 if (TARGET_ZARCH
7828 || (mode != TFmode && mode != TCmode && mode != TDmode))
7829 return true;
7830 }
7831 break;
7832 case CC_REGS:
7833 if (GET_MODE_CLASS (mode) == MODE_CC)
7834 return true;
7835 break;
7836 case ACCESS_REGS:
7837 if (REGNO_PAIR_OK (regno, mode))
7838 {
7839 if (mode == SImode || mode == Pmode)
7840 return true;
7841 }
7842 break;
7843 default:
7844 return false;
7845 }
7846
7847 return false;
7848 }
7849
7850 /* Return nonzero if register OLD_REG can be renamed to register NEW_REG. */
7851
7852 bool
7853 s390_hard_regno_rename_ok (unsigned int old_reg, unsigned int new_reg)
7854 {
7855 /* Once we've decided upon a register to use as base register, it must
7856 no longer be used for any other purpose. */
7857 if (cfun->machine->base_reg)
7858 if (REGNO (cfun->machine->base_reg) == old_reg
7859 || REGNO (cfun->machine->base_reg) == new_reg)
7860 return false;
7861
7862 return true;
7863 }
7864
7865 /* Maximum number of registers to represent a value of mode MODE
7866 in a register of class RCLASS. */
7867
7868 int
7869 s390_class_max_nregs (enum reg_class rclass, enum machine_mode mode)
7870 {
7871 switch (rclass)
7872 {
7873 case FP_REGS:
7874 if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
7875 return 2 * ((GET_MODE_SIZE (mode) / 2 + 8 - 1) / 8);
7876 else
7877 return (GET_MODE_SIZE (mode) + 8 - 1) / 8;
7878 case ACCESS_REGS:
7879 return (GET_MODE_SIZE (mode) + 4 - 1) / 4;
7880 default:
7881 break;
7882 }
7883 return (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
7884 }
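
/* Worked examples for s390_class_max_nregs, assuming a 64-bit target where
   UNITS_PER_WORD is 8: a TFmode value (16 bytes) in FP_REGS needs
   (16 + 7) / 8 = 2 registers, a complex DFmode value (16 bytes) needs
   2 * ((16 / 2 + 7) / 8) = 2, and an SImode value in ACCESS_REGS needs
   (4 + 3) / 4 = 1.  */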
7885
7886 /* Return true if we use LRA instead of reload pass. */
7887 static bool
7888 s390_lra_p (void)
7889 {
7890 return s390_lra_flag;
7891 }
7892
7893 /* Return true if register FROM can be eliminated via register TO. */
7894
7895 static bool
7896 s390_can_eliminate (const int from, const int to)
7897 {
7898 /* On zSeries machines, we have not marked the base register as fixed.
7899 Instead, we have an elimination rule BASE_REGNUM -> BASE_REGNUM.
7900 If a function requires the base register, we say here that this
7901 elimination cannot be performed. This will cause reload to free
7902 up the base register (as if it were fixed). On the other hand,
7903 if the current function does *not* require the base register, we
7904 say here the elimination succeeds, which in turn allows reload
7905 to allocate the base register for any other purpose. */
7906 if (from == BASE_REGNUM && to == BASE_REGNUM)
7907 {
7908 if (TARGET_CPU_ZARCH)
7909 {
7910 s390_init_frame_layout ();
7911 return cfun->machine->base_reg == NULL_RTX;
7912 }
7913
7914 return false;
7915 }
7916
7917 /* Everything else must point into the stack frame. */
7918 gcc_assert (to == STACK_POINTER_REGNUM
7919 || to == HARD_FRAME_POINTER_REGNUM);
7920
7921 gcc_assert (from == FRAME_POINTER_REGNUM
7922 || from == ARG_POINTER_REGNUM
7923 || from == RETURN_ADDRESS_POINTER_REGNUM);
7924
7925 /* Make sure we actually saved the return address. */
7926 if (from == RETURN_ADDRESS_POINTER_REGNUM)
7927 if (!crtl->calls_eh_return
7928 && !cfun->stdarg
7929 && !cfun_frame_layout.save_return_addr_p)
7930 return false;
7931
7932 return true;
7933 }
7934
7935 /* Return the offset between register FROM and register TO initially after the prologue. */
7936
7937 HOST_WIDE_INT
7938 s390_initial_elimination_offset (int from, int to)
7939 {
7940 HOST_WIDE_INT offset;
7941 int index;
7942
7943 /* ??? Why are we called for non-eliminable pairs? */
7944 if (!s390_can_eliminate (from, to))
7945 return 0;
7946
7947 switch (from)
7948 {
7949 case FRAME_POINTER_REGNUM:
7950 offset = (get_frame_size()
7951 + STACK_POINTER_OFFSET
7952 + crtl->outgoing_args_size);
7953 break;
7954
7955 case ARG_POINTER_REGNUM:
7956 s390_init_frame_layout ();
7957 offset = cfun_frame_layout.frame_size + STACK_POINTER_OFFSET;
7958 break;
7959
7960 case RETURN_ADDRESS_POINTER_REGNUM:
7961 s390_init_frame_layout ();
7962 index = RETURN_REGNUM - cfun_frame_layout.first_save_gpr_slot;
7963 gcc_assert (index >= 0);
7964 offset = cfun_frame_layout.frame_size + cfun_frame_layout.gprs_offset;
7965 offset += index * UNITS_PER_LONG;
7966 break;
7967
7968 case BASE_REGNUM:
7969 offset = 0;
7970 break;
7971
7972 default:
7973 gcc_unreachable ();
7974 }
7975
7976 return offset;
7977 }
7978
7979 /* Emit insn to save fpr REGNUM at offset OFFSET relative
7980 to register BASE. Return generated insn. */
7981
7982 static rtx
7983 save_fpr (rtx base, int offset, int regnum)
7984 {
7985 rtx addr;
7986 addr = gen_rtx_MEM (DFmode, plus_constant (Pmode, base, offset));
7987
7988 if (regnum >= 16 && regnum <= (16 + FP_ARG_NUM_REG))
7989 set_mem_alias_set (addr, get_varargs_alias_set ());
7990 else
7991 set_mem_alias_set (addr, get_frame_alias_set ());
7992
7993 return emit_move_insn (addr, gen_rtx_REG (DFmode, regnum));
7994 }
7995
7996 /* Emit insn to restore fpr REGNUM from offset OFFSET relative
7997 to register BASE. Return generated insn. */
7998
7999 static rtx
8000 restore_fpr (rtx base, int offset, int regnum)
8001 {
8002 rtx addr;
8003 addr = gen_rtx_MEM (DFmode, plus_constant (Pmode, base, offset));
8004 set_mem_alias_set (addr, get_frame_alias_set ());
8005
8006 return emit_move_insn (gen_rtx_REG (DFmode, regnum), addr);
8007 }
8008
8009 /* Return true if REGNO is a global register, but not one
8010 of the special ones that need to be saved/restored anyway. */
8011
8012 static inline bool
8013 global_not_special_regno_p (int regno)
8014 {
8015 return (global_regs[regno]
8016 /* These registers are special and need to be
8017 restored in any case. */
8018 && !(regno == STACK_POINTER_REGNUM
8019 || regno == RETURN_REGNUM
8020 || regno == BASE_REGNUM
8021 || (flag_pic && regno == (int)PIC_OFFSET_TABLE_REGNUM)));
8022 }
8023
8024 /* Generate insn to save registers FIRST to LAST into
8025 the register save area located at offset OFFSET
8026 relative to register BASE. */
8027
8028 static rtx
8029 save_gprs (rtx base, int offset, int first, int last)
8030 {
8031 rtx addr, insn, note;
8032 int i;
8033
8034 addr = plus_constant (Pmode, base, offset);
8035 addr = gen_rtx_MEM (Pmode, addr);
8036
8037 set_mem_alias_set (addr, get_frame_alias_set ());
8038
8039 /* Special-case single register. */
8040 if (first == last)
8041 {
8042 if (TARGET_64BIT)
8043 insn = gen_movdi (addr, gen_rtx_REG (Pmode, first));
8044 else
8045 insn = gen_movsi (addr, gen_rtx_REG (Pmode, first));
8046
8047 if (!global_not_special_regno_p (first))
8048 RTX_FRAME_RELATED_P (insn) = 1;
8049 return insn;
8050 }
8051
8052
8053 insn = gen_store_multiple (addr,
8054 gen_rtx_REG (Pmode, first),
8055 GEN_INT (last - first + 1));
8056
8057 if (first <= 6 && cfun->stdarg)
8058 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
8059 {
8060 rtx mem = XEXP (XVECEXP (PATTERN (insn), 0, i), 0);
8061
8062 if (first + i <= 6)
8063 set_mem_alias_set (mem, get_varargs_alias_set ());
8064 }
8065
8066 /* We need to set the FRAME_RELATED flag on all SETs
8067 inside the store-multiple pattern.
8068
8069 However, we must not emit DWARF records for registers 2..5
8070 if they are stored for use by variable arguments ...
8071
8072 ??? Unfortunately, it is not enough to simply not set the
8073 FRAME_RELATED flags for those SETs, because the first SET
8074 of the PARALLEL is always treated as if it had the flag
8075 set, even if it does not. Therefore we emit a new pattern
8076 without those registers as REG_FRAME_RELATED_EXPR note. */
8077
8078 if (first >= 6 && !global_not_special_regno_p (first))
8079 {
8080 rtx pat = PATTERN (insn);
8081
8082 for (i = 0; i < XVECLEN (pat, 0); i++)
8083 if (GET_CODE (XVECEXP (pat, 0, i)) == SET
8084 && !global_not_special_regno_p (REGNO (SET_SRC (XVECEXP (pat,
8085 0, i)))))
8086 RTX_FRAME_RELATED_P (XVECEXP (pat, 0, i)) = 1;
8087
8088 RTX_FRAME_RELATED_P (insn) = 1;
8089 }
8090 else if (last >= 6)
8091 {
8092 int start;
8093
8094 for (start = first >= 6 ? first : 6; start <= last; start++)
8095 if (!global_not_special_regno_p (start))
8096 break;
8097
8098 if (start > last)
8099 return insn;
8100
8101 addr = plus_constant (Pmode, base,
8102 offset + (start - first) * UNITS_PER_LONG);
8103 note = gen_store_multiple (gen_rtx_MEM (Pmode, addr),
8104 gen_rtx_REG (Pmode, start),
8105 GEN_INT (last - start + 1));
8106 note = PATTERN (note);
8107
8108 add_reg_note (insn, REG_FRAME_RELATED_EXPR, note);
8109
8110 for (i = 0; i < XVECLEN (note, 0); i++)
8111 if (GET_CODE (XVECEXP (note, 0, i)) == SET
8112 && !global_not_special_regno_p (REGNO (SET_SRC (XVECEXP (note,
8113 0, i)))))
8114 RTX_FRAME_RELATED_P (XVECEXP (note, 0, i)) = 1;
8115
8116 RTX_FRAME_RELATED_P (insn) = 1;
8117 }
8118
8119 return insn;
8120 }
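
/* Rough illustration, with offsets that depend on the frame layout computed
   earlier and are not taken from this file: a typical 64-bit prologue call
   such as save_gprs (stack_pointer_rtx, offset, 6, 15) yields one
   store-multiple insn, later emitted as something like
   "stmg %r6,%r15,48(%r15)", with only the non-vararg, non-global SETs
   flagged as frame related for the DWARF CFI note.  */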
8121
8122 /* Generate insn to restore registers FIRST to LAST from
8123 the register save area located at offset OFFSET
8124 relative to register BASE. */
8125
8126 static rtx
8127 restore_gprs (rtx base, int offset, int first, int last)
8128 {
8129 rtx addr, insn;
8130
8131 addr = plus_constant (Pmode, base, offset);
8132 addr = gen_rtx_MEM (Pmode, addr);
8133 set_mem_alias_set (addr, get_frame_alias_set ());
8134
8135 /* Special-case single register. */
8136 if (first == last)
8137 {
8138 if (TARGET_64BIT)
8139 insn = gen_movdi (gen_rtx_REG (Pmode, first), addr);
8140 else
8141 insn = gen_movsi (gen_rtx_REG (Pmode, first), addr);
8142
8143 return insn;
8144 }
8145
8146 insn = gen_load_multiple (gen_rtx_REG (Pmode, first),
8147 addr,
8148 GEN_INT (last - first + 1));
8149 return insn;
8150 }
8151
8152 /* Return insn sequence to load the GOT register. */
8153
8154 static GTY(()) rtx got_symbol;
8155 rtx
8156 s390_load_got (void)
8157 {
8158 rtx insns;
8159
8160 /* We cannot use pic_offset_table_rtx here since we use this
8161 function also for non-pic code if __tls_get_offset is called; in
8162 that case neither PIC_OFFSET_TABLE_REGNUM nor pic_offset_table_rtx
8163 is usable. */
8164 rtx got_rtx = gen_rtx_REG (Pmode, 12);
8165
8166 if (!got_symbol)
8167 {
8168 got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
8169 SYMBOL_REF_FLAGS (got_symbol) = SYMBOL_FLAG_LOCAL;
8170 }
8171
8172 start_sequence ();
8173
8174 if (TARGET_CPU_ZARCH)
8175 {
8176 emit_move_insn (got_rtx, got_symbol);
8177 }
8178 else
8179 {
8180 rtx offset;
8181
8182 offset = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, got_symbol),
8183 UNSPEC_LTREL_OFFSET);
8184 offset = gen_rtx_CONST (Pmode, offset);
8185 offset = force_const_mem (Pmode, offset);
8186
8187 emit_move_insn (got_rtx, offset);
8188
8189 offset = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (offset, 0)),
8190 UNSPEC_LTREL_BASE);
8191 offset = gen_rtx_PLUS (Pmode, got_rtx, offset);
8192
8193 emit_move_insn (got_rtx, offset);
8194 }
8195
8196 insns = get_insns ();
8197 end_sequence ();
8198 return insns;
8199 }
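
/* Sketch of the resulting code: on TARGET_CPU_ZARCH the sequence above
   amounts to a single "larl %r12,_GLOBAL_OFFSET_TABLE_", while on older
   ESA targets the GOT address is rebuilt from a literal-pool constant
   relative to the literal pool base (UNSPEC_LTREL_OFFSET/BASE).  */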
8200
8201 /* This ties together stack memory (MEM with an alias set of frame_alias_set)
8202 and the change to the stack pointer. */
8203
8204 static void
8205 s390_emit_stack_tie (void)
8206 {
8207 rtx mem = gen_frame_mem (BLKmode,
8208 gen_rtx_REG (Pmode, STACK_POINTER_REGNUM));
8209
8210 emit_insn (gen_stack_tie (mem));
8211 }
8212
8213 /* Expand the prologue into a bunch of separate insns. */
8214
8215 void
8216 s390_emit_prologue (void)
8217 {
8218 rtx insn, addr;
8219 rtx temp_reg;
8220 int i;
8221 int offset;
8222 int next_fpr = 0;
8223
8224 /* Complete frame layout. */
8225
8226 s390_update_frame_layout ();
8227
8228 /* Annotate all constant pool references to let the scheduler know
8229 they implicitly use the base register. */
8230
8231 push_topmost_sequence ();
8232
8233 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8234 if (INSN_P (insn))
8235 {
8236 annotate_constant_pool_refs (&PATTERN (insn));
8237 df_insn_rescan (insn);
8238 }
8239
8240 pop_topmost_sequence ();
8241
8242 /* Choose the best register to use for temporaries within the prologue.
8243 See below for why TPF must use register 1. */
8244
8245 if (!has_hard_reg_initial_val (Pmode, RETURN_REGNUM)
8246 && !crtl->is_leaf
8247 && !TARGET_TPF_PROFILING)
8248 temp_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
8249 else
8250 temp_reg = gen_rtx_REG (Pmode, 1);
8251
8252 /* Save call saved gprs. */
8253 if (cfun_frame_layout.first_save_gpr != -1)
8254 {
8255 insn = save_gprs (stack_pointer_rtx,
8256 cfun_frame_layout.gprs_offset +
8257 UNITS_PER_LONG * (cfun_frame_layout.first_save_gpr
8258 - cfun_frame_layout.first_save_gpr_slot),
8259 cfun_frame_layout.first_save_gpr,
8260 cfun_frame_layout.last_save_gpr);
8261 emit_insn (insn);
8262 }
8263
8264 /* Dummy insn to mark literal pool slot. */
8265
8266 if (cfun->machine->base_reg)
8267 emit_insn (gen_main_pool (cfun->machine->base_reg));
8268
8269 offset = cfun_frame_layout.f0_offset;
8270
8271 /* Save f0 and f2. */
8272 for (i = 0; i < 2; i++)
8273 {
8274 if (cfun_fpr_bit_p (i))
8275 {
8276 save_fpr (stack_pointer_rtx, offset, i + 16);
8277 offset += 8;
8278 }
8279 else if (!TARGET_PACKED_STACK)
8280 offset += 8;
8281 }
8282
8283 /* Save f4 and f6. */
8284 offset = cfun_frame_layout.f4_offset;
8285 for (i = 2; i < 4; i++)
8286 {
8287 if (cfun_fpr_bit_p (i))
8288 {
8289 insn = save_fpr (stack_pointer_rtx, offset, i + 16);
8290 offset += 8;
8291
8292 /* If f4 and f6 are call clobbered, they are saved due to stdarg and
8293 therefore are not frame related. */
8294 if (!call_really_used_regs[i + 16])
8295 RTX_FRAME_RELATED_P (insn) = 1;
8296 }
8297 else if (!TARGET_PACKED_STACK)
8298 offset += 8;
8299 }
8300
8301 if (TARGET_PACKED_STACK
8302 && cfun_save_high_fprs_p
8303 && cfun_frame_layout.f8_offset + cfun_frame_layout.high_fprs * 8 > 0)
8304 {
8305 offset = (cfun_frame_layout.f8_offset
8306 + (cfun_frame_layout.high_fprs - 1) * 8);
8307
8308 for (i = 15; i > 7 && offset >= 0; i--)
8309 if (cfun_fpr_bit_p (i))
8310 {
8311 insn = save_fpr (stack_pointer_rtx, offset, i + 16);
8312
8313 RTX_FRAME_RELATED_P (insn) = 1;
8314 offset -= 8;
8315 }
8316 if (offset >= cfun_frame_layout.f8_offset)
8317 next_fpr = i + 16;
8318 }
8319
8320 if (!TARGET_PACKED_STACK)
8321 next_fpr = cfun_save_high_fprs_p ? 31 : 0;
8322
8323 if (flag_stack_usage_info)
8324 current_function_static_stack_size = cfun_frame_layout.frame_size;
8325
8326 /* Decrement stack pointer. */
8327
8328 if (cfun_frame_layout.frame_size > 0)
8329 {
8330 rtx frame_off = GEN_INT (-cfun_frame_layout.frame_size);
8331 rtx real_frame_off;
8332
8333 if (s390_stack_size)
8334 {
8335 HOST_WIDE_INT stack_guard;
8336
8337 if (s390_stack_guard)
8338 stack_guard = s390_stack_guard;
8339 else
8340 {
8341 /* If no value for the stack guard is provided, the smallest power of 2
8342 larger than the current frame size is chosen. */
8343 stack_guard = 1;
8344 while (stack_guard < cfun_frame_layout.frame_size)
8345 stack_guard <<= 1;
8346 }
8347
8348 if (cfun_frame_layout.frame_size >= s390_stack_size)
8349 {
8350 warning (0, "frame size of function %qs is %wd"
8351 " bytes exceeding user provided stack limit of "
8352 "%d bytes. "
8353 "An unconditional trap is added.",
8354 current_function_name(), cfun_frame_layout.frame_size,
8355 s390_stack_size);
8356 emit_insn (gen_trap ());
8357 }
8358 else
8359 {
8360 /* stack_guard has to be smaller than s390_stack_size.
8361 Otherwise we would emit an AND with zero which would
8362 not match the test under mask pattern. */
8363 if (stack_guard >= s390_stack_size)
8364 {
8365 warning (0, "frame size of function %qs is %wd"
8366 " bytes which is more than half the stack size. "
8367 "The dynamic check would not be reliable. "
8368 "No check emitted for this function.",
8369 current_function_name(),
8370 cfun_frame_layout.frame_size);
8371 }
8372 else
8373 {
8374 HOST_WIDE_INT stack_check_mask = ((s390_stack_size - 1)
8375 & ~(stack_guard - 1));
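/* Worked example with made-up option values: -mstack-size=65536 and
   -mstack-guard=4096 give stack_check_mask = 0xffff & ~0xfff = 0xf000,
   so the compare-and-trap below fires when all of those bits of the
   stack pointer are zero, i.e. roughly when less than a guard-sized
   chunk of the allowed stack remains.  */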
8376
8377 rtx t = gen_rtx_AND (Pmode, stack_pointer_rtx,
8378 GEN_INT (stack_check_mask));
8379 if (TARGET_64BIT)
8380 emit_insn (gen_ctrapdi4 (gen_rtx_EQ (VOIDmode,
8381 t, const0_rtx),
8382 t, const0_rtx, const0_rtx));
8383 else
8384 emit_insn (gen_ctrapsi4 (gen_rtx_EQ (VOIDmode,
8385 t, const0_rtx),
8386 t, const0_rtx, const0_rtx));
8387 }
8388 }
8389 }
8390
8391 if (s390_warn_framesize > 0
8392 && cfun_frame_layout.frame_size >= s390_warn_framesize)
8393 warning (0, "frame size of %qs is %wd bytes",
8394 current_function_name (), cfun_frame_layout.frame_size);
8395
8396 if (s390_warn_dynamicstack_p && cfun->calls_alloca)
8397 warning (0, "%qs uses dynamic stack allocation", current_function_name ());
8398
8399 /* Save incoming stack pointer into temp reg. */
8400 if (TARGET_BACKCHAIN || next_fpr)
8401 insn = emit_insn (gen_move_insn (temp_reg, stack_pointer_rtx));
8402
8403 /* Subtract frame size from stack pointer. */
8404
8405 if (DISP_IN_RANGE (INTVAL (frame_off)))
8406 {
8407 insn = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
8408 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
8409 frame_off));
8410 insn = emit_insn (insn);
8411 }
8412 else
8413 {
8414 if (!CONST_OK_FOR_K (INTVAL (frame_off)))
8415 frame_off = force_const_mem (Pmode, frame_off);
8416
8417 insn = emit_insn (gen_add2_insn (stack_pointer_rtx, frame_off));
8418 annotate_constant_pool_refs (&PATTERN (insn));
8419 }
8420
8421 RTX_FRAME_RELATED_P (insn) = 1;
8422 real_frame_off = GEN_INT (-cfun_frame_layout.frame_size);
8423 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
8424 gen_rtx_SET (VOIDmode, stack_pointer_rtx,
8425 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
8426 real_frame_off)));
8427
8428 /* Set backchain. */
8429
8430 if (TARGET_BACKCHAIN)
8431 {
8432 if (cfun_frame_layout.backchain_offset)
8433 addr = gen_rtx_MEM (Pmode,
8434 plus_constant (Pmode, stack_pointer_rtx,
8435 cfun_frame_layout.backchain_offset));
8436 else
8437 addr = gen_rtx_MEM (Pmode, stack_pointer_rtx);
8438 set_mem_alias_set (addr, get_frame_alias_set ());
8439 insn = emit_insn (gen_move_insn (addr, temp_reg));
8440 }
8441
8442 /* If we support non-call exceptions (e.g. for Java),
8443 we need to make sure the backchain pointer is set up
8444 before any possibly trapping memory access. */
8445 if (TARGET_BACKCHAIN && cfun->can_throw_non_call_exceptions)
8446 {
8447 addr = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (VOIDmode));
8448 emit_clobber (addr);
8449 }
8450 }
8451
8452 /* Save fprs 8 - 15 (64 bit ABI). */
8453
8454 if (cfun_save_high_fprs_p && next_fpr)
8455 {
8456 /* If the stack might be accessed through a different register
8457 we have to make sure that the stack pointer decrement is not
8458 moved below the use of the stack slots. */
8459 s390_emit_stack_tie ();
8460
8461 insn = emit_insn (gen_add2_insn (temp_reg,
8462 GEN_INT (cfun_frame_layout.f8_offset)));
8463
8464 offset = 0;
8465
8466 for (i = 24; i <= next_fpr; i++)
8467 if (cfun_fpr_bit_p (i - 16))
8468 {
8469 rtx addr = plus_constant (Pmode, stack_pointer_rtx,
8470 cfun_frame_layout.frame_size
8471 + cfun_frame_layout.f8_offset
8472 + offset);
8473
8474 insn = save_fpr (temp_reg, offset, i);
8475 offset += 8;
8476 RTX_FRAME_RELATED_P (insn) = 1;
8477 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
8478 gen_rtx_SET (VOIDmode,
8479 gen_rtx_MEM (DFmode, addr),
8480 gen_rtx_REG (DFmode, i)));
8481 }
8482 }
8483
8484 /* Set frame pointer, if needed. */
8485
8486 if (frame_pointer_needed)
8487 {
8488 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
8489 RTX_FRAME_RELATED_P (insn) = 1;
8490 }
8491
8492 /* Set up got pointer, if needed. */
8493
8494 if (flag_pic && df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM))
8495 {
8496 rtx insns = s390_load_got ();
8497
8498 for (insn = insns; insn; insn = NEXT_INSN (insn))
8499 annotate_constant_pool_refs (&PATTERN (insn));
8500
8501 emit_insn (insns);
8502 }
8503
8504 if (TARGET_TPF_PROFILING)
8505 {
8506 /* Generate a BAS instruction to serve as a function
8507 entry intercept to facilitate the use of tracing
8508 algorithms located at the branch target. */
8509 emit_insn (gen_prologue_tpf ());
8510
8511 /* Emit a blockage here so that all code
8512 lies between the profiling mechanisms. */
8513 emit_insn (gen_blockage ());
8514 }
8515 }
8516
8517 /* Expand the epilogue into a bunch of separate insns. */
8518
8519 void
8520 s390_emit_epilogue (bool sibcall)
8521 {
8522 rtx frame_pointer, return_reg, cfa_restores = NULL_RTX;
8523 int area_bottom, area_top, offset = 0;
8524 int next_offset;
8525 rtvec p;
8526 int i;
8527
8528 if (TARGET_TPF_PROFILING)
8529 {
8530
8531 /* Generate a BAS instruction to serve as a function
8532 entry intercept to facilitate the use of tracing
8533 algorithms located at the branch target. */
8534
8535 /* Emit a blockage here so that all code
8536 lies between the profiling mechanisms. */
8537 emit_insn (gen_blockage ());
8538
8539 emit_insn (gen_epilogue_tpf ());
8540 }
8541
8542 /* Check whether to use frame or stack pointer for restore. */
8543
8544 frame_pointer = (frame_pointer_needed
8545 ? hard_frame_pointer_rtx : stack_pointer_rtx);
8546
8547 s390_frame_area (&area_bottom, &area_top);
8548
8549 /* Check whether we can access the register save area.
8550 If not, increment the frame pointer as required. */
8551
8552 if (area_top <= area_bottom)
8553 {
8554 /* Nothing to restore. */
8555 }
8556 else if (DISP_IN_RANGE (cfun_frame_layout.frame_size + area_bottom)
8557 && DISP_IN_RANGE (cfun_frame_layout.frame_size + area_top - 1))
8558 {
8559 /* Area is in range. */
8560 offset = cfun_frame_layout.frame_size;
8561 }
8562 else
8563 {
8564 rtx insn, frame_off, cfa;
8565
8566 offset = area_bottom < 0 ? -area_bottom : 0;
8567 frame_off = GEN_INT (cfun_frame_layout.frame_size - offset);
8568
8569 cfa = gen_rtx_SET (VOIDmode, frame_pointer,
8570 gen_rtx_PLUS (Pmode, frame_pointer, frame_off));
8571 if (DISP_IN_RANGE (INTVAL (frame_off)))
8572 {
8573 insn = gen_rtx_SET (VOIDmode, frame_pointer,
8574 gen_rtx_PLUS (Pmode, frame_pointer, frame_off));
8575 insn = emit_insn (insn);
8576 }
8577 else
8578 {
8579 if (!CONST_OK_FOR_K (INTVAL (frame_off)))
8580 frame_off = force_const_mem (Pmode, frame_off);
8581
8582 insn = emit_insn (gen_add2_insn (frame_pointer, frame_off));
8583 annotate_constant_pool_refs (&PATTERN (insn));
8584 }
8585 add_reg_note (insn, REG_CFA_ADJUST_CFA, cfa);
8586 RTX_FRAME_RELATED_P (insn) = 1;
8587 }
8588
8589 /* Restore call saved fprs. */
8590
8591 if (TARGET_64BIT)
8592 {
8593 if (cfun_save_high_fprs_p)
8594 {
8595 next_offset = cfun_frame_layout.f8_offset;
8596 for (i = 24; i < 32; i++)
8597 {
8598 if (cfun_fpr_bit_p (i - 16))
8599 {
8600 restore_fpr (frame_pointer,
8601 offset + next_offset, i);
8602 cfa_restores
8603 = alloc_reg_note (REG_CFA_RESTORE,
8604 gen_rtx_REG (DFmode, i), cfa_restores);
8605 next_offset += 8;
8606 }
8607 }
8608 }
8609
8610 }
8611 else
8612 {
8613 next_offset = cfun_frame_layout.f4_offset;
8614 for (i = 18; i < 20; i++)
8615 {
8616 if (cfun_fpr_bit_p (i - 16))
8617 {
8618 restore_fpr (frame_pointer,
8619 offset + next_offset, i);
8620 cfa_restores
8621 = alloc_reg_note (REG_CFA_RESTORE,
8622 gen_rtx_REG (DFmode, i), cfa_restores);
8623 next_offset += 8;
8624 }
8625 else if (!TARGET_PACKED_STACK)
8626 next_offset += 8;
8627 }
8628
8629 }
8630
8631 /* Return register. */
8632
8633 return_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
8634
8635 /* Restore call saved gprs. */
8636
8637 if (cfun_frame_layout.first_restore_gpr != -1)
8638 {
8639 rtx insn, addr;
8640 int i;
8641
8642 /* Check for global registers and save them
8643 to the stack location from where they get restored. */
8644
8645 for (i = cfun_frame_layout.first_restore_gpr;
8646 i <= cfun_frame_layout.last_restore_gpr;
8647 i++)
8648 {
8649 if (global_not_special_regno_p (i))
8650 {
8651 addr = plus_constant (Pmode, frame_pointer,
8652 offset + cfun_frame_layout.gprs_offset
8653 + (i - cfun_frame_layout.first_save_gpr_slot)
8654 * UNITS_PER_LONG);
8655 addr = gen_rtx_MEM (Pmode, addr);
8656 set_mem_alias_set (addr, get_frame_alias_set ());
8657 emit_move_insn (addr, gen_rtx_REG (Pmode, i));
8658 }
8659 else
8660 cfa_restores
8661 = alloc_reg_note (REG_CFA_RESTORE,
8662 gen_rtx_REG (Pmode, i), cfa_restores);
8663 }
8664
8665 if (! sibcall)
8666 {
8667 /* Fetch the return address from the stack before the load multiple;
8668 this helps scheduling. */
8669
8670 if (cfun_frame_layout.save_return_addr_p
8671 || (cfun_frame_layout.first_restore_gpr < BASE_REGNUM
8672 && cfun_frame_layout.last_restore_gpr > RETURN_REGNUM))
8673 {
8674 int return_regnum = find_unused_clobbered_reg();
8675 if (!return_regnum)
8676 return_regnum = 4;
8677 return_reg = gen_rtx_REG (Pmode, return_regnum);
8678
8679 addr = plus_constant (Pmode, frame_pointer,
8680 offset + cfun_frame_layout.gprs_offset
8681 + (RETURN_REGNUM
8682 - cfun_frame_layout.first_save_gpr_slot)
8683 * UNITS_PER_LONG);
8684 addr = gen_rtx_MEM (Pmode, addr);
8685 set_mem_alias_set (addr, get_frame_alias_set ());
8686 emit_move_insn (return_reg, addr);
8687 }
8688 }
8689
8690 insn = restore_gprs (frame_pointer,
8691 offset + cfun_frame_layout.gprs_offset
8692 + (cfun_frame_layout.first_restore_gpr
8693 - cfun_frame_layout.first_save_gpr_slot)
8694 * UNITS_PER_LONG,
8695 cfun_frame_layout.first_restore_gpr,
8696 cfun_frame_layout.last_restore_gpr);
8697 insn = emit_insn (insn);
8698 REG_NOTES (insn) = cfa_restores;
8699 add_reg_note (insn, REG_CFA_DEF_CFA,
8700 plus_constant (Pmode, stack_pointer_rtx,
8701 STACK_POINTER_OFFSET));
8702 RTX_FRAME_RELATED_P (insn) = 1;
8703 }
8704
8705 if (! sibcall)
8706 {
8707
8708 /* Return to caller. */
8709
8710 p = rtvec_alloc (2);
8711
8712 RTVEC_ELT (p, 0) = ret_rtx;
8713 RTVEC_ELT (p, 1) = gen_rtx_USE (VOIDmode, return_reg);
8714 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
8715 }
8716 }
8717
8718
8719 /* Return the size in bytes of a function argument of
8720 type TYPE and/or mode MODE. At least one of TYPE or
8721 MODE must be specified. */
8722
8723 static int
8724 s390_function_arg_size (enum machine_mode mode, const_tree type)
8725 {
8726 if (type)
8727 return int_size_in_bytes (type);
8728
8729 /* No type info available for some library calls ... */
8730 if (mode != BLKmode)
8731 return GET_MODE_SIZE (mode);
8732
8733 /* If we have neither type nor mode, abort. */
8734 gcc_unreachable ();
8735 }
8736
8737 /* Return true if a function argument of type TYPE and mode MODE
8738 is to be passed in a floating-point register, if available. */
8739
8740 static bool
8741 s390_function_arg_float (enum machine_mode mode, const_tree type)
8742 {
8743 int size = s390_function_arg_size (mode, type);
8744 if (size > 8)
8745 return false;
8746
8747 /* Soft-float changes the ABI: no floating-point registers are used. */
8748 if (TARGET_SOFT_FLOAT)
8749 return false;
8750
8751 /* No type info available for some library calls ... */
8752 if (!type)
8753 return mode == SFmode || mode == DFmode || mode == SDmode || mode == DDmode;
8754
8755 /* The ABI says that record types with a single member are treated
8756 just like that member would be. */
8757 while (TREE_CODE (type) == RECORD_TYPE)
8758 {
8759 tree field, single = NULL_TREE;
8760
8761 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
8762 {
8763 if (TREE_CODE (field) != FIELD_DECL)
8764 continue;
8765
8766 if (single == NULL_TREE)
8767 single = TREE_TYPE (field);
8768 else
8769 return false;
8770 }
8771
8772 if (single == NULL_TREE)
8773 return false;
8774 else
8775 type = single;
8776 }
8777
8778 return TREE_CODE (type) == REAL_TYPE;
8779 }
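
/* Illustrative (hypothetical) types for the single-member record rule above:
   struct { double d; } is passed exactly like a plain double, i.e. in an
   FPR, whereas struct { float a; float b; } has two members, fails the
   rule, and falls back to the integer/stack conventions.  */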
8780
8781 /* Return true if a function argument of type TYPE and mode MODE
8782 is to be passed in an integer register, or a pair of integer
8783 registers, if available. */
8784
8785 static bool
8786 s390_function_arg_integer (enum machine_mode mode, const_tree type)
8787 {
8788 int size = s390_function_arg_size (mode, type);
8789 if (size > 8)
8790 return false;
8791
8792 /* No type info available for some library calls ... */
8793 if (!type)
8794 return GET_MODE_CLASS (mode) == MODE_INT
8795 || (TARGET_SOFT_FLOAT && SCALAR_FLOAT_MODE_P (mode));
8796
8797 /* We accept small integral (and similar) types. */
8798 if (INTEGRAL_TYPE_P (type)
8799 || POINTER_TYPE_P (type)
8800 || TREE_CODE (type) == NULLPTR_TYPE
8801 || TREE_CODE (type) == OFFSET_TYPE
8802 || (TARGET_SOFT_FLOAT && TREE_CODE (type) == REAL_TYPE))
8803 return true;
8804
8805 /* We also accept structs of size 1, 2, 4, 8 that are not
8806 passed in floating-point registers. */
8807 if (AGGREGATE_TYPE_P (type)
8808 && exact_log2 (size) >= 0
8809 && !s390_function_arg_float (mode, type))
8810 return true;
8811
8812 return false;
8813 }
8814
8815 /* Return 1 if a function argument of type TYPE and mode MODE
8816 is to be passed by reference. The ABI specifies that only
8817 structures of size 1, 2, 4, or 8 bytes are passed by value,
8818 all other structures (and complex numbers) are passed by
8819 reference. */
8820
8821 static bool
8822 s390_pass_by_reference (cumulative_args_t ca ATTRIBUTE_UNUSED,
8823 enum machine_mode mode, const_tree type,
8824 bool named ATTRIBUTE_UNUSED)
8825 {
8826 int size = s390_function_arg_size (mode, type);
8827 if (size > 8)
8828 return true;
8829
8830 if (type)
8831 {
8832 if (AGGREGATE_TYPE_P (type) && exact_log2 (size) < 0)
8833 return 1;
8834
8835 if (TREE_CODE (type) == COMPLEX_TYPE
8836 || TREE_CODE (type) == VECTOR_TYPE)
8837 return 1;
8838 }
8839
8840 return 0;
8841 }
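
/* Illustrative (hypothetical) cases: a 3-byte or 12-byte struct, and any
   complex or vector type, is passed by reference per the checks above;
   structs of exactly 1, 2, 4 or 8 bytes are passed by value.  */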
8842
8843 /* Update the data in CUM to advance over an argument of mode MODE and
8844 data type TYPE. (TYPE is null for libcalls where that information
8845 may not be available.) The boolean NAMED specifies whether the
8846 argument is a named argument (as opposed to an unnamed argument
8847 matching an ellipsis). */
8848
8849 static void
8850 s390_function_arg_advance (cumulative_args_t cum_v, enum machine_mode mode,
8851 const_tree type, bool named ATTRIBUTE_UNUSED)
8852 {
8853 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
8854
8855 if (s390_function_arg_float (mode, type))
8856 {
8857 cum->fprs += 1;
8858 }
8859 else if (s390_function_arg_integer (mode, type))
8860 {
8861 int size = s390_function_arg_size (mode, type);
8862 cum->gprs += ((size + UNITS_PER_LONG - 1) / UNITS_PER_LONG);
8863 }
8864 else
8865 gcc_unreachable ();
8866 }
8867
8868 /* Define where to put the arguments to a function.
8869 Value is zero to push the argument on the stack,
8870 or a hard register in which to store the argument.
8871
8872 MODE is the argument's machine mode.
8873 TYPE is the data type of the argument (as a tree).
8874 This is null for libcalls where that information may
8875 not be available.
8876 CUM is a variable of type CUMULATIVE_ARGS which gives info about
8877 the preceding args and about the function being called.
8878 NAMED is nonzero if this argument is a named parameter
8879 (otherwise it is an extra parameter matching an ellipsis).
8880
8881 On S/390, we use general purpose registers 2 through 6 to
8882 pass integer, pointer, and certain structure arguments, and
8883 floating point registers 0 and 2 (0, 2, 4, and 6 on 64-bit)
8884 to pass floating point arguments. All remaining arguments
8885 are pushed to the stack. */
8886
8887 static rtx
8888 s390_function_arg (cumulative_args_t cum_v, enum machine_mode mode,
8889 const_tree type, bool named ATTRIBUTE_UNUSED)
8890 {
8891 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
8892
8893 if (s390_function_arg_float (mode, type))
8894 {
8895 if (cum->fprs + 1 > FP_ARG_NUM_REG)
8896 return 0;
8897 else
8898 return gen_rtx_REG (mode, cum->fprs + 16);
8899 }
8900 else if (s390_function_arg_integer (mode, type))
8901 {
8902 int size = s390_function_arg_size (mode, type);
8903 int n_gprs = (size + UNITS_PER_LONG - 1) / UNITS_PER_LONG;
8904
8905 if (cum->gprs + n_gprs > GP_ARG_NUM_REG)
8906 return 0;
8907 else if (n_gprs == 1 || UNITS_PER_WORD == UNITS_PER_LONG)
8908 return gen_rtx_REG (mode, cum->gprs + 2);
8909 else if (n_gprs == 2)
8910 {
8911 rtvec p = rtvec_alloc (2);
8912
8913 RTVEC_ELT (p, 0)
8914 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, cum->gprs + 2),
8915 const0_rtx);
8916 RTVEC_ELT (p, 1)
8917 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, cum->gprs + 3),
8918 GEN_INT (4));
8919
8920 return gen_rtx_PARALLEL (mode, p);
8921 }
8922 }
8923
8924 /* After the real arguments, expand_call calls us once again
8925 with a void_type_node type. Whatever we return here is
8926 passed as operand 2 to the call expanders.
8927
8928 We don't need this feature ... */
8929 else if (type == void_type_node)
8930 return const0_rtx;
8931
8932 gcc_unreachable ();
8933 }
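
/* Rough illustration of the conventions described above, using a made-up
   call f (1, 2.0, 3): the two integers occupy %r2 and %r3 while the double
   goes to %f0.  The PARALLEL branch is only reached for -m31 -mzarch, where
   a DImode argument is described as a pair of SImode registers such as
   %r2/%r3.  */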
8934
8935 /* Return true if return values of type TYPE should be returned
8936 in a memory buffer whose address is passed by the caller as
8937 hidden first argument. */
8938
8939 static bool
8940 s390_return_in_memory (const_tree type, const_tree fundecl ATTRIBUTE_UNUSED)
8941 {
8942 /* We accept small integral (and similar) types. */
8943 if (INTEGRAL_TYPE_P (type)
8944 || POINTER_TYPE_P (type)
8945 || TREE_CODE (type) == OFFSET_TYPE
8946 || TREE_CODE (type) == REAL_TYPE)
8947 return int_size_in_bytes (type) > 8;
8948
8949 /* Aggregates and similar constructs are always returned
8950 in memory. */
8951 if (AGGREGATE_TYPE_P (type)
8952 || TREE_CODE (type) == COMPLEX_TYPE
8953 || TREE_CODE (type) == VECTOR_TYPE)
8954 return true;
8955
8956 /* ??? We get called on all sorts of random stuff from
8957 aggregate_value_p. We can't abort, but it's not clear
8958 what's safe to return. Pretend it's a struct I guess. */
8959 return true;
8960 }
8961
8962 /* Function arguments and return values are promoted to word size. */
8963
8964 static enum machine_mode
8965 s390_promote_function_mode (const_tree type, enum machine_mode mode,
8966 int *punsignedp,
8967 const_tree fntype ATTRIBUTE_UNUSED,
8968 int for_return ATTRIBUTE_UNUSED)
8969 {
8970 if (INTEGRAL_MODE_P (mode)
8971 && GET_MODE_SIZE (mode) < UNITS_PER_LONG)
8972 {
8973 if (type != NULL_TREE && POINTER_TYPE_P (type))
8974 *punsignedp = POINTERS_EXTEND_UNSIGNED;
8975 return Pmode;
8976 }
8977
8978 return mode;
8979 }
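
/* For illustration: a char or short argument (QImode/HImode) is widened to
   Pmode by the hook above, so on a 64-bit target callers pass it as a full
   64-bit register value; pointers are extended as unsigned via
   POINTERS_EXTEND_UNSIGNED.  */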
8980
8981 /* Define where to return a (scalar) value of type RET_TYPE.
8982 If RET_TYPE is null, define where to return a (scalar)
8983 value of mode MODE from a libcall. */
8984
8985 static rtx
8986 s390_function_and_libcall_value (enum machine_mode mode,
8987 const_tree ret_type,
8988 const_tree fntype_or_decl,
8989 bool outgoing ATTRIBUTE_UNUSED)
8990 {
8991 /* For normal functions perform the promotion as
8992 promote_function_mode would do. */
8993 if (ret_type)
8994 {
8995 int unsignedp = TYPE_UNSIGNED (ret_type);
8996 mode = promote_function_mode (ret_type, mode, &unsignedp,
8997 fntype_or_decl, 1);
8998 }
8999
9000 gcc_assert (GET_MODE_CLASS (mode) == MODE_INT || SCALAR_FLOAT_MODE_P (mode));
9001 gcc_assert (GET_MODE_SIZE (mode) <= 8);
9002
9003 if (TARGET_HARD_FLOAT && SCALAR_FLOAT_MODE_P (mode))
9004 return gen_rtx_REG (mode, 16);
9005 else if (GET_MODE_SIZE (mode) <= UNITS_PER_LONG
9006 || UNITS_PER_LONG == UNITS_PER_WORD)
9007 return gen_rtx_REG (mode, 2);
9008 else if (GET_MODE_SIZE (mode) == 2 * UNITS_PER_LONG)
9009 {
9010 /* This case is triggered when returning a 64 bit value with
9011 -m31 -mzarch. Although the value would fit into a single
9012 register it has to be forced into a 32 bit register pair in
9013 order to match the ABI. */
9014 rtvec p = rtvec_alloc (2);
9015
9016 RTVEC_ELT (p, 0)
9017 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, 2), const0_rtx);
9018 RTVEC_ELT (p, 1)
9019 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, 3), GEN_INT (4));
9020
9021 return gen_rtx_PARALLEL (mode, p);
9022 }
9023
9024 gcc_unreachable ();
9025 }
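
/* Illustrative cases for the function above: int and long results come back
   in %r2, float/double results in %f0 (register 16) when hard float is in
   use, and a 64-bit result under -m31 -mzarch is described as the %r2/%r3
   register pair.  */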
9026
9027 /* Define where to return a scalar return value of type RET_TYPE. */
9028
9029 static rtx
9030 s390_function_value (const_tree ret_type, const_tree fn_decl_or_type,
9031 bool outgoing)
9032 {
9033 return s390_function_and_libcall_value (TYPE_MODE (ret_type), ret_type,
9034 fn_decl_or_type, outgoing);
9035 }
9036
9037 /* Define where to return a scalar libcall return value of mode
9038 MODE. */
9039
9040 static rtx
9041 s390_libcall_value (enum machine_mode mode, const_rtx fun ATTRIBUTE_UNUSED)
9042 {
9043 return s390_function_and_libcall_value (mode, NULL_TREE,
9044 NULL_TREE, true);
9045 }
9046
9047
9048 /* Create and return the va_list datatype.
9049
9050 On S/390, va_list is an array type equivalent to
9051
9052 typedef struct __va_list_tag
9053 {
9054 long __gpr;
9055 long __fpr;
9056 void *__overflow_arg_area;
9057 void *__reg_save_area;
9058 } va_list[1];
9059
9060 where __gpr and __fpr hold the number of general purpose
9061 or floating point arguments used up to now, respectively,
9062 __overflow_arg_area points to the stack location of the
9063 next argument passed on the stack, and __reg_save_area
9064 always points to the start of the register area in the
9065 call frame of the current function. The function prologue
9066 saves all registers used for argument passing into this
9067 area if the function uses variable arguments. */
9068
9069 static tree
9070 s390_build_builtin_va_list (void)
9071 {
9072 tree f_gpr, f_fpr, f_ovf, f_sav, record, type_decl;
9073
9074 record = lang_hooks.types.make_type (RECORD_TYPE);
9075
9076 type_decl =
9077 build_decl (BUILTINS_LOCATION,
9078 TYPE_DECL, get_identifier ("__va_list_tag"), record);
9079
9080 f_gpr = build_decl (BUILTINS_LOCATION,
9081 FIELD_DECL, get_identifier ("__gpr"),
9082 long_integer_type_node);
9083 f_fpr = build_decl (BUILTINS_LOCATION,
9084 FIELD_DECL, get_identifier ("__fpr"),
9085 long_integer_type_node);
9086 f_ovf = build_decl (BUILTINS_LOCATION,
9087 FIELD_DECL, get_identifier ("__overflow_arg_area"),
9088 ptr_type_node);
9089 f_sav = build_decl (BUILTINS_LOCATION,
9090 FIELD_DECL, get_identifier ("__reg_save_area"),
9091 ptr_type_node);
9092
9093 va_list_gpr_counter_field = f_gpr;
9094 va_list_fpr_counter_field = f_fpr;
9095
9096 DECL_FIELD_CONTEXT (f_gpr) = record;
9097 DECL_FIELD_CONTEXT (f_fpr) = record;
9098 DECL_FIELD_CONTEXT (f_ovf) = record;
9099 DECL_FIELD_CONTEXT (f_sav) = record;
9100
9101 TYPE_STUB_DECL (record) = type_decl;
9102 TYPE_NAME (record) = type_decl;
9103 TYPE_FIELDS (record) = f_gpr;
9104 DECL_CHAIN (f_gpr) = f_fpr;
9105 DECL_CHAIN (f_fpr) = f_ovf;
9106 DECL_CHAIN (f_ovf) = f_sav;
9107
9108 layout_type (record);
9109
9110 /* The correct type is an array type of one element. */
9111 return build_array_type (record, build_index_type (size_zero_node));
9112 }
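
/* Size sketch for the record built above, assuming an LP64 target: two
   longs plus two pointers give a 32-byte __va_list_tag, and va_list itself
   is a one-element array of it, so it decays to a pointer when passed to
   callees such as vfprintf.  */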
9113
9114 /* Implement va_start by filling the va_list structure VALIST.
9115 STDARG_P is always true, and ignored.
9116 NEXTARG points to the first anonymous stack argument.
9117
9118 The following global variables are used to initialize
9119 the va_list structure:
9120
9121 crtl->args.info:
9122 holds number of gprs and fprs used for named arguments.
9123 crtl->args.arg_offset_rtx:
9124 holds the offset of the first anonymous stack argument
9125 (relative to the virtual arg pointer). */
9126
9127 static void
9128 s390_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
9129 {
9130 HOST_WIDE_INT n_gpr, n_fpr;
9131 int off;
9132 tree f_gpr, f_fpr, f_ovf, f_sav;
9133 tree gpr, fpr, ovf, sav, t;
9134
9135 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
9136 f_fpr = DECL_CHAIN (f_gpr);
9137 f_ovf = DECL_CHAIN (f_fpr);
9138 f_sav = DECL_CHAIN (f_ovf);
9139
9140 valist = build_simple_mem_ref (valist);
9141 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
9142 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
9143 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
9144 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
9145
9146 /* Count number of gp and fp argument registers used. */
9147
9148 n_gpr = crtl->args.info.gprs;
9149 n_fpr = crtl->args.info.fprs;
9150
9151 if (cfun->va_list_gpr_size)
9152 {
9153 t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
9154 build_int_cst (NULL_TREE, n_gpr));
9155 TREE_SIDE_EFFECTS (t) = 1;
9156 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
9157 }
9158
9159 if (cfun->va_list_fpr_size)
9160 {
9161 t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
9162 build_int_cst (NULL_TREE, n_fpr));
9163 TREE_SIDE_EFFECTS (t) = 1;
9164 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
9165 }
9166
9167 /* Find the overflow area. */
9168 if (n_gpr + cfun->va_list_gpr_size > GP_ARG_NUM_REG
9169 || n_fpr + cfun->va_list_fpr_size > FP_ARG_NUM_REG)
9170 {
9171 t = make_tree (TREE_TYPE (ovf), virtual_incoming_args_rtx);
9172
9173 off = INTVAL (crtl->args.arg_offset_rtx);
9174 off = off < 0 ? 0 : off;
9175 if (TARGET_DEBUG_ARG)
9176 fprintf (stderr, "va_start: n_gpr = %d, n_fpr = %d off %d\n",
9177 (int)n_gpr, (int)n_fpr, off);
9178
9179 t = fold_build_pointer_plus_hwi (t, off);
9180
9181 t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
9182 TREE_SIDE_EFFECTS (t) = 1;
9183 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
9184 }
9185
9186 /* Find the register save area. */
9187 if ((cfun->va_list_gpr_size && n_gpr < GP_ARG_NUM_REG)
9188 || (cfun->va_list_fpr_size && n_fpr < FP_ARG_NUM_REG))
9189 {
9190 t = make_tree (TREE_TYPE (sav), return_address_pointer_rtx);
9191 t = fold_build_pointer_plus_hwi (t, -RETURN_REGNUM * UNITS_PER_LONG);
9192
9193 t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
9194 TREE_SIDE_EFFECTS (t) = 1;
9195 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
9196 }
9197 }
9198
9199 /* Implement va_arg by updating the va_list structure
9200 VALIST as required to retrieve an argument of type
9201 TYPE, and returning that argument.
9202
9203 Generates code equivalent to:
9204
9205 if (integral value) {
9206 if (size <= 4 && args.gpr < 5 ||
9207 size > 4 && args.gpr < 4 )
9208 ret = args.reg_save_area[args.gpr+8]
9209 else
9210 ret = *args.overflow_arg_area++;
9211 } else if (float value) {
9212 if (args.fgpr < 2)
9213 ret = args.reg_save_area[args.fpr+64]
9214 else
9215 ret = *args.overflow_arg_area++;
9216 } else if (aggregate value) {
9217 if (args.gpr < 5)
9218 ret = *args.reg_save_area[args.gpr]
9219 else
9220 ret = **args.overflow_arg_area++;
9221 } */
9222
9223 static tree
9224 s390_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
9225 gimple_seq *post_p ATTRIBUTE_UNUSED)
9226 {
9227 tree f_gpr, f_fpr, f_ovf, f_sav;
9228 tree gpr, fpr, ovf, sav, reg, t, u;
9229 int indirect_p, size, n_reg, sav_ofs, sav_scale, max_reg;
9230 tree lab_false, lab_over, addr;
9231
9232 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
9233 f_fpr = DECL_CHAIN (f_gpr);
9234 f_ovf = DECL_CHAIN (f_fpr);
9235 f_sav = DECL_CHAIN (f_ovf);
9236
9237 valist = build_va_arg_indirect_ref (valist);
9238 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
9239 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
9240 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
9241
9242 /* The tree for args* cannot be shared between gpr/fpr and ovf since
9243 both appear on a lhs. */
9244 valist = unshare_expr (valist);
9245 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
9246
9247 size = int_size_in_bytes (type);
9248
9249 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
9250 {
9251 if (TARGET_DEBUG_ARG)
9252 {
9253 fprintf (stderr, "va_arg: aggregate type");
9254 debug_tree (type);
9255 }
9256
9257 /* Aggregates are passed by reference. */
9258 indirect_p = 1;
9259 reg = gpr;
9260 n_reg = 1;
9261
9262 /* Kernel stack layout on 31 bit: it is assumed here that no padding
9263 will be added by s390_frame_info because for va_args an even
9264 number of gprs always has to be saved (r15-r2 = 14 regs). */
9265 sav_ofs = 2 * UNITS_PER_LONG;
9266 sav_scale = UNITS_PER_LONG;
9267 size = UNITS_PER_LONG;
9268 max_reg = GP_ARG_NUM_REG - n_reg;
9269 }
9270 else if (s390_function_arg_float (TYPE_MODE (type), type))
9271 {
9272 if (TARGET_DEBUG_ARG)
9273 {
9274 fprintf (stderr, "va_arg: float type");
9275 debug_tree (type);
9276 }
9277
9278 /* FP args go in FP registers, if present. */
9279 indirect_p = 0;
9280 reg = fpr;
9281 n_reg = 1;
9282 sav_ofs = 16 * UNITS_PER_LONG;
9283 sav_scale = 8;
9284 max_reg = FP_ARG_NUM_REG - n_reg;
9285 }
9286 else
9287 {
9288 if (TARGET_DEBUG_ARG)
9289 {
9290 fprintf (stderr, "va_arg: other type");
9291 debug_tree (type);
9292 }
9293
9294 /* Otherwise into GP registers. */
9295 indirect_p = 0;
9296 reg = gpr;
9297 n_reg = (size + UNITS_PER_LONG - 1) / UNITS_PER_LONG;
9298
9299 /* Kernel stack layout on 31 bit: it is assumed here that no padding
9300 will be added by s390_frame_info because for va_args an even
9301 number of gprs always has to be saved (r15-r2 = 14 regs). */
9302 sav_ofs = 2 * UNITS_PER_LONG;
9303
9304 if (size < UNITS_PER_LONG)
9305 sav_ofs += UNITS_PER_LONG - size;
9306
9307 sav_scale = UNITS_PER_LONG;
9308 max_reg = GP_ARG_NUM_REG - n_reg;
9309 }
9310
9311 /* Pull the value out of the saved registers ... */
9312
9313 lab_false = create_artificial_label (UNKNOWN_LOCATION);
9314 lab_over = create_artificial_label (UNKNOWN_LOCATION);
9315 addr = create_tmp_var (ptr_type_node, "addr");
9316
9317 t = fold_convert (TREE_TYPE (reg), size_int (max_reg));
9318 t = build2 (GT_EXPR, boolean_type_node, reg, t);
9319 u = build1 (GOTO_EXPR, void_type_node, lab_false);
9320 t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
9321 gimplify_and_add (t, pre_p);
9322
9323 t = fold_build_pointer_plus_hwi (sav, sav_ofs);
9324 u = build2 (MULT_EXPR, TREE_TYPE (reg), reg,
9325 fold_convert (TREE_TYPE (reg), size_int (sav_scale)));
9326 t = fold_build_pointer_plus (t, u);
9327
9328 gimplify_assign (addr, t, pre_p);
9329
9330 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
9331
9332 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_false));
9333
9334
9335 /* ... Otherwise out of the overflow area. */
9336
9337 t = ovf;
9338 if (size < UNITS_PER_LONG)
9339 t = fold_build_pointer_plus_hwi (t, UNITS_PER_LONG - size);
9340
9341 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
9342
9343 gimplify_assign (addr, t, pre_p);
9344
9345 t = fold_build_pointer_plus_hwi (t, size);
9346 gimplify_assign (ovf, t, pre_p);
9347
9348 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_over));
9349
9350
9351 /* Increment register save count. */
9352
9353 u = build2 (PREINCREMENT_EXPR, TREE_TYPE (reg), reg,
9354 fold_convert (TREE_TYPE (reg), size_int (n_reg)));
9355 gimplify_and_add (u, pre_p);
9356
9357 if (indirect_p)
9358 {
9359 t = build_pointer_type_for_mode (build_pointer_type (type),
9360 ptr_mode, true);
9361 addr = fold_convert (t, addr);
9362 addr = build_va_arg_indirect_ref (addr);
9363 }
9364 else
9365 {
9366 t = build_pointer_type_for_mode (type, ptr_mode, true);
9367 addr = fold_convert (t, addr);
9368 }
9369
9370 return build_va_arg_indirect_ref (addr);
9371 }
9372
9373 /* Output assembly code for the trampoline template to
9374 stdio stream FILE.
9375
9376 On S/390, we use gpr 1 internally in the trampoline code;
9377 gpr 0 is used to hold the static chain. */
9378
9379 static void
9380 s390_asm_trampoline_template (FILE *file)
9381 {
9382 rtx op[2];
9383 op[0] = gen_rtx_REG (Pmode, 0);
9384 op[1] = gen_rtx_REG (Pmode, 1);
9385
9386 if (TARGET_64BIT)
9387 {
9388 output_asm_insn ("basr\t%1,0", op); /* 2 byte */
9389 output_asm_insn ("lmg\t%0,%1,14(%1)", op); /* 6 byte */
9390 output_asm_insn ("br\t%1", op); /* 2 byte */
9391 ASM_OUTPUT_SKIP (file, (HOST_WIDE_INT)(TRAMPOLINE_SIZE - 10));
9392 }
9393 else
9394 {
9395 output_asm_insn ("basr\t%1,0", op); /* 2 byte */
9396 output_asm_insn ("lm\t%0,%1,6(%1)", op); /* 4 byte */
9397 output_asm_insn ("br\t%1", op); /* 2 byte */
9398 ASM_OUTPUT_SKIP (file, (HOST_WIDE_INT)(TRAMPOLINE_SIZE - 8));
9399 }
9400 }
9401
9402 /* Emit RTL insns to initialize the variable parts of a trampoline.
9403 FNADDR is an RTX for the address of the function's pure code.
9404 CXT is an RTX for the static chain value for the function. */
9405
9406 static void
9407 s390_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
9408 {
9409 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
9410 rtx mem;
9411
9412 emit_block_move (m_tramp, assemble_trampoline_template (),
9413 GEN_INT (2 * UNITS_PER_LONG), BLOCK_OP_NORMAL);
9414
9415 mem = adjust_address (m_tramp, Pmode, 2 * UNITS_PER_LONG);
9416 emit_move_insn (mem, cxt);
9417 mem = adjust_address (m_tramp, Pmode, 3 * UNITS_PER_LONG);
9418 emit_move_insn (mem, fnaddr);
9419 }
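
/* Resulting 64-bit trampoline layout (offsets in bytes, as initialized
   above):
     0 .. 15   code copied from the template (basr/lmg/br plus padding)
     16 .. 23  static chain value, loaded into %r0
     24 .. 31  target function address, loaded into %r1
   The "lmg %r0,%r1,14(%r1)" in the template reads both values relative to
   %r1, which the preceding basr pointed at the byte after itself.  */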
9420
9421 /* Output assembler code to FILE to increment profiler label # LABELNO
9422 for profiling a function entry. */
9423
9424 void
9425 s390_function_profiler (FILE *file, int labelno)
9426 {
9427 rtx op[7];
9428
9429 char label[128];
9430 ASM_GENERATE_INTERNAL_LABEL (label, "LP", labelno);
9431
9432 fprintf (file, "# function profiler \n");
9433
9434 op[0] = gen_rtx_REG (Pmode, RETURN_REGNUM);
9435 op[1] = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
9436 op[1] = gen_rtx_MEM (Pmode, plus_constant (Pmode, op[1], UNITS_PER_LONG));
9437
9438 op[2] = gen_rtx_REG (Pmode, 1);
9439 op[3] = gen_rtx_SYMBOL_REF (Pmode, label);
9440 SYMBOL_REF_FLAGS (op[3]) = SYMBOL_FLAG_LOCAL;
9441
9442 op[4] = gen_rtx_SYMBOL_REF (Pmode, "_mcount");
9443 if (flag_pic)
9444 {
9445 op[4] = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op[4]), UNSPEC_PLT);
9446 op[4] = gen_rtx_CONST (Pmode, op[4]);
9447 }
9448
9449 if (TARGET_64BIT)
9450 {
9451 output_asm_insn ("stg\t%0,%1", op);
9452 output_asm_insn ("larl\t%2,%3", op);
9453 output_asm_insn ("brasl\t%0,%4", op);
9454 output_asm_insn ("lg\t%0,%1", op);
9455 }
9456 else if (!flag_pic)
9457 {
9458 op[6] = gen_label_rtx ();
9459
9460 output_asm_insn ("st\t%0,%1", op);
9461 output_asm_insn ("bras\t%2,%l6", op);
9462 output_asm_insn (".long\t%4", op);
9463 output_asm_insn (".long\t%3", op);
9464 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[6]));
9465 output_asm_insn ("l\t%0,0(%2)", op);
9466 output_asm_insn ("l\t%2,4(%2)", op);
9467 output_asm_insn ("basr\t%0,%0", op);
9468 output_asm_insn ("l\t%0,%1", op);
9469 }
9470 else
9471 {
9472 op[5] = gen_label_rtx ();
9473 op[6] = gen_label_rtx ();
9474
9475 output_asm_insn ("st\t%0,%1", op);
9476 output_asm_insn ("bras\t%2,%l6", op);
9477 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[5]));
9478 output_asm_insn (".long\t%4-%l5", op);
9479 output_asm_insn (".long\t%3-%l5", op);
9480 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[6]));
9481 output_asm_insn ("lr\t%0,%2", op);
9482 output_asm_insn ("a\t%0,0(%2)", op);
9483 output_asm_insn ("a\t%2,4(%2)", op);
9484 output_asm_insn ("basr\t%0,%0", op);
9485 output_asm_insn ("l\t%0,%1", op);
9486 }
9487 }
9488
9489 /* Encode symbol attributes (local vs. global, tls model) of a SYMBOL_REF
9490 into its SYMBOL_REF_FLAGS. */
9491
9492 static void
9493 s390_encode_section_info (tree decl, rtx rtl, int first)
9494 {
9495 default_encode_section_info (decl, rtl, first);
9496
9497 if (TREE_CODE (decl) == VAR_DECL)
9498 {
9499 /* If a variable has a forced alignment to < 2 bytes, mark it
9500 with SYMBOL_FLAG_ALIGN1 to prevent it from being used as a LARL
9501 operand. */
9502 if (DECL_USER_ALIGN (decl) && DECL_ALIGN (decl) < 16)
9503 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_ALIGN1;
9504 if (!DECL_SIZE (decl)
9505 || !DECL_ALIGN (decl)
9506 || !host_integerp (DECL_SIZE (decl), 0)
9507 || (DECL_ALIGN (decl) <= 64
9508 && DECL_ALIGN (decl) != tree_low_cst (DECL_SIZE (decl), 0)))
9509 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_NOT_NATURALLY_ALIGNED;
9510 }
9511
9512 /* Literal pool references don't have a decl so they are handled
9513 differently here. We rely on the information in the MEM_ALIGN
9514 entry to decide upon natural alignment. */
9515 if (MEM_P (rtl)
9516 && GET_CODE (XEXP (rtl, 0)) == SYMBOL_REF
9517 && TREE_CONSTANT_POOL_ADDRESS_P (XEXP (rtl, 0))
9518 && (MEM_ALIGN (rtl) == 0
9519 || GET_MODE_BITSIZE (GET_MODE (rtl)) == 0
9520 || MEM_ALIGN (rtl) < GET_MODE_BITSIZE (GET_MODE (rtl))))
9521 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_NOT_NATURALLY_ALIGNED;
9522 }
9523
9524 /* Output thunk to FILE that implements a C++ virtual function call (with
9525 multiple inheritance) to FUNCTION. The thunk adjusts the this pointer
9526 by DELTA, and unless VCALL_OFFSET is zero, applies an additional adjustment
9527 stored at VCALL_OFFSET in the vtable whose address is located at offset 0
9528 relative to the resulting this pointer. */
9529
9530 static void
9531 s390_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
9532 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
9533 tree function)
9534 {
9535 rtx op[10];
9536 int nonlocal = 0;
9537
9538 /* Make sure unwind info is emitted for the thunk if needed. */
9539 final_start_function (emit_barrier (), file, 1);
9540
9541 /* Operand 0 is the target function. */
9542 op[0] = XEXP (DECL_RTL (function), 0);
9543 if (flag_pic && !SYMBOL_REF_LOCAL_P (op[0]))
9544 {
9545 nonlocal = 1;
9546 op[0] = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op[0]),
9547 TARGET_64BIT ? UNSPEC_PLT : UNSPEC_GOT);
9548 op[0] = gen_rtx_CONST (Pmode, op[0]);
9549 }
9550
9551 /* Operand 1 is the 'this' pointer. */
9552 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
9553 op[1] = gen_rtx_REG (Pmode, 3);
9554 else
9555 op[1] = gen_rtx_REG (Pmode, 2);
9556
9557 /* Operand 2 is the delta. */
9558 op[2] = GEN_INT (delta);
9559
9560 /* Operand 3 is the vcall_offset. */
9561 op[3] = GEN_INT (vcall_offset);
9562
9563 /* Operand 4 is the temporary register. */
9564 op[4] = gen_rtx_REG (Pmode, 1);
9565
9566 /* Operands 5 to 8 can be used as labels. */
9567 op[5] = NULL_RTX;
9568 op[6] = NULL_RTX;
9569 op[7] = NULL_RTX;
9570 op[8] = NULL_RTX;
9571
9572 /* Operand 9 can be used for temporary register. */
9573 op[9] = NULL_RTX;
9574
9575 /* Generate code. */
9576 if (TARGET_64BIT)
9577 {
9578 /* Setup literal pool pointer if required. */
9579 if ((!DISP_IN_RANGE (delta)
9580 && !CONST_OK_FOR_K (delta)
9581 && !CONST_OK_FOR_Os (delta))
9582 || (!DISP_IN_RANGE (vcall_offset)
9583 && !CONST_OK_FOR_K (vcall_offset)
9584 && !CONST_OK_FOR_Os (vcall_offset)))
9585 {
9586 op[5] = gen_label_rtx ();
9587 output_asm_insn ("larl\t%4,%5", op);
9588 }
9589
9590 /* Add DELTA to this pointer. */
9591 if (delta)
9592 {
9593 if (CONST_OK_FOR_J (delta))
9594 output_asm_insn ("la\t%1,%2(%1)", op);
9595 else if (DISP_IN_RANGE (delta))
9596 output_asm_insn ("lay\t%1,%2(%1)", op);
9597 else if (CONST_OK_FOR_K (delta))
9598 output_asm_insn ("aghi\t%1,%2", op);
9599 else if (CONST_OK_FOR_Os (delta))
9600 output_asm_insn ("agfi\t%1,%2", op);
9601 else
9602 {
9603 op[6] = gen_label_rtx ();
9604 output_asm_insn ("agf\t%1,%6-%5(%4)", op);
9605 }
9606 }
9607
9608 /* Perform vcall adjustment. */
9609 if (vcall_offset)
9610 {
9611 if (DISP_IN_RANGE (vcall_offset))
9612 {
9613 output_asm_insn ("lg\t%4,0(%1)", op);
9614 output_asm_insn ("ag\t%1,%3(%4)", op);
9615 }
9616 else if (CONST_OK_FOR_K (vcall_offset))
9617 {
9618 output_asm_insn ("lghi\t%4,%3", op);
9619 output_asm_insn ("ag\t%4,0(%1)", op);
9620 output_asm_insn ("ag\t%1,0(%4)", op);
9621 }
9622 else if (CONST_OK_FOR_Os (vcall_offset))
9623 {
9624 output_asm_insn ("lgfi\t%4,%3", op);
9625 output_asm_insn ("ag\t%4,0(%1)", op);
9626 output_asm_insn ("ag\t%1,0(%4)", op);
9627 }
9628 else
9629 {
9630 op[7] = gen_label_rtx ();
9631 output_asm_insn ("llgf\t%4,%7-%5(%4)", op);
9632 output_asm_insn ("ag\t%4,0(%1)", op);
9633 output_asm_insn ("ag\t%1,0(%4)", op);
9634 }
9635 }
9636
9637 /* Jump to target. */
9638 output_asm_insn ("jg\t%0", op);
9639
9640 /* Output literal pool if required. */
9641 if (op[5])
9642 {
9643 output_asm_insn (".align\t4", op);
9644 targetm.asm_out.internal_label (file, "L",
9645 CODE_LABEL_NUMBER (op[5]));
9646 }
9647 if (op[6])
9648 {
9649 targetm.asm_out.internal_label (file, "L",
9650 CODE_LABEL_NUMBER (op[6]));
9651 output_asm_insn (".long\t%2", op);
9652 }
9653 if (op[7])
9654 {
9655 targetm.asm_out.internal_label (file, "L",
9656 CODE_LABEL_NUMBER (op[7]));
9657 output_asm_insn (".long\t%3", op);
9658 }
9659 }
9660 else
9661 {
9662 /* Set up the base pointer if required. */
9663 if (!vcall_offset
9664 || (!DISP_IN_RANGE (delta)
9665 && !CONST_OK_FOR_K (delta)
9666 && !CONST_OK_FOR_Os (delta))
9667 || (!DISP_IN_RANGE (delta)
9668 && !CONST_OK_FOR_K (vcall_offset)
9669 && !CONST_OK_FOR_Os (vcall_offset)))
9670 {
9671 op[5] = gen_label_rtx ();
9672 output_asm_insn ("basr\t%4,0", op);
9673 targetm.asm_out.internal_label (file, "L",
9674 CODE_LABEL_NUMBER (op[5]));
9675 }
9676
9677 /* Add DELTA to this pointer. */
9678 if (delta)
9679 {
9680 if (CONST_OK_FOR_J (delta))
9681 output_asm_insn ("la\t%1,%2(%1)", op);
9682 else if (DISP_IN_RANGE (delta))
9683 output_asm_insn ("lay\t%1,%2(%1)", op);
9684 else if (CONST_OK_FOR_K (delta))
9685 output_asm_insn ("ahi\t%1,%2", op);
9686 else if (CONST_OK_FOR_Os (delta))
9687 output_asm_insn ("afi\t%1,%2", op);
9688 else
9689 {
9690 op[6] = gen_label_rtx ();
9691 output_asm_insn ("a\t%1,%6-%5(%4)", op);
9692 }
9693 }
9694
9695 /* Perform vcall adjustment. */
9696 if (vcall_offset)
9697 {
9698 if (CONST_OK_FOR_J (vcall_offset))
9699 {
9700 output_asm_insn ("l\t%4,0(%1)", op);
9701 output_asm_insn ("a\t%1,%3(%4)", op);
9702 }
9703 else if (DISP_IN_RANGE (vcall_offset))
9704 {
9705 output_asm_insn ("l\t%4,0(%1)", op);
9706 output_asm_insn ("ay\t%1,%3(%4)", op);
9707 }
9708 else if (CONST_OK_FOR_K (vcall_offset))
9709 {
9710 output_asm_insn ("lhi\t%4,%3", op);
9711 output_asm_insn ("a\t%4,0(%1)", op);
9712 output_asm_insn ("a\t%1,0(%4)", op);
9713 }
9714 else if (CONST_OK_FOR_Os (vcall_offset))
9715 {
9716 output_asm_insn ("iilf\t%4,%3", op);
9717 output_asm_insn ("a\t%4,0(%1)", op);
9718 output_asm_insn ("a\t%1,0(%4)", op);
9719 }
9720 else
9721 {
9722 op[7] = gen_label_rtx ();
9723 output_asm_insn ("l\t%4,%7-%5(%4)", op);
9724 output_asm_insn ("a\t%4,0(%1)", op);
9725 output_asm_insn ("a\t%1,0(%4)", op);
9726 }
9727
9728 /* We had to clobber the base pointer register.
9729 Set up the base pointer again (with a different base). */
9730 op[5] = gen_label_rtx ();
9731 output_asm_insn ("basr\t%4,0", op);
9732 targetm.asm_out.internal_label (file, "L",
9733 CODE_LABEL_NUMBER (op[5]));
9734 }
9735
9736 /* Jump to target. */
9737 op[8] = gen_label_rtx ();
9738
9739 if (!flag_pic)
9740 output_asm_insn ("l\t%4,%8-%5(%4)", op);
9741 else if (!nonlocal)
9742 output_asm_insn ("a\t%4,%8-%5(%4)", op);
9743 /* We cannot call through .plt, since .plt requires %r12 loaded. */
9744 else if (flag_pic == 1)
9745 {
9746 output_asm_insn ("a\t%4,%8-%5(%4)", op);
9747 output_asm_insn ("l\t%4,%0(%4)", op);
9748 }
9749 else if (flag_pic == 2)
9750 {
9751 op[9] = gen_rtx_REG (Pmode, 0);
9752 output_asm_insn ("l\t%9,%8-4-%5(%4)", op);
9753 output_asm_insn ("a\t%4,%8-%5(%4)", op);
9754 output_asm_insn ("ar\t%4,%9", op);
9755 output_asm_insn ("l\t%4,0(%4)", op);
9756 }
9757
9758 output_asm_insn ("br\t%4", op);
9759
9760 /* Output literal pool. */
9761 output_asm_insn (".align\t4", op);
9762
9763 if (nonlocal && flag_pic == 2)
9764 output_asm_insn (".long\t%0", op);
9765 if (nonlocal)
9766 {
9767 op[0] = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
9768 SYMBOL_REF_FLAGS (op[0]) = SYMBOL_FLAG_LOCAL;
9769 }
9770
9771 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[8]));
9772 if (!flag_pic)
9773 output_asm_insn (".long\t%0", op);
9774 else
9775 output_asm_insn (".long\t%0-%5", op);
9776
9777 if (op[6])
9778 {
9779 targetm.asm_out.internal_label (file, "L",
9780 CODE_LABEL_NUMBER (op[6]));
9781 output_asm_insn (".long\t%2", op);
9782 }
9783 if (op[7])
9784 {
9785 targetm.asm_out.internal_label (file, "L",
9786 CODE_LABEL_NUMBER (op[7]));
9787 output_asm_insn (".long\t%3", op);
9788 }
9789 }
9790 final_end_function ();
9791 }
9792
9793 static bool
9794 s390_valid_pointer_mode (enum machine_mode mode)
9795 {
9796 return (mode == SImode || (TARGET_64BIT && mode == DImode));
9797 }
9798
9799 /* Checks whether the given CALL_EXPR would use a call-saved
9800 register. This is used to decide whether sibling call
9801 optimization could be performed on the respective function
9802 call. */
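/* For example (illustrative, assuming the usual s390 ELF convention of
   passing the first five integer arguments in %r2-%r6):

     extern long callee (long, long, long, long, long);
     long caller (void) { return callee (1, 2, 3, 4, 5); }

   The fifth argument is passed in the call-saved register %r6, so this
   function returns true for the call and sibcall optimization is not
   performed.  */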
9803
9804 static bool
9805 s390_call_saved_register_used (tree call_expr)
9806 {
9807 CUMULATIVE_ARGS cum_v;
9808 cumulative_args_t cum;
9809 tree parameter;
9810 enum machine_mode mode;
9811 tree type;
9812 rtx parm_rtx;
9813 int reg, i;
9814
9815 INIT_CUMULATIVE_ARGS (cum_v, NULL, NULL, 0, 0);
9816 cum = pack_cumulative_args (&cum_v);
9817
9818 for (i = 0; i < call_expr_nargs (call_expr); i++)
9819 {
9820 parameter = CALL_EXPR_ARG (call_expr, i);
9821 gcc_assert (parameter);
9822
9823 /* For an undeclared variable passed as parameter we will get
9824 an ERROR_MARK node here. */
9825 if (TREE_CODE (parameter) == ERROR_MARK)
9826 return true;
9827
9828 type = TREE_TYPE (parameter);
9829 gcc_assert (type);
9830
9831 mode = TYPE_MODE (type);
9832 gcc_assert (mode);
9833
9834 if (pass_by_reference (&cum_v, mode, type, true))
9835 {
9836 mode = Pmode;
9837 type = build_pointer_type (type);
9838 }
9839
9840 parm_rtx = s390_function_arg (cum, mode, type, 0);
9841
9842 s390_function_arg_advance (cum, mode, type, 0);
9843
9844 if (!parm_rtx)
9845 continue;
9846
9847 if (REG_P (parm_rtx))
9848 {
9849 for (reg = 0;
9850 reg < HARD_REGNO_NREGS (REGNO (parm_rtx), GET_MODE (parm_rtx));
9851 reg++)
9852 if (!call_used_regs[reg + REGNO (parm_rtx)])
9853 return true;
9854 }
9855
9856 if (GET_CODE (parm_rtx) == PARALLEL)
9857 {
9858 int i;
9859
9860 for (i = 0; i < XVECLEN (parm_rtx, 0); i++)
9861 {
9862 rtx r = XEXP (XVECEXP (parm_rtx, 0, i), 0);
9863
9864 gcc_assert (REG_P (r));
9865
9866 for (reg = 0;
9867 reg < HARD_REGNO_NREGS (REGNO (r), GET_MODE (r));
9868 reg++)
9869 if (!call_used_regs[reg + REGNO (r)])
9870 return true;
9871 }
9872 }
9873
9874 }
9875 return false;
9876 }
9877
9878 /* Return true if the given call expression can be
9879 turned into a sibling call.
9880 DECL holds the declaration of the function to be called whereas
9881 EXP is the call expression itself. */
9882
9883 static bool
9884 s390_function_ok_for_sibcall (tree decl, tree exp)
9885 {
9886 /* The TPF epilogue uses register 1. */
9887 if (TARGET_TPF_PROFILING)
9888 return false;
9889
9890 /* The 31 bit PLT code uses register 12 (GOT pointer - caller saved)
9891 which would have to be restored before the sibcall. */
9892 if (!TARGET_64BIT && flag_pic && decl && !targetm.binds_local_p (decl))
9893 return false;
9894
9895 /* Register 6 on s390 is available as an argument register but unfortunately
9896 "caller saved". This makes functions needing this register for arguments
9897 not suitable for sibcalls. */
9898 return !s390_call_saved_register_used (exp);
9899 }
9900
9901 /* Return the fixed registers used for condition codes. */
9902
9903 static bool
9904 s390_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
9905 {
9906 *p1 = CC_REGNUM;
9907 *p2 = INVALID_REGNUM;
9908
9909 return true;
9910 }
9911
9912 /* This function is used by the call expanders of the machine description.
9913 It emits the call insn itself together with the necessary operations
9914 to adjust the target address and returns the emitted insn.
9915 ADDR_LOCATION is the target address rtx
9916 TLS_CALL the location of the thread-local symbol
9917 RESULT_REG the register where the result of the call should be stored
9918 RETADDR_REG the register where the return address should be stored
9919 If this parameter is NULL_RTX the call is considered
9920 to be a sibling call. */
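/* For example (an illustrative case, not additional logic): a normal call
   to an external function compiled with -fpic reaches this point with
   ADDR_LOCATION being the function's SYMBOL_REF, TLS_CALL == NULL_RTX and
   RETADDR_REG being the return address register; the code below then
   replaces the symbol by its @PLT form.  A sibling call is signalled by
   RETADDR_REG == NULL_RTX instead.  */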
9921
9922 rtx
9923 s390_emit_call (rtx addr_location, rtx tls_call, rtx result_reg,
9924 rtx retaddr_reg)
9925 {
9926 bool plt_call = false;
9927 rtx insn;
9928 rtx call;
9929 rtx clobber;
9930 rtvec vec;
9931
9932 /* Direct function calls need special treatment. */
9933 if (GET_CODE (addr_location) == SYMBOL_REF)
9934 {
9935 /* When calling a global routine in PIC mode, we must
9936 replace the symbol itself with the PLT stub. */
9937 if (flag_pic && !SYMBOL_REF_LOCAL_P (addr_location))
9938 {
9939 if (retaddr_reg != NULL_RTX)
9940 {
9941 addr_location = gen_rtx_UNSPEC (Pmode,
9942 gen_rtvec (1, addr_location),
9943 UNSPEC_PLT);
9944 addr_location = gen_rtx_CONST (Pmode, addr_location);
9945 plt_call = true;
9946 }
9947 else
9948 /* For -fpic code the PLT entries might use r12 which is
9949 call-saved. Therefore we cannot do a sibcall when
9950 calling directly using a symbol ref. When reaching
9951 this point we decided (in s390_function_ok_for_sibcall)
9952 to do a sibcall for a function pointer but one of the
9953 optimizers was able to get rid of the function pointer
9954 by propagating the symbol ref into the call. This
9955 optimization is illegal for S/390 so we turn the direct
9956 call into an indirect call again. */
9957 addr_location = force_reg (Pmode, addr_location);
9958 }
9959
9960 /* Unless we can use the bras(l) insn, force the
9961 routine address into a register. */
9962 if (!TARGET_SMALL_EXEC && !TARGET_CPU_ZARCH)
9963 {
9964 if (flag_pic)
9965 addr_location = legitimize_pic_address (addr_location, 0);
9966 else
9967 addr_location = force_reg (Pmode, addr_location);
9968 }
9969 }
9970
9971 /* If it is already an indirect call or the code above moved the
9972 SYMBOL_REF to somewhere else, make sure the address can be found in
9973 register 1. */
9974 if (retaddr_reg == NULL_RTX
9975 && GET_CODE (addr_location) != SYMBOL_REF
9976 && !plt_call)
9977 {
9978 emit_move_insn (gen_rtx_REG (Pmode, SIBCALL_REGNUM), addr_location);
9979 addr_location = gen_rtx_REG (Pmode, SIBCALL_REGNUM);
9980 }
9981
9982 addr_location = gen_rtx_MEM (QImode, addr_location);
9983 call = gen_rtx_CALL (VOIDmode, addr_location, const0_rtx);
9984
9985 if (result_reg != NULL_RTX)
9986 call = gen_rtx_SET (VOIDmode, result_reg, call);
9987
9988 if (retaddr_reg != NULL_RTX)
9989 {
9990 clobber = gen_rtx_CLOBBER (VOIDmode, retaddr_reg);
9991
9992 if (tls_call != NULL_RTX)
9993 vec = gen_rtvec (3, call, clobber,
9994 gen_rtx_USE (VOIDmode, tls_call));
9995 else
9996 vec = gen_rtvec (2, call, clobber);
9997
9998 call = gen_rtx_PARALLEL (VOIDmode, vec);
9999 }
10000
10001 insn = emit_call_insn (call);
10002
10003 /* 31-bit PLT stubs and tls calls use the GOT register implicitly. */
10004 if ((!TARGET_64BIT && plt_call) || tls_call != NULL_RTX)
10005 {
10006 /* s390_function_ok_for_sibcall should
10007 have denied sibcalls in this case. */
10008 gcc_assert (retaddr_reg != NULL_RTX);
10009 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), gen_rtx_REG (Pmode, 12));
10010 }
10011 return insn;
10012 }
10013
10014 /* Implement TARGET_CONDITIONAL_REGISTER_USAGE. */
10015
10016 static void
10017 s390_conditional_register_usage (void)
10018 {
10019 int i;
10020
10021 if (flag_pic)
10022 {
10023 fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
10024 call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
10025 }
10026 if (TARGET_CPU_ZARCH)
10027 {
10028 fixed_regs[BASE_REGNUM] = 0;
10029 call_used_regs[BASE_REGNUM] = 0;
10030 fixed_regs[RETURN_REGNUM] = 0;
10031 call_used_regs[RETURN_REGNUM] = 0;
10032 }
10033 if (TARGET_64BIT)
10034 {
10035 for (i = 24; i < 32; i++)
10036 call_used_regs[i] = call_really_used_regs[i] = 0;
10037 }
10038 else
10039 {
10040 for (i = 18; i < 20; i++)
10041 call_used_regs[i] = call_really_used_regs[i] = 0;
10042 }
10043
10044 if (TARGET_SOFT_FLOAT)
10045 {
10046 for (i = 16; i < 32; i++)
10047 call_used_regs[i] = fixed_regs[i] = 1;
10048 }
10049 }
10050
10051 /* Corresponding function to eh_return expander. */
10052
10053 static GTY(()) rtx s390_tpf_eh_return_symbol;
10054 void
10055 s390_emit_tpf_eh_return (rtx target)
10056 {
10057 rtx insn, reg;
10058
10059 if (!s390_tpf_eh_return_symbol)
10060 s390_tpf_eh_return_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tpf_eh_return");
10061
10062 reg = gen_rtx_REG (Pmode, 2);
10063
10064 emit_move_insn (reg, target);
10065 insn = s390_emit_call (s390_tpf_eh_return_symbol, NULL_RTX, reg,
10066 gen_rtx_REG (Pmode, RETURN_REGNUM));
10067 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), reg);
10068
10069 emit_move_insn (EH_RETURN_HANDLER_RTX, reg);
10070 }
10071
10072 /* Rework the prologue/epilogue to avoid saving/restoring
10073 registers unnecessarily. */
10074
10075 static void
10076 s390_optimize_prologue (void)
10077 {
10078 rtx insn, new_insn, next_insn;
10079
10080 /* Do a final recompute of the frame-related data. */
10081
10082 s390_update_frame_layout ();
10083
10084 /* If all special registers are in fact used, there's nothing we
10085 can do, so no point in walking the insn list. */
10086
10087 if (cfun_frame_layout.first_save_gpr <= BASE_REGNUM
10088 && cfun_frame_layout.last_save_gpr >= BASE_REGNUM
10089 && (TARGET_CPU_ZARCH
10090 || (cfun_frame_layout.first_save_gpr <= RETURN_REGNUM
10091 && cfun_frame_layout.last_save_gpr >= RETURN_REGNUM)))
10092 return;
10093
10094 /* Search for prologue/epilogue insns and replace them. */
10095
10096 for (insn = get_insns (); insn; insn = next_insn)
10097 {
10098 int first, last, off;
10099 rtx set, base, offset;
10100
10101 next_insn = NEXT_INSN (insn);
10102
10103 if (! NONJUMP_INSN_P (insn))
10104 continue;
10105
10106 if (GET_CODE (PATTERN (insn)) == PARALLEL
10107 && store_multiple_operation (PATTERN (insn), VOIDmode))
10108 {
10109 set = XVECEXP (PATTERN (insn), 0, 0);
10110 first = REGNO (SET_SRC (set));
10111 last = first + XVECLEN (PATTERN (insn), 0) - 1;
10112 offset = const0_rtx;
10113 base = eliminate_constant_term (XEXP (SET_DEST (set), 0), &offset);
10114 off = INTVAL (offset);
10115
10116 if (GET_CODE (base) != REG || off < 0)
10117 continue;
10118 if (cfun_frame_layout.first_save_gpr != -1
10119 && (cfun_frame_layout.first_save_gpr < first
10120 || cfun_frame_layout.last_save_gpr > last))
10121 continue;
10122 if (REGNO (base) != STACK_POINTER_REGNUM
10123 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
10124 continue;
10125 if (first > BASE_REGNUM || last < BASE_REGNUM)
10126 continue;
10127
10128 if (cfun_frame_layout.first_save_gpr != -1)
10129 {
10130 new_insn = save_gprs (base,
10131 off + (cfun_frame_layout.first_save_gpr
10132 - first) * UNITS_PER_LONG,
10133 cfun_frame_layout.first_save_gpr,
10134 cfun_frame_layout.last_save_gpr);
10135 new_insn = emit_insn_before (new_insn, insn);
10136 INSN_ADDRESSES_NEW (new_insn, -1);
10137 }
10138
10139 remove_insn (insn);
10140 continue;
10141 }
10142
10143 if (cfun_frame_layout.first_save_gpr == -1
10144 && GET_CODE (PATTERN (insn)) == SET
10145 && GET_CODE (SET_SRC (PATTERN (insn))) == REG
10146 && (REGNO (SET_SRC (PATTERN (insn))) == BASE_REGNUM
10147 || (!TARGET_CPU_ZARCH
10148 && REGNO (SET_SRC (PATTERN (insn))) == RETURN_REGNUM))
10149 && GET_CODE (SET_DEST (PATTERN (insn))) == MEM)
10150 {
10151 set = PATTERN (insn);
10152 first = REGNO (SET_SRC (set));
10153 offset = const0_rtx;
10154 base = eliminate_constant_term (XEXP (SET_DEST (set), 0), &offset);
10155 off = INTVAL (offset);
10156
10157 if (GET_CODE (base) != REG || off < 0)
10158 continue;
10159 if (REGNO (base) != STACK_POINTER_REGNUM
10160 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
10161 continue;
10162
10163 remove_insn (insn);
10164 continue;
10165 }
10166
10167 if (GET_CODE (PATTERN (insn)) == PARALLEL
10168 && load_multiple_operation (PATTERN (insn), VOIDmode))
10169 {
10170 set = XVECEXP (PATTERN (insn), 0, 0);
10171 first = REGNO (SET_DEST (set));
10172 last = first + XVECLEN (PATTERN (insn), 0) - 1;
10173 offset = const0_rtx;
10174 base = eliminate_constant_term (XEXP (SET_SRC (set), 0), &offset);
10175 off = INTVAL (offset);
10176
10177 if (GET_CODE (base) != REG || off < 0)
10178 continue;
10179 if (cfun_frame_layout.first_restore_gpr != -1
10180 && (cfun_frame_layout.first_restore_gpr < first
10181 || cfun_frame_layout.last_restore_gpr > last))
10182 continue;
10183 if (REGNO (base) != STACK_POINTER_REGNUM
10184 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
10185 continue;
10186 if (first > BASE_REGNUM || last < BASE_REGNUM)
10187 continue;
10188
10189 if (cfun_frame_layout.first_restore_gpr != -1)
10190 {
10191 new_insn = restore_gprs (base,
10192 off + (cfun_frame_layout.first_restore_gpr
10193 - first) * UNITS_PER_LONG,
10194 cfun_frame_layout.first_restore_gpr,
10195 cfun_frame_layout.last_restore_gpr);
10196 new_insn = emit_insn_before (new_insn, insn);
10197 INSN_ADDRESSES_NEW (new_insn, -1);
10198 }
10199
10200 remove_insn (insn);
10201 continue;
10202 }
10203
10204 if (cfun_frame_layout.first_restore_gpr == -1
10205 && GET_CODE (PATTERN (insn)) == SET
10206 && GET_CODE (SET_DEST (PATTERN (insn))) == REG
10207 && (REGNO (SET_DEST (PATTERN (insn))) == BASE_REGNUM
10208 || (!TARGET_CPU_ZARCH
10209 && REGNO (SET_DEST (PATTERN (insn))) == RETURN_REGNUM))
10210 && GET_CODE (SET_SRC (PATTERN (insn))) == MEM)
10211 {
10212 set = PATTERN (insn);
10213 first = REGNO (SET_DEST (set));
10214 offset = const0_rtx;
10215 base = eliminate_constant_term (XEXP (SET_SRC (set), 0), &offset);
10216 off = INTVAL (offset);
10217
10218 if (GET_CODE (base) != REG || off < 0)
10219 continue;
10220 if (REGNO (base) != STACK_POINTER_REGNUM
10221 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
10222 continue;
10223
10224 remove_insn (insn);
10225 continue;
10226 }
10227 }
10228 }
10229
10230 /* On z10 and later the dynamic branch prediction must see the
10231 backward jump within a certain window. If it does not, it falls back to
10232 the static prediction. This function rearranges the backward loop
10233 branch in a way which makes the static prediction always correct.
10234 The function returns true if it added an instruction. */
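/* An illustrative sketch of the rewrite performed below, where L is the
   faraway backward target and Lnew a freshly created label:

     before:                     after:
       L:  ...                     L:  ...
           ...                         ...
           jCC   L                     jNCC  Lnew    # condition inverted
                                       j     L       # unconditional backward jump
                                     Lnew:

   The backward branch becomes unconditional and is therefore always
   predicted correctly; only a short forward branch remains conditional.  */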
10235 static bool
10236 s390_fix_long_loop_prediction (rtx insn)
10237 {
10238 rtx set = single_set (insn);
10239 rtx code_label, label_ref, new_label;
10240 rtx uncond_jump;
10241 rtx cur_insn;
10242 rtx tmp;
10243 int distance;
10244
10245 /* This will exclude branch on count and branch on index patterns
10246 since these are correctly statically predicted. */
10247 if (!set
10248 || SET_DEST (set) != pc_rtx
10249 || GET_CODE (SET_SRC(set)) != IF_THEN_ELSE)
10250 return false;
10251
10252 label_ref = (GET_CODE (XEXP (SET_SRC (set), 1)) == LABEL_REF ?
10253 XEXP (SET_SRC (set), 1) : XEXP (SET_SRC (set), 2));
10254
10255 gcc_assert (GET_CODE (label_ref) == LABEL_REF);
10256
10257 code_label = XEXP (label_ref, 0);
10258
10259 if (INSN_ADDRESSES (INSN_UID (code_label)) == -1
10260 || INSN_ADDRESSES (INSN_UID (insn)) == -1
10261 || (INSN_ADDRESSES (INSN_UID (insn))
10262 - INSN_ADDRESSES (INSN_UID (code_label)) < PREDICT_DISTANCE))
10263 return false;
10264
10265 for (distance = 0, cur_insn = PREV_INSN (insn);
10266 distance < PREDICT_DISTANCE - 6;
10267 distance += get_attr_length (cur_insn), cur_insn = PREV_INSN (cur_insn))
10268 if (!cur_insn || JUMP_P (cur_insn) || LABEL_P (cur_insn))
10269 return false;
10270
10271 new_label = gen_label_rtx ();
10272 uncond_jump = emit_jump_insn_after (
10273 gen_rtx_SET (VOIDmode, pc_rtx,
10274 gen_rtx_LABEL_REF (VOIDmode, code_label)),
10275 insn);
10276 emit_label_after (new_label, uncond_jump);
10277
10278 tmp = XEXP (SET_SRC (set), 1);
10279 XEXP (SET_SRC (set), 1) = XEXP (SET_SRC (set), 2);
10280 XEXP (SET_SRC (set), 2) = tmp;
10281 INSN_CODE (insn) = -1;
10282
10283 XEXP (label_ref, 0) = new_label;
10284 JUMP_LABEL (insn) = new_label;
10285 JUMP_LABEL (uncond_jump) = code_label;
10286
10287 return true;
10288 }
10289
10290 /* Returns 1 if INSN reads the value of REG for purposes not related
10291 to addressing of memory, and 0 otherwise. */
10292 static int
10293 s390_non_addr_reg_read_p (rtx reg, rtx insn)
10294 {
10295 return reg_referenced_p (reg, PATTERN (insn))
10296 && !reg_used_in_mem_p (REGNO (reg), PATTERN (insn));
10297 }
10298
10299 /* Starting from INSN find_cond_jump looks downwards in the insn
10300 stream for a single jump insn which is the last user of the
10301 condition code set in INSN. */
10302 static rtx
10303 find_cond_jump (rtx insn)
10304 {
10305 for (; insn; insn = NEXT_INSN (insn))
10306 {
10307 rtx ite, cc;
10308
10309 if (LABEL_P (insn))
10310 break;
10311
10312 if (!JUMP_P (insn))
10313 {
10314 if (reg_mentioned_p (gen_rtx_REG (CCmode, CC_REGNUM), insn))
10315 break;
10316 continue;
10317 }
10318
10319 /* This will be triggered by a return. */
10320 if (GET_CODE (PATTERN (insn)) != SET)
10321 break;
10322
10323 gcc_assert (SET_DEST (PATTERN (insn)) == pc_rtx);
10324 ite = SET_SRC (PATTERN (insn));
10325
10326 if (GET_CODE (ite) != IF_THEN_ELSE)
10327 break;
10328
10329 cc = XEXP (XEXP (ite, 0), 0);
10330 if (!REG_P (cc) || !CC_REGNO_P (REGNO (cc)))
10331 break;
10332
10333 if (find_reg_note (insn, REG_DEAD, cc))
10334 return insn;
10335 break;
10336 }
10337
10338 return NULL_RTX;
10339 }
10340
10341 /* Swap the condition in COND and the operands in OP0 and OP1 so that
10342 the semantics does not change. If NULL_RTX is passed as COND the
10343 function tries to find the conditional jump starting with INSN. */
10344 static void
10345 s390_swap_cmp (rtx cond, rtx *op0, rtx *op1, rtx insn)
10346 {
10347 rtx tmp = *op0;
10348
10349 if (cond == NULL_RTX)
10350 {
10351 rtx jump = find_cond_jump (NEXT_INSN (insn));
10352 jump = jump ? single_set (jump) : NULL_RTX;
10353
10354 if (jump == NULL_RTX)
10355 return;
10356
10357 cond = XEXP (XEXP (jump, 1), 0);
10358 }
10359
10360 *op0 = *op1;
10361 *op1 = tmp;
10362 PUT_CODE (cond, swap_condition (GET_CODE (cond)));
10363 }
10364
10365 /* On z10, instructions of the compare-and-branch family have the
10366 property of accessing the register occurring as second operand with
10367 its bits complemented. If such a compare is grouped with a second
10368 instruction that accesses the same register non-complemented, and
10369 if that register's value is delivered via a bypass, then the
10370 pipeline recycles, thereby causing significant performance decline.
10371 This function locates such situations and exchanges the two
10372 operands of the compare. The function returns true whenever it
10373 added an insn. */
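/* An illustrative example (register numbers are arbitrary):

     crj   %r1,%r2,...     # second operand %r2 is accessed complemented
     ar    %r3,%r2         # grouped insn reads %r2 non-complemented

   If %r2 is delivered via a bypass here, the pipeline recycles.  The code
   below either swaps the compare operands or emits a NOP to separate the
   two instructions.  */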
10374 static bool
10375 s390_z10_optimize_cmp (rtx insn)
10376 {
10377 rtx prev_insn, next_insn;
10378 bool insn_added_p = false;
10379 rtx cond, *op0, *op1;
10380
10381 if (GET_CODE (PATTERN (insn)) == PARALLEL)
10382 {
10383 /* Handle compare and branch and branch on count
10384 instructions. */
10385 rtx pattern = single_set (insn);
10386
10387 if (!pattern
10388 || SET_DEST (pattern) != pc_rtx
10389 || GET_CODE (SET_SRC (pattern)) != IF_THEN_ELSE)
10390 return false;
10391
10392 cond = XEXP (SET_SRC (pattern), 0);
10393 op0 = &XEXP (cond, 0);
10394 op1 = &XEXP (cond, 1);
10395 }
10396 else if (GET_CODE (PATTERN (insn)) == SET)
10397 {
10398 rtx src, dest;
10399
10400 /* Handle normal compare instructions. */
10401 src = SET_SRC (PATTERN (insn));
10402 dest = SET_DEST (PATTERN (insn));
10403
10404 if (!REG_P (dest)
10405 || !CC_REGNO_P (REGNO (dest))
10406 || GET_CODE (src) != COMPARE)
10407 return false;
10408
10409 /* s390_swap_cmp will try to find the conditional
10410 jump when passing NULL_RTX as condition. */
10411 cond = NULL_RTX;
10412 op0 = &XEXP (src, 0);
10413 op1 = &XEXP (src, 1);
10414 }
10415 else
10416 return false;
10417
10418 if (!REG_P (*op0) || !REG_P (*op1))
10419 return false;
10420
10421 if (GET_MODE_CLASS (GET_MODE (*op0)) != MODE_INT)
10422 return false;
10423
10424 /* Swap the COMPARE arguments and its mask if there is a
10425 conflicting access in the previous insn. */
10426 prev_insn = prev_active_insn (insn);
10427 if (prev_insn != NULL_RTX && INSN_P (prev_insn)
10428 && reg_referenced_p (*op1, PATTERN (prev_insn)))
10429 s390_swap_cmp (cond, op0, op1, insn);
10430
10431 /* Check if there is a conflict with the next insn. If there
10432 was no conflict with the previous insn, then swap the
10433 COMPARE arguments and its mask. If we already swapped
10434 the operands, or if swapping them would cause a conflict
10435 with the previous insn, issue a NOP after the COMPARE in
10436 order to separate the two instructions. */
10437 next_insn = next_active_insn (insn);
10438 if (next_insn != NULL_RTX && INSN_P (next_insn)
10439 && s390_non_addr_reg_read_p (*op1, next_insn))
10440 {
10441 if (prev_insn != NULL_RTX && INSN_P (prev_insn)
10442 && s390_non_addr_reg_read_p (*op0, prev_insn))
10443 {
10444 if (REGNO (*op1) == 0)
10445 emit_insn_after (gen_nop1 (), insn);
10446 else
10447 emit_insn_after (gen_nop (), insn);
10448 insn_added_p = true;
10449 }
10450 else
10451 s390_swap_cmp (cond, op0, op1, insn);
10452 }
10453 return insn_added_p;
10454 }
10455
10456 /* Perform machine-dependent processing. */
10457
10458 static void
10459 s390_reorg (void)
10460 {
10461 bool pool_overflow = false;
10462
10463 /* Make sure all splits have been performed; splits after
10464 machine_dependent_reorg might confuse insn length counts. */
10465 split_all_insns_noflow ();
10466
10467 /* Install the main literal pool and the associated base
10468 register load insns.
10469
10470 In addition, there are two problematic situations we need
10471 to correct:
10472
10473 - the literal pool might be > 4096 bytes in size, so that
10474 some of its elements cannot be directly accessed
10475
10476 - a branch target might be > 64K away from the branch, so that
10477 it is not possible to use a PC-relative instruction.
10478
10479 To fix those, we split the single literal pool into multiple
10480 pool chunks, reloading the pool base register at various
10481 points throughout the function to ensure it always points to
10482 the pool chunk the following code expects, and / or replace
10483 PC-relative branches by absolute branches.
10484
10485 However, the two problems are interdependent: splitting the
10486 literal pool can move a branch further away from its target,
10487 causing the 64K limit to overflow, and on the other hand,
10488 replacing a PC-relative branch by an absolute branch means
10489 we need to put the branch target address into the literal
10490 pool, possibly causing it to overflow.
10491
10492 So, we loop trying to fix up both problems until we manage
10493 to satisfy both conditions at the same time. Note that the
10494 loop is guaranteed to terminate as every pass of the loop
10495 strictly decreases the total number of PC-relative branches
10496 in the function. (This is not completely true as there
10497 might be branch-over-pool insns introduced by chunkify_start.
10498 Those never need to be split however.) */
10499
10500 for (;;)
10501 {
10502 struct constant_pool *pool = NULL;
10503
10504 /* Collect the literal pool. */
10505 if (!pool_overflow)
10506 {
10507 pool = s390_mainpool_start ();
10508 if (!pool)
10509 pool_overflow = true;
10510 }
10511
10512 /* If literal pool overflowed, start to chunkify it. */
10513 if (pool_overflow)
10514 pool = s390_chunkify_start ();
10515
10516 /* Split out-of-range branches. If this has created new
10517 literal pool entries, cancel current chunk list and
10518 recompute it. zSeries machines have large branch
10519 instructions, so we never need to split a branch. */
10520 if (!TARGET_CPU_ZARCH && s390_split_branches ())
10521 {
10522 if (pool_overflow)
10523 s390_chunkify_cancel (pool);
10524 else
10525 s390_mainpool_cancel (pool);
10526
10527 continue;
10528 }
10529
10530 /* If we made it up to here, both conditions are satisfied.
10531 Finish up literal pool related changes. */
10532 if (pool_overflow)
10533 s390_chunkify_finish (pool);
10534 else
10535 s390_mainpool_finish (pool);
10536
10537 /* We're done splitting branches. */
10538 cfun->machine->split_branches_pending_p = false;
10539 break;
10540 }
10541
10542 /* Generate out-of-pool execute target insns. */
10543 if (TARGET_CPU_ZARCH)
10544 {
10545 rtx insn, label, target;
10546
10547 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
10548 {
10549 label = s390_execute_label (insn);
10550 if (!label)
10551 continue;
10552
10553 gcc_assert (label != const0_rtx);
10554
10555 target = emit_label (XEXP (label, 0));
10556 INSN_ADDRESSES_NEW (target, -1);
10557
10558 target = emit_insn (s390_execute_target (insn));
10559 INSN_ADDRESSES_NEW (target, -1);
10560 }
10561 }
10562
10563 /* Try to optimize prologue and epilogue further. */
10564 s390_optimize_prologue ();
10565
10566 /* Walk over the insns and do some >=z10 specific changes. */
10567 if (s390_tune == PROCESSOR_2097_Z10
10568 || s390_tune == PROCESSOR_2817_Z196
10569 || s390_tune == PROCESSOR_2827_ZEC12)
10570 {
10571 rtx insn;
10572 bool insn_added_p = false;
10573
10574 /* The insn lengths and addresses have to be up to date for the
10575 following manipulations. */
10576 shorten_branches (get_insns ());
10577
10578 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
10579 {
10580 if (!INSN_P (insn) || INSN_CODE (insn) <= 0)
10581 continue;
10582
10583 if (JUMP_P (insn))
10584 insn_added_p |= s390_fix_long_loop_prediction (insn);
10585
10586 if ((GET_CODE (PATTERN (insn)) == PARALLEL
10587 || GET_CODE (PATTERN (insn)) == SET)
10588 && s390_tune == PROCESSOR_2097_Z10)
10589 insn_added_p |= s390_z10_optimize_cmp (insn);
10590 }
10591
10592 /* Adjust branches if we added new instructions. */
10593 if (insn_added_p)
10594 shorten_branches (get_insns ());
10595 }
10596 }
10597
10598 /* Return true if INSN is a fp load insn writing register REGNO. */
10599 static inline bool
10600 s390_fpload_toreg (rtx insn, unsigned int regno)
10601 {
10602 rtx set;
10603 enum attr_type flag = s390_safe_attr_type (insn);
10604
10605 if (flag != TYPE_FLOADSF && flag != TYPE_FLOADDF)
10606 return false;
10607
10608 set = single_set (insn);
10609
10610 if (set == NULL_RTX)
10611 return false;
10612
10613 if (!REG_P (SET_DEST (set)) || !MEM_P (SET_SRC (set)))
10614 return false;
10615
10616 if (REGNO (SET_DEST (set)) != regno)
10617 return false;
10618
10619 return true;
10620 }
10621
10622 /* This value describes the distance to be avoided between an
10623 arithmetic fp instruction and an fp load writing the same register.
10624 Z10_EARLYLOAD_DISTANCE - 1 as well as Z10_EARLYLOAD_DISTANCE + 1 is
10625 fine but the exact value has to be avoided. Otherwise the FP
10626 pipeline will throw an exception causing a major penalty. */
10627 #define Z10_EARLYLOAD_DISTANCE 7
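/* For instance (an illustrative reading of the constant above): if an FP
   multiply writing %f4 has been issued, an FP load into %f4 that would end
   up exactly 7 insns later is pushed to the end of the ready list by the
   scheduler hook below, while the same load 6 or 8 insns away would be
   left alone.  */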
10628
10629 /* Rearrange the ready list in order to avoid the situation described
10630 for Z10_EARLYLOAD_DISTANCE. A problematic load instruction is
10631 moved to the very end of the ready list. */
10632 static void
10633 s390_z10_prevent_earlyload_conflicts (rtx *ready, int *nready_p)
10634 {
10635 unsigned int regno;
10636 int nready = *nready_p;
10637 rtx tmp;
10638 int i;
10639 rtx insn;
10640 rtx set;
10641 enum attr_type flag;
10642 int distance;
10643
10644 /* Skip DISTANCE - 1 active insns. */
10645 for (insn = last_scheduled_insn, distance = Z10_EARLYLOAD_DISTANCE - 1;
10646 distance > 0 && insn != NULL_RTX;
10647 distance--, insn = prev_active_insn (insn))
10648 if (CALL_P (insn) || JUMP_P (insn))
10649 return;
10650
10651 if (insn == NULL_RTX)
10652 return;
10653
10654 set = single_set (insn);
10655
10656 if (set == NULL_RTX || !REG_P (SET_DEST (set))
10657 || GET_MODE_CLASS (GET_MODE (SET_DEST (set))) != MODE_FLOAT)
10658 return;
10659
10660 flag = s390_safe_attr_type (insn);
10661
10662 if (flag == TYPE_FLOADSF || flag == TYPE_FLOADDF)
10663 return;
10664
10665 regno = REGNO (SET_DEST (set));
10666 i = nready - 1;
10667
10668 while (!s390_fpload_toreg (ready[i], regno) && i > 0)
10669 i--;
10670
10671 if (!i)
10672 return;
10673
10674 tmp = ready[i];
10675 memmove (&ready[1], &ready[0], sizeof (rtx) * i);
10676 ready[0] = tmp;
10677 }
10678
10679
10680 /* The s390_sched_state variable tracks the state of the current or
10681 the last instruction group.
10682
10683 0,1,2 number of instructions scheduled in the current group
10684 3 the last group is complete - normal insns
10685 4 the last group was a cracked/expanded insn */
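/* Example transitions (illustrative; the updates are performed in
   s390_sched_variable_issue below): three normal insns advance the state
   0 -> 1 -> 2 -> 3, a cracked or expanded insn moves it to 4 from any
   state, and the next normal insn after that completes the group again,
   i.e. moves the state back to 3.  */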
10686
10687 static int s390_sched_state;
10688
10689 #define S390_OOO_SCHED_STATE_NORMAL 3
10690 #define S390_OOO_SCHED_STATE_CRACKED 4
10691
10692 #define S390_OOO_SCHED_ATTR_MASK_CRACKED 0x1
10693 #define S390_OOO_SCHED_ATTR_MASK_EXPANDED 0x2
10694 #define S390_OOO_SCHED_ATTR_MASK_ENDGROUP 0x4
10695 #define S390_OOO_SCHED_ATTR_MASK_GROUPALONE 0x8
10696
10697 static unsigned int
10698 s390_get_sched_attrmask (rtx insn)
10699 {
10700 unsigned int mask = 0;
10701
10702 if (get_attr_ooo_cracked (insn))
10703 mask |= S390_OOO_SCHED_ATTR_MASK_CRACKED;
10704 if (get_attr_ooo_expanded (insn))
10705 mask |= S390_OOO_SCHED_ATTR_MASK_EXPANDED;
10706 if (get_attr_ooo_endgroup (insn))
10707 mask |= S390_OOO_SCHED_ATTR_MASK_ENDGROUP;
10708 if (get_attr_ooo_groupalone (insn))
10709 mask |= S390_OOO_SCHED_ATTR_MASK_GROUPALONE;
10710 return mask;
10711 }
10712
10713 /* Return the scheduling score for INSN. The higher the score the
10714 better. The score is calculated from the OOO scheduling attributes
10715 of INSN and the scheduling state s390_sched_state. */
10716 static int
10717 s390_sched_score (rtx insn)
10718 {
10719 unsigned int mask = s390_get_sched_attrmask (insn);
10720 int score = 0;
10721
10722 switch (s390_sched_state)
10723 {
10724 case 0:
10725 /* Try to put insns into the first slot which would otherwise
10726 break a group. */
10727 if ((mask & S390_OOO_SCHED_ATTR_MASK_CRACKED) != 0
10728 || (mask & S390_OOO_SCHED_ATTR_MASK_EXPANDED) != 0)
10729 score += 5;
10730 if ((mask & S390_OOO_SCHED_ATTR_MASK_GROUPALONE) != 0)
10731 score += 10;
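      /* Fall through to the case 1 scoring below. */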
10732 case 1:
10733 /* Prefer not cracked insns while trying to put together a
10734 group. */
10735 if ((mask & S390_OOO_SCHED_ATTR_MASK_CRACKED) == 0
10736 && (mask & S390_OOO_SCHED_ATTR_MASK_EXPANDED) == 0
10737 && (mask & S390_OOO_SCHED_ATTR_MASK_GROUPALONE) == 0)
10738 score += 10;
10739 if ((mask & S390_OOO_SCHED_ATTR_MASK_ENDGROUP) == 0)
10740 score += 5;
10741 break;
10742 case 2:
10743 /* Prefer not cracked insns while trying to put together a
10744 group. */
10745 if ((mask & S390_OOO_SCHED_ATTR_MASK_CRACKED) == 0
10746 && (mask & S390_OOO_SCHED_ATTR_MASK_EXPANDED) == 0
10747 && (mask & S390_OOO_SCHED_ATTR_MASK_GROUPALONE) == 0)
10748 score += 10;
10749 /* Prefer endgroup insns in the last slot. */
10750 if ((mask & S390_OOO_SCHED_ATTR_MASK_ENDGROUP) != 0)
10751 score += 10;
10752 break;
10753 case S390_OOO_SCHED_STATE_NORMAL:
10754 /* Prefer not cracked insns if the last was not cracked. */
10755 if ((mask & S390_OOO_SCHED_ATTR_MASK_CRACKED) == 0
10756 && (mask & S390_OOO_SCHED_ATTR_MASK_EXPANDED) == 0)
10757 score += 5;
10758 if ((mask & S390_OOO_SCHED_ATTR_MASK_GROUPALONE) != 0)
10759 score += 10;
10760 break;
10761 case S390_OOO_SCHED_STATE_CRACKED:
10762 /* Try to keep cracked insns together to prevent them from
10763 interrupting groups. */
10764 if ((mask & S390_OOO_SCHED_ATTR_MASK_CRACKED) != 0
10765 || (mask & S390_OOO_SCHED_ATTR_MASK_EXPANDED) != 0)
10766 score += 5;
10767 break;
10768 }
10769 return score;
10770 }
10771
10772 /* This function is called via hook TARGET_SCHED_REORDER before
10773 issuing one insn from list READY, which contains *NREADYP entries.
10774 For target z10 it reorders load instructions to avoid early load
10775 conflicts in the floating point pipeline. */
10776 static int
10777 s390_sched_reorder (FILE *file, int verbose,
10778 rtx *ready, int *nreadyp, int clock ATTRIBUTE_UNUSED)
10779 {
10780 if (s390_tune == PROCESSOR_2097_Z10)
10781 if (reload_completed && *nreadyp > 1)
10782 s390_z10_prevent_earlyload_conflicts (ready, nreadyp);
10783
10784 if (s390_tune == PROCESSOR_2827_ZEC12
10785 && reload_completed
10786 && *nreadyp > 1)
10787 {
10788 int i;
10789 int last_index = *nreadyp - 1;
10790 int max_index = -1;
10791 int max_score = -1;
10792 rtx tmp;
10793
10794 /* Just move the insn with the highest score to the top (the
10795 end) of the list. A full sort is not needed since a conflict
10796 in the hazard recognition cannot happen. So the top insn in
10797 the ready list will always be taken. */
10798 for (i = last_index; i >= 0; i--)
10799 {
10800 int score;
10801
10802 if (recog_memoized (ready[i]) < 0)
10803 continue;
10804
10805 score = s390_sched_score (ready[i]);
10806 if (score > max_score)
10807 {
10808 max_score = score;
10809 max_index = i;
10810 }
10811 }
10812
10813 if (max_index != -1)
10814 {
10815 if (max_index != last_index)
10816 {
10817 tmp = ready[max_index];
10818 ready[max_index] = ready[last_index];
10819 ready[last_index] = tmp;
10820
10821 if (verbose > 5)
10822 fprintf (file,
10823 "move insn %d to the top of list\n",
10824 INSN_UID (ready[last_index]));
10825 }
10826 else if (verbose > 5)
10827 fprintf (file,
10828 "best insn %d already on top\n",
10829 INSN_UID (ready[last_index]));
10830 }
10831
10832 if (verbose > 5)
10833 {
10834 fprintf (file, "ready list ooo attributes - sched state: %d\n",
10835 s390_sched_state);
10836
10837 for (i = last_index; i >= 0; i--)
10838 {
10839 if (recog_memoized (ready[i]) < 0)
10840 continue;
10841 fprintf (file, "insn %d score: %d: ", INSN_UID (ready[i]),
10842 s390_sched_score (ready[i]));
10843 #define PRINT_OOO_ATTR(ATTR) fprintf (file, "%s ", get_attr_##ATTR (ready[i]) ? #ATTR : "!" #ATTR);
10844 PRINT_OOO_ATTR (ooo_cracked);
10845 PRINT_OOO_ATTR (ooo_expanded);
10846 PRINT_OOO_ATTR (ooo_endgroup);
10847 PRINT_OOO_ATTR (ooo_groupalone);
10848 #undef PRINT_OOO_ATTR
10849 fprintf (file, "\n");
10850 }
10851 }
10852 }
10853
10854 return s390_issue_rate ();
10855 }
10856
10857
10858 /* This function is called via hook TARGET_SCHED_VARIABLE_ISSUE after
10859 the scheduler has issued INSN. It stores the last issued insn into
10860 last_scheduled_insn in order to make it available for
10861 s390_sched_reorder. */
10862 static int
10863 s390_sched_variable_issue (FILE *file, int verbose, rtx insn, int more)
10864 {
10865 last_scheduled_insn = insn;
10866
10867 if (s390_tune == PROCESSOR_2827_ZEC12
10868 && reload_completed
10869 && recog_memoized (insn) >= 0)
10870 {
10871 unsigned int mask = s390_get_sched_attrmask (insn);
10872
10873 if ((mask & S390_OOO_SCHED_ATTR_MASK_CRACKED) != 0
10874 || (mask & S390_OOO_SCHED_ATTR_MASK_EXPANDED) != 0)
10875 s390_sched_state = S390_OOO_SCHED_STATE_CRACKED;
10876 else if ((mask & S390_OOO_SCHED_ATTR_MASK_ENDGROUP) != 0
10877 || (mask & S390_OOO_SCHED_ATTR_MASK_GROUPALONE) != 0)
10878 s390_sched_state = S390_OOO_SCHED_STATE_NORMAL;
10879 else
10880 {
10881 /* Only normal insns are left (mask == 0). */
10882 switch (s390_sched_state)
10883 {
10884 case 0:
10885 case 1:
10886 case 2:
10887 case S390_OOO_SCHED_STATE_NORMAL:
10888 if (s390_sched_state == S390_OOO_SCHED_STATE_NORMAL)
10889 s390_sched_state = 1;
10890 else
10891 s390_sched_state++;
10892
10893 break;
10894 case S390_OOO_SCHED_STATE_CRACKED:
10895 s390_sched_state = S390_OOO_SCHED_STATE_NORMAL;
10896 break;
10897 }
10898 }
10899 if (verbose > 5)
10900 {
10901 fprintf (file, "insn %d: ", INSN_UID (insn));
10902 #define PRINT_OOO_ATTR(ATTR) \
10903 fprintf (file, "%s ", get_attr_##ATTR (insn) ? #ATTR : "");
10904 PRINT_OOO_ATTR (ooo_cracked);
10905 PRINT_OOO_ATTR (ooo_expanded);
10906 PRINT_OOO_ATTR (ooo_endgroup);
10907 PRINT_OOO_ATTR (ooo_groupalone);
10908 #undef PRINT_OOO_ATTR
10909 fprintf (file, "\n");
10910 fprintf (file, "sched state: %d\n", s390_sched_state);
10911 }
10912 }
10913
10914 if (GET_CODE (PATTERN (insn)) != USE
10915 && GET_CODE (PATTERN (insn)) != CLOBBER)
10916 return more - 1;
10917 else
10918 return more;
10919 }
10920
10921 static void
10922 s390_sched_init (FILE *file ATTRIBUTE_UNUSED,
10923 int verbose ATTRIBUTE_UNUSED,
10924 int max_ready ATTRIBUTE_UNUSED)
10925 {
10926 last_scheduled_insn = NULL_RTX;
10927 s390_sched_state = 0;
10928 }
10929
10930 /* This function checks the whole of insn X for memory references. The
10931 function always returns zero because the framework it is called
10932 from would stop recursively analyzing the insn upon a return value
10933 other than zero. The real result of this function is updating
10934 counter variable MEM_COUNT. */
10935 static int
10936 check_dpu (rtx *x, unsigned *mem_count)
10937 {
10938 if (*x != NULL_RTX && MEM_P (*x))
10939 (*mem_count)++;
10940 return 0;
10941 }
10942
10943 /* This target hook implementation for TARGET_LOOP_UNROLL_ADJUST calculates
10944 how often struct loop *LOOP should be unrolled when tuning for cpus with
10945 a built-in stride prefetcher.
10946 The loop is analyzed for memory accesses by calling check_dpu for
10947 each rtx of the loop. Depending on the loop_depth and the number of
10948 memory accesses a new number <=nunroll is returned to improve the
10949 behaviour of the hardware prefetch unit. */
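/* A worked example with illustrative numbers: for a depth-1 loop body
   containing 4 memory references and a requested unroll factor of 8 the
   result is MIN (8, 28 / 4) = 7, while the same body nested one level
   deeper yields MIN (8, 22 / 4) = 5.  */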
10950 static unsigned
10951 s390_loop_unroll_adjust (unsigned nunroll, struct loop *loop)
10952 {
10953 basic_block *bbs;
10954 rtx insn;
10955 unsigned i;
10956 unsigned mem_count = 0;
10957
10958 if (s390_tune != PROCESSOR_2097_Z10
10959 && s390_tune != PROCESSOR_2817_Z196
10960 && s390_tune != PROCESSOR_2827_ZEC12)
10961 return nunroll;
10962
10963 /* Count the number of memory references within the loop body. */
10964 bbs = get_loop_body (loop);
10965 for (i = 0; i < loop->num_nodes; i++)
10966 {
10967 for (insn = BB_HEAD (bbs[i]); insn != BB_END (bbs[i]); insn = NEXT_INSN (insn))
10968 if (INSN_P (insn) && INSN_CODE (insn) != -1)
10969 for_each_rtx (&insn, (rtx_function) check_dpu, &mem_count);
10970 }
10971 free (bbs);
10972
10973 /* Prevent division by zero, and we do not need to adjust nunroll in this case. */
10974 if (mem_count == 0)
10975 return nunroll;
10976
10977 switch (loop_depth(loop))
10978 {
10979 case 1:
10980 return MIN (nunroll, 28 / mem_count);
10981 case 2:
10982 return MIN (nunroll, 22 / mem_count);
10983 default:
10984 return MIN (nunroll, 16 / mem_count);
10985 }
10986 }
10987
10988 /* Initialize GCC target structure. */
10989
10990 #undef TARGET_ASM_ALIGNED_HI_OP
10991 #define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
10992 #undef TARGET_ASM_ALIGNED_DI_OP
10993 #define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
10994 #undef TARGET_ASM_INTEGER
10995 #define TARGET_ASM_INTEGER s390_assemble_integer
10996
10997 #undef TARGET_ASM_OPEN_PAREN
10998 #define TARGET_ASM_OPEN_PAREN ""
10999
11000 #undef TARGET_ASM_CLOSE_PAREN
11001 #define TARGET_ASM_CLOSE_PAREN ""
11002
11003 #undef TARGET_OPTION_OVERRIDE
11004 #define TARGET_OPTION_OVERRIDE s390_option_override
11005
11006 #undef TARGET_ENCODE_SECTION_INFO
11007 #define TARGET_ENCODE_SECTION_INFO s390_encode_section_info
11008
11009 #undef TARGET_SCALAR_MODE_SUPPORTED_P
11010 #define TARGET_SCALAR_MODE_SUPPORTED_P s390_scalar_mode_supported_p
11011
11012 #ifdef HAVE_AS_TLS
11013 #undef TARGET_HAVE_TLS
11014 #define TARGET_HAVE_TLS true
11015 #endif
11016 #undef TARGET_CANNOT_FORCE_CONST_MEM
11017 #define TARGET_CANNOT_FORCE_CONST_MEM s390_cannot_force_const_mem
11018
11019 #undef TARGET_DELEGITIMIZE_ADDRESS
11020 #define TARGET_DELEGITIMIZE_ADDRESS s390_delegitimize_address
11021
11022 #undef TARGET_LEGITIMIZE_ADDRESS
11023 #define TARGET_LEGITIMIZE_ADDRESS s390_legitimize_address
11024
11025 #undef TARGET_RETURN_IN_MEMORY
11026 #define TARGET_RETURN_IN_MEMORY s390_return_in_memory
11027
11028 #undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
11029 #define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA s390_output_addr_const_extra
11030
11031 #undef TARGET_ASM_OUTPUT_MI_THUNK
11032 #define TARGET_ASM_OUTPUT_MI_THUNK s390_output_mi_thunk
11033 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
11034 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
11035
11036 #undef TARGET_SCHED_ADJUST_PRIORITY
11037 #define TARGET_SCHED_ADJUST_PRIORITY s390_adjust_priority
11038 #undef TARGET_SCHED_ISSUE_RATE
11039 #define TARGET_SCHED_ISSUE_RATE s390_issue_rate
11040 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
11041 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD s390_first_cycle_multipass_dfa_lookahead
11042
11043 #undef TARGET_SCHED_VARIABLE_ISSUE
11044 #define TARGET_SCHED_VARIABLE_ISSUE s390_sched_variable_issue
11045 #undef TARGET_SCHED_REORDER
11046 #define TARGET_SCHED_REORDER s390_sched_reorder
11047 #undef TARGET_SCHED_INIT
11048 #define TARGET_SCHED_INIT s390_sched_init
11049
11050 #undef TARGET_CANNOT_COPY_INSN_P
11051 #define TARGET_CANNOT_COPY_INSN_P s390_cannot_copy_insn_p
11052 #undef TARGET_RTX_COSTS
11053 #define TARGET_RTX_COSTS s390_rtx_costs
11054 #undef TARGET_ADDRESS_COST
11055 #define TARGET_ADDRESS_COST s390_address_cost
11056 #undef TARGET_REGISTER_MOVE_COST
11057 #define TARGET_REGISTER_MOVE_COST s390_register_move_cost
11058 #undef TARGET_MEMORY_MOVE_COST
11059 #define TARGET_MEMORY_MOVE_COST s390_memory_move_cost
11060
11061 #undef TARGET_MACHINE_DEPENDENT_REORG
11062 #define TARGET_MACHINE_DEPENDENT_REORG s390_reorg
11063
11064 #undef TARGET_VALID_POINTER_MODE
11065 #define TARGET_VALID_POINTER_MODE s390_valid_pointer_mode
11066
11067 #undef TARGET_BUILD_BUILTIN_VA_LIST
11068 #define TARGET_BUILD_BUILTIN_VA_LIST s390_build_builtin_va_list
11069 #undef TARGET_EXPAND_BUILTIN_VA_START
11070 #define TARGET_EXPAND_BUILTIN_VA_START s390_va_start
11071 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
11072 #define TARGET_GIMPLIFY_VA_ARG_EXPR s390_gimplify_va_arg
11073
11074 #undef TARGET_PROMOTE_FUNCTION_MODE
11075 #define TARGET_PROMOTE_FUNCTION_MODE s390_promote_function_mode
11076 #undef TARGET_PASS_BY_REFERENCE
11077 #define TARGET_PASS_BY_REFERENCE s390_pass_by_reference
11078
11079 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
11080 #define TARGET_FUNCTION_OK_FOR_SIBCALL s390_function_ok_for_sibcall
11081 #undef TARGET_FUNCTION_ARG
11082 #define TARGET_FUNCTION_ARG s390_function_arg
11083 #undef TARGET_FUNCTION_ARG_ADVANCE
11084 #define TARGET_FUNCTION_ARG_ADVANCE s390_function_arg_advance
11085 #undef TARGET_FUNCTION_VALUE
11086 #define TARGET_FUNCTION_VALUE s390_function_value
11087 #undef TARGET_LIBCALL_VALUE
11088 #define TARGET_LIBCALL_VALUE s390_libcall_value
11089
11090 #undef TARGET_FIXED_CONDITION_CODE_REGS
11091 #define TARGET_FIXED_CONDITION_CODE_REGS s390_fixed_condition_code_regs
11092
11093 #undef TARGET_CC_MODES_COMPATIBLE
11094 #define TARGET_CC_MODES_COMPATIBLE s390_cc_modes_compatible
11095
11096 #undef TARGET_INVALID_WITHIN_DOLOOP
11097 #define TARGET_INVALID_WITHIN_DOLOOP hook_constcharptr_const_rtx_null
11098
11099 #ifdef HAVE_AS_TLS
11100 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
11101 #define TARGET_ASM_OUTPUT_DWARF_DTPREL s390_output_dwarf_dtprel
11102 #endif
11103
11104 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
11105 #undef TARGET_MANGLE_TYPE
11106 #define TARGET_MANGLE_TYPE s390_mangle_type
11107 #endif
11108
11109 #undef TARGET_SCALAR_MODE_SUPPORTED_P
11110 #define TARGET_SCALAR_MODE_SUPPORTED_P s390_scalar_mode_supported_p
11111
11112 #undef TARGET_PREFERRED_RELOAD_CLASS
11113 #define TARGET_PREFERRED_RELOAD_CLASS s390_preferred_reload_class
11114
11115 #undef TARGET_SECONDARY_RELOAD
11116 #define TARGET_SECONDARY_RELOAD s390_secondary_reload
11117
11118 #undef TARGET_LIBGCC_CMP_RETURN_MODE
11119 #define TARGET_LIBGCC_CMP_RETURN_MODE s390_libgcc_cmp_return_mode
11120
11121 #undef TARGET_LIBGCC_SHIFT_COUNT_MODE
11122 #define TARGET_LIBGCC_SHIFT_COUNT_MODE s390_libgcc_shift_count_mode
11123
11124 #undef TARGET_LEGITIMATE_ADDRESS_P
11125 #define TARGET_LEGITIMATE_ADDRESS_P s390_legitimate_address_p
11126
11127 #undef TARGET_LEGITIMATE_CONSTANT_P
11128 #define TARGET_LEGITIMATE_CONSTANT_P s390_legitimate_constant_p
11129
11130 #undef TARGET_LRA_P
11131 #define TARGET_LRA_P s390_lra_p
11132
11133 #undef TARGET_CAN_ELIMINATE
11134 #define TARGET_CAN_ELIMINATE s390_can_eliminate
11135
11136 #undef TARGET_CONDITIONAL_REGISTER_USAGE
11137 #define TARGET_CONDITIONAL_REGISTER_USAGE s390_conditional_register_usage
11138
11139 #undef TARGET_LOOP_UNROLL_ADJUST
11140 #define TARGET_LOOP_UNROLL_ADJUST s390_loop_unroll_adjust
11141
11142 #undef TARGET_ASM_TRAMPOLINE_TEMPLATE
11143 #define TARGET_ASM_TRAMPOLINE_TEMPLATE s390_asm_trampoline_template
11144 #undef TARGET_TRAMPOLINE_INIT
11145 #define TARGET_TRAMPOLINE_INIT s390_trampoline_init
11146
11147 #undef TARGET_UNWIND_WORD_MODE
11148 #define TARGET_UNWIND_WORD_MODE s390_unwind_word_mode
11149
11150 #undef TARGET_CANONICALIZE_COMPARISON
11151 #define TARGET_CANONICALIZE_COMPARISON s390_canonicalize_comparison
11152
11153 struct gcc_target targetm = TARGET_INITIALIZER;
11154
11155 #include "gt-s390.h"