re PR bootstrap/57609 (S/390 ESA mode bootstrap failure since r197266)
[gcc.git] / gcc / config / s390 / s390.c
1 /* Subroutines used for code generation on IBM S/390 and zSeries
2 Copyright (C) 1999-2013 Free Software Foundation, Inc.
3 Contributed by Hartmut Penner (hpenner@de.ibm.com) and
4 Ulrich Weigand (uweigand@de.ibm.com) and
5 Andreas Krebbel (Andreas.Krebbel@de.ibm.com).
6
7 This file is part of GCC.
8
9 GCC is free software; you can redistribute it and/or modify it under
10 the terms of the GNU General Public License as published by the Free
11 Software Foundation; either version 3, or (at your option) any later
12 version.
13
14 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
15 WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING3. If not see
21 <http://www.gnu.org/licenses/>. */
22
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "tm_p.h"
30 #include "regs.h"
31 #include "hard-reg-set.h"
32 #include "insn-config.h"
33 #include "conditions.h"
34 #include "output.h"
35 #include "insn-attr.h"
36 #include "flags.h"
37 #include "except.h"
38 #include "function.h"
39 #include "recog.h"
40 #include "expr.h"
41 #include "reload.h"
42 #include "diagnostic-core.h"
43 #include "basic-block.h"
44 #include "ggc.h"
45 #include "target.h"
46 #include "target-def.h"
47 #include "debug.h"
48 #include "langhooks.h"
49 #include "optabs.h"
50 #include "gimple.h"
51 #include "df.h"
52 #include "params.h"
53 #include "cfgloop.h"
54 #include "opts.h"
55
56 /* Define the specific costs for a given cpu. */
57
58 struct processor_costs
59 {
60 /* multiplication */
61 const int m; /* cost of an M instruction. */
62 const int mghi; /* cost of an MGHI instruction. */
63 const int mh; /* cost of an MH instruction. */
64 const int mhi; /* cost of an MHI instruction. */
65 const int ml; /* cost of an ML instruction. */
66 const int mr; /* cost of an MR instruction. */
67 const int ms; /* cost of an MS instruction. */
68 const int msg; /* cost of an MSG instruction. */
69 const int msgf; /* cost of an MSGF instruction. */
70 const int msgfr; /* cost of an MSGFR instruction. */
71 const int msgr; /* cost of an MSGR instruction. */
72 const int msr; /* cost of an MSR instruction. */
73 const int mult_df; /* cost of multiplication in DFmode. */
74 const int mxbr;
75 /* square root */
76 const int sqxbr; /* cost of square root in TFmode. */
77 const int sqdbr; /* cost of square root in DFmode. */
78 const int sqebr; /* cost of square root in SFmode. */
79 /* multiply and add */
80 const int madbr; /* cost of multiply and add in DFmode. */
81 const int maebr; /* cost of multiply and add in SFmode. */
82 /* division */
83 const int dxbr;
84 const int ddbr;
85 const int debr;
86 const int dlgr;
87 const int dlr;
88 const int dr;
89 const int dsgfr;
90 const int dsgr;
91 };
92
93 const struct processor_costs *s390_cost;
94
95 static const
96 struct processor_costs z900_cost =
97 {
98 COSTS_N_INSNS (5), /* M */
99 COSTS_N_INSNS (10), /* MGHI */
100 COSTS_N_INSNS (5), /* MH */
101 COSTS_N_INSNS (4), /* MHI */
102 COSTS_N_INSNS (5), /* ML */
103 COSTS_N_INSNS (5), /* MR */
104 COSTS_N_INSNS (4), /* MS */
105 COSTS_N_INSNS (15), /* MSG */
106 COSTS_N_INSNS (7), /* MSGF */
107 COSTS_N_INSNS (7), /* MSGFR */
108 COSTS_N_INSNS (10), /* MSGR */
109 COSTS_N_INSNS (4), /* MSR */
110 COSTS_N_INSNS (7), /* multiplication in DFmode */
111 COSTS_N_INSNS (13), /* MXBR */
112 COSTS_N_INSNS (136), /* SQXBR */
113 COSTS_N_INSNS (44), /* SQDBR */
114 COSTS_N_INSNS (35), /* SQEBR */
115 COSTS_N_INSNS (18), /* MADBR */
116 COSTS_N_INSNS (13), /* MAEBR */
117 COSTS_N_INSNS (134), /* DXBR */
118 COSTS_N_INSNS (30), /* DDBR */
119 COSTS_N_INSNS (27), /* DEBR */
120 COSTS_N_INSNS (220), /* DLGR */
121 COSTS_N_INSNS (34), /* DLR */
122 COSTS_N_INSNS (34), /* DR */
123 COSTS_N_INSNS (32), /* DSGFR */
124 COSTS_N_INSNS (32), /* DSGR */
125 };
126
127 static const
128 struct processor_costs z990_cost =
129 {
130 COSTS_N_INSNS (4), /* M */
131 COSTS_N_INSNS (2), /* MGHI */
132 COSTS_N_INSNS (2), /* MH */
133 COSTS_N_INSNS (2), /* MHI */
134 COSTS_N_INSNS (4), /* ML */
135 COSTS_N_INSNS (4), /* MR */
136 COSTS_N_INSNS (5), /* MS */
137 COSTS_N_INSNS (6), /* MSG */
138 COSTS_N_INSNS (4), /* MSGF */
139 COSTS_N_INSNS (4), /* MSGFR */
140 COSTS_N_INSNS (4), /* MSGR */
141 COSTS_N_INSNS (4), /* MSR */
142 COSTS_N_INSNS (1), /* multiplication in DFmode */
143 COSTS_N_INSNS (28), /* MXBR */
144 COSTS_N_INSNS (130), /* SQXBR */
145 COSTS_N_INSNS (66), /* SQDBR */
146 COSTS_N_INSNS (38), /* SQEBR */
147 COSTS_N_INSNS (1), /* MADBR */
148 COSTS_N_INSNS (1), /* MAEBR */
149 COSTS_N_INSNS (60), /* DXBR */
150 COSTS_N_INSNS (40), /* DDBR */
151 COSTS_N_INSNS (26), /* DEBR */
152 COSTS_N_INSNS (176), /* DLGR */
153 COSTS_N_INSNS (31), /* DLR */
154 COSTS_N_INSNS (31), /* DR */
155 COSTS_N_INSNS (31), /* DSGFR */
156 COSTS_N_INSNS (31), /* DSGR */
157 };
158
159 static const
160 struct processor_costs z9_109_cost =
161 {
162 COSTS_N_INSNS (4), /* M */
163 COSTS_N_INSNS (2), /* MGHI */
164 COSTS_N_INSNS (2), /* MH */
165 COSTS_N_INSNS (2), /* MHI */
166 COSTS_N_INSNS (4), /* ML */
167 COSTS_N_INSNS (4), /* MR */
168 COSTS_N_INSNS (5), /* MS */
169 COSTS_N_INSNS (6), /* MSG */
170 COSTS_N_INSNS (4), /* MSGF */
171 COSTS_N_INSNS (4), /* MSGFR */
172 COSTS_N_INSNS (4), /* MSGR */
173 COSTS_N_INSNS (4), /* MSR */
174 COSTS_N_INSNS (1), /* multiplication in DFmode */
175 COSTS_N_INSNS (28), /* MXBR */
176 COSTS_N_INSNS (130), /* SQXBR */
177 COSTS_N_INSNS (66), /* SQDBR */
178 COSTS_N_INSNS (38), /* SQEBR */
179 COSTS_N_INSNS (1), /* MADBR */
180 COSTS_N_INSNS (1), /* MAEBR */
181 COSTS_N_INSNS (60), /* DXBR */
182 COSTS_N_INSNS (40), /* DDBR */
183 COSTS_N_INSNS (26), /* DEBR */
184 COSTS_N_INSNS (30), /* DLGR */
185 COSTS_N_INSNS (23), /* DLR */
186 COSTS_N_INSNS (23), /* DR */
187 COSTS_N_INSNS (24), /* DSGFR */
188 COSTS_N_INSNS (24), /* DSGR */
189 };
190
191 static const
192 struct processor_costs z10_cost =
193 {
194 COSTS_N_INSNS (10), /* M */
195 COSTS_N_INSNS (10), /* MGHI */
196 COSTS_N_INSNS (10), /* MH */
197 COSTS_N_INSNS (10), /* MHI */
198 COSTS_N_INSNS (10), /* ML */
199 COSTS_N_INSNS (10), /* MR */
200 COSTS_N_INSNS (10), /* MS */
201 COSTS_N_INSNS (10), /* MSG */
202 COSTS_N_INSNS (10), /* MSGF */
203 COSTS_N_INSNS (10), /* MSGFR */
204 COSTS_N_INSNS (10), /* MSGR */
205 COSTS_N_INSNS (10), /* MSR */
206 COSTS_N_INSNS (1) , /* multiplication in DFmode */
207 COSTS_N_INSNS (50), /* MXBR */
208 COSTS_N_INSNS (120), /* SQXBR */
209 COSTS_N_INSNS (52), /* SQDBR */
210 COSTS_N_INSNS (38), /* SQEBR */
211 COSTS_N_INSNS (1), /* MADBR */
212 COSTS_N_INSNS (1), /* MAEBR */
213 COSTS_N_INSNS (111), /* DXBR */
214 COSTS_N_INSNS (39), /* DDBR */
215 COSTS_N_INSNS (32), /* DEBR */
216 COSTS_N_INSNS (160), /* DLGR */
217 COSTS_N_INSNS (71), /* DLR */
218 COSTS_N_INSNS (71), /* DR */
219 COSTS_N_INSNS (71), /* DSGFR */
220 COSTS_N_INSNS (71), /* DSGR */
221 };
222
223 static const
224 struct processor_costs z196_cost =
225 {
226 COSTS_N_INSNS (7), /* M */
227 COSTS_N_INSNS (5), /* MGHI */
228 COSTS_N_INSNS (5), /* MH */
229 COSTS_N_INSNS (5), /* MHI */
230 COSTS_N_INSNS (7), /* ML */
231 COSTS_N_INSNS (7), /* MR */
232 COSTS_N_INSNS (6), /* MS */
233 COSTS_N_INSNS (8), /* MSG */
234 COSTS_N_INSNS (6), /* MSGF */
235 COSTS_N_INSNS (6), /* MSGFR */
236 COSTS_N_INSNS (8), /* MSGR */
237 COSTS_N_INSNS (6), /* MSR */
238 COSTS_N_INSNS (1) , /* multiplication in DFmode */
239 COSTS_N_INSNS (40), /* MXBR B+40 */
240 COSTS_N_INSNS (100), /* SQXBR B+100 */
241 COSTS_N_INSNS (42), /* SQDBR B+42 */
242 COSTS_N_INSNS (28), /* SQEBR B+28 */
243 COSTS_N_INSNS (1), /* MADBR B */
244 COSTS_N_INSNS (1), /* MAEBR B */
245 COSTS_N_INSNS (101), /* DXBR B+101 */
246 COSTS_N_INSNS (29), /* DDBR */
247 COSTS_N_INSNS (22), /* DEBR */
248 COSTS_N_INSNS (160), /* DLGR cracked */
249 COSTS_N_INSNS (160), /* DLR cracked */
250 COSTS_N_INSNS (160), /* DR expanded */
251 COSTS_N_INSNS (160), /* DSGFR cracked */
252 COSTS_N_INSNS (160), /* DSGR cracked */
253 };
254
255 static const
256 struct processor_costs zEC12_cost =
257 {
258 COSTS_N_INSNS (7), /* M */
259 COSTS_N_INSNS (5), /* MGHI */
260 COSTS_N_INSNS (5), /* MH */
261 COSTS_N_INSNS (5), /* MHI */
262 COSTS_N_INSNS (7), /* ML */
263 COSTS_N_INSNS (7), /* MR */
264 COSTS_N_INSNS (6), /* MS */
265 COSTS_N_INSNS (8), /* MSG */
266 COSTS_N_INSNS (6), /* MSGF */
267 COSTS_N_INSNS (6), /* MSGFR */
268 COSTS_N_INSNS (8), /* MSGR */
269 COSTS_N_INSNS (6), /* MSR */
270 COSTS_N_INSNS (1) , /* multiplication in DFmode */
271 COSTS_N_INSNS (40), /* MXBR B+40 */
272 COSTS_N_INSNS (100), /* SQXBR B+100 */
273 COSTS_N_INSNS (42), /* SQDBR B+42 */
274 COSTS_N_INSNS (28), /* SQEBR B+28 */
275 COSTS_N_INSNS (1), /* MADBR B */
276 COSTS_N_INSNS (1), /* MAEBR B */
277 COSTS_N_INSNS (131), /* DXBR B+131 */
278 COSTS_N_INSNS (29), /* DDBR */
279 COSTS_N_INSNS (22), /* DEBR */
280 COSTS_N_INSNS (160), /* DLGR cracked */
281 COSTS_N_INSNS (160), /* DLR cracked */
282 COSTS_N_INSNS (160), /* DR expanded */
283 COSTS_N_INSNS (160), /* DSGFR cracked */
284 COSTS_N_INSNS (160), /* DSGR cracked */
285 };
286
287 extern int reload_completed;
288
289 /* Kept up to date using the SCHED_VARIABLE_ISSUE hook. */
290 static rtx last_scheduled_insn;
291
292 /* Structure used to hold the components of a S/390 memory
293 address. A legitimate address on S/390 is of the general
294 form
295 base + index + displacement
296 where any of the components is optional.
297
298 base and index are registers of the class ADDR_REGS,
299 displacement is an unsigned 12-bit immediate constant. */
300
301 struct s390_address
302 {
303 rtx base;
304 rtx indx;
305 rtx disp;
306 bool pointer;
307 bool literal_pool;
308 };
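
/* For example, the canonical address
     (plus (plus (reg %r2) (reg %r3)) (const_int 100))
   is decomposed by s390_decompose_address below into index %r2,
   base %r3 and displacement 100 (the decomposition may still swap
   base and index so that a pointer ends up as the base).  */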
309
310 /* The following structure is embedded in the machine-specific
311 part of struct function. */
312
313 struct GTY (()) s390_frame_layout
314 {
315 /* Offset within stack frame. */
316 HOST_WIDE_INT gprs_offset;
317 HOST_WIDE_INT f0_offset;
318 HOST_WIDE_INT f4_offset;
319 HOST_WIDE_INT f8_offset;
320 HOST_WIDE_INT backchain_offset;
321
322 /* Numbers of the first and last GPRs for which slots in the
323 register save area are reserved. */
324 int first_save_gpr_slot;
325 int last_save_gpr_slot;
326
328 /* Numbers of the first and last GPRs to be saved or restored. */
328 int first_save_gpr;
329 int first_restore_gpr;
330 int last_save_gpr;
331 int last_restore_gpr;
332
333 /* Bits standing for floating point registers. Set if the
334 respective register has to be saved. Starting with reg 16 (f0)
335 at the rightmost bit.
336 Bit 15 - 8 7 6 5 4 3 2 1 0
337 fpr 15 - 8 7 5 3 1 6 4 2 0
338 reg 31 - 24 23 22 21 20 19 18 17 16 */
339 unsigned int fpr_bitmap;
340
341 /* Number of floating point registers f8-f15 which must be saved. */
342 int high_fprs;
343
344 /* Set if return address needs to be saved.
345 This flag is set by s390_return_addr_rtx if it could not use
346 the initial value of r14 and therefore depends on r14 being
347 saved to the stack. */
348 bool save_return_addr_p;
349
350 /* Size of stack frame. */
351 HOST_WIDE_INT frame_size;
352 };
353
354 /* Define the structure for the machine field in struct function. */
355
356 struct GTY(()) machine_function
357 {
358 struct s390_frame_layout frame_layout;
359
360 /* Literal pool base register. */
361 rtx base_reg;
362
363 /* True if we may need to perform branch splitting. */
364 bool split_branches_pending_p;
365
366 /* Some local-dynamic TLS symbol name. */
367 const char *some_ld_name;
368
369 bool has_landing_pad_p;
370 };
371
372 /* A few accessor macros for struct cfun->machine->s390_frame_layout. */
373
374 #define cfun_frame_layout (cfun->machine->frame_layout)
375 #define cfun_save_high_fprs_p (!!cfun_frame_layout.high_fprs)
376 #define cfun_gprs_save_area_size ((cfun_frame_layout.last_save_gpr_slot - \
377 cfun_frame_layout.first_save_gpr_slot + 1) * UNITS_PER_LONG)
378 #define cfun_set_fpr_bit(BITNUM) (cfun->machine->frame_layout.fpr_bitmap |= \
379 (1 << (BITNUM)))
380 #define cfun_fpr_bit_p(BITNUM) (!!(cfun->machine->frame_layout.fpr_bitmap & \
381 (1 << (BITNUM))))
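
/* For example, given the fpr_bitmap layout above, cfun_set_fpr_bit (3)
   marks f6 as needing a save slot, and cfun_fpr_bit_p (8) tests whether
   f8 has to be saved.  */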
382
383 /* Number of GPRs and FPRs used for argument passing. */
384 #define GP_ARG_NUM_REG 5
385 #define FP_ARG_NUM_REG (TARGET_64BIT? 4 : 2)
386
387 /* A couple of shortcuts. */
388 #define CONST_OK_FOR_J(x) \
389 CONST_OK_FOR_CONSTRAINT_P((x), 'J', "J")
390 #define CONST_OK_FOR_K(x) \
391 CONST_OK_FOR_CONSTRAINT_P((x), 'K', "K")
392 #define CONST_OK_FOR_Os(x) \
393 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "Os")
394 #define CONST_OK_FOR_Op(x) \
395 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "Op")
396 #define CONST_OK_FOR_On(x) \
397 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "On")
398
399 #define REGNO_PAIR_OK(REGNO, MODE) \
400 (HARD_REGNO_NREGS ((REGNO), (MODE)) == 1 || !((REGNO) & 1))
401
402 /* The read-ahead distance, in bytes, of the dynamic branch prediction
403 unit on a z10 (or higher) CPU. */
404 #define PREDICT_DISTANCE (TARGET_Z10 ? 384 : 2048)
405
406 /* Return the alignment for LABEL. We default to the -falign-labels
407 value except for the literal pool base label. */
408 int
409 s390_label_align (rtx label)
410 {
411 rtx prev_insn = prev_active_insn (label);
412
413 if (prev_insn == NULL_RTX)
414 goto old;
415
416 prev_insn = single_set (prev_insn);
417
418 if (prev_insn == NULL_RTX)
419 goto old;
420
421 prev_insn = SET_SRC (prev_insn);
422
423 /* Don't align literal pool base labels. */
424 if (GET_CODE (prev_insn) == UNSPEC
425 && XINT (prev_insn, 1) == UNSPEC_MAIN_BASE)
426 return 0;
427
428 old:
429 return align_labels_log;
430 }
431
432 static enum machine_mode
433 s390_libgcc_cmp_return_mode (void)
434 {
435 return TARGET_64BIT ? DImode : SImode;
436 }
437
438 static enum machine_mode
439 s390_libgcc_shift_count_mode (void)
440 {
441 return TARGET_64BIT ? DImode : SImode;
442 }
443
444 static enum machine_mode
445 s390_unwind_word_mode (void)
446 {
447 return TARGET_64BIT ? DImode : SImode;
448 }
449
450 /* Return true if the back end supports mode MODE. */
451 static bool
452 s390_scalar_mode_supported_p (enum machine_mode mode)
453 {
454 /* In contrast to the default implementation, reject TImode constants
455 on 31-bit TARGET_ZARCH for ABI compliance. */
456 if (!TARGET_64BIT && TARGET_ZARCH && mode == TImode)
457 return false;
458
459 if (DECIMAL_FLOAT_MODE_P (mode))
460 return default_decimal_float_supported_p ();
461
462 return default_scalar_mode_supported_p (mode);
463 }
464
465 /* Set the has_landing_pad_p flag in struct machine_function to VALUE. */
466
467 void
468 s390_set_has_landing_pad_p (bool value)
469 {
470 cfun->machine->has_landing_pad_p = value;
471 }
472
473 /* If two condition code modes are compatible, return a condition code
474 mode which is compatible with both. Otherwise, return
475 VOIDmode. */
476
477 static enum machine_mode
478 s390_cc_modes_compatible (enum machine_mode m1, enum machine_mode m2)
479 {
480 if (m1 == m2)
481 return m1;
482
483 switch (m1)
484 {
485 case CCZmode:
486 if (m2 == CCUmode || m2 == CCTmode || m2 == CCZ1mode
487 || m2 == CCSmode || m2 == CCSRmode || m2 == CCURmode)
488 return m2;
489 return VOIDmode;
490
491 case CCSmode:
492 case CCUmode:
493 case CCTmode:
494 case CCSRmode:
495 case CCURmode:
496 case CCZ1mode:
497 if (m2 == CCZmode)
498 return m1;
499
500 return VOIDmode;
501
502 default:
503 return VOIDmode;
504 }
505 return VOIDmode;
506 }
507
508 /* Return true if SET either doesn't set the CC register, or else
509 the source and destination have matching CC modes and that
510 CC mode is at least as constrained as REQ_MODE. */
511
512 static bool
513 s390_match_ccmode_set (rtx set, enum machine_mode req_mode)
514 {
515 enum machine_mode set_mode;
516
517 gcc_assert (GET_CODE (set) == SET);
518
519 if (GET_CODE (SET_DEST (set)) != REG || !CC_REGNO_P (REGNO (SET_DEST (set))))
520 return 1;
521
522 set_mode = GET_MODE (SET_DEST (set));
523 switch (set_mode)
524 {
525 case CCSmode:
526 case CCSRmode:
527 case CCUmode:
528 case CCURmode:
529 case CCLmode:
530 case CCL1mode:
531 case CCL2mode:
532 case CCL3mode:
533 case CCT1mode:
534 case CCT2mode:
535 case CCT3mode:
536 if (req_mode != set_mode)
537 return 0;
538 break;
539
540 case CCZmode:
541 if (req_mode != CCSmode && req_mode != CCUmode && req_mode != CCTmode
542 && req_mode != CCSRmode && req_mode != CCURmode)
543 return 0;
544 break;
545
546 case CCAPmode:
547 case CCANmode:
548 if (req_mode != CCAmode)
549 return 0;
550 break;
551
552 default:
553 gcc_unreachable ();
554 }
555
556 return (GET_MODE (SET_SRC (set)) == set_mode);
557 }
558
559 /* Return true if every SET in INSN that sets the CC register
560 has source and destination with matching CC modes and that
561 CC mode is at least as constrained as REQ_MODE.
562 If REQ_MODE is VOIDmode, always return false. */
563
564 bool
565 s390_match_ccmode (rtx insn, enum machine_mode req_mode)
566 {
567 int i;
568
569 /* s390_tm_ccmode returns VOIDmode to indicate failure. */
570 if (req_mode == VOIDmode)
571 return false;
572
573 if (GET_CODE (PATTERN (insn)) == SET)
574 return s390_match_ccmode_set (PATTERN (insn), req_mode);
575
576 if (GET_CODE (PATTERN (insn)) == PARALLEL)
577 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
578 {
579 rtx set = XVECEXP (PATTERN (insn), 0, i);
580 if (GET_CODE (set) == SET)
581 if (!s390_match_ccmode_set (set, req_mode))
582 return false;
583 }
584
585 return true;
586 }
587
588 /* If a test-under-mask instruction can be used to implement
589 (compare (and ... OP1) OP2), return the CC mode required
590 to do that. Otherwise, return VOIDmode.
591 MIXED is true if the instruction can distinguish between
592 CC1 and CC2 for mixed selected bits (TMxx); it is false
593 if the instruction cannot (TM). */
594
595 enum machine_mode
596 s390_tm_ccmode (rtx op1, rtx op2, bool mixed)
597 {
598 int bit0, bit1;
599
600 /* ??? Fixme: should work on CONST_DOUBLE as well. */
601 if (GET_CODE (op1) != CONST_INT || GET_CODE (op2) != CONST_INT)
602 return VOIDmode;
603
604 /* Selected bits all zero: CC0.
605 e.g.: int a; if ((a & (16 + 128)) == 0) */
606 if (INTVAL (op2) == 0)
607 return CCTmode;
608
609 /* Selected bits all one: CC3.
610 e.g.: int a; if ((a & (16 + 128)) == 16 + 128) */
611 if (INTVAL (op2) == INTVAL (op1))
612 return CCT3mode;
613
614 /* Exactly two bits selected, mixed zeroes and ones: CC1 or CC2. e.g.:
615 int a;
616 if ((a & (16 + 128)) == 16) -> CCT1
617 if ((a & (16 + 128)) == 128) -> CCT2 */
618 if (mixed)
619 {
620 bit1 = exact_log2 (INTVAL (op2));
621 bit0 = exact_log2 (INTVAL (op1) ^ INTVAL (op2));
622 if (bit0 != -1 && bit1 != -1)
623 return bit0 > bit1 ? CCT1mode : CCT2mode;
624 }
625
626 return VOIDmode;
627 }
628
629 /* Given a comparison code OP (EQ, NE, etc.) and the operands
630 OP0 and OP1 of a COMPARE, return the mode to be used for the
631 comparison. */
632
633 enum machine_mode
634 s390_select_ccmode (enum rtx_code code, rtx op0, rtx op1)
635 {
636 switch (code)
637 {
638 case EQ:
639 case NE:
640 if ((GET_CODE (op0) == NEG || GET_CODE (op0) == ABS)
641 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
642 return CCAPmode;
643 if (GET_CODE (op0) == PLUS && GET_CODE (XEXP (op0, 1)) == CONST_INT
644 && CONST_OK_FOR_K (INTVAL (XEXP (op0, 1))))
645 return CCAPmode;
646 if ((GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
647 || GET_CODE (op1) == NEG)
648 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
649 return CCLmode;
650
651 if (GET_CODE (op0) == AND)
652 {
653 /* Check whether we can potentially do it via TM. */
654 enum machine_mode ccmode;
655 ccmode = s390_tm_ccmode (XEXP (op0, 1), op1, 1);
656 if (ccmode != VOIDmode)
657 {
658 /* Relax CCTmode to CCZmode to allow fall-back to AND
659 if that turns out to be beneficial. */
660 return ccmode == CCTmode ? CCZmode : ccmode;
661 }
662 }
663
664 if (register_operand (op0, HImode)
665 && GET_CODE (op1) == CONST_INT
666 && (INTVAL (op1) == -1 || INTVAL (op1) == 65535))
667 return CCT3mode;
668 if (register_operand (op0, QImode)
669 && GET_CODE (op1) == CONST_INT
670 && (INTVAL (op1) == -1 || INTVAL (op1) == 255))
671 return CCT3mode;
672
673 return CCZmode;
674
675 case LE:
676 case LT:
677 case GE:
678 case GT:
679 /* The only overflow condition of NEG and ABS happens when
680 INT_MIN is used as the operand: the result then stays negative.
681 So we have an overflow from a positive value to a negative.
682 Using CCAP mode the resulting cc can be used for comparisons. */
683 if ((GET_CODE (op0) == NEG || GET_CODE (op0) == ABS)
684 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
685 return CCAPmode;
686
687 /* If constants are involved in an add instruction, it is possible to
688 use the resulting cc for comparisons with zero. Knowing the sign of
689 the constant, the overflow behavior becomes predictable. e.g.:
690 int a, b; if ((b = a + c) > 0)
691 with c as a constant value: c < 0 -> CCAN and c >= 0 -> CCAP */
692 if (GET_CODE (op0) == PLUS && GET_CODE (XEXP (op0, 1)) == CONST_INT
693 && (CONST_OK_FOR_K (INTVAL (XEXP (op0, 1)))
694 || (CONST_OK_FOR_CONSTRAINT_P (INTVAL (XEXP (op0, 1)), 'O', "Os")
695 /* Avoid INT32_MIN on 32 bit. */
696 && (!TARGET_ZARCH || INTVAL (XEXP (op0, 1)) != -0x7fffffff - 1))))
697 {
698 if (INTVAL (XEXP((op0), 1)) < 0)
699 return CCANmode;
700 else
701 return CCAPmode;
702 }
703 /* Fall through. */
704 case UNORDERED:
705 case ORDERED:
706 case UNEQ:
707 case UNLE:
708 case UNLT:
709 case UNGE:
710 case UNGT:
711 case LTGT:
712 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
713 && GET_CODE (op1) != CONST_INT)
714 return CCSRmode;
715 return CCSmode;
716
717 case LTU:
718 case GEU:
719 if (GET_CODE (op0) == PLUS
720 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
721 return CCL1mode;
722
723 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
724 && GET_CODE (op1) != CONST_INT)
725 return CCURmode;
726 return CCUmode;
727
728 case LEU:
729 case GTU:
730 if (GET_CODE (op0) == MINUS
731 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
732 return CCL2mode;
733
734 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
735 && GET_CODE (op1) != CONST_INT)
736 return CCURmode;
737 return CCUmode;
738
739 default:
740 gcc_unreachable ();
741 }
742 }
743
744 /* Replace the comparison OP0 CODE OP1 by a semantically equivalent one
745 that we can implement more efficiently. */
746
747 static void
748 s390_canonicalize_comparison (int *code, rtx *op0, rtx *op1,
749 bool op0_preserve_value)
750 {
751 if (op0_preserve_value)
752 return;
753
754 /* Convert ZERO_EXTRACT back to AND to enable TM patterns. */
755 if ((*code == EQ || *code == NE)
756 && *op1 == const0_rtx
757 && GET_CODE (*op0) == ZERO_EXTRACT
758 && GET_CODE (XEXP (*op0, 1)) == CONST_INT
759 && GET_CODE (XEXP (*op0, 2)) == CONST_INT
760 && SCALAR_INT_MODE_P (GET_MODE (XEXP (*op0, 0))))
761 {
762 rtx inner = XEXP (*op0, 0);
763 HOST_WIDE_INT modesize = GET_MODE_BITSIZE (GET_MODE (inner));
764 HOST_WIDE_INT len = INTVAL (XEXP (*op0, 1));
765 HOST_WIDE_INT pos = INTVAL (XEXP (*op0, 2));
766
767 if (len > 0 && len < modesize
768 && pos >= 0 && pos + len <= modesize
769 && modesize <= HOST_BITS_PER_WIDE_INT)
770 {
771 unsigned HOST_WIDE_INT block;
772 block = ((unsigned HOST_WIDE_INT) 1 << len) - 1;
773 block <<= modesize - pos - len;
774
775 *op0 = gen_rtx_AND (GET_MODE (inner), inner,
776 gen_int_mode (block, GET_MODE (inner)));
777 }
778 }
779
780 /* Narrow AND of memory against immediate to enable TM. */
781 if ((*code == EQ || *code == NE)
782 && *op1 == const0_rtx
783 && GET_CODE (*op0) == AND
784 && GET_CODE (XEXP (*op0, 1)) == CONST_INT
785 && SCALAR_INT_MODE_P (GET_MODE (XEXP (*op0, 0))))
786 {
787 rtx inner = XEXP (*op0, 0);
788 rtx mask = XEXP (*op0, 1);
789
790 /* Ignore paradoxical SUBREGs if all extra bits are masked out. */
791 if (GET_CODE (inner) == SUBREG
792 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (inner)))
793 && (GET_MODE_SIZE (GET_MODE (inner))
794 >= GET_MODE_SIZE (GET_MODE (SUBREG_REG (inner))))
795 && ((INTVAL (mask)
796 & GET_MODE_MASK (GET_MODE (inner))
797 & ~GET_MODE_MASK (GET_MODE (SUBREG_REG (inner))))
798 == 0))
799 inner = SUBREG_REG (inner);
800
801 /* Do not change volatile MEMs. */
802 if (MEM_P (inner) && !MEM_VOLATILE_P (inner))
803 {
804 int part = s390_single_part (XEXP (*op0, 1),
805 GET_MODE (inner), QImode, 0);
806 if (part >= 0)
807 {
808 mask = gen_int_mode (s390_extract_part (mask, QImode, 0), QImode);
809 inner = adjust_address_nv (inner, QImode, part);
810 *op0 = gen_rtx_AND (QImode, inner, mask);
811 }
812 }
813 }
814
815 /* Narrow comparisons against 0xffff to HImode if possible. */
816 if ((*code == EQ || *code == NE)
817 && GET_CODE (*op1) == CONST_INT
818 && INTVAL (*op1) == 0xffff
819 && SCALAR_INT_MODE_P (GET_MODE (*op0))
820 && (nonzero_bits (*op0, GET_MODE (*op0))
821 & ~(unsigned HOST_WIDE_INT) 0xffff) == 0)
822 {
823 *op0 = gen_lowpart (HImode, *op0);
824 *op1 = constm1_rtx;
825 }
826
827 /* Remove redundant UNSPEC_CCU_TO_INT conversions if possible. */
828 if (GET_CODE (*op0) == UNSPEC
829 && XINT (*op0, 1) == UNSPEC_CCU_TO_INT
830 && XVECLEN (*op0, 0) == 1
831 && GET_MODE (XVECEXP (*op0, 0, 0)) == CCUmode
832 && GET_CODE (XVECEXP (*op0, 0, 0)) == REG
833 && REGNO (XVECEXP (*op0, 0, 0)) == CC_REGNUM
834 && *op1 == const0_rtx)
835 {
836 enum rtx_code new_code = UNKNOWN;
837 switch (*code)
838 {
839 case EQ: new_code = EQ; break;
840 case NE: new_code = NE; break;
841 case LT: new_code = GTU; break;
842 case GT: new_code = LTU; break;
843 case LE: new_code = GEU; break;
844 case GE: new_code = LEU; break;
845 default: break;
846 }
847
848 if (new_code != UNKNOWN)
849 {
850 *op0 = XVECEXP (*op0, 0, 0);
851 *code = new_code;
852 }
853 }
854
855 /* Remove redundant UNSPEC_CCZ_TO_INT conversions if possible. */
856 if (GET_CODE (*op0) == UNSPEC
857 && XINT (*op0, 1) == UNSPEC_CCZ_TO_INT
858 && XVECLEN (*op0, 0) == 1
859 && GET_MODE (XVECEXP (*op0, 0, 0)) == CCZmode
860 && GET_CODE (XVECEXP (*op0, 0, 0)) == REG
861 && REGNO (XVECEXP (*op0, 0, 0)) == CC_REGNUM
862 && *op1 == const0_rtx)
863 {
864 enum rtx_code new_code = UNKNOWN;
865 switch (*code)
866 {
867 case EQ: new_code = EQ; break;
868 case NE: new_code = NE; break;
869 default: break;
870 }
871
872 if (new_code != UNKNOWN)
873 {
874 *op0 = XVECEXP (*op0, 0, 0);
875 *code = new_code;
876 }
877 }
878
879 /* Simplify cascaded EQ, NE with const0_rtx. */
880 if ((*code == NE || *code == EQ)
881 && (GET_CODE (*op0) == EQ || GET_CODE (*op0) == NE)
882 && GET_MODE (*op0) == SImode
883 && GET_MODE (XEXP (*op0, 0)) == CCZ1mode
884 && REG_P (XEXP (*op0, 0))
885 && XEXP (*op0, 1) == const0_rtx
886 && *op1 == const0_rtx)
887 {
888 if ((*code == EQ && GET_CODE (*op0) == NE)
889 || (*code == NE && GET_CODE (*op0) == EQ))
890 *code = EQ;
891 else
892 *code = NE;
893 *op0 = XEXP (*op0, 0);
894 }
895
896 /* Prefer register over memory as first operand. */
897 if (MEM_P (*op0) && REG_P (*op1))
898 {
899 rtx tem = *op0; *op0 = *op1; *op1 = tem;
900 *code = (int)swap_condition ((enum rtx_code)*code);
901 }
902 }
903
904 /* Emit a compare instruction suitable to implement the comparison
905 OP0 CODE OP1. Return the correct condition RTL to be placed in
906 the IF_THEN_ELSE of the conditional branch testing the result. */
907
908 rtx
909 s390_emit_compare (enum rtx_code code, rtx op0, rtx op1)
910 {
911 enum machine_mode mode = s390_select_ccmode (code, op0, op1);
912 rtx cc;
913
914 /* Do not output a redundant compare instruction if a compare_and_swap
915 pattern already computed the result and the machine modes are compatible. */
916 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
917 {
918 gcc_assert (s390_cc_modes_compatible (GET_MODE (op0), mode)
919 == GET_MODE (op0));
920 cc = op0;
921 }
922 else
923 {
924 cc = gen_rtx_REG (mode, CC_REGNUM);
925 emit_insn (gen_rtx_SET (VOIDmode, cc, gen_rtx_COMPARE (mode, op0, op1)));
926 }
927
928 return gen_rtx_fmt_ee (code, VOIDmode, cc, const0_rtx);
929 }
930
931 /* Emit a SImode compare-and-swap instruction that sets MEM to NEW_RTX if
932 the current contents of MEM (returned in OLD) match CMP.
933 Return the correct condition RTL to be placed in the IF_THEN_ELSE of the
934 conditional branch testing the result. */
935
936 static rtx
937 s390_emit_compare_and_swap (enum rtx_code code, rtx old, rtx mem,
938 rtx cmp, rtx new_rtx)
939 {
940 emit_insn (gen_atomic_compare_and_swapsi_internal (old, mem, cmp, new_rtx));
941 return s390_emit_compare (code, gen_rtx_REG (CCZ1mode, CC_REGNUM),
942 const0_rtx);
943 }
944
945 /* Emit a jump instruction to TARGET. If COND is NULL_RTX, emit an
946 unconditional jump, else a conditional jump under condition COND. */
947
948 void
949 s390_emit_jump (rtx target, rtx cond)
950 {
951 rtx insn;
952
953 target = gen_rtx_LABEL_REF (VOIDmode, target);
954 if (cond)
955 target = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, target, pc_rtx);
956
957 insn = gen_rtx_SET (VOIDmode, pc_rtx, target);
958 emit_jump_insn (insn);
959 }
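
/* A minimal sketch of how the two helpers above are typically combined
   by an expander (illustrative only; real expanders take their operands
   from the pattern being expanded):

     rtx label = gen_label_rtx ();
     rtx cond = s390_emit_compare (GT, op0, op1);
     s390_emit_jump (label, cond);     branch to LABEL if OP0 > OP1
     ...
     emit_label (label);
*/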
960
961 /* Return branch condition mask to implement a branch
962 specified by CODE. Return -1 for invalid comparisons. */
963
964 int
965 s390_branch_condition_mask (rtx code)
966 {
967 const int CC0 = 1 << 3;
968 const int CC1 = 1 << 2;
969 const int CC2 = 1 << 1;
970 const int CC3 = 1 << 0;
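
  /* For example, an EQ test in CCZmode maps to CC0 == 8 (binary 1000);
     s390_branch_condition_mnemonic translates that mask to "e", while
     NE yields CC1 | CC2 | CC3 == 7, i.e. "ne".  */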
971
972 gcc_assert (GET_CODE (XEXP (code, 0)) == REG);
973 gcc_assert (REGNO (XEXP (code, 0)) == CC_REGNUM);
974 gcc_assert (XEXP (code, 1) == const0_rtx);
975
976 switch (GET_MODE (XEXP (code, 0)))
977 {
978 case CCZmode:
979 case CCZ1mode:
980 switch (GET_CODE (code))
981 {
982 case EQ: return CC0;
983 case NE: return CC1 | CC2 | CC3;
984 default: return -1;
985 }
986 break;
987
988 case CCT1mode:
989 switch (GET_CODE (code))
990 {
991 case EQ: return CC1;
992 case NE: return CC0 | CC2 | CC3;
993 default: return -1;
994 }
995 break;
996
997 case CCT2mode:
998 switch (GET_CODE (code))
999 {
1000 case EQ: return CC2;
1001 case NE: return CC0 | CC1 | CC3;
1002 default: return -1;
1003 }
1004 break;
1005
1006 case CCT3mode:
1007 switch (GET_CODE (code))
1008 {
1009 case EQ: return CC3;
1010 case NE: return CC0 | CC1 | CC2;
1011 default: return -1;
1012 }
1013 break;
1014
1015 case CCLmode:
1016 switch (GET_CODE (code))
1017 {
1018 case EQ: return CC0 | CC2;
1019 case NE: return CC1 | CC3;
1020 default: return -1;
1021 }
1022 break;
1023
1024 case CCL1mode:
1025 switch (GET_CODE (code))
1026 {
1027 case LTU: return CC2 | CC3; /* carry */
1028 case GEU: return CC0 | CC1; /* no carry */
1029 default: return -1;
1030 }
1031 break;
1032
1033 case CCL2mode:
1034 switch (GET_CODE (code))
1035 {
1036 case GTU: return CC0 | CC1; /* borrow */
1037 case LEU: return CC2 | CC3; /* no borrow */
1038 default: return -1;
1039 }
1040 break;
1041
1042 case CCL3mode:
1043 switch (GET_CODE (code))
1044 {
1045 case EQ: return CC0 | CC2;
1046 case NE: return CC1 | CC3;
1047 case LTU: return CC1;
1048 case GTU: return CC3;
1049 case LEU: return CC1 | CC2;
1050 case GEU: return CC2 | CC3;
1051 default: return -1;
1052 }
1053
1054 case CCUmode:
1055 switch (GET_CODE (code))
1056 {
1057 case EQ: return CC0;
1058 case NE: return CC1 | CC2 | CC3;
1059 case LTU: return CC1;
1060 case GTU: return CC2;
1061 case LEU: return CC0 | CC1;
1062 case GEU: return CC0 | CC2;
1063 default: return -1;
1064 }
1065 break;
1066
1067 case CCURmode:
1068 switch (GET_CODE (code))
1069 {
1070 case EQ: return CC0;
1071 case NE: return CC2 | CC1 | CC3;
1072 case LTU: return CC2;
1073 case GTU: return CC1;
1074 case LEU: return CC0 | CC2;
1075 case GEU: return CC0 | CC1;
1076 default: return -1;
1077 }
1078 break;
1079
1080 case CCAPmode:
1081 switch (GET_CODE (code))
1082 {
1083 case EQ: return CC0;
1084 case NE: return CC1 | CC2 | CC3;
1085 case LT: return CC1 | CC3;
1086 case GT: return CC2;
1087 case LE: return CC0 | CC1 | CC3;
1088 case GE: return CC0 | CC2;
1089 default: return -1;
1090 }
1091 break;
1092
1093 case CCANmode:
1094 switch (GET_CODE (code))
1095 {
1096 case EQ: return CC0;
1097 case NE: return CC1 | CC2 | CC3;
1098 case LT: return CC1;
1099 case GT: return CC2 | CC3;
1100 case LE: return CC0 | CC1;
1101 case GE: return CC0 | CC2 | CC3;
1102 default: return -1;
1103 }
1104 break;
1105
1106 case CCSmode:
1107 switch (GET_CODE (code))
1108 {
1109 case EQ: return CC0;
1110 case NE: return CC1 | CC2 | CC3;
1111 case LT: return CC1;
1112 case GT: return CC2;
1113 case LE: return CC0 | CC1;
1114 case GE: return CC0 | CC2;
1115 case UNORDERED: return CC3;
1116 case ORDERED: return CC0 | CC1 | CC2;
1117 case UNEQ: return CC0 | CC3;
1118 case UNLT: return CC1 | CC3;
1119 case UNGT: return CC2 | CC3;
1120 case UNLE: return CC0 | CC1 | CC3;
1121 case UNGE: return CC0 | CC2 | CC3;
1122 case LTGT: return CC1 | CC2;
1123 default: return -1;
1124 }
1125 break;
1126
1127 case CCSRmode:
1128 switch (GET_CODE (code))
1129 {
1130 case EQ: return CC0;
1131 case NE: return CC2 | CC1 | CC3;
1132 case LT: return CC2;
1133 case GT: return CC1;
1134 case LE: return CC0 | CC2;
1135 case GE: return CC0 | CC1;
1136 case UNORDERED: return CC3;
1137 case ORDERED: return CC0 | CC2 | CC1;
1138 case UNEQ: return CC0 | CC3;
1139 case UNLT: return CC2 | CC3;
1140 case UNGT: return CC1 | CC3;
1141 case UNLE: return CC0 | CC2 | CC3;
1142 case UNGE: return CC0 | CC1 | CC3;
1143 case LTGT: return CC2 | CC1;
1144 default: return -1;
1145 }
1146 break;
1147
1148 default:
1149 return -1;
1150 }
1151 }
1152
1153
1154 /* Return branch condition mask to implement a compare and branch
1155 specified by CODE. Return -1 for invalid comparisons. */
1156
1157 int
1158 s390_compare_and_branch_condition_mask (rtx code)
1159 {
1160 const int CC0 = 1 << 3;
1161 const int CC1 = 1 << 2;
1162 const int CC2 = 1 << 1;
1163
1164 switch (GET_CODE (code))
1165 {
1166 case EQ:
1167 return CC0;
1168 case NE:
1169 return CC1 | CC2;
1170 case LT:
1171 case LTU:
1172 return CC1;
1173 case GT:
1174 case GTU:
1175 return CC2;
1176 case LE:
1177 case LEU:
1178 return CC0 | CC1;
1179 case GE:
1180 case GEU:
1181 return CC0 | CC2;
1182 default:
1183 gcc_unreachable ();
1184 }
1185 return -1;
1186 }
1187
1188 /* If INV is false, return assembler mnemonic string to implement
1189 a branch specified by CODE. If INV is true, return mnemonic
1190 for the corresponding inverted branch. */
1191
1192 static const char *
1193 s390_branch_condition_mnemonic (rtx code, int inv)
1194 {
1195 int mask;
1196
1197 static const char *const mnemonic[16] =
1198 {
1199 NULL, "o", "h", "nle",
1200 "l", "nhe", "lh", "ne",
1201 "e", "nlh", "he", "nl",
1202 "le", "nh", "no", NULL
1203 };
1204
1205 if (GET_CODE (XEXP (code, 0)) == REG
1206 && REGNO (XEXP (code, 0)) == CC_REGNUM
1207 && XEXP (code, 1) == const0_rtx)
1208 mask = s390_branch_condition_mask (code);
1209 else
1210 mask = s390_compare_and_branch_condition_mask (code);
1211
1212 gcc_assert (mask >= 0);
1213
1214 if (inv)
1215 mask ^= 15;
1216
1217 gcc_assert (mask >= 1 && mask <= 14);
1218
1219 return mnemonic[mask];
1220 }
1221
1222 /* Return the part of OP which has a value different from DEF.
1223 The size of the part is determined by MODE.
1224 Use this function only if you already know that OP really
1225 contains such a part. */
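/* For example (assuming a 64-bit HOST_WIDE_INT), extracting the HImode
   parts of (const_int 0x12340000) with DEF == 0 returns 0x1234, the
   only halfword that differs from the default value.  */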
1226
1227 unsigned HOST_WIDE_INT
1228 s390_extract_part (rtx op, enum machine_mode mode, int def)
1229 {
1230 unsigned HOST_WIDE_INT value = 0;
1231 int max_parts = HOST_BITS_PER_WIDE_INT / GET_MODE_BITSIZE (mode);
1232 int part_bits = GET_MODE_BITSIZE (mode);
1233 unsigned HOST_WIDE_INT part_mask
1234 = ((unsigned HOST_WIDE_INT)1 << part_bits) - 1;
1235 int i;
1236
1237 for (i = 0; i < max_parts; i++)
1238 {
1239 if (i == 0)
1240 value = (unsigned HOST_WIDE_INT) INTVAL (op);
1241 else
1242 value >>= part_bits;
1243
1244 if ((value & part_mask) != (def & part_mask))
1245 return value & part_mask;
1246 }
1247
1248 gcc_unreachable ();
1249 }
1250
1251 /* If OP is an integer constant of mode MODE with exactly one
1252 part of mode PART_MODE unequal to DEF, return the number of that
1253 part. Otherwise, return -1. */
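/* For example, for OP == (const_int 0x0000ff00) in SImode with QImode
   parts and DEF == 0, the result is 2: counting QImode parts from the
   most significant end, only part 2 differs from the default.  */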
1254
1255 int
1256 s390_single_part (rtx op,
1257 enum machine_mode mode,
1258 enum machine_mode part_mode,
1259 int def)
1260 {
1261 unsigned HOST_WIDE_INT value = 0;
1262 int n_parts = GET_MODE_SIZE (mode) / GET_MODE_SIZE (part_mode);
1263 unsigned HOST_WIDE_INT part_mask
1264 = ((unsigned HOST_WIDE_INT)1 << GET_MODE_BITSIZE (part_mode)) - 1;
1265 int i, part = -1;
1266
1267 if (GET_CODE (op) != CONST_INT)
1268 return -1;
1269
1270 for (i = 0; i < n_parts; i++)
1271 {
1272 if (i == 0)
1273 value = (unsigned HOST_WIDE_INT) INTVAL (op);
1274 else
1275 value >>= GET_MODE_BITSIZE (part_mode);
1276
1277 if ((value & part_mask) != (def & part_mask))
1278 {
1279 if (part != -1)
1280 return -1;
1281 else
1282 part = i;
1283 }
1284 }
1285 return part == -1 ? -1 : n_parts - 1 - part;
1286 }
1287
1288 /* Return true if IN contains a contiguous bitfield in the lower SIZE
1289 bits and no other bits are set in IN. POS and LENGTH can be used
1290 to obtain the start position and the length of the bitfield.
1291
1292 POS gives the position of the first bit of the bitfield counting
1293 from the lowest order bit starting with zero. In order to use this
1294 value for S/390 instructions this has to be converted to "bits big
1295 endian" style. */
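/* For example, IN == 0x78 with SIZE == 8 yields *POS == 3 and
   *LENGTH == 4, since bits 3..6 form the contiguous field; converting
   this to the S/390 bit numbering is left to the caller.  */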
1296
1297 bool
1298 s390_contiguous_bitmask_p (unsigned HOST_WIDE_INT in, int size,
1299 int *pos, int *length)
1300 {
1301 int tmp_pos = 0;
1302 int tmp_length = 0;
1303 int i;
1304 unsigned HOST_WIDE_INT mask = 1ULL;
1305 bool contiguous = false;
1306
1307 for (i = 0; i < size; mask <<= 1, i++)
1308 {
1309 if (contiguous)
1310 {
1311 if (mask & in)
1312 tmp_length++;
1313 else
1314 break;
1315 }
1316 else
1317 {
1318 if (mask & in)
1319 {
1320 contiguous = true;
1321 tmp_length++;
1322 }
1323 else
1324 tmp_pos++;
1325 }
1326 }
1327
1328 if (!tmp_length)
1329 return false;
1330
1331 /* Calculate a mask for all bits beyond the contiguous bits. */
1332 mask = (-1LL & ~(((1ULL << (tmp_length + tmp_pos - 1)) << 1) - 1));
1333
1334 if (mask & in)
1335 return false;
1336
1337 if (tmp_length + tmp_pos - 1 > size)
1338 return false;
1339
1340 if (length)
1341 *length = tmp_length;
1342
1343 if (pos)
1344 *pos = tmp_pos;
1345
1346 return true;
1347 }
1348
1349 /* Check whether a rotate of ROTL followed by an AND of CONTIG is
1350 equivalent to a shift followed by the AND. In particular, CONTIG
1351 should not overlap the (rotated) bit 0/bit 63 gap. Negative values
1352 for ROTL indicate a rotate to the right. */
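/* For example, with BITSIZE == 64 and CONTIG == 0xff00 (an 8-bit field
   starting at bit 8), any left rotate of up to 8 bits is OK: the bits
   rotated in from the top end up below the mask and are cleared by
   the AND.  */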
1353
1354 bool
1355 s390_extzv_shift_ok (int bitsize, int rotl, unsigned HOST_WIDE_INT contig)
1356 {
1357 int pos, len;
1358 bool ok;
1359
1360 ok = s390_contiguous_bitmask_p (contig, bitsize, &pos, &len);
1361 gcc_assert (ok);
1362
1363 return ((rotl >= 0 && rotl <= pos)
1364 || (rotl < 0 && -rotl <= bitsize - len - pos));
1365 }
1366
1367 /* Check whether we can (and want to) split a double-word
1368 move in mode MODE from SRC to DST into two single-word
1369 moves, moving the subword FIRST_SUBWORD first. */
1370
1371 bool
1372 s390_split_ok_p (rtx dst, rtx src, enum machine_mode mode, int first_subword)
1373 {
1374 /* Floating point registers cannot be split. */
1375 if (FP_REG_P (src) || FP_REG_P (dst))
1376 return false;
1377
1378 /* We don't need to split if operands are directly accessible. */
1379 if (s_operand (src, mode) || s_operand (dst, mode))
1380 return false;
1381
1382 /* Non-offsettable memory references cannot be split. */
1383 if ((GET_CODE (src) == MEM && !offsettable_memref_p (src))
1384 || (GET_CODE (dst) == MEM && !offsettable_memref_p (dst)))
1385 return false;
1386
1387 /* Moving the first subword must not clobber a register
1388 needed to move the second subword. */
1389 if (register_operand (dst, mode))
1390 {
1391 rtx subreg = operand_subword (dst, first_subword, 0, mode);
1392 if (reg_overlap_mentioned_p (subreg, src))
1393 return false;
1394 }
1395
1396 return true;
1397 }
1398
1399 /* Return true if it can be proven that [MEM1, MEM1 + SIZE]
1400 and [MEM2, MEM2 + SIZE] do overlap and false
1401 otherwise. */
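/* For example, if the address of MEM2 minus the address of MEM1
   simplifies to (const_int 4) and SIZE is 8, the two blocks overlap and
   true is returned; with a delta of 8 or more (or -8 or less) they are
   disjoint and false is returned.  */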
1402
1403 bool
1404 s390_overlap_p (rtx mem1, rtx mem2, HOST_WIDE_INT size)
1405 {
1406 rtx addr1, addr2, addr_delta;
1407 HOST_WIDE_INT delta;
1408
1409 if (GET_CODE (mem1) != MEM || GET_CODE (mem2) != MEM)
1410 return true;
1411
1412 if (size == 0)
1413 return false;
1414
1415 addr1 = XEXP (mem1, 0);
1416 addr2 = XEXP (mem2, 0);
1417
1418 addr_delta = simplify_binary_operation (MINUS, Pmode, addr2, addr1);
1419
1420 /* This overlapping check is used by peepholes merging memory block operations.
1421 Overlapping operations would otherwise be recognized by the S/390 hardware
1422 and would fall back to a slower implementation. Allowing overlapping
1423 operations would lead to slow code but not to wrong code. Therefore we are
1424 somewhat optimistic if we cannot prove that the memory blocks are
1425 overlapping.
1426 That's why we return false here although this may accept operations on
1427 overlapping memory areas. */
1428 if (!addr_delta || GET_CODE (addr_delta) != CONST_INT)
1429 return false;
1430
1431 delta = INTVAL (addr_delta);
1432
1433 if (delta == 0
1434 || (delta > 0 && delta < size)
1435 || (delta < 0 && -delta < size))
1436 return true;
1437
1438 return false;
1439 }
1440
1441 /* Check whether the address of memory reference MEM2 equals exactly
1442 the address of memory reference MEM1 plus DELTA. Return true if
1443 we can prove this to be the case, false otherwise. */
1444
1445 bool
1446 s390_offset_p (rtx mem1, rtx mem2, rtx delta)
1447 {
1448 rtx addr1, addr2, addr_delta;
1449
1450 if (GET_CODE (mem1) != MEM || GET_CODE (mem2) != MEM)
1451 return false;
1452
1453 addr1 = XEXP (mem1, 0);
1454 addr2 = XEXP (mem2, 0);
1455
1456 addr_delta = simplify_binary_operation (MINUS, Pmode, addr2, addr1);
1457 if (!addr_delta || !rtx_equal_p (addr_delta, delta))
1458 return false;
1459
1460 return true;
1461 }
1462
1463 /* Expand logical operator CODE in mode MODE with operands OPERANDS. */
1464
1465 void
1466 s390_expand_logical_operator (enum rtx_code code, enum machine_mode mode,
1467 rtx *operands)
1468 {
1469 enum machine_mode wmode = mode;
1470 rtx dst = operands[0];
1471 rtx src1 = operands[1];
1472 rtx src2 = operands[2];
1473 rtx op, clob, tem;
1474
1475 /* If we cannot handle the operation directly, use a temp register. */
1476 if (!s390_logical_operator_ok_p (operands))
1477 dst = gen_reg_rtx (mode);
1478
1479 /* QImode and HImode patterns make sense only if we have a destination
1480 in memory. Otherwise perform the operation in SImode. */
1481 if ((mode == QImode || mode == HImode) && GET_CODE (dst) != MEM)
1482 wmode = SImode;
1483
1484 /* Widen operands if required. */
1485 if (mode != wmode)
1486 {
1487 if (GET_CODE (dst) == SUBREG
1488 && (tem = simplify_subreg (wmode, dst, mode, 0)) != 0)
1489 dst = tem;
1490 else if (REG_P (dst))
1491 dst = gen_rtx_SUBREG (wmode, dst, 0);
1492 else
1493 dst = gen_reg_rtx (wmode);
1494
1495 if (GET_CODE (src1) == SUBREG
1496 && (tem = simplify_subreg (wmode, src1, mode, 0)) != 0)
1497 src1 = tem;
1498 else if (GET_MODE (src1) != VOIDmode)
1499 src1 = gen_rtx_SUBREG (wmode, force_reg (mode, src1), 0);
1500
1501 if (GET_CODE (src2) == SUBREG
1502 && (tem = simplify_subreg (wmode, src2, mode, 0)) != 0)
1503 src2 = tem;
1504 else if (GET_MODE (src2) != VOIDmode)
1505 src2 = gen_rtx_SUBREG (wmode, force_reg (mode, src2), 0);
1506 }
1507
1508 /* Emit the instruction. */
1509 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_ee (code, wmode, src1, src2));
1510 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
1511 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
1512
1513 /* Fix up the destination if needed. */
1514 if (dst != operands[0])
1515 emit_move_insn (operands[0], gen_lowpart (mode, dst));
1516 }
1517
1518 /* Check whether OPERANDS are OK for a logical operation (AND, IOR, XOR). */
1519
1520 bool
1521 s390_logical_operator_ok_p (rtx *operands)
1522 {
1523 /* If the destination operand is in memory, it needs to coincide
1524 with one of the source operands. After reload, it has to be
1525 the first source operand. */
1526 if (GET_CODE (operands[0]) == MEM)
1527 return rtx_equal_p (operands[0], operands[1])
1528 || (!reload_completed && rtx_equal_p (operands[0], operands[2]));
1529
1530 return true;
1531 }
1532
1533 /* Narrow logical operation CODE of memory operand MEMOP with immediate
1534 operand IMMOP to switch from SS to SI type instructions. */
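/* For example, an SImode AND of a memory operand with (const_int -256)
   (i.e. 0xffffff00) narrows to a QImode AND of byte 3 of the operand
   with the immediate 0x00, which clears the least significant byte.  */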
1535
1536 void
1537 s390_narrow_logical_operator (enum rtx_code code, rtx *memop, rtx *immop)
1538 {
1539 int def = code == AND ? -1 : 0;
1540 HOST_WIDE_INT mask;
1541 int part;
1542
1543 gcc_assert (GET_CODE (*memop) == MEM);
1544 gcc_assert (!MEM_VOLATILE_P (*memop));
1545
1546 mask = s390_extract_part (*immop, QImode, def);
1547 part = s390_single_part (*immop, GET_MODE (*memop), QImode, def);
1548 gcc_assert (part >= 0);
1549
1550 *memop = adjust_address (*memop, QImode, part);
1551 *immop = gen_int_mode (mask, QImode);
1552 }
1553
1554
1555 /* How to allocate a 'struct machine_function'. */
1556
1557 static struct machine_function *
1558 s390_init_machine_status (void)
1559 {
1560 return ggc_alloc_cleared_machine_function ();
1561 }
1562
1563 static void
1564 s390_option_override (void)
1565 {
1566 /* Set up function hooks. */
1567 init_machine_status = s390_init_machine_status;
1568
1569 /* Architecture mode defaults according to ABI. */
1570 if (!(target_flags_explicit & MASK_ZARCH))
1571 {
1572 if (TARGET_64BIT)
1573 target_flags |= MASK_ZARCH;
1574 else
1575 target_flags &= ~MASK_ZARCH;
1576 }
1577
1578 /* Set the march default in case it hasn't been specified on
1579 the command line. */
1580 if (s390_arch == PROCESSOR_max)
1581 {
1582 s390_arch_string = TARGET_ZARCH? "z900" : "g5";
1583 s390_arch = TARGET_ZARCH ? PROCESSOR_2064_Z900 : PROCESSOR_9672_G5;
1584 s390_arch_flags = processor_flags_table[(int)s390_arch];
1585 }
1586
1587 /* Determine processor to tune for. */
1588 if (s390_tune == PROCESSOR_max)
1589 {
1590 s390_tune = s390_arch;
1591 s390_tune_flags = s390_arch_flags;
1592 }
1593
1594 /* Sanity checks. */
1595 if (TARGET_ZARCH && !TARGET_CPU_ZARCH)
1596 error ("z/Architecture mode not supported on %s", s390_arch_string);
1597 if (TARGET_64BIT && !TARGET_ZARCH)
1598 error ("64-bit ABI not supported in ESA/390 mode");
1599
1600 /* Use hardware DFP if available and not explicitly disabled by
1601 the user. E.g. with -m31 -march=z10 -mzarch. */
1602 if (!(target_flags_explicit & MASK_HARD_DFP) && TARGET_DFP)
1603 target_flags |= MASK_HARD_DFP;
1604
1605 if (TARGET_HARD_DFP && !TARGET_DFP)
1606 {
1607 if (target_flags_explicit & MASK_HARD_DFP)
1608 {
1609 if (!TARGET_CPU_DFP)
1610 error ("hardware decimal floating point instructions"
1611 " not available on %s", s390_arch_string);
1612 if (!TARGET_ZARCH)
1613 error ("hardware decimal floating point instructions"
1614 " not available in ESA/390 mode");
1615 }
1616 else
1617 target_flags &= ~MASK_HARD_DFP;
1618 }
1619
1620 if ((target_flags_explicit & MASK_SOFT_FLOAT) && TARGET_SOFT_FLOAT)
1621 {
1622 if ((target_flags_explicit & MASK_HARD_DFP) && TARGET_HARD_DFP)
1623 error ("-mhard-dfp can%'t be used in conjunction with -msoft-float");
1624
1625 target_flags &= ~MASK_HARD_DFP;
1626 }
1627
1628 /* Set processor cost function. */
1629 switch (s390_tune)
1630 {
1631 case PROCESSOR_2084_Z990:
1632 s390_cost = &z990_cost;
1633 break;
1634 case PROCESSOR_2094_Z9_109:
1635 s390_cost = &z9_109_cost;
1636 break;
1637 case PROCESSOR_2097_Z10:
1638 s390_cost = &z10_cost;
1639 break;
1640 case PROCESSOR_2817_Z196:
1641 s390_cost = &z196_cost;
1642 break;
1643 case PROCESSOR_2827_ZEC12:
1644 s390_cost = &zEC12_cost;
1645 break;
1646 default:
1647 s390_cost = &z900_cost;
1648 }
1649
1650 if (TARGET_BACKCHAIN && TARGET_PACKED_STACK && TARGET_HARD_FLOAT)
1651 error ("-mbackchain -mpacked-stack -mhard-float are not supported "
1652 "in combination");
1653
1654 if (s390_stack_size)
1655 {
1656 if (s390_stack_guard >= s390_stack_size)
1657 error ("stack size must be greater than the stack guard value");
1658 else if (s390_stack_size > 1 << 16)
1659 error ("stack size must not be greater than 64k");
1660 }
1661 else if (s390_stack_guard)
1662 error ("-mstack-guard implies use of -mstack-size");
1663
1664 #ifdef TARGET_DEFAULT_LONG_DOUBLE_128
1665 if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
1666 target_flags |= MASK_LONG_DOUBLE_128;
1667 #endif
1668
1669 if (s390_tune == PROCESSOR_2097_Z10
1670 || s390_tune == PROCESSOR_2817_Z196
1671 || s390_tune == PROCESSOR_2827_ZEC12)
1672 {
1673 maybe_set_param_value (PARAM_MAX_UNROLLED_INSNS, 100,
1674 global_options.x_param_values,
1675 global_options_set.x_param_values);
1676 maybe_set_param_value (PARAM_MAX_UNROLL_TIMES, 32,
1677 global_options.x_param_values,
1678 global_options_set.x_param_values);
1679 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEELED_INSNS, 2000,
1680 global_options.x_param_values,
1681 global_options_set.x_param_values);
1682 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEEL_TIMES, 64,
1683 global_options.x_param_values,
1684 global_options_set.x_param_values);
1685 }
1686
1687 maybe_set_param_value (PARAM_MAX_PENDING_LIST_LENGTH, 256,
1688 global_options.x_param_values,
1689 global_options_set.x_param_values);
1690 /* Values for loop prefetching. */
1691 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE, 256,
1692 global_options.x_param_values,
1693 global_options_set.x_param_values);
1694 maybe_set_param_value (PARAM_L1_CACHE_SIZE, 128,
1695 global_options.x_param_values,
1696 global_options_set.x_param_values);
1697 /* s390 has more than 2 cache levels and their sizes are much larger.
1698 Since we are always running virtualized, assume that we only get a
1699 small part of the caches above L1. */
1700 maybe_set_param_value (PARAM_L2_CACHE_SIZE, 1500,
1701 global_options.x_param_values,
1702 global_options_set.x_param_values);
1703 maybe_set_param_value (PARAM_PREFETCH_MIN_INSN_TO_MEM_RATIO, 2,
1704 global_options.x_param_values,
1705 global_options_set.x_param_values);
1706 maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES, 6,
1707 global_options.x_param_values,
1708 global_options_set.x_param_values);
1709
1710 /* This cannot reside in s390_option_optimization_table since HAVE_prefetch
1711 requires the arch flags to be evaluated already. Since prefetching
1712 is beneficial on s390, we enable it if available. */
1713 if (flag_prefetch_loop_arrays < 0 && HAVE_prefetch && optimize >= 3)
1714 flag_prefetch_loop_arrays = 1;
1715
1716 /* Use the alternative scheduling-pressure algorithm by default. */
1717 maybe_set_param_value (PARAM_SCHED_PRESSURE_ALGORITHM, 2,
1718 global_options.x_param_values,
1719 global_options_set.x_param_values);
1720
1721 if (TARGET_TPF)
1722 {
1723 /* Don't emit DWARF3/4 unless specifically selected. The TPF
1724 debuggers do not yet support DWARF 3/4. */
1725 if (!global_options_set.x_dwarf_strict)
1726 dwarf_strict = 1;
1727 if (!global_options_set.x_dwarf_version)
1728 dwarf_version = 2;
1729 }
1730 }
1731
1732 /* Map for smallest class containing reg regno. */
1733
1734 const enum reg_class regclass_map[FIRST_PSEUDO_REGISTER] =
1735 { GENERAL_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
1736 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
1737 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
1738 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
1739 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
1740 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
1741 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
1742 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
1743 ADDR_REGS, CC_REGS, ADDR_REGS, ADDR_REGS,
1744 ACCESS_REGS, ACCESS_REGS
1745 };
1746
1747 /* Return attribute type of insn. */
1748
1749 static enum attr_type
1750 s390_safe_attr_type (rtx insn)
1751 {
1752 if (recog_memoized (insn) >= 0)
1753 return get_attr_type (insn);
1754 else
1755 return TYPE_NONE;
1756 }
1757
1758 /* Return true if DISP is a valid short displacement. */
1759
1760 static bool
1761 s390_short_displacement (rtx disp)
1762 {
1763 /* No displacement is OK. */
1764 if (!disp)
1765 return true;
1766
1767 /* Without the long displacement facility we don't need to
1768 distinguish between long and short displacements. */
1769 if (!TARGET_LONG_DISPLACEMENT)
1770 return true;
1771
1772 /* Integer displacement in range. */
1773 if (GET_CODE (disp) == CONST_INT)
1774 return INTVAL (disp) >= 0 && INTVAL (disp) < 4096;
1775
1776 /* GOT offset is not OK; the GOT can be large. */
1777 if (GET_CODE (disp) == CONST
1778 && GET_CODE (XEXP (disp, 0)) == UNSPEC
1779 && (XINT (XEXP (disp, 0), 1) == UNSPEC_GOT
1780 || XINT (XEXP (disp, 0), 1) == UNSPEC_GOTNTPOFF))
1781 return false;
1782
1783 /* All other symbolic constants are literal pool references,
1784 which are OK as the literal pool must be small. */
1785 if (GET_CODE (disp) == CONST)
1786 return true;
1787
1788 return false;
1789 }
1790
1791 /* Decompose a RTL expression ADDR for a memory address into
1792 its components, returned in OUT.
1793
1794 Returns false if ADDR is not a valid memory address, true
1795 otherwise. If OUT is NULL, don't return the components,
1796 but check for validity only.
1797
1798 Note: Only addresses in canonical form are recognized.
1799 LEGITIMIZE_ADDRESS should convert non-canonical forms to the
1800 canonical form so that they will be recognized. */
1801
1802 static int
1803 s390_decompose_address (rtx addr, struct s390_address *out)
1804 {
1805 HOST_WIDE_INT offset = 0;
1806 rtx base = NULL_RTX;
1807 rtx indx = NULL_RTX;
1808 rtx disp = NULL_RTX;
1809 rtx orig_disp;
1810 bool pointer = false;
1811 bool base_ptr = false;
1812 bool indx_ptr = false;
1813 bool literal_pool = false;
1814
1815 /* We may need to substitute the literal pool base register into the address
1816 below. However, at this point we do not know which register is going to
1817 be used as base, so we substitute the arg pointer register. This is going
1818 to be treated as holding a pointer below -- it shouldn't be used for any
1819 other purpose. */
1820 rtx fake_pool_base = gen_rtx_REG (Pmode, ARG_POINTER_REGNUM);
1821
1822 /* Decompose address into base + index + displacement. */
1823
1824 if (GET_CODE (addr) == REG || GET_CODE (addr) == UNSPEC)
1825 base = addr;
1826
1827 else if (GET_CODE (addr) == PLUS)
1828 {
1829 rtx op0 = XEXP (addr, 0);
1830 rtx op1 = XEXP (addr, 1);
1831 enum rtx_code code0 = GET_CODE (op0);
1832 enum rtx_code code1 = GET_CODE (op1);
1833
1834 if (code0 == REG || code0 == UNSPEC)
1835 {
1836 if (code1 == REG || code1 == UNSPEC)
1837 {
1838 indx = op0; /* index + base */
1839 base = op1;
1840 }
1841
1842 else
1843 {
1844 base = op0; /* base + displacement */
1845 disp = op1;
1846 }
1847 }
1848
1849 else if (code0 == PLUS)
1850 {
1851 indx = XEXP (op0, 0); /* index + base + disp */
1852 base = XEXP (op0, 1);
1853 disp = op1;
1854 }
1855
1856 else
1857 {
1858 return false;
1859 }
1860 }
1861
1862 else
1863 disp = addr; /* displacement */
1864
1865 /* Extract integer part of displacement. */
1866 orig_disp = disp;
1867 if (disp)
1868 {
1869 if (GET_CODE (disp) == CONST_INT)
1870 {
1871 offset = INTVAL (disp);
1872 disp = NULL_RTX;
1873 }
1874 else if (GET_CODE (disp) == CONST
1875 && GET_CODE (XEXP (disp, 0)) == PLUS
1876 && GET_CODE (XEXP (XEXP (disp, 0), 1)) == CONST_INT)
1877 {
1878 offset = INTVAL (XEXP (XEXP (disp, 0), 1));
1879 disp = XEXP (XEXP (disp, 0), 0);
1880 }
1881 }
1882
1883 /* Strip off CONST here to avoid special case tests later. */
1884 if (disp && GET_CODE (disp) == CONST)
1885 disp = XEXP (disp, 0);
1886
1887 /* We can convert literal pool addresses to
1888 displacements by basing them off the base register. */
1889 if (disp && GET_CODE (disp) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (disp))
1890 {
1891 /* Either base or index must be free to hold the base register. */
1892 if (!base)
1893 base = fake_pool_base, literal_pool = true;
1894 else if (!indx)
1895 indx = fake_pool_base, literal_pool = true;
1896 else
1897 return false;
1898
1899 /* Mark up the displacement. */
1900 disp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, disp),
1901 UNSPEC_LTREL_OFFSET);
1902 }
1903
1904 /* Validate base register. */
1905 if (base)
1906 {
1907 if (GET_CODE (base) == UNSPEC)
1908 switch (XINT (base, 1))
1909 {
1910 case UNSPEC_LTREF:
1911 if (!disp)
1912 disp = gen_rtx_UNSPEC (Pmode,
1913 gen_rtvec (1, XVECEXP (base, 0, 0)),
1914 UNSPEC_LTREL_OFFSET);
1915 else
1916 return false;
1917
1918 base = XVECEXP (base, 0, 1);
1919 break;
1920
1921 case UNSPEC_LTREL_BASE:
1922 if (XVECLEN (base, 0) == 1)
1923 base = fake_pool_base, literal_pool = true;
1924 else
1925 base = XVECEXP (base, 0, 1);
1926 break;
1927
1928 default:
1929 return false;
1930 }
1931
1932 if (!REG_P (base)
1933 || (GET_MODE (base) != SImode
1934 && GET_MODE (base) != Pmode))
1935 return false;
1936
1937 if (REGNO (base) == STACK_POINTER_REGNUM
1938 || REGNO (base) == FRAME_POINTER_REGNUM
1939 || ((reload_completed || reload_in_progress)
1940 && frame_pointer_needed
1941 && REGNO (base) == HARD_FRAME_POINTER_REGNUM)
1942 || REGNO (base) == ARG_POINTER_REGNUM
1943 || (flag_pic
1944 && REGNO (base) == PIC_OFFSET_TABLE_REGNUM))
1945 pointer = base_ptr = true;
1946
1947 if ((reload_completed || reload_in_progress)
1948 && base == cfun->machine->base_reg)
1949 pointer = base_ptr = literal_pool = true;
1950 }
1951
1952 /* Validate index register. */
1953 if (indx)
1954 {
1955 if (GET_CODE (indx) == UNSPEC)
1956 switch (XINT (indx, 1))
1957 {
1958 case UNSPEC_LTREF:
1959 if (!disp)
1960 disp = gen_rtx_UNSPEC (Pmode,
1961 gen_rtvec (1, XVECEXP (indx, 0, 0)),
1962 UNSPEC_LTREL_OFFSET);
1963 else
1964 return false;
1965
1966 indx = XVECEXP (indx, 0, 1);
1967 break;
1968
1969 case UNSPEC_LTREL_BASE:
1970 if (XVECLEN (indx, 0) == 1)
1971 indx = fake_pool_base, literal_pool = true;
1972 else
1973 indx = XVECEXP (indx, 0, 1);
1974 break;
1975
1976 default:
1977 return false;
1978 }
1979
1980 if (!REG_P (indx)
1981 || (GET_MODE (indx) != SImode
1982 && GET_MODE (indx) != Pmode))
1983 return false;
1984
1985 if (REGNO (indx) == STACK_POINTER_REGNUM
1986 || REGNO (indx) == FRAME_POINTER_REGNUM
1987 || ((reload_completed || reload_in_progress)
1988 && frame_pointer_needed
1989 && REGNO (indx) == HARD_FRAME_POINTER_REGNUM)
1990 || REGNO (indx) == ARG_POINTER_REGNUM
1991 || (flag_pic
1992 && REGNO (indx) == PIC_OFFSET_TABLE_REGNUM))
1993 pointer = indx_ptr = true;
1994
1995 if ((reload_completed || reload_in_progress)
1996 && indx == cfun->machine->base_reg)
1997 pointer = indx_ptr = literal_pool = true;
1998 }
1999
2000 /* Prefer to use pointer as base, not index. */
2001 if (base && indx && !base_ptr
2002 && (indx_ptr || (!REG_POINTER (base) && REG_POINTER (indx))))
2003 {
2004 rtx tmp = base;
2005 base = indx;
2006 indx = tmp;
2007 }
2008
2009 /* Validate displacement. */
2010 if (!disp)
2011 {
2012 /* If virtual registers are involved, the displacement will change later
2013 anyway as the virtual registers get eliminated. This could make a
2014 valid displacement invalid, but it is more likely to make an invalid
2015 displacement valid, because we sometimes access the register save area
2016 via negative offsets to one of those registers.
2017 Thus we don't check the displacement for validity here. If after
2018 elimination the displacement turns out to be invalid after all,
2019 this is fixed up by reload in any case. */
2020 /* LRA always keeps displacements up to date, and we need to know
2021 that the displacement is correct throughout LRA, not only at the
2022 final elimination. */
2023 if (lra_in_progress
2024 || (base != arg_pointer_rtx
2025 && indx != arg_pointer_rtx
2026 && base != return_address_pointer_rtx
2027 && indx != return_address_pointer_rtx
2028 && base != frame_pointer_rtx
2029 && indx != frame_pointer_rtx
2030 && base != virtual_stack_vars_rtx
2031 && indx != virtual_stack_vars_rtx))
2032 if (!DISP_IN_RANGE (offset))
2033 return false;
2034 }
2035 else
2036 {
2037 /* All the special cases are pointers. */
2038 pointer = true;
2039
2040 /* In the small-PIC case, the linker converts @GOT
2041 and @GOTNTPOFF offsets to possible displacements. */
2042 if (GET_CODE (disp) == UNSPEC
2043 && (XINT (disp, 1) == UNSPEC_GOT
2044 || XINT (disp, 1) == UNSPEC_GOTNTPOFF)
2045 && flag_pic == 1)
2046 {
2047 ;
2048 }
2049
2050 /* Accept pool label offsets. */
2051 else if (GET_CODE (disp) == UNSPEC
2052 && XINT (disp, 1) == UNSPEC_POOL_OFFSET)
2053 ;
2054
2055 /* Accept literal pool references. */
2056 else if (GET_CODE (disp) == UNSPEC
2057 && XINT (disp, 1) == UNSPEC_LTREL_OFFSET)
2058 {
2059 /* In case CSE pulled a non-literal-pool reference out of
2060 the pool we have to reject the address. This is
2061 especially important when loading the GOT pointer on
2062 non-zarch CPUs. In this case the literal pool contains an
2063 lt-relative offset to the _GLOBAL_OFFSET_TABLE_ label which
2064 will most likely exceed the displacement range. */
2065 if (GET_CODE (XVECEXP (disp, 0, 0)) != SYMBOL_REF
2066 || !CONSTANT_POOL_ADDRESS_P (XVECEXP (disp, 0, 0)))
2067 return false;
2068
2069 orig_disp = gen_rtx_CONST (Pmode, disp);
2070 if (offset)
2071 {
2072 /* If we have an offset, make sure it does not
2073 exceed the size of the constant pool entry. */
2074 rtx sym = XVECEXP (disp, 0, 0);
2075 if (offset >= GET_MODE_SIZE (get_pool_mode (sym)))
2076 return false;
2077
2078 orig_disp = plus_constant (Pmode, orig_disp, offset);
2079 }
2080 }
2081
2082 else
2083 return false;
2084 }
2085
2086 if (!base && !indx)
2087 pointer = true;
2088
2089 if (out)
2090 {
2091 out->base = base;
2092 out->indx = indx;
2093 out->disp = orig_disp;
2094 out->pointer = pointer;
2095 out->literal_pool = literal_pool;
2096 }
2097
2098 return true;
2099 }
2100
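/* Illustrative sketch (not part of the original file, kept out of the
   build on purpose): how a plain base + displacement address is
   expected to decompose.  The helper name is hypothetical and only
   documents the fields filled in by s390_decompose_address.  */
#if 0
static void
s390_decompose_address_example (void)
{
  /* Build (plus (reg Pmode 6) (const_int 4000)), i.e. 4000(%r6).  */
  rtx addr = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, 6), GEN_INT (4000));
  struct s390_address ad;

  if (s390_decompose_address (addr, &ad))
    {
      /* ad.base is (reg 6), ad.indx is NULL_RTX, ad.disp is
	 (const_int 4000); ad.pointer and ad.literal_pool stay false
	 because 4000 is within the displacement range and register 6
	 is none of the special pointer registers.  */
    }
}
#endif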
2101 /* Decompose an RTL expression OP for a shift count into its components,
2102 and return the base register in BASE and the offset in OFFSET.
2103
2104 Return true if OP is a valid shift count, false if not. */
2105
2106 bool
2107 s390_decompose_shift_count (rtx op, rtx *base, HOST_WIDE_INT *offset)
2108 {
2109 HOST_WIDE_INT off = 0;
2110
2111 /* We can have an integer constant, an address register,
2112 or a sum of the two. */
2113 if (GET_CODE (op) == CONST_INT)
2114 {
2115 off = INTVAL (op);
2116 op = NULL_RTX;
2117 }
2118 if (op && GET_CODE (op) == PLUS && GET_CODE (XEXP (op, 1)) == CONST_INT)
2119 {
2120 off = INTVAL (XEXP (op, 1));
2121 op = XEXP (op, 0);
2122 }
2123 while (op && GET_CODE (op) == SUBREG)
2124 op = SUBREG_REG (op);
2125
2126 if (op && GET_CODE (op) != REG)
2127 return false;
2128
2129 if (offset)
2130 *offset = off;
2131 if (base)
2132 *base = op;
2133
2134 return true;
2135 }
2136
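/* Illustrative sketch (hypothetical helper, not compiled): the two
   shapes s390_decompose_shift_count is expected to handle.  */
#if 0
static void
s390_decompose_shift_count_example (void)
{
  rtx base;
  HOST_WIDE_INT off;

  /* (plus (reg 3) (const_int 7)) decomposes into base = (reg 3),
     offset = 7.  */
  rtx op = gen_rtx_PLUS (SImode, gen_rtx_REG (SImode, 3), GEN_INT (7));
  if (s390_decompose_shift_count (op, &base, &off))
    gcc_assert (REG_P (base) && off == 7);

  /* A bare constant leaves the base empty.  */
  if (s390_decompose_shift_count (GEN_INT (12), &base, &off))
    gcc_assert (base == NULL_RTX && off == 12);
}
#endif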
2137
2138 /* Return true if OP is a valid address without index. */
2139
2140 bool
2141 s390_legitimate_address_without_index_p (rtx op)
2142 {
2143 struct s390_address addr;
2144
2145 if (!s390_decompose_address (XEXP (op, 0), &addr))
2146 return false;
2147 if (addr.indx)
2148 return false;
2149
2150 return true;
2151 }
2152
2153
2154 /* Return TRUE if ADDR is an operand valid for a load/store relative
2155 instruction. Be aware that the alignment of the operand needs to
2156 be checked separately.
2157 Valid addresses are single references or a sum of a reference and a
2158 constant integer. Return these parts in SYMREF and ADDEND. You can
2159 pass NULL in SYMREF and/or ADDEND if you are not interested in these
2160 values. Literal pool references are *not* considered symbol
2161 references. */
2162
2163 static bool
2164 s390_loadrelative_operand_p (rtx addr, rtx *symref, HOST_WIDE_INT *addend)
2165 {
2166 HOST_WIDE_INT tmpaddend = 0;
2167
2168 if (GET_CODE (addr) == CONST)
2169 addr = XEXP (addr, 0);
2170
2171 if (GET_CODE (addr) == PLUS)
2172 {
2173 if (!CONST_INT_P (XEXP (addr, 1)))
2174 return false;
2175
2176 tmpaddend = INTVAL (XEXP (addr, 1));
2177 addr = XEXP (addr, 0);
2178 }
2179
2180 if ((GET_CODE (addr) == SYMBOL_REF && !CONSTANT_POOL_ADDRESS_P (addr))
2181 || (GET_CODE (addr) == UNSPEC
2182 && (XINT (addr, 1) == UNSPEC_GOTENT
2183 || (TARGET_CPU_ZARCH && XINT (addr, 1) == UNSPEC_PLT))))
2184 {
2185 if (symref)
2186 *symref = addr;
2187 if (addend)
2188 *addend = tmpaddend;
2189
2190 return true;
2191 }
2192 return false;
2193 }
2194
2195 /* Return true if the address in OP is valid for constraint letter C
2196 if wrapped in a MEM rtx. Set LIT_POOL_OK to true if literal
2197 pool MEMs should be accepted. Only the Q, R, S, T constraint
2198 letters are allowed for C. */
2199
2200 static int
2201 s390_check_qrst_address (char c, rtx op, bool lit_pool_ok)
2202 {
2203 struct s390_address addr;
2204 bool decomposed = false;
2205
2206 /* This check makes sure that no symbolic addresses (except literal
2207 pool references) are accepted by the R or T constraints. */
2208 if (s390_loadrelative_operand_p (op, NULL, NULL))
2209 return 0;
2210
2211 /* Ensure literal pool references are only accepted if LIT_POOL_OK. */
2212 if (!lit_pool_ok)
2213 {
2214 if (!s390_decompose_address (op, &addr))
2215 return 0;
2216 if (addr.literal_pool)
2217 return 0;
2218 decomposed = true;
2219 }
2220
2221 switch (c)
2222 {
2223 case 'Q': /* no index short displacement */
2224 if (!decomposed && !s390_decompose_address (op, &addr))
2225 return 0;
2226 if (addr.indx)
2227 return 0;
2228 if (!s390_short_displacement (addr.disp))
2229 return 0;
2230 break;
2231
2232 case 'R': /* with index short displacement */
2233 if (TARGET_LONG_DISPLACEMENT)
2234 {
2235 if (!decomposed && !s390_decompose_address (op, &addr))
2236 return 0;
2237 if (!s390_short_displacement (addr.disp))
2238 return 0;
2239 }
2240 /* Any invalid address here will be fixed up by reload,
2241 so accept it for the most generic constraint. */
2242 break;
2243
2244 case 'S': /* no index long displacement */
2245 if (!TARGET_LONG_DISPLACEMENT)
2246 return 0;
2247 if (!decomposed && !s390_decompose_address (op, &addr))
2248 return 0;
2249 if (addr.indx)
2250 return 0;
2251 if (s390_short_displacement (addr.disp))
2252 return 0;
2253 break;
2254
2255 case 'T': /* with index long displacement */
2256 if (!TARGET_LONG_DISPLACEMENT)
2257 return 0;
2258 /* Any invalid address here will be fixed up by reload,
2259 so accept it for the most generic constraint. */
2260 if ((decomposed || s390_decompose_address (op, &addr))
2261 && s390_short_displacement (addr.disp))
2262 return 0;
2263 break;
2264 default:
2265 return 0;
2266 }
2267 return 1;
2268 }
2269
2270
2271 /* Evaluates constraint strings described by the regular expression
2272 ([A|B|Z](Q|R|S|T))|U|W|Y and returns 1 if OP is a valid operand for
2273 the constraint given in STR, and 0 otherwise. */
2274
2275 int
2276 s390_mem_constraint (const char *str, rtx op)
2277 {
2278 char c = str[0];
2279
2280 switch (c)
2281 {
2282 case 'A':
2283 /* Check for offsettable variants of memory constraints. */
2284 if (!MEM_P (op) || MEM_VOLATILE_P (op))
2285 return 0;
2286 if ((reload_completed || reload_in_progress)
2287 ? !offsettable_memref_p (op) : !offsettable_nonstrict_memref_p (op))
2288 return 0;
2289 return s390_check_qrst_address (str[1], XEXP (op, 0), true);
2290 case 'B':
2291 /* Check for non-literal-pool variants of memory constraints. */
2292 if (!MEM_P (op))
2293 return 0;
2294 return s390_check_qrst_address (str[1], XEXP (op, 0), false);
2295 case 'Q':
2296 case 'R':
2297 case 'S':
2298 case 'T':
2299 if (GET_CODE (op) != MEM)
2300 return 0;
2301 return s390_check_qrst_address (c, XEXP (op, 0), true);
2302 case 'U':
2303 return (s390_check_qrst_address ('Q', op, true)
2304 || s390_check_qrst_address ('R', op, true));
2305 case 'W':
2306 return (s390_check_qrst_address ('S', op, true)
2307 || s390_check_qrst_address ('T', op, true));
2308 case 'Y':
2309 /* Simply check for the basic form of a shift count. Reload will
2310 take care of making sure we have a proper base register. */
2311 if (!s390_decompose_shift_count (op, NULL, NULL))
2312 return 0;
2313 break;
2314 case 'Z':
2315 return s390_check_qrst_address (str[1], op, true);
2316 default:
2317 return 0;
2318 }
2319 return 1;
2320 }
2321
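/* Illustrative sketch (hypothetical, not compiled): how a two-letter
   constraint string reaches s390_mem_constraint.  Matching an operand
   against "BQ" requires a MEM whose address survives
   s390_decompose_address with no index register, a short displacement
   and no literal-pool base (the 'B' prefix passes lit_pool_ok = false
   down to s390_check_qrst_address).  */
#if 0
static bool
satisfies_BQ_example (rtx op)
{
  return s390_mem_constraint ("BQ", op) != 0;
}
#endif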
2322
2323 /* Evaluates constraint strings starting with letter O. Input
2324 parameter C is the letter following the "O" in the constraint
2325 string. Returns 1 if VALUE meets the respective constraint and 0
2326 otherwise. */
2327
2328 int
2329 s390_O_constraint_str (const char c, HOST_WIDE_INT value)
2330 {
2331 if (!TARGET_EXTIMM)
2332 return 0;
2333
2334 switch (c)
2335 {
2336 case 's':
2337 return trunc_int_for_mode (value, SImode) == value;
2338
2339 case 'p':
2340 return value == 0
2341 || s390_single_part (GEN_INT (value), DImode, SImode, 0) == 1;
2342
2343 case 'n':
2344 return s390_single_part (GEN_INT (value - 1), DImode, SImode, -1) == 1;
2345
2346 default:
2347 gcc_unreachable ();
2348 }
2349 }
2350
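/* Worked example for the 's' case above: a value such as 0x7fffffff is
   accepted because it survives truncation to SImode unchanged, while
   anything needing more than 32 signed bits is rejected.  The 'p' and
   'n' cases defer to s390_single_part, defined elsewhere in this
   file.  */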
2351
2352 /* Evaluates constraint strings starting with letter N. Parameter STR
2353 contains the letters following letter "N" in the constraint string.
2354 Returns true if VALUE matches the constraint. */
2355
2356 int
2357 s390_N_constraint_str (const char *str, HOST_WIDE_INT value)
2358 {
2359 enum machine_mode mode, part_mode;
2360 int def;
2361 int part, part_goal;
2362
2363
2364 if (str[0] == 'x')
2365 part_goal = -1;
2366 else
2367 part_goal = str[0] - '0';
2368
2369 switch (str[1])
2370 {
2371 case 'Q':
2372 part_mode = QImode;
2373 break;
2374 case 'H':
2375 part_mode = HImode;
2376 break;
2377 case 'S':
2378 part_mode = SImode;
2379 break;
2380 default:
2381 return 0;
2382 }
2383
2384 switch (str[2])
2385 {
2386 case 'H':
2387 mode = HImode;
2388 break;
2389 case 'S':
2390 mode = SImode;
2391 break;
2392 case 'D':
2393 mode = DImode;
2394 break;
2395 default:
2396 return 0;
2397 }
2398
2399 switch (str[3])
2400 {
2401 case '0':
2402 def = 0;
2403 break;
2404 case 'F':
2405 def = -1;
2406 break;
2407 default:
2408 return 0;
2409 }
2410
2411 if (GET_MODE_SIZE (mode) <= GET_MODE_SIZE (part_mode))
2412 return 0;
2413
2414 part = s390_single_part (GEN_INT (value), mode, part_mode, def);
2415 if (part < 0)
2416 return 0;
2417 if (part_goal != -1 && part_goal != part)
2418 return 0;
2419
2420 return 1;
2421 }
2422
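/* Worked example for the string parsing above: for the letters "xQD0"
   following the 'N', any part position is acceptable (goal -1), the
   parts are QImode, the containing mode is DImode and DEF is 0;
   "0HSF" asks for part 0, HImode parts within SImode, and a DEF of -1
   (all ones).  s390_single_part then checks that VALUE differs from a
   DEF-filled value in exactly the requested part.  */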
2423
2424 /* Returns true if the input parameter VALUE is a float zero. */
2425
2426 int
2427 s390_float_const_zero_p (rtx value)
2428 {
2429 return (GET_MODE_CLASS (GET_MODE (value)) == MODE_FLOAT
2430 && value == CONST0_RTX (GET_MODE (value)));
2431 }
2432
2433 /* Implement TARGET_REGISTER_MOVE_COST. */
2434
2435 static int
2436 s390_register_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
2437 reg_class_t from, reg_class_t to)
2438 {
2439 /* On s390, copy between fprs and gprs is expensive as long as no
2440 ldgr/lgdr can be used. */
2441 if ((!TARGET_Z10 || GET_MODE_SIZE (mode) != 8)
2442 && ((reg_classes_intersect_p (from, GENERAL_REGS)
2443 && reg_classes_intersect_p (to, FP_REGS))
2444 || (reg_classes_intersect_p (from, FP_REGS)
2445 && reg_classes_intersect_p (to, GENERAL_REGS))))
2446 return 10;
2447
2448 return 1;
2449 }
2450
2451 /* Implement TARGET_MEMORY_MOVE_COST. */
2452
2453 static int
2454 s390_memory_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
2455 reg_class_t rclass ATTRIBUTE_UNUSED,
2456 bool in ATTRIBUTE_UNUSED)
2457 {
2458 return 1;
2459 }
2460
2461 /* Compute a (partial) cost for rtx X. Return true if the complete
2462 cost has been computed, and false if subexpressions should be
2463 scanned. In either case, *TOTAL contains the cost result.
2464 CODE contains GET_CODE (x), OUTER_CODE contains the code
2465 of the superexpression of x. */
2466
2467 static bool
2468 s390_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
2469 int *total, bool speed ATTRIBUTE_UNUSED)
2470 {
2471 switch (code)
2472 {
2473 case CONST:
2474 case CONST_INT:
2475 case LABEL_REF:
2476 case SYMBOL_REF:
2477 case CONST_DOUBLE:
2478 case MEM:
2479 *total = 0;
2480 return true;
2481
2482 case ASHIFT:
2483 case ASHIFTRT:
2484 case LSHIFTRT:
2485 case ROTATE:
2486 case ROTATERT:
2487 case AND:
2488 case IOR:
2489 case XOR:
2490 case NEG:
2491 case NOT:
2492 *total = COSTS_N_INSNS (1);
2493 return false;
2494
2495 case PLUS:
2496 case MINUS:
2497 *total = COSTS_N_INSNS (1);
2498 return false;
2499
2500 case MULT:
2501 switch (GET_MODE (x))
2502 {
2503 case SImode:
2504 {
2505 rtx left = XEXP (x, 0);
2506 rtx right = XEXP (x, 1);
2507 if (GET_CODE (right) == CONST_INT
2508 && CONST_OK_FOR_K (INTVAL (right)))
2509 *total = s390_cost->mhi;
2510 else if (GET_CODE (left) == SIGN_EXTEND)
2511 *total = s390_cost->mh;
2512 else
2513 *total = s390_cost->ms; /* msr, ms, msy */
2514 break;
2515 }
2516 case DImode:
2517 {
2518 rtx left = XEXP (x, 0);
2519 rtx right = XEXP (x, 1);
2520 if (TARGET_ZARCH)
2521 {
2522 if (GET_CODE (right) == CONST_INT
2523 && CONST_OK_FOR_K (INTVAL (right)))
2524 *total = s390_cost->mghi;
2525 else if (GET_CODE (left) == SIGN_EXTEND)
2526 *total = s390_cost->msgf;
2527 else
2528 *total = s390_cost->msg; /* msgr, msg */
2529 }
2530 else /* TARGET_31BIT */
2531 {
2532 if (GET_CODE (left) == SIGN_EXTEND
2533 && GET_CODE (right) == SIGN_EXTEND)
2534 /* mulsidi case: mr, m */
2535 *total = s390_cost->m;
2536 else if (GET_CODE (left) == ZERO_EXTEND
2537 && GET_CODE (right) == ZERO_EXTEND
2538 && TARGET_CPU_ZARCH)
2539 /* umulsidi case: ml, mlr */
2540 *total = s390_cost->ml;
2541 else
2542 /* Complex calculation is required. */
2543 *total = COSTS_N_INSNS (40);
2544 }
2545 break;
2546 }
2547 case SFmode:
2548 case DFmode:
2549 *total = s390_cost->mult_df;
2550 break;
2551 case TFmode:
2552 *total = s390_cost->mxbr;
2553 break;
2554 default:
2555 return false;
2556 }
2557 return false;
2558
2559 case FMA:
2560 switch (GET_MODE (x))
2561 {
2562 case DFmode:
2563 *total = s390_cost->madbr;
2564 break;
2565 case SFmode:
2566 *total = s390_cost->maebr;
2567 break;
2568 default:
2569 return false;
2570 }
2571 /* A negation of the third argument is free: FMSUB. */
2572 if (GET_CODE (XEXP (x, 2)) == NEG)
2573 {
2574 *total += (rtx_cost (XEXP (x, 0), FMA, 0, speed)
2575 + rtx_cost (XEXP (x, 1), FMA, 1, speed)
2576 + rtx_cost (XEXP (XEXP (x, 2), 0), FMA, 2, speed));
2577 return true;
2578 }
2579 return false;
2580
2581 case UDIV:
2582 case UMOD:
2583 if (GET_MODE (x) == TImode) /* 128 bit division */
2584 *total = s390_cost->dlgr;
2585 else if (GET_MODE (x) == DImode)
2586 {
2587 rtx right = XEXP (x, 1);
2588 if (GET_CODE (right) == ZERO_EXTEND) /* 64 by 32 bit division */
2589 *total = s390_cost->dlr;
2590 else /* 64 by 64 bit division */
2591 *total = s390_cost->dlgr;
2592 }
2593 else if (GET_MODE (x) == SImode) /* 32 bit division */
2594 *total = s390_cost->dlr;
2595 return false;
2596
2597 case DIV:
2598 case MOD:
2599 if (GET_MODE (x) == DImode)
2600 {
2601 rtx right = XEXP (x, 1);
2602 if (GET_CODE (right) == ZERO_EXTEND) /* 64 by 32 bit division */
2603 if (TARGET_ZARCH)
2604 *total = s390_cost->dsgfr;
2605 else
2606 *total = s390_cost->dr;
2607 else /* 64 by 64 bit division */
2608 *total = s390_cost->dsgr;
2609 }
2610 else if (GET_MODE (x) == SImode) /* 32 bit division */
2611 *total = s390_cost->dlr;
2612 else if (GET_MODE (x) == SFmode)
2613 {
2614 *total = s390_cost->debr;
2615 }
2616 else if (GET_MODE (x) == DFmode)
2617 {
2618 *total = s390_cost->ddbr;
2619 }
2620 else if (GET_MODE (x) == TFmode)
2621 {
2622 *total = s390_cost->dxbr;
2623 }
2624 return false;
2625
2626 case SQRT:
2627 if (GET_MODE (x) == SFmode)
2628 *total = s390_cost->sqebr;
2629 else if (GET_MODE (x) == DFmode)
2630 *total = s390_cost->sqdbr;
2631 else /* TFmode */
2632 *total = s390_cost->sqxbr;
2633 return false;
2634
2635 case SIGN_EXTEND:
2636 case ZERO_EXTEND:
2637 if (outer_code == MULT || outer_code == DIV || outer_code == MOD
2638 || outer_code == PLUS || outer_code == MINUS
2639 || outer_code == COMPARE)
2640 *total = 0;
2641 return false;
2642
2643 case COMPARE:
2644 *total = COSTS_N_INSNS (1);
2645 if (GET_CODE (XEXP (x, 0)) == AND
2646 && GET_CODE (XEXP (x, 1)) == CONST_INT
2647 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
2648 {
2649 rtx op0 = XEXP (XEXP (x, 0), 0);
2650 rtx op1 = XEXP (XEXP (x, 0), 1);
2651 rtx op2 = XEXP (x, 1);
2652
2653 if (memory_operand (op0, GET_MODE (op0))
2654 && s390_tm_ccmode (op1, op2, 0) != VOIDmode)
2655 return true;
2656 if (register_operand (op0, GET_MODE (op0))
2657 && s390_tm_ccmode (op1, op2, 1) != VOIDmode)
2658 return true;
2659 }
2660 return false;
2661
2662 default:
2663 return false;
2664 }
2665 }
2666
2667 /* Return the cost of an address rtx ADDR. */
2668
2669 static int
2670 s390_address_cost (rtx addr, enum machine_mode mode ATTRIBUTE_UNUSED,
2671 addr_space_t as ATTRIBUTE_UNUSED,
2672 bool speed ATTRIBUTE_UNUSED)
2673 {
2674 struct s390_address ad;
2675 if (!s390_decompose_address (addr, &ad))
2676 return 1000;
2677
2678 return ad.indx? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (1);
2679 }
2680
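/* Worked example for the cost above: an address like 0(%r1) costs
   COSTS_N_INSNS (1), while 0(%r1,%r2) costs one unit more, which
   slightly biases address selection against index registers.  */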
2681 /* If OP is a SYMBOL_REF of a thread-local symbol, return its TLS mode,
2682 otherwise return 0. */
2683
2684 int
2685 tls_symbolic_operand (rtx op)
2686 {
2687 if (GET_CODE (op) != SYMBOL_REF)
2688 return 0;
2689 return SYMBOL_REF_TLS_MODEL (op);
2690 }
2691 \f
2692 /* Split DImode access register reference REG (on 64-bit) into its constituent
2693 low and high parts, and store them into LO and HI. Note that gen_lowpart/
2694 gen_highpart cannot be used as they assume all registers are word-sized,
2695 while our access registers have only half that size. */
2696
2697 void
2698 s390_split_access_reg (rtx reg, rtx *lo, rtx *hi)
2699 {
2700 gcc_assert (TARGET_64BIT);
2701 gcc_assert (ACCESS_REG_P (reg));
2702 gcc_assert (GET_MODE (reg) == DImode);
2703 gcc_assert (!(REGNO (reg) & 1));
2704
2705 *lo = gen_rtx_REG (SImode, REGNO (reg) + 1);
2706 *hi = gen_rtx_REG (SImode, REGNO (reg));
2707 }
2708
2709 /* Return true if OP contains a symbol reference. */
2710
2711 bool
2712 symbolic_reference_mentioned_p (rtx op)
2713 {
2714 const char *fmt;
2715 int i;
2716
2717 if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
2718 return 1;
2719
2720 fmt = GET_RTX_FORMAT (GET_CODE (op));
2721 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
2722 {
2723 if (fmt[i] == 'E')
2724 {
2725 int j;
2726
2727 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
2728 if (symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
2729 return 1;
2730 }
2731
2732 else if (fmt[i] == 'e' && symbolic_reference_mentioned_p (XEXP (op, i)))
2733 return 1;
2734 }
2735
2736 return 0;
2737 }
2738
2739 /* Return true if OP contains a reference to a thread-local symbol. */
2740
2741 bool
2742 tls_symbolic_reference_mentioned_p (rtx op)
2743 {
2744 const char *fmt;
2745 int i;
2746
2747 if (GET_CODE (op) == SYMBOL_REF)
2748 return tls_symbolic_operand (op);
2749
2750 fmt = GET_RTX_FORMAT (GET_CODE (op));
2751 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
2752 {
2753 if (fmt[i] == 'E')
2754 {
2755 int j;
2756
2757 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
2758 if (tls_symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
2759 return true;
2760 }
2761
2762 else if (fmt[i] == 'e' && tls_symbolic_reference_mentioned_p (XEXP (op, i)))
2763 return true;
2764 }
2765
2766 return false;
2767 }
2768
2769
2770 /* Return true if OP is a legitimate general operand when
2771 generating PIC code. It is given that flag_pic is on
2772 and that OP satisfies CONSTANT_P or is a CONST_DOUBLE. */
2773
2774 int
2775 legitimate_pic_operand_p (rtx op)
2776 {
2777 /* Accept all non-symbolic constants. */
2778 if (!SYMBOLIC_CONST (op))
2779 return 1;
2780
2781 /* Reject everything else; must be handled
2782 via emit_symbolic_move. */
2783 return 0;
2784 }
2785
2786 /* Returns true if the constant value OP is a legitimate general operand.
2787 It is given that OP satisfies CONSTANT_P or is a CONST_DOUBLE. */
2788
2789 static bool
2790 s390_legitimate_constant_p (enum machine_mode mode, rtx op)
2791 {
2792 /* Accept all non-symbolic constants. */
2793 if (!SYMBOLIC_CONST (op))
2794 return 1;
2795
2796 /* Accept immediate LARL operands. */
2797 if (TARGET_CPU_ZARCH && larl_operand (op, mode))
2798 return 1;
2799
2800 /* Thread-local symbols are never legal constants. This is
2801 so that emit_call knows that computing such addresses
2802 might require a function call. */
2803 if (TLS_SYMBOLIC_CONST (op))
2804 return 0;
2805
2806 /* In the PIC case, symbolic constants must *not* be
2807 forced into the literal pool. We accept them here,
2808 so that they will be handled by emit_symbolic_move. */
2809 if (flag_pic)
2810 return 1;
2811
2812 /* All remaining non-PIC symbolic constants are
2813 forced into the literal pool. */
2814 return 0;
2815 }
2816
2817 /* Determine if it's legal to put X into the constant pool. This
2818 is not possible if X contains the address of a symbol that is
2819 not constant (TLS) or not known at final link time (PIC). */
2820
2821 static bool
2822 s390_cannot_force_const_mem (enum machine_mode mode, rtx x)
2823 {
2824 switch (GET_CODE (x))
2825 {
2826 case CONST_INT:
2827 case CONST_DOUBLE:
2828 /* Accept all non-symbolic constants. */
2829 return false;
2830
2831 case LABEL_REF:
2832 /* Labels are OK iff we are non-PIC. */
2833 return flag_pic != 0;
2834
2835 case SYMBOL_REF:
2836 /* 'Naked' TLS symbol references are never OK,
2837 non-TLS symbols are OK iff we are non-PIC. */
2838 if (tls_symbolic_operand (x))
2839 return true;
2840 else
2841 return flag_pic != 0;
2842
2843 case CONST:
2844 return s390_cannot_force_const_mem (mode, XEXP (x, 0));
2845 case PLUS:
2846 case MINUS:
2847 return s390_cannot_force_const_mem (mode, XEXP (x, 0))
2848 || s390_cannot_force_const_mem (mode, XEXP (x, 1));
2849
2850 case UNSPEC:
2851 switch (XINT (x, 1))
2852 {
2853 /* Only lt-relative or GOT-relative UNSPECs are OK. */
2854 case UNSPEC_LTREL_OFFSET:
2855 case UNSPEC_GOT:
2856 case UNSPEC_GOTOFF:
2857 case UNSPEC_PLTOFF:
2858 case UNSPEC_TLSGD:
2859 case UNSPEC_TLSLDM:
2860 case UNSPEC_NTPOFF:
2861 case UNSPEC_DTPOFF:
2862 case UNSPEC_GOTNTPOFF:
2863 case UNSPEC_INDNTPOFF:
2864 return false;
2865
2866 /* If the literal pool shares the code section, execute
2867 template placeholders may be put into the pool as well. */
2868 case UNSPEC_INSN:
2869 return TARGET_CPU_ZARCH;
2870
2871 default:
2872 return true;
2873 }
2874 break;
2875
2876 default:
2877 gcc_unreachable ();
2878 }
2879 }
2880
2881 /* Returns true if the constant value OP is a legitimate general
2882 operand during and after reload. The difference from
2883 legitimate_constant_p is that this function will not accept
2884 a constant that would need to be forced into the literal pool
2885 before it can be used as an operand.
2886 This function accepts all constants which can be loaded directly
2887 into a GPR. */
2888
2889 bool
2890 legitimate_reload_constant_p (rtx op)
2891 {
2892 /* Accept la(y) operands. */
2893 if (GET_CODE (op) == CONST_INT
2894 && DISP_IN_RANGE (INTVAL (op)))
2895 return true;
2896
2897 /* Accept l(g)hi/l(g)fi operands. */
2898 if (GET_CODE (op) == CONST_INT
2899 && (CONST_OK_FOR_K (INTVAL (op)) || CONST_OK_FOR_Os (INTVAL (op))))
2900 return true;
2901
2902 /* Accept lliXX operands. */
2903 if (TARGET_ZARCH
2904 && GET_CODE (op) == CONST_INT
2905 && trunc_int_for_mode (INTVAL (op), word_mode) == INTVAL (op)
2906 && s390_single_part (op, word_mode, HImode, 0) >= 0)
2907 return true;
2908
2909 if (TARGET_EXTIMM
2910 && GET_CODE (op) == CONST_INT
2911 && trunc_int_for_mode (INTVAL (op), word_mode) == INTVAL (op)
2912 && s390_single_part (op, word_mode, SImode, 0) >= 0)
2913 return true;
2914
2915 /* Accept larl operands. */
2916 if (TARGET_CPU_ZARCH
2917 && larl_operand (op, VOIDmode))
2918 return true;
2919
2920 /* Accept floating-point zero operands that fit into a single GPR. */
2921 if (GET_CODE (op) == CONST_DOUBLE
2922 && s390_float_const_zero_p (op)
2923 && GET_MODE_SIZE (GET_MODE (op)) <= UNITS_PER_WORD)
2924 return true;
2925
2926 /* Accept double-word operands that can be split. */
2927 if (GET_CODE (op) == CONST_INT
2928 && trunc_int_for_mode (INTVAL (op), word_mode) != INTVAL (op))
2929 {
2930 enum machine_mode dword_mode = word_mode == SImode ? DImode : TImode;
2931 rtx hi = operand_subword (op, 0, 0, dword_mode);
2932 rtx lo = operand_subword (op, 1, 0, dword_mode);
2933 return legitimate_reload_constant_p (hi)
2934 && legitimate_reload_constant_p (lo);
2935 }
2936
2937 /* Everything else cannot be handled without reload. */
2938 return false;
2939 }
2940
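/* Illustrative sketch (hypothetical, not compiled): a couple of values
   and the checks above that accept them.  */
#if 0
static void
legitimate_reload_constant_examples (void)
{
  /* 4000 lies in the la(y) displacement range.  */
  gcc_assert (legitimate_reload_constant_p (GEN_INT (4000)));

  /* -30000 fits the signed 16-bit l(g)hi immediate (and, with long
     displacements available, already the la(y) range).  */
  gcc_assert (legitimate_reload_constant_p (GEN_INT (-30000)));
}
#endif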
2941 /* Returns true if the constant value OP is a legitimate fp operand
2942 during and after reload.
2943 This function accepts all constants which can be loaded directly
2944 into an FPR. */
2945
2946 static bool
2947 legitimate_reload_fp_constant_p (rtx op)
2948 {
2949 /* Accept floating-point zero operands if the load zero instruction
2950 can be used. Prior to z196 the load fp zero instruction caused a
2951 performance penalty if the result is used as a BFP number. */
2952 if (TARGET_Z196
2953 && GET_CODE (op) == CONST_DOUBLE
2954 && s390_float_const_zero_p (op))
2955 return true;
2956
2957 return false;
2958 }
2959
2960 /* Given an rtx OP being reloaded into a reg required to be in class RCLASS,
2961 return the class of reg to actually use. */
2962
2963 static reg_class_t
2964 s390_preferred_reload_class (rtx op, reg_class_t rclass)
2965 {
2966 switch (GET_CODE (op))
2967 {
2968 /* Constants we cannot reload into general registers
2969 must be forced into the literal pool. */
2970 case CONST_DOUBLE:
2971 case CONST_INT:
2972 if (reg_class_subset_p (GENERAL_REGS, rclass)
2973 && legitimate_reload_constant_p (op))
2974 return GENERAL_REGS;
2975 else if (reg_class_subset_p (ADDR_REGS, rclass)
2976 && legitimate_reload_constant_p (op))
2977 return ADDR_REGS;
2978 else if (reg_class_subset_p (FP_REGS, rclass)
2979 && legitimate_reload_fp_constant_p (op))
2980 return FP_REGS;
2981 return NO_REGS;
2982
2983 /* If a symbolic constant or a PLUS is reloaded,
2984 it is most likely being used as an address, so
2985 prefer ADDR_REGS. If 'class' is not a superset
2986 of ADDR_REGS, e.g. FP_REGS, reject this reload. */
2987 case CONST:
2988 /* A larl operand with an odd addend will get fixed via secondary
2989 reload, so don't request that it be pushed into the literal
2990 pool. */
2991 if (TARGET_CPU_ZARCH
2992 && GET_CODE (XEXP (op, 0)) == PLUS
2993 && GET_CODE (XEXP (XEXP(op, 0), 0)) == SYMBOL_REF
2994 && GET_CODE (XEXP (XEXP(op, 0), 1)) == CONST_INT)
2995 {
2996 if (reg_class_subset_p (ADDR_REGS, rclass))
2997 return ADDR_REGS;
2998 else
2999 return NO_REGS;
3000 }
3001 /* fallthrough */
3002 case LABEL_REF:
3003 case SYMBOL_REF:
3004 if (!legitimate_reload_constant_p (op))
3005 return NO_REGS;
3006 /* fallthrough */
3007 case PLUS:
3008 /* LOAD ADDRESS will be used. */
3009 if (reg_class_subset_p (ADDR_REGS, rclass))
3010 return ADDR_REGS;
3011 else
3012 return NO_REGS;
3013
3014 default:
3015 break;
3016 }
3017
3018 return rclass;
3019 }
3020
3021 /* Return true if ADDR is SYMBOL_REF + addend with addend being a
3022 multiple of ALIGNMENT and the SYMBOL_REF being naturally
3023 aligned. */
3024
3025 bool
3026 s390_check_symref_alignment (rtx addr, HOST_WIDE_INT alignment)
3027 {
3028 HOST_WIDE_INT addend;
3029 rtx symref;
3030
3031 if (!s390_loadrelative_operand_p (addr, &symref, &addend))
3032 return false;
3033
3034 if (addend & (alignment - 1))
3035 return false;
3036
3037 if (GET_CODE (symref) == SYMBOL_REF
3038 && !SYMBOL_REF_NOT_NATURALLY_ALIGNED_P (symref))
3039 return true;
3040
3041 if (GET_CODE (symref) == UNSPEC
3042 && alignment <= UNITS_PER_LONG)
3043 return true;
3044
3045 return false;
3046 }
3047
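/* Illustrative sketch (hypothetical, not compiled): a SYMBOL_REF plus a
   suitably aligned addend passes the check, provided the symbol itself
   is naturally aligned.  */
#if 0
static void
s390_check_symref_alignment_example (void)
{
  rtx sym = gen_rtx_SYMBOL_REF (Pmode, "some_symbol");
  rtx addr = gen_rtx_CONST (Pmode,
			    gen_rtx_PLUS (Pmode, sym, GEN_INT (8)));

  /* Addend 8 is a multiple of 4, so this reduces to the natural
     alignment check on SYM; an addend of 6 would fail right away.  */
  bool aligned_p = s390_check_symref_alignment (addr, 4);
  (void) aligned_p;
}
#endif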
3048 /* ADDR is moved into REG using larl. If ADDR isn't a valid larl
3049 operand, SCRATCH is used to load the even part of the address,
3050 which is then incremented by one. */
3051
3052 void
3053 s390_reload_larl_operand (rtx reg, rtx addr, rtx scratch)
3054 {
3055 HOST_WIDE_INT addend;
3056 rtx symref;
3057
3058 if (!s390_loadrelative_operand_p (addr, &symref, &addend))
3059 gcc_unreachable ();
3060
3061 if (!(addend & 1))
3062 /* Easy case. The addend is even so larl will do fine. */
3063 emit_move_insn (reg, addr);
3064 else
3065 {
3066 /* We can leave the scratch register untouched if the target
3067 register is a valid base register. */
3068 if (REGNO (reg) < FIRST_PSEUDO_REGISTER
3069 && REGNO_REG_CLASS (REGNO (reg)) == ADDR_REGS)
3070 scratch = reg;
3071
3072 gcc_assert (REGNO (scratch) < FIRST_PSEUDO_REGISTER);
3073 gcc_assert (REGNO_REG_CLASS (REGNO (scratch)) == ADDR_REGS);
3074
3075 if (addend != 1)
3076 emit_move_insn (scratch,
3077 gen_rtx_CONST (Pmode,
3078 gen_rtx_PLUS (Pmode, symref,
3079 GEN_INT (addend - 1))));
3080 else
3081 emit_move_insn (scratch, symref);
3082
3083 /* Increment the address using la in order to avoid clobbering cc. */
3084 s390_load_address (reg, gen_rtx_PLUS (Pmode, scratch, const1_rtx));
3085 }
3086 }
3087
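/* Worked example for the odd-addend path above (instruction sketch,
   register numbers arbitrary): reloading "sym + 5" into %r2 is
   expected to emit roughly
       larl    %r2, sym+4
       la      %r2, 1(%r2)
   keeping the larl target even and leaving the condition code alone;
   SCRATCH is only needed when the target register is not a valid base
   register.  */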
3088 /* Generate what is necessary to move between REG and MEM using
3089 SCRATCH. The direction is given by TOMEM. */
3090
3091 void
3092 s390_reload_symref_address (rtx reg, rtx mem, rtx scratch, bool tomem)
3093 {
3094 /* Reload might have pulled a constant out of the literal pool.
3095 Force it back in. */
3096 if (CONST_INT_P (mem) || GET_CODE (mem) == CONST_DOUBLE
3097 || GET_CODE (mem) == CONST)
3098 mem = force_const_mem (GET_MODE (reg), mem);
3099
3100 gcc_assert (MEM_P (mem));
3101
3102 /* For a load from memory we can leave the scratch register
3103 untouched if the target register is a valid base register. */
3104 if (!tomem
3105 && REGNO (reg) < FIRST_PSEUDO_REGISTER
3106 && REGNO_REG_CLASS (REGNO (reg)) == ADDR_REGS
3107 && GET_MODE (reg) == GET_MODE (scratch))
3108 scratch = reg;
3109
3110 /* Load address into scratch register. Since we can't have a
3111 secondary reload for a secondary reload we have to cover the case
3112 where larl would need a secondary reload here as well. */
3113 s390_reload_larl_operand (scratch, XEXP (mem, 0), scratch);
3114
3115 /* Now we can use a standard load/store to do the move. */
3116 if (tomem)
3117 emit_move_insn (replace_equiv_address (mem, scratch), reg);
3118 else
3119 emit_move_insn (reg, replace_equiv_address (mem, scratch));
3120 }
3121
3122 /* Inform reload about cases where moving X with a mode MODE to a register in
3123 RCLASS requires an extra scratch or immediate register. Return the class
3124 needed for the immediate register. */
3125
3126 static reg_class_t
3127 s390_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
3128 enum machine_mode mode, secondary_reload_info *sri)
3129 {
3130 enum reg_class rclass = (enum reg_class) rclass_i;
3131
3132 /* Intermediate register needed. */
3133 if (reg_classes_intersect_p (CC_REGS, rclass))
3134 return GENERAL_REGS;
3135
3136 if (TARGET_Z10)
3137 {
3138 HOST_WIDE_INT offset;
3139 rtx symref;
3140
3141 /* On z10 several optimizer steps may generate larl operands with
3142 an odd addend. */
3143 if (in_p
3144 && s390_loadrelative_operand_p (x, &symref, &offset)
3145 && mode == Pmode
3146 && !SYMBOL_REF_ALIGN1_P (symref)
3147 && (offset & 1) == 1)
3148 sri->icode = ((mode == DImode) ? CODE_FOR_reloaddi_larl_odd_addend_z10
3149 : CODE_FOR_reloadsi_larl_odd_addend_z10);
3150
3151 /* On z10 we need a scratch register when moving QI, TI or floating
3152 point mode values from or to a memory location with a SYMBOL_REF
3153 or if the symref addend of a SI or DI move is not aligned to the
3154 width of the access. */
3155 if (MEM_P (x)
3156 && s390_loadrelative_operand_p (XEXP (x, 0), NULL, NULL)
3157 && (mode == QImode || mode == TImode || FLOAT_MODE_P (mode)
3158 || (!TARGET_ZARCH && mode == DImode)
3159 || ((mode == HImode || mode == SImode || mode == DImode)
3160 && (!s390_check_symref_alignment (XEXP (x, 0),
3161 GET_MODE_SIZE (mode))))))
3162 {
3163 #define __SECONDARY_RELOAD_CASE(M,m) \
3164 case M##mode: \
3165 if (TARGET_64BIT) \
3166 sri->icode = in_p ? CODE_FOR_reload##m##di_toreg_z10 : \
3167 CODE_FOR_reload##m##di_tomem_z10; \
3168 else \
3169 sri->icode = in_p ? CODE_FOR_reload##m##si_toreg_z10 : \
3170 CODE_FOR_reload##m##si_tomem_z10; \
3171 break;
3172
3173 switch (GET_MODE (x))
3174 {
3175 __SECONDARY_RELOAD_CASE (QI, qi);
3176 __SECONDARY_RELOAD_CASE (HI, hi);
3177 __SECONDARY_RELOAD_CASE (SI, si);
3178 __SECONDARY_RELOAD_CASE (DI, di);
3179 __SECONDARY_RELOAD_CASE (TI, ti);
3180 __SECONDARY_RELOAD_CASE (SF, sf);
3181 __SECONDARY_RELOAD_CASE (DF, df);
3182 __SECONDARY_RELOAD_CASE (TF, tf);
3183 __SECONDARY_RELOAD_CASE (SD, sd);
3184 __SECONDARY_RELOAD_CASE (DD, dd);
3185 __SECONDARY_RELOAD_CASE (TD, td);
3186
3187 default:
3188 gcc_unreachable ();
3189 }
3190 #undef __SECONDARY_RELOAD_CASE
3191 }
3192 }
3193
3194 /* We need a scratch register when loading a PLUS expression which
3195 is not a legitimate operand of the LOAD ADDRESS instruction. */
3196 /* LRA can deal with the transformation of a PLUS operand very well,
3197 so we don't need to prompt LRA in this case. */
3198 if (! lra_in_progress && in_p && s390_plus_operand (x, mode))
3199 sri->icode = (TARGET_64BIT ?
3200 CODE_FOR_reloaddi_plus : CODE_FOR_reloadsi_plus);
3201
3202 /* When performing a multiword move from or to memory, we have to make
3203 sure the second chunk in memory is addressable without causing a
3204 displacement overflow. If that would be the case, we calculate the
3205 address in a scratch register. */
3206 if (MEM_P (x)
3207 && GET_CODE (XEXP (x, 0)) == PLUS
3208 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
3209 && !DISP_IN_RANGE (INTVAL (XEXP (XEXP (x, 0), 1))
3210 + GET_MODE_SIZE (mode) - 1))
3211 {
3212 /* For GENERAL_REGS a displacement overflow is no problem if occurring
3213 in an s_operand address since we may fall back to lm/stm. So we only
3214 have to care about overflows in the b+i+d case. */
3215 if ((reg_classes_intersect_p (GENERAL_REGS, rclass)
3216 && s390_class_max_nregs (GENERAL_REGS, mode) > 1
3217 && GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS)
3218 /* For FP_REGS no lm/stm is available so this check is triggered
3219 for displacement overflows in b+i+d and b+d like addresses. */
3220 || (reg_classes_intersect_p (FP_REGS, rclass)
3221 && s390_class_max_nregs (FP_REGS, mode) > 1))
3222 {
3223 if (in_p)
3224 sri->icode = (TARGET_64BIT ?
3225 CODE_FOR_reloaddi_nonoffmem_in :
3226 CODE_FOR_reloadsi_nonoffmem_in);
3227 else
3228 sri->icode = (TARGET_64BIT ?
3229 CODE_FOR_reloaddi_nonoffmem_out :
3230 CODE_FOR_reloadsi_nonoffmem_out);
3231 }
3232 }
3233
3234 /* A scratch address register is needed when a symbolic constant is
3235 copied to r0 while compiling with -fPIC. In other cases the target
3236 register might be used as a temporary (see legitimize_pic_address). */
3237 if (in_p && SYMBOLIC_CONST (x) && flag_pic == 2 && rclass != ADDR_REGS)
3238 sri->icode = (TARGET_64BIT ?
3239 CODE_FOR_reloaddi_PIC_addr :
3240 CODE_FOR_reloadsi_PIC_addr);
3241
3242 /* Either scratch or no register needed. */
3243 return NO_REGS;
3244 }
3245
3246 /* Generate code to load SRC, which is PLUS that is not a
3247 legitimate operand for the LA instruction, into TARGET.
3248 SCRATCH may be used as scratch register. */
3249
3250 void
3251 s390_expand_plus_operand (rtx target, rtx src,
3252 rtx scratch)
3253 {
3254 rtx sum1, sum2;
3255 struct s390_address ad;
3256
3257 /* src must be a PLUS; get its two operands. */
3258 gcc_assert (GET_CODE (src) == PLUS);
3259 gcc_assert (GET_MODE (src) == Pmode);
3260
3261 /* Check if any of the two operands is already scheduled
3262 for replacement by reload. This can happen e.g. when
3263 float registers occur in an address. */
3264 sum1 = find_replacement (&XEXP (src, 0));
3265 sum2 = find_replacement (&XEXP (src, 1));
3266 src = gen_rtx_PLUS (Pmode, sum1, sum2);
3267
3268 /* If the address is already strictly valid, there's nothing to do. */
3269 if (!s390_decompose_address (src, &ad)
3270 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
3271 || (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx))))
3272 {
3273 /* Otherwise, one of the operands cannot be an address register;
3274 we reload its value into the scratch register. */
3275 if (true_regnum (sum1) < 1 || true_regnum (sum1) > 15)
3276 {
3277 emit_move_insn (scratch, sum1);
3278 sum1 = scratch;
3279 }
3280 if (true_regnum (sum2) < 1 || true_regnum (sum2) > 15)
3281 {
3282 emit_move_insn (scratch, sum2);
3283 sum2 = scratch;
3284 }
3285
3286 /* According to the way these invalid addresses are generated
3287 in reload.c, it should never happen (at least on s390) that
3288 *neither* of the PLUS components, after find_replacements
3289 was applied, is an address register. */
3290 if (sum1 == scratch && sum2 == scratch)
3291 {
3292 debug_rtx (src);
3293 gcc_unreachable ();
3294 }
3295
3296 src = gen_rtx_PLUS (Pmode, sum1, sum2);
3297 }
3298
3299 /* Emit the LOAD ADDRESS pattern. Note that reload of PLUS
3300 is only ever performed on addresses, so we can mark the
3301 sum as legitimate for LA in any case. */
3302 s390_load_address (target, src);
3303 }
3304
3305
3306 /* Return true if ADDR is a valid memory address.
3307 STRICT specifies whether strict register checking applies. */
3308
3309 static bool
3310 s390_legitimate_address_p (enum machine_mode mode, rtx addr, bool strict)
3311 {
3312 struct s390_address ad;
3313
3314 if (TARGET_Z10
3315 && larl_operand (addr, VOIDmode)
3316 && (mode == VOIDmode
3317 || s390_check_symref_alignment (addr, GET_MODE_SIZE (mode))))
3318 return true;
3319
3320 if (!s390_decompose_address (addr, &ad))
3321 return false;
3322
3323 if (strict)
3324 {
3325 if (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
3326 return false;
3327
3328 if (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx)))
3329 return false;
3330 }
3331 else
3332 {
3333 if (ad.base
3334 && !(REGNO (ad.base) >= FIRST_PSEUDO_REGISTER
3335 || REGNO_REG_CLASS (REGNO (ad.base)) == ADDR_REGS))
3336 return false;
3337
3338 if (ad.indx
3339 && !(REGNO (ad.indx) >= FIRST_PSEUDO_REGISTER
3340 || REGNO_REG_CLASS (REGNO (ad.indx)) == ADDR_REGS))
3341 return false;
3342 }
3343 return true;
3344 }
3345
3346 /* Return true if OP is a valid operand for the LA instruction.
3347 In 31-bit, we need to prove that the result is used as an
3348 address, as LA performs only a 31-bit addition. */
3349
3350 bool
3351 legitimate_la_operand_p (rtx op)
3352 {
3353 struct s390_address addr;
3354 if (!s390_decompose_address (op, &addr))
3355 return false;
3356
3357 return (TARGET_64BIT || addr.pointer);
3358 }
3359
3360 /* Return true if it is valid *and* preferable to use LA to
3361 compute the sum of OP1 and OP2. */
3362
3363 bool
3364 preferred_la_operand_p (rtx op1, rtx op2)
3365 {
3366 struct s390_address addr;
3367
3368 if (op2 != const0_rtx)
3369 op1 = gen_rtx_PLUS (Pmode, op1, op2);
3370
3371 if (!s390_decompose_address (op1, &addr))
3372 return false;
3373 if (addr.base && !REGNO_OK_FOR_BASE_P (REGNO (addr.base)))
3374 return false;
3375 if (addr.indx && !REGNO_OK_FOR_INDEX_P (REGNO (addr.indx)))
3376 return false;
3377
3378 /* Avoid LA instructions with index register on z196; it is
3379 preferable to use regular add instructions when possible.
3380 Starting with zEC12 the la with index register is "uncracked"
3381 again. */
3382 if (addr.indx && s390_tune == PROCESSOR_2817_Z196)
3383 return false;
3384
3385 if (!TARGET_64BIT && !addr.pointer)
3386 return false;
3387
3388 if (addr.pointer)
3389 return true;
3390
3391 if ((addr.base && REG_P (addr.base) && REG_POINTER (addr.base))
3392 || (addr.indx && REG_P (addr.indx) && REG_POINTER (addr.indx)))
3393 return true;
3394
3395 return false;
3396 }
3397
3398 /* Emit a forced load-address operation to load SRC into DST.
3399 This will use the LOAD ADDRESS instruction even in situations
3400 where legitimate_la_operand_p (SRC) returns false. */
3401
3402 void
3403 s390_load_address (rtx dst, rtx src)
3404 {
3405 if (TARGET_64BIT)
3406 emit_move_insn (dst, src);
3407 else
3408 emit_insn (gen_force_la_31 (dst, src));
3409 }
3410
3411 /* Return a legitimate reference for ORIG (an address) using the
3412 register REG. If REG is 0, a new pseudo is generated.
3413
3414 There are two types of references that must be handled:
3415
3416 1. Global data references must load the address from the GOT, via
3417 the PIC reg. An insn is emitted to do this load, and the reg is
3418 returned.
3419
3420 2. Static data references, constant pool addresses, and code labels
3421 compute the address as an offset from the GOT, whose base is in
3422 the PIC reg. Static data objects have SYMBOL_FLAG_LOCAL set to
3423 differentiate them from global data objects. The returned
3424 address is the PIC reg + an unspec constant.
3425
3426 TARGET_LEGITIMIZE_ADDRESS_P rejects symbolic references unless the PIC
3427 reg also appears in the address. */
3428
3429 rtx
3430 legitimize_pic_address (rtx orig, rtx reg)
3431 {
3432 rtx addr = orig;
3433 rtx addend = const0_rtx;
3434 rtx new_rtx = orig;
3435
3436 gcc_assert (!TLS_SYMBOLIC_CONST (addr));
3437
3438 if (GET_CODE (addr) == CONST)
3439 addr = XEXP (addr, 0);
3440
3441 if (GET_CODE (addr) == PLUS)
3442 {
3443 addend = XEXP (addr, 1);
3444 addr = XEXP (addr, 0);
3445 }
3446
3447 if ((GET_CODE (addr) == LABEL_REF
3448 || (GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_LOCAL_P (addr))
3449 || (GET_CODE (addr) == UNSPEC &&
3450 (XINT (addr, 1) == UNSPEC_GOTENT
3451 || (TARGET_CPU_ZARCH && XINT (addr, 1) == UNSPEC_PLT))))
3452 && GET_CODE (addend) == CONST_INT)
3453 {
3454 /* This can be locally addressed. */
3455
3456 /* larl_operand requires UNSPECs to be wrapped in a const rtx. */
3457 rtx const_addr = (GET_CODE (addr) == UNSPEC ?
3458 gen_rtx_CONST (Pmode, addr) : addr);
3459
3460 if (TARGET_CPU_ZARCH
3461 && larl_operand (const_addr, VOIDmode)
3462 && INTVAL (addend) < (HOST_WIDE_INT)1 << 31
3463 && INTVAL (addend) >= -((HOST_WIDE_INT)1 << 31))
3464 {
3465 if (INTVAL (addend) & 1)
3466 {
3467 /* LARL can't handle odd offsets, so emit a pair of LARL
3468 and LA. */
3469 rtx temp = reg? reg : gen_reg_rtx (Pmode);
3470
3471 if (!DISP_IN_RANGE (INTVAL (addend)))
3472 {
3473 HOST_WIDE_INT even = INTVAL (addend) - 1;
3474 addr = gen_rtx_PLUS (Pmode, addr, GEN_INT (even));
3475 addr = gen_rtx_CONST (Pmode, addr);
3476 addend = const1_rtx;
3477 }
3478
3479 emit_move_insn (temp, addr);
3480 new_rtx = gen_rtx_PLUS (Pmode, temp, addend);
3481
3482 if (reg != 0)
3483 {
3484 s390_load_address (reg, new_rtx);
3485 new_rtx = reg;
3486 }
3487 }
3488 else
3489 {
3490 /* If the offset is even, we can just use LARL. This
3491 will happen automatically. */
3492 }
3493 }
3494 else
3495 {
3496 /* No larl - Access local symbols relative to the GOT. */
3497
3498 rtx temp = reg? reg : gen_reg_rtx (Pmode);
3499
3500 if (reload_in_progress || reload_completed)
3501 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3502
3503 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
3504 if (addend != const0_rtx)
3505 addr = gen_rtx_PLUS (Pmode, addr, addend);
3506 addr = gen_rtx_CONST (Pmode, addr);
3507 addr = force_const_mem (Pmode, addr);
3508 emit_move_insn (temp, addr);
3509
3510 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3511 if (reg != 0)
3512 {
3513 s390_load_address (reg, new_rtx);
3514 new_rtx = reg;
3515 }
3516 }
3517 }
3518 else if (GET_CODE (addr) == SYMBOL_REF && addend == const0_rtx)
3519 {
3520 /* A non-local symbol reference without addend.
3521
3522 The symbol ref is wrapped into an UNSPEC to make sure the
3523 proper operand modifier (@GOT or @GOTENT) will be emitted.
3524 This will tell the linker to put the symbol into the GOT.
3525
3526 Additionally the code dereferencing the GOT slot is emitted here.
3527
3528 An addend to the symref needs to be added afterwards.
3529 legitimize_pic_address calls itself recursively to handle
3530 that case. So no need to do it here. */
3531
3532 if (reg == 0)
3533 reg = gen_reg_rtx (Pmode);
3534
3535 if (TARGET_Z10)
3536 {
3537 /* Use load relative if possible.
3538 lgrl <target>, sym@GOTENT */
3539 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTENT);
3540 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3541 new_rtx = gen_const_mem (GET_MODE (reg), new_rtx);
3542
3543 emit_move_insn (reg, new_rtx);
3544 new_rtx = reg;
3545 }
3546 else if (flag_pic == 1)
3547 {
3548 /* Assume GOT offset is a valid displacement operand (< 4k
3549 or < 512k with z990). This is handled the same way in
3550 both 31- and 64-bit code (@GOT).
3551 lg <target>, sym@GOT(r12) */
3552
3553 if (reload_in_progress || reload_completed)
3554 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3555
3556 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
3557 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3558 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
3559 new_rtx = gen_const_mem (Pmode, new_rtx);
3560 emit_move_insn (reg, new_rtx);
3561 new_rtx = reg;
3562 }
3563 else if (TARGET_CPU_ZARCH)
3564 {
3565 /* If the GOT offset might be >= 4k, we determine the position
3566 of the GOT entry via a PC-relative LARL (@GOTENT).
3567 larl temp, sym@GOTENT
3568 lg <target>, 0(temp) */
3569
3570 rtx temp = reg ? reg : gen_reg_rtx (Pmode);
3571
3572 gcc_assert (REGNO (temp) >= FIRST_PSEUDO_REGISTER
3573 || REGNO_REG_CLASS (REGNO (temp)) == ADDR_REGS);
3574
3575 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTENT);
3576 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3577 emit_move_insn (temp, new_rtx);
3578
3579 new_rtx = gen_const_mem (Pmode, temp);
3580 emit_move_insn (reg, new_rtx);
3581
3582 new_rtx = reg;
3583 }
3584 else
3585 {
3586 /* If the GOT offset might be >= 4k, we have to load it
3587 from the literal pool (@GOT).
3588
3589 lg temp, lit-litbase(r13)
3590 lg <target>, 0(temp)
3591 lit: .long sym@GOT */
3592
3593 rtx temp = reg ? reg : gen_reg_rtx (Pmode);
3594
3595 gcc_assert (REGNO (temp) >= FIRST_PSEUDO_REGISTER
3596 || REGNO_REG_CLASS (REGNO (temp)) == ADDR_REGS);
3597
3598 if (reload_in_progress || reload_completed)
3599 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3600
3601 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
3602 addr = gen_rtx_CONST (Pmode, addr);
3603 addr = force_const_mem (Pmode, addr);
3604 emit_move_insn (temp, addr);
3605
3606 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3607 new_rtx = gen_const_mem (Pmode, new_rtx);
3608 emit_move_insn (reg, new_rtx);
3609 new_rtx = reg;
3610 }
3611 }
3612 else if (GET_CODE (addr) == UNSPEC && GET_CODE (addend) == CONST_INT)
3613 {
3614 gcc_assert (XVECLEN (addr, 0) == 1);
3615 switch (XINT (addr, 1))
3616 {
3617 /* These UNSPECs address symbols (or PLT slots) relative to the GOT
3618 (not GOT slots!). In general this will exceed the
3619 displacement range, so these values belong in the literal
3620 pool. */
3621 case UNSPEC_GOTOFF:
3622 case UNSPEC_PLTOFF:
3623 new_rtx = force_const_mem (Pmode, orig);
3624 break;
3625
3626 /* For -fPIC the GOT size might exceed the displacement
3627 range so make sure the value is in the literal pool. */
3628 case UNSPEC_GOT:
3629 if (flag_pic == 2)
3630 new_rtx = force_const_mem (Pmode, orig);
3631 break;
3632
3633 /* For @GOTENT larl is used. This is handled like local
3634 symbol refs. */
3635 case UNSPEC_GOTENT:
3636 gcc_unreachable ();
3637 break;
3638
3639 /* @PLT is OK as-is on 64-bit but must be converted to
3640 GOT-relative @PLTOFF on 31-bit. */
3641 case UNSPEC_PLT:
3642 if (!TARGET_CPU_ZARCH)
3643 {
3644 rtx temp = reg? reg : gen_reg_rtx (Pmode);
3645
3646 if (reload_in_progress || reload_completed)
3647 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3648
3649 addr = XVECEXP (addr, 0, 0);
3650 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr),
3651 UNSPEC_PLTOFF);
3652 if (addend != const0_rtx)
3653 addr = gen_rtx_PLUS (Pmode, addr, addend);
3654 addr = gen_rtx_CONST (Pmode, addr);
3655 addr = force_const_mem (Pmode, addr);
3656 emit_move_insn (temp, addr);
3657
3658 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3659 if (reg != 0)
3660 {
3661 s390_load_address (reg, new_rtx);
3662 new_rtx = reg;
3663 }
3664 }
3665 else
3666 /* On 64 bit larl can be used. This case is handled like
3667 local symbol refs. */
3668 gcc_unreachable ();
3669 break;
3670
3671 /* Everything else cannot happen. */
3672 default:
3673 gcc_unreachable ();
3674 }
3675 }
3676 else if (addend != const0_rtx)
3677 {
3678 /* Otherwise, compute the sum. */
3679
3680 rtx base = legitimize_pic_address (addr, reg);
3681 new_rtx = legitimize_pic_address (addend,
3682 base == reg ? NULL_RTX : reg);
3683 if (GET_CODE (new_rtx) == CONST_INT)
3684 new_rtx = plus_constant (Pmode, base, INTVAL (new_rtx));
3685 else
3686 {
3687 if (GET_CODE (new_rtx) == PLUS && CONSTANT_P (XEXP (new_rtx, 1)))
3688 {
3689 base = gen_rtx_PLUS (Pmode, base, XEXP (new_rtx, 0));
3690 new_rtx = XEXP (new_rtx, 1);
3691 }
3692 new_rtx = gen_rtx_PLUS (Pmode, base, new_rtx);
3693 }
3694
3695 if (GET_CODE (new_rtx) == CONST)
3696 new_rtx = XEXP (new_rtx, 0);
3697 new_rtx = force_operand (new_rtx, 0);
3698 }
3699
3700 return new_rtx;
3701 }
3702
3703 /* Load the thread pointer into a register. */
3704
3705 rtx
3706 s390_get_thread_pointer (void)
3707 {
3708 rtx tp = gen_reg_rtx (Pmode);
3709
3710 emit_move_insn (tp, gen_rtx_REG (Pmode, TP_REGNUM));
3711 mark_reg_pointer (tp, BITS_PER_WORD);
3712
3713 return tp;
3714 }
3715
3716 /* Emit a tls call insn. The call target is the SYMBOL_REF stored
3717 in s390_tls_symbol which always refers to __tls_get_offset.
3718 The returned offset is written to RESULT_REG and a USE rtx is
3719 generated for TLS_CALL. */
3720
3721 static GTY(()) rtx s390_tls_symbol;
3722
3723 static void
3724 s390_emit_tls_call_insn (rtx result_reg, rtx tls_call)
3725 {
3726 rtx insn;
3727
3728 if (!flag_pic)
3729 emit_insn (s390_load_got ());
3730
3731 if (!s390_tls_symbol)
3732 s390_tls_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tls_get_offset");
3733
3734 insn = s390_emit_call (s390_tls_symbol, tls_call, result_reg,
3735 gen_rtx_REG (Pmode, RETURN_REGNUM));
3736
3737 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), result_reg);
3738 RTL_CONST_CALL_P (insn) = 1;
3739 }
3740
3741 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
3742 this (thread-local) address. REG may be used as temporary. */
3743
3744 static rtx
3745 legitimize_tls_address (rtx addr, rtx reg)
3746 {
3747 rtx new_rtx, tls_call, temp, base, r2, insn;
3748
3749 if (GET_CODE (addr) == SYMBOL_REF)
3750 switch (tls_symbolic_operand (addr))
3751 {
3752 case TLS_MODEL_GLOBAL_DYNAMIC:
3753 start_sequence ();
3754 r2 = gen_rtx_REG (Pmode, 2);
3755 tls_call = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_TLSGD);
3756 new_rtx = gen_rtx_CONST (Pmode, tls_call);
3757 new_rtx = force_const_mem (Pmode, new_rtx);
3758 emit_move_insn (r2, new_rtx);
3759 s390_emit_tls_call_insn (r2, tls_call);
3760 insn = get_insns ();
3761 end_sequence ();
3762
3763 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_NTPOFF);
3764 temp = gen_reg_rtx (Pmode);
3765 emit_libcall_block (insn, temp, r2, new_rtx);
3766
3767 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
3768 if (reg != 0)
3769 {
3770 s390_load_address (reg, new_rtx);
3771 new_rtx = reg;
3772 }
3773 break;
3774
3775 case TLS_MODEL_LOCAL_DYNAMIC:
3776 start_sequence ();
3777 r2 = gen_rtx_REG (Pmode, 2);
3778 tls_call = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TLSLDM);
3779 new_rtx = gen_rtx_CONST (Pmode, tls_call);
3780 new_rtx = force_const_mem (Pmode, new_rtx);
3781 emit_move_insn (r2, new_rtx);
3782 s390_emit_tls_call_insn (r2, tls_call);
3783 insn = get_insns ();
3784 end_sequence ();
3785
3786 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TLSLDM_NTPOFF);
3787 temp = gen_reg_rtx (Pmode);
3788 emit_libcall_block (insn, temp, r2, new_rtx);
3789
3790 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
3791 base = gen_reg_rtx (Pmode);
3792 s390_load_address (base, new_rtx);
3793
3794 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_DTPOFF);
3795 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3796 new_rtx = force_const_mem (Pmode, new_rtx);
3797 temp = gen_reg_rtx (Pmode);
3798 emit_move_insn (temp, new_rtx);
3799
3800 new_rtx = gen_rtx_PLUS (Pmode, base, temp);
3801 if (reg != 0)
3802 {
3803 s390_load_address (reg, new_rtx);
3804 new_rtx = reg;
3805 }
3806 break;
3807
3808 case TLS_MODEL_INITIAL_EXEC:
3809 if (flag_pic == 1)
3810 {
3811 /* Assume GOT offset < 4k. This is handled the same way
3812 in both 31- and 64-bit code. */
3813
3814 if (reload_in_progress || reload_completed)
3815 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3816
3817 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTNTPOFF);
3818 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3819 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
3820 new_rtx = gen_const_mem (Pmode, new_rtx);
3821 temp = gen_reg_rtx (Pmode);
3822 emit_move_insn (temp, new_rtx);
3823 }
3824 else if (TARGET_CPU_ZARCH)
3825 {
3826 /* If the GOT offset might be >= 4k, we determine the position
3827 of the GOT entry via a PC-relative LARL. */
3828
3829 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_INDNTPOFF);
3830 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3831 temp = gen_reg_rtx (Pmode);
3832 emit_move_insn (temp, new_rtx);
3833
3834 new_rtx = gen_const_mem (Pmode, temp);
3835 temp = gen_reg_rtx (Pmode);
3836 emit_move_insn (temp, new_rtx);
3837 }
3838 else if (flag_pic)
3839 {
3840 /* If the GOT offset might be >= 4k, we have to load it
3841 from the literal pool. */
3842
3843 if (reload_in_progress || reload_completed)
3844 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3845
3846 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTNTPOFF);
3847 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3848 new_rtx = force_const_mem (Pmode, new_rtx);
3849 temp = gen_reg_rtx (Pmode);
3850 emit_move_insn (temp, new_rtx);
3851
3852 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3853 new_rtx = gen_const_mem (Pmode, new_rtx);
3854
3855 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, new_rtx, addr), UNSPEC_TLS_LOAD);
3856 temp = gen_reg_rtx (Pmode);
3857 emit_insn (gen_rtx_SET (Pmode, temp, new_rtx));
3858 }
3859 else
3860 {
3861 /* In position-dependent code, load the absolute address of
3862 the GOT entry from the literal pool. */
3863
3864 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_INDNTPOFF);
3865 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3866 new_rtx = force_const_mem (Pmode, new_rtx);
3867 temp = gen_reg_rtx (Pmode);
3868 emit_move_insn (temp, new_rtx);
3869
3870 new_rtx = temp;
3871 new_rtx = gen_const_mem (Pmode, new_rtx);
3872 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, new_rtx, addr), UNSPEC_TLS_LOAD);
3873 temp = gen_reg_rtx (Pmode);
3874 emit_insn (gen_rtx_SET (Pmode, temp, new_rtx));
3875 }
3876
3877 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
3878 if (reg != 0)
3879 {
3880 s390_load_address (reg, new_rtx);
3881 new_rtx = reg;
3882 }
3883 break;
3884
3885 case TLS_MODEL_LOCAL_EXEC:
3886 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_NTPOFF);
3887 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3888 new_rtx = force_const_mem (Pmode, new_rtx);
3889 temp = gen_reg_rtx (Pmode);
3890 emit_move_insn (temp, new_rtx);
3891
3892 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
3893 if (reg != 0)
3894 {
3895 s390_load_address (reg, new_rtx);
3896 new_rtx = reg;
3897 }
3898 break;
3899
3900 default:
3901 gcc_unreachable ();
3902 }
3903
3904 else if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == UNSPEC)
3905 {
3906 switch (XINT (XEXP (addr, 0), 1))
3907 {
3908 case UNSPEC_INDNTPOFF:
3909 gcc_assert (TARGET_CPU_ZARCH);
3910 new_rtx = addr;
3911 break;
3912
3913 default:
3914 gcc_unreachable ();
3915 }
3916 }
3917
3918 else if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == PLUS
3919 && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
3920 {
3921 new_rtx = XEXP (XEXP (addr, 0), 0);
3922 if (GET_CODE (new_rtx) != SYMBOL_REF)
3923 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3924
3925 new_rtx = legitimize_tls_address (new_rtx, reg);
3926 new_rtx = plus_constant (Pmode, new_rtx,
3927 INTVAL (XEXP (XEXP (addr, 0), 1)));
3928 new_rtx = force_operand (new_rtx, 0);
3929 }
3930
3931 else
3932 gcc_unreachable (); /* for now ... */
3933
3934 return new_rtx;
3935 }
3936
3937 /* Emit insns making the address in operands[1] valid for a standard
3938 move to operands[0]. operands[1] is replaced by an address which
3939 should be used instead of the former RTX to emit the move
3940 pattern. */
3941
3942 void
3943 emit_symbolic_move (rtx *operands)
3944 {
3945 rtx temp = !can_create_pseudo_p () ? operands[0] : gen_reg_rtx (Pmode);
3946
3947 if (GET_CODE (operands[0]) == MEM)
3948 operands[1] = force_reg (Pmode, operands[1]);
3949 else if (TLS_SYMBOLIC_CONST (operands[1]))
3950 operands[1] = legitimize_tls_address (operands[1], temp);
3951 else if (flag_pic)
3952 operands[1] = legitimize_pic_address (operands[1], temp);
3953 }
3954
3955 /* Try machine-dependent ways of modifying an illegitimate address X
3956 to be legitimate. If we find one, return the new, valid address.
3957
3958 OLDX is the address as it was before break_out_memory_refs was called.
3959 In some cases it is useful to look at this to decide what needs to be done.
3960
3961 MODE is the mode of the operand pointed to by X.
3962
3963 When -fpic is used, special handling is needed for symbolic references.
3964 See comments by legitimize_pic_address for details. */
3965
3966 static rtx
3967 s390_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
3968 enum machine_mode mode ATTRIBUTE_UNUSED)
3969 {
3970 rtx constant_term = const0_rtx;
3971
3972 if (TLS_SYMBOLIC_CONST (x))
3973 {
3974 x = legitimize_tls_address (x, 0);
3975
3976 if (s390_legitimate_address_p (mode, x, FALSE))
3977 return x;
3978 }
3979 else if (GET_CODE (x) == PLUS
3980 && (TLS_SYMBOLIC_CONST (XEXP (x, 0))
3981 || TLS_SYMBOLIC_CONST (XEXP (x, 1))))
3982 {
3983 return x;
3984 }
3985 else if (flag_pic)
3986 {
3987 if (SYMBOLIC_CONST (x)
3988 || (GET_CODE (x) == PLUS
3989 && (SYMBOLIC_CONST (XEXP (x, 0))
3990 || SYMBOLIC_CONST (XEXP (x, 1)))))
3991 x = legitimize_pic_address (x, 0);
3992
3993 if (s390_legitimate_address_p (mode, x, FALSE))
3994 return x;
3995 }
3996
3997 x = eliminate_constant_term (x, &constant_term);
3998
3999 /* Optimize loading of large displacements by splitting them
4000 	 into a multiple of 4K and the remainder; this allows the
4001 former to be CSE'd if possible.
4002
4003 Don't do this if the displacement is added to a register
4004 pointing into the stack frame, as the offsets will
4005 change later anyway. */
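      /* Illustrative example (values chosen here for exposition, not taken
	 from the source): a displacement of 0x12345 is split by the code
	 below into lower = 0x12345 & 0xfff = 0x345 and
	 upper = 0x12345 ^ 0x345 = 0x12000.  The 4K-aligned part 0x12000 is
	 forced into a register (and may be CSE'd with other occurrences),
	 while 0x345 remains as an in-range displacement.  */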
4006
4007 if (GET_CODE (constant_term) == CONST_INT
4008 && !TARGET_LONG_DISPLACEMENT
4009 && !DISP_IN_RANGE (INTVAL (constant_term))
4010 && !(REG_P (x) && REGNO_PTR_FRAME_P (REGNO (x))))
4011 {
4012 HOST_WIDE_INT lower = INTVAL (constant_term) & 0xfff;
4013 HOST_WIDE_INT upper = INTVAL (constant_term) ^ lower;
4014
4015 rtx temp = gen_reg_rtx (Pmode);
4016 rtx val = force_operand (GEN_INT (upper), temp);
4017 if (val != temp)
4018 emit_move_insn (temp, val);
4019
4020 x = gen_rtx_PLUS (Pmode, x, temp);
4021 constant_term = GEN_INT (lower);
4022 }
4023
4024 if (GET_CODE (x) == PLUS)
4025 {
4026 if (GET_CODE (XEXP (x, 0)) == REG)
4027 {
4028 rtx temp = gen_reg_rtx (Pmode);
4029 rtx val = force_operand (XEXP (x, 1), temp);
4030 if (val != temp)
4031 emit_move_insn (temp, val);
4032
4033 x = gen_rtx_PLUS (Pmode, XEXP (x, 0), temp);
4034 }
4035
4036 else if (GET_CODE (XEXP (x, 1)) == REG)
4037 {
4038 rtx temp = gen_reg_rtx (Pmode);
4039 rtx val = force_operand (XEXP (x, 0), temp);
4040 if (val != temp)
4041 emit_move_insn (temp, val);
4042
4043 x = gen_rtx_PLUS (Pmode, temp, XEXP (x, 1));
4044 }
4045 }
4046
4047 if (constant_term != const0_rtx)
4048 x = gen_rtx_PLUS (Pmode, x, constant_term);
4049
4050 return x;
4051 }
4052
4053 /* Try a machine-dependent way of reloading an illegitimate address
4054    operand AD.  If we find one, push the reload and return the new address.
4055
4056 MODE is the mode of the enclosing MEM. OPNUM is the operand number
4057 and TYPE is the reload type of the current reload. */
4058
4059 rtx
4060 legitimize_reload_address (rtx ad, enum machine_mode mode ATTRIBUTE_UNUSED,
4061 int opnum, int type)
4062 {
4063 if (!optimize || TARGET_LONG_DISPLACEMENT)
4064 return NULL_RTX;
4065
4066 if (GET_CODE (ad) == PLUS)
4067 {
4068 rtx tem = simplify_binary_operation (PLUS, Pmode,
4069 XEXP (ad, 0), XEXP (ad, 1));
4070 if (tem)
4071 ad = tem;
4072 }
4073
4074 if (GET_CODE (ad) == PLUS
4075 && GET_CODE (XEXP (ad, 0)) == REG
4076 && GET_CODE (XEXP (ad, 1)) == CONST_INT
4077 && !DISP_IN_RANGE (INTVAL (XEXP (ad, 1))))
4078 {
4079 HOST_WIDE_INT lower = INTVAL (XEXP (ad, 1)) & 0xfff;
4080 HOST_WIDE_INT upper = INTVAL (XEXP (ad, 1)) ^ lower;
4081 rtx cst, tem, new_rtx;
4082
4083 cst = GEN_INT (upper);
4084 if (!legitimate_reload_constant_p (cst))
4085 cst = force_const_mem (Pmode, cst);
4086
4087 tem = gen_rtx_PLUS (Pmode, XEXP (ad, 0), cst);
4088 new_rtx = gen_rtx_PLUS (Pmode, tem, GEN_INT (lower));
4089
4090 push_reload (XEXP (tem, 1), 0, &XEXP (tem, 1), 0,
4091 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
4092 opnum, (enum reload_type) type);
4093 return new_rtx;
4094 }
4095
4096 return NULL_RTX;
4097 }
4098
4099 /* Emit code to move LEN bytes from SRC to DST. */
4100
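/* Informal sketch of the strategy used below (a sketch, not a normative
   description):
   - constant LEN <= 256: a single movmem_short, whose length operand is
     LEN - 1 since the underlying block-move length field is assumed to
     encode "length minus one";
   - TARGET_MVCLE: a single movmem_long (MVCLE-style) move;
   - otherwise: a loop moving 256-byte blocks (with prefetching on z10 for
     large or non-constant lengths), followed by one final partial block.  */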
4101 bool
4102 s390_expand_movmem (rtx dst, rtx src, rtx len)
4103 {
4104   /* When tuning for z10 or higher we rely on the Glibc functions to
4105      do the right thing; inline code is generated only for constant
4106      lengths below 64k.  */
4107 if (s390_tune >= PROCESSOR_2097_Z10
4108 && (GET_CODE (len) != CONST_INT || INTVAL (len) > (1<<16)))
4109 return false;
4110
4111 if (GET_CODE (len) == CONST_INT && INTVAL (len) >= 0 && INTVAL (len) <= 256)
4112 {
4113 if (INTVAL (len) > 0)
4114 emit_insn (gen_movmem_short (dst, src, GEN_INT (INTVAL (len) - 1)));
4115 }
4116
4117 else if (TARGET_MVCLE)
4118 {
4119 emit_insn (gen_movmem_long (dst, src, convert_to_mode (Pmode, len, 1)));
4120 }
4121
4122 else
4123 {
4124 rtx dst_addr, src_addr, count, blocks, temp;
4125 rtx loop_start_label = gen_label_rtx ();
4126 rtx loop_end_label = gen_label_rtx ();
4127 rtx end_label = gen_label_rtx ();
4128 enum machine_mode mode;
4129
4130 mode = GET_MODE (len);
4131 if (mode == VOIDmode)
4132 mode = Pmode;
4133
4134 dst_addr = gen_reg_rtx (Pmode);
4135 src_addr = gen_reg_rtx (Pmode);
4136 count = gen_reg_rtx (mode);
4137 blocks = gen_reg_rtx (mode);
4138
4139 convert_move (count, len, 1);
4140 emit_cmp_and_jump_insns (count, const0_rtx,
4141 EQ, NULL_RTX, mode, 1, end_label);
4142
4143 emit_move_insn (dst_addr, force_operand (XEXP (dst, 0), NULL_RTX));
4144 emit_move_insn (src_addr, force_operand (XEXP (src, 0), NULL_RTX));
4145 dst = change_address (dst, VOIDmode, dst_addr);
4146 src = change_address (src, VOIDmode, src_addr);
4147
4148 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
4149 OPTAB_DIRECT);
4150 if (temp != count)
4151 emit_move_insn (count, temp);
4152
4153 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
4154 OPTAB_DIRECT);
4155 if (temp != blocks)
4156 emit_move_insn (blocks, temp);
4157
4158 emit_cmp_and_jump_insns (blocks, const0_rtx,
4159 EQ, NULL_RTX, mode, 1, loop_end_label);
4160
4161 emit_label (loop_start_label);
4162
4163 if (TARGET_Z10
4164 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 768))
4165 {
4166 rtx prefetch;
4167
4168 /* Issue a read prefetch for the +3 cache line. */
4169 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, src_addr, GEN_INT (768)),
4170 const0_rtx, const0_rtx);
4171 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4172 emit_insn (prefetch);
4173
4174 /* Issue a write prefetch for the +3 cache line. */
4175 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (768)),
4176 const1_rtx, const0_rtx);
4177 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4178 emit_insn (prefetch);
4179 }
4180
4181 emit_insn (gen_movmem_short (dst, src, GEN_INT (255)));
4182 s390_load_address (dst_addr,
4183 gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (256)));
4184 s390_load_address (src_addr,
4185 gen_rtx_PLUS (Pmode, src_addr, GEN_INT (256)));
4186
4187 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
4188 OPTAB_DIRECT);
4189 if (temp != blocks)
4190 emit_move_insn (blocks, temp);
4191
4192 emit_cmp_and_jump_insns (blocks, const0_rtx,
4193 EQ, NULL_RTX, mode, 1, loop_end_label);
4194
4195 emit_jump (loop_start_label);
4196 emit_label (loop_end_label);
4197
4198 emit_insn (gen_movmem_short (dst, src,
4199 convert_to_mode (Pmode, count, 1)));
4200 emit_label (end_label);
4201 }
4202 return true;
4203 }
4204
4205 /* Emit code to set LEN bytes at DST to VAL.
4206 Make use of clrmem if VAL is zero. */
4207
4208 void
4209 s390_expand_setmem (rtx dst, rtx len, rtx val)
4210 {
4211 if (GET_CODE (len) == CONST_INT && INTVAL (len) == 0)
4212 return;
4213
4214 gcc_assert (GET_CODE (val) == CONST_INT || GET_MODE (val) == QImode);
4215
4216 if (GET_CODE (len) == CONST_INT && INTVAL (len) > 0 && INTVAL (len) <= 257)
4217 {
4218 if (val == const0_rtx && INTVAL (len) <= 256)
4219 emit_insn (gen_clrmem_short (dst, GEN_INT (INTVAL (len) - 1)));
4220 else
4221 {
4222 /* Initialize memory by storing the first byte. */
4223 emit_move_insn (adjust_address (dst, QImode, 0), val);
4224
4225 if (INTVAL (len) > 1)
4226 {
4227 	      /* Initiate a 1-byte overlap move.
4228 	         The first byte of DST is propagated through DSTP1.
4229 	         Prepare a movmem for:  DST+1 = DST (length = LEN - 1).
4230 	         DST is set to size 1 so the rest of the memory location
4231 	         does not count as a source operand. */
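	      /* Illustrative example (hypothetical values): for LEN = 5 and
	         VAL = 0x42, the byte store above sets DST[0] = 0x42, and the
	         overlapping move DST+1(4) <- DST then copies byte by byte,
	         so DST[1] = DST[0], DST[2] = DST[1], ..., leaving all five
	         bytes equal to 0x42.  This relies on the block move being
	         performed left to right one byte at a time.  */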
4232 rtx dstp1 = adjust_address (dst, VOIDmode, 1);
4233 set_mem_size (dst, 1);
4234
4235 emit_insn (gen_movmem_short (dstp1, dst,
4236 GEN_INT (INTVAL (len) - 2)));
4237 }
4238 }
4239 }
4240
4241 else if (TARGET_MVCLE)
4242 {
4243 val = force_not_mem (convert_modes (Pmode, QImode, val, 1));
4244 emit_insn (gen_setmem_long (dst, convert_to_mode (Pmode, len, 1), val));
4245 }
4246
4247 else
4248 {
4249 rtx dst_addr, count, blocks, temp, dstp1 = NULL_RTX;
4250 rtx loop_start_label = gen_label_rtx ();
4251 rtx loop_end_label = gen_label_rtx ();
4252 rtx end_label = gen_label_rtx ();
4253 enum machine_mode mode;
4254
4255 mode = GET_MODE (len);
4256 if (mode == VOIDmode)
4257 mode = Pmode;
4258
4259 dst_addr = gen_reg_rtx (Pmode);
4260 count = gen_reg_rtx (mode);
4261 blocks = gen_reg_rtx (mode);
4262
4263 convert_move (count, len, 1);
4264 emit_cmp_and_jump_insns (count, const0_rtx,
4265 EQ, NULL_RTX, mode, 1, end_label);
4266
4267 emit_move_insn (dst_addr, force_operand (XEXP (dst, 0), NULL_RTX));
4268 dst = change_address (dst, VOIDmode, dst_addr);
4269
4270 if (val == const0_rtx)
4271 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
4272 OPTAB_DIRECT);
4273 else
4274 {
4275 dstp1 = adjust_address (dst, VOIDmode, 1);
4276 set_mem_size (dst, 1);
4277
4278 /* Initialize memory by storing the first byte. */
4279 emit_move_insn (adjust_address (dst, QImode, 0), val);
4280
4281 /* If count is 1 we are done. */
4282 emit_cmp_and_jump_insns (count, const1_rtx,
4283 EQ, NULL_RTX, mode, 1, end_label);
4284
4285 temp = expand_binop (mode, add_optab, count, GEN_INT (-2), count, 1,
4286 OPTAB_DIRECT);
4287 }
4288 if (temp != count)
4289 emit_move_insn (count, temp);
4290
4291 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
4292 OPTAB_DIRECT);
4293 if (temp != blocks)
4294 emit_move_insn (blocks, temp);
4295
4296 emit_cmp_and_jump_insns (blocks, const0_rtx,
4297 EQ, NULL_RTX, mode, 1, loop_end_label);
4298
4299 emit_label (loop_start_label);
4300
4301 if (TARGET_Z10
4302 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 1024))
4303 {
4304 /* Issue a write prefetch for the +4 cache line. */
4305 rtx prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, dst_addr,
4306 GEN_INT (1024)),
4307 const1_rtx, const0_rtx);
4308 emit_insn (prefetch);
4309 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4310 }
4311
4312 if (val == const0_rtx)
4313 emit_insn (gen_clrmem_short (dst, GEN_INT (255)));
4314 else
4315 emit_insn (gen_movmem_short (dstp1, dst, GEN_INT (255)));
4316 s390_load_address (dst_addr,
4317 gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (256)));
4318
4319 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
4320 OPTAB_DIRECT);
4321 if (temp != blocks)
4322 emit_move_insn (blocks, temp);
4323
4324 emit_cmp_and_jump_insns (blocks, const0_rtx,
4325 EQ, NULL_RTX, mode, 1, loop_end_label);
4326
4327 emit_jump (loop_start_label);
4328 emit_label (loop_end_label);
4329
4330 if (val == const0_rtx)
4331 emit_insn (gen_clrmem_short (dst, convert_to_mode (Pmode, count, 1)));
4332 else
4333 emit_insn (gen_movmem_short (dstp1, dst, convert_to_mode (Pmode, count, 1)));
4334 emit_label (end_label);
4335 }
4336 }
4337
4338 /* Emit code to compare LEN bytes at OP0 with those at OP1,
4339 and return the result in TARGET. */
4340
4341 bool
4342 s390_expand_cmpmem (rtx target, rtx op0, rtx op1, rtx len)
4343 {
4344 rtx ccreg = gen_rtx_REG (CCUmode, CC_REGNUM);
4345 rtx tmp;
4346
4347   /* When tuning for z10 or higher we rely on the Glibc functions to
4348      do the right thing; inline code is generated only for constant
4349      lengths below 64k.  */
4350 if (s390_tune >= PROCESSOR_2097_Z10
4351 && (GET_CODE (len) != CONST_INT || INTVAL (len) > (1<<16)))
4352 return false;
4353
4354 /* As the result of CMPINT is inverted compared to what we need,
4355 we have to swap the operands. */
4356 tmp = op0; op0 = op1; op1 = tmp;
4357
4358 if (GET_CODE (len) == CONST_INT && INTVAL (len) >= 0 && INTVAL (len) <= 256)
4359 {
4360 if (INTVAL (len) > 0)
4361 {
4362 emit_insn (gen_cmpmem_short (op0, op1, GEN_INT (INTVAL (len) - 1)));
4363 emit_insn (gen_cmpint (target, ccreg));
4364 }
4365 else
4366 emit_move_insn (target, const0_rtx);
4367 }
4368 else if (TARGET_MVCLE)
4369 {
4370 emit_insn (gen_cmpmem_long (op0, op1, convert_to_mode (Pmode, len, 1)));
4371 emit_insn (gen_cmpint (target, ccreg));
4372 }
4373 else
4374 {
4375 rtx addr0, addr1, count, blocks, temp;
4376 rtx loop_start_label = gen_label_rtx ();
4377 rtx loop_end_label = gen_label_rtx ();
4378 rtx end_label = gen_label_rtx ();
4379 enum machine_mode mode;
4380
4381 mode = GET_MODE (len);
4382 if (mode == VOIDmode)
4383 mode = Pmode;
4384
4385 addr0 = gen_reg_rtx (Pmode);
4386 addr1 = gen_reg_rtx (Pmode);
4387 count = gen_reg_rtx (mode);
4388 blocks = gen_reg_rtx (mode);
4389
4390 convert_move (count, len, 1);
4391 emit_cmp_and_jump_insns (count, const0_rtx,
4392 EQ, NULL_RTX, mode, 1, end_label);
4393
4394 emit_move_insn (addr0, force_operand (XEXP (op0, 0), NULL_RTX));
4395 emit_move_insn (addr1, force_operand (XEXP (op1, 0), NULL_RTX));
4396 op0 = change_address (op0, VOIDmode, addr0);
4397 op1 = change_address (op1, VOIDmode, addr1);
4398
4399 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
4400 OPTAB_DIRECT);
4401 if (temp != count)
4402 emit_move_insn (count, temp);
4403
4404 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
4405 OPTAB_DIRECT);
4406 if (temp != blocks)
4407 emit_move_insn (blocks, temp);
4408
4409 emit_cmp_and_jump_insns (blocks, const0_rtx,
4410 EQ, NULL_RTX, mode, 1, loop_end_label);
4411
4412 emit_label (loop_start_label);
4413
4414 if (TARGET_Z10
4415 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 512))
4416 {
4417 rtx prefetch;
4418
4419 /* Issue a read prefetch for the +2 cache line of operand 1. */
4420 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, addr0, GEN_INT (512)),
4421 const0_rtx, const0_rtx);
4422 emit_insn (prefetch);
4423 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4424
4425 /* Issue a read prefetch for the +2 cache line of operand 2. */
4426 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, addr1, GEN_INT (512)),
4427 const0_rtx, const0_rtx);
4428 emit_insn (prefetch);
4429 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4430 }
4431
4432 emit_insn (gen_cmpmem_short (op0, op1, GEN_INT (255)));
4433 temp = gen_rtx_NE (VOIDmode, ccreg, const0_rtx);
4434 temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
4435 gen_rtx_LABEL_REF (VOIDmode, end_label), pc_rtx);
4436 temp = gen_rtx_SET (VOIDmode, pc_rtx, temp);
4437 emit_jump_insn (temp);
4438
4439 s390_load_address (addr0,
4440 gen_rtx_PLUS (Pmode, addr0, GEN_INT (256)));
4441 s390_load_address (addr1,
4442 gen_rtx_PLUS (Pmode, addr1, GEN_INT (256)));
4443
4444 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
4445 OPTAB_DIRECT);
4446 if (temp != blocks)
4447 emit_move_insn (blocks, temp);
4448
4449 emit_cmp_and_jump_insns (blocks, const0_rtx,
4450 EQ, NULL_RTX, mode, 1, loop_end_label);
4451
4452 emit_jump (loop_start_label);
4453 emit_label (loop_end_label);
4454
4455 emit_insn (gen_cmpmem_short (op0, op1,
4456 convert_to_mode (Pmode, count, 1)));
4457 emit_label (end_label);
4458
4459 emit_insn (gen_cmpint (target, ccreg));
4460 }
4461 return true;
4462 }
4463
4464
4465 /* Expand conditional increment or decrement using alc/slb instructions.
4466 Should generate code setting DST to either SRC or SRC + INCREMENT,
4467 depending on the result of the comparison CMP_OP0 CMP_CODE CMP_OP1.
4468 Returns true if successful, false otherwise.
4469
4470 That makes it possible to implement some if-constructs without jumps e.g.:
4471 (borrow = CC0 | CC1 and carry = CC2 | CC3)
4472 unsigned int a, b, c;
4473 if (a < b) c++; -> CCU b > a -> CC2; c += carry;
4474 if (a < b) c--; -> CCL3 a - b -> borrow; c -= borrow;
4475 if (a <= b) c++; -> CCL3 b - a -> borrow; c += carry;
4476 if (a <= b) c--; -> CCU a <= b -> borrow; c -= borrow;
4477
4478 Checks for EQ and NE with a nonzero value need an additional xor e.g.:
4479 if (a == b) c++; -> CCL3 a ^= b; 0 - a -> borrow; c += carry;
4480 if (a == b) c--; -> CCU a ^= b; a <= 0 -> CC0 | CC1; c -= borrow;
4481 if (a != b) c++; -> CCU a ^= b; a > 0 -> CC2; c += carry;
4482 if (a != b) c--; -> CCL3 a ^= b; 0 - a -> borrow; c -= borrow; */
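/* Illustrative sketch for the first case above (informal, not the exact RTL
   or assembly that is emitted): for
       unsigned int a, b, c;   if (a < b) c++;
   the expansion performs a logical compare of b with a in CCUmode, so that
   the condition a < b shows up as the carry, and then adds that carry to c
   with an add-logical-with-carry instruction, roughly
       compare_logical (b, a);        carry = (b > a)
       c = c + 0 + carry;             ALC-style addition
   so no branch is needed.  */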
4483
4484 bool
4485 s390_expand_addcc (enum rtx_code cmp_code, rtx cmp_op0, rtx cmp_op1,
4486 rtx dst, rtx src, rtx increment)
4487 {
4488 enum machine_mode cmp_mode;
4489 enum machine_mode cc_mode;
4490 rtx op_res;
4491 rtx insn;
4492 rtvec p;
4493 int ret;
4494
4495 if ((GET_MODE (cmp_op0) == SImode || GET_MODE (cmp_op0) == VOIDmode)
4496 && (GET_MODE (cmp_op1) == SImode || GET_MODE (cmp_op1) == VOIDmode))
4497 cmp_mode = SImode;
4498 else if ((GET_MODE (cmp_op0) == DImode || GET_MODE (cmp_op0) == VOIDmode)
4499 && (GET_MODE (cmp_op1) == DImode || GET_MODE (cmp_op1) == VOIDmode))
4500 cmp_mode = DImode;
4501 else
4502 return false;
4503
4504 /* Try ADD LOGICAL WITH CARRY. */
4505 if (increment == const1_rtx)
4506 {
4507 /* Determine CC mode to use. */
4508 if (cmp_code == EQ || cmp_code == NE)
4509 {
4510 if (cmp_op1 != const0_rtx)
4511 {
4512 cmp_op0 = expand_simple_binop (cmp_mode, XOR, cmp_op0, cmp_op1,
4513 NULL_RTX, 0, OPTAB_WIDEN);
4514 cmp_op1 = const0_rtx;
4515 }
4516
4517 cmp_code = cmp_code == EQ ? LEU : GTU;
4518 }
4519
4520 if (cmp_code == LTU || cmp_code == LEU)
4521 {
4522 rtx tem = cmp_op0;
4523 cmp_op0 = cmp_op1;
4524 cmp_op1 = tem;
4525 cmp_code = swap_condition (cmp_code);
4526 }
4527
4528 switch (cmp_code)
4529 {
4530 case GTU:
4531 cc_mode = CCUmode;
4532 break;
4533
4534 case GEU:
4535 cc_mode = CCL3mode;
4536 break;
4537
4538 default:
4539 return false;
4540 }
4541
4542 /* Emit comparison instruction pattern. */
4543 if (!register_operand (cmp_op0, cmp_mode))
4544 cmp_op0 = force_reg (cmp_mode, cmp_op0);
4545
4546 insn = gen_rtx_SET (VOIDmode, gen_rtx_REG (cc_mode, CC_REGNUM),
4547 gen_rtx_COMPARE (cc_mode, cmp_op0, cmp_op1));
4548 /* We use insn_invalid_p here to add clobbers if required. */
4549 ret = insn_invalid_p (emit_insn (insn), false);
4550 gcc_assert (!ret);
4551
4552 /* Emit ALC instruction pattern. */
4553 op_res = gen_rtx_fmt_ee (cmp_code, GET_MODE (dst),
4554 gen_rtx_REG (cc_mode, CC_REGNUM),
4555 const0_rtx);
4556
4557 if (src != const0_rtx)
4558 {
4559 if (!register_operand (src, GET_MODE (dst)))
4560 src = force_reg (GET_MODE (dst), src);
4561
4562 op_res = gen_rtx_PLUS (GET_MODE (dst), op_res, src);
4563 op_res = gen_rtx_PLUS (GET_MODE (dst), op_res, const0_rtx);
4564 }
4565
4566 p = rtvec_alloc (2);
4567 RTVEC_ELT (p, 0) =
4568 gen_rtx_SET (VOIDmode, dst, op_res);
4569 RTVEC_ELT (p, 1) =
4570 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
4571 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
4572
4573 return true;
4574 }
4575
4576 /* Try SUBTRACT LOGICAL WITH BORROW. */
4577 if (increment == constm1_rtx)
4578 {
4579 /* Determine CC mode to use. */
4580 if (cmp_code == EQ || cmp_code == NE)
4581 {
4582 if (cmp_op1 != const0_rtx)
4583 {
4584 cmp_op0 = expand_simple_binop (cmp_mode, XOR, cmp_op0, cmp_op1,
4585 NULL_RTX, 0, OPTAB_WIDEN);
4586 cmp_op1 = const0_rtx;
4587 }
4588
4589 cmp_code = cmp_code == EQ ? LEU : GTU;
4590 }
4591
4592 if (cmp_code == GTU || cmp_code == GEU)
4593 {
4594 rtx tem = cmp_op0;
4595 cmp_op0 = cmp_op1;
4596 cmp_op1 = tem;
4597 cmp_code = swap_condition (cmp_code);
4598 }
4599
4600 switch (cmp_code)
4601 {
4602 case LEU:
4603 cc_mode = CCUmode;
4604 break;
4605
4606 case LTU:
4607 cc_mode = CCL3mode;
4608 break;
4609
4610 default:
4611 return false;
4612 }
4613
4614 /* Emit comparison instruction pattern. */
4615 if (!register_operand (cmp_op0, cmp_mode))
4616 cmp_op0 = force_reg (cmp_mode, cmp_op0);
4617
4618 insn = gen_rtx_SET (VOIDmode, gen_rtx_REG (cc_mode, CC_REGNUM),
4619 gen_rtx_COMPARE (cc_mode, cmp_op0, cmp_op1));
4620 /* We use insn_invalid_p here to add clobbers if required. */
4621 ret = insn_invalid_p (emit_insn (insn), false);
4622 gcc_assert (!ret);
4623
4624 /* Emit SLB instruction pattern. */
4625 if (!register_operand (src, GET_MODE (dst)))
4626 src = force_reg (GET_MODE (dst), src);
4627
4628 op_res = gen_rtx_MINUS (GET_MODE (dst),
4629 gen_rtx_MINUS (GET_MODE (dst), src, const0_rtx),
4630 gen_rtx_fmt_ee (cmp_code, GET_MODE (dst),
4631 gen_rtx_REG (cc_mode, CC_REGNUM),
4632 const0_rtx));
4633 p = rtvec_alloc (2);
4634 RTVEC_ELT (p, 0) =
4635 gen_rtx_SET (VOIDmode, dst, op_res);
4636 RTVEC_ELT (p, 1) =
4637 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
4638 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
4639
4640 return true;
4641 }
4642
4643 return false;
4644 }
4645
4646 /* Expand code for the insv template. Return true if successful. */
4647
4648 bool
4649 s390_expand_insv (rtx dest, rtx op1, rtx op2, rtx src)
4650 {
4651 int bitsize = INTVAL (op1);
4652 int bitpos = INTVAL (op2);
4653 enum machine_mode mode = GET_MODE (dest);
4654 enum machine_mode smode;
4655 int smode_bsize, mode_bsize;
4656 rtx op, clobber;
4657
4658   if (bitsize + bitpos > GET_MODE_BITSIZE (mode))
4659 return false;
4660
4661 /* Generate INSERT IMMEDIATE (IILL et al). */
4662 /* (set (ze (reg)) (const_int)). */
4663 if (TARGET_ZARCH
4664 && register_operand (dest, word_mode)
4665 && (bitpos % 16) == 0
4666 && (bitsize % 16) == 0
4667 && const_int_operand (src, VOIDmode))
4668 {
4669 HOST_WIDE_INT val = INTVAL (src);
4670 int regpos = bitpos + bitsize;
4671
4672 while (regpos > bitpos)
4673 {
4674 enum machine_mode putmode;
4675 int putsize;
4676
4677 if (TARGET_EXTIMM && (regpos % 32 == 0) && (regpos >= bitpos + 32))
4678 putmode = SImode;
4679 else
4680 putmode = HImode;
4681
4682 putsize = GET_MODE_BITSIZE (putmode);
4683 regpos -= putsize;
4684 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest,
4685 GEN_INT (putsize),
4686 GEN_INT (regpos)),
4687 gen_int_mode (val, putmode));
4688 val >>= putsize;
4689 }
4690 gcc_assert (regpos == bitpos);
4691 return true;
4692 }
4693
4694 smode = smallest_mode_for_size (bitsize, MODE_INT);
4695 smode_bsize = GET_MODE_BITSIZE (smode);
4696 mode_bsize = GET_MODE_BITSIZE (mode);
4697
4698 /* Generate STORE CHARACTERS UNDER MASK (STCM et al). */
4699 if (bitpos == 0
4700 && (bitsize % BITS_PER_UNIT) == 0
4701 && MEM_P (dest)
4702 && (register_operand (src, word_mode)
4703 || const_int_operand (src, VOIDmode)))
4704 {
4705 /* Emit standard pattern if possible. */
4706 if (smode_bsize == bitsize)
4707 {
4708 emit_move_insn (adjust_address (dest, smode, 0),
4709 gen_lowpart (smode, src));
4710 return true;
4711 }
4712
4713 /* (set (ze (mem)) (const_int)). */
4714 else if (const_int_operand (src, VOIDmode))
4715 {
4716 int size = bitsize / BITS_PER_UNIT;
4717 rtx src_mem = adjust_address (force_const_mem (word_mode, src),
4718 BLKmode,
4719 UNITS_PER_WORD - size);
4720
4721 dest = adjust_address (dest, BLKmode, 0);
4722 set_mem_size (dest, size);
4723 s390_expand_movmem (dest, src_mem, GEN_INT (size));
4724 return true;
4725 }
4726
4727 /* (set (ze (mem)) (reg)). */
4728 else if (register_operand (src, word_mode))
4729 {
4730 if (bitsize <= 32)
4731 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest, op1,
4732 const0_rtx), src);
4733 else
4734 {
4735 /* Emit st,stcmh sequence. */
4736 int stcmh_width = bitsize - 32;
4737 int size = stcmh_width / BITS_PER_UNIT;
4738
4739 emit_move_insn (adjust_address (dest, SImode, size),
4740 gen_lowpart (SImode, src));
4741 set_mem_size (dest, size);
4742 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest,
4743 GEN_INT (stcmh_width),
4744 const0_rtx),
4745 gen_rtx_LSHIFTRT (word_mode, src, GEN_INT (32)));
4746 }
4747 return true;
4748 }
4749 }
4750
4751 /* Generate INSERT CHARACTERS UNDER MASK (IC, ICM et al). */
4752 if ((bitpos % BITS_PER_UNIT) == 0
4753 && (bitsize % BITS_PER_UNIT) == 0
4754 && (bitpos & 32) == ((bitpos + bitsize - 1) & 32)
4755 && MEM_P (src)
4756 && (mode == DImode || mode == SImode)
4757 && register_operand (dest, mode))
4758 {
4759 /* Emit a strict_low_part pattern if possible. */
4760 if (smode_bsize == bitsize && bitpos == mode_bsize - smode_bsize)
4761 {
4762 op = gen_rtx_STRICT_LOW_PART (VOIDmode, gen_lowpart (smode, dest));
4763 op = gen_rtx_SET (VOIDmode, op, gen_lowpart (smode, src));
4764 clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
4765 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clobber)));
4766 return true;
4767 }
4768
4769 /* ??? There are more powerful versions of ICM that are not
4770 completely represented in the md file. */
4771 }
4772
4773 /* For z10, generate ROTATE THEN INSERT SELECTED BITS (RISBG et al). */
4774 if (TARGET_Z10 && (mode == DImode || mode == SImode))
4775 {
4776 enum machine_mode mode_s = GET_MODE (src);
4777
4778 if (mode_s == VOIDmode)
4779 {
4780 /* Assume const_int etc already in the proper mode. */
4781 src = force_reg (mode, src);
4782 }
4783 else if (mode_s != mode)
4784 {
4785 gcc_assert (GET_MODE_BITSIZE (mode_s) >= bitsize);
4786 src = force_reg (mode_s, src);
4787 src = gen_lowpart (mode, src);
4788 }
4789
4790 op = gen_rtx_ZERO_EXTRACT (mode, dest, op1, op2),
4791 op = gen_rtx_SET (VOIDmode, op, src);
4792
4793 if (!TARGET_ZEC12)
4794 {
4795 clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
4796 op = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clobber));
4797 }
4798 emit_insn (op);
4799
4800 return true;
4801 }
4802
4803 return false;
4804 }
4805
4806 /* A subroutine of s390_expand_cs_hqi and s390_expand_atomic which returns a
4807 register that holds VAL of mode MODE shifted by COUNT bits. */
4808
4809 static inline rtx
4810 s390_expand_mask_and_shift (rtx val, enum machine_mode mode, rtx count)
4811 {
4812 val = expand_simple_binop (SImode, AND, val, GEN_INT (GET_MODE_MASK (mode)),
4813 NULL_RTX, 1, OPTAB_DIRECT);
4814 return expand_simple_binop (SImode, ASHIFT, val, count,
4815 NULL_RTX, 1, OPTAB_DIRECT);
4816 }
4817
4818 /* Structure to hold the initial parameters for a compare_and_swap operation
4819 in HImode and QImode. */
4820
4821 struct alignment_context
4822 {
4823 rtx memsi; /* SI aligned memory location. */
4824 rtx shift; /* Bit offset with regard to lsb. */
4825 rtx modemask; /* Mask of the HQImode shifted by SHIFT bits. */
4826 rtx modemaski; /* ~modemask */
4827   bool aligned;	  /* True if memory is aligned, false otherwise.  */
4828 };
4829
4830 /* A subroutine of s390_expand_cs_hqi and s390_expand_atomic to initialize
4831    the structure AC for transparent simplification if the memory alignment is
4832    known to be at least 32 bits.  MEM is the memory location for the actual operation
4833 and MODE its mode. */
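/* Worked example (illustrative, assuming a big-endian word and an unknown
   alignment): for a QImode access whose address has byte offset 1 within its
   containing word, the code below computes
       shift     = ((4 - 1) - 1) * 8 = 16 bits,
       modemask  = 0xff << 16 = 0x00ff0000,
       modemaski = ~modemask,
   i.e. the byte of interest occupies bits 16..23 of the SImode word loaded
   from the aligned address.  */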
4834
4835 static void
4836 init_alignment_context (struct alignment_context *ac, rtx mem,
4837 enum machine_mode mode)
4838 {
4839 ac->shift = GEN_INT (GET_MODE_SIZE (SImode) - GET_MODE_SIZE (mode));
4840 ac->aligned = (MEM_ALIGN (mem) >= GET_MODE_BITSIZE (SImode));
4841
4842 if (ac->aligned)
4843 ac->memsi = adjust_address (mem, SImode, 0); /* Memory is aligned. */
4844 else
4845 {
4846 /* Alignment is unknown. */
4847 rtx byteoffset, addr, align;
4848
4849 /* Force the address into a register. */
4850 addr = force_reg (Pmode, XEXP (mem, 0));
4851
4852 /* Align it to SImode. */
4853 align = expand_simple_binop (Pmode, AND, addr,
4854 GEN_INT (-GET_MODE_SIZE (SImode)),
4855 NULL_RTX, 1, OPTAB_DIRECT);
4856 /* Generate MEM. */
4857 ac->memsi = gen_rtx_MEM (SImode, align);
4858 MEM_VOLATILE_P (ac->memsi) = MEM_VOLATILE_P (mem);
4859 set_mem_alias_set (ac->memsi, ALIAS_SET_MEMORY_BARRIER);
4860 set_mem_align (ac->memsi, GET_MODE_BITSIZE (SImode));
4861
4862 /* Calculate shiftcount. */
4863 byteoffset = expand_simple_binop (Pmode, AND, addr,
4864 GEN_INT (GET_MODE_SIZE (SImode) - 1),
4865 NULL_RTX, 1, OPTAB_DIRECT);
4866 /* As we already have some offset, evaluate the remaining distance. */
4867 ac->shift = expand_simple_binop (SImode, MINUS, ac->shift, byteoffset,
4868 NULL_RTX, 1, OPTAB_DIRECT);
4869 }
4870
4871 /* Shift is the byte count, but we need the bitcount. */
4872 ac->shift = expand_simple_binop (SImode, ASHIFT, ac->shift, GEN_INT (3),
4873 NULL_RTX, 1, OPTAB_DIRECT);
4874
4875 /* Calculate masks. */
4876 ac->modemask = expand_simple_binop (SImode, ASHIFT,
4877 GEN_INT (GET_MODE_MASK (mode)),
4878 ac->shift, NULL_RTX, 1, OPTAB_DIRECT);
4879 ac->modemaski = expand_simple_unop (SImode, NOT, ac->modemask,
4880 NULL_RTX, 1);
4881 }
4882
4883 /* A subroutine of s390_expand_cs_hqi. Insert INS into VAL. If possible,
4884 use a single insv insn into SEQ2. Otherwise, put prep insns in SEQ1 and
4885 perform the merge in SEQ2. */
4886
4887 static rtx
4888 s390_two_part_insv (struct alignment_context *ac, rtx *seq1, rtx *seq2,
4889 enum machine_mode mode, rtx val, rtx ins)
4890 {
4891 rtx tmp;
4892
4893 if (ac->aligned)
4894 {
4895 start_sequence ();
4896 tmp = copy_to_mode_reg (SImode, val);
4897 if (s390_expand_insv (tmp, GEN_INT (GET_MODE_BITSIZE (mode)),
4898 const0_rtx, ins))
4899 {
4900 *seq1 = NULL;
4901 *seq2 = get_insns ();
4902 end_sequence ();
4903 return tmp;
4904 }
4905 end_sequence ();
4906 }
4907
4908 /* Failed to use insv. Generate a two part shift and mask. */
4909 start_sequence ();
4910 tmp = s390_expand_mask_and_shift (ins, mode, ac->shift);
4911 *seq1 = get_insns ();
4912 end_sequence ();
4913
4914 start_sequence ();
4915 tmp = expand_simple_binop (SImode, IOR, tmp, val, NULL_RTX, 1, OPTAB_DIRECT);
4916 *seq2 = get_insns ();
4917 end_sequence ();
4918
4919 return tmp;
4920 }
4921
4922 /* Expand an atomic compare and swap operation for HImode and QImode. MEM is
4923 the memory location, CMP the old value to compare MEM with and NEW_RTX the
4924 value to set if CMP == MEM. */
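/* Rough shape of the generated sequence (an informal sketch, not the exact
   RTL emitted below):
     val  = *memsi & ~modemask;            surrounding word, field cleared
     cmpv = merge (val, cmp << shift);
     newv = merge (val, new << shift);
   retry:
     res  = compare_and_swap (memsi, cmpv, newv);
     if CS failed and only bits outside the field changed:
         refresh val from res, rebuild cmpv/newv, goto retry;
     vtarget = (res >> shift) truncated to MODE;  */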
4925
4926 void
4927 s390_expand_cs_hqi (enum machine_mode mode, rtx btarget, rtx vtarget, rtx mem,
4928 rtx cmp, rtx new_rtx, bool is_weak)
4929 {
4930 struct alignment_context ac;
4931 rtx cmpv, newv, val, cc, seq0, seq1, seq2, seq3;
4932 rtx res = gen_reg_rtx (SImode);
4933 rtx csloop = NULL, csend = NULL;
4934
4935 gcc_assert (MEM_P (mem));
4936
4937 init_alignment_context (&ac, mem, mode);
4938
4939 /* Load full word. Subsequent loads are performed by CS. */
4940 val = expand_simple_binop (SImode, AND, ac.memsi, ac.modemaski,
4941 NULL_RTX, 1, OPTAB_DIRECT);
4942
4943 /* Prepare insertions of cmp and new_rtx into the loaded value. When
4944 possible, we try to use insv to make this happen efficiently. If
4945 that fails we'll generate code both inside and outside the loop. */
4946 cmpv = s390_two_part_insv (&ac, &seq0, &seq2, mode, val, cmp);
4947 newv = s390_two_part_insv (&ac, &seq1, &seq3, mode, val, new_rtx);
4948
4949 if (seq0)
4950 emit_insn (seq0);
4951 if (seq1)
4952 emit_insn (seq1);
4953
4954 /* Start CS loop. */
4955 if (!is_weak)
4956 {
4957 /* Begin assuming success. */
4958 emit_move_insn (btarget, const1_rtx);
4959
4960 csloop = gen_label_rtx ();
4961 csend = gen_label_rtx ();
4962 emit_label (csloop);
4963 }
4964
4965 /* val = "<mem>00..0<mem>"
4966 * cmp = "00..0<cmp>00..0"
4967 * new = "00..0<new>00..0"
4968 */
4969
4970 emit_insn (seq2);
4971 emit_insn (seq3);
4972
4973 cc = s390_emit_compare_and_swap (EQ, res, ac.memsi, cmpv, newv);
4974 if (is_weak)
4975 emit_insn (gen_cstorecc4 (btarget, cc, XEXP (cc, 0), XEXP (cc, 1)));
4976 else
4977 {
4978 rtx tmp;
4979
4980 /* Jump to end if we're done (likely?). */
4981 s390_emit_jump (csend, cc);
4982
4983       /* Check for changes outside the accessed part and loop internally if so.
4984 Arrange the moves so that the compare is adjacent to the
4985 branch so that we can generate CRJ. */
4986 tmp = copy_to_reg (val);
4987 force_expand_binop (SImode, and_optab, res, ac.modemaski, val,
4988 1, OPTAB_DIRECT);
4989 cc = s390_emit_compare (NE, val, tmp);
4990 s390_emit_jump (csloop, cc);
4991
4992 /* Failed. */
4993 emit_move_insn (btarget, const0_rtx);
4994 emit_label (csend);
4995 }
4996
4997 /* Return the correct part of the bitfield. */
4998 convert_move (vtarget, expand_simple_binop (SImode, LSHIFTRT, res, ac.shift,
4999 NULL_RTX, 1, OPTAB_DIRECT), 1);
5000 }
5001
5002 /* Expand an atomic operation CODE of mode MODE. MEM is the memory location
5003 and VAL the value to play with. If AFTER is true then store the value
5004 MEM holds after the operation, if AFTER is false then store the value MEM
5005 holds before the operation. If TARGET is zero then discard that value, else
5006 store it to TARGET. */
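/* Informal sketch of the loop built below (not the exact emitted RTL):
     cmp = *memsi;
   retry:
     new = cmp with the field updated according to CODE
           (PLUS/MINUS/AND/IOR/XOR/SET, or MULT which here encodes NAND);
     if (!compare_and_swap (memsi, cmp, new))     on failure CS reloads the
         goto retry;                              observed value into cmp
     if (target)
       target = ((after ? new : cmp) >> shift) truncated to MODE;  */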
5007
5008 void
5009 s390_expand_atomic (enum machine_mode mode, enum rtx_code code,
5010 rtx target, rtx mem, rtx val, bool after)
5011 {
5012 struct alignment_context ac;
5013 rtx cmp;
5014 rtx new_rtx = gen_reg_rtx (SImode);
5015 rtx orig = gen_reg_rtx (SImode);
5016 rtx csloop = gen_label_rtx ();
5017
5018 gcc_assert (!target || register_operand (target, VOIDmode));
5019 gcc_assert (MEM_P (mem));
5020
5021 init_alignment_context (&ac, mem, mode);
5022
5023 /* Shift val to the correct bit positions.
5024 Preserve "icm", but prevent "ex icm". */
5025 if (!(ac.aligned && code == SET && MEM_P (val)))
5026 val = s390_expand_mask_and_shift (val, mode, ac.shift);
5027
5028 /* Further preparation insns. */
5029 if (code == PLUS || code == MINUS)
5030 emit_move_insn (orig, val);
5031 else if (code == MULT || code == AND) /* val = "11..1<val>11..1" */
5032 val = expand_simple_binop (SImode, XOR, val, ac.modemaski,
5033 NULL_RTX, 1, OPTAB_DIRECT);
5034
5035 /* Load full word. Subsequent loads are performed by CS. */
5036 cmp = force_reg (SImode, ac.memsi);
5037
5038 /* Start CS loop. */
5039 emit_label (csloop);
5040 emit_move_insn (new_rtx, cmp);
5041
5042 /* Patch new with val at correct position. */
5043 switch (code)
5044 {
5045 case PLUS:
5046 case MINUS:
5047 val = expand_simple_binop (SImode, code, new_rtx, orig,
5048 NULL_RTX, 1, OPTAB_DIRECT);
5049 val = expand_simple_binop (SImode, AND, val, ac.modemask,
5050 NULL_RTX, 1, OPTAB_DIRECT);
5051 /* FALLTHRU */
5052 case SET:
5053 if (ac.aligned && MEM_P (val))
5054 store_bit_field (new_rtx, GET_MODE_BITSIZE (mode), 0,
5055 0, 0, SImode, val);
5056 else
5057 {
5058 new_rtx = expand_simple_binop (SImode, AND, new_rtx, ac.modemaski,
5059 NULL_RTX, 1, OPTAB_DIRECT);
5060 new_rtx = expand_simple_binop (SImode, IOR, new_rtx, val,
5061 NULL_RTX, 1, OPTAB_DIRECT);
5062 }
5063 break;
5064 case AND:
5065 case IOR:
5066 case XOR:
5067 new_rtx = expand_simple_binop (SImode, code, new_rtx, val,
5068 NULL_RTX, 1, OPTAB_DIRECT);
5069 break;
5070 case MULT: /* NAND */
5071 new_rtx = expand_simple_binop (SImode, AND, new_rtx, val,
5072 NULL_RTX, 1, OPTAB_DIRECT);
5073 new_rtx = expand_simple_binop (SImode, XOR, new_rtx, ac.modemask,
5074 NULL_RTX, 1, OPTAB_DIRECT);
5075 break;
5076 default:
5077 gcc_unreachable ();
5078 }
5079
5080 s390_emit_jump (csloop, s390_emit_compare_and_swap (NE, cmp,
5081 ac.memsi, cmp, new_rtx));
5082
5083 /* Return the correct part of the bitfield. */
5084 if (target)
5085 convert_move (target, expand_simple_binop (SImode, LSHIFTRT,
5086 after ? new_rtx : cmp, ac.shift,
5087 NULL_RTX, 1, OPTAB_DIRECT), 1);
5088 }
5089
5090 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
5091 We need to emit DTP-relative relocations. */
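/* For example (illustrative), an 8-byte DTP-relative reference to a symbol
   foo would be emitted by the function below as:
       .quad   foo@DTPOFF  */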
5092
5093 static void s390_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
5094
5095 static void
5096 s390_output_dwarf_dtprel (FILE *file, int size, rtx x)
5097 {
5098 switch (size)
5099 {
5100 case 4:
5101 fputs ("\t.long\t", file);
5102 break;
5103 case 8:
5104 fputs ("\t.quad\t", file);
5105 break;
5106 default:
5107 gcc_unreachable ();
5108 }
5109 output_addr_const (file, x);
5110 fputs ("@DTPOFF", file);
5111 }
5112
5113 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
5114 /* Implement TARGET_MANGLE_TYPE. */
5115
5116 static const char *
5117 s390_mangle_type (const_tree type)
5118 {
5119 if (TYPE_MAIN_VARIANT (type) == long_double_type_node
5120 && TARGET_LONG_DOUBLE_128)
5121 return "g";
5122
5123 /* For all other types, use normal C++ mangling. */
5124 return NULL;
5125 }
5126 #endif
5127
5128 /* In the name of slightly smaller debug output, and to cater to
5129 general assembler lossage, recognize various UNSPEC sequences
5130 and turn them back into a direct symbol reference. */
5131
5132 static rtx
5133 s390_delegitimize_address (rtx orig_x)
5134 {
5135 rtx x, y;
5136
5137 orig_x = delegitimize_mem_from_attrs (orig_x);
5138 x = orig_x;
5139
5140 /* Extract the symbol ref from:
5141 (plus:SI (reg:SI 12 %r12)
5142 (const:SI (unspec:SI [(symbol_ref/f:SI ("*.LC0"))]
5143 UNSPEC_GOTOFF/PLTOFF)))
5144 and
5145 (plus:SI (reg:SI 12 %r12)
5146 (const:SI (plus:SI (unspec:SI [(symbol_ref:SI ("L"))]
5147 UNSPEC_GOTOFF/PLTOFF)
5148 (const_int 4 [0x4])))) */
5149 if (GET_CODE (x) == PLUS
5150 && REG_P (XEXP (x, 0))
5151 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM
5152 && GET_CODE (XEXP (x, 1)) == CONST)
5153 {
5154 HOST_WIDE_INT offset = 0;
5155
5156 /* The const operand. */
5157 y = XEXP (XEXP (x, 1), 0);
5158
5159 if (GET_CODE (y) == PLUS
5160 && GET_CODE (XEXP (y, 1)) == CONST_INT)
5161 {
5162 offset = INTVAL (XEXP (y, 1));
5163 y = XEXP (y, 0);
5164 }
5165
5166 if (GET_CODE (y) == UNSPEC
5167 && (XINT (y, 1) == UNSPEC_GOTOFF
5168 || XINT (y, 1) == UNSPEC_PLTOFF))
5169 return plus_constant (Pmode, XVECEXP (y, 0, 0), offset);
5170 }
5171
5172 if (GET_CODE (x) != MEM)
5173 return orig_x;
5174
5175 x = XEXP (x, 0);
5176 if (GET_CODE (x) == PLUS
5177 && GET_CODE (XEXP (x, 1)) == CONST
5178 && GET_CODE (XEXP (x, 0)) == REG
5179 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM)
5180 {
5181 y = XEXP (XEXP (x, 1), 0);
5182 if (GET_CODE (y) == UNSPEC
5183 && XINT (y, 1) == UNSPEC_GOT)
5184 y = XVECEXP (y, 0, 0);
5185 else
5186 return orig_x;
5187 }
5188 else if (GET_CODE (x) == CONST)
5189 {
5190 /* Extract the symbol ref from:
5191 (mem:QI (const:DI (unspec:DI [(symbol_ref:DI ("foo"))]
5192 UNSPEC_PLT/GOTENT))) */
5193
5194 y = XEXP (x, 0);
5195 if (GET_CODE (y) == UNSPEC
5196 && (XINT (y, 1) == UNSPEC_GOTENT
5197 || XINT (y, 1) == UNSPEC_PLT))
5198 y = XVECEXP (y, 0, 0);
5199 else
5200 return orig_x;
5201 }
5202 else
5203 return orig_x;
5204
5205 if (GET_MODE (orig_x) != Pmode)
5206 {
5207 if (GET_MODE (orig_x) == BLKmode)
5208 return orig_x;
5209 y = lowpart_subreg (GET_MODE (orig_x), y, Pmode);
5210 if (y == NULL_RTX)
5211 return orig_x;
5212 }
5213 return y;
5214 }
5215
5216 /* Output operand OP to stdio stream FILE.
5217 OP is an address (register + offset) which is not used to address data;
5218 instead the rightmost bits are interpreted as the value. */
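/* Illustrative example (hypothetical operands): for
   OP = (plus (reg %r1) (const_int 3)) the code below prints "3(%r1)";
   for OP = (const_int 3) with no base register it prints just "3".
   The offset is printed masked to its rightmost twelve bits.  */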
5219
5220 static void
5221 print_shift_count_operand (FILE *file, rtx op)
5222 {
5223 HOST_WIDE_INT offset;
5224 rtx base;
5225
5226 /* Extract base register and offset. */
5227 if (!s390_decompose_shift_count (op, &base, &offset))
5228 gcc_unreachable ();
5229
5230 /* Sanity check. */
5231 if (base)
5232 {
5233 gcc_assert (GET_CODE (base) == REG);
5234 gcc_assert (REGNO (base) < FIRST_PSEUDO_REGISTER);
5235 gcc_assert (REGNO_REG_CLASS (REGNO (base)) == ADDR_REGS);
5236 }
5237
5238   /* Offsets are restricted to twelve bits.  */
5239 fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset & ((1 << 12) - 1));
5240 if (base)
5241 fprintf (file, "(%s)", reg_names[REGNO (base)]);
5242 }
5243
5244 /* See 'get_some_local_dynamic_name'. */
5245
5246 static int
5247 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
5248 {
5249 rtx x = *px;
5250
5251 if (GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
5252 {
5253 x = get_pool_constant (x);
5254 return for_each_rtx (&x, get_some_local_dynamic_name_1, 0);
5255 }
5256
5257 if (GET_CODE (x) == SYMBOL_REF
5258 && tls_symbolic_operand (x) == TLS_MODEL_LOCAL_DYNAMIC)
5259 {
5260 cfun->machine->some_ld_name = XSTR (x, 0);
5261 return 1;
5262 }
5263
5264 return 0;
5265 }
5266
5267 /* Locate some local-dynamic symbol still in use by this function
5268 so that we can print its name in local-dynamic base patterns. */
5269
5270 static const char *
5271 get_some_local_dynamic_name (void)
5272 {
5273 rtx insn;
5274
5275 if (cfun->machine->some_ld_name)
5276 return cfun->machine->some_ld_name;
5277
5278 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
5279 if (INSN_P (insn)
5280 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
5281 return cfun->machine->some_ld_name;
5282
5283 gcc_unreachable ();
5284 }
5285
5286 /* Output machine-dependent UNSPECs occurring in address constant X
5287 in assembler syntax to stdio stream FILE. Returns true if the
5288 constant X could be recognized, false otherwise. */
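/* For example (illustrative), the address constant
       (unspec [(symbol_ref "foo")] UNSPEC_GOTENT)
   is printed by the code below as "foo@GOTENT", and similarly for the other
   UNSPEC variants handled here.  */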
5289
5290 static bool
5291 s390_output_addr_const_extra (FILE *file, rtx x)
5292 {
5293 if (GET_CODE (x) == UNSPEC && XVECLEN (x, 0) == 1)
5294 switch (XINT (x, 1))
5295 {
5296 case UNSPEC_GOTENT:
5297 output_addr_const (file, XVECEXP (x, 0, 0));
5298 fprintf (file, "@GOTENT");
5299 return true;
5300 case UNSPEC_GOT:
5301 output_addr_const (file, XVECEXP (x, 0, 0));
5302 fprintf (file, "@GOT");
5303 return true;
5304 case UNSPEC_GOTOFF:
5305 output_addr_const (file, XVECEXP (x, 0, 0));
5306 fprintf (file, "@GOTOFF");
5307 return true;
5308 case UNSPEC_PLT:
5309 output_addr_const (file, XVECEXP (x, 0, 0));
5310 fprintf (file, "@PLT");
5311 return true;
5312 case UNSPEC_PLTOFF:
5313 output_addr_const (file, XVECEXP (x, 0, 0));
5314 fprintf (file, "@PLTOFF");
5315 return true;
5316 case UNSPEC_TLSGD:
5317 output_addr_const (file, XVECEXP (x, 0, 0));
5318 fprintf (file, "@TLSGD");
5319 return true;
5320 case UNSPEC_TLSLDM:
5321 assemble_name (file, get_some_local_dynamic_name ());
5322 fprintf (file, "@TLSLDM");
5323 return true;
5324 case UNSPEC_DTPOFF:
5325 output_addr_const (file, XVECEXP (x, 0, 0));
5326 fprintf (file, "@DTPOFF");
5327 return true;
5328 case UNSPEC_NTPOFF:
5329 output_addr_const (file, XVECEXP (x, 0, 0));
5330 fprintf (file, "@NTPOFF");
5331 return true;
5332 case UNSPEC_GOTNTPOFF:
5333 output_addr_const (file, XVECEXP (x, 0, 0));
5334 fprintf (file, "@GOTNTPOFF");
5335 return true;
5336 case UNSPEC_INDNTPOFF:
5337 output_addr_const (file, XVECEXP (x, 0, 0));
5338 fprintf (file, "@INDNTPOFF");
5339 return true;
5340 }
5341
5342 if (GET_CODE (x) == UNSPEC && XVECLEN (x, 0) == 2)
5343 switch (XINT (x, 1))
5344 {
5345 case UNSPEC_POOL_OFFSET:
5346 x = gen_rtx_MINUS (GET_MODE (x), XVECEXP (x, 0, 0), XVECEXP (x, 0, 1));
5347 output_addr_const (file, x);
5348 return true;
5349 }
5350 return false;
5351 }
5352
5353 /* Output address operand ADDR in assembler syntax to
5354 stdio stream FILE. */
5355
5356 void
5357 print_operand_address (FILE *file, rtx addr)
5358 {
5359 struct s390_address ad;
5360
5361 if (s390_loadrelative_operand_p (addr, NULL, NULL))
5362 {
5363 if (!TARGET_Z10)
5364 {
5365 output_operand_lossage ("symbolic memory references are "
5366 "only supported on z10 or later");
5367 return;
5368 }
5369 output_addr_const (file, addr);
5370 return;
5371 }
5372
5373 if (!s390_decompose_address (addr, &ad)
5374 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
5375 || (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx))))
5376 output_operand_lossage ("cannot decompose address");
5377
5378 if (ad.disp)
5379 output_addr_const (file, ad.disp);
5380 else
5381 fprintf (file, "0");
5382
5383 if (ad.base && ad.indx)
5384 fprintf (file, "(%s,%s)", reg_names[REGNO (ad.indx)],
5385 reg_names[REGNO (ad.base)]);
5386 else if (ad.base)
5387 fprintf (file, "(%s)", reg_names[REGNO (ad.base)]);
5388 }
5389
5390 /* Output operand X in assembler syntax to stdio stream FILE.
5391 CODE specified the format flag. The following format flags
5392 are recognized:
5393
5394 'C': print opcode suffix for branch condition.
5395 'D': print opcode suffix for inverse branch condition.
5396 'E': print opcode suffix for branch on index instruction.
5397 'G': print the size of the operand in bytes.
5398 'J': print tls_load/tls_gdcall/tls_ldcall suffix
5399 'M': print the second word of a TImode operand.
5400 'N': print the second word of a DImode operand.
5401 'O': print only the displacement of a memory reference.
5402 'R': print only the base register of a memory reference.
5403 'S': print S-type memory reference (base+displacement).
5404 'Y': print shift count operand.
5405
5406 'b': print integer X as if it's an unsigned byte.
5407     'c': print integer X as if it's a signed byte.
5408 'e': "end" of DImode contiguous bitmask X.
5409 'f': "end" of SImode contiguous bitmask X.
5410 'h': print integer X as if it's a signed halfword.
5411 'i': print the first nonzero HImode part of X.
5412 'j': print the first HImode part unequal to -1 of X.
5413 'k': print the first nonzero SImode part of X.
5414 'm': print the first SImode part unequal to -1 of X.
5415     'o': print integer X as if it's an unsigned 32-bit word.
5416 's': "start" of DImode contiguous bitmask X.
5417 't': "start" of SImode contiguous bitmask X.
5418 'x': print integer X as if it's an unsigned halfword.
5419 */
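/* Worked example for the bitmask modifiers above (illustrative; this assumes
   s390_contiguous_bitmask_p returns POS as the number of trailing zero bits
   and LEN as the number of contiguous one bits): for the DImode mask
   0x000000000000ff00, POS = 8 and LEN = 8, so '%s' prints 64 - 8 - 8 = 48
   and '%e' prints 64 - 1 - 8 = 55, i.e. the IBM-style (MSB = bit 0) start
   and end bit positions of the mask.  */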
5420
5421 void
5422 print_operand (FILE *file, rtx x, int code)
5423 {
5424 HOST_WIDE_INT ival;
5425
5426 switch (code)
5427 {
5428 case 'C':
5429 fprintf (file, s390_branch_condition_mnemonic (x, FALSE));
5430 return;
5431
5432 case 'D':
5433 fprintf (file, s390_branch_condition_mnemonic (x, TRUE));
5434 return;
5435
5436 case 'E':
5437 if (GET_CODE (x) == LE)
5438 fprintf (file, "l");
5439 else if (GET_CODE (x) == GT)
5440 fprintf (file, "h");
5441 else
5442 output_operand_lossage ("invalid comparison operator "
5443 "for 'E' output modifier");
5444 return;
5445
5446 case 'J':
5447 if (GET_CODE (x) == SYMBOL_REF)
5448 {
5449 fprintf (file, "%s", ":tls_load:");
5450 output_addr_const (file, x);
5451 }
5452 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD)
5453 {
5454 fprintf (file, "%s", ":tls_gdcall:");
5455 output_addr_const (file, XVECEXP (x, 0, 0));
5456 }
5457 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSLDM)
5458 {
5459 fprintf (file, "%s", ":tls_ldcall:");
5460 assemble_name (file, get_some_local_dynamic_name ());
5461 }
5462 else
5463 output_operand_lossage ("invalid reference for 'J' output modifier");
5464 return;
5465
5466 case 'G':
5467 fprintf (file, "%u", GET_MODE_SIZE (GET_MODE (x)));
5468 return;
5469
5470 case 'O':
5471 {
5472 struct s390_address ad;
5473 int ret;
5474
5475 if (!MEM_P (x))
5476 {
5477 output_operand_lossage ("memory reference expected for "
5478 "'O' output modifier");
5479 return;
5480 }
5481
5482 ret = s390_decompose_address (XEXP (x, 0), &ad);
5483
5484 if (!ret
5485 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
5486 || ad.indx)
5487 {
5488 output_operand_lossage ("invalid address for 'O' output modifier");
5489 return;
5490 }
5491
5492 if (ad.disp)
5493 output_addr_const (file, ad.disp);
5494 else
5495 fprintf (file, "0");
5496 }
5497 return;
5498
5499 case 'R':
5500 {
5501 struct s390_address ad;
5502 int ret;
5503
5504 if (!MEM_P (x))
5505 {
5506 output_operand_lossage ("memory reference expected for "
5507 "'R' output modifier");
5508 return;
5509 }
5510
5511 ret = s390_decompose_address (XEXP (x, 0), &ad);
5512
5513 if (!ret
5514 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
5515 || ad.indx)
5516 {
5517 output_operand_lossage ("invalid address for 'R' output modifier");
5518 return;
5519 }
5520
5521 if (ad.base)
5522 fprintf (file, "%s", reg_names[REGNO (ad.base)]);
5523 else
5524 fprintf (file, "0");
5525 }
5526 return;
5527
5528 case 'S':
5529 {
5530 struct s390_address ad;
5531 int ret;
5532
5533 if (!MEM_P (x))
5534 {
5535 output_operand_lossage ("memory reference expected for "
5536 "'S' output modifier");
5537 return;
5538 }
5539 ret = s390_decompose_address (XEXP (x, 0), &ad);
5540
5541 if (!ret
5542 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
5543 || ad.indx)
5544 {
5545 output_operand_lossage ("invalid address for 'S' output modifier");
5546 return;
5547 }
5548
5549 if (ad.disp)
5550 output_addr_const (file, ad.disp);
5551 else
5552 fprintf (file, "0");
5553
5554 if (ad.base)
5555 fprintf (file, "(%s)", reg_names[REGNO (ad.base)]);
5556 }
5557 return;
5558
5559 case 'N':
5560 if (GET_CODE (x) == REG)
5561 x = gen_rtx_REG (GET_MODE (x), REGNO (x) + 1);
5562 else if (GET_CODE (x) == MEM)
5563 x = change_address (x, VOIDmode,
5564 plus_constant (Pmode, XEXP (x, 0), 4));
5565 else
5566 output_operand_lossage ("register or memory expression expected "
5567 "for 'N' output modifier");
5568 break;
5569
5570 case 'M':
5571 if (GET_CODE (x) == REG)
5572 x = gen_rtx_REG (GET_MODE (x), REGNO (x) + 1);
5573 else if (GET_CODE (x) == MEM)
5574 x = change_address (x, VOIDmode,
5575 plus_constant (Pmode, XEXP (x, 0), 8));
5576 else
5577 output_operand_lossage ("register or memory expression expected "
5578 "for 'M' output modifier");
5579 break;
5580
5581 case 'Y':
5582 print_shift_count_operand (file, x);
5583 return;
5584 }
5585
5586 switch (GET_CODE (x))
5587 {
5588 case REG:
5589 fprintf (file, "%s", reg_names[REGNO (x)]);
5590 break;
5591
5592 case MEM:
5593 output_address (XEXP (x, 0));
5594 break;
5595
5596 case CONST:
5597 case CODE_LABEL:
5598 case LABEL_REF:
5599 case SYMBOL_REF:
5600 output_addr_const (file, x);
5601 break;
5602
5603 case CONST_INT:
5604 ival = INTVAL (x);
5605 switch (code)
5606 {
5607 case 0:
5608 break;
5609 case 'b':
5610 ival &= 0xff;
5611 break;
5612 case 'c':
5613 ival = ((ival & 0xff) ^ 0x80) - 0x80;
5614 break;
5615 case 'x':
5616 ival &= 0xffff;
5617 break;
5618 case 'h':
5619 ival = ((ival & 0xffff) ^ 0x8000) - 0x8000;
5620 break;
5621 case 'i':
5622 ival = s390_extract_part (x, HImode, 0);
5623 break;
5624 case 'j':
5625 ival = s390_extract_part (x, HImode, -1);
5626 break;
5627 case 'k':
5628 ival = s390_extract_part (x, SImode, 0);
5629 break;
5630 case 'm':
5631 ival = s390_extract_part (x, SImode, -1);
5632 break;
5633 case 'o':
5634 ival &= 0xffffffff;
5635 break;
5636 case 'e': case 'f':
5637 case 's': case 't':
5638 {
5639 int pos, len;
5640 bool ok;
5641
5642 len = (code == 's' || code == 'e' ? 64 : 32);
5643 ok = s390_contiguous_bitmask_p (ival, len, &pos, &len);
5644 gcc_assert (ok);
5645 if (code == 's' || code == 't')
5646 ival = 64 - pos - len;
5647 else
5648 ival = 64 - 1 - pos;
5649 }
5650 break;
5651 default:
5652 output_operand_lossage ("invalid constant for output modifier '%c'", code);
5653 }
5654 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
5655 break;
5656
5657 case CONST_DOUBLE:
5658 gcc_assert (GET_MODE (x) == VOIDmode);
5659 if (code == 'b')
5660 fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x) & 0xff);
5661 else if (code == 'x')
5662 fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x) & 0xffff);
5663 else if (code == 'h')
5664 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5665 ((CONST_DOUBLE_LOW (x) & 0xffff) ^ 0x8000) - 0x8000);
5666 else
5667 {
5668 if (code == 0)
5669 output_operand_lossage ("invalid constant - try using "
5670 "an output modifier");
5671 else
5672 output_operand_lossage ("invalid constant for output modifier '%c'",
5673 code);
5674 }
5675 break;
5676
5677 default:
5678 if (code == 0)
5679 output_operand_lossage ("invalid expression - try using "
5680 "an output modifier");
5681 else
5682 output_operand_lossage ("invalid expression for output "
5683 "modifier '%c'", code);
5684 break;
5685 }
5686 }
5687
5688 /* Target hook for assembling integer objects. We need to define it
5689    here to work around a bug in some versions of GAS, which couldn't
5690 handle values smaller than INT_MIN when printed in decimal. */
5691
5692 static bool
5693 s390_assemble_integer (rtx x, unsigned int size, int aligned_p)
5694 {
5695 if (size == 8 && aligned_p
5696 && GET_CODE (x) == CONST_INT && INTVAL (x) < INT_MIN)
5697 {
5698 fprintf (asm_out_file, "\t.quad\t" HOST_WIDE_INT_PRINT_HEX "\n",
5699 INTVAL (x));
5700 return true;
5701 }
5702 return default_assemble_integer (x, size, aligned_p);
5703 }
5704
5705 /* Returns true if register REGNO is used for forming
5706 a memory address in expression X. */
5707
5708 static bool
5709 reg_used_in_mem_p (int regno, rtx x)
5710 {
5711 enum rtx_code code = GET_CODE (x);
5712 int i, j;
5713 const char *fmt;
5714
5715 if (code == MEM)
5716 {
5717 if (refers_to_regno_p (regno, regno+1,
5718 XEXP (x, 0), 0))
5719 return true;
5720 }
5721 else if (code == SET
5722 && GET_CODE (SET_DEST (x)) == PC)
5723 {
5724 if (refers_to_regno_p (regno, regno+1,
5725 SET_SRC (x), 0))
5726 return true;
5727 }
5728
5729 fmt = GET_RTX_FORMAT (code);
5730 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
5731 {
5732 if (fmt[i] == 'e'
5733 && reg_used_in_mem_p (regno, XEXP (x, i)))
5734 return true;
5735
5736 else if (fmt[i] == 'E')
5737 for (j = 0; j < XVECLEN (x, i); j++)
5738 if (reg_used_in_mem_p (regno, XVECEXP (x, i, j)))
5739 return true;
5740 }
5741 return false;
5742 }
5743
5744 /* Returns true if expression DEP_RTX sets an address register
5745 used by instruction INSN to address memory. */
5746
5747 static bool
5748 addr_generation_dependency_p (rtx dep_rtx, rtx insn)
5749 {
5750 rtx target, pat;
5751
5752 if (NONJUMP_INSN_P (dep_rtx))
5753 dep_rtx = PATTERN (dep_rtx);
5754
5755 if (GET_CODE (dep_rtx) == SET)
5756 {
5757 target = SET_DEST (dep_rtx);
5758 if (GET_CODE (target) == STRICT_LOW_PART)
5759 target = XEXP (target, 0);
5760 while (GET_CODE (target) == SUBREG)
5761 target = SUBREG_REG (target);
5762
5763 if (GET_CODE (target) == REG)
5764 {
5765 int regno = REGNO (target);
5766
5767 if (s390_safe_attr_type (insn) == TYPE_LA)
5768 {
5769 pat = PATTERN (insn);
5770 if (GET_CODE (pat) == PARALLEL)
5771 {
5772 gcc_assert (XVECLEN (pat, 0) == 2);
5773 pat = XVECEXP (pat, 0, 0);
5774 }
5775 gcc_assert (GET_CODE (pat) == SET);
5776 return refers_to_regno_p (regno, regno+1, SET_SRC (pat), 0);
5777 }
5778 else if (get_attr_atype (insn) == ATYPE_AGEN)
5779 return reg_used_in_mem_p (regno, PATTERN (insn));
5780 }
5781 }
5782 return false;
5783 }
5784
5785 /* Return 1 if DEP_INSN sets a register that INSN uses in the agen unit. */
5786
5787 int
5788 s390_agen_dep_p (rtx dep_insn, rtx insn)
5789 {
5790 rtx dep_rtx = PATTERN (dep_insn);
5791 int i;
5792
5793 if (GET_CODE (dep_rtx) == SET
5794 && addr_generation_dependency_p (dep_rtx, insn))
5795 return 1;
5796 else if (GET_CODE (dep_rtx) == PARALLEL)
5797 {
5798 for (i = 0; i < XVECLEN (dep_rtx, 0); i++)
5799 {
5800 if (addr_generation_dependency_p (XVECEXP (dep_rtx, 0, i), insn))
5801 return 1;
5802 }
5803 }
5804 return 0;
5805 }
5806
5807
5808 /* A C statement (sans semicolon) to update the integer scheduling priority
5809 INSN_PRIORITY (INSN). Increase the priority to execute the INSN earlier,
5810 reduce the priority to execute INSN later. Do not define this macro if
5811 you do not need to adjust the scheduling priorities of insns.
5812
5813 A STD instruction should be scheduled earlier,
5814 in order to use the bypass. */
5815 static int
5816 s390_adjust_priority (rtx insn ATTRIBUTE_UNUSED, int priority)
5817 {
5818 if (! INSN_P (insn))
5819 return priority;
5820
5821 if (s390_tune != PROCESSOR_2084_Z990
5822 && s390_tune != PROCESSOR_2094_Z9_109
5823 && s390_tune != PROCESSOR_2097_Z10
5824 && s390_tune != PROCESSOR_2817_Z196
5825 && s390_tune != PROCESSOR_2827_ZEC12)
5826 return priority;
5827
5828 switch (s390_safe_attr_type (insn))
5829 {
5830 case TYPE_FSTOREDF:
5831 case TYPE_FSTORESF:
5832 priority = priority << 3;
5833 break;
5834 case TYPE_STORE:
5835 case TYPE_STM:
5836 priority = priority << 1;
5837 break;
5838 default:
5839 break;
5840 }
5841 return priority;
5842 }
5843
5844
5845 /* The number of instructions that can be issued per cycle. */
5846
5847 static int
5848 s390_issue_rate (void)
5849 {
5850 switch (s390_tune)
5851 {
5852 case PROCESSOR_2084_Z990:
5853 case PROCESSOR_2094_Z9_109:
5854 case PROCESSOR_2817_Z196:
5855 return 3;
5856 case PROCESSOR_2097_Z10:
5857 case PROCESSOR_2827_ZEC12:
5858 return 2;
5859 default:
5860 return 1;
5861 }
5862 }
5863
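/* The number of insns the scheduler is allowed to examine in the ready
list when choosing the next insn to issue (multipass DFA lookahead).  */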
5864 static int
5865 s390_first_cycle_multipass_dfa_lookahead (void)
5866 {
5867 return 4;
5868 }
5869
5870 /* Annotate every literal pool reference in X by an UNSPEC_LTREF expression.
5871 Fix up MEMs as required. */
5872
5873 static void
5874 annotate_constant_pool_refs (rtx *x)
5875 {
5876 int i, j;
5877 const char *fmt;
5878
5879 gcc_assert (GET_CODE (*x) != SYMBOL_REF
5880 || !CONSTANT_POOL_ADDRESS_P (*x));
5881
5882 /* Literal pool references can only occur inside a MEM ... */
5883 if (GET_CODE (*x) == MEM)
5884 {
5885 rtx memref = XEXP (*x, 0);
5886
5887 if (GET_CODE (memref) == SYMBOL_REF
5888 && CONSTANT_POOL_ADDRESS_P (memref))
5889 {
5890 rtx base = cfun->machine->base_reg;
5891 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, memref, base),
5892 UNSPEC_LTREF);
5893
5894 *x = replace_equiv_address (*x, addr);
5895 return;
5896 }
5897
5898 if (GET_CODE (memref) == CONST
5899 && GET_CODE (XEXP (memref, 0)) == PLUS
5900 && GET_CODE (XEXP (XEXP (memref, 0), 1)) == CONST_INT
5901 && GET_CODE (XEXP (XEXP (memref, 0), 0)) == SYMBOL_REF
5902 && CONSTANT_POOL_ADDRESS_P (XEXP (XEXP (memref, 0), 0)))
5903 {
5904 HOST_WIDE_INT off = INTVAL (XEXP (XEXP (memref, 0), 1));
5905 rtx sym = XEXP (XEXP (memref, 0), 0);
5906 rtx base = cfun->machine->base_reg;
5907 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, sym, base),
5908 UNSPEC_LTREF);
5909
5910 *x = replace_equiv_address (*x, plus_constant (Pmode, addr, off));
5911 return;
5912 }
5913 }
5914
5915 /* ... or a load-address type pattern. */
5916 if (GET_CODE (*x) == SET)
5917 {
5918 rtx addrref = SET_SRC (*x);
5919
5920 if (GET_CODE (addrref) == SYMBOL_REF
5921 && CONSTANT_POOL_ADDRESS_P (addrref))
5922 {
5923 rtx base = cfun->machine->base_reg;
5924 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, addrref, base),
5925 UNSPEC_LTREF);
5926
5927 SET_SRC (*x) = addr;
5928 return;
5929 }
5930
5931 if (GET_CODE (addrref) == CONST
5932 && GET_CODE (XEXP (addrref, 0)) == PLUS
5933 && GET_CODE (XEXP (XEXP (addrref, 0), 1)) == CONST_INT
5934 && GET_CODE (XEXP (XEXP (addrref, 0), 0)) == SYMBOL_REF
5935 && CONSTANT_POOL_ADDRESS_P (XEXP (XEXP (addrref, 0), 0)))
5936 {
5937 HOST_WIDE_INT off = INTVAL (XEXP (XEXP (addrref, 0), 1));
5938 rtx sym = XEXP (XEXP (addrref, 0), 0);
5939 rtx base = cfun->machine->base_reg;
5940 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, sym, base),
5941 UNSPEC_LTREF);
5942
5943 SET_SRC (*x) = plus_constant (Pmode, addr, off);
5944 return;
5945 }
5946 }
5947
5948 /* Annotate LTREL_BASE as well. */
5949 if (GET_CODE (*x) == UNSPEC
5950 && XINT (*x, 1) == UNSPEC_LTREL_BASE)
5951 {
5952 rtx base = cfun->machine->base_reg;
5953 *x = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, XVECEXP (*x, 0, 0), base),
5954 UNSPEC_LTREL_BASE);
5955 return;
5956 }
5957
5958 fmt = GET_RTX_FORMAT (GET_CODE (*x));
5959 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
5960 {
5961 if (fmt[i] == 'e')
5962 {
5963 annotate_constant_pool_refs (&XEXP (*x, i));
5964 }
5965 else if (fmt[i] == 'E')
5966 {
5967 for (j = 0; j < XVECLEN (*x, i); j++)
5968 annotate_constant_pool_refs (&XVECEXP (*x, i, j));
5969 }
5970 }
5971 }
5972
5973 /* Split all branches that exceed the maximum distance.
5974 Returns true if this created a new literal pool entry. */
5975
5976 static int
5977 s390_split_branches (void)
5978 {
5979 rtx temp_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
5980 int new_literal = 0, ret;
5981 rtx insn, pat, tmp, target;
5982 rtx *label;
5983
5984 /* We need correct insn addresses. */
5985
5986 shorten_branches (get_insns ());
5987
5988 /* Find all branches that exceed 64KB, and split them. */
5989
5990 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
5991 {
5992 if (! JUMP_P (insn))
5993 continue;
5994
5995 pat = PATTERN (insn);
5996 if (GET_CODE (pat) == PARALLEL && XVECLEN (pat, 0) > 2)
5997 pat = XVECEXP (pat, 0, 0);
5998 if (GET_CODE (pat) != SET || SET_DEST (pat) != pc_rtx)
5999 continue;
6000
6001 if (GET_CODE (SET_SRC (pat)) == LABEL_REF)
6002 {
6003 label = &SET_SRC (pat);
6004 }
6005 else if (GET_CODE (SET_SRC (pat)) == IF_THEN_ELSE)
6006 {
6007 if (GET_CODE (XEXP (SET_SRC (pat), 1)) == LABEL_REF)
6008 label = &XEXP (SET_SRC (pat), 1);
6009 else if (GET_CODE (XEXP (SET_SRC (pat), 2)) == LABEL_REF)
6010 label = &XEXP (SET_SRC (pat), 2);
6011 else
6012 continue;
6013 }
6014 else
6015 continue;
6016
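/* Branches whose length attribute is still 4 bytes fit into a single
relative branch instruction and are in range; only longer ones need
to be split via the literal pool.  */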
6017 if (get_attr_length (insn) <= 4)
6018 continue;
6019
6020 /* We are going to use the return register as a scratch register;
6021 make sure it will be saved/restored by the prologue/epilogue. */
6022 cfun_frame_layout.save_return_addr_p = 1;
6023
6024 if (!flag_pic)
6025 {
6026 new_literal = 1;
6027 tmp = force_const_mem (Pmode, *label);
6028 tmp = emit_insn_before (gen_rtx_SET (Pmode, temp_reg, tmp), insn);
6029 INSN_ADDRESSES_NEW (tmp, -1);
6030 annotate_constant_pool_refs (&PATTERN (tmp));
6031
6032 target = temp_reg;
6033 }
6034 else
6035 {
6036 new_literal = 1;
6037 target = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, *label),
6038 UNSPEC_LTREL_OFFSET);
6039 target = gen_rtx_CONST (Pmode, target);
6040 target = force_const_mem (Pmode, target);
6041 tmp = emit_insn_before (gen_rtx_SET (Pmode, temp_reg, target), insn);
6042 INSN_ADDRESSES_NEW (tmp, -1);
6043 annotate_constant_pool_refs (&PATTERN (tmp));
6044
6045 target = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, XEXP (target, 0),
6046 cfun->machine->base_reg),
6047 UNSPEC_LTREL_BASE);
6048 target = gen_rtx_PLUS (Pmode, temp_reg, target);
6049 }
6050
6051 ret = validate_change (insn, label, target, 0);
6052 gcc_assert (ret);
6053 }
6054
6055 return new_literal;
6056 }
6057
6058
6059 /* Find an annotated literal pool symbol referenced in RTX X,
6060 and store it at REF. Will abort if X contains references to
6061 more than one such pool symbol; multiple references to the same
6062 symbol are allowed, however.
6063
6064 The rtx pointed to by REF must be initialized to NULL_RTX
6065 by the caller before calling this routine. */
6066
6067 static void
6068 find_constant_pool_ref (rtx x, rtx *ref)
6069 {
6070 int i, j;
6071 const char *fmt;
6072
6073 /* Ignore LTREL_BASE references. */
6074 if (GET_CODE (x) == UNSPEC
6075 && XINT (x, 1) == UNSPEC_LTREL_BASE)
6076 return;
6077 /* Likewise POOL_ENTRY insns. */
6078 if (GET_CODE (x) == UNSPEC_VOLATILE
6079 && XINT (x, 1) == UNSPECV_POOL_ENTRY)
6080 return;
6081
6082 gcc_assert (GET_CODE (x) != SYMBOL_REF
6083 || !CONSTANT_POOL_ADDRESS_P (x));
6084
6085 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_LTREF)
6086 {
6087 rtx sym = XVECEXP (x, 0, 0);
6088 gcc_assert (GET_CODE (sym) == SYMBOL_REF
6089 && CONSTANT_POOL_ADDRESS_P (sym));
6090
6091 if (*ref == NULL_RTX)
6092 *ref = sym;
6093 else
6094 gcc_assert (*ref == sym);
6095
6096 return;
6097 }
6098
6099 fmt = GET_RTX_FORMAT (GET_CODE (x));
6100 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
6101 {
6102 if (fmt[i] == 'e')
6103 {
6104 find_constant_pool_ref (XEXP (x, i), ref);
6105 }
6106 else if (fmt[i] == 'E')
6107 {
6108 for (j = 0; j < XVECLEN (x, i); j++)
6109 find_constant_pool_ref (XVECEXP (x, i, j), ref);
6110 }
6111 }
6112 }
6113
6114 /* Replace every reference to the annotated literal pool
6115 symbol REF in X by its base plus OFFSET. */
6116
6117 static void
6118 replace_constant_pool_ref (rtx *x, rtx ref, rtx offset)
6119 {
6120 int i, j;
6121 const char *fmt;
6122
6123 gcc_assert (*x != ref);
6124
6125 if (GET_CODE (*x) == UNSPEC
6126 && XINT (*x, 1) == UNSPEC_LTREF
6127 && XVECEXP (*x, 0, 0) == ref)
6128 {
6129 *x = gen_rtx_PLUS (Pmode, XVECEXP (*x, 0, 1), offset);
6130 return;
6131 }
6132
6133 if (GET_CODE (*x) == PLUS
6134 && GET_CODE (XEXP (*x, 1)) == CONST_INT
6135 && GET_CODE (XEXP (*x, 0)) == UNSPEC
6136 && XINT (XEXP (*x, 0), 1) == UNSPEC_LTREF
6137 && XVECEXP (XEXP (*x, 0), 0, 0) == ref)
6138 {
6139 rtx addr = gen_rtx_PLUS (Pmode, XVECEXP (XEXP (*x, 0), 0, 1), offset);
6140 *x = plus_constant (Pmode, addr, INTVAL (XEXP (*x, 1)));
6141 return;
6142 }
6143
6144 fmt = GET_RTX_FORMAT (GET_CODE (*x));
6145 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
6146 {
6147 if (fmt[i] == 'e')
6148 {
6149 replace_constant_pool_ref (&XEXP (*x, i), ref, offset);
6150 }
6151 else if (fmt[i] == 'E')
6152 {
6153 for (j = 0; j < XVECLEN (*x, i); j++)
6154 replace_constant_pool_ref (&XVECEXP (*x, i, j), ref, offset);
6155 }
6156 }
6157 }
6158
6159 /* Check whether X contains an UNSPEC_LTREL_BASE.
6160 Return its constant pool symbol if found, NULL_RTX otherwise. */
6161
6162 static rtx
6163 find_ltrel_base (rtx x)
6164 {
6165 int i, j;
6166 const char *fmt;
6167
6168 if (GET_CODE (x) == UNSPEC
6169 && XINT (x, 1) == UNSPEC_LTREL_BASE)
6170 return XVECEXP (x, 0, 0);
6171
6172 fmt = GET_RTX_FORMAT (GET_CODE (x));
6173 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
6174 {
6175 if (fmt[i] == 'e')
6176 {
6177 rtx fnd = find_ltrel_base (XEXP (x, i));
6178 if (fnd)
6179 return fnd;
6180 }
6181 else if (fmt[i] == 'E')
6182 {
6183 for (j = 0; j < XVECLEN (x, i); j++)
6184 {
6185 rtx fnd = find_ltrel_base (XVECEXP (x, i, j));
6186 if (fnd)
6187 return fnd;
6188 }
6189 }
6190 }
6191
6192 return NULL_RTX;
6193 }
6194
6195 /* Replace any occurrence of UNSPEC_LTREL_BASE in X with its base. */
6196
6197 static void
6198 replace_ltrel_base (rtx *x)
6199 {
6200 int i, j;
6201 const char *fmt;
6202
6203 if (GET_CODE (*x) == UNSPEC
6204 && XINT (*x, 1) == UNSPEC_LTREL_BASE)
6205 {
6206 *x = XVECEXP (*x, 0, 1);
6207 return;
6208 }
6209
6210 fmt = GET_RTX_FORMAT (GET_CODE (*x));
6211 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
6212 {
6213 if (fmt[i] == 'e')
6214 {
6215 replace_ltrel_base (&XEXP (*x, i));
6216 }
6217 else if (fmt[i] == 'E')
6218 {
6219 for (j = 0; j < XVECLEN (*x, i); j++)
6220 replace_ltrel_base (&XVECEXP (*x, i, j));
6221 }
6222 }
6223 }
6224
6225
6226 /* We keep a list of constants which we have to add to internal
6227 constant tables in the middle of large functions. */
6228
6229 #define NR_C_MODES 11
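/* The modes are listed in order of decreasing size and alignment
requirement; s390_dump_pool relies on this ordering to keep every
pool entry properly aligned.  */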
6230 enum machine_mode constant_modes[NR_C_MODES] =
6231 {
6232 TFmode, TImode, TDmode,
6233 DFmode, DImode, DDmode,
6234 SFmode, SImode, SDmode,
6235 HImode,
6236 QImode
6237 };
6238
6239 struct constant
6240 {
6241 struct constant *next;
6242 rtx value;
6243 rtx label;
6244 };
6245
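/* A constant_pool describes one literal pool chunk: the range of insns
it covers (first_insn and the insns bitmap), the placeholder pool_insn,
per-mode lists of constants, the in-pool execute target templates,
the pool base label, and the accumulated size in bytes.  */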
6246 struct constant_pool
6247 {
6248 struct constant_pool *next;
6249 rtx first_insn;
6250 rtx pool_insn;
6251 bitmap insns;
6252 rtx emit_pool_after;
6253
6254 struct constant *constants[NR_C_MODES];
6255 struct constant *execute;
6256 rtx label;
6257 int size;
6258 };
6259
6260 /* Allocate new constant_pool structure. */
6261
6262 static struct constant_pool *
6263 s390_alloc_pool (void)
6264 {
6265 struct constant_pool *pool;
6266 int i;
6267
6268 pool = (struct constant_pool *) xmalloc (sizeof *pool);
6269 pool->next = NULL;
6270 for (i = 0; i < NR_C_MODES; i++)
6271 pool->constants[i] = NULL;
6272
6273 pool->execute = NULL;
6274 pool->label = gen_label_rtx ();
6275 pool->first_insn = NULL_RTX;
6276 pool->pool_insn = NULL_RTX;
6277 pool->insns = BITMAP_ALLOC (NULL);
6278 pool->size = 0;
6279 pool->emit_pool_after = NULL_RTX;
6280
6281 return pool;
6282 }
6283
6284 /* Create new constant pool covering instructions starting at INSN
6285 and chain it to the end of POOL_LIST. */
6286
6287 static struct constant_pool *
6288 s390_start_pool (struct constant_pool **pool_list, rtx insn)
6289 {
6290 struct constant_pool *pool, **prev;
6291
6292 pool = s390_alloc_pool ();
6293 pool->first_insn = insn;
6294
6295 for (prev = pool_list; *prev; prev = &(*prev)->next)
6296 ;
6297 *prev = pool;
6298
6299 return pool;
6300 }
6301
6302 /* End range of instructions covered by POOL at INSN and emit
6303 placeholder insn representing the pool. */
6304
6305 static void
6306 s390_end_pool (struct constant_pool *pool, rtx insn)
6307 {
6308 rtx pool_size = GEN_INT (pool->size + 8 /* alignment slop */);
6309
6310 if (!insn)
6311 insn = get_last_insn ();
6312
6313 pool->pool_insn = emit_insn_after (gen_pool (pool_size), insn);
6314 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
6315 }
6316
6317 /* Add INSN to the list of insns covered by POOL. */
6318
6319 static void
6320 s390_add_pool_insn (struct constant_pool *pool, rtx insn)
6321 {
6322 bitmap_set_bit (pool->insns, INSN_UID (insn));
6323 }
6324
6325 /* Return pool out of POOL_LIST that covers INSN. */
6326
6327 static struct constant_pool *
6328 s390_find_pool (struct constant_pool *pool_list, rtx insn)
6329 {
6330 struct constant_pool *pool;
6331
6332 for (pool = pool_list; pool; pool = pool->next)
6333 if (bitmap_bit_p (pool->insns, INSN_UID (insn)))
6334 break;
6335
6336 return pool;
6337 }
6338
6339 /* Add constant VAL of mode MODE to the constant pool POOL. */
6340
6341 static void
6342 s390_add_constant (struct constant_pool *pool, rtx val, enum machine_mode mode)
6343 {
6344 struct constant *c;
6345 int i;
6346
6347 for (i = 0; i < NR_C_MODES; i++)
6348 if (constant_modes[i] == mode)
6349 break;
6350 gcc_assert (i != NR_C_MODES);
6351
6352 for (c = pool->constants[i]; c != NULL; c = c->next)
6353 if (rtx_equal_p (val, c->value))
6354 break;
6355
6356 if (c == NULL)
6357 {
6358 c = (struct constant *) xmalloc (sizeof *c);
6359 c->value = val;
6360 c->label = gen_label_rtx ();
6361 c->next = pool->constants[i];
6362 pool->constants[i] = c;
6363 pool->size += GET_MODE_SIZE (mode);
6364 }
6365 }
6366
6367 /* Return an rtx that represents the offset of X from the start of
6368 pool POOL. */
6369
6370 static rtx
6371 s390_pool_offset (struct constant_pool *pool, rtx x)
6372 {
6373 rtx label;
6374
6375 label = gen_rtx_LABEL_REF (GET_MODE (x), pool->label);
6376 x = gen_rtx_UNSPEC (GET_MODE (x), gen_rtvec (2, x, label),
6377 UNSPEC_POOL_OFFSET);
6378 return gen_rtx_CONST (GET_MODE (x), x);
6379 }
6380
6381 /* Find constant VAL of mode MODE in the constant pool POOL.
6382 Return an RTX describing the distance from the start of
6383 the pool to the location of the constant. */
6384
6385 static rtx
6386 s390_find_constant (struct constant_pool *pool, rtx val,
6387 enum machine_mode mode)
6388 {
6389 struct constant *c;
6390 int i;
6391
6392 for (i = 0; i < NR_C_MODES; i++)
6393 if (constant_modes[i] == mode)
6394 break;
6395 gcc_assert (i != NR_C_MODES);
6396
6397 for (c = pool->constants[i]; c != NULL; c = c->next)
6398 if (rtx_equal_p (val, c->value))
6399 break;
6400
6401 gcc_assert (c);
6402
6403 return s390_pool_offset (pool, gen_rtx_LABEL_REF (Pmode, c->label));
6404 }
6405
6406 /* Check whether INSN is an execute. Return the label_ref to its
6407 execute target template if so, NULL_RTX otherwise. */
6408
6409 static rtx
6410 s390_execute_label (rtx insn)
6411 {
6412 if (NONJUMP_INSN_P (insn)
6413 && GET_CODE (PATTERN (insn)) == PARALLEL
6414 && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == UNSPEC
6415 && XINT (XVECEXP (PATTERN (insn), 0, 0), 1) == UNSPEC_EXECUTE)
6416 return XVECEXP (XVECEXP (PATTERN (insn), 0, 0), 0, 2);
6417
6418 return NULL_RTX;
6419 }
6420
6421 /* Add execute target for INSN to the constant pool POOL. */
6422
6423 static void
6424 s390_add_execute (struct constant_pool *pool, rtx insn)
6425 {
6426 struct constant *c;
6427
6428 for (c = pool->execute; c != NULL; c = c->next)
6429 if (INSN_UID (insn) == INSN_UID (c->value))
6430 break;
6431
6432 if (c == NULL)
6433 {
6434 c = (struct constant *) xmalloc (sizeof *c);
6435 c->value = insn;
6436 c->label = gen_label_rtx ();
6437 c->next = pool->execute;
6438 pool->execute = c;
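/* An execute target template is a single instruction and thus
occupies at most 6 bytes.  */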
6439 pool->size += 6;
6440 }
6441 }
6442
6443 /* Find execute target for INSN in the constant pool POOL.
6444 Return an RTX describing the distance from the start of
6445 the pool to the location of the execute target. */
6446
6447 static rtx
6448 s390_find_execute (struct constant_pool *pool, rtx insn)
6449 {
6450 struct constant *c;
6451
6452 for (c = pool->execute; c != NULL; c = c->next)
6453 if (INSN_UID (insn) == INSN_UID (c->value))
6454 break;
6455
6456 gcc_assert (c);
6457
6458 return s390_pool_offset (pool, gen_rtx_LABEL_REF (Pmode, c->label));
6459 }
6460
6461 /* For an execute INSN, extract the execute target template. */
6462
6463 static rtx
6464 s390_execute_target (rtx insn)
6465 {
6466 rtx pattern = PATTERN (insn);
6467 gcc_assert (s390_execute_label (insn));
6468
6469 if (XVECLEN (pattern, 0) == 2)
6470 {
6471 pattern = copy_rtx (XVECEXP (pattern, 0, 1));
6472 }
6473 else
6474 {
6475 rtvec vec = rtvec_alloc (XVECLEN (pattern, 0) - 1);
6476 int i;
6477
6478 for (i = 0; i < XVECLEN (pattern, 0) - 1; i++)
6479 RTVEC_ELT (vec, i) = copy_rtx (XVECEXP (pattern, 0, i + 1));
6480
6481 pattern = gen_rtx_PARALLEL (VOIDmode, vec);
6482 }
6483
6484 return pattern;
6485 }
6486
6487 /* Indicate that INSN cannot be duplicated. This is the case for
6488 execute insns that carry a unique label. */
6489
6490 static bool
6491 s390_cannot_copy_insn_p (rtx insn)
6492 {
6493 rtx label = s390_execute_label (insn);
6494 return label && label != const0_rtx;
6495 }
6496
6497 /* Dump out the constants in POOL. If REMOTE_LABEL is true,
6498 do not emit the pool base label. */
6499
6500 static void
6501 s390_dump_pool (struct constant_pool *pool, bool remote_label)
6502 {
6503 struct constant *c;
6504 rtx insn = pool->pool_insn;
6505 int i;
6506
6507 /* Switch to rodata section. */
6508 if (TARGET_CPU_ZARCH)
6509 {
6510 insn = emit_insn_after (gen_pool_section_start (), insn);
6511 INSN_ADDRESSES_NEW (insn, -1);
6512 }
6513
6514 /* Ensure minimum pool alignment. */
6515 if (TARGET_CPU_ZARCH)
6516 insn = emit_insn_after (gen_pool_align (GEN_INT (8)), insn);
6517 else
6518 insn = emit_insn_after (gen_pool_align (GEN_INT (4)), insn);
6519 INSN_ADDRESSES_NEW (insn, -1);
6520
6521 /* Emit pool base label. */
6522 if (!remote_label)
6523 {
6524 insn = emit_label_after (pool->label, insn);
6525 INSN_ADDRESSES_NEW (insn, -1);
6526 }
6527
6528 /* Dump constants in descending alignment requirement order,
6529 ensuring proper alignment for every constant. */
6530 for (i = 0; i < NR_C_MODES; i++)
6531 for (c = pool->constants[i]; c; c = c->next)
6532 {
6533 /* Convert UNSPEC_LTREL_OFFSET unspecs to pool-relative references. */
6534 rtx value = copy_rtx (c->value);
6535 if (GET_CODE (value) == CONST
6536 && GET_CODE (XEXP (value, 0)) == UNSPEC
6537 && XINT (XEXP (value, 0), 1) == UNSPEC_LTREL_OFFSET
6538 && XVECLEN (XEXP (value, 0), 0) == 1)
6539 value = s390_pool_offset (pool, XVECEXP (XEXP (value, 0), 0, 0));
6540
6541 insn = emit_label_after (c->label, insn);
6542 INSN_ADDRESSES_NEW (insn, -1);
6543
6544 value = gen_rtx_UNSPEC_VOLATILE (constant_modes[i],
6545 gen_rtvec (1, value),
6546 UNSPECV_POOL_ENTRY);
6547 insn = emit_insn_after (value, insn);
6548 INSN_ADDRESSES_NEW (insn, -1);
6549 }
6550
6551 /* Ensure minimum alignment for instructions. */
6552 insn = emit_insn_after (gen_pool_align (GEN_INT (2)), insn);
6553 INSN_ADDRESSES_NEW (insn, -1);
6554
6555 /* Output in-pool execute template insns. */
6556 for (c = pool->execute; c; c = c->next)
6557 {
6558 insn = emit_label_after (c->label, insn);
6559 INSN_ADDRESSES_NEW (insn, -1);
6560
6561 insn = emit_insn_after (s390_execute_target (c->value), insn);
6562 INSN_ADDRESSES_NEW (insn, -1);
6563 }
6564
6565 /* Switch back to previous section. */
6566 if (TARGET_CPU_ZARCH)
6567 {
6568 insn = emit_insn_after (gen_pool_section_end (), insn);
6569 INSN_ADDRESSES_NEW (insn, -1);
6570 }
6571
6572 insn = emit_barrier_after (insn);
6573 INSN_ADDRESSES_NEW (insn, -1);
6574
6575 /* Remove placeholder insn. */
6576 remove_insn (pool->pool_insn);
6577 }
6578
6579 /* Free all memory used by POOL. */
6580
6581 static void
6582 s390_free_pool (struct constant_pool *pool)
6583 {
6584 struct constant *c, *next;
6585 int i;
6586
6587 for (i = 0; i < NR_C_MODES; i++)
6588 for (c = pool->constants[i]; c; c = next)
6589 {
6590 next = c->next;
6591 free (c);
6592 }
6593
6594 for (c = pool->execute; c; c = next)
6595 {
6596 next = c->next;
6597 free (c);
6598 }
6599
6600 BITMAP_FREE (pool->insns);
6601 free (pool);
6602 }
6603
6604
6605 /* Collect main literal pool. Return NULL on overflow. */
6606
6607 static struct constant_pool *
6608 s390_mainpool_start (void)
6609 {
6610 struct constant_pool *pool;
6611 rtx insn;
6612
6613 pool = s390_alloc_pool ();
6614
6615 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6616 {
6617 if (NONJUMP_INSN_P (insn)
6618 && GET_CODE (PATTERN (insn)) == SET
6619 && GET_CODE (SET_SRC (PATTERN (insn))) == UNSPEC_VOLATILE
6620 && XINT (SET_SRC (PATTERN (insn)), 1) == UNSPECV_MAIN_POOL)
6621 {
6622 gcc_assert (!pool->pool_insn);
6623 pool->pool_insn = insn;
6624 }
6625
6626 if (!TARGET_CPU_ZARCH && s390_execute_label (insn))
6627 {
6628 s390_add_execute (pool, insn);
6629 }
6630 else if (NONJUMP_INSN_P (insn) || CALL_P (insn))
6631 {
6632 rtx pool_ref = NULL_RTX;
6633 find_constant_pool_ref (PATTERN (insn), &pool_ref);
6634 if (pool_ref)
6635 {
6636 rtx constant = get_pool_constant (pool_ref);
6637 enum machine_mode mode = get_pool_mode (pool_ref);
6638 s390_add_constant (pool, constant, mode);
6639 }
6640 }
6641
6642 /* If hot/cold partitioning is enabled, we have to make sure that
6643 the literal pool is emitted in the same section where the
6644 initialization of the literal pool base pointer takes place.
6645 emit_pool_after is only used in the non-overflow case on non-Z
6646 CPUs, where we can emit the literal pool at the end of the
6647 function body within the text section. */
6648 if (NOTE_P (insn)
6649 && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS
6650 && !pool->emit_pool_after)
6651 pool->emit_pool_after = PREV_INSN (insn);
6652 }
6653
6654 gcc_assert (pool->pool_insn || pool->size == 0);
6655
6656 if (pool->size >= 4096)
6657 {
6658 /* We're going to chunkify the pool, so remove the main
6659 pool placeholder insn. */
6660 remove_insn (pool->pool_insn);
6661
6662 s390_free_pool (pool);
6663 pool = NULL;
6664 }
6665
6666 /* If the function ends with the section where the literal pool
6667 should be emitted, set the marker to its end. */
6668 if (pool && !pool->emit_pool_after)
6669 pool->emit_pool_after = get_last_insn ();
6670
6671 return pool;
6672 }
6673
6674 /* POOL holds the main literal pool as collected by s390_mainpool_start.
6675 Modify the current function to output the pool constants as well as
6676 the pool register setup instruction. */
6677
6678 static void
6679 s390_mainpool_finish (struct constant_pool *pool)
6680 {
6681 rtx base_reg = cfun->machine->base_reg;
6682 rtx insn;
6683
6684 /* If the pool is empty, we're done. */
6685 if (pool->size == 0)
6686 {
6687 /* We don't actually need a base register after all. */
6688 cfun->machine->base_reg = NULL_RTX;
6689
6690 if (pool->pool_insn)
6691 remove_insn (pool->pool_insn);
6692 s390_free_pool (pool);
6693 return;
6694 }
6695
6696 /* We need correct insn addresses. */
6697 shorten_branches (get_insns ());
6698
6699 /* On zSeries, we use a LARL to load the pool register. The pool is
6700 located in the .rodata section, so we emit it after the function. */
6701 if (TARGET_CPU_ZARCH)
6702 {
6703 insn = gen_main_base_64 (base_reg, pool->label);
6704 insn = emit_insn_after (insn, pool->pool_insn);
6705 INSN_ADDRESSES_NEW (insn, -1);
6706 remove_insn (pool->pool_insn);
6707
6708 insn = get_last_insn ();
6709 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
6710 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
6711
6712 s390_dump_pool (pool, 0);
6713 }
6714
6715 /* On S/390, if the total size of the function's code plus literal pool
6716 does not exceed 4096 bytes, we use BASR to set up a function base
6717 pointer, and emit the literal pool at the end of the function. */
6718 else if (INSN_ADDRESSES (INSN_UID (pool->emit_pool_after))
6719 + pool->size + 8 /* alignment slop */ < 4096)
6720 {
6721 insn = gen_main_base_31_small (base_reg, pool->label);
6722 insn = emit_insn_after (insn, pool->pool_insn);
6723 INSN_ADDRESSES_NEW (insn, -1);
6724 remove_insn (pool->pool_insn);
6725
6726 insn = emit_label_after (pool->label, insn);
6727 INSN_ADDRESSES_NEW (insn, -1);
6728
6729 /* emit_pool_after will be set by s390_mainpool_start to the
6730 last insn of the section where the literal pool should be
6731 emitted. */
6732 insn = pool->emit_pool_after;
6733
6734 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
6735 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
6736
6737 s390_dump_pool (pool, 1);
6738 }
6739
6740 /* Otherwise, we emit an inline literal pool and use BASR to branch
6741 over it, setting up the pool register at the same time. */
6742 else
6743 {
6744 rtx pool_end = gen_label_rtx ();
6745
6746 insn = gen_main_base_31_large (base_reg, pool->label, pool_end);
6747 insn = emit_jump_insn_after (insn, pool->pool_insn);
6748 JUMP_LABEL (insn) = pool_end;
6749 INSN_ADDRESSES_NEW (insn, -1);
6750 remove_insn (pool->pool_insn);
6751
6752 insn = emit_label_after (pool->label, insn);
6753 INSN_ADDRESSES_NEW (insn, -1);
6754
6755 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
6756 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
6757
6758 insn = emit_label_after (pool_end, pool->pool_insn);
6759 INSN_ADDRESSES_NEW (insn, -1);
6760
6761 s390_dump_pool (pool, 1);
6762 }
6763
6764
6765 /* Replace all literal pool references. */
6766
6767 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6768 {
6769 if (INSN_P (insn))
6770 replace_ltrel_base (&PATTERN (insn));
6771
6772 if (NONJUMP_INSN_P (insn) || CALL_P (insn))
6773 {
6774 rtx addr, pool_ref = NULL_RTX;
6775 find_constant_pool_ref (PATTERN (insn), &pool_ref);
6776 if (pool_ref)
6777 {
6778 if (s390_execute_label (insn))
6779 addr = s390_find_execute (pool, insn);
6780 else
6781 addr = s390_find_constant (pool, get_pool_constant (pool_ref),
6782 get_pool_mode (pool_ref));
6783
6784 replace_constant_pool_ref (&PATTERN (insn), pool_ref, addr);
6785 INSN_CODE (insn) = -1;
6786 }
6787 }
6788 }
6789
6790
6791 /* Free the pool. */
6792 s390_free_pool (pool);
6793 }
6794
6795 /* POOL holds the main literal pool as collected by s390_mainpool_start.
6796 We have decided we cannot use this pool, so revert all changes
6797 to the current function that were done by s390_mainpool_start. */
6798 static void
6799 s390_mainpool_cancel (struct constant_pool *pool)
6800 {
6801 /* We didn't actually change the instruction stream, so simply
6802 free the pool memory. */
6803 s390_free_pool (pool);
6804 }
6805
6806
6807 /* Chunkify the literal pool. */
6808
6809 #define S390_POOL_CHUNK_MIN 0xc00
6810 #define S390_POOL_CHUNK_MAX 0xe00
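/* Both limits stay well below the 4096-byte displacement range of
base-relative addressing; the slack leaves room for alignment padding
and for the base register reload insns inserted later.  */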
6811
6812 static struct constant_pool *
6813 s390_chunkify_start (void)
6814 {
6815 struct constant_pool *curr_pool = NULL, *pool_list = NULL;
6816 int extra_size = 0;
6817 bitmap far_labels;
6818 rtx pending_ltrel = NULL_RTX;
6819 rtx insn;
6820
6821 rtx (*gen_reload_base) (rtx, rtx) =
6822 TARGET_CPU_ZARCH? gen_reload_base_64 : gen_reload_base_31;
6823
6824
6825 /* We need correct insn addresses. */
6826
6827 shorten_branches (get_insns ());
6828
6829 /* Scan all insns and move literals to pool chunks. */
6830
6831 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6832 {
6833 bool section_switch_p = false;
6834
6835 /* Check for pending LTREL_BASE. */
6836 if (INSN_P (insn))
6837 {
6838 rtx ltrel_base = find_ltrel_base (PATTERN (insn));
6839 if (ltrel_base)
6840 {
6841 gcc_assert (ltrel_base == pending_ltrel);
6842 pending_ltrel = NULL_RTX;
6843 }
6844 }
6845
6846 if (!TARGET_CPU_ZARCH && s390_execute_label (insn))
6847 {
6848 if (!curr_pool)
6849 curr_pool = s390_start_pool (&pool_list, insn);
6850
6851 s390_add_execute (curr_pool, insn);
6852 s390_add_pool_insn (curr_pool, insn);
6853 }
6854 else if (NONJUMP_INSN_P (insn) || CALL_P (insn))
6855 {
6856 rtx pool_ref = NULL_RTX;
6857 find_constant_pool_ref (PATTERN (insn), &pool_ref);
6858 if (pool_ref)
6859 {
6860 rtx constant = get_pool_constant (pool_ref);
6861 enum machine_mode mode = get_pool_mode (pool_ref);
6862
6863 if (!curr_pool)
6864 curr_pool = s390_start_pool (&pool_list, insn);
6865
6866 s390_add_constant (curr_pool, constant, mode);
6867 s390_add_pool_insn (curr_pool, insn);
6868
6869 /* Don't split the pool chunk between an LTREL_OFFSET load
6870 and the corresponding LTREL_BASE. */
6871 if (GET_CODE (constant) == CONST
6872 && GET_CODE (XEXP (constant, 0)) == UNSPEC
6873 && XINT (XEXP (constant, 0), 1) == UNSPEC_LTREL_OFFSET)
6874 {
6875 gcc_assert (!pending_ltrel);
6876 pending_ltrel = pool_ref;
6877 }
6878 }
6879 }
6880
6881 if (JUMP_P (insn) || JUMP_TABLE_DATA_P (insn) || LABEL_P (insn))
6882 {
6883 if (curr_pool)
6884 s390_add_pool_insn (curr_pool, insn);
6885 /* An LTREL_BASE must follow within the same basic block. */
6886 gcc_assert (!pending_ltrel);
6887 }
6888
6889 if (NOTE_P (insn))
6890 switch (NOTE_KIND (insn))
6891 {
6892 case NOTE_INSN_SWITCH_TEXT_SECTIONS:
6893 section_switch_p = true;
6894 break;
6895 case NOTE_INSN_VAR_LOCATION:
6896 case NOTE_INSN_CALL_ARG_LOCATION:
6897 continue;
6898 default:
6899 break;
6900 }
6901
6902 if (!curr_pool
6903 || INSN_ADDRESSES_SIZE () <= (size_t) INSN_UID (insn)
6904 || INSN_ADDRESSES (INSN_UID (insn)) == -1)
6905 continue;
6906
6907 if (TARGET_CPU_ZARCH)
6908 {
6909 if (curr_pool->size < S390_POOL_CHUNK_MAX)
6910 continue;
6911
6912 s390_end_pool (curr_pool, NULL_RTX);
6913 curr_pool = NULL;
6914 }
6915 else
6916 {
6917 int chunk_size = INSN_ADDRESSES (INSN_UID (insn))
6918 - INSN_ADDRESSES (INSN_UID (curr_pool->first_insn))
6919 + extra_size;
6920
6921 /* We will later have to insert base register reload insns.
6922 Those will have an effect on code size, which we need to
6923 consider here. This calculation makes rather pessimistic
6924 worst-case assumptions. */
6925 if (LABEL_P (insn))
6926 extra_size += 6;
6927
6928 if (chunk_size < S390_POOL_CHUNK_MIN
6929 && curr_pool->size < S390_POOL_CHUNK_MIN
6930 && !section_switch_p)
6931 continue;
6932
6933 /* Pool chunks can only be inserted after BARRIERs ... */
6934 if (BARRIER_P (insn))
6935 {
6936 s390_end_pool (curr_pool, insn);
6937 curr_pool = NULL;
6938 extra_size = 0;
6939 }
6940
6941 /* ... so if we don't find one in time, create one. */
6942 else if (chunk_size > S390_POOL_CHUNK_MAX
6943 || curr_pool->size > S390_POOL_CHUNK_MAX
6944 || section_switch_p)
6945 {
6946 rtx label, jump, barrier, next, prev;
6947
6948 if (!section_switch_p)
6949 {
6950 /* We can insert the barrier only after a 'real' insn. */
6951 if (! NONJUMP_INSN_P (insn) && ! CALL_P (insn))
6952 continue;
6953 if (get_attr_length (insn) == 0)
6954 continue;
6955 /* Don't separate LTREL_BASE from the corresponding
6956 LTREL_OFFSET load. */
6957 if (pending_ltrel)
6958 continue;
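/* Advance past any debug location notes so that the new jump and
barrier end up after them.  */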
6959 next = insn;
6960 do
6961 {
6962 insn = next;
6963 next = NEXT_INSN (insn);
6964 }
6965 while (next
6966 && NOTE_P (next)
6967 && (NOTE_KIND (next) == NOTE_INSN_VAR_LOCATION
6968 || NOTE_KIND (next) == NOTE_INSN_CALL_ARG_LOCATION));
6969 }
6970 else
6971 {
6972 gcc_assert (!pending_ltrel);
6973
6974 /* The old pool has to end before the section switch
6975 note in order to make it part of the current
6976 section. */
6977 insn = PREV_INSN (insn);
6978 }
6979
6980 label = gen_label_rtx ();
6981 prev = insn;
6982 if (prev && NOTE_P (prev))
6983 prev = prev_nonnote_insn (prev);
6984 if (prev)
6985 jump = emit_jump_insn_after_setloc (gen_jump (label), insn,
6986 INSN_LOCATION (prev));
6987 else
6988 jump = emit_jump_insn_after_noloc (gen_jump (label), insn);
6989 barrier = emit_barrier_after (jump);
6990 insn = emit_label_after (label, barrier);
6991 JUMP_LABEL (jump) = label;
6992 LABEL_NUSES (label) = 1;
6993
6994 INSN_ADDRESSES_NEW (jump, -1);
6995 INSN_ADDRESSES_NEW (barrier, -1);
6996 INSN_ADDRESSES_NEW (insn, -1);
6997
6998 s390_end_pool (curr_pool, barrier);
6999 curr_pool = NULL;
7000 extra_size = 0;
7001 }
7002 }
7003 }
7004
7005 if (curr_pool)
7006 s390_end_pool (curr_pool, NULL_RTX);
7007 gcc_assert (!pending_ltrel);
7008
7009 /* Find all labels that are branched into
7010 from an insn belonging to a different chunk. */
7011
7012 far_labels = BITMAP_ALLOC (NULL);
7013
7014 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
7015 {
7016 /* Labels marked with LABEL_PRESERVE_P can be the target
7017 of non-local jumps, so we have to mark them.
7018 The same holds for named labels.
7019
7020 Don't do that, however, if it is the label before
7021 a jump table. */
7022
7023 if (LABEL_P (insn)
7024 && (LABEL_PRESERVE_P (insn) || LABEL_NAME (insn)))
7025 {
7026 rtx vec_insn = NEXT_INSN (insn);
7027 if (! vec_insn || ! JUMP_TABLE_DATA_P (vec_insn))
7028 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (insn));
7029 }
7030
7031 /* If we have a direct jump (conditional or unconditional)
7032 or a casesi jump, check all potential targets. */
7033 else if (JUMP_P (insn))
7034 {
7035 rtx pat = PATTERN (insn);
7036 rtx table;
7037
7038 if (GET_CODE (pat) == PARALLEL && XVECLEN (pat, 0) > 2)
7039 pat = XVECEXP (pat, 0, 0);
7040
7041 if (GET_CODE (pat) == SET)
7042 {
7043 rtx label = JUMP_LABEL (insn);
7044 if (label)
7045 {
7046 if (s390_find_pool (pool_list, label)
7047 != s390_find_pool (pool_list, insn))
7048 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (label));
7049 }
7050 }
7051 else if (tablejump_p (insn, NULL, &table))
7052 {
7053 rtx vec_pat = PATTERN (table);
7054 int i, diff_p = GET_CODE (vec_pat) == ADDR_DIFF_VEC;
7055
7056 for (i = 0; i < XVECLEN (vec_pat, diff_p); i++)
7057 {
7058 rtx label = XEXP (XVECEXP (vec_pat, diff_p, i), 0);
7059
7060 if (s390_find_pool (pool_list, label)
7061 != s390_find_pool (pool_list, insn))
7062 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (label));
7063 }
7064 }
7065 }
7066 }
7067
7068 /* Insert base register reload insns before every pool. */
7069
7070 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
7071 {
7072 rtx new_insn = gen_reload_base (cfun->machine->base_reg,
7073 curr_pool->label);
7074 rtx insn = curr_pool->first_insn;
7075 INSN_ADDRESSES_NEW (emit_insn_before (new_insn, insn), -1);
7076 }
7077
7078 /* Insert base register reload insns at every far label. */
7079
7080 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
7081 if (LABEL_P (insn)
7082 && bitmap_bit_p (far_labels, CODE_LABEL_NUMBER (insn)))
7083 {
7084 struct constant_pool *pool = s390_find_pool (pool_list, insn);
7085 if (pool)
7086 {
7087 rtx new_insn = gen_reload_base (cfun->machine->base_reg,
7088 pool->label);
7089 INSN_ADDRESSES_NEW (emit_insn_after (new_insn, insn), -1);
7090 }
7091 }
7092
7093
7094 BITMAP_FREE (far_labels);
7095
7096
7097 /* Recompute insn addresses. */
7098
7099 init_insn_lengths ();
7100 shorten_branches (get_insns ());
7101
7102 return pool_list;
7103 }
7104
7105 /* POOL_LIST is a chunk list as prepared by s390_chunkify_start.
7106 After we have decided to use this list, finish implementing
7107 all changes to the current function as required. */
7108
7109 static void
7110 s390_chunkify_finish (struct constant_pool *pool_list)
7111 {
7112 struct constant_pool *curr_pool = NULL;
7113 rtx insn;
7114
7115
7116 /* Replace all literal pool references. */
7117
7118 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
7119 {
7120 if (INSN_P (insn))
7121 replace_ltrel_base (&PATTERN (insn));
7122
7123 curr_pool = s390_find_pool (pool_list, insn);
7124 if (!curr_pool)
7125 continue;
7126
7127 if (NONJUMP_INSN_P (insn) || CALL_P (insn))
7128 {
7129 rtx addr, pool_ref = NULL_RTX;
7130 find_constant_pool_ref (PATTERN (insn), &pool_ref);
7131 if (pool_ref)
7132 {
7133 if (s390_execute_label (insn))
7134 addr = s390_find_execute (curr_pool, insn);
7135 else
7136 addr = s390_find_constant (curr_pool,
7137 get_pool_constant (pool_ref),
7138 get_pool_mode (pool_ref));
7139
7140 replace_constant_pool_ref (&PATTERN (insn), pool_ref, addr);
7141 INSN_CODE (insn) = -1;
7142 }
7143 }
7144 }
7145
7146 /* Dump out all literal pools. */
7147
7148 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
7149 s390_dump_pool (curr_pool, 0);
7150
7151 /* Free pool list. */
7152
7153 while (pool_list)
7154 {
7155 struct constant_pool *next = pool_list->next;
7156 s390_free_pool (pool_list);
7157 pool_list = next;
7158 }
7159 }
7160
7161 /* POOL_LIST is a chunk list as prepared by s390_chunkify_start.
7162 We have decided we cannot use this list, so revert all changes
7163 to the current function that were done by s390_chunkify_start. */
7164
7165 static void
7166 s390_chunkify_cancel (struct constant_pool *pool_list)
7167 {
7168 struct constant_pool *curr_pool = NULL;
7169 rtx insn;
7170
7171 /* Remove all pool placeholder insns. */
7172
7173 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
7174 {
7175 /* Did we insert an extra barrier? Remove it. */
7176 rtx barrier = PREV_INSN (curr_pool->pool_insn);
7177 rtx jump = barrier? PREV_INSN (barrier) : NULL_RTX;
7178 rtx label = NEXT_INSN (curr_pool->pool_insn);
7179
7180 if (jump && JUMP_P (jump)
7181 && barrier && BARRIER_P (barrier)
7182 && label && LABEL_P (label)
7183 && GET_CODE (PATTERN (jump)) == SET
7184 && SET_DEST (PATTERN (jump)) == pc_rtx
7185 && GET_CODE (SET_SRC (PATTERN (jump))) == LABEL_REF
7186 && XEXP (SET_SRC (PATTERN (jump)), 0) == label)
7187 {
7188 remove_insn (jump);
7189 remove_insn (barrier);
7190 remove_insn (label);
7191 }
7192
7193 remove_insn (curr_pool->pool_insn);
7194 }
7195
7196 /* Remove all base register reload insns. */
7197
7198 for (insn = get_insns (); insn; )
7199 {
7200 rtx next_insn = NEXT_INSN (insn);
7201
7202 if (NONJUMP_INSN_P (insn)
7203 && GET_CODE (PATTERN (insn)) == SET
7204 && GET_CODE (SET_SRC (PATTERN (insn))) == UNSPEC
7205 && XINT (SET_SRC (PATTERN (insn)), 1) == UNSPEC_RELOAD_BASE)
7206 remove_insn (insn);
7207
7208 insn = next_insn;
7209 }
7210
7211 /* Free pool list. */
7212
7213 while (pool_list)
7214 {
7215 struct constant_pool *next = pool_list->next;
7216 s390_free_pool (pool_list);
7217 pool_list = next;
7218 }
7219 }
7220
7221 /* Output the constant pool entry EXP in mode MODE with alignment ALIGN. */
7222
7223 void
7224 s390_output_pool_entry (rtx exp, enum machine_mode mode, unsigned int align)
7225 {
7226 REAL_VALUE_TYPE r;
7227
7228 switch (GET_MODE_CLASS (mode))
7229 {
7230 case MODE_FLOAT:
7231 case MODE_DECIMAL_FLOAT:
7232 gcc_assert (GET_CODE (exp) == CONST_DOUBLE);
7233
7234 REAL_VALUE_FROM_CONST_DOUBLE (r, exp);
7235 assemble_real (r, mode, align);
7236 break;
7237
7238 case MODE_INT:
7239 assemble_integer (exp, GET_MODE_SIZE (mode), align, 1);
7240 mark_symbol_refs_as_used (exp);
7241 break;
7242
7243 default:
7244 gcc_unreachable ();
7245 }
7246 }
7247
7248
7249 /* Return an RTL expression representing the value of the return address
7250 for the frame COUNT steps up from the current frame. FRAME is the
7251 frame pointer of that frame. */
7252
7253 rtx
7254 s390_return_addr_rtx (int count, rtx frame ATTRIBUTE_UNUSED)
7255 {
7256 int offset;
7257 rtx addr;
7258
7259 /* Without backchain, we fail for all but the current frame. */
7260
7261 if (!TARGET_BACKCHAIN && count > 0)
7262 return NULL_RTX;
7263
7264 /* For the current frame, we need to make sure the initial
7265 value of RETURN_REGNUM is actually saved. */
7266
7267 if (count == 0)
7268 {
7269 /* On non-z architectures branch splitting could overwrite r14. */
7270 if (TARGET_CPU_ZARCH)
7271 return get_hard_reg_initial_val (Pmode, RETURN_REGNUM);
7272 else
7273 {
7274 cfun_frame_layout.save_return_addr_p = true;
7275 return gen_rtx_MEM (Pmode, return_address_pointer_rtx);
7276 }
7277 }
7278
7279 if (TARGET_PACKED_STACK)
7280 offset = -2 * UNITS_PER_LONG;
7281 else
7282 offset = RETURN_REGNUM * UNITS_PER_LONG;
7283
7284 addr = plus_constant (Pmode, frame, offset);
7285 addr = memory_address (Pmode, addr);
7286 return gen_rtx_MEM (Pmode, addr);
7287 }
7288
7289 /* Return an RTL expression representing the back chain stored in
7290 the current stack frame. */
7291
7292 rtx
7293 s390_back_chain_rtx (void)
7294 {
7295 rtx chain;
7296
7297 gcc_assert (TARGET_BACKCHAIN);
7298
7299 if (TARGET_PACKED_STACK)
7300 chain = plus_constant (Pmode, stack_pointer_rtx,
7301 STACK_POINTER_OFFSET - UNITS_PER_LONG);
7302 else
7303 chain = stack_pointer_rtx;
7304
7305 chain = gen_rtx_MEM (Pmode, chain);
7306 return chain;
7307 }
7308
7309 /* Find the first call-clobbered register unused in the current function.
7310 It could be used as a base register in a leaf function
7311 or for holding the return address before the epilogue. */
7312
7313 static int
7314 find_unused_clobbered_reg (void)
7315 {
7316 int i;
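/* Only the call-clobbered GPRs 0-5 are considered.  */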
7317 for (i = 0; i < 6; i++)
7318 if (!df_regs_ever_live_p (i))
7319 return i;
7320 return 0;
7321 }
7322
7323
7324 /* Helper function for s390_regs_ever_clobbered. Sets the entries in DATA to 1
7325 for each hard reg covered by SETREG. */
7326
7327 static void
7328 s390_reg_clobbered_rtx (rtx setreg, const_rtx set_insn ATTRIBUTE_UNUSED, void *data)
7329 {
7330 int *regs_ever_clobbered = (int *)data;
7331 unsigned int i, regno;
7332 enum machine_mode mode = GET_MODE (setreg);
7333
7334 if (GET_CODE (setreg) == SUBREG)
7335 {
7336 rtx inner = SUBREG_REG (setreg);
7337 if (!GENERAL_REG_P (inner))
7338 return;
7339 regno = subreg_regno (setreg);
7340 }
7341 else if (GENERAL_REG_P (setreg))
7342 regno = REGNO (setreg);
7343 else
7344 return;
7345
7346 for (i = regno;
7347 i < regno + HARD_REGNO_NREGS (regno, mode);
7348 i++)
7349 regs_ever_clobbered[i] = 1;
7350 }
7351
7352 /* Walks through all basic blocks of the current function looking
7353 for clobbered hard regs using s390_reg_clobbered_rtx. The fields
7354 of the passed integer array REGS_EVER_CLOBBERED are set to one for
7355 each of those regs. */
7356
7357 static void
7358 s390_regs_ever_clobbered (int *regs_ever_clobbered)
7359 {
7360 basic_block cur_bb;
7361 rtx cur_insn;
7362 unsigned int i;
7363
7364 memset (regs_ever_clobbered, 0, 16 * sizeof (int));
7365
7366 /* For non-leaf functions we have to consider all call clobbered regs to be
7367 clobbered. */
7368 if (!crtl->is_leaf)
7369 {
7370 for (i = 0; i < 16; i++)
7371 regs_ever_clobbered[i] = call_really_used_regs[i];
7372 }
7373
7374 /* Make the "magic" eh_return registers live if necessary. For regs_ever_live
7375 this work is done by liveness analysis (mark_regs_live_at_end).
7376 Special care is needed for functions containing landing pads. Landing pads
7377 may use the eh registers, but the code which sets these registers is not
7378 contained in that function. Hence s390_regs_ever_clobbered is not able to
7379 deal with this automatically. */
7380 if (crtl->calls_eh_return || cfun->machine->has_landing_pad_p)
7381 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM ; i++)
7382 if (crtl->calls_eh_return
7383 || (cfun->machine->has_landing_pad_p
7384 && df_regs_ever_live_p (EH_RETURN_DATA_REGNO (i))))
7385 regs_ever_clobbered[EH_RETURN_DATA_REGNO (i)] = 1;
7386
7387 /* For nonlocal gotos all call-saved registers have to be saved.
7388 This flag is also set for the unwinding code in libgcc.
7389 See expand_builtin_unwind_init. For regs_ever_live this is done by
7390 reload. */
7391 if (cfun->has_nonlocal_label)
7392 for (i = 0; i < 16; i++)
7393 if (!call_really_used_regs[i])
7394 regs_ever_clobbered[i] = 1;
7395
7396 FOR_EACH_BB (cur_bb)
7397 {
7398 FOR_BB_INSNS (cur_bb, cur_insn)
7399 {
7400 if (INSN_P (cur_insn))
7401 note_stores (PATTERN (cur_insn),
7402 s390_reg_clobbered_rtx,
7403 regs_ever_clobbered);
7404 }
7405 }
7406 }
7407
7408 /* Determine the frame area which actually has to be accessed
7409 in the function epilogue. The values are stored at the
7410 given pointers AREA_BOTTOM (the lowest used stack
7411 address) and AREA_TOP (the address of the first item which does
7412 not belong to the stack frame). */
7413
7414 static void
7415 s390_frame_area (int *area_bottom, int *area_top)
7416 {
7417 int b, t;
7418 int i;
7419
7420 b = INT_MAX;
7421 t = INT_MIN;
7422
7423 if (cfun_frame_layout.first_restore_gpr != -1)
7424 {
7425 b = (cfun_frame_layout.gprs_offset
7426 + cfun_frame_layout.first_restore_gpr * UNITS_PER_LONG);
7427 t = b + (cfun_frame_layout.last_restore_gpr
7428 - cfun_frame_layout.first_restore_gpr + 1) * UNITS_PER_LONG;
7429 }
7430
7431 if (TARGET_64BIT && cfun_save_high_fprs_p)
7432 {
7433 b = MIN (b, cfun_frame_layout.f8_offset);
7434 t = MAX (t, (cfun_frame_layout.f8_offset
7435 + cfun_frame_layout.high_fprs * 8));
7436 }
7437
7438 if (!TARGET_64BIT)
7439 for (i = 2; i < 4; i++)
7440 if (cfun_fpr_bit_p (i))
7441 {
7442 b = MIN (b, cfun_frame_layout.f4_offset + (i - 2) * 8);
7443 t = MAX (t, cfun_frame_layout.f4_offset + (i - 1) * 8);
7444 }
7445
7446 *area_bottom = b;
7447 *area_top = t;
7448 }
7449
7450 /* Fill cfun->machine with info about register usage of current function.
7451 Return in CLOBBERED_REGS which GPRs are currently considered set. */
7452
7453 static void
7454 s390_register_info (int clobbered_regs[])
7455 {
7456 int i, j;
7457
7458 /* FPRs 8-15 are call-saved in the 64-bit ABI. */
7459 cfun_frame_layout.fpr_bitmap = 0;
7460 cfun_frame_layout.high_fprs = 0;
7461 if (TARGET_64BIT)
7462 for (i = 24; i < 32; i++)
7463 if (df_regs_ever_live_p (i) && !global_regs[i])
7464 {
7465 cfun_set_fpr_bit (i - 16);
7466 cfun_frame_layout.high_fprs++;
7467 }
7468
7469 /* Find first and last gpr to be saved. We trust regs_ever_live
7470 data, except that we don't save and restore global registers.
7471
7472 Also, all registers with special meaning to the compiler need
7473 extra handling. */
7474
7475 s390_regs_ever_clobbered (clobbered_regs);
7476
7477 for (i = 0; i < 16; i++)
7478 clobbered_regs[i] = clobbered_regs[i] && !global_regs[i] && !fixed_regs[i];
7479
7480 if (frame_pointer_needed)
7481 clobbered_regs[HARD_FRAME_POINTER_REGNUM] = 1;
7482
7483 if (flag_pic)
7484 clobbered_regs[PIC_OFFSET_TABLE_REGNUM]
7485 |= df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM);
7486
7487 clobbered_regs[BASE_REGNUM]
7488 |= (cfun->machine->base_reg
7489 && REGNO (cfun->machine->base_reg) == BASE_REGNUM);
7490
7491 clobbered_regs[RETURN_REGNUM]
7492 |= (!crtl->is_leaf
7493 || TARGET_TPF_PROFILING
7494 || cfun->machine->split_branches_pending_p
7495 || cfun_frame_layout.save_return_addr_p
7496 || crtl->calls_eh_return
7497 || cfun->stdarg);
7498
7499 clobbered_regs[STACK_POINTER_REGNUM]
7500 |= (!crtl->is_leaf
7501 || TARGET_TPF_PROFILING
7502 || cfun_save_high_fprs_p
7503 || get_frame_size () > 0
7504 || cfun->calls_alloca
7505 || cfun->stdarg);
7506
7507 for (i = 6; i < 16; i++)
7508 if (df_regs_ever_live_p (i) || clobbered_regs[i])
7509 break;
7510 for (j = 15; j > i; j--)
7511 if (df_regs_ever_live_p (j) || clobbered_regs[j])
7512 break;
7513
7514 if (i == 16)
7515 {
7516 /* Nothing to save/restore. */
7517 cfun_frame_layout.first_save_gpr_slot = -1;
7518 cfun_frame_layout.last_save_gpr_slot = -1;
7519 cfun_frame_layout.first_save_gpr = -1;
7520 cfun_frame_layout.first_restore_gpr = -1;
7521 cfun_frame_layout.last_save_gpr = -1;
7522 cfun_frame_layout.last_restore_gpr = -1;
7523 }
7524 else
7525 {
7526 /* Save slots for gprs from i to j. */
7527 cfun_frame_layout.first_save_gpr_slot = i;
7528 cfun_frame_layout.last_save_gpr_slot = j;
7529
7530 for (i = cfun_frame_layout.first_save_gpr_slot;
7531 i < cfun_frame_layout.last_save_gpr_slot + 1;
7532 i++)
7533 if (clobbered_regs[i])
7534 break;
7535
7536 for (j = cfun_frame_layout.last_save_gpr_slot; j > i; j--)
7537 if (clobbered_regs[j])
7538 break;
7539
7540 if (i == cfun_frame_layout.last_save_gpr_slot + 1)
7541 {
7542 /* Nothing to save/restore. */
7543 cfun_frame_layout.first_save_gpr = -1;
7544 cfun_frame_layout.first_restore_gpr = -1;
7545 cfun_frame_layout.last_save_gpr = -1;
7546 cfun_frame_layout.last_restore_gpr = -1;
7547 }
7548 else
7549 {
7550 /* Save / Restore from gpr i to j. */
7551 cfun_frame_layout.first_save_gpr = i;
7552 cfun_frame_layout.first_restore_gpr = i;
7553 cfun_frame_layout.last_save_gpr = j;
7554 cfun_frame_layout.last_restore_gpr = j;
7555 }
7556 }
7557
7558 if (cfun->stdarg)
7559 {
7560 /* Varargs functions need to save gprs 2 to 6. */
7561 if (cfun->va_list_gpr_size
7562 && crtl->args.info.gprs < GP_ARG_NUM_REG)
7563 {
7564 int min_gpr = crtl->args.info.gprs;
7565 int max_gpr = min_gpr + cfun->va_list_gpr_size;
7566 if (max_gpr > GP_ARG_NUM_REG)
7567 max_gpr = GP_ARG_NUM_REG;
7568
7569 if (cfun_frame_layout.first_save_gpr == -1
7570 || cfun_frame_layout.first_save_gpr > 2 + min_gpr)
7571 {
7572 cfun_frame_layout.first_save_gpr = 2 + min_gpr;
7573 cfun_frame_layout.first_save_gpr_slot = 2 + min_gpr;
7574 }
7575
7576 if (cfun_frame_layout.last_save_gpr == -1
7577 || cfun_frame_layout.last_save_gpr < 2 + max_gpr - 1)
7578 {
7579 cfun_frame_layout.last_save_gpr = 2 + max_gpr - 1;
7580 cfun_frame_layout.last_save_gpr_slot = 2 + max_gpr - 1;
7581 }
7582 }
7583
7584 /* Mark f0 and f2 (31 bit) or f0-f4 (64 bit) as needing to be saved. */
7585 if (TARGET_HARD_FLOAT && cfun->va_list_fpr_size
7586 && crtl->args.info.fprs < FP_ARG_NUM_REG)
7587 {
7588 int min_fpr = crtl->args.info.fprs;
7589 int max_fpr = min_fpr + cfun->va_list_fpr_size;
7590 if (max_fpr > FP_ARG_NUM_REG)
7591 max_fpr = FP_ARG_NUM_REG;
7592
7593 /* ??? This is currently required to ensure proper location
7594 of the fpr save slots within the va_list save area. */
7595 if (TARGET_PACKED_STACK)
7596 min_fpr = 0;
7597
7598 for (i = min_fpr; i < max_fpr; i++)
7599 cfun_set_fpr_bit (i);
7600 }
7601 }
7602
7603 if (!TARGET_64BIT)
7604 for (i = 2; i < 4; i++)
7605 if (df_regs_ever_live_p (i + 16) && !global_regs[i + 16])
7606 cfun_set_fpr_bit (i);
7607 }
7608
7609 /* Fill cfun->machine with info about frame of current function. */
7610
7611 static void
7612 s390_frame_info (void)
7613 {
7614 int i;
7615
7616 cfun_frame_layout.frame_size = get_frame_size ();
7617 if (!TARGET_64BIT && cfun_frame_layout.frame_size > 0x7fff0000)
7618 fatal_error ("total size of local variables exceeds architecture limit");
7619
7620 if (!TARGET_PACKED_STACK)
7621 {
7622 cfun_frame_layout.backchain_offset = 0;
7623 cfun_frame_layout.f0_offset = 16 * UNITS_PER_LONG;
7624 cfun_frame_layout.f4_offset = cfun_frame_layout.f0_offset + 2 * 8;
7625 cfun_frame_layout.f8_offset = -cfun_frame_layout.high_fprs * 8;
7626 cfun_frame_layout.gprs_offset = (cfun_frame_layout.first_save_gpr_slot
7627 * UNITS_PER_LONG);
7628 }
7629 else if (TARGET_BACKCHAIN) /* kernel stack layout */
7630 {
7631 cfun_frame_layout.backchain_offset = (STACK_POINTER_OFFSET
7632 - UNITS_PER_LONG);
7633 cfun_frame_layout.gprs_offset
7634 = (cfun_frame_layout.backchain_offset
7635 - (STACK_POINTER_REGNUM - cfun_frame_layout.first_save_gpr_slot + 1)
7636 * UNITS_PER_LONG);
7637
7638 if (TARGET_64BIT)
7639 {
7640 cfun_frame_layout.f4_offset
7641 = (cfun_frame_layout.gprs_offset
7642 - 8 * (cfun_fpr_bit_p (2) + cfun_fpr_bit_p (3)));
7643
7644 cfun_frame_layout.f0_offset
7645 = (cfun_frame_layout.f4_offset
7646 - 8 * (cfun_fpr_bit_p (0) + cfun_fpr_bit_p (1)));
7647 }
7648 else
7649 {
7650 /* On 31 bit we have to take care of the alignment of the
7651 floating-point register save slots to provide the fastest access. */
7652 cfun_frame_layout.f0_offset
7653 = ((cfun_frame_layout.gprs_offset
7654 & ~(STACK_BOUNDARY / BITS_PER_UNIT - 1))
7655 - 8 * (cfun_fpr_bit_p (0) + cfun_fpr_bit_p (1)));
7656
7657 cfun_frame_layout.f4_offset
7658 = (cfun_frame_layout.f0_offset
7659 - 8 * (cfun_fpr_bit_p (2) + cfun_fpr_bit_p (3)));
7660 }
7661 }
7662 else /* no backchain */
7663 {
7664 cfun_frame_layout.f4_offset
7665 = (STACK_POINTER_OFFSET
7666 - 8 * (cfun_fpr_bit_p (2) + cfun_fpr_bit_p (3)));
7667
7668 cfun_frame_layout.f0_offset
7669 = (cfun_frame_layout.f4_offset
7670 - 8 * (cfun_fpr_bit_p (0) + cfun_fpr_bit_p (1)));
7671
7672 cfun_frame_layout.gprs_offset
7673 = cfun_frame_layout.f0_offset - cfun_gprs_save_area_size;
7674 }
7675
7676 if (crtl->is_leaf
7677 && !TARGET_TPF_PROFILING
7678 && cfun_frame_layout.frame_size == 0
7679 && !cfun_save_high_fprs_p
7680 && !cfun->calls_alloca
7681 && !cfun->stdarg)
7682 return;
7683
7684 if (!TARGET_PACKED_STACK)
7685 cfun_frame_layout.frame_size += (STACK_POINTER_OFFSET
7686 + crtl->outgoing_args_size
7687 + cfun_frame_layout.high_fprs * 8);
7688 else
7689 {
7690 if (TARGET_BACKCHAIN)
7691 cfun_frame_layout.frame_size += UNITS_PER_LONG;
7692
7693 /* No alignment trouble here because f8-f15 are only saved under
7694 64 bit. */
7695 cfun_frame_layout.f8_offset = (MIN (MIN (cfun_frame_layout.f0_offset,
7696 cfun_frame_layout.f4_offset),
7697 cfun_frame_layout.gprs_offset)
7698 - cfun_frame_layout.high_fprs * 8);
7699
7700 cfun_frame_layout.frame_size += cfun_frame_layout.high_fprs * 8;
7701
7702 for (i = 0; i < 8; i++)
7703 if (cfun_fpr_bit_p (i))
7704 cfun_frame_layout.frame_size += 8;
7705
7706 cfun_frame_layout.frame_size += cfun_gprs_save_area_size;
7707
7708 /* If, under 31 bit, an odd number of GPRs has to be saved, we have to adjust
7709 the frame size to sustain 8-byte alignment of stack frames. */
7710 cfun_frame_layout.frame_size = ((cfun_frame_layout.frame_size +
7711 STACK_BOUNDARY / BITS_PER_UNIT - 1)
7712 & ~(STACK_BOUNDARY / BITS_PER_UNIT - 1));
7713
7714 cfun_frame_layout.frame_size += crtl->outgoing_args_size;
7715 }
7716 }
7717
7718 /* Generate frame layout. Fills in register and frame data for the current
7719 function in cfun->machine. This routine can be called multiple times;
7720 it will re-do the complete frame layout every time. */
7721
7722 static void
7723 s390_init_frame_layout (void)
7724 {
7725 HOST_WIDE_INT frame_size;
7726 int base_used;
7727 int clobbered_regs[16];
7728
7729 /* On S/390 machines, we may need to perform branch splitting, which
7730 will require both the base and the return address register. We have no
7731 choice but to assume we're going to need them until right at the
7732 end of the machine-dependent reorg phase. */
7733 if (!TARGET_CPU_ZARCH)
7734 cfun->machine->split_branches_pending_p = true;
7735
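/* Iterate until the frame size reaches a fixed point: whether the base
register is needed depends on the frame size, and reserving the base
register in turn changes the register and frame layout.  */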
7736 do
7737 {
7738 frame_size = cfun_frame_layout.frame_size;
7739
7740 /* Try to predict whether we'll need the base register. */
7741 base_used = cfun->machine->split_branches_pending_p
7742 || crtl->uses_const_pool
7743 || (!DISP_IN_RANGE (frame_size)
7744 && !CONST_OK_FOR_K (frame_size));
7745
7746 /* Decide which register to use as literal pool base. In small
7747 leaf functions, try to use an unused call-clobbered register
7748 as base register to avoid save/restore overhead. */
7749 if (!base_used)
7750 cfun->machine->base_reg = NULL_RTX;
7751 else if (crtl->is_leaf && !df_regs_ever_live_p (5))
7752 cfun->machine->base_reg = gen_rtx_REG (Pmode, 5);
7753 else
7754 cfun->machine->base_reg = gen_rtx_REG (Pmode, BASE_REGNUM);
7755
7756 s390_register_info (clobbered_regs);
7757 s390_frame_info ();
7758 }
7759 while (frame_size != cfun_frame_layout.frame_size);
7760 }
7761
7762 /* Update frame layout. Recompute actual register save data based on
7763 current info and update regs_ever_live for the special registers.
7764 May be called multiple times, but may never cause *more* registers
7765 to be saved than s390_init_frame_layout allocated room for. */
7766
7767 static void
7768 s390_update_frame_layout (void)
7769 {
7770 int clobbered_regs[16];
7771
7772 s390_register_info (clobbered_regs);
7773
7774 df_set_regs_ever_live (BASE_REGNUM,
7775 clobbered_regs[BASE_REGNUM] ? true : false);
7776 df_set_regs_ever_live (RETURN_REGNUM,
7777 clobbered_regs[RETURN_REGNUM] ? true : false);
7778 df_set_regs_ever_live (STACK_POINTER_REGNUM,
7779 clobbered_regs[STACK_POINTER_REGNUM] ? true : false);
7780
7781 if (cfun->machine->base_reg)
7782 df_set_regs_ever_live (REGNO (cfun->machine->base_reg), true);
7783 }
7784
7785 /* Return true if it is legal to put a value with MODE into REGNO. */
7786
7787 bool
7788 s390_hard_regno_mode_ok (unsigned int regno, enum machine_mode mode)
7789 {
7790 switch (REGNO_REG_CLASS (regno))
7791 {
7792 case FP_REGS:
7793 if (REGNO_PAIR_OK (regno, mode))
7794 {
7795 if (mode == SImode || mode == DImode)
7796 return true;
7797
7798 if (FLOAT_MODE_P (mode) && GET_MODE_CLASS (mode) != MODE_VECTOR_FLOAT)
7799 return true;
7800 }
7801 break;
7802 case ADDR_REGS:
7803 if (FRAME_REGNO_P (regno) && mode == Pmode)
7804 return true;
7805
7806 /* fallthrough */
7807 case GENERAL_REGS:
7808 if (REGNO_PAIR_OK (regno, mode))
7809 {
7810 if (TARGET_ZARCH
7811 || (mode != TFmode && mode != TCmode && mode != TDmode))
7812 return true;
7813 }
7814 break;
7815 case CC_REGS:
7816 if (GET_MODE_CLASS (mode) == MODE_CC)
7817 return true;
7818 break;
7819 case ACCESS_REGS:
7820 if (REGNO_PAIR_OK (regno, mode))
7821 {
7822 if (mode == SImode || mode == Pmode)
7823 return true;
7824 }
7825 break;
7826 default:
7827 return false;
7828 }
7829
7830 return false;
7831 }
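
/* Some consequences of the checks above, for illustration: SImode, DImode
   and the scalar floating point modes are allowed in floating point
   registers (multi-register modes only where REGNO_PAIR_OK accepts the
   combination); TFmode, TCmode and TDmode are allowed in general purpose
   registers only under TARGET_ZARCH; and the CC register only ever holds
   MODE_CC values. */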
7832
7833 /* Return nonzero if register OLD_REG can be renamed to register NEW_REG. */
7834
7835 bool
7836 s390_hard_regno_rename_ok (unsigned int old_reg, unsigned int new_reg)
7837 {
7838 /* Once we've decided upon a register to use as base register, it must
7839 no longer be used for any other purpose. */
7840 if (cfun->machine->base_reg)
7841 if (REGNO (cfun->machine->base_reg) == old_reg
7842 || REGNO (cfun->machine->base_reg) == new_reg)
7843 return false;
7844
7845 return true;
7846 }
7847
7848 /* Maximum number of registers to represent a value of mode MODE
7849 in a register of class RCLASS. */
7850
7851 int
7852 s390_class_max_nregs (enum reg_class rclass, enum machine_mode mode)
7853 {
7854 switch (rclass)
7855 {
7856 case FP_REGS:
7857 if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
7858 return 2 * ((GET_MODE_SIZE (mode) / 2 + 8 - 1) / 8);
7859 else
7860 return (GET_MODE_SIZE (mode) + 8 - 1) / 8;
7861 case ACCESS_REGS:
7862 return (GET_MODE_SIZE (mode) + 4 - 1) / 4;
7863 default:
7864 break;
7865 }
7866 return (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
7867 }
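
/* A few worked examples of the computation above: DFmode (8 bytes) needs a
   single FP register, TFmode (16 bytes) needs two, and a TCmode complex
   value (32 bytes) needs 2 * ((16 + 7) / 8) = 4; an SImode value needs one
   4-byte ACCESS_REGS register, and everything else is rounded up to
   multiples of UNITS_PER_WORD. */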
7868
7869 /* Return true if we use LRA instead of reload pass. */
7870 static bool
7871 s390_lra_p (void)
7872 {
7873 return s390_lra_flag;
7874 }
7875
7876 /* Return true if register FROM can be eliminated via register TO. */
7877
7878 static bool
7879 s390_can_eliminate (const int from, const int to)
7880 {
7881 /* On zSeries machines, we have not marked the base register as fixed.
7882 Instead, we have an elimination rule BASE_REGNUM -> BASE_REGNUM.
7883 If a function requires the base register, we say here that this
7884 elimination cannot be performed. This will cause reload to free
7885 up the base register (as if it were fixed). On the other hand,
7886 if the current function does *not* require the base register, we
7887 say here the elimination succeeds, which in turn allows reload
7888 to allocate the base register for any other purpose. */
7889 if (from == BASE_REGNUM && to == BASE_REGNUM)
7890 {
7891 if (TARGET_CPU_ZARCH)
7892 {
7893 s390_init_frame_layout ();
7894 return cfun->machine->base_reg == NULL_RTX;
7895 }
7896
7897 return false;
7898 }
7899
7900 /* Everything else must point into the stack frame. */
7901 gcc_assert (to == STACK_POINTER_REGNUM
7902 || to == HARD_FRAME_POINTER_REGNUM);
7903
7904 gcc_assert (from == FRAME_POINTER_REGNUM
7905 || from == ARG_POINTER_REGNUM
7906 || from == RETURN_ADDRESS_POINTER_REGNUM);
7907
7908 /* Make sure we actually saved the return address. */
7909 if (from == RETURN_ADDRESS_POINTER_REGNUM)
7910 if (!crtl->calls_eh_return
7911 && !cfun->stdarg
7912 && !cfun_frame_layout.save_return_addr_p)
7913 return false;
7914
7915 return true;
7916 }
7917
7918 /* Return the offset between registers FROM and TO initially after the prologue. */
7919
7920 HOST_WIDE_INT
7921 s390_initial_elimination_offset (int from, int to)
7922 {
7923 HOST_WIDE_INT offset;
7924 int index;
7925
7926 /* ??? Why are we called for non-eliminable pairs? */
7927 if (!s390_can_eliminate (from, to))
7928 return 0;
7929
7930 switch (from)
7931 {
7932 case FRAME_POINTER_REGNUM:
7933 offset = (get_frame_size()
7934 + STACK_POINTER_OFFSET
7935 + crtl->outgoing_args_size);
7936 break;
7937
7938 case ARG_POINTER_REGNUM:
7939 s390_init_frame_layout ();
7940 offset = cfun_frame_layout.frame_size + STACK_POINTER_OFFSET;
7941 break;
7942
7943 case RETURN_ADDRESS_POINTER_REGNUM:
7944 s390_init_frame_layout ();
7945 index = RETURN_REGNUM - cfun_frame_layout.first_save_gpr_slot;
7946 gcc_assert (index >= 0);
7947 offset = cfun_frame_layout.frame_size + cfun_frame_layout.gprs_offset;
7948 offset += index * UNITS_PER_LONG;
7949 break;
7950
7951 case BASE_REGNUM:
7952 offset = 0;
7953 break;
7954
7955 default:
7956 gcc_unreachable ();
7957 }
7958
7959 return offset;
7960 }
7961
7962 /* Emit insn to save fpr REGNUM at offset OFFSET relative
7963 to register BASE. Return generated insn. */
7964
7965 static rtx
7966 save_fpr (rtx base, int offset, int regnum)
7967 {
7968 rtx addr;
7969 addr = gen_rtx_MEM (DFmode, plus_constant (Pmode, base, offset));
7970
7971 if (regnum >= 16 && regnum <= (16 + FP_ARG_NUM_REG))
7972 set_mem_alias_set (addr, get_varargs_alias_set ());
7973 else
7974 set_mem_alias_set (addr, get_frame_alias_set ());
7975
7976 return emit_move_insn (addr, gen_rtx_REG (DFmode, regnum));
7977 }
7978
7979 /* Emit insn to restore fpr REGNUM from offset OFFSET relative
7980 to register BASE. Return generated insn. */
7981
7982 static rtx
7983 restore_fpr (rtx base, int offset, int regnum)
7984 {
7985 rtx addr;
7986 addr = gen_rtx_MEM (DFmode, plus_constant (Pmode, base, offset));
7987 set_mem_alias_set (addr, get_frame_alias_set ());
7988
7989 return emit_move_insn (gen_rtx_REG (DFmode, regnum), addr);
7990 }
7991
7992 /* Return true if REGNO is a global register, but not one
7993 of the special ones that need to be saved/restored anyway. */
7994
7995 static inline bool
7996 global_not_special_regno_p (int regno)
7997 {
7998 return (global_regs[regno]
7999 /* These registers are special and need to be
8000 restored in any case. */
8001 && !(regno == STACK_POINTER_REGNUM
8002 || regno == RETURN_REGNUM
8003 || regno == BASE_REGNUM
8004 || (flag_pic && regno == (int)PIC_OFFSET_TABLE_REGNUM)));
8005 }
8006
8007 /* Generate insn to save registers FIRST to LAST into
8008 the register save area located at offset OFFSET
8009 relative to register BASE. */
8010
8011 static rtx
8012 save_gprs (rtx base, int offset, int first, int last)
8013 {
8014 rtx addr, insn, note;
8015 int i;
8016
8017 addr = plus_constant (Pmode, base, offset);
8018 addr = gen_rtx_MEM (Pmode, addr);
8019
8020 set_mem_alias_set (addr, get_frame_alias_set ());
8021
8022 /* Special-case single register. */
8023 if (first == last)
8024 {
8025 if (TARGET_64BIT)
8026 insn = gen_movdi (addr, gen_rtx_REG (Pmode, first));
8027 else
8028 insn = gen_movsi (addr, gen_rtx_REG (Pmode, first));
8029
8030 if (!global_not_special_regno_p (first))
8031 RTX_FRAME_RELATED_P (insn) = 1;
8032 return insn;
8033 }
8034
8035
8036 insn = gen_store_multiple (addr,
8037 gen_rtx_REG (Pmode, first),
8038 GEN_INT (last - first + 1));
8039
8040 if (first <= 6 && cfun->stdarg)
8041 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
8042 {
8043 rtx mem = XEXP (XVECEXP (PATTERN (insn), 0, i), 0);
8044
8045 if (first + i <= 6)
8046 set_mem_alias_set (mem, get_varargs_alias_set ());
8047 }
8048
8049 /* We need to set the FRAME_RELATED flag on all SETs
8050 inside the store-multiple pattern.
8051
8052 However, we must not emit DWARF records for registers 2..5
8053 if they are stored for use by variable arguments ...
8054
8055 ??? Unfortunately, it is not enough to simply not set the
8056 FRAME_RELATED flags for those SETs, because the first SET
8057 of the PARALLEL is always treated as if it had the flag
8058 set, even if it does not. Therefore we emit a new pattern
8059 without those registers as REG_FRAME_RELATED_EXPR note. */
8060
8061 if (first >= 6 && !global_not_special_regno_p (first))
8062 {
8063 rtx pat = PATTERN (insn);
8064
8065 for (i = 0; i < XVECLEN (pat, 0); i++)
8066 if (GET_CODE (XVECEXP (pat, 0, i)) == SET
8067 && !global_not_special_regno_p (REGNO (SET_SRC (XVECEXP (pat,
8068 0, i)))))
8069 RTX_FRAME_RELATED_P (XVECEXP (pat, 0, i)) = 1;
8070
8071 RTX_FRAME_RELATED_P (insn) = 1;
8072 }
8073 else if (last >= 6)
8074 {
8075 int start;
8076
8077 for (start = first >= 6 ? first : 6; start <= last; start++)
8078 if (!global_not_special_regno_p (start))
8079 break;
8080
8081 if (start > last)
8082 return insn;
8083
8084 addr = plus_constant (Pmode, base,
8085 offset + (start - first) * UNITS_PER_LONG);
8086 note = gen_store_multiple (gen_rtx_MEM (Pmode, addr),
8087 gen_rtx_REG (Pmode, start),
8088 GEN_INT (last - start + 1));
8089 note = PATTERN (note);
8090
8091 add_reg_note (insn, REG_FRAME_RELATED_EXPR, note);
8092
8093 for (i = 0; i < XVECLEN (note, 0); i++)
8094 if (GET_CODE (XVECEXP (note, 0, i)) == SET
8095 && !global_not_special_regno_p (REGNO (SET_SRC (XVECEXP (note,
8096 0, i)))))
8097 RTX_FRAME_RELATED_P (XVECEXP (note, 0, i)) = 1;
8098
8099 RTX_FRAME_RELATED_P (insn) = 1;
8100 }
8101
8102 return insn;
8103 }
8104
8105 /* Generate insn to restore registers FIRST to LAST from
8106 the register save area located at offset OFFSET
8107 relative to register BASE. */
8108
8109 static rtx
8110 restore_gprs (rtx base, int offset, int first, int last)
8111 {
8112 rtx addr, insn;
8113
8114 addr = plus_constant (Pmode, base, offset);
8115 addr = gen_rtx_MEM (Pmode, addr);
8116 set_mem_alias_set (addr, get_frame_alias_set ());
8117
8118 /* Special-case single register. */
8119 if (first == last)
8120 {
8121 if (TARGET_64BIT)
8122 insn = gen_movdi (gen_rtx_REG (Pmode, first), addr);
8123 else
8124 insn = gen_movsi (gen_rtx_REG (Pmode, first), addr);
8125
8126 return insn;
8127 }
8128
8129 insn = gen_load_multiple (gen_rtx_REG (Pmode, first),
8130 addr,
8131 GEN_INT (last - first + 1));
8132 return insn;
8133 }
8134
8135 /* Return insn sequence to load the GOT register. */
8136
8137 static GTY(()) rtx got_symbol;
8138 rtx
8139 s390_load_got (void)
8140 {
8141 rtx insns;
8142
8143 /* We cannot use pic_offset_table_rtx here since this function is also
8144 used for non-pic code when __tls_get_offset is called, and in that
8145 case neither PIC_OFFSET_TABLE_REGNUM nor pic_offset_table_rtx is
8146 usable. */
8147 rtx got_rtx = gen_rtx_REG (Pmode, 12);
8148
8149 if (!got_symbol)
8150 {
8151 got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
8152 SYMBOL_REF_FLAGS (got_symbol) = SYMBOL_FLAG_LOCAL;
8153 }
8154
8155 start_sequence ();
8156
8157 if (TARGET_CPU_ZARCH)
8158 {
8159 emit_move_insn (got_rtx, got_symbol);
8160 }
8161 else
8162 {
8163 rtx offset;
8164
8165 offset = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, got_symbol),
8166 UNSPEC_LTREL_OFFSET);
8167 offset = gen_rtx_CONST (Pmode, offset);
8168 offset = force_const_mem (Pmode, offset);
8169
8170 emit_move_insn (got_rtx, offset);
8171
8172 offset = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (offset, 0)),
8173 UNSPEC_LTREL_BASE);
8174 offset = gen_rtx_PLUS (Pmode, got_rtx, offset);
8175
8176 emit_move_insn (got_rtx, offset);
8177 }
8178
8179 insns = get_insns ();
8180 end_sequence ();
8181 return insns;
8182 }
8183
8184 /* This ties together stack memory (MEM with an alias set of frame_alias_set)
8185 and the change to the stack pointer. */
8186
8187 static void
8188 s390_emit_stack_tie (void)
8189 {
8190 rtx mem = gen_frame_mem (BLKmode,
8191 gen_rtx_REG (Pmode, STACK_POINTER_REGNUM));
8192
8193 emit_insn (gen_stack_tie (mem));
8194 }
8195
8196 /* Expand the prologue into a bunch of separate insns. */
8197
8198 void
8199 s390_emit_prologue (void)
8200 {
8201 rtx insn, addr;
8202 rtx temp_reg;
8203 int i;
8204 int offset;
8205 int next_fpr = 0;
8206
8207 /* Complete frame layout. */
8208
8209 s390_update_frame_layout ();
8210
8211 /* Annotate all constant pool references to let the scheduler know
8212 they implicitly use the base register. */
8213
8214 push_topmost_sequence ();
8215
8216 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8217 if (INSN_P (insn))
8218 {
8219 annotate_constant_pool_refs (&PATTERN (insn));
8220 df_insn_rescan (insn);
8221 }
8222
8223 pop_topmost_sequence ();
8224
8225 /* Choose the best register to use as a temporary within the prologue.
8226 See below for why TPF must use register 1. */
8227
8228 if (!has_hard_reg_initial_val (Pmode, RETURN_REGNUM)
8229 && !crtl->is_leaf
8230 && !TARGET_TPF_PROFILING)
8231 temp_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
8232 else
8233 temp_reg = gen_rtx_REG (Pmode, 1);
8234
8235 /* Save call saved gprs. */
8236 if (cfun_frame_layout.first_save_gpr != -1)
8237 {
8238 insn = save_gprs (stack_pointer_rtx,
8239 cfun_frame_layout.gprs_offset +
8240 UNITS_PER_LONG * (cfun_frame_layout.first_save_gpr
8241 - cfun_frame_layout.first_save_gpr_slot),
8242 cfun_frame_layout.first_save_gpr,
8243 cfun_frame_layout.last_save_gpr);
8244 emit_insn (insn);
8245 }
8246
8247 /* Dummy insn to mark literal pool slot. */
8248
8249 if (cfun->machine->base_reg)
8250 emit_insn (gen_main_pool (cfun->machine->base_reg));
8251
8252 offset = cfun_frame_layout.f0_offset;
8253
8254 /* Save f0 and f2. */
8255 for (i = 0; i < 2; i++)
8256 {
8257 if (cfun_fpr_bit_p (i))
8258 {
8259 save_fpr (stack_pointer_rtx, offset, i + 16);
8260 offset += 8;
8261 }
8262 else if (!TARGET_PACKED_STACK)
8263 offset += 8;
8264 }
8265
8266 /* Save f4 and f6. */
8267 offset = cfun_frame_layout.f4_offset;
8268 for (i = 2; i < 4; i++)
8269 {
8270 if (cfun_fpr_bit_p (i))
8271 {
8272 insn = save_fpr (stack_pointer_rtx, offset, i + 16);
8273 offset += 8;
8274
8275 /* If f4 and f6 are call clobbered they are saved due to stdargs and
8276 therefore are not frame related. */
8277 if (!call_really_used_regs[i + 16])
8278 RTX_FRAME_RELATED_P (insn) = 1;
8279 }
8280 else if (!TARGET_PACKED_STACK)
8281 offset += 8;
8282 }
8283
8284 if (TARGET_PACKED_STACK
8285 && cfun_save_high_fprs_p
8286 && cfun_frame_layout.f8_offset + cfun_frame_layout.high_fprs * 8 > 0)
8287 {
8288 offset = (cfun_frame_layout.f8_offset
8289 + (cfun_frame_layout.high_fprs - 1) * 8);
8290
8291 for (i = 15; i > 7 && offset >= 0; i--)
8292 if (cfun_fpr_bit_p (i))
8293 {
8294 insn = save_fpr (stack_pointer_rtx, offset, i + 16);
8295
8296 RTX_FRAME_RELATED_P (insn) = 1;
8297 offset -= 8;
8298 }
8299 if (offset >= cfun_frame_layout.f8_offset)
8300 next_fpr = i + 16;
8301 }
8302
8303 if (!TARGET_PACKED_STACK)
8304 next_fpr = cfun_save_high_fprs_p ? 31 : 0;
8305
8306 if (flag_stack_usage_info)
8307 current_function_static_stack_size = cfun_frame_layout.frame_size;
8308
8309 /* Decrement stack pointer. */
8310
8311 if (cfun_frame_layout.frame_size > 0)
8312 {
8313 rtx frame_off = GEN_INT (-cfun_frame_layout.frame_size);
8314 rtx real_frame_off;
8315
8316 if (s390_stack_size)
8317 {
8318 HOST_WIDE_INT stack_guard;
8319
8320 if (s390_stack_guard)
8321 stack_guard = s390_stack_guard;
8322 else
8323 {
8324 /* If no value for the stack guard is provided, the smallest power of 2
8325 at least as large as the current frame size is chosen. */
8326 stack_guard = 1;
8327 while (stack_guard < cfun_frame_layout.frame_size)
8328 stack_guard <<= 1;
8329 }
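
/* For instance, a frame size of 0x1200 bytes yields a stack_guard of
   0x2000, while an exact power of two such as 0x1000 is used unchanged. */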
8330
8331 if (cfun_frame_layout.frame_size >= s390_stack_size)
8332 {
8333 warning (0, "frame size of function %qs is %wd"
8334 " bytes exceeding user provided stack limit of "
8335 "%d bytes. "
8336 "An unconditional trap is added.",
8337 current_function_name(), cfun_frame_layout.frame_size,
8338 s390_stack_size);
8339 emit_insn (gen_trap ());
8340 }
8341 else
8342 {
8343 /* stack_guard has to be smaller than s390_stack_size.
8344 Otherwise we would emit an AND with zero which would
8345 not match the test under mask pattern. */
8346 if (stack_guard >= s390_stack_size)
8347 {
8348 warning (0, "frame size of function %qs is %wd"
8349 " bytes which is more than half the stack size. "
8350 "The dynamic check would not be reliable. "
8351 "No check emitted for this function.",
8352 current_function_name(),
8353 cfun_frame_layout.frame_size);
8354 }
8355 else
8356 {
8357 HOST_WIDE_INT stack_check_mask = ((s390_stack_size - 1)
8358 & ~(stack_guard - 1));
8359
8360 rtx t = gen_rtx_AND (Pmode, stack_pointer_rtx,
8361 GEN_INT (stack_check_mask));
8362 if (TARGET_64BIT)
8363 emit_insn (gen_ctrapdi4 (gen_rtx_EQ (VOIDmode,
8364 t, const0_rtx),
8365 t, const0_rtx, const0_rtx));
8366 else
8367 emit_insn (gen_ctrapsi4 (gen_rtx_EQ (VOIDmode,
8368 t, const0_rtx),
8369 t, const0_rtx, const0_rtx));
8370 }
8371 }
8372 }
8373
8374 if (s390_warn_framesize > 0
8375 && cfun_frame_layout.frame_size >= s390_warn_framesize)
8376 warning (0, "frame size of %qs is %wd bytes",
8377 current_function_name (), cfun_frame_layout.frame_size);
8378
8379 if (s390_warn_dynamicstack_p && cfun->calls_alloca)
8380 warning (0, "%qs uses dynamic stack allocation", current_function_name ());
8381
8382 /* Save incoming stack pointer into temp reg. */
8383 if (TARGET_BACKCHAIN || next_fpr)
8384 insn = emit_insn (gen_move_insn (temp_reg, stack_pointer_rtx));
8385
8386 /* Subtract frame size from stack pointer. */
8387
8388 if (DISP_IN_RANGE (INTVAL (frame_off)))
8389 {
8390 insn = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
8391 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
8392 frame_off));
8393 insn = emit_insn (insn);
8394 }
8395 else
8396 {
8397 if (!CONST_OK_FOR_K (INTVAL (frame_off)))
8398 frame_off = force_const_mem (Pmode, frame_off);
8399
8400 insn = emit_insn (gen_add2_insn (stack_pointer_rtx, frame_off));
8401 annotate_constant_pool_refs (&PATTERN (insn));
8402 }
8403
8404 RTX_FRAME_RELATED_P (insn) = 1;
8405 real_frame_off = GEN_INT (-cfun_frame_layout.frame_size);
8406 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
8407 gen_rtx_SET (VOIDmode, stack_pointer_rtx,
8408 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
8409 real_frame_off)));
8410
8411 /* Set backchain. */
8412
8413 if (TARGET_BACKCHAIN)
8414 {
8415 if (cfun_frame_layout.backchain_offset)
8416 addr = gen_rtx_MEM (Pmode,
8417 plus_constant (Pmode, stack_pointer_rtx,
8418 cfun_frame_layout.backchain_offset));
8419 else
8420 addr = gen_rtx_MEM (Pmode, stack_pointer_rtx);
8421 set_mem_alias_set (addr, get_frame_alias_set ());
8422 insn = emit_insn (gen_move_insn (addr, temp_reg));
8423 }
8424
8425 /* If we support non-call exceptions (e.g. for Java),
8426 we need to make sure the backchain pointer is set up
8427 before any possibly trapping memory access. */
8428 if (TARGET_BACKCHAIN && cfun->can_throw_non_call_exceptions)
8429 {
8430 addr = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (VOIDmode));
8431 emit_clobber (addr);
8432 }
8433 }
8434
8435 /* Save fprs 8 - 15 (64 bit ABI). */
8436
8437 if (cfun_save_high_fprs_p && next_fpr)
8438 {
8439 /* If the stack might be accessed through a different register
8440 we have to make sure that the stack pointer decrement is not
8441 moved below the use of the stack slots. */
8442 s390_emit_stack_tie ();
8443
8444 insn = emit_insn (gen_add2_insn (temp_reg,
8445 GEN_INT (cfun_frame_layout.f8_offset)));
8446
8447 offset = 0;
8448
8449 for (i = 24; i <= next_fpr; i++)
8450 if (cfun_fpr_bit_p (i - 16))
8451 {
8452 rtx addr = plus_constant (Pmode, stack_pointer_rtx,
8453 cfun_frame_layout.frame_size
8454 + cfun_frame_layout.f8_offset
8455 + offset);
8456
8457 insn = save_fpr (temp_reg, offset, i);
8458 offset += 8;
8459 RTX_FRAME_RELATED_P (insn) = 1;
8460 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
8461 gen_rtx_SET (VOIDmode,
8462 gen_rtx_MEM (DFmode, addr),
8463 gen_rtx_REG (DFmode, i)));
8464 }
8465 }
8466
8467 /* Set frame pointer, if needed. */
8468
8469 if (frame_pointer_needed)
8470 {
8471 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
8472 RTX_FRAME_RELATED_P (insn) = 1;
8473 }
8474
8475 /* Set up got pointer, if needed. */
8476
8477 if (flag_pic && df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM))
8478 {
8479 rtx insns = s390_load_got ();
8480
8481 for (insn = insns; insn; insn = NEXT_INSN (insn))
8482 annotate_constant_pool_refs (&PATTERN (insn));
8483
8484 emit_insn (insns);
8485 }
8486
8487 if (TARGET_TPF_PROFILING)
8488 {
8489 /* Generate a BAS instruction to serve as a function
8490 entry intercept to facilitate the use of tracing
8491 algorithms located at the branch target. */
8492 emit_insn (gen_prologue_tpf ());
8493
8494 /* Emit a blockage here so that all code
8495 lies between the profiling mechanisms. */
8496 emit_insn (gen_blockage ());
8497 }
8498 }
8499
8500 /* Expand the epilogue into a bunch of separate insns. */
8501
8502 void
8503 s390_emit_epilogue (bool sibcall)
8504 {
8505 rtx frame_pointer, return_reg, cfa_restores = NULL_RTX;
8506 int area_bottom, area_top, offset = 0;
8507 int next_offset;
8508 rtvec p;
8509 int i;
8510
8511 if (TARGET_TPF_PROFILING)
8512 {
8513
8514 /* Generate a BAS instruction to serve as a function
8515 entry intercept to facilitate the use of tracing
8516 algorithms located at the branch target. */
8517
8518 /* Emit a blockage here so that all code
8519 lies between the profiling mechanisms. */
8520 emit_insn (gen_blockage ());
8521
8522 emit_insn (gen_epilogue_tpf ());
8523 }
8524
8525 /* Check whether to use frame or stack pointer for restore. */
8526
8527 frame_pointer = (frame_pointer_needed
8528 ? hard_frame_pointer_rtx : stack_pointer_rtx);
8529
8530 s390_frame_area (&area_bottom, &area_top);
8531
8532 /* Check whether we can access the register save area.
8533 If not, increment the frame pointer as required. */
8534
8535 if (area_top <= area_bottom)
8536 {
8537 /* Nothing to restore. */
8538 }
8539 else if (DISP_IN_RANGE (cfun_frame_layout.frame_size + area_bottom)
8540 && DISP_IN_RANGE (cfun_frame_layout.frame_size + area_top - 1))
8541 {
8542 /* Area is in range. */
8543 offset = cfun_frame_layout.frame_size;
8544 }
8545 else
8546 {
8547 rtx insn, frame_off, cfa;
8548
8549 offset = area_bottom < 0 ? -area_bottom : 0;
8550 frame_off = GEN_INT (cfun_frame_layout.frame_size - offset);
8551
8552 cfa = gen_rtx_SET (VOIDmode, frame_pointer,
8553 gen_rtx_PLUS (Pmode, frame_pointer, frame_off));
8554 if (DISP_IN_RANGE (INTVAL (frame_off)))
8555 {
8556 insn = gen_rtx_SET (VOIDmode, frame_pointer,
8557 gen_rtx_PLUS (Pmode, frame_pointer, frame_off));
8558 insn = emit_insn (insn);
8559 }
8560 else
8561 {
8562 if (!CONST_OK_FOR_K (INTVAL (frame_off)))
8563 frame_off = force_const_mem (Pmode, frame_off);
8564
8565 insn = emit_insn (gen_add2_insn (frame_pointer, frame_off));
8566 annotate_constant_pool_refs (&PATTERN (insn));
8567 }
8568 add_reg_note (insn, REG_CFA_ADJUST_CFA, cfa);
8569 RTX_FRAME_RELATED_P (insn) = 1;
8570 }
8571
8572 /* Restore call saved fprs. */
8573
8574 if (TARGET_64BIT)
8575 {
8576 if (cfun_save_high_fprs_p)
8577 {
8578 next_offset = cfun_frame_layout.f8_offset;
8579 for (i = 24; i < 32; i++)
8580 {
8581 if (cfun_fpr_bit_p (i - 16))
8582 {
8583 restore_fpr (frame_pointer,
8584 offset + next_offset, i);
8585 cfa_restores
8586 = alloc_reg_note (REG_CFA_RESTORE,
8587 gen_rtx_REG (DFmode, i), cfa_restores);
8588 next_offset += 8;
8589 }
8590 }
8591 }
8592
8593 }
8594 else
8595 {
8596 next_offset = cfun_frame_layout.f4_offset;
8597 for (i = 18; i < 20; i++)
8598 {
8599 if (cfun_fpr_bit_p (i - 16))
8600 {
8601 restore_fpr (frame_pointer,
8602 offset + next_offset, i);
8603 cfa_restores
8604 = alloc_reg_note (REG_CFA_RESTORE,
8605 gen_rtx_REG (DFmode, i), cfa_restores);
8606 next_offset += 8;
8607 }
8608 else if (!TARGET_PACKED_STACK)
8609 next_offset += 8;
8610 }
8611
8612 }
8613
8614 /* Return register. */
8615
8616 return_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
8617
8618 /* Restore call saved gprs. */
8619
8620 if (cfun_frame_layout.first_restore_gpr != -1)
8621 {
8622 rtx insn, addr;
8623 int i;
8624
8625 /* Check for global registers and save them
8626 to the stack locations from which they get restored. */
8627
8628 for (i = cfun_frame_layout.first_restore_gpr;
8629 i <= cfun_frame_layout.last_restore_gpr;
8630 i++)
8631 {
8632 if (global_not_special_regno_p (i))
8633 {
8634 addr = plus_constant (Pmode, frame_pointer,
8635 offset + cfun_frame_layout.gprs_offset
8636 + (i - cfun_frame_layout.first_save_gpr_slot)
8637 * UNITS_PER_LONG);
8638 addr = gen_rtx_MEM (Pmode, addr);
8639 set_mem_alias_set (addr, get_frame_alias_set ());
8640 emit_move_insn (addr, gen_rtx_REG (Pmode, i));
8641 }
8642 else
8643 cfa_restores
8644 = alloc_reg_note (REG_CFA_RESTORE,
8645 gen_rtx_REG (Pmode, i), cfa_restores);
8646 }
8647
8648 if (! sibcall)
8649 {
8650 /* Fetch the return address from the stack before the load multiple;
8651 this is good for scheduling. */
8652
8653 if (cfun_frame_layout.save_return_addr_p
8654 || (cfun_frame_layout.first_restore_gpr < BASE_REGNUM
8655 && cfun_frame_layout.last_restore_gpr > RETURN_REGNUM))
8656 {
8657 int return_regnum = find_unused_clobbered_reg();
8658 if (!return_regnum)
8659 return_regnum = 4;
8660 return_reg = gen_rtx_REG (Pmode, return_regnum);
8661
8662 addr = plus_constant (Pmode, frame_pointer,
8663 offset + cfun_frame_layout.gprs_offset
8664 + (RETURN_REGNUM
8665 - cfun_frame_layout.first_save_gpr_slot)
8666 * UNITS_PER_LONG);
8667 addr = gen_rtx_MEM (Pmode, addr);
8668 set_mem_alias_set (addr, get_frame_alias_set ());
8669 emit_move_insn (return_reg, addr);
8670 }
8671 }
8672
8673 insn = restore_gprs (frame_pointer,
8674 offset + cfun_frame_layout.gprs_offset
8675 + (cfun_frame_layout.first_restore_gpr
8676 - cfun_frame_layout.first_save_gpr_slot)
8677 * UNITS_PER_LONG,
8678 cfun_frame_layout.first_restore_gpr,
8679 cfun_frame_layout.last_restore_gpr);
8680 insn = emit_insn (insn);
8681 REG_NOTES (insn) = cfa_restores;
8682 add_reg_note (insn, REG_CFA_DEF_CFA,
8683 plus_constant (Pmode, stack_pointer_rtx,
8684 STACK_POINTER_OFFSET));
8685 RTX_FRAME_RELATED_P (insn) = 1;
8686 }
8687
8688 if (! sibcall)
8689 {
8690
8691 /* Return to caller. */
8692
8693 p = rtvec_alloc (2);
8694
8695 RTVEC_ELT (p, 0) = ret_rtx;
8696 RTVEC_ELT (p, 1) = gen_rtx_USE (VOIDmode, return_reg);
8697 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
8698 }
8699 }
8700
8701
8702 /* Return the size in bytes of a function argument of
8703 type TYPE and/or mode MODE. At least one of TYPE or
8704 MODE must be specified. */
8705
8706 static int
8707 s390_function_arg_size (enum machine_mode mode, const_tree type)
8708 {
8709 if (type)
8710 return int_size_in_bytes (type);
8711
8712 /* No type info available for some library calls ... */
8713 if (mode != BLKmode)
8714 return GET_MODE_SIZE (mode);
8715
8716 /* If we have neither type nor mode, abort. */
8717 gcc_unreachable ();
8718 }
8719
8720 /* Return true if a function argument of type TYPE and mode MODE
8721 is to be passed in a floating-point register, if available. */
8722
8723 static bool
8724 s390_function_arg_float (enum machine_mode mode, const_tree type)
8725 {
8726 int size = s390_function_arg_size (mode, type);
8727 if (size > 8)
8728 return false;
8729
8730 /* Soft-float changes the ABI: no floating-point registers are used. */
8731 if (TARGET_SOFT_FLOAT)
8732 return false;
8733
8734 /* No type info available for some library calls ... */
8735 if (!type)
8736 return mode == SFmode || mode == DFmode || mode == SDmode || mode == DDmode;
8737
8738 /* The ABI says that record types with a single member are treated
8739 just like that member would be. */
8740 while (TREE_CODE (type) == RECORD_TYPE)
8741 {
8742 tree field, single = NULL_TREE;
8743
8744 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
8745 {
8746 if (TREE_CODE (field) != FIELD_DECL)
8747 continue;
8748
8749 if (single == NULL_TREE)
8750 single = TREE_TYPE (field);
8751 else
8752 return false;
8753 }
8754
8755 if (single == NULL_TREE)
8756 return false;
8757 else
8758 type = single;
8759 }
8760
8761 return TREE_CODE (type) == REAL_TYPE;
8762 }
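
/* To illustrate the single-member record rule above with two hypothetical
   types (neither appears anywhere else in this file):

   struct wrap { double d; };

   is passed exactly like a plain double, i.e. it is a candidate for a
   floating-point register, whereas

   struct pair { double d; int i; };

   has more than one member and is therefore never passed in a
   floating-point register. */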
8763
8764 /* Return true if a function argument of type TYPE and mode MODE
8765 is to be passed in an integer register, or a pair of integer
8766 registers, if available. */
8767
8768 static bool
8769 s390_function_arg_integer (enum machine_mode mode, const_tree type)
8770 {
8771 int size = s390_function_arg_size (mode, type);
8772 if (size > 8)
8773 return false;
8774
8775 /* No type info available for some library calls ... */
8776 if (!type)
8777 return GET_MODE_CLASS (mode) == MODE_INT
8778 || (TARGET_SOFT_FLOAT && SCALAR_FLOAT_MODE_P (mode));
8779
8780 /* We accept small integral (and similar) types. */
8781 if (INTEGRAL_TYPE_P (type)
8782 || POINTER_TYPE_P (type)
8783 || TREE_CODE (type) == NULLPTR_TYPE
8784 || TREE_CODE (type) == OFFSET_TYPE
8785 || (TARGET_SOFT_FLOAT && TREE_CODE (type) == REAL_TYPE))
8786 return true;
8787
8788 /* We also accept structs of size 1, 2, 4, 8 that are not
8789 passed in floating-point registers. */
8790 if (AGGREGATE_TYPE_P (type)
8791 && exact_log2 (size) >= 0
8792 && !s390_function_arg_float (mode, type))
8793 return true;
8794
8795 return false;
8796 }
8797
8798 /* Return 1 if a function argument of type TYPE and mode MODE
8799 is to be passed by reference. The ABI specifies that only
8800 structures of size 1, 2, 4, or 8 bytes are passed by value,
8801 all other structures (and complex numbers) are passed by
8802 reference. */
8803
8804 static bool
8805 s390_pass_by_reference (cumulative_args_t ca ATTRIBUTE_UNUSED,
8806 enum machine_mode mode, const_tree type,
8807 bool named ATTRIBUTE_UNUSED)
8808 {
8809 int size = s390_function_arg_size (mode, type);
8810 if (size > 8)
8811 return true;
8812
8813 if (type)
8814 {
8815 if (AGGREGATE_TYPE_P (type) && exact_log2 (size) < 0)
8816 return 1;
8817
8818 if (TREE_CODE (type) == COMPLEX_TYPE
8819 || TREE_CODE (type) == VECTOR_TYPE)
8820 return 1;
8821 }
8822
8823 return 0;
8824 }
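
/* For example, an 8-byte struct such as struct { char c[8]; } is passed by
   value (its size is a power of two not larger than 8), while a 3-byte
   struct, any struct larger than 8 bytes, and all complex or vector typed
   arguments are passed by reference. */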
8825
8826 /* Update the data in CUM to advance over an argument of mode MODE and
8827 data type TYPE. (TYPE is null for libcalls where that information
8828 may not be available.) The boolean NAMED specifies whether the
8829 argument is a named argument (as opposed to an unnamed argument
8830 matching an ellipsis). */
8831
8832 static void
8833 s390_function_arg_advance (cumulative_args_t cum_v, enum machine_mode mode,
8834 const_tree type, bool named ATTRIBUTE_UNUSED)
8835 {
8836 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
8837
8838 if (s390_function_arg_float (mode, type))
8839 {
8840 cum->fprs += 1;
8841 }
8842 else if (s390_function_arg_integer (mode, type))
8843 {
8844 int size = s390_function_arg_size (mode, type);
8845 cum->gprs += ((size + UNITS_PER_LONG - 1) / UNITS_PER_LONG);
8846 }
8847 else
8848 gcc_unreachable ();
8849 }
8850
8851 /* Define where to put the arguments to a function.
8852 Value is zero to push the argument on the stack,
8853 or a hard register in which to store the argument.
8854
8855 MODE is the argument's machine mode.
8856 TYPE is the data type of the argument (as a tree).
8857 This is null for libcalls where that information may
8858 not be available.
8859 CUM is a variable of type CUMULATIVE_ARGS which gives info about
8860 the preceding args and about the function being called.
8861 NAMED is nonzero if this argument is a named parameter
8862 (otherwise it is an extra parameter matching an ellipsis).
8863
8864 On S/390, we use general purpose registers 2 through 6 to
8865 pass integer, pointer, and certain structure arguments, and
8866 floating point registers 0 and 2 (0, 2, 4, and 6 on 64-bit)
8867 to pass floating point arguments. All remaining arguments
8868 are pushed to the stack. */
8869
8870 static rtx
8871 s390_function_arg (cumulative_args_t cum_v, enum machine_mode mode,
8872 const_tree type, bool named ATTRIBUTE_UNUSED)
8873 {
8874 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
8875
8876 if (s390_function_arg_float (mode, type))
8877 {
8878 if (cum->fprs + 1 > FP_ARG_NUM_REG)
8879 return 0;
8880 else
8881 return gen_rtx_REG (mode, cum->fprs + 16);
8882 }
8883 else if (s390_function_arg_integer (mode, type))
8884 {
8885 int size = s390_function_arg_size (mode, type);
8886 int n_gprs = (size + UNITS_PER_LONG - 1) / UNITS_PER_LONG;
8887
8888 if (cum->gprs + n_gprs > GP_ARG_NUM_REG)
8889 return 0;
8890 else if (n_gprs == 1 || UNITS_PER_WORD == UNITS_PER_LONG)
8891 return gen_rtx_REG (mode, cum->gprs + 2);
8892 else if (n_gprs == 2)
8893 {
8894 rtvec p = rtvec_alloc (2);
8895
8896 RTVEC_ELT (p, 0)
8897 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, cum->gprs + 2),
8898 const0_rtx);
8899 RTVEC_ELT (p, 1)
8900 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, cum->gprs + 3),
8901 GEN_INT (4));
8902
8903 return gen_rtx_PARALLEL (mode, p);
8904 }
8905 }
8906
8907 /* After the real arguments, expand_call calls us once again
8908 with a void_type_node type. Whatever we return here is
8909 passed as operand 2 to the call expanders.
8910
8911 We don't need this feature ... */
8912 else if (type == void_type_node)
8913 return const0_rtx;
8914
8915 gcc_unreachable ();
8916 }
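
/* As a concrete illustration of the rules above (64-bit ABI, hardware
   floating point), consider a made-up function such as

   void f (int a, double b, long c);

   Here a is passed in gpr 2, b in fpr 0 and c in gpr 3; any argument that
   no longer fits into gprs 2-6 or fprs 0/2/4/6 goes to the stack. */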
8917
8918 /* Return true if return values of type TYPE should be returned
8919 in a memory buffer whose address is passed by the caller as
8920 hidden first argument. */
8921
8922 static bool
8923 s390_return_in_memory (const_tree type, const_tree fundecl ATTRIBUTE_UNUSED)
8924 {
8925 /* We accept small integral (and similar) types. */
8926 if (INTEGRAL_TYPE_P (type)
8927 || POINTER_TYPE_P (type)
8928 || TREE_CODE (type) == OFFSET_TYPE
8929 || TREE_CODE (type) == REAL_TYPE)
8930 return int_size_in_bytes (type) > 8;
8931
8932 /* Aggregates and similar constructs are always returned
8933 in memory. */
8934 if (AGGREGATE_TYPE_P (type)
8935 || TREE_CODE (type) == COMPLEX_TYPE
8936 || TREE_CODE (type) == VECTOR_TYPE)
8937 return true;
8938
8939 /* ??? We get called on all sorts of random stuff from
8940 aggregate_value_p. We can't abort, but it's not clear
8941 what's safe to return. Pretend it's a struct I guess. */
8942 return true;
8943 }
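
/* For instance, a plain long or double is returned in a register, while
   even a small aggregate such as struct { int x; } is returned through a
   hidden memory buffer, as are all complex and vector values. */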
8944
8945 /* Function arguments and return values are promoted to word size. */
8946
8947 static enum machine_mode
8948 s390_promote_function_mode (const_tree type, enum machine_mode mode,
8949 int *punsignedp,
8950 const_tree fntype ATTRIBUTE_UNUSED,
8951 int for_return ATTRIBUTE_UNUSED)
8952 {
8953 if (INTEGRAL_MODE_P (mode)
8954 && GET_MODE_SIZE (mode) < UNITS_PER_LONG)
8955 {
8956 if (type != NULL_TREE && POINTER_TYPE_P (type))
8957 *punsignedp = POINTERS_EXTEND_UNSIGNED;
8958 return Pmode;
8959 }
8960
8961 return mode;
8962 }
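
/* E.g. a char or short argument or return value (and, under 64 bit, an
   int) is widened to Pmode here; DImode and the floating point modes are
   left untouched. */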
8963
8964 /* Define where to return a (scalar) value of type RET_TYPE.
8965 If RET_TYPE is null, define where to return a (scalar)
8966 value of mode MODE from a libcall. */
8967
8968 static rtx
8969 s390_function_and_libcall_value (enum machine_mode mode,
8970 const_tree ret_type,
8971 const_tree fntype_or_decl,
8972 bool outgoing ATTRIBUTE_UNUSED)
8973 {
8974 /* For normal functions perform the promotion as
8975 promote_function_mode would do. */
8976 if (ret_type)
8977 {
8978 int unsignedp = TYPE_UNSIGNED (ret_type);
8979 mode = promote_function_mode (ret_type, mode, &unsignedp,
8980 fntype_or_decl, 1);
8981 }
8982
8983 gcc_assert (GET_MODE_CLASS (mode) == MODE_INT || SCALAR_FLOAT_MODE_P (mode));
8984 gcc_assert (GET_MODE_SIZE (mode) <= 8);
8985
8986 if (TARGET_HARD_FLOAT && SCALAR_FLOAT_MODE_P (mode))
8987 return gen_rtx_REG (mode, 16);
8988 else if (GET_MODE_SIZE (mode) <= UNITS_PER_LONG
8989 || UNITS_PER_LONG == UNITS_PER_WORD)
8990 return gen_rtx_REG (mode, 2);
8991 else if (GET_MODE_SIZE (mode) == 2 * UNITS_PER_LONG)
8992 {
8993 /* This case is triggered when returning a 64 bit value with
8994 -m31 -mzarch. Although the value would fit into a single
8995 register it has to be forced into a 32 bit register pair in
8996 order to match the ABI. */
8997 rtvec p = rtvec_alloc (2);
8998
8999 RTVEC_ELT (p, 0)
9000 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, 2), const0_rtx);
9001 RTVEC_ELT (p, 1)
9002 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, 3), GEN_INT (4));
9003
9004 return gen_rtx_PARALLEL (mode, p);
9005 }
9006
9007 gcc_unreachable ();
9008 }
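
/* Putting the cases above together: with hardware floating point a double
   is returned in fpr 0 (hard register 16), an int or long in gpr 2, and a
   64-bit value under -m31 -mzarch in the gpr 2 / gpr 3 pair. */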
9009
9010 /* Define where to return a scalar return value of type RET_TYPE. */
9011
9012 static rtx
9013 s390_function_value (const_tree ret_type, const_tree fn_decl_or_type,
9014 bool outgoing)
9015 {
9016 return s390_function_and_libcall_value (TYPE_MODE (ret_type), ret_type,
9017 fn_decl_or_type, outgoing);
9018 }
9019
9020 /* Define where to return a scalar libcall return value of mode
9021 MODE. */
9022
9023 static rtx
9024 s390_libcall_value (enum machine_mode mode, const_rtx fun ATTRIBUTE_UNUSED)
9025 {
9026 return s390_function_and_libcall_value (mode, NULL_TREE,
9027 NULL_TREE, true);
9028 }
9029
9030
9031 /* Create and return the va_list datatype.
9032
9033 On S/390, va_list is an array type equivalent to
9034
9035 typedef struct __va_list_tag
9036 {
9037 long __gpr;
9038 long __fpr;
9039 void *__overflow_arg_area;
9040 void *__reg_save_area;
9041 } va_list[1];
9042
9043 where __gpr and __fpr hold the number of general purpose
9044 or floating point arguments used up to now, respectively,
9045 __overflow_arg_area points to the stack location of the
9046 next argument passed on the stack, and __reg_save_area
9047 always points to the start of the register area in the
9048 call frame of the current function. The function prologue
9049 saves all registers used for argument passing into this
9050 area if the function uses variable arguments. */
9051
9052 static tree
9053 s390_build_builtin_va_list (void)
9054 {
9055 tree f_gpr, f_fpr, f_ovf, f_sav, record, type_decl;
9056
9057 record = lang_hooks.types.make_type (RECORD_TYPE);
9058
9059 type_decl =
9060 build_decl (BUILTINS_LOCATION,
9061 TYPE_DECL, get_identifier ("__va_list_tag"), record);
9062
9063 f_gpr = build_decl (BUILTINS_LOCATION,
9064 FIELD_DECL, get_identifier ("__gpr"),
9065 long_integer_type_node);
9066 f_fpr = build_decl (BUILTINS_LOCATION,
9067 FIELD_DECL, get_identifier ("__fpr"),
9068 long_integer_type_node);
9069 f_ovf = build_decl (BUILTINS_LOCATION,
9070 FIELD_DECL, get_identifier ("__overflow_arg_area"),
9071 ptr_type_node);
9072 f_sav = build_decl (BUILTINS_LOCATION,
9073 FIELD_DECL, get_identifier ("__reg_save_area"),
9074 ptr_type_node);
9075
9076 va_list_gpr_counter_field = f_gpr;
9077 va_list_fpr_counter_field = f_fpr;
9078
9079 DECL_FIELD_CONTEXT (f_gpr) = record;
9080 DECL_FIELD_CONTEXT (f_fpr) = record;
9081 DECL_FIELD_CONTEXT (f_ovf) = record;
9082 DECL_FIELD_CONTEXT (f_sav) = record;
9083
9084 TYPE_STUB_DECL (record) = type_decl;
9085 TYPE_NAME (record) = type_decl;
9086 TYPE_FIELDS (record) = f_gpr;
9087 DECL_CHAIN (f_gpr) = f_fpr;
9088 DECL_CHAIN (f_fpr) = f_ovf;
9089 DECL_CHAIN (f_ovf) = f_sav;
9090
9091 layout_type (record);
9092
9093 /* The correct type is an array type of one element. */
9094 return build_array_type (record, build_index_type (size_zero_node));
9095 }
9096
9097 /* Implement va_start by filling the va_list structure VALIST.
9098 STDARG_P is always true, and ignored.
9099 NEXTARG points to the first anonymous stack argument.
9100
9101 The following global variables are used to initialize
9102 the va_list structure:
9103
9104 crtl->args.info:
9105 holds number of gprs and fprs used for named arguments.
9106 crtl->args.arg_offset_rtx:
9107 holds the offset of the first anonymous stack argument
9108 (relative to the virtual arg pointer). */
9109
9110 static void
9111 s390_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
9112 {
9113 HOST_WIDE_INT n_gpr, n_fpr;
9114 int off;
9115 tree f_gpr, f_fpr, f_ovf, f_sav;
9116 tree gpr, fpr, ovf, sav, t;
9117
9118 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
9119 f_fpr = DECL_CHAIN (f_gpr);
9120 f_ovf = DECL_CHAIN (f_fpr);
9121 f_sav = DECL_CHAIN (f_ovf);
9122
9123 valist = build_simple_mem_ref (valist);
9124 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
9125 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
9126 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
9127 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
9128
9129 /* Count number of gp and fp argument registers used. */
9130
9131 n_gpr = crtl->args.info.gprs;
9132 n_fpr = crtl->args.info.fprs;
9133
9134 if (cfun->va_list_gpr_size)
9135 {
9136 t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
9137 build_int_cst (NULL_TREE, n_gpr));
9138 TREE_SIDE_EFFECTS (t) = 1;
9139 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
9140 }
9141
9142 if (cfun->va_list_fpr_size)
9143 {
9144 t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
9145 build_int_cst (NULL_TREE, n_fpr));
9146 TREE_SIDE_EFFECTS (t) = 1;
9147 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
9148 }
9149
9150 /* Find the overflow area. */
9151 if (n_gpr + cfun->va_list_gpr_size > GP_ARG_NUM_REG
9152 || n_fpr + cfun->va_list_fpr_size > FP_ARG_NUM_REG)
9153 {
9154 t = make_tree (TREE_TYPE (ovf), virtual_incoming_args_rtx);
9155
9156 off = INTVAL (crtl->args.arg_offset_rtx);
9157 off = off < 0 ? 0 : off;
9158 if (TARGET_DEBUG_ARG)
9159 fprintf (stderr, "va_start: n_gpr = %d, n_fpr = %d off %d\n",
9160 (int)n_gpr, (int)n_fpr, off);
9161
9162 t = fold_build_pointer_plus_hwi (t, off);
9163
9164 t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
9165 TREE_SIDE_EFFECTS (t) = 1;
9166 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
9167 }
9168
9169 /* Find the register save area. */
9170 if ((cfun->va_list_gpr_size && n_gpr < GP_ARG_NUM_REG)
9171 || (cfun->va_list_fpr_size && n_fpr < FP_ARG_NUM_REG))
9172 {
9173 t = make_tree (TREE_TYPE (sav), return_address_pointer_rtx);
9174 t = fold_build_pointer_plus_hwi (t, -RETURN_REGNUM * UNITS_PER_LONG);
9175
9176 t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
9177 TREE_SIDE_EFFECTS (t) = 1;
9178 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
9179 }
9180 }
9181
9182 /* Implement va_arg by updating the va_list structure
9183 VALIST as required to retrieve an argument of type
9184 TYPE, and returning that argument.
9185
9186 Generates code equivalent to:
9187
9188 if (integral value) {
9189 if (size <= 4 && args.gpr < 5 ||
9190 size > 4 && args.gpr < 4 )
9191 ret = args.reg_save_area[args.gpr+8]
9192 else
9193 ret = *args.overflow_arg_area++;
9194 } else if (float value) {
9195 if (args.fpr < 2)
9196 ret = args.reg_save_area[args.fpr+64]
9197 else
9198 ret = *args.overflow_arg_area++;
9199 } else if (aggregate value) {
9200 if (args.gpr < 5)
9201 ret = *args.reg_save_area[args.gpr]
9202 else
9203 ret = **args.overflow_arg_area++;
9204 } */
9205
9206 static tree
9207 s390_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
9208 gimple_seq *post_p ATTRIBUTE_UNUSED)
9209 {
9210 tree f_gpr, f_fpr, f_ovf, f_sav;
9211 tree gpr, fpr, ovf, sav, reg, t, u;
9212 int indirect_p, size, n_reg, sav_ofs, sav_scale, max_reg;
9213 tree lab_false, lab_over, addr;
9214
9215 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
9216 f_fpr = DECL_CHAIN (f_gpr);
9217 f_ovf = DECL_CHAIN (f_fpr);
9218 f_sav = DECL_CHAIN (f_ovf);
9219
9220 valist = build_va_arg_indirect_ref (valist);
9221 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
9222 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
9223 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
9224
9225 /* The tree for args* cannot be shared between gpr/fpr and ovf since
9226 both appear on a lhs. */
9227 valist = unshare_expr (valist);
9228 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
9229
9230 size = int_size_in_bytes (type);
9231
9232 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
9233 {
9234 if (TARGET_DEBUG_ARG)
9235 {
9236 fprintf (stderr, "va_arg: aggregate type");
9237 debug_tree (type);
9238 }
9239
9240 /* Aggregates are passed by reference. */
9241 indirect_p = 1;
9242 reg = gpr;
9243 n_reg = 1;
9244
9245 /* Kernel stack layout on 31 bit: it is assumed here that no padding
9246 will be added by s390_frame_info because for va_args an even number
9247 of gprs always has to be saved (r15-r2 = 14 regs). */
9248 sav_ofs = 2 * UNITS_PER_LONG;
9249 sav_scale = UNITS_PER_LONG;
9250 size = UNITS_PER_LONG;
9251 max_reg = GP_ARG_NUM_REG - n_reg;
9252 }
9253 else if (s390_function_arg_float (TYPE_MODE (type), type))
9254 {
9255 if (TARGET_DEBUG_ARG)
9256 {
9257 fprintf (stderr, "va_arg: float type");
9258 debug_tree (type);
9259 }
9260
9261 /* FP args go in FP registers, if present. */
9262 indirect_p = 0;
9263 reg = fpr;
9264 n_reg = 1;
9265 sav_ofs = 16 * UNITS_PER_LONG;
9266 sav_scale = 8;
9267 max_reg = FP_ARG_NUM_REG - n_reg;
9268 }
9269 else
9270 {
9271 if (TARGET_DEBUG_ARG)
9272 {
9273 fprintf (stderr, "va_arg: other type");
9274 debug_tree (type);
9275 }
9276
9277 /* Otherwise into GP registers. */
9278 indirect_p = 0;
9279 reg = gpr;
9280 n_reg = (size + UNITS_PER_LONG - 1) / UNITS_PER_LONG;
9281
9282 /* Kernel stack layout on 31 bit: it is assumed here that no padding
9283 will be added by s390_frame_info because for va_args an even number
9284 of gprs always has to be saved (r15-r2 = 14 regs). */
9285 sav_ofs = 2 * UNITS_PER_LONG;
9286
9287 if (size < UNITS_PER_LONG)
9288 sav_ofs += UNITS_PER_LONG - size;
9289
9290 sav_scale = UNITS_PER_LONG;
9291 max_reg = GP_ARG_NUM_REG - n_reg;
9292 }
9293
9294 /* Pull the value out of the saved registers ... */
9295
9296 lab_false = create_artificial_label (UNKNOWN_LOCATION);
9297 lab_over = create_artificial_label (UNKNOWN_LOCATION);
9298 addr = create_tmp_var (ptr_type_node, "addr");
9299
9300 t = fold_convert (TREE_TYPE (reg), size_int (max_reg));
9301 t = build2 (GT_EXPR, boolean_type_node, reg, t);
9302 u = build1 (GOTO_EXPR, void_type_node, lab_false);
9303 t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
9304 gimplify_and_add (t, pre_p);
9305
9306 t = fold_build_pointer_plus_hwi (sav, sav_ofs);
9307 u = build2 (MULT_EXPR, TREE_TYPE (reg), reg,
9308 fold_convert (TREE_TYPE (reg), size_int (sav_scale)));
9309 t = fold_build_pointer_plus (t, u);
9310
9311 gimplify_assign (addr, t, pre_p);
9312
9313 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
9314
9315 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_false));
9316
9317
9318 /* ... Otherwise out of the overflow area. */
9319
9320 t = ovf;
9321 if (size < UNITS_PER_LONG)
9322 t = fold_build_pointer_plus_hwi (t, UNITS_PER_LONG - size);
9323
9324 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
9325
9326 gimplify_assign (addr, t, pre_p);
9327
9328 t = fold_build_pointer_plus_hwi (t, size);
9329 gimplify_assign (ovf, t, pre_p);
9330
9331 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_over));
9332
9333
9334 /* Increment register save count. */
9335
9336 u = build2 (PREINCREMENT_EXPR, TREE_TYPE (reg), reg,
9337 fold_convert (TREE_TYPE (reg), size_int (n_reg)));
9338 gimplify_and_add (u, pre_p);
9339
9340 if (indirect_p)
9341 {
9342 t = build_pointer_type_for_mode (build_pointer_type (type),
9343 ptr_mode, true);
9344 addr = fold_convert (t, addr);
9345 addr = build_va_arg_indirect_ref (addr);
9346 }
9347 else
9348 {
9349 t = build_pointer_type_for_mode (type, ptr_mode, true);
9350 addr = fold_convert (t, addr);
9351 }
9352
9353 return build_va_arg_indirect_ref (addr);
9354 }
9355
9356 /* Output assembly code for the trampoline template to
9357 stdio stream FILE.
9358
9359 On S/390, we use gpr 1 internally in the trampoline code;
9360 gpr 0 is used to hold the static chain. */
9361
9362 static void
9363 s390_asm_trampoline_template (FILE *file)
9364 {
9365 rtx op[2];
9366 op[0] = gen_rtx_REG (Pmode, 0);
9367 op[1] = gen_rtx_REG (Pmode, 1);
9368
9369 if (TARGET_64BIT)
9370 {
9371 output_asm_insn ("basr\t%1,0", op); /* 2 byte */
9372 output_asm_insn ("lmg\t%0,%1,14(%1)", op); /* 6 byte */
9373 output_asm_insn ("br\t%1", op); /* 2 byte */
9374 ASM_OUTPUT_SKIP (file, (HOST_WIDE_INT)(TRAMPOLINE_SIZE - 10));
9375 }
9376 else
9377 {
9378 output_asm_insn ("basr\t%1,0", op); /* 2 byte */
9379 output_asm_insn ("lm\t%0,%1,6(%1)", op); /* 4 byte */
9380 output_asm_insn ("br\t%1", op); /* 2 byte */
9381 ASM_OUTPUT_SKIP (file, (HOST_WIDE_INT)(TRAMPOLINE_SIZE - 8));
9382 }
9383 }
9384
9385 /* Emit RTL insns to initialize the variable parts of a trampoline.
9386 FNADDR is an RTX for the address of the function's pure code.
9387 CXT is an RTX for the static chain value for the function. */
9388
9389 static void
9390 s390_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
9391 {
9392 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
9393 rtx mem;
9394
9395 emit_block_move (m_tramp, assemble_trampoline_template (),
9396 GEN_INT (2 * UNITS_PER_LONG), BLOCK_OP_NORMAL);
9397
9398 mem = adjust_address (m_tramp, Pmode, 2 * UNITS_PER_LONG);
9399 emit_move_insn (mem, cxt);
9400 mem = adjust_address (m_tramp, Pmode, 3 * UNITS_PER_LONG);
9401 emit_move_insn (mem, fnaddr);
9402 }
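
/* The resulting trampoline layout is therefore: the first
   2 * UNITS_PER_LONG bytes hold the code emitted by
   s390_asm_trampoline_template, followed by one word holding the static
   chain value and one word holding the target function address, which the
   template code loads into gprs 0 and 1 before branching. */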
9403
9404 /* Output assembler code to FILE to increment profiler label # LABELNO
9405 for profiling a function entry. */
9406
9407 void
9408 s390_function_profiler (FILE *file, int labelno)
9409 {
9410 rtx op[7];
9411
9412 char label[128];
9413 ASM_GENERATE_INTERNAL_LABEL (label, "LP", labelno);
9414
9415 fprintf (file, "# function profiler \n");
9416
9417 op[0] = gen_rtx_REG (Pmode, RETURN_REGNUM);
9418 op[1] = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
9419 op[1] = gen_rtx_MEM (Pmode, plus_constant (Pmode, op[1], UNITS_PER_LONG));
9420
9421 op[2] = gen_rtx_REG (Pmode, 1);
9422 op[3] = gen_rtx_SYMBOL_REF (Pmode, label);
9423 SYMBOL_REF_FLAGS (op[3]) = SYMBOL_FLAG_LOCAL;
9424
9425 op[4] = gen_rtx_SYMBOL_REF (Pmode, "_mcount");
9426 if (flag_pic)
9427 {
9428 op[4] = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op[4]), UNSPEC_PLT);
9429 op[4] = gen_rtx_CONST (Pmode, op[4]);
9430 }
9431
9432 if (TARGET_64BIT)
9433 {
9434 output_asm_insn ("stg\t%0,%1", op);
9435 output_asm_insn ("larl\t%2,%3", op);
9436 output_asm_insn ("brasl\t%0,%4", op);
9437 output_asm_insn ("lg\t%0,%1", op);
9438 }
9439 else if (!flag_pic)
9440 {
9441 op[6] = gen_label_rtx ();
9442
9443 output_asm_insn ("st\t%0,%1", op);
9444 output_asm_insn ("bras\t%2,%l6", op);
9445 output_asm_insn (".long\t%4", op);
9446 output_asm_insn (".long\t%3", op);
9447 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[6]));
9448 output_asm_insn ("l\t%0,0(%2)", op);
9449 output_asm_insn ("l\t%2,4(%2)", op);
9450 output_asm_insn ("basr\t%0,%0", op);
9451 output_asm_insn ("l\t%0,%1", op);
9452 }
9453 else
9454 {
9455 op[5] = gen_label_rtx ();
9456 op[6] = gen_label_rtx ();
9457
9458 output_asm_insn ("st\t%0,%1", op);
9459 output_asm_insn ("bras\t%2,%l6", op);
9460 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[5]));
9461 output_asm_insn (".long\t%4-%l5", op);
9462 output_asm_insn (".long\t%3-%l5", op);
9463 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[6]));
9464 output_asm_insn ("lr\t%0,%2", op);
9465 output_asm_insn ("a\t%0,0(%2)", op);
9466 output_asm_insn ("a\t%2,4(%2)", op);
9467 output_asm_insn ("basr\t%0,%0", op);
9468 output_asm_insn ("l\t%0,%1", op);
9469 }
9470 }
9471
9472 /* Encode symbol attributes (local vs. global, tls model) of a SYMBOL_REF
9473 into its SYMBOL_REF_FLAGS. */
9474
9475 static void
9476 s390_encode_section_info (tree decl, rtx rtl, int first)
9477 {
9478 default_encode_section_info (decl, rtl, first);
9479
9480 if (TREE_CODE (decl) == VAR_DECL)
9481 {
9482 /* If a variable has a forced alignment to < 2 bytes, mark it
9483 with SYMBOL_FLAG_ALIGN1 to prevent it from being used as a LARL
9484 operand. */
9485 if (DECL_USER_ALIGN (decl) && DECL_ALIGN (decl) < 16)
9486 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_ALIGN1;
9487 if (!DECL_SIZE (decl)
9488 || !DECL_ALIGN (decl)
9489 || !host_integerp (DECL_SIZE (decl), 0)
9490 || (DECL_ALIGN (decl) <= 64
9491 && DECL_ALIGN (decl) != tree_low_cst (DECL_SIZE (decl), 0)))
9492 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_NOT_NATURALLY_ALIGNED;
9493 }
9494
9495 /* Literal pool references don't have a decl so they are handled
9496 differently here. We rely on the information in the MEM_ALIGN
9497 entry to decide upon natural alignment. */
9498 if (MEM_P (rtl)
9499 && GET_CODE (XEXP (rtl, 0)) == SYMBOL_REF
9500 && TREE_CONSTANT_POOL_ADDRESS_P (XEXP (rtl, 0))
9501 && (MEM_ALIGN (rtl) == 0
9502 || GET_MODE_BITSIZE (GET_MODE (rtl)) == 0
9503 || MEM_ALIGN (rtl) < GET_MODE_BITSIZE (GET_MODE (rtl))))
9504 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_NOT_NATURALLY_ALIGNED;
9505 }
9506
9507 /* Output thunk to FILE that implements a C++ virtual function call (with
9508 multiple inheritance) to FUNCTION. The thunk adjusts the this pointer
9509 by DELTA, and unless VCALL_OFFSET is zero, applies an additional adjustment
9510 stored at VCALL_OFFSET in the vtable whose address is located at offset 0
9511 relative to the resulting this pointer. */
9512
9513 static void
9514 s390_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
9515 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
9516 tree function)
9517 {
9518 rtx op[10];
9519 int nonlocal = 0;
9520
9521 /* Make sure unwind info is emitted for the thunk if needed. */
9522 final_start_function (emit_barrier (), file, 1);
9523
9524 /* Operand 0 is the target function. */
9525 op[0] = XEXP (DECL_RTL (function), 0);
9526 if (flag_pic && !SYMBOL_REF_LOCAL_P (op[0]))
9527 {
9528 nonlocal = 1;
9529 op[0] = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op[0]),
9530 TARGET_64BIT ? UNSPEC_PLT : UNSPEC_GOT);
9531 op[0] = gen_rtx_CONST (Pmode, op[0]);
9532 }
9533
9534 /* Operand 1 is the 'this' pointer. */
9535 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
9536 op[1] = gen_rtx_REG (Pmode, 3);
9537 else
9538 op[1] = gen_rtx_REG (Pmode, 2);
9539
9540 /* Operand 2 is the delta. */
9541 op[2] = GEN_INT (delta);
9542
9543 /* Operand 3 is the vcall_offset. */
9544 op[3] = GEN_INT (vcall_offset);
9545
9546 /* Operand 4 is the temporary register. */
9547 op[4] = gen_rtx_REG (Pmode, 1);
9548
9549 /* Operands 5 to 8 can be used as labels. */
9550 op[5] = NULL_RTX;
9551 op[6] = NULL_RTX;
9552 op[7] = NULL_RTX;
9553 op[8] = NULL_RTX;
9554
9555 /* Operand 9 can be used for temporary register. */
9556 op[9] = NULL_RTX;
9557
9558 /* Generate code. */
9559 if (TARGET_64BIT)
9560 {
9561 /* Setup literal pool pointer if required. */
9562 if ((!DISP_IN_RANGE (delta)
9563 && !CONST_OK_FOR_K (delta)
9564 && !CONST_OK_FOR_Os (delta))
9565 || (!DISP_IN_RANGE (vcall_offset)
9566 && !CONST_OK_FOR_K (vcall_offset)
9567 && !CONST_OK_FOR_Os (vcall_offset)))
9568 {
9569 op[5] = gen_label_rtx ();
9570 output_asm_insn ("larl\t%4,%5", op);
9571 }
9572
9573 /* Add DELTA to this pointer. */
9574 if (delta)
9575 {
9576 if (CONST_OK_FOR_J (delta))
9577 output_asm_insn ("la\t%1,%2(%1)", op);
9578 else if (DISP_IN_RANGE (delta))
9579 output_asm_insn ("lay\t%1,%2(%1)", op);
9580 else if (CONST_OK_FOR_K (delta))
9581 output_asm_insn ("aghi\t%1,%2", op);
9582 else if (CONST_OK_FOR_Os (delta))
9583 output_asm_insn ("agfi\t%1,%2", op);
9584 else
9585 {
9586 op[6] = gen_label_rtx ();
9587 output_asm_insn ("agf\t%1,%6-%5(%4)", op);
9588 }
9589 }
9590
9591 /* Perform vcall adjustment. */
9592 if (vcall_offset)
9593 {
9594 if (DISP_IN_RANGE (vcall_offset))
9595 {
9596 output_asm_insn ("lg\t%4,0(%1)", op);
9597 output_asm_insn ("ag\t%1,%3(%4)", op);
9598 }
9599 else if (CONST_OK_FOR_K (vcall_offset))
9600 {
9601 output_asm_insn ("lghi\t%4,%3", op);
9602 output_asm_insn ("ag\t%4,0(%1)", op);
9603 output_asm_insn ("ag\t%1,0(%4)", op);
9604 }
9605 else if (CONST_OK_FOR_Os (vcall_offset))
9606 {
9607 output_asm_insn ("lgfi\t%4,%3", op);
9608 output_asm_insn ("ag\t%4,0(%1)", op);
9609 output_asm_insn ("ag\t%1,0(%4)", op);
9610 }
9611 else
9612 {
9613 op[7] = gen_label_rtx ();
9614 output_asm_insn ("llgf\t%4,%7-%5(%4)", op);
9615 output_asm_insn ("ag\t%4,0(%1)", op);
9616 output_asm_insn ("ag\t%1,0(%4)", op);
9617 }
9618 }
9619
9620 /* Jump to target. */
9621 output_asm_insn ("jg\t%0", op);
9622
9623 /* Output literal pool if required. */
9624 if (op[5])
9625 {
9626 output_asm_insn (".align\t4", op);
9627 targetm.asm_out.internal_label (file, "L",
9628 CODE_LABEL_NUMBER (op[5]));
9629 }
9630 if (op[6])
9631 {
9632 targetm.asm_out.internal_label (file, "L",
9633 CODE_LABEL_NUMBER (op[6]));
9634 output_asm_insn (".long\t%2", op);
9635 }
9636 if (op[7])
9637 {
9638 targetm.asm_out.internal_label (file, "L",
9639 CODE_LABEL_NUMBER (op[7]));
9640 output_asm_insn (".long\t%3", op);
9641 }
9642 }
9643 else
9644 {
9645 /* Setup base pointer if required. */
9646 if (!vcall_offset
9647 || (!DISP_IN_RANGE (delta)
9648 && !CONST_OK_FOR_K (delta)
9649 && !CONST_OK_FOR_Os (delta))
9650 || (!DISP_IN_RANGE (delta)
9651 && !CONST_OK_FOR_K (vcall_offset)
9652 && !CONST_OK_FOR_Os (vcall_offset)))
9653 {
9654 op[5] = gen_label_rtx ();
9655 output_asm_insn ("basr\t%4,0", op);
9656 targetm.asm_out.internal_label (file, "L",
9657 CODE_LABEL_NUMBER (op[5]));
9658 }
9659
9660 /* Add DELTA to this pointer. */
9661 if (delta)
9662 {
9663 if (CONST_OK_FOR_J (delta))
9664 output_asm_insn ("la\t%1,%2(%1)", op);
9665 else if (DISP_IN_RANGE (delta))
9666 output_asm_insn ("lay\t%1,%2(%1)", op);
9667 else if (CONST_OK_FOR_K (delta))
9668 output_asm_insn ("ahi\t%1,%2", op);
9669 else if (CONST_OK_FOR_Os (delta))
9670 output_asm_insn ("afi\t%1,%2", op);
9671 else
9672 {
9673 op[6] = gen_label_rtx ();
9674 output_asm_insn ("a\t%1,%6-%5(%4)", op);
9675 }
9676 }
9677
9678 /* Perform vcall adjustment. */
9679 if (vcall_offset)
9680 {
9681 if (CONST_OK_FOR_J (vcall_offset))
9682 {
9683 output_asm_insn ("l\t%4,0(%1)", op);
9684 output_asm_insn ("a\t%1,%3(%4)", op);
9685 }
9686 else if (DISP_IN_RANGE (vcall_offset))
9687 {
9688 output_asm_insn ("l\t%4,0(%1)", op);
9689 output_asm_insn ("ay\t%1,%3(%4)", op);
9690 }
9691 else if (CONST_OK_FOR_K (vcall_offset))
9692 {
9693 output_asm_insn ("lhi\t%4,%3", op);
9694 output_asm_insn ("a\t%4,0(%1)", op);
9695 output_asm_insn ("a\t%1,0(%4)", op);
9696 }
9697 else if (CONST_OK_FOR_Os (vcall_offset))
9698 {
9699 output_asm_insn ("iilf\t%4,%3", op);
9700 output_asm_insn ("a\t%4,0(%1)", op);
9701 output_asm_insn ("a\t%1,0(%4)", op);
9702 }
9703 else
9704 {
9705 op[7] = gen_label_rtx ();
9706 output_asm_insn ("l\t%4,%7-%5(%4)", op);
9707 output_asm_insn ("a\t%4,0(%1)", op);
9708 output_asm_insn ("a\t%1,0(%4)", op);
9709 }
9710
9711 /* We had to clobber the base pointer register.
9712 Re-setup the base pointer (with a different base). */
9713 op[5] = gen_label_rtx ();
9714 output_asm_insn ("basr\t%4,0", op);
9715 targetm.asm_out.internal_label (file, "L",
9716 CODE_LABEL_NUMBER (op[5]));
9717 }
9718
9719 /* Jump to target. */
9720 op[8] = gen_label_rtx ();
9721
9722 if (!flag_pic)
9723 output_asm_insn ("l\t%4,%8-%5(%4)", op);
9724 else if (!nonlocal)
9725 output_asm_insn ("a\t%4,%8-%5(%4)", op);
9726 /* We cannot call through .plt, since .plt requires %r12 loaded. */
9727 else if (flag_pic == 1)
9728 {
9729 output_asm_insn ("a\t%4,%8-%5(%4)", op);
9730 output_asm_insn ("l\t%4,%0(%4)", op);
9731 }
9732 else if (flag_pic == 2)
9733 {
9734 op[9] = gen_rtx_REG (Pmode, 0);
9735 output_asm_insn ("l\t%9,%8-4-%5(%4)", op);
9736 output_asm_insn ("a\t%4,%8-%5(%4)", op);
9737 output_asm_insn ("ar\t%4,%9", op);
9738 output_asm_insn ("l\t%4,0(%4)", op);
9739 }
9740
9741 output_asm_insn ("br\t%4", op);
9742
9743 /* Output literal pool. */
9744 output_asm_insn (".align\t4", op);
9745
9746 if (nonlocal && flag_pic == 2)
9747 output_asm_insn (".long\t%0", op);
9748 if (nonlocal)
9749 {
9750 op[0] = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
9751 SYMBOL_REF_FLAGS (op[0]) = SYMBOL_FLAG_LOCAL;
9752 }
9753
9754 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[8]));
9755 if (!flag_pic)
9756 output_asm_insn (".long\t%0", op);
9757 else
9758 output_asm_insn (".long\t%0-%5", op);
9759
9760 if (op[6])
9761 {
9762 targetm.asm_out.internal_label (file, "L",
9763 CODE_LABEL_NUMBER (op[6]));
9764 output_asm_insn (".long\t%2", op);
9765 }
9766 if (op[7])
9767 {
9768 targetm.asm_out.internal_label (file, "L",
9769 CODE_LABEL_NUMBER (op[7]));
9770 output_asm_insn (".long\t%3", op);
9771 }
9772 }
9773 final_end_function ();
9774 }
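/* A hedged sketch of what the 64-bit path above emits for a small DELTA of 16
   and a small VCALL_OFFSET of 24 (register numbers assume a non-aggregate
   return value, so the this pointer arrives in %r2 and %r1 is the temporary):

	la	%r2,16(%r2)	# this += DELTA
	lg	%r1,0(%r2)	# load the vtable pointer
	ag	%r2,24(%r1)	# this += *(vtable + VCALL_OFFSET)
	jg	target		# tail-call the target function

   Out-of-range constants instead go through the local literal pool that is
   set up with larl above.  */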
9775
9776 static bool
9777 s390_valid_pointer_mode (enum machine_mode mode)
9778 {
9779 return (mode == SImode || (TARGET_64BIT && mode == DImode));
9780 }
9781
9782 /* Checks whether the given CALL_EXPR would use a caller
9783 saved register. This is used to decide whether sibling call
9784 optimization could be performed on the respective function
9785 call. */
9786
9787 static bool
9788 s390_call_saved_register_used (tree call_expr)
9789 {
9790 CUMULATIVE_ARGS cum_v;
9791 cumulative_args_t cum;
9792 tree parameter;
9793 enum machine_mode mode;
9794 tree type;
9795 rtx parm_rtx;
9796 int reg, i;
9797
9798 INIT_CUMULATIVE_ARGS (cum_v, NULL, NULL, 0, 0);
9799 cum = pack_cumulative_args (&cum_v);
9800
9801 for (i = 0; i < call_expr_nargs (call_expr); i++)
9802 {
9803 parameter = CALL_EXPR_ARG (call_expr, i);
9804 gcc_assert (parameter);
9805
9806 /* For an undeclared variable passed as parameter we will get
9807 an ERROR_MARK node here. */
9808 if (TREE_CODE (parameter) == ERROR_MARK)
9809 return true;
9810
9811 type = TREE_TYPE (parameter);
9812 gcc_assert (type);
9813
9814 mode = TYPE_MODE (type);
9815 gcc_assert (mode);
9816
9817 if (pass_by_reference (&cum_v, mode, type, true))
9818 {
9819 mode = Pmode;
9820 type = build_pointer_type (type);
9821 }
9822
9823 parm_rtx = s390_function_arg (cum, mode, type, 0);
9824
9825 s390_function_arg_advance (cum, mode, type, 0);
9826
9827 if (!parm_rtx)
9828 continue;
9829
9830 if (REG_P (parm_rtx))
9831 {
9832 for (reg = 0;
9833 reg < HARD_REGNO_NREGS (REGNO (parm_rtx), GET_MODE (parm_rtx));
9834 reg++)
9835 if (!call_used_regs[reg + REGNO (parm_rtx)])
9836 return true;
9837 }
9838
9839 if (GET_CODE (parm_rtx) == PARALLEL)
9840 {
9841 int i;
9842
9843 for (i = 0; i < XVECLEN (parm_rtx, 0); i++)
9844 {
9845 rtx r = XEXP (XVECEXP (parm_rtx, 0, i), 0);
9846
9847 gcc_assert (REG_P (r));
9848
9849 for (reg = 0;
9850 reg < HARD_REGNO_NREGS (REGNO (r), GET_MODE (r));
9851 reg++)
9852 if (!call_used_regs[reg + REGNO (r)])
9853 return true;
9854 }
9855 }
9856
9857 }
9858 return false;
9859 }
9860
9861 /* Return true if the given call expression can be
9862 turned into a sibling call.
9863 DECL holds the declaration of the function to be called whereas
9864 EXP is the call expression itself. */
9865
9866 static bool
9867 s390_function_ok_for_sibcall (tree decl, tree exp)
9868 {
9869 /* The TPF epilogue uses register 1. */
9870 if (TARGET_TPF_PROFILING)
9871 return false;
9872
9873 /* The 31 bit PLT code uses register 12 (GOT pointer - call-saved)
9874 which would have to be restored before the sibcall. */
9875 if (!TARGET_64BIT && flag_pic && decl && !targetm.binds_local_p (decl))
9876 return false;
9877
9878 /* Register 6 on s390 is available as an argument register but unfortunately
9879 call-saved. This makes functions needing this register for arguments
9880 not suitable for sibcalls. */
9881 return !s390_call_saved_register_used (exp);
9882 }
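/* Illustration, assuming the usual s390 ABI where the first five integer
   arguments are passed in %r2..%r6: a call such as

     extern void callee (int, int, int, int, int);
     void caller (void) { callee (1, 2, 3, 4, 5); }

   places the fifth argument in the call-saved register %r6 and is therefore
   rejected by the check above.  */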
9883
9884 /* Return the fixed registers used for condition codes. */
9885
9886 static bool
9887 s390_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
9888 {
9889 *p1 = CC_REGNUM;
9890 *p2 = INVALID_REGNUM;
9891
9892 return true;
9893 }
9894
9895 /* This function is used by the call expanders of the machine description.
9896 It emits the call insn itself together with the necessary operations
9897 to adjust the target address and returns the emitted insn.
9898 ADDR_LOCATION is the target address rtx
9899 TLS_CALL the location of the thread-local symbol
9900 RESULT_REG the register where the result of the call should be stored
9901 RETADDR_REG the register where the return address should be stored
9902 If this parameter is NULL_RTX the call is considered
9903 to be a sibling call. */
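/* Illustration only (the real operands are supplied by the expanders in
   s390.md): a normal call is emitted roughly as

     s390_emit_call (addr, NULL_RTX, NULL_RTX,
		     gen_rtx_REG (Pmode, RETURN_REGNUM));

   while passing NULL_RTX as RETADDR_REG requests a sibling call instead.  */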
9904
9905 rtx
9906 s390_emit_call (rtx addr_location, rtx tls_call, rtx result_reg,
9907 rtx retaddr_reg)
9908 {
9909 bool plt_call = false;
9910 rtx insn;
9911 rtx call;
9912 rtx clobber;
9913 rtvec vec;
9914
9915 /* Direct function calls need special treatment. */
9916 if (GET_CODE (addr_location) == SYMBOL_REF)
9917 {
9918 /* When calling a global routine in PIC mode, we must
9919 replace the symbol itself with the PLT stub. */
9920 if (flag_pic && !SYMBOL_REF_LOCAL_P (addr_location))
9921 {
9922 if (retaddr_reg != NULL_RTX)
9923 {
9924 addr_location = gen_rtx_UNSPEC (Pmode,
9925 gen_rtvec (1, addr_location),
9926 UNSPEC_PLT);
9927 addr_location = gen_rtx_CONST (Pmode, addr_location);
9928 plt_call = true;
9929 }
9930 else
9931 /* For -fpic code the PLT entries might use r12 which is
9932 call-saved. Therefore we cannot do a sibcall when
9933 calling directly using a symbol ref. When reaching
9934 this point we decided (in s390_function_ok_for_sibcall)
9935 to do a sibcall for a function pointer but one of the
9936 optimizers was able to get rid of the function pointer
9937 by propagating the symbol ref into the call. This
9938 optimization is illegal for S/390 so we turn the direct
9939 call into an indirect call again. */
9940 addr_location = force_reg (Pmode, addr_location);
9941 }
9942
9943 /* Unless we can use the bras(l) insn, force the
9944 routine address into a register. */
9945 if (!TARGET_SMALL_EXEC && !TARGET_CPU_ZARCH)
9946 {
9947 if (flag_pic)
9948 addr_location = legitimize_pic_address (addr_location, 0);
9949 else
9950 addr_location = force_reg (Pmode, addr_location);
9951 }
9952 }
9953
9954 /* If it is already an indirect call or the code above moved the
9955 SYMBOL_REF to somewhere else make sure the address can be found in
9956 register 1. */
9957 if (retaddr_reg == NULL_RTX
9958 && GET_CODE (addr_location) != SYMBOL_REF
9959 && !plt_call)
9960 {
9961 emit_move_insn (gen_rtx_REG (Pmode, SIBCALL_REGNUM), addr_location);
9962 addr_location = gen_rtx_REG (Pmode, SIBCALL_REGNUM);
9963 }
9964
9965 addr_location = gen_rtx_MEM (QImode, addr_location);
9966 call = gen_rtx_CALL (VOIDmode, addr_location, const0_rtx);
9967
9968 if (result_reg != NULL_RTX)
9969 call = gen_rtx_SET (VOIDmode, result_reg, call);
9970
9971 if (retaddr_reg != NULL_RTX)
9972 {
9973 clobber = gen_rtx_CLOBBER (VOIDmode, retaddr_reg);
9974
9975 if (tls_call != NULL_RTX)
9976 vec = gen_rtvec (3, call, clobber,
9977 gen_rtx_USE (VOIDmode, tls_call));
9978 else
9979 vec = gen_rtvec (2, call, clobber);
9980
9981 call = gen_rtx_PARALLEL (VOIDmode, vec);
9982 }
9983
9984 insn = emit_call_insn (call);
9985
9986 /* 31-bit PLT stubs and tls calls use the GOT register implicitly. */
9987 if ((!TARGET_64BIT && plt_call) || tls_call != NULL_RTX)
9988 {
9989 /* s390_function_ok_for_sibcall should
9990 have denied sibcalls in this case. */
9991 gcc_assert (retaddr_reg != NULL_RTX);
9992 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), gen_rtx_REG (Pmode, 12));
9993 }
9994 return insn;
9995 }
9996
9997 /* Implement TARGET_CONDITIONAL_REGISTER_USAGE. */
9998
9999 static void
10000 s390_conditional_register_usage (void)
10001 {
10002 int i;
10003
10004 if (flag_pic)
10005 {
10006 fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
10007 call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
10008 }
10009 if (TARGET_CPU_ZARCH)
10010 {
10011 fixed_regs[BASE_REGNUM] = 0;
10012 call_used_regs[BASE_REGNUM] = 0;
10013 fixed_regs[RETURN_REGNUM] = 0;
10014 call_used_regs[RETURN_REGNUM] = 0;
10015 }
10016 if (TARGET_64BIT)
10017 {
10018 for (i = 24; i < 32; i++)
10019 call_used_regs[i] = call_really_used_regs[i] = 0;
10020 }
10021 else
10022 {
10023 for (i = 18; i < 20; i++)
10024 call_used_regs[i] = call_really_used_regs[i] = 0;
10025 }
10026
10027 if (TARGET_SOFT_FLOAT)
10028 {
10029 for (i = 16; i < 32; i++)
10030 call_used_regs[i] = fixed_regs[i] = 1;
10031 }
10032 }
10033
10034 /* Corresponding function to eh_return expander. */
10035
10036 static GTY(()) rtx s390_tpf_eh_return_symbol;
10037 void
10038 s390_emit_tpf_eh_return (rtx target)
10039 {
10040 rtx insn, reg;
10041
10042 if (!s390_tpf_eh_return_symbol)
10043 s390_tpf_eh_return_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tpf_eh_return");
10044
10045 reg = gen_rtx_REG (Pmode, 2);
10046
10047 emit_move_insn (reg, target);
10048 insn = s390_emit_call (s390_tpf_eh_return_symbol, NULL_RTX, reg,
10049 gen_rtx_REG (Pmode, RETURN_REGNUM));
10050 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), reg);
10051
10052 emit_move_insn (EH_RETURN_HANDLER_RTX, reg);
10053 }
10054
10055 /* Rework the prologue/epilogue to avoid saving/restoring
10056 registers unnecessarily. */
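/* For instance (illustration only): the generic prologue may contain a
   store-multiple that covers the base or return register even though the
   final frame layout shows those slots are not needed; the walk below
   replaces such an insn with a save of just the range recorded in
   cfun_frame_layout, or deletes it entirely when nothing needs saving.  */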
10057
10058 static void
10059 s390_optimize_prologue (void)
10060 {
10061 rtx insn, new_insn, next_insn;
10062
10063 /* Do a final recompute of the frame-related data. */
10064
10065 s390_update_frame_layout ();
10066
10067 /* If all special registers are in fact used, there's nothing we
10068 can do, so no point in walking the insn list. */
10069
10070 if (cfun_frame_layout.first_save_gpr <= BASE_REGNUM
10071 && cfun_frame_layout.last_save_gpr >= BASE_REGNUM
10072 && (TARGET_CPU_ZARCH
10073 || (cfun_frame_layout.first_save_gpr <= RETURN_REGNUM
10074 && cfun_frame_layout.last_save_gpr >= RETURN_REGNUM)))
10075 return;
10076
10077 /* Search for prologue/epilogue insns and replace them. */
10078
10079 for (insn = get_insns (); insn; insn = next_insn)
10080 {
10081 int first, last, off;
10082 rtx set, base, offset;
10083
10084 next_insn = NEXT_INSN (insn);
10085
10086 if (! NONJUMP_INSN_P (insn))
10087 continue;
10088
10089 if (GET_CODE (PATTERN (insn)) == PARALLEL
10090 && store_multiple_operation (PATTERN (insn), VOIDmode))
10091 {
10092 set = XVECEXP (PATTERN (insn), 0, 0);
10093 first = REGNO (SET_SRC (set));
10094 last = first + XVECLEN (PATTERN (insn), 0) - 1;
10095 offset = const0_rtx;
10096 base = eliminate_constant_term (XEXP (SET_DEST (set), 0), &offset);
10097 off = INTVAL (offset);
10098
10099 if (GET_CODE (base) != REG || off < 0)
10100 continue;
10101 if (cfun_frame_layout.first_save_gpr != -1
10102 && (cfun_frame_layout.first_save_gpr < first
10103 || cfun_frame_layout.last_save_gpr > last))
10104 continue;
10105 if (REGNO (base) != STACK_POINTER_REGNUM
10106 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
10107 continue;
10108 if (first > BASE_REGNUM || last < BASE_REGNUM)
10109 continue;
10110
10111 if (cfun_frame_layout.first_save_gpr != -1)
10112 {
10113 new_insn = save_gprs (base,
10114 off + (cfun_frame_layout.first_save_gpr
10115 - first) * UNITS_PER_LONG,
10116 cfun_frame_layout.first_save_gpr,
10117 cfun_frame_layout.last_save_gpr);
10118 new_insn = emit_insn_before (new_insn, insn);
10119 INSN_ADDRESSES_NEW (new_insn, -1);
10120 }
10121
10122 remove_insn (insn);
10123 continue;
10124 }
10125
10126 if (cfun_frame_layout.first_save_gpr == -1
10127 && GET_CODE (PATTERN (insn)) == SET
10128 && GET_CODE (SET_SRC (PATTERN (insn))) == REG
10129 && (REGNO (SET_SRC (PATTERN (insn))) == BASE_REGNUM
10130 || (!TARGET_CPU_ZARCH
10131 && REGNO (SET_SRC (PATTERN (insn))) == RETURN_REGNUM))
10132 && GET_CODE (SET_DEST (PATTERN (insn))) == MEM)
10133 {
10134 set = PATTERN (insn);
10135 first = REGNO (SET_SRC (set));
10136 offset = const0_rtx;
10137 base = eliminate_constant_term (XEXP (SET_DEST (set), 0), &offset);
10138 off = INTVAL (offset);
10139
10140 if (GET_CODE (base) != REG || off < 0)
10141 continue;
10142 if (REGNO (base) != STACK_POINTER_REGNUM
10143 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
10144 continue;
10145
10146 remove_insn (insn);
10147 continue;
10148 }
10149
10150 if (GET_CODE (PATTERN (insn)) == PARALLEL
10151 && load_multiple_operation (PATTERN (insn), VOIDmode))
10152 {
10153 set = XVECEXP (PATTERN (insn), 0, 0);
10154 first = REGNO (SET_DEST (set));
10155 last = first + XVECLEN (PATTERN (insn), 0) - 1;
10156 offset = const0_rtx;
10157 base = eliminate_constant_term (XEXP (SET_SRC (set), 0), &offset);
10158 off = INTVAL (offset);
10159
10160 if (GET_CODE (base) != REG || off < 0)
10161 continue;
10162 if (cfun_frame_layout.first_restore_gpr != -1
10163 && (cfun_frame_layout.first_restore_gpr < first
10164 || cfun_frame_layout.last_restore_gpr > last))
10165 continue;
10166 if (REGNO (base) != STACK_POINTER_REGNUM
10167 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
10168 continue;
10169 if (first > BASE_REGNUM || last < BASE_REGNUM)
10170 continue;
10171
10172 if (cfun_frame_layout.first_restore_gpr != -1)
10173 {
10174 new_insn = restore_gprs (base,
10175 off + (cfun_frame_layout.first_restore_gpr
10176 - first) * UNITS_PER_LONG,
10177 cfun_frame_layout.first_restore_gpr,
10178 cfun_frame_layout.last_restore_gpr);
10179 new_insn = emit_insn_before (new_insn, insn);
10180 INSN_ADDRESSES_NEW (new_insn, -1);
10181 }
10182
10183 remove_insn (insn);
10184 continue;
10185 }
10186
10187 if (cfun_frame_layout.first_restore_gpr == -1
10188 && GET_CODE (PATTERN (insn)) == SET
10189 && GET_CODE (SET_DEST (PATTERN (insn))) == REG
10190 && (REGNO (SET_DEST (PATTERN (insn))) == BASE_REGNUM
10191 || (!TARGET_CPU_ZARCH
10192 && REGNO (SET_DEST (PATTERN (insn))) == RETURN_REGNUM))
10193 && GET_CODE (SET_SRC (PATTERN (insn))) == MEM)
10194 {
10195 set = PATTERN (insn);
10196 first = REGNO (SET_DEST (set));
10197 offset = const0_rtx;
10198 base = eliminate_constant_term (XEXP (SET_SRC (set), 0), &offset);
10199 off = INTVAL (offset);
10200
10201 if (GET_CODE (base) != REG || off < 0)
10202 continue;
10203 if (REGNO (base) != STACK_POINTER_REGNUM
10204 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
10205 continue;
10206
10207 remove_insn (insn);
10208 continue;
10209 }
10210 }
10211 }
10212
10213 /* On z10 and later the dynamic branch prediction must see the
10214 backward jump within a certain window. If not, it falls back to
10215 the static prediction. This function rearranges the loop backward
10216 branch in a way which makes the static prediction always correct.
10217 The function returns true if it added an instruction. */
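/* Sketch of the rewrite done below (illustration only).  A far backward
   branch

	Lloop:	...
		jne	Lloop

   becomes

		je	Lnew
		j	Lloop
	Lnew:

   The backward branch is now unconditional and thus trivially predicted,
   while the new conditional branch is a short forward branch.  */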
10218 static bool
10219 s390_fix_long_loop_prediction (rtx insn)
10220 {
10221 rtx set = single_set (insn);
10222 rtx code_label, label_ref, new_label;
10223 rtx uncond_jump;
10224 rtx cur_insn;
10225 rtx tmp;
10226 int distance;
10227
10228 /* This will exclude branch on count and branch on index patterns
10229 since these are correctly statically predicted. */
10230 if (!set
10231 || SET_DEST (set) != pc_rtx
10232 || GET_CODE (SET_SRC(set)) != IF_THEN_ELSE)
10233 return false;
10234
10235 label_ref = (GET_CODE (XEXP (SET_SRC (set), 1)) == LABEL_REF ?
10236 XEXP (SET_SRC (set), 1) : XEXP (SET_SRC (set), 2));
10237
10238 gcc_assert (GET_CODE (label_ref) == LABEL_REF);
10239
10240 code_label = XEXP (label_ref, 0);
10241
10242 if (INSN_ADDRESSES (INSN_UID (code_label)) == -1
10243 || INSN_ADDRESSES (INSN_UID (insn)) == -1
10244 || (INSN_ADDRESSES (INSN_UID (insn))
10245 - INSN_ADDRESSES (INSN_UID (code_label)) < PREDICT_DISTANCE))
10246 return false;
10247
10248 for (distance = 0, cur_insn = PREV_INSN (insn);
10249 distance < PREDICT_DISTANCE - 6;
10250 distance += get_attr_length (cur_insn), cur_insn = PREV_INSN (cur_insn))
10251 if (!cur_insn || JUMP_P (cur_insn) || LABEL_P (cur_insn))
10252 return false;
10253
10254 new_label = gen_label_rtx ();
10255 uncond_jump = emit_jump_insn_after (
10256 gen_rtx_SET (VOIDmode, pc_rtx,
10257 gen_rtx_LABEL_REF (VOIDmode, code_label)),
10258 insn);
10259 emit_label_after (new_label, uncond_jump);
10260
10261 tmp = XEXP (SET_SRC (set), 1);
10262 XEXP (SET_SRC (set), 1) = XEXP (SET_SRC (set), 2);
10263 XEXP (SET_SRC (set), 2) = tmp;
10264 INSN_CODE (insn) = -1;
10265
10266 XEXP (label_ref, 0) = new_label;
10267 JUMP_LABEL (insn) = new_label;
10268 JUMP_LABEL (uncond_jump) = code_label;
10269
10270 return true;
10271 }
10272
10273 /* Returns 1 if INSN reads the value of REG for purposes not related
10274 to addressing of memory, and 0 otherwise. */
10275 static int
10276 s390_non_addr_reg_read_p (rtx reg, rtx insn)
10277 {
10278 return reg_referenced_p (reg, PATTERN (insn))
10279 && !reg_used_in_mem_p (REGNO (reg), PATTERN (insn));
10280 }
10281
10282 /* Starting from INSN find_cond_jump looks downwards in the insn
10283 stream for a single jump insn which is the last user of the
10284 condition code set in INSN. */
10285 static rtx
10286 find_cond_jump (rtx insn)
10287 {
10288 for (; insn; insn = NEXT_INSN (insn))
10289 {
10290 rtx ite, cc;
10291
10292 if (LABEL_P (insn))
10293 break;
10294
10295 if (!JUMP_P (insn))
10296 {
10297 if (reg_mentioned_p (gen_rtx_REG (CCmode, CC_REGNUM), insn))
10298 break;
10299 continue;
10300 }
10301
10302 /* This will be triggered by a return. */
10303 if (GET_CODE (PATTERN (insn)) != SET)
10304 break;
10305
10306 gcc_assert (SET_DEST (PATTERN (insn)) == pc_rtx);
10307 ite = SET_SRC (PATTERN (insn));
10308
10309 if (GET_CODE (ite) != IF_THEN_ELSE)
10310 break;
10311
10312 cc = XEXP (XEXP (ite, 0), 0);
10313 if (!REG_P (cc) || !CC_REGNO_P (REGNO (cc)))
10314 break;
10315
10316 if (find_reg_note (insn, REG_DEAD, cc))
10317 return insn;
10318 break;
10319 }
10320
10321 return NULL_RTX;
10322 }
10323
10324 /* Swap the condition in COND and the operands in OP0 and OP1 so that
10325 the semantics does not change. If NULL_RTX is passed as COND the
10326 function tries to find the conditional jump starting with INSN. */
10327 static void
10328 s390_swap_cmp (rtx cond, rtx *op0, rtx *op1, rtx insn)
10329 {
10330 rtx tmp = *op0;
10331
10332 if (cond == NULL_RTX)
10333 {
10334 rtx jump = find_cond_jump (NEXT_INSN (insn));
10335 jump = jump ? single_set (jump) : NULL_RTX;
10336
10337 if (jump == NULL_RTX)
10338 return;
10339
10340 cond = XEXP (XEXP (jump, 1), 0);
10341 }
10342
10343 *op0 = *op1;
10344 *op1 = tmp;
10345 PUT_CODE (cond, swap_condition (GET_CODE (cond)));
10346 }
10347
10348 /* On z10, instructions of the compare-and-branch family have the
10349 property to access the register occurring as second operand with
10350 its bits complemented. If such a compare is grouped with a second
10351 instruction that accesses the same register non-complemented, and
10352 if that register's value is delivered via a bypass, then the
10353 pipeline recycles, thereby causing significant performance decline.
10354 This function locates such situations and exchanges the two
10355 operands of the compare. The function returns true whenever it
10356 added an insn. */
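/* Hedged illustration: if the insn preceding a compare-and-branch such as

     crj	%r3,%r4,<cond>,Ltarget

   also accesses %r4 (the second operand, which is read with complemented
   bits), the operands and the condition mask are swapped to

     crj	%r4,%r3,<swapped cond>,Ltarget

   When the conflict cannot be avoided by swapping, a nop is emitted after
   the compare to separate the two insns.  */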
10357 static bool
10358 s390_z10_optimize_cmp (rtx insn)
10359 {
10360 rtx prev_insn, next_insn;
10361 bool insn_added_p = false;
10362 rtx cond, *op0, *op1;
10363
10364 if (GET_CODE (PATTERN (insn)) == PARALLEL)
10365 {
10366 /* Handle compare and branch and branch on count
10367 instructions. */
10368 rtx pattern = single_set (insn);
10369
10370 if (!pattern
10371 || SET_DEST (pattern) != pc_rtx
10372 || GET_CODE (SET_SRC (pattern)) != IF_THEN_ELSE)
10373 return false;
10374
10375 cond = XEXP (SET_SRC (pattern), 0);
10376 op0 = &XEXP (cond, 0);
10377 op1 = &XEXP (cond, 1);
10378 }
10379 else if (GET_CODE (PATTERN (insn)) == SET)
10380 {
10381 rtx src, dest;
10382
10383 /* Handle normal compare instructions. */
10384 src = SET_SRC (PATTERN (insn));
10385 dest = SET_DEST (PATTERN (insn));
10386
10387 if (!REG_P (dest)
10388 || !CC_REGNO_P (REGNO (dest))
10389 || GET_CODE (src) != COMPARE)
10390 return false;
10391
10392 /* s390_swap_cmp will try to find the conditional
10393 jump when passing NULL_RTX as condition. */
10394 cond = NULL_RTX;
10395 op0 = &XEXP (src, 0);
10396 op1 = &XEXP (src, 1);
10397 }
10398 else
10399 return false;
10400
10401 if (!REG_P (*op0) || !REG_P (*op1))
10402 return false;
10403
10404 if (GET_MODE_CLASS (GET_MODE (*op0)) != MODE_INT)
10405 return false;
10406
10407 /* Swap the COMPARE arguments and its mask if there is a
10408 conflicting access in the previous insn. */
10409 prev_insn = prev_active_insn (insn);
10410 if (prev_insn != NULL_RTX && INSN_P (prev_insn)
10411 && reg_referenced_p (*op1, PATTERN (prev_insn)))
10412 s390_swap_cmp (cond, op0, op1, insn);
10413
10414 /* Check if there is a conflict with the next insn. If there
10415 was no conflict with the previous insn, then swap the
10416 COMPARE arguments and its mask. If we already swapped
10417 the operands, or if swapping them would cause a conflict
10418 with the previous insn, issue a NOP after the COMPARE in
10419 order to separate the two instructions. */
10420 next_insn = next_active_insn (insn);
10421 if (next_insn != NULL_RTX && INSN_P (next_insn)
10422 && s390_non_addr_reg_read_p (*op1, next_insn))
10423 {
10424 if (prev_insn != NULL_RTX && INSN_P (prev_insn)
10425 && s390_non_addr_reg_read_p (*op0, prev_insn))
10426 {
10427 if (REGNO (*op1) == 0)
10428 emit_insn_after (gen_nop1 (), insn);
10429 else
10430 emit_insn_after (gen_nop (), insn);
10431 insn_added_p = true;
10432 }
10433 else
10434 s390_swap_cmp (cond, op0, op1, insn);
10435 }
10436 return insn_added_p;
10437 }
10438
10439 /* Perform machine-dependent processing. */
10440
10441 static void
10442 s390_reorg (void)
10443 {
10444 bool pool_overflow = false;
10445
10446 /* Make sure all splits have been performed; splits after
10447 machine_dependent_reorg might confuse insn length counts. */
10448 split_all_insns_noflow ();
10449
10450 /* Install the main literal pool and the associated base
10451 register load insns.
10452
10453 In addition, there are two problematic situations we need
10454 to correct:
10455
10456 - the literal pool might be > 4096 bytes in size, so that
10457 some of its elements cannot be directly accessed
10458
10459 - a branch target might be > 64K away from the branch, so that
10460 it is not possible to use a PC-relative instruction.
10461
10462 To fix those, we split the single literal pool into multiple
10463 pool chunks, reloading the pool base register at various
10464 points throughout the function to ensure it always points to
10465 the pool chunk the following code expects, and / or replace
10466 PC-relative branches by absolute branches.
10467
10468 However, the two problems are interdependent: splitting the
10469 literal pool can move a branch further away from its target,
10470 causing the 64K limit to overflow, and on the other hand,
10471 replacing a PC-relative branch by an absolute branch means
10472 we need to put the branch target address into the literal
10473 pool, possibly causing it to overflow.
10474
10475 So, we loop trying to fix up both problems until we manage
10476 to satisfy both conditions at the same time. Note that the
10477 loop is guaranteed to terminate as every pass of the loop
10478 strictly decreases the total number of PC-relative branches
10479 in the function. (This is not completely true as there
10480 might be branch-over-pool insns introduced by chunkify_start.
10481 Those never need to be split however.) */
10482
10483 for (;;)
10484 {
10485 struct constant_pool *pool = NULL;
10486
10487 /* Collect the literal pool. */
10488 if (!pool_overflow)
10489 {
10490 pool = s390_mainpool_start ();
10491 if (!pool)
10492 pool_overflow = true;
10493 }
10494
10495 /* If literal pool overflowed, start to chunkify it. */
10496 if (pool_overflow)
10497 pool = s390_chunkify_start ();
10498
10499 /* Split out-of-range branches. If this has created new
10500 literal pool entries, cancel current chunk list and
10501 recompute it. zSeries machines have large branch
10502 instructions, so we never need to split a branch. */
10503 if (!TARGET_CPU_ZARCH && s390_split_branches ())
10504 {
10505 if (pool_overflow)
10506 s390_chunkify_cancel (pool);
10507 else
10508 s390_mainpool_cancel (pool);
10509
10510 continue;
10511 }
10512
10513 /* If we made it up to here, both conditions are satisfied.
10514 Finish up literal pool related changes. */
10515 if (pool_overflow)
10516 s390_chunkify_finish (pool);
10517 else
10518 s390_mainpool_finish (pool);
10519
10520 /* We're done splitting branches. */
10521 cfun->machine->split_branches_pending_p = false;
10522 break;
10523 }
10524
10525 /* Generate out-of-pool execute target insns. */
10526 if (TARGET_CPU_ZARCH)
10527 {
10528 rtx insn, label, target;
10529
10530 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
10531 {
10532 label = s390_execute_label (insn);
10533 if (!label)
10534 continue;
10535
10536 gcc_assert (label != const0_rtx);
10537
10538 target = emit_label (XEXP (label, 0));
10539 INSN_ADDRESSES_NEW (target, -1);
10540
10541 target = emit_insn (s390_execute_target (insn));
10542 INSN_ADDRESSES_NEW (target, -1);
10543 }
10544 }
10545
10546 /* Try to optimize prologue and epilogue further. */
10547 s390_optimize_prologue ();
10548
10549 /* Walk over the insns and do some >=z10 specific changes. */
10550 if (s390_tune == PROCESSOR_2097_Z10
10551 || s390_tune == PROCESSOR_2817_Z196
10552 || s390_tune == PROCESSOR_2827_ZEC12)
10553 {
10554 rtx insn;
10555 bool insn_added_p = false;
10556
10557 /* The insn lengths and addresses have to be up to date for the
10558 following manipulations. */
10559 shorten_branches (get_insns ());
10560
10561 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
10562 {
10563 if (!INSN_P (insn) || INSN_CODE (insn) <= 0)
10564 continue;
10565
10566 if (JUMP_P (insn))
10567 insn_added_p |= s390_fix_long_loop_prediction (insn);
10568
10569 if ((GET_CODE (PATTERN (insn)) == PARALLEL
10570 || GET_CODE (PATTERN (insn)) == SET)
10571 && s390_tune == PROCESSOR_2097_Z10)
10572 insn_added_p |= s390_z10_optimize_cmp (insn);
10573 }
10574
10575 /* Adjust branches if we added new instructions. */
10576 if (insn_added_p)
10577 shorten_branches (get_insns ());
10578 }
10579 }
10580
10581 /* Return true if INSN is a fp load insn writing register REGNO. */
10582 static inline bool
10583 s390_fpload_toreg (rtx insn, unsigned int regno)
10584 {
10585 rtx set;
10586 enum attr_type flag = s390_safe_attr_type (insn);
10587
10588 if (flag != TYPE_FLOADSF && flag != TYPE_FLOADDF)
10589 return false;
10590
10591 set = single_set (insn);
10592
10593 if (set == NULL_RTX)
10594 return false;
10595
10596 if (!REG_P (SET_DEST (set)) || !MEM_P (SET_SRC (set)))
10597 return false;
10598
10599 if (REGNO (SET_DEST (set)) != regno)
10600 return false;
10601
10602 return true;
10603 }
10604
10605 /* This value describes the distance to be avoided between an
10606 arithmetic fp instruction and an fp load writing the same register.
10607 Z10_EARLYLOAD_DISTANCE - 1 as well as Z10_EARLYLOAD_DISTANCE + 1 is
10608 fine but the exact value has to be avoided. Otherwise the FP
10609 pipeline will throw an exception causing a major penalty. */
10610 #define Z10_EARLYLOAD_DISTANCE 7
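/* Illustration (hedged, register numbers made up): a sequence such as

     adbr	%f0,%f2		arithmetic fp insn writing %f0
     ...			several unrelated insns
     ld		%f0,0(%r1)	fp load of %f0 at exactly the critical distance

   incurs the penalty, so s390_z10_prevent_earlyload_conflicts below moves
   such a load to the bottom of the ready list and thereby delays it.  */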
10611
10612 /* Rearrange the ready list in order to avoid the situation described
10613 for Z10_EARLYLOAD_DISTANCE. A problematic load instruction is
10614 moved to the very end of the ready list. */
10615 static void
10616 s390_z10_prevent_earlyload_conflicts (rtx *ready, int *nready_p)
10617 {
10618 unsigned int regno;
10619 int nready = *nready_p;
10620 rtx tmp;
10621 int i;
10622 rtx insn;
10623 rtx set;
10624 enum attr_type flag;
10625 int distance;
10626
10627 /* Skip DISTANCE - 1 active insns. */
10628 for (insn = last_scheduled_insn, distance = Z10_EARLYLOAD_DISTANCE - 1;
10629 distance > 0 && insn != NULL_RTX;
10630 distance--, insn = prev_active_insn (insn))
10631 if (CALL_P (insn) || JUMP_P (insn))
10632 return;
10633
10634 if (insn == NULL_RTX)
10635 return;
10636
10637 set = single_set (insn);
10638
10639 if (set == NULL_RTX || !REG_P (SET_DEST (set))
10640 || GET_MODE_CLASS (GET_MODE (SET_DEST (set))) != MODE_FLOAT)
10641 return;
10642
10643 flag = s390_safe_attr_type (insn);
10644
10645 if (flag == TYPE_FLOADSF || flag == TYPE_FLOADDF)
10646 return;
10647
10648 regno = REGNO (SET_DEST (set));
10649 i = nready - 1;
10650
10651 while (!s390_fpload_toreg (ready[i], regno) && i > 0)
10652 i--;
10653
10654 if (!i)
10655 return;
10656
10657 tmp = ready[i];
10658 memmove (&ready[1], &ready[0], sizeof (rtx) * i);
10659 ready[0] = tmp;
10660 }
10661
10662
10663 /* The s390_sched_state variable tracks the state of the current or
10664 the last instruction group.
10665
10666 0,1,2 number of instructions scheduled in the current group
10667 3 the last group is complete - normal insns
10668 4 the last group was a cracked/expanded insn */
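/* Example of the resulting transitions (see s390_sched_variable_issue below):
   three normal insns advance the state 0 -> 1 -> 2 -> 3, a cracked or
   expanded insn moves it to 4, and an endgroup or group-alone insn closes
   the current group, i.e. moves the state to 3.  */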
10669
10670 static int s390_sched_state;
10671
10672 #define S390_OOO_SCHED_STATE_NORMAL 3
10673 #define S390_OOO_SCHED_STATE_CRACKED 4
10674
10675 #define S390_OOO_SCHED_ATTR_MASK_CRACKED 0x1
10676 #define S390_OOO_SCHED_ATTR_MASK_EXPANDED 0x2
10677 #define S390_OOO_SCHED_ATTR_MASK_ENDGROUP 0x4
10678 #define S390_OOO_SCHED_ATTR_MASK_GROUPALONE 0x8
10679
10680 static unsigned int
10681 s390_get_sched_attrmask (rtx insn)
10682 {
10683 unsigned int mask = 0;
10684
10685 if (get_attr_ooo_cracked (insn))
10686 mask |= S390_OOO_SCHED_ATTR_MASK_CRACKED;
10687 if (get_attr_ooo_expanded (insn))
10688 mask |= S390_OOO_SCHED_ATTR_MASK_EXPANDED;
10689 if (get_attr_ooo_endgroup (insn))
10690 mask |= S390_OOO_SCHED_ATTR_MASK_ENDGROUP;
10691 if (get_attr_ooo_groupalone (insn))
10692 mask |= S390_OOO_SCHED_ATTR_MASK_GROUPALONE;
10693 return mask;
10694 }
10695
10696 /* Return the scheduling score for INSN. The higher the score the
10697 better. The score is calculated from the OOO scheduling attributes
10698 of INSN and the scheduling state s390_sched_state. */
10699 static int
10700 s390_sched_score (rtx insn)
10701 {
10702 unsigned int mask = s390_get_sched_attrmask (insn);
10703 int score = 0;
10704
10705 switch (s390_sched_state)
10706 {
10707 case 0:
10708 /* Try to put insns into the first slot which would otherwise
10709 break a group. */
10710 if ((mask & S390_OOO_SCHED_ATTR_MASK_CRACKED) != 0
10711 || (mask & S390_OOO_SCHED_ATTR_MASK_EXPANDED) != 0)
10712 score += 5;
10713 if ((mask & S390_OOO_SCHED_ATTR_MASK_GROUPALONE) != 0)
10714 score += 10;
10715 case 1:
10716 /* Prefer not cracked insns while trying to put together a
10717 group. */
10718 if ((mask & S390_OOO_SCHED_ATTR_MASK_CRACKED) == 0
10719 && (mask & S390_OOO_SCHED_ATTR_MASK_EXPANDED) == 0
10720 && (mask & S390_OOO_SCHED_ATTR_MASK_GROUPALONE) == 0)
10721 score += 10;
10722 if ((mask & S390_OOO_SCHED_ATTR_MASK_ENDGROUP) == 0)
10723 score += 5;
10724 break;
10725 case 2:
10726 /* Prefer not cracked insns while trying to put together a
10727 group. */
10728 if ((mask & S390_OOO_SCHED_ATTR_MASK_CRACKED) == 0
10729 && (mask & S390_OOO_SCHED_ATTR_MASK_EXPANDED) == 0
10730 && (mask & S390_OOO_SCHED_ATTR_MASK_GROUPALONE) == 0)
10731 score += 10;
10732 /* Prefer endgroup insns in the last slot. */
10733 if ((mask & S390_OOO_SCHED_ATTR_MASK_ENDGROUP) != 0)
10734 score += 10;
10735 break;
10736 case S390_OOO_SCHED_STATE_NORMAL:
10737 /* Prefer not cracked insns if the last was not cracked. */
10738 if ((mask & S390_OOO_SCHED_ATTR_MASK_CRACKED) == 0
10739 && (mask & S390_OOO_SCHED_ATTR_MASK_EXPANDED) == 0)
10740 score += 5;
10741 if ((mask & S390_OOO_SCHED_ATTR_MASK_GROUPALONE) != 0)
10742 score += 10;
10743 break;
10744 case S390_OOO_SCHED_STATE_CRACKED:
10745 /* Try to keep cracked insns together to prevent them from
10746 interrupting groups. */
10747 if ((mask & S390_OOO_SCHED_ATTR_MASK_CRACKED) != 0
10748 || (mask & S390_OOO_SCHED_ATTR_MASK_EXPANDED) != 0)
10749 score += 5;
10750 break;
10751 }
10752 return score;
10753 }
10754
10755 /* This function is called via hook TARGET_SCHED_REORDER before
10756 issuing one insn from list READY which contains *NREADYP entries.
10757 For target z10 it reorders load instructions to avoid early load
10758 conflicts in the floating point pipeline. */
10759 static int
10760 s390_sched_reorder (FILE *file, int verbose,
10761 rtx *ready, int *nreadyp, int clock ATTRIBUTE_UNUSED)
10762 {
10763 if (s390_tune == PROCESSOR_2097_Z10)
10764 if (reload_completed && *nreadyp > 1)
10765 s390_z10_prevent_earlyload_conflicts (ready, nreadyp);
10766
10767 if (s390_tune == PROCESSOR_2827_ZEC12
10768 && reload_completed
10769 && *nreadyp > 1)
10770 {
10771 int i;
10772 int last_index = *nreadyp - 1;
10773 int max_index = -1;
10774 int max_score = -1;
10775 rtx tmp;
10776
10777 /* Just move the insn with the highest score to the top (the
10778 end) of the list. A full sort is not needed since a conflict
10779 in the hazard recognition cannot happen. So the top insn in
10780 the ready list will always be taken. */
10781 for (i = last_index; i >= 0; i--)
10782 {
10783 int score;
10784
10785 if (recog_memoized (ready[i]) < 0)
10786 continue;
10787
10788 score = s390_sched_score (ready[i]);
10789 if (score > max_score)
10790 {
10791 max_score = score;
10792 max_index = i;
10793 }
10794 }
10795
10796 if (max_index != -1)
10797 {
10798 if (max_index != last_index)
10799 {
10800 tmp = ready[max_index];
10801 ready[max_index] = ready[last_index];
10802 ready[last_index] = tmp;
10803
10804 if (verbose > 5)
10805 fprintf (file,
10806 "move insn %d to the top of list\n",
10807 INSN_UID (ready[last_index]));
10808 }
10809 else if (verbose > 5)
10810 fprintf (file,
10811 "best insn %d already on top\n",
10812 INSN_UID (ready[last_index]));
10813 }
10814
10815 if (verbose > 5)
10816 {
10817 fprintf (file, "ready list ooo attributes - sched state: %d\n",
10818 s390_sched_state);
10819
10820 for (i = last_index; i >= 0; i--)
10821 {
10822 if (recog_memoized (ready[i]) < 0)
10823 continue;
10824 fprintf (file, "insn %d score: %d: ", INSN_UID (ready[i]),
10825 s390_sched_score (ready[i]));
10826 #define PRINT_OOO_ATTR(ATTR) fprintf (file, "%s ", get_attr_##ATTR (ready[i]) ? #ATTR : "!" #ATTR);
10827 PRINT_OOO_ATTR (ooo_cracked);
10828 PRINT_OOO_ATTR (ooo_expanded);
10829 PRINT_OOO_ATTR (ooo_endgroup);
10830 PRINT_OOO_ATTR (ooo_groupalone);
10831 #undef PRINT_OOO_ATTR
10832 fprintf (file, "\n");
10833 }
10834 }
10835 }
10836
10837 return s390_issue_rate ();
10838 }
10839
10840
10841 /* This function is called via hook TARGET_SCHED_VARIABLE_ISSUE after
10842 the scheduler has issued INSN. It stores the last issued insn into
10843 last_scheduled_insn in order to make it available for
10844 s390_sched_reorder. */
10845 static int
10846 s390_sched_variable_issue (FILE *file, int verbose, rtx insn, int more)
10847 {
10848 last_scheduled_insn = insn;
10849
10850 if (s390_tune == PROCESSOR_2827_ZEC12
10851 && reload_completed
10852 && recog_memoized (insn) >= 0)
10853 {
10854 unsigned int mask = s390_get_sched_attrmask (insn);
10855
10856 if ((mask & S390_OOO_SCHED_ATTR_MASK_CRACKED) != 0
10857 || (mask & S390_OOO_SCHED_ATTR_MASK_EXPANDED) != 0)
10858 s390_sched_state = S390_OOO_SCHED_STATE_CRACKED;
10859 else if ((mask & S390_OOO_SCHED_ATTR_MASK_ENDGROUP) != 0
10860 || (mask & S390_OOO_SCHED_ATTR_MASK_GROUPALONE) != 0)
10861 s390_sched_state = S390_OOO_SCHED_STATE_NORMAL;
10862 else
10863 {
10864 /* Only normal insns are left (mask == 0). */
10865 switch (s390_sched_state)
10866 {
10867 case 0:
10868 case 1:
10869 case 2:
10870 case S390_OOO_SCHED_STATE_NORMAL:
10871 if (s390_sched_state == S390_OOO_SCHED_STATE_NORMAL)
10872 s390_sched_state = 1;
10873 else
10874 s390_sched_state++;
10875
10876 break;
10877 case S390_OOO_SCHED_STATE_CRACKED:
10878 s390_sched_state = S390_OOO_SCHED_STATE_NORMAL;
10879 break;
10880 }
10881 }
10882 if (verbose > 5)
10883 {
10884 fprintf (file, "insn %d: ", INSN_UID (insn));
10885 #define PRINT_OOO_ATTR(ATTR) \
10886 fprintf (file, "%s ", get_attr_##ATTR (insn) ? #ATTR : "");
10887 PRINT_OOO_ATTR (ooo_cracked);
10888 PRINT_OOO_ATTR (ooo_expanded);
10889 PRINT_OOO_ATTR (ooo_endgroup);
10890 PRINT_OOO_ATTR (ooo_groupalone);
10891 #undef PRINT_OOO_ATTR
10892 fprintf (file, "\n");
10893 fprintf (file, "sched state: %d\n", s390_sched_state);
10894 }
10895 }
10896
10897 if (GET_CODE (PATTERN (insn)) != USE
10898 && GET_CODE (PATTERN (insn)) != CLOBBER)
10899 return more - 1;
10900 else
10901 return more;
10902 }
10903
10904 static void
10905 s390_sched_init (FILE *file ATTRIBUTE_UNUSED,
10906 int verbose ATTRIBUTE_UNUSED,
10907 int max_ready ATTRIBUTE_UNUSED)
10908 {
10909 last_scheduled_insn = NULL_RTX;
10910 s390_sched_state = 0;
10911 }
10912
10913 /* This function checks the whole of insn X for memory references. The
10914 function always returns zero because the framework it is called
10915 from would stop recursively analyzing the insn upon a return value
10916 other than zero. The real result of this function is updating
10917 counter variable MEM_COUNT. */
10918 static int
10919 check_dpu (rtx *x, unsigned *mem_count)
10920 {
10921 if (*x != NULL_RTX && MEM_P (*x))
10922 (*mem_count)++;
10923 return 0;
10924 }
10925
10926 /* This target hook implementation for TARGET_LOOP_UNROLL_ADJUST calculates
10927 a new unroll count for struct loop *loop if tuned for cpus with
10928 a built-in stride prefetcher.
10929 The loop is analyzed for memory accesses by calling check_dpu for
10930 each rtx of the loop. Depending on the loop_depth and the number of
10931 memory accesses a new count <= nunroll is returned to improve the
10932 behaviour of the hardware prefetch unit. */
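/* Worked example of the formula below: a loop of depth 1 containing 7 memory
   references is limited to MIN (nunroll, 28 / 7), so a proposed unroll factor
   of 8 would be reduced to 4.  */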
10933 static unsigned
10934 s390_loop_unroll_adjust (unsigned nunroll, struct loop *loop)
10935 {
10936 basic_block *bbs;
10937 rtx insn;
10938 unsigned i;
10939 unsigned mem_count = 0;
10940
10941 if (s390_tune != PROCESSOR_2097_Z10
10942 && s390_tune != PROCESSOR_2817_Z196
10943 && s390_tune != PROCESSOR_2827_ZEC12)
10944 return nunroll;
10945
10946 /* Count the number of memory references within the loop body. */
10947 bbs = get_loop_body (loop);
10948 for (i = 0; i < loop->num_nodes; i++)
10949 {
10950 for (insn = BB_HEAD (bbs[i]); insn != BB_END (bbs[i]); insn = NEXT_INSN (insn))
10951 if (INSN_P (insn) && INSN_CODE (insn) != -1)
10952 for_each_rtx (&insn, (rtx_function) check_dpu, &mem_count);
10953 }
10954 free (bbs);
10955
10956 /* Prevent division by zero, and we do not need to adjust nunroll in this case. */
10957 if (mem_count == 0)
10958 return nunroll;
10959
10960 switch (loop_depth(loop))
10961 {
10962 case 1:
10963 return MIN (nunroll, 28 / mem_count);
10964 case 2:
10965 return MIN (nunroll, 22 / mem_count);
10966 default:
10967 return MIN (nunroll, 16 / mem_count);
10968 }
10969 }
10970
10971 /* Initialize GCC target structure. */
10972
10973 #undef TARGET_ASM_ALIGNED_HI_OP
10974 #define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
10975 #undef TARGET_ASM_ALIGNED_DI_OP
10976 #define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
10977 #undef TARGET_ASM_INTEGER
10978 #define TARGET_ASM_INTEGER s390_assemble_integer
10979
10980 #undef TARGET_ASM_OPEN_PAREN
10981 #define TARGET_ASM_OPEN_PAREN ""
10982
10983 #undef TARGET_ASM_CLOSE_PAREN
10984 #define TARGET_ASM_CLOSE_PAREN ""
10985
10986 #undef TARGET_OPTION_OVERRIDE
10987 #define TARGET_OPTION_OVERRIDE s390_option_override
10988
10989 #undef TARGET_ENCODE_SECTION_INFO
10990 #define TARGET_ENCODE_SECTION_INFO s390_encode_section_info
10991
10992 #undef TARGET_SCALAR_MODE_SUPPORTED_P
10993 #define TARGET_SCALAR_MODE_SUPPORTED_P s390_scalar_mode_supported_p
10994
10995 #ifdef HAVE_AS_TLS
10996 #undef TARGET_HAVE_TLS
10997 #define TARGET_HAVE_TLS true
10998 #endif
10999 #undef TARGET_CANNOT_FORCE_CONST_MEM
11000 #define TARGET_CANNOT_FORCE_CONST_MEM s390_cannot_force_const_mem
11001
11002 #undef TARGET_DELEGITIMIZE_ADDRESS
11003 #define TARGET_DELEGITIMIZE_ADDRESS s390_delegitimize_address
11004
11005 #undef TARGET_LEGITIMIZE_ADDRESS
11006 #define TARGET_LEGITIMIZE_ADDRESS s390_legitimize_address
11007
11008 #undef TARGET_RETURN_IN_MEMORY
11009 #define TARGET_RETURN_IN_MEMORY s390_return_in_memory
11010
11011 #undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
11012 #define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA s390_output_addr_const_extra
11013
11014 #undef TARGET_ASM_OUTPUT_MI_THUNK
11015 #define TARGET_ASM_OUTPUT_MI_THUNK s390_output_mi_thunk
11016 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
11017 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
11018
11019 #undef TARGET_SCHED_ADJUST_PRIORITY
11020 #define TARGET_SCHED_ADJUST_PRIORITY s390_adjust_priority
11021 #undef TARGET_SCHED_ISSUE_RATE
11022 #define TARGET_SCHED_ISSUE_RATE s390_issue_rate
11023 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
11024 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD s390_first_cycle_multipass_dfa_lookahead
11025
11026 #undef TARGET_SCHED_VARIABLE_ISSUE
11027 #define TARGET_SCHED_VARIABLE_ISSUE s390_sched_variable_issue
11028 #undef TARGET_SCHED_REORDER
11029 #define TARGET_SCHED_REORDER s390_sched_reorder
11030 #undef TARGET_SCHED_INIT
11031 #define TARGET_SCHED_INIT s390_sched_init
11032
11033 #undef TARGET_CANNOT_COPY_INSN_P
11034 #define TARGET_CANNOT_COPY_INSN_P s390_cannot_copy_insn_p
11035 #undef TARGET_RTX_COSTS
11036 #define TARGET_RTX_COSTS s390_rtx_costs
11037 #undef TARGET_ADDRESS_COST
11038 #define TARGET_ADDRESS_COST s390_address_cost
11039 #undef TARGET_REGISTER_MOVE_COST
11040 #define TARGET_REGISTER_MOVE_COST s390_register_move_cost
11041 #undef TARGET_MEMORY_MOVE_COST
11042 #define TARGET_MEMORY_MOVE_COST s390_memory_move_cost
11043
11044 #undef TARGET_MACHINE_DEPENDENT_REORG
11045 #define TARGET_MACHINE_DEPENDENT_REORG s390_reorg
11046
11047 #undef TARGET_VALID_POINTER_MODE
11048 #define TARGET_VALID_POINTER_MODE s390_valid_pointer_mode
11049
11050 #undef TARGET_BUILD_BUILTIN_VA_LIST
11051 #define TARGET_BUILD_BUILTIN_VA_LIST s390_build_builtin_va_list
11052 #undef TARGET_EXPAND_BUILTIN_VA_START
11053 #define TARGET_EXPAND_BUILTIN_VA_START s390_va_start
11054 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
11055 #define TARGET_GIMPLIFY_VA_ARG_EXPR s390_gimplify_va_arg
11056
11057 #undef TARGET_PROMOTE_FUNCTION_MODE
11058 #define TARGET_PROMOTE_FUNCTION_MODE s390_promote_function_mode
11059 #undef TARGET_PASS_BY_REFERENCE
11060 #define TARGET_PASS_BY_REFERENCE s390_pass_by_reference
11061
11062 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
11063 #define TARGET_FUNCTION_OK_FOR_SIBCALL s390_function_ok_for_sibcall
11064 #undef TARGET_FUNCTION_ARG
11065 #define TARGET_FUNCTION_ARG s390_function_arg
11066 #undef TARGET_FUNCTION_ARG_ADVANCE
11067 #define TARGET_FUNCTION_ARG_ADVANCE s390_function_arg_advance
11068 #undef TARGET_FUNCTION_VALUE
11069 #define TARGET_FUNCTION_VALUE s390_function_value
11070 #undef TARGET_LIBCALL_VALUE
11071 #define TARGET_LIBCALL_VALUE s390_libcall_value
11072
11073 #undef TARGET_FIXED_CONDITION_CODE_REGS
11074 #define TARGET_FIXED_CONDITION_CODE_REGS s390_fixed_condition_code_regs
11075
11076 #undef TARGET_CC_MODES_COMPATIBLE
11077 #define TARGET_CC_MODES_COMPATIBLE s390_cc_modes_compatible
11078
11079 #undef TARGET_INVALID_WITHIN_DOLOOP
11080 #define TARGET_INVALID_WITHIN_DOLOOP hook_constcharptr_const_rtx_null
11081
11082 #ifdef HAVE_AS_TLS
11083 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
11084 #define TARGET_ASM_OUTPUT_DWARF_DTPREL s390_output_dwarf_dtprel
11085 #endif
11086
11087 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
11088 #undef TARGET_MANGLE_TYPE
11089 #define TARGET_MANGLE_TYPE s390_mangle_type
11090 #endif
11091
11092 #undef TARGET_SCALAR_MODE_SUPPORTED_P
11093 #define TARGET_SCALAR_MODE_SUPPORTED_P s390_scalar_mode_supported_p
11094
11095 #undef TARGET_PREFERRED_RELOAD_CLASS
11096 #define TARGET_PREFERRED_RELOAD_CLASS s390_preferred_reload_class
11097
11098 #undef TARGET_SECONDARY_RELOAD
11099 #define TARGET_SECONDARY_RELOAD s390_secondary_reload
11100
11101 #undef TARGET_LIBGCC_CMP_RETURN_MODE
11102 #define TARGET_LIBGCC_CMP_RETURN_MODE s390_libgcc_cmp_return_mode
11103
11104 #undef TARGET_LIBGCC_SHIFT_COUNT_MODE
11105 #define TARGET_LIBGCC_SHIFT_COUNT_MODE s390_libgcc_shift_count_mode
11106
11107 #undef TARGET_LEGITIMATE_ADDRESS_P
11108 #define TARGET_LEGITIMATE_ADDRESS_P s390_legitimate_address_p
11109
11110 #undef TARGET_LEGITIMATE_CONSTANT_P
11111 #define TARGET_LEGITIMATE_CONSTANT_P s390_legitimate_constant_p
11112
11113 #undef TARGET_LRA_P
11114 #define TARGET_LRA_P s390_lra_p
11115
11116 #undef TARGET_CAN_ELIMINATE
11117 #define TARGET_CAN_ELIMINATE s390_can_eliminate
11118
11119 #undef TARGET_CONDITIONAL_REGISTER_USAGE
11120 #define TARGET_CONDITIONAL_REGISTER_USAGE s390_conditional_register_usage
11121
11122 #undef TARGET_LOOP_UNROLL_ADJUST
11123 #define TARGET_LOOP_UNROLL_ADJUST s390_loop_unroll_adjust
11124
11125 #undef TARGET_ASM_TRAMPOLINE_TEMPLATE
11126 #define TARGET_ASM_TRAMPOLINE_TEMPLATE s390_asm_trampoline_template
11127 #undef TARGET_TRAMPOLINE_INIT
11128 #define TARGET_TRAMPOLINE_INIT s390_trampoline_init
11129
11130 #undef TARGET_UNWIND_WORD_MODE
11131 #define TARGET_UNWIND_WORD_MODE s390_unwind_word_mode
11132
11133 #undef TARGET_CANONICALIZE_COMPARISON
11134 #define TARGET_CANONICALIZE_COMPARISON s390_canonicalize_comparison
11135
11136 struct gcc_target targetm = TARGET_INITIALIZER;
11137
11138 #include "gt-s390.h"