1 /* Subroutines used for code generation on IBM S/390 and zSeries
2 Copyright (C) 1999-2018 Free Software Foundation, Inc.
3 Contributed by Hartmut Penner (hpenner@de.ibm.com) and
4 Ulrich Weigand (uweigand@de.ibm.com) and
5 Andreas Krebbel (Andreas.Krebbel@de.ibm.com).
6
7 This file is part of GCC.
8
9 GCC is free software; you can redistribute it and/or modify it under
10 the terms of the GNU General Public License as published by the Free
11 Software Foundation; either version 3, or (at your option) any later
12 version.
13
14 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
15 WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING3. If not see
21 <http://www.gnu.org/licenses/>. */
22
23 #define IN_TARGET_CODE 1
24
25 #include "config.h"
26 #include "system.h"
27 #include "coretypes.h"
28 #include "backend.h"
29 #include "target.h"
30 #include "target-globals.h"
31 #include "rtl.h"
32 #include "tree.h"
33 #include "gimple.h"
34 #include "cfghooks.h"
35 #include "cfgloop.h"
36 #include "df.h"
37 #include "memmodel.h"
38 #include "tm_p.h"
39 #include "stringpool.h"
40 #include "attribs.h"
41 #include "expmed.h"
42 #include "optabs.h"
43 #include "regs.h"
44 #include "emit-rtl.h"
45 #include "recog.h"
46 #include "cgraph.h"
47 #include "diagnostic-core.h"
48 #include "diagnostic.h"
49 #include "alias.h"
50 #include "fold-const.h"
51 #include "print-tree.h"
52 #include "stor-layout.h"
53 #include "varasm.h"
54 #include "calls.h"
55 #include "conditions.h"
56 #include "output.h"
57 #include "insn-attr.h"
58 #include "flags.h"
59 #include "except.h"
60 #include "dojump.h"
61 #include "explow.h"
62 #include "stmt.h"
63 #include "expr.h"
64 #include "reload.h"
65 #include "cfgrtl.h"
66 #include "cfganal.h"
67 #include "lcm.h"
68 #include "cfgbuild.h"
69 #include "cfgcleanup.h"
70 #include "debug.h"
71 #include "langhooks.h"
72 #include "internal-fn.h"
73 #include "gimple-fold.h"
74 #include "tree-eh.h"
75 #include "gimplify.h"
76 #include "params.h"
77 #include "opts.h"
78 #include "tree-pass.h"
79 #include "context.h"
80 #include "builtins.h"
81 #include "rtl-iter.h"
82 #include "intl.h"
83 #include "tm-constrs.h"
84 #include "tree-vrp.h"
85 #include "symbol-summary.h"
86 #include "ipa-prop.h"
87 #include "ipa-fnsummary.h"
88 #include "sched-int.h"
89
90 /* This file should be included last. */
91 #include "target-def.h"
92
93 static bool s390_hard_regno_mode_ok (unsigned int, machine_mode);
94
95 /* Remember the last target of s390_set_current_function. */
96 static GTY(()) tree s390_previous_fndecl;
97
98 /* Define the specific costs for a given cpu. */
99
100 struct processor_costs
101 {
102 /* multiplication */
103 const int m; /* cost of an M instruction. */
104 const int mghi; /* cost of an MGHI instruction. */
105 const int mh; /* cost of an MH instruction. */
106 const int mhi; /* cost of an MHI instruction. */
107 const int ml; /* cost of an ML instruction. */
108 const int mr; /* cost of an MR instruction. */
109 const int ms; /* cost of an MS instruction. */
110 const int msg; /* cost of an MSG instruction. */
111 const int msgf; /* cost of an MSGF instruction. */
112 const int msgfr; /* cost of an MSGFR instruction. */
113 const int msgr; /* cost of an MSGR instruction. */
114 const int msr; /* cost of an MSR instruction. */
115 const int mult_df; /* cost of multiplication in DFmode. */
116 const int mxbr;
117 /* square root */
118 const int sqxbr; /* cost of square root in TFmode. */
119 const int sqdbr; /* cost of square root in DFmode. */
120 const int sqebr; /* cost of square root in SFmode. */
121 /* multiply and add */
122 const int madbr; /* cost of multiply and add in DFmode. */
123 const int maebr; /* cost of multiply and add in SFmode. */
124 /* division */
125 const int dxbr;
126 const int ddbr;
127 const int debr;
128 const int dlgr;
129 const int dlr;
130 const int dr;
131 const int dsgfr;
132 const int dsgr;
133 };
134
135 #define s390_cost ((const struct processor_costs *)(s390_cost_pointer))
136
137 static const
138 struct processor_costs z900_cost =
139 {
140 COSTS_N_INSNS (5), /* M */
141 COSTS_N_INSNS (10), /* MGHI */
142 COSTS_N_INSNS (5), /* MH */
143 COSTS_N_INSNS (4), /* MHI */
144 COSTS_N_INSNS (5), /* ML */
145 COSTS_N_INSNS (5), /* MR */
146 COSTS_N_INSNS (4), /* MS */
147 COSTS_N_INSNS (15), /* MSG */
148 COSTS_N_INSNS (7), /* MSGF */
149 COSTS_N_INSNS (7), /* MSGFR */
150 COSTS_N_INSNS (10), /* MSGR */
151 COSTS_N_INSNS (4), /* MSR */
152 COSTS_N_INSNS (7), /* multiplication in DFmode */
153 COSTS_N_INSNS (13), /* MXBR */
154 COSTS_N_INSNS (136), /* SQXBR */
155 COSTS_N_INSNS (44), /* SQDBR */
156 COSTS_N_INSNS (35), /* SQEBR */
157 COSTS_N_INSNS (18), /* MADBR */
158 COSTS_N_INSNS (13), /* MAEBR */
159 COSTS_N_INSNS (134), /* DXBR */
160 COSTS_N_INSNS (30), /* DDBR */
161 COSTS_N_INSNS (27), /* DEBR */
162 COSTS_N_INSNS (220), /* DLGR */
163 COSTS_N_INSNS (34), /* DLR */
164 COSTS_N_INSNS (34), /* DR */
165 COSTS_N_INSNS (32), /* DSGFR */
166 COSTS_N_INSNS (32), /* DSGR */
167 };
168
169 static const
170 struct processor_costs z990_cost =
171 {
172 COSTS_N_INSNS (4), /* M */
173 COSTS_N_INSNS (2), /* MGHI */
174 COSTS_N_INSNS (2), /* MH */
175 COSTS_N_INSNS (2), /* MHI */
176 COSTS_N_INSNS (4), /* ML */
177 COSTS_N_INSNS (4), /* MR */
178 COSTS_N_INSNS (5), /* MS */
179 COSTS_N_INSNS (6), /* MSG */
180 COSTS_N_INSNS (4), /* MSGF */
181 COSTS_N_INSNS (4), /* MSGFR */
182 COSTS_N_INSNS (4), /* MSGR */
183 COSTS_N_INSNS (4), /* MSR */
184 COSTS_N_INSNS (1), /* multiplication in DFmode */
185 COSTS_N_INSNS (28), /* MXBR */
186 COSTS_N_INSNS (130), /* SQXBR */
187 COSTS_N_INSNS (66), /* SQDBR */
188 COSTS_N_INSNS (38), /* SQEBR */
189 COSTS_N_INSNS (1), /* MADBR */
190 COSTS_N_INSNS (1), /* MAEBR */
191 COSTS_N_INSNS (60), /* DXBR */
192 COSTS_N_INSNS (40), /* DDBR */
193 COSTS_N_INSNS (26), /* DEBR */
194 COSTS_N_INSNS (176), /* DLGR */
195 COSTS_N_INSNS (31), /* DLR */
196 COSTS_N_INSNS (31), /* DR */
197 COSTS_N_INSNS (31), /* DSGFR */
198 COSTS_N_INSNS (31), /* DSGR */
199 };
200
201 static const
202 struct processor_costs z9_109_cost =
203 {
204 COSTS_N_INSNS (4), /* M */
205 COSTS_N_INSNS (2), /* MGHI */
206 COSTS_N_INSNS (2), /* MH */
207 COSTS_N_INSNS (2), /* MHI */
208 COSTS_N_INSNS (4), /* ML */
209 COSTS_N_INSNS (4), /* MR */
210 COSTS_N_INSNS (5), /* MS */
211 COSTS_N_INSNS (6), /* MSG */
212 COSTS_N_INSNS (4), /* MSGF */
213 COSTS_N_INSNS (4), /* MSGFR */
214 COSTS_N_INSNS (4), /* MSGR */
215 COSTS_N_INSNS (4), /* MSR */
216 COSTS_N_INSNS (1), /* multiplication in DFmode */
217 COSTS_N_INSNS (28), /* MXBR */
218 COSTS_N_INSNS (130), /* SQXBR */
219 COSTS_N_INSNS (66), /* SQDBR */
220 COSTS_N_INSNS (38), /* SQEBR */
221 COSTS_N_INSNS (1), /* MADBR */
222 COSTS_N_INSNS (1), /* MAEBR */
223 COSTS_N_INSNS (60), /* DXBR */
224 COSTS_N_INSNS (40), /* DDBR */
225 COSTS_N_INSNS (26), /* DEBR */
226 COSTS_N_INSNS (30), /* DLGR */
227 COSTS_N_INSNS (23), /* DLR */
228 COSTS_N_INSNS (23), /* DR */
229 COSTS_N_INSNS (24), /* DSGFR */
230 COSTS_N_INSNS (24), /* DSGR */
231 };
232
233 static const
234 struct processor_costs z10_cost =
235 {
236 COSTS_N_INSNS (10), /* M */
237 COSTS_N_INSNS (10), /* MGHI */
238 COSTS_N_INSNS (10), /* MH */
239 COSTS_N_INSNS (10), /* MHI */
240 COSTS_N_INSNS (10), /* ML */
241 COSTS_N_INSNS (10), /* MR */
242 COSTS_N_INSNS (10), /* MS */
243 COSTS_N_INSNS (10), /* MSG */
244 COSTS_N_INSNS (10), /* MSGF */
245 COSTS_N_INSNS (10), /* MSGFR */
246 COSTS_N_INSNS (10), /* MSGR */
247 COSTS_N_INSNS (10), /* MSR */
248 COSTS_N_INSNS (1), /* multiplication in DFmode */
249 COSTS_N_INSNS (50), /* MXBR */
250 COSTS_N_INSNS (120), /* SQXBR */
251 COSTS_N_INSNS (52), /* SQDBR */
252 COSTS_N_INSNS (38), /* SQEBR */
253 COSTS_N_INSNS (1), /* MADBR */
254 COSTS_N_INSNS (1), /* MAEBR */
255 COSTS_N_INSNS (111), /* DXBR */
256 COSTS_N_INSNS (39), /* DDBR */
257 COSTS_N_INSNS (32), /* DEBR */
258 COSTS_N_INSNS (160), /* DLGR */
259 COSTS_N_INSNS (71), /* DLR */
260 COSTS_N_INSNS (71), /* DR */
261 COSTS_N_INSNS (71), /* DSGFR */
262 COSTS_N_INSNS (71), /* DSGR */
263 };
264
265 static const
266 struct processor_costs z196_cost =
267 {
268 COSTS_N_INSNS (7), /* M */
269 COSTS_N_INSNS (5), /* MGHI */
270 COSTS_N_INSNS (5), /* MH */
271 COSTS_N_INSNS (5), /* MHI */
272 COSTS_N_INSNS (7), /* ML */
273 COSTS_N_INSNS (7), /* MR */
274 COSTS_N_INSNS (6), /* MS */
275 COSTS_N_INSNS (8), /* MSG */
276 COSTS_N_INSNS (6), /* MSGF */
277 COSTS_N_INSNS (6), /* MSGFR */
278 COSTS_N_INSNS (8), /* MSGR */
279 COSTS_N_INSNS (6), /* MSR */
280 COSTS_N_INSNS (1), /* multiplication in DFmode */
281 COSTS_N_INSNS (40), /* MXBR B+40 */
282 COSTS_N_INSNS (100), /* SQXBR B+100 */
283 COSTS_N_INSNS (42), /* SQDBR B+42 */
284 COSTS_N_INSNS (28), /* SQEBR B+28 */
285 COSTS_N_INSNS (1), /* MADBR B */
286 COSTS_N_INSNS (1), /* MAEBR B */
287 COSTS_N_INSNS (101), /* DXBR B+101 */
288 COSTS_N_INSNS (29), /* DDBR */
289 COSTS_N_INSNS (22), /* DEBR */
290 COSTS_N_INSNS (160), /* DLGR cracked */
291 COSTS_N_INSNS (160), /* DLR cracked */
292 COSTS_N_INSNS (160), /* DR expanded */
293 COSTS_N_INSNS (160), /* DSGFR cracked */
294 COSTS_N_INSNS (160), /* DSGR cracked */
295 };
296
297 static const
298 struct processor_costs zEC12_cost =
299 {
300 COSTS_N_INSNS (7), /* M */
301 COSTS_N_INSNS (5), /* MGHI */
302 COSTS_N_INSNS (5), /* MH */
303 COSTS_N_INSNS (5), /* MHI */
304 COSTS_N_INSNS (7), /* ML */
305 COSTS_N_INSNS (7), /* MR */
306 COSTS_N_INSNS (6), /* MS */
307 COSTS_N_INSNS (8), /* MSG */
308 COSTS_N_INSNS (6), /* MSGF */
309 COSTS_N_INSNS (6), /* MSGFR */
310 COSTS_N_INSNS (8), /* MSGR */
311 COSTS_N_INSNS (6), /* MSR */
312 COSTS_N_INSNS (1), /* multiplication in DFmode */
313 COSTS_N_INSNS (40), /* MXBR B+40 */
314 COSTS_N_INSNS (100), /* SQXBR B+100 */
315 COSTS_N_INSNS (42), /* SQDBR B+42 */
316 COSTS_N_INSNS (28), /* SQEBR B+28 */
317 COSTS_N_INSNS (1), /* MADBR B */
318 COSTS_N_INSNS (1), /* MAEBR B */
319 COSTS_N_INSNS (131), /* DXBR B+131 */
320 COSTS_N_INSNS (29), /* DDBR */
321 COSTS_N_INSNS (22), /* DEBR */
322 COSTS_N_INSNS (160), /* DLGR cracked */
323 COSTS_N_INSNS (160), /* DLR cracked */
324 COSTS_N_INSNS (160), /* DR expanded */
325 COSTS_N_INSNS (160), /* DSGFR cracked */
326 COSTS_N_INSNS (160), /* DSGR cracked */
327 };
328
329 static struct
330 {
331 /* The preferred name to be used in user visible output. */
332 const char *const name;
333 /* CPU name as it should be passed to Binutils via .machine */
334 const char *const binutils_name;
335 const enum processor_type processor;
336 const struct processor_costs *cost;
337 }
338 const processor_table[] =
339 {
340 { "g5", "g5", PROCESSOR_9672_G5, &z900_cost },
341 { "g6", "g6", PROCESSOR_9672_G6, &z900_cost },
342 { "z900", "z900", PROCESSOR_2064_Z900, &z900_cost },
343 { "z990", "z990", PROCESSOR_2084_Z990, &z990_cost },
344 { "z9-109", "z9-109", PROCESSOR_2094_Z9_109, &z9_109_cost },
345 { "z9-ec", "z9-ec", PROCESSOR_2094_Z9_EC, &z9_109_cost },
346 { "z10", "z10", PROCESSOR_2097_Z10, &z10_cost },
347 { "z196", "z196", PROCESSOR_2817_Z196, &z196_cost },
348 { "zEC12", "zEC12", PROCESSOR_2827_ZEC12, &zEC12_cost },
349 { "z13", "z13", PROCESSOR_2964_Z13, &zEC12_cost },
350 { "z14", "arch12", PROCESSOR_3906_Z14, &zEC12_cost },
351 { "native", "", PROCESSOR_NATIVE, NULL }
352 };
353
354 extern int reload_completed;
355
356 /* Kept up to date using the SCHED_VARIABLE_ISSUE hook. */
357 static rtx_insn *last_scheduled_insn;
358 #define MAX_SCHED_UNITS 3
359 static int last_scheduled_unit_distance[MAX_SCHED_UNITS];
360
361 #define NUM_SIDES 2
362 static int current_side = 1;
363 #define LONGRUNNING_THRESHOLD 5
364
365 /* Estimate of the number of cycles a long-running insn occupies an
366 execution unit. */
367 static unsigned fxu_longrunning[NUM_SIDES];
368 static unsigned vfu_longrunning[NUM_SIDES];
369
370 /* Factor to scale latencies by, determined by measurements. */
371 #define LATENCY_FACTOR 4
372
373 /* The maximum score added for an instruction whose unit hasn't been
374 in use for MAX_SCHED_MIX_DISTANCE steps. Increase this value to
375 give instruction mix scheduling more priority over instruction
376 grouping. */
377 #define MAX_SCHED_MIX_SCORE 8
378
379 /* The maximum distance up to which individual scores will be
380 calculated. Everything beyond this gives MAX_SCHED_MIX_SCORE.
381 Increase this with the OOO window size of the machine. */
382 #define MAX_SCHED_MIX_DISTANCE 100
383
384 /* Structure used to hold the components of a S/390 memory
385 address. A legitimate address on S/390 is of the general
386 form
387 base + index + displacement
388 where any of the components is optional.
389
390 base and index are registers of the class ADDR_REGS,
391 displacement is an unsigned 12-bit immediate constant. */
392
393 struct s390_address
394 {
395 rtx base;
396 rtx indx;
397 rtx disp;
398 bool pointer;
399 bool literal_pool;
400 };
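/* Editorial sketch (not part of the original source): how a typical memory
   operand such as "8(%r2,%r3)" would map onto the fields above.  The
   register numbers are chosen purely for illustration.

     struct s390_address ad;
     ad.base = gen_rtx_REG (Pmode, 3);   base register %r3
     ad.indx = gen_rtx_REG (Pmode, 2);   index register %r2
     ad.disp = GEN_INT (8);              unsigned 12-bit displacement
     ad.pointer = false;
     ad.literal_pool = false;

   The decomposition itself is done by s390_decompose_address later in
   this file.  */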
401
402 /* A few accessor macros for struct cfun->machine->s390_frame_layout. */
403
404 #define cfun_frame_layout (cfun->machine->frame_layout)
405 #define cfun_save_high_fprs_p (!!cfun_frame_layout.high_fprs)
406 #define cfun_save_arg_fprs_p (!!(TARGET_64BIT \
407 ? cfun_frame_layout.fpr_bitmap & 0x0f \
408 : cfun_frame_layout.fpr_bitmap & 0x03))
409 #define cfun_gprs_save_area_size ((cfun_frame_layout.last_save_gpr_slot - \
410 cfun_frame_layout.first_save_gpr_slot + 1) * UNITS_PER_LONG)
411 #define cfun_set_fpr_save(REGNO) (cfun->machine->frame_layout.fpr_bitmap |= \
412 (1 << (REGNO - FPR0_REGNUM)))
413 #define cfun_fpr_save_p(REGNO) (!!(cfun->machine->frame_layout.fpr_bitmap & \
414 (1 << (REGNO - FPR0_REGNUM))))
415 #define cfun_gpr_save_slot(REGNO) \
416 cfun->machine->frame_layout.gpr_save_slots[REGNO]
417
418 /* Number of GPRs and FPRs used for argument passing. */
419 #define GP_ARG_NUM_REG 5
420 #define FP_ARG_NUM_REG (TARGET_64BIT ? 4 : 2)
421 #define VEC_ARG_NUM_REG 8
422
423 /* A couple of shortcuts. */
424 #define CONST_OK_FOR_J(x) \
425 CONST_OK_FOR_CONSTRAINT_P((x), 'J', "J")
426 #define CONST_OK_FOR_K(x) \
427 CONST_OK_FOR_CONSTRAINT_P((x), 'K', "K")
428 #define CONST_OK_FOR_Os(x) \
429 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "Os")
430 #define CONST_OK_FOR_Op(x) \
431 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "Op")
432 #define CONST_OK_FOR_On(x) \
433 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "On")
434
435 #define REGNO_PAIR_OK(REGNO, MODE) \
436 (s390_hard_regno_nregs ((REGNO), (MODE)) == 1 || !((REGNO) & 1))
437
438 /* The read-ahead distance, in bytes, of the dynamic branch prediction
439 unit on a z10 (or higher) CPU. */
440 #define PREDICT_DISTANCE (TARGET_Z10 ? 384 : 2048)
441
442 /* Masks per jump target register indicating which thunks need to be
443 generated. */
444 static GTY(()) int indirect_branch_prez10thunk_mask = 0;
445 static GTY(()) int indirect_branch_z10thunk_mask = 0;
446
447 #define INDIRECT_BRANCH_NUM_OPTIONS 4
448
449 enum s390_indirect_branch_option
450 {
451 s390_opt_indirect_branch_jump = 0,
452 s390_opt_indirect_branch_call,
453 s390_opt_function_return_reg,
454 s390_opt_function_return_mem
455 };
456
457 static GTY(()) int indirect_branch_table_label_no[INDIRECT_BRANCH_NUM_OPTIONS] = { 0 };
458 const char *indirect_branch_table_label[INDIRECT_BRANCH_NUM_OPTIONS] = \
459 { "LJUMP", "LCALL", "LRETREG", "LRETMEM" };
460 const char *indirect_branch_table_name[INDIRECT_BRANCH_NUM_OPTIONS] = \
461 { ".s390_indirect_jump", ".s390_indirect_call",
462 ".s390_return_reg", ".s390_return_mem" };
463
464 bool
465 s390_return_addr_from_memory ()
466 {
467 return cfun_gpr_save_slot(RETURN_REGNUM) == SAVE_SLOT_STACK;
468 }
469
470 /* Indicate which ABI has been used for passing vector args.
471 0 - no vector type arguments have been passed where the ABI is relevant
472 1 - the old ABI has been used
473 2 - a vector type argument has been passed either in a vector register
474 or on the stack by value */
475 static int s390_vector_abi = 0;
476
477 /* Set the vector ABI marker if TYPE is subject to the vector ABI
478 switch. The vector ABI affects only vector data types. There are
479 two aspects of the vector ABI relevant here:
480
481 1. vectors >= 16 bytes have an alignment of 8 bytes with the new
482 ABI and natural alignment with the old.
483
484 2. vectors <= 16 bytes are passed in VRs or by value on the stack
485 with the new ABI, but by reference on the stack with the old.
486
487 If ARG_P is true TYPE is used for a function argument or return
488 value. The ABI marker then is set for all vector data types. If
489 ARG_P is false only type 1 vectors are being checked. */
490
491 static void
492 s390_check_type_for_vector_abi (const_tree type, bool arg_p, bool in_struct_p)
493 {
494 static hash_set<const_tree> visited_types_hash;
495
496 if (s390_vector_abi)
497 return;
498
499 if (type == NULL_TREE || TREE_CODE (type) == ERROR_MARK)
500 return;
501
502 if (visited_types_hash.contains (type))
503 return;
504
505 visited_types_hash.add (type);
506
507 if (VECTOR_TYPE_P (type))
508 {
509 int type_size = int_size_in_bytes (type);
510
511 /* Outside of arguments, only the alignment changes, and this
512 only happens for vector types >= 16 bytes. */
513 if (!arg_p && type_size < 16)
514 return;
515
516 /* In arguments, vector types > 16 bytes are passed as before (GCC
517 never enforced the bigger alignment for arguments that was
518 required by the old vector ABI). However, the type might still
519 be ABI relevant due to the changed alignment if it is a struct
520 member. */
521 if (arg_p && type_size > 16 && !in_struct_p)
522 return;
523
524 s390_vector_abi = TARGET_VX_ABI ? 2 : 1;
525 }
526 else if (POINTER_TYPE_P (type) || TREE_CODE (type) == ARRAY_TYPE)
527 {
528 /* ARRAY_TYPE: Since neither ABI requires more than natural
529 alignment here, there will never be ABI-dependent padding in an
530 array type. That's why we do not set in_struct_p to
531 true here. */
532 s390_check_type_for_vector_abi (TREE_TYPE (type), arg_p, in_struct_p);
533 }
534 else if (TREE_CODE (type) == FUNCTION_TYPE || TREE_CODE (type) == METHOD_TYPE)
535 {
536 tree arg_chain;
537
538 /* Check the return type. */
539 s390_check_type_for_vector_abi (TREE_TYPE (type), true, false);
540
541 for (arg_chain = TYPE_ARG_TYPES (type);
542 arg_chain;
543 arg_chain = TREE_CHAIN (arg_chain))
544 s390_check_type_for_vector_abi (TREE_VALUE (arg_chain), true, false);
545 }
546 else if (RECORD_OR_UNION_TYPE_P (type))
547 {
548 tree field;
549
550 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
551 {
552 if (TREE_CODE (field) != FIELD_DECL)
553 continue;
554
555 s390_check_type_for_vector_abi (TREE_TYPE (field), arg_p, true);
556 }
557 }
558 }
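/* Editorial example (not part of the original source): a user-level type
   that would set the ABI marker under the rules above.  The typedef name
   is purely illustrative.

     typedef int v4si __attribute__ ((vector_size (16)));

     void foo (v4si x);   16-byte vector argument: passed in a VR (or by
                          value on the stack) with the new ABI, but by
                          reference on the stack with the old one, so
                          s390_vector_abi gets set when foo's type is
                          checked.  */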
559
560
561 /* System z builtins. */
562
563 #include "s390-builtins.h"
564
565 const unsigned int bflags_builtin[S390_BUILTIN_MAX + 1] =
566 {
567 #undef B_DEF
568 #undef OB_DEF
569 #undef OB_DEF_VAR
570 #define B_DEF(NAME, PATTERN, ATTRS, BFLAGS, ...) BFLAGS,
571 #define OB_DEF(...)
572 #define OB_DEF_VAR(...)
573 #include "s390-builtins.def"
574 0
575 };
576
577 const unsigned int opflags_builtin[S390_BUILTIN_MAX + 1] =
578 {
579 #undef B_DEF
580 #undef OB_DEF
581 #undef OB_DEF_VAR
582 #define B_DEF(NAME, PATTERN, ATTRS, BFLAGS, OPFLAGS, ...) OPFLAGS,
583 #define OB_DEF(...)
584 #define OB_DEF_VAR(...)
585 #include "s390-builtins.def"
586 0
587 };
588
589 const unsigned int bflags_overloaded_builtin[S390_OVERLOADED_BUILTIN_MAX + 1] =
590 {
591 #undef B_DEF
592 #undef OB_DEF
593 #undef OB_DEF_VAR
594 #define B_DEF(...)
595 #define OB_DEF(NAME, FIRST_VAR_NAME, LAST_VAR_NAME, BFLAGS, ...) BFLAGS,
596 #define OB_DEF_VAR(...)
597 #include "s390-builtins.def"
598 0
599 };
600
601 const unsigned int
602 bflags_overloaded_builtin_var[S390_OVERLOADED_BUILTIN_VAR_MAX + 1] =
603 {
604 #undef B_DEF
605 #undef OB_DEF
606 #undef OB_DEF_VAR
607 #define B_DEF(...)
608 #define OB_DEF(...)
609 #define OB_DEF_VAR(NAME, PATTERN, FLAGS, OPFLAGS, FNTYPE) FLAGS,
610 #include "s390-builtins.def"
611 0
612 };
613
614 const unsigned int
615 opflags_overloaded_builtin_var[S390_OVERLOADED_BUILTIN_VAR_MAX + 1] =
616 {
617 #undef B_DEF
618 #undef OB_DEF
619 #undef OB_DEF_VAR
620 #define B_DEF(...)
621 #define OB_DEF(...)
622 #define OB_DEF_VAR(NAME, PATTERN, FLAGS, OPFLAGS, FNTYPE) OPFLAGS,
623 #include "s390-builtins.def"
624 0
625 };
626
627 tree s390_builtin_types[BT_MAX];
628 tree s390_builtin_fn_types[BT_FN_MAX];
629 tree s390_builtin_decls[S390_BUILTIN_MAX +
630 S390_OVERLOADED_BUILTIN_MAX +
631 S390_OVERLOADED_BUILTIN_VAR_MAX];
632
633 static enum insn_code const code_for_builtin[S390_BUILTIN_MAX + 1] = {
634 #undef B_DEF
635 #undef OB_DEF
636 #undef OB_DEF_VAR
637 #define B_DEF(NAME, PATTERN, ...) CODE_FOR_##PATTERN,
638 #define OB_DEF(...)
639 #define OB_DEF_VAR(...)
640
641 #include "s390-builtins.def"
642 CODE_FOR_nothing
643 };
644
645 static void
646 s390_init_builtins (void)
647 {
648 /* These definitions are being used in s390-builtins.def. */
649 tree returns_twice_attr = tree_cons (get_identifier ("returns_twice"),
650 NULL, NULL);
651 tree noreturn_attr = tree_cons (get_identifier ("noreturn"), NULL, NULL);
652 tree c_uint64_type_node;
653
654 /* The uint64_type_node from tree.c is not compatible with the C99
655 uint64_t data type. What we want is c_uint64_type_node from
656 c-common.c. But since backend code is not supposed to interface
657 with the frontend, we recreate it here. */
658 if (TARGET_64BIT)
659 c_uint64_type_node = long_unsigned_type_node;
660 else
661 c_uint64_type_node = long_long_unsigned_type_node;
662
663 #undef DEF_TYPE
664 #define DEF_TYPE(INDEX, NODE, CONST_P) \
665 if (s390_builtin_types[INDEX] == NULL) \
666 s390_builtin_types[INDEX] = (!CONST_P) ? \
667 (NODE) : build_type_variant ((NODE), 1, 0);
668
669 #undef DEF_POINTER_TYPE
670 #define DEF_POINTER_TYPE(INDEX, INDEX_BASE) \
671 if (s390_builtin_types[INDEX] == NULL) \
672 s390_builtin_types[INDEX] = \
673 build_pointer_type (s390_builtin_types[INDEX_BASE]);
674
675 #undef DEF_DISTINCT_TYPE
676 #define DEF_DISTINCT_TYPE(INDEX, INDEX_BASE) \
677 if (s390_builtin_types[INDEX] == NULL) \
678 s390_builtin_types[INDEX] = \
679 build_distinct_type_copy (s390_builtin_types[INDEX_BASE]);
680
681 #undef DEF_VECTOR_TYPE
682 #define DEF_VECTOR_TYPE(INDEX, INDEX_BASE, ELEMENTS) \
683 if (s390_builtin_types[INDEX] == NULL) \
684 s390_builtin_types[INDEX] = \
685 build_vector_type (s390_builtin_types[INDEX_BASE], ELEMENTS);
686
687 #undef DEF_OPAQUE_VECTOR_TYPE
688 #define DEF_OPAQUE_VECTOR_TYPE(INDEX, INDEX_BASE, ELEMENTS) \
689 if (s390_builtin_types[INDEX] == NULL) \
690 s390_builtin_types[INDEX] = \
691 build_opaque_vector_type (s390_builtin_types[INDEX_BASE], ELEMENTS);
692
693 #undef DEF_FN_TYPE
694 #define DEF_FN_TYPE(INDEX, args...) \
695 if (s390_builtin_fn_types[INDEX] == NULL) \
696 s390_builtin_fn_types[INDEX] = \
697 build_function_type_list (args, NULL_TREE);
698 #undef DEF_OV_TYPE
699 #define DEF_OV_TYPE(...)
700 #include "s390-builtin-types.def"
701
702 #undef B_DEF
703 #define B_DEF(NAME, PATTERN, ATTRS, BFLAGS, OPFLAGS, FNTYPE) \
704 if (s390_builtin_decls[S390_BUILTIN_##NAME] == NULL) \
705 s390_builtin_decls[S390_BUILTIN_##NAME] = \
706 add_builtin_function ("__builtin_" #NAME, \
707 s390_builtin_fn_types[FNTYPE], \
708 S390_BUILTIN_##NAME, \
709 BUILT_IN_MD, \
710 NULL, \
711 ATTRS);
712 #undef OB_DEF
713 #define OB_DEF(NAME, FIRST_VAR_NAME, LAST_VAR_NAME, BFLAGS, FNTYPE) \
714 if (s390_builtin_decls[S390_OVERLOADED_BUILTIN_##NAME + S390_BUILTIN_MAX] \
715 == NULL) \
716 s390_builtin_decls[S390_OVERLOADED_BUILTIN_##NAME + S390_BUILTIN_MAX] = \
717 add_builtin_function ("__builtin_" #NAME, \
718 s390_builtin_fn_types[FNTYPE], \
719 S390_OVERLOADED_BUILTIN_##NAME + S390_BUILTIN_MAX, \
720 BUILT_IN_MD, \
721 NULL, \
722 0);
723 #undef OB_DEF_VAR
724 #define OB_DEF_VAR(...)
725 #include "s390-builtins.def"
726
727 }
728
729 /* Return true if ARG is appropriate as argument number ARGNUM of
730 builtin DECL. The operand flags from s390-builtins.def have to be
731 passed as OP_FLAGS. */
732 bool
733 s390_const_operand_ok (tree arg, int argnum, int op_flags, tree decl)
734 {
735 if (O_UIMM_P (op_flags))
736 {
737 int bitwidths[] = { 1, 2, 3, 4, 5, 8, 12, 16, 32 };
738 int bitwidth = bitwidths[op_flags - O_U1];
739
740 if (!tree_fits_uhwi_p (arg)
741 || tree_to_uhwi (arg) > (HOST_WIDE_INT_1U << bitwidth) - 1)
742 {
743 error("constant argument %d for builtin %qF is out of range (0.."
744 HOST_WIDE_INT_PRINT_UNSIGNED ")",
745 argnum, decl,
746 (HOST_WIDE_INT_1U << bitwidth) - 1);
747 return false;
748 }
749 }
750
751 if (O_SIMM_P (op_flags))
752 {
753 int bitwidths[] = { 2, 3, 4, 5, 8, 12, 16, 32 };
754 int bitwidth = bitwidths[op_flags - O_S2];
755
756 if (!tree_fits_shwi_p (arg)
757 || tree_to_shwi (arg) < -(HOST_WIDE_INT_1 << (bitwidth - 1))
758 || tree_to_shwi (arg) > ((HOST_WIDE_INT_1 << (bitwidth - 1)) - 1))
759 {
760 error("constant argument %d for builtin %qF is out of range ("
761 HOST_WIDE_INT_PRINT_DEC ".."
762 HOST_WIDE_INT_PRINT_DEC ")",
763 argnum, decl,
764 -(HOST_WIDE_INT_1 << (bitwidth - 1)),
765 (HOST_WIDE_INT_1 << (bitwidth - 1)) - 1);
766 return false;
767 }
768 }
769 return true;
770 }
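/* Worked example (editorial, based on the checks above): for an unsigned
   operand flagged O_U4 -- assuming the O_U* enumerators are laid out
   consecutively, as the bitwidths[] table suggests -- bitwidth is 4, so the
   accepted range is 0 .. (1 << 4) - 1 = 15.  For a signed O_S8 operand the
   accepted range is -(1 << 7) .. (1 << 7) - 1, i.e. -128 .. 127.  */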
771
772 /* Expand an expression EXP that calls a built-in function,
773 with result going to TARGET if that's convenient
774 (and in mode MODE if that's convenient).
775 SUBTARGET may be used as the target for computing one of EXP's operands.
776 IGNORE is nonzero if the value is to be ignored. */
777
778 static rtx
779 s390_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
780 machine_mode mode ATTRIBUTE_UNUSED,
781 int ignore ATTRIBUTE_UNUSED)
782 {
783 #define MAX_ARGS 6
784
785 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
786 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
787 enum insn_code icode;
788 rtx op[MAX_ARGS], pat;
789 int arity;
790 bool nonvoid;
791 tree arg;
792 call_expr_arg_iterator iter;
793 unsigned int all_op_flags = opflags_for_builtin (fcode);
794 machine_mode last_vec_mode = VOIDmode;
795
796 if (TARGET_DEBUG_ARG)
797 {
798 fprintf (stderr,
799 "s390_expand_builtin, code = %4d, %s, bflags = 0x%x\n",
800 (int)fcode, IDENTIFIER_POINTER (DECL_NAME (fndecl)),
801 bflags_for_builtin (fcode));
802 }
803
804 if (S390_USE_TARGET_ATTRIBUTE)
805 {
806 unsigned int bflags;
807
808 bflags = bflags_for_builtin (fcode);
809 if ((bflags & B_HTM) && !TARGET_HTM)
810 {
811 error ("builtin %qF is not supported without -mhtm "
812 "(default with -march=zEC12 and higher).", fndecl);
813 return const0_rtx;
814 }
815 if (((bflags & B_VX) || (bflags & B_VXE)) && !TARGET_VX)
816 {
817 error ("builtin %qF requires -mvx "
818 "(default with -march=z13 and higher).", fndecl);
819 return const0_rtx;
820 }
821
822 if ((bflags & B_VXE) && !TARGET_VXE)
823 {
824 error ("Builtin %qF requires z14 or higher.", fndecl);
825 return const0_rtx;
826 }
827 }
828 if (fcode >= S390_OVERLOADED_BUILTIN_VAR_OFFSET
829 && fcode < S390_ALL_BUILTIN_MAX)
830 {
831 gcc_unreachable ();
832 }
833 else if (fcode < S390_OVERLOADED_BUILTIN_OFFSET)
834 {
835 icode = code_for_builtin[fcode];
836 /* Set a flag in the machine specific cfun part in order to support
837 saving/restoring of FPRs. */
838 if (fcode == S390_BUILTIN_tbegin || fcode == S390_BUILTIN_tbegin_retry)
839 cfun->machine->tbegin_p = true;
840 }
841 else if (fcode < S390_OVERLOADED_BUILTIN_VAR_OFFSET)
842 {
843 error ("unresolved overloaded builtin");
844 return const0_rtx;
845 }
846 else
847 internal_error ("bad builtin fcode");
848
849 if (icode == 0)
850 internal_error ("bad builtin icode");
851
852 nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
853
854 if (nonvoid)
855 {
856 machine_mode tmode = insn_data[icode].operand[0].mode;
857 if (!target
858 || GET_MODE (target) != tmode
859 || !(*insn_data[icode].operand[0].predicate) (target, tmode))
860 target = gen_reg_rtx (tmode);
861
862 /* There are builtins (e.g. vec_promote) with no vector
863 arguments but an element selector. So we have to also look
864 at the vector return type when emitting the modulo
865 operation. */
866 if (VECTOR_MODE_P (insn_data[icode].operand[0].mode))
867 last_vec_mode = insn_data[icode].operand[0].mode;
868 }
869
870 arity = 0;
871 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
872 {
873 rtx tmp_rtx;
874 const struct insn_operand_data *insn_op;
875 unsigned int op_flags = all_op_flags & ((1 << O_SHIFT) - 1);
876
877 all_op_flags = all_op_flags >> O_SHIFT;
878
879 if (arg == error_mark_node)
880 return NULL_RTX;
881 if (arity >= MAX_ARGS)
882 return NULL_RTX;
883
884 if (O_IMM_P (op_flags)
885 && TREE_CODE (arg) != INTEGER_CST)
886 {
887 error ("constant value required for builtin %qF argument %d",
888 fndecl, arity + 1);
889 return const0_rtx;
890 }
891
892 if (!s390_const_operand_ok (arg, arity + 1, op_flags, fndecl))
893 return const0_rtx;
894
895 insn_op = &insn_data[icode].operand[arity + nonvoid];
896 op[arity] = expand_expr (arg, NULL_RTX, insn_op->mode, EXPAND_NORMAL);
897
898 /* expand_expr truncates constants to the target mode only if it
899 is "convenient". However, our checks below rely on this
900 being done. */
901 if (CONST_INT_P (op[arity])
902 && SCALAR_INT_MODE_P (insn_op->mode)
903 && GET_MODE (op[arity]) != insn_op->mode)
904 op[arity] = GEN_INT (trunc_int_for_mode (INTVAL (op[arity]),
905 insn_op->mode));
906
907 /* Wrap the expanded RTX for pointer types into a MEM expr with
908 the proper mode. This allows us to use e.g. (match_operand
909 "memory_operand" ...) in the insn patterns instead of (mem
910 (match_operand "address_operand" ...)). This is helpful for
911 patterns that do not just accept MEMs. */
912 if (POINTER_TYPE_P (TREE_TYPE (arg))
913 && insn_op->predicate != address_operand)
914 op[arity] = gen_rtx_MEM (insn_op->mode, op[arity]);
915
916 /* Expand the modulo operation required on element selectors. */
917 if (op_flags == O_ELEM)
918 {
919 gcc_assert (last_vec_mode != VOIDmode);
920 op[arity] = simplify_expand_binop (SImode, code_to_optab (AND),
921 op[arity],
922 GEN_INT (GET_MODE_NUNITS (last_vec_mode) - 1),
923 NULL_RTX, 1, OPTAB_DIRECT);
924 }
925
926 /* Record the vector mode used for an element selector. This assumes:
927 1. There is no builtin with two different vector modes and an element selector
928 2. The element selector comes after the vector type it is referring to.
929 This is currently true for all the builtins, but FIXME: we
930 should check for that explicitly. */
931 if (VECTOR_MODE_P (insn_op->mode))
932 last_vec_mode = insn_op->mode;
933
934 if (insn_op->predicate (op[arity], insn_op->mode))
935 {
936 arity++;
937 continue;
938 }
939
940 if (MEM_P (op[arity])
941 && insn_op->predicate == memory_operand
942 && (GET_MODE (XEXP (op[arity], 0)) == Pmode
943 || GET_MODE (XEXP (op[arity], 0)) == VOIDmode))
944 {
945 op[arity] = replace_equiv_address (op[arity],
946 copy_to_mode_reg (Pmode,
947 XEXP (op[arity], 0)));
948 }
949 /* Some of the builtins require different modes/types than the
950 pattern in order to implement a specific API. Instead of
951 adding many expanders which do the mode change, we do it here.
952 E.g. s390_vec_add_u128, which is required to have vector unsigned
953 char arguments, is mapped to addti3. */
954 else if (insn_op->mode != VOIDmode
955 && GET_MODE (op[arity]) != VOIDmode
956 && GET_MODE (op[arity]) != insn_op->mode
957 && ((tmp_rtx = simplify_gen_subreg (insn_op->mode, op[arity],
958 GET_MODE (op[arity]), 0))
959 != NULL_RTX))
960 {
961 op[arity] = tmp_rtx;
962 }
963 else if (GET_MODE (op[arity]) == insn_op->mode
964 || GET_MODE (op[arity]) == VOIDmode
965 || (insn_op->predicate == address_operand
966 && GET_MODE (op[arity]) == Pmode))
967 {
968 /* An address_operand usually has VOIDmode in the expander
969 so we cannot use this. */
970 machine_mode target_mode =
971 (insn_op->predicate == address_operand
972 ? (machine_mode) Pmode : insn_op->mode);
973 op[arity] = copy_to_mode_reg (target_mode, op[arity]);
974 }
975
976 if (!insn_op->predicate (op[arity], insn_op->mode))
977 {
978 error ("invalid argument %d for builtin %qF", arity + 1, fndecl);
979 return const0_rtx;
980 }
981 arity++;
982 }
983
984 switch (arity)
985 {
986 case 0:
987 pat = GEN_FCN (icode) (target);
988 break;
989 case 1:
990 if (nonvoid)
991 pat = GEN_FCN (icode) (target, op[0]);
992 else
993 pat = GEN_FCN (icode) (op[0]);
994 break;
995 case 2:
996 if (nonvoid)
997 pat = GEN_FCN (icode) (target, op[0], op[1]);
998 else
999 pat = GEN_FCN (icode) (op[0], op[1]);
1000 break;
1001 case 3:
1002 if (nonvoid)
1003 pat = GEN_FCN (icode) (target, op[0], op[1], op[2]);
1004 else
1005 pat = GEN_FCN (icode) (op[0], op[1], op[2]);
1006 break;
1007 case 4:
1008 if (nonvoid)
1009 pat = GEN_FCN (icode) (target, op[0], op[1], op[2], op[3]);
1010 else
1011 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
1012 break;
1013 case 5:
1014 if (nonvoid)
1015 pat = GEN_FCN (icode) (target, op[0], op[1], op[2], op[3], op[4]);
1016 else
1017 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3], op[4]);
1018 break;
1019 case 6:
1020 if (nonvoid)
1021 pat = GEN_FCN (icode) (target, op[0], op[1], op[2], op[3], op[4], op[5]);
1022 else
1023 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3], op[4], op[5]);
1024 break;
1025 default:
1026 gcc_unreachable ();
1027 }
1028 if (!pat)
1029 return NULL_RTX;
1030 emit_insn (pat);
1031
1032 if (nonvoid)
1033 return target;
1034 else
1035 return const0_rtx;
1036 }
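/* Editorial note (not part of the original source): the O_ELEM handling
   above wraps element selectors by ANDing them with the element count
   minus one.  E.g. for a V4SImode operand GET_MODE_NUNITS is 4, so a
   selector expression i becomes (i & 3); builtins with an element
   selector (such as the vec_promote case mentioned above) therefore see
   the selector reduced modulo the number of vector elements.  */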
1037
1038
1039 static const int s390_hotpatch_hw_max = 1000000;
1040 static int s390_hotpatch_hw_before_label = 0;
1041 static int s390_hotpatch_hw_after_label = 0;
1042
1043 /* Check whether the hotpatch attribute is applied to a function and, if it
1044 has arguments, whether the arguments are valid. */
1045
1046 static tree
1047 s390_handle_hotpatch_attribute (tree *node, tree name, tree args,
1048 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
1049 {
1050 tree expr;
1051 tree expr2;
1052 int err;
1053
1054 if (TREE_CODE (*node) != FUNCTION_DECL)
1055 {
1056 warning (OPT_Wattributes, "%qE attribute only applies to functions",
1057 name);
1058 *no_add_attrs = true;
1059 }
1060 if (args != NULL && TREE_CHAIN (args) != NULL)
1061 {
1062 expr = TREE_VALUE (args);
1063 expr2 = TREE_VALUE (TREE_CHAIN (args));
1064 }
1065 if (args == NULL || TREE_CHAIN (args) == NULL)
1066 err = 1;
1067 else if (TREE_CODE (expr) != INTEGER_CST
1068 || !INTEGRAL_TYPE_P (TREE_TYPE (expr))
1069 || wi::gtu_p (wi::to_wide (expr), s390_hotpatch_hw_max))
1070 err = 1;
1071 else if (TREE_CODE (expr2) != INTEGER_CST
1072 || !INTEGRAL_TYPE_P (TREE_TYPE (expr2))
1073 || wi::gtu_p (wi::to_wide (expr2), s390_hotpatch_hw_max))
1074 err = 1;
1075 else
1076 err = 0;
1077 if (err)
1078 {
1079 error ("requested %qE attribute is not a comma separated pair of"
1080 " non-negative integer constants or too large (max. %d)", name,
1081 s390_hotpatch_hw_max);
1082 *no_add_attrs = true;
1083 }
1084
1085 return NULL_TREE;
1086 }
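/* Editorial usage example (not part of the original source): the attribute
   validated above is written on a function declaration as

     void foo (void) __attribute__ ((hotpatch (1, 2)));

   where the two constants give the number of halfwords of padding emitted
   before the function label and after it, each at most
   s390_hotpatch_hw_max.  */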
1087
1088 /* Expand the s390_vector_bool type attribute. */
1089
1090 static tree
1091 s390_handle_vectorbool_attribute (tree *node, tree name ATTRIBUTE_UNUSED,
1092 tree args ATTRIBUTE_UNUSED,
1093 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
1094 {
1095 tree type = *node, result = NULL_TREE;
1096 machine_mode mode;
1097
1098 while (POINTER_TYPE_P (type)
1099 || TREE_CODE (type) == FUNCTION_TYPE
1100 || TREE_CODE (type) == METHOD_TYPE
1101 || TREE_CODE (type) == ARRAY_TYPE)
1102 type = TREE_TYPE (type);
1103
1104 mode = TYPE_MODE (type);
1105 switch (mode)
1106 {
1107 case E_DImode: case E_V2DImode:
1108 result = s390_builtin_types[BT_BV2DI];
1109 break;
1110 case E_SImode: case E_V4SImode:
1111 result = s390_builtin_types[BT_BV4SI];
1112 break;
1113 case E_HImode: case E_V8HImode:
1114 result = s390_builtin_types[BT_BV8HI];
1115 break;
1116 case E_QImode: case E_V16QImode:
1117 result = s390_builtin_types[BT_BV16QI];
1118 break;
1119 default:
1120 break;
1121 }
1122
1123 *no_add_attrs = true; /* No need to hang on to the attribute. */
1124
1125 if (result)
1126 *node = lang_hooks.types.reconstruct_complex_type (*node, result);
1127
1128 return NULL_TREE;
1129 }
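/* Editorial sketch (not part of the original source) of how the attribute
   handled above might be spelled by a user; the typedef name and exact
   attribute placement are illustrative assumptions:

     typedef unsigned int vbool32
       __attribute__ ((vector_size (16), s390_vector_bool));

   The handler sees a V4SImode vector type and rewrites it to the BT_BV4SI
   boolean vector type built in s390_init_builtins.  */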
1130
1131 /* Check syntax of function decl attributes having a string type value. */
1132
1133 static tree
1134 s390_handle_string_attribute (tree *node, tree name ATTRIBUTE_UNUSED,
1135 tree args ATTRIBUTE_UNUSED,
1136 int flags ATTRIBUTE_UNUSED,
1137 bool *no_add_attrs)
1138 {
1139 tree cst;
1140
1141 if (TREE_CODE (*node) != FUNCTION_DECL)
1142 {
1143 warning (OPT_Wattributes, "%qE attribute only applies to functions",
1144 name);
1145 *no_add_attrs = true;
1146 }
1147
1148 cst = TREE_VALUE (args);
1149
1150 if (TREE_CODE (cst) != STRING_CST)
1151 {
1152 warning (OPT_Wattributes,
1153 "%qE attribute requires a string constant argument",
1154 name);
1155 *no_add_attrs = true;
1156 }
1157
1158 if (is_attribute_p ("indirect_branch", name)
1159 || is_attribute_p ("indirect_branch_call", name)
1160 || is_attribute_p ("function_return", name)
1161 || is_attribute_p ("function_return_reg", name)
1162 || is_attribute_p ("function_return_mem", name))
1163 {
1164 if (strcmp (TREE_STRING_POINTER (cst), "keep") != 0
1165 && strcmp (TREE_STRING_POINTER (cst), "thunk") != 0
1166 && strcmp (TREE_STRING_POINTER (cst), "thunk-extern") != 0)
1167 {
1168 warning (OPT_Wattributes,
1169 "argument to %qE attribute is not "
1170 "(keep|thunk|thunk-extern)", name);
1171 *no_add_attrs = true;
1172 }
1173 }
1174
1175 if (is_attribute_p ("indirect_branch_jump", name)
1176 && strcmp (TREE_STRING_POINTER (cst), "keep") != 0
1177 && strcmp (TREE_STRING_POINTER (cst), "thunk") != 0
1178 && strcmp (TREE_STRING_POINTER (cst), "thunk-inline") != 0
1179 && strcmp (TREE_STRING_POINTER (cst), "thunk-extern") != 0)
1180 {
1181 warning (OPT_Wattributes,
1182 "argument to %qE attribute is not "
1183 "(keep|thunk|thunk-inline|thunk-extern)", name);
1184 *no_add_attrs = true;
1185 }
1186
1187 return NULL_TREE;
1188 }
1189
1190 static const struct attribute_spec s390_attribute_table[] = {
1191 { "hotpatch", 2, 2, true, false, false, false,
1192 s390_handle_hotpatch_attribute, NULL },
1193 { "s390_vector_bool", 0, 0, false, true, false, true,
1194 s390_handle_vectorbool_attribute, NULL },
1195 { "indirect_branch", 1, 1, true, false, false, false,
1196 s390_handle_string_attribute, NULL },
1197 { "indirect_branch_jump", 1, 1, true, false, false, false,
1198 s390_handle_string_attribute, NULL },
1199 { "indirect_branch_call", 1, 1, true, false, false, false,
1200 s390_handle_string_attribute, NULL },
1201 { "function_return", 1, 1, true, false, false, false,
1202 s390_handle_string_attribute, NULL },
1203 { "function_return_reg", 1, 1, true, false, false, false,
1204 s390_handle_string_attribute, NULL },
1205 { "function_return_mem", 1, 1, true, false, false, false,
1206 s390_handle_string_attribute, NULL },
1207
1208 /* End element. */
1209 { NULL, 0, 0, false, false, false, false, NULL, NULL }
1210 };
1211
1212 /* Return the alignment for LABEL. We default to the -falign-labels
1213 value except for the literal pool base label. */
1214 int
1215 s390_label_align (rtx_insn *label)
1216 {
1217 rtx_insn *prev_insn = prev_active_insn (label);
1218 rtx set, src;
1219
1220 if (prev_insn == NULL_RTX)
1221 goto old;
1222
1223 set = single_set (prev_insn);
1224
1225 if (set == NULL_RTX)
1226 goto old;
1227
1228 src = SET_SRC (set);
1229
1230 /* Don't align literal pool base labels. */
1231 if (GET_CODE (src) == UNSPEC
1232 && XINT (src, 1) == UNSPEC_MAIN_BASE)
1233 return 0;
1234
1235 old:
1236 return align_labels.levels[0].log;
1237 }
1238
1239 static GTY(()) rtx got_symbol;
1240
1241 /* Return the GOT table symbol. The symbol will be created when the
1242 function is invoked for the first time. */
1243
1244 static rtx
1245 s390_got_symbol (void)
1246 {
1247 if (!got_symbol)
1248 {
1249 got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
1250 SYMBOL_REF_FLAGS (got_symbol) = SYMBOL_FLAG_LOCAL;
1251 }
1252
1253 return got_symbol;
1254 }
1255
1256 static scalar_int_mode
1257 s390_libgcc_cmp_return_mode (void)
1258 {
1259 return TARGET_64BIT ? DImode : SImode;
1260 }
1261
1262 static scalar_int_mode
1263 s390_libgcc_shift_count_mode (void)
1264 {
1265 return TARGET_64BIT ? DImode : SImode;
1266 }
1267
1268 static scalar_int_mode
1269 s390_unwind_word_mode (void)
1270 {
1271 return TARGET_64BIT ? DImode : SImode;
1272 }
1273
1274 /* Return true if the back end supports mode MODE. */
1275 static bool
1276 s390_scalar_mode_supported_p (scalar_mode mode)
1277 {
1278 /* In contrast to the default implementation, reject TImode constants on
1279 31-bit TARGET_ZARCH for ABI compliance. */
1280 if (!TARGET_64BIT && TARGET_ZARCH && mode == TImode)
1281 return false;
1282
1283 if (DECIMAL_FLOAT_MODE_P (mode))
1284 return default_decimal_float_supported_p ();
1285
1286 return default_scalar_mode_supported_p (mode);
1287 }
1288
1289 /* Return true if the back end supports vector mode MODE. */
1290 static bool
1291 s390_vector_mode_supported_p (machine_mode mode)
1292 {
1293 machine_mode inner;
1294
1295 if (!VECTOR_MODE_P (mode)
1296 || !TARGET_VX
1297 || GET_MODE_SIZE (mode) > 16)
1298 return false;
1299
1300 inner = GET_MODE_INNER (mode);
1301
1302 switch (inner)
1303 {
1304 case E_QImode:
1305 case E_HImode:
1306 case E_SImode:
1307 case E_DImode:
1308 case E_TImode:
1309 case E_SFmode:
1310 case E_DFmode:
1311 case E_TFmode:
1312 return true;
1313 default:
1314 return false;
1315 }
1316 }
1317
1318 /* Set the has_landing_pad_p flag in struct machine_function to VALUE. */
1319
1320 void
1321 s390_set_has_landing_pad_p (bool value)
1322 {
1323 cfun->machine->has_landing_pad_p = value;
1324 }
1325
1326 /* If two condition code modes are compatible, return a condition code
1327 mode which is compatible with both. Otherwise, return
1328 VOIDmode. */
1329
1330 static machine_mode
1331 s390_cc_modes_compatible (machine_mode m1, machine_mode m2)
1332 {
1333 if (m1 == m2)
1334 return m1;
1335
1336 switch (m1)
1337 {
1338 case E_CCZmode:
1339 if (m2 == CCUmode || m2 == CCTmode || m2 == CCZ1mode
1340 || m2 == CCSmode || m2 == CCSRmode || m2 == CCURmode)
1341 return m2;
1342 return VOIDmode;
1343
1344 case E_CCSmode:
1345 case E_CCUmode:
1346 case E_CCTmode:
1347 case E_CCSRmode:
1348 case E_CCURmode:
1349 case E_CCZ1mode:
1350 if (m2 == CCZmode)
1351 return m1;
1352
1353 return VOIDmode;
1354
1355 default:
1356 return VOIDmode;
1357 }
1358 return VOIDmode;
1359 }
1360
1361 /* Return true if SET either doesn't set the CC register, or else
1362 the source and destination have matching CC modes and that
1363 CC mode is at least as constrained as REQ_MODE. */
1364
1365 static bool
1366 s390_match_ccmode_set (rtx set, machine_mode req_mode)
1367 {
1368 machine_mode set_mode;
1369
1370 gcc_assert (GET_CODE (set) == SET);
1371
1372 /* These modes are supposed to be used only in CC consumer
1373 patterns. */
1374 gcc_assert (req_mode != CCVIALLmode && req_mode != CCVIANYmode
1375 && req_mode != CCVFALLmode && req_mode != CCVFANYmode);
1376
1377 if (GET_CODE (SET_DEST (set)) != REG || !CC_REGNO_P (REGNO (SET_DEST (set))))
1378 return 1;
1379
1380 set_mode = GET_MODE (SET_DEST (set));
1381 switch (set_mode)
1382 {
1383 case E_CCZ1mode:
1384 case E_CCSmode:
1385 case E_CCSRmode:
1386 case E_CCUmode:
1387 case E_CCURmode:
1388 case E_CCLmode:
1389 case E_CCL1mode:
1390 case E_CCL2mode:
1391 case E_CCL3mode:
1392 case E_CCT1mode:
1393 case E_CCT2mode:
1394 case E_CCT3mode:
1395 case E_CCVEQmode:
1396 case E_CCVIHmode:
1397 case E_CCVIHUmode:
1398 case E_CCVFHmode:
1399 case E_CCVFHEmode:
1400 if (req_mode != set_mode)
1401 return 0;
1402 break;
1403
1404 case E_CCZmode:
1405 if (req_mode != CCSmode && req_mode != CCUmode && req_mode != CCTmode
1406 && req_mode != CCSRmode && req_mode != CCURmode
1407 && req_mode != CCZ1mode)
1408 return 0;
1409 break;
1410
1411 case E_CCAPmode:
1412 case E_CCANmode:
1413 if (req_mode != CCAmode)
1414 return 0;
1415 break;
1416
1417 default:
1418 gcc_unreachable ();
1419 }
1420
1421 return (GET_MODE (SET_SRC (set)) == set_mode);
1422 }
1423
1424 /* Return true if every SET in INSN that sets the CC register
1425 has source and destination with matching CC modes and that
1426 CC mode is at least as constrained as REQ_MODE.
1427 If REQ_MODE is VOIDmode, always return false. */
1428
1429 bool
1430 s390_match_ccmode (rtx_insn *insn, machine_mode req_mode)
1431 {
1432 int i;
1433
1434 /* s390_tm_ccmode returns VOIDmode to indicate failure. */
1435 if (req_mode == VOIDmode)
1436 return false;
1437
1438 if (GET_CODE (PATTERN (insn)) == SET)
1439 return s390_match_ccmode_set (PATTERN (insn), req_mode);
1440
1441 if (GET_CODE (PATTERN (insn)) == PARALLEL)
1442 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
1443 {
1444 rtx set = XVECEXP (PATTERN (insn), 0, i);
1445 if (GET_CODE (set) == SET)
1446 if (!s390_match_ccmode_set (set, req_mode))
1447 return false;
1448 }
1449
1450 return true;
1451 }
1452
1453 /* If a test-under-mask instruction can be used to implement
1454 (compare (and ... OP1) OP2), return the CC mode required
1455 to do that. Otherwise, return VOIDmode.
1456 MIXED is true if the instruction can distinguish between
1457 CC1 and CC2 for mixed selected bits (TMxx), it is false
1458 if the instruction cannot (TM). */
1459
1460 machine_mode
1461 s390_tm_ccmode (rtx op1, rtx op2, bool mixed)
1462 {
1463 int bit0, bit1;
1464
1465 /* ??? Fixme: should work on CONST_WIDE_INT as well. */
1466 if (GET_CODE (op1) != CONST_INT || GET_CODE (op2) != CONST_INT)
1467 return VOIDmode;
1468
1469 /* Selected bits all zero: CC0.
1470 e.g.: int a; if ((a & (16 + 128)) == 0) */
1471 if (INTVAL (op2) == 0)
1472 return CCTmode;
1473
1474 /* Selected bits all one: CC3.
1475 e.g.: int a; if ((a & (16 + 128)) == 16 + 128) */
1476 if (INTVAL (op2) == INTVAL (op1))
1477 return CCT3mode;
1478
1479 /* Exactly two bits selected, mixed zeroes and ones: CC1 or CC2. e.g.:
1480 int a;
1481 if ((a & (16 + 128)) == 16) -> CCT1
1482 if ((a & (16 + 128)) == 128) -> CCT2 */
1483 if (mixed)
1484 {
1485 bit1 = exact_log2 (INTVAL (op2));
1486 bit0 = exact_log2 (INTVAL (op1) ^ INTVAL (op2));
1487 if (bit0 != -1 && bit1 != -1)
1488 return bit0 > bit1 ? CCT1mode : CCT2mode;
1489 }
1490
1491 return VOIDmode;
1492 }
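/* Worked example (editorial): for the "if ((a & (16 + 128)) == 16)" case
   from the comment above, OP1 is 144 (0x90) and OP2 is 16.  With MIXED
   true, bit1 = exact_log2 (16) = 4 and bit0 = exact_log2 (144 ^ 16)
   = exact_log2 (128) = 7; since bit0 > bit1 the function returns
   CCT1mode, matching the "-> CCT1" annotation.  */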
1493
1494 /* Given a comparison code OP (EQ, NE, etc.) and the operands
1495 OP0 and OP1 of a COMPARE, return the mode to be used for the
1496 comparison. */
1497
1498 machine_mode
1499 s390_select_ccmode (enum rtx_code code, rtx op0, rtx op1)
1500 {
1501 switch (code)
1502 {
1503 case EQ:
1504 case NE:
1505 if ((GET_CODE (op0) == NEG || GET_CODE (op0) == ABS)
1506 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
1507 return CCAPmode;
1508 if (GET_CODE (op0) == PLUS && GET_CODE (XEXP (op0, 1)) == CONST_INT
1509 && CONST_OK_FOR_K (INTVAL (XEXP (op0, 1))))
1510 return CCAPmode;
1511 if ((GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
1512 || GET_CODE (op1) == NEG)
1513 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
1514 return CCLmode;
1515
1516 if (GET_CODE (op0) == AND)
1517 {
1518 /* Check whether we can potentially do it via TM. */
1519 machine_mode ccmode;
1520 ccmode = s390_tm_ccmode (XEXP (op0, 1), op1, 1);
1521 if (ccmode != VOIDmode)
1522 {
1523 /* Relax CCTmode to CCZmode to allow fall-back to AND
1524 if that turns out to be beneficial. */
1525 return ccmode == CCTmode ? CCZmode : ccmode;
1526 }
1527 }
1528
1529 if (register_operand (op0, HImode)
1530 && GET_CODE (op1) == CONST_INT
1531 && (INTVAL (op1) == -1 || INTVAL (op1) == 65535))
1532 return CCT3mode;
1533 if (register_operand (op0, QImode)
1534 && GET_CODE (op1) == CONST_INT
1535 && (INTVAL (op1) == -1 || INTVAL (op1) == 255))
1536 return CCT3mode;
1537
1538 return CCZmode;
1539
1540 case LE:
1541 case LT:
1542 case GE:
1543 case GT:
1544 /* The only overflow condition of NEG and ABS happens when the
1545 parameter is INT_MIN; the result then stays negative, i.e. the
1546 mathematically positive result overflows into a negative value.
1547 Using CCAP mode the resulting cc can still be used for comparisons. */
1548 if ((GET_CODE (op0) == NEG || GET_CODE (op0) == ABS)
1549 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
1550 return CCAPmode;
1551
1552 /* If constants are involved in an add instruction it is possible to use
1553 the resulting cc for comparisons with zero. Knowing the sign of the
1554 constant the overflow behavior gets predictable. e.g.:
1555 int a, b; if ((b = a + c) > 0)
1556 with c as a constant value: c < 0 -> CCAN and c >= 0 -> CCAP */
1557 if (GET_CODE (op0) == PLUS && GET_CODE (XEXP (op0, 1)) == CONST_INT
1558 && (CONST_OK_FOR_K (INTVAL (XEXP (op0, 1)))
1559 || (CONST_OK_FOR_CONSTRAINT_P (INTVAL (XEXP (op0, 1)), 'O', "Os")
1560 /* Avoid INT32_MIN on 32 bit. */
1561 && (!TARGET_ZARCH || INTVAL (XEXP (op0, 1)) != -0x7fffffff - 1))))
1562 {
1563 if (INTVAL (XEXP((op0), 1)) < 0)
1564 return CCANmode;
1565 else
1566 return CCAPmode;
1567 }
1568 /* Fall through. */
1569 case UNORDERED:
1570 case ORDERED:
1571 case UNEQ:
1572 case UNLE:
1573 case UNLT:
1574 case UNGE:
1575 case UNGT:
1576 case LTGT:
1577 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
1578 && GET_CODE (op1) != CONST_INT)
1579 return CCSRmode;
1580 return CCSmode;
1581
1582 case LTU:
1583 case GEU:
1584 if (GET_CODE (op0) == PLUS
1585 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
1586 return CCL1mode;
1587
1588 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
1589 && GET_CODE (op1) != CONST_INT)
1590 return CCURmode;
1591 return CCUmode;
1592
1593 case LEU:
1594 case GTU:
1595 if (GET_CODE (op0) == MINUS
1596 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
1597 return CCL2mode;
1598
1599 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
1600 && GET_CODE (op1) != CONST_INT)
1601 return CCURmode;
1602 return CCUmode;
1603
1604 default:
1605 gcc_unreachable ();
1606 }
1607 }
1608
1609 /* Replace the comparison OP0 CODE OP1 by a semantically equivalent one
1610 that we can implement more efficiently. */
1611
1612 static void
1613 s390_canonicalize_comparison (int *code, rtx *op0, rtx *op1,
1614 bool op0_preserve_value)
1615 {
1616 if (op0_preserve_value)
1617 return;
1618
1619 /* Convert ZERO_EXTRACT back to AND to enable TM patterns. */
1620 if ((*code == EQ || *code == NE)
1621 && *op1 == const0_rtx
1622 && GET_CODE (*op0) == ZERO_EXTRACT
1623 && GET_CODE (XEXP (*op0, 1)) == CONST_INT
1624 && GET_CODE (XEXP (*op0, 2)) == CONST_INT
1625 && SCALAR_INT_MODE_P (GET_MODE (XEXP (*op0, 0))))
1626 {
1627 rtx inner = XEXP (*op0, 0);
1628 HOST_WIDE_INT modesize = GET_MODE_BITSIZE (GET_MODE (inner));
1629 HOST_WIDE_INT len = INTVAL (XEXP (*op0, 1));
1630 HOST_WIDE_INT pos = INTVAL (XEXP (*op0, 2));
1631
1632 if (len > 0 && len < modesize
1633 && pos >= 0 && pos + len <= modesize
1634 && modesize <= HOST_BITS_PER_WIDE_INT)
1635 {
1636 unsigned HOST_WIDE_INT block;
1637 block = (HOST_WIDE_INT_1U << len) - 1;
1638 block <<= modesize - pos - len;
1639
1640 *op0 = gen_rtx_AND (GET_MODE (inner), inner,
1641 gen_int_mode (block, GET_MODE (inner)));
1642 }
1643 }
1644
1645 /* Narrow AND of memory against immediate to enable TM. */
1646 if ((*code == EQ || *code == NE)
1647 && *op1 == const0_rtx
1648 && GET_CODE (*op0) == AND
1649 && GET_CODE (XEXP (*op0, 1)) == CONST_INT
1650 && SCALAR_INT_MODE_P (GET_MODE (XEXP (*op0, 0))))
1651 {
1652 rtx inner = XEXP (*op0, 0);
1653 rtx mask = XEXP (*op0, 1);
1654
1655 /* Ignore paradoxical SUBREGs if all extra bits are masked out. */
1656 if (GET_CODE (inner) == SUBREG
1657 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (inner)))
1658 && (GET_MODE_SIZE (GET_MODE (inner))
1659 >= GET_MODE_SIZE (GET_MODE (SUBREG_REG (inner))))
1660 && ((INTVAL (mask)
1661 & GET_MODE_MASK (GET_MODE (inner))
1662 & ~GET_MODE_MASK (GET_MODE (SUBREG_REG (inner))))
1663 == 0))
1664 inner = SUBREG_REG (inner);
1665
1666 /* Do not change volatile MEMs. */
1667 if (MEM_P (inner) && !MEM_VOLATILE_P (inner))
1668 {
1669 int part = s390_single_part (XEXP (*op0, 1),
1670 GET_MODE (inner), QImode, 0);
1671 if (part >= 0)
1672 {
1673 mask = gen_int_mode (s390_extract_part (mask, QImode, 0), QImode);
1674 inner = adjust_address_nv (inner, QImode, part);
1675 *op0 = gen_rtx_AND (QImode, inner, mask);
1676 }
1677 }
1678 }
1679
1680 /* Narrow comparisons against 0xffff to HImode if possible. */
1681 if ((*code == EQ || *code == NE)
1682 && GET_CODE (*op1) == CONST_INT
1683 && INTVAL (*op1) == 0xffff
1684 && SCALAR_INT_MODE_P (GET_MODE (*op0))
1685 && (nonzero_bits (*op0, GET_MODE (*op0))
1686 & ~HOST_WIDE_INT_UC (0xffff)) == 0)
1687 {
1688 *op0 = gen_lowpart (HImode, *op0);
1689 *op1 = constm1_rtx;
1690 }
1691
1692 /* Remove redundant UNSPEC_STRCMPCC_TO_INT conversions if possible. */
1693 if (GET_CODE (*op0) == UNSPEC
1694 && XINT (*op0, 1) == UNSPEC_STRCMPCC_TO_INT
1695 && XVECLEN (*op0, 0) == 1
1696 && GET_MODE (XVECEXP (*op0, 0, 0)) == CCUmode
1697 && GET_CODE (XVECEXP (*op0, 0, 0)) == REG
1698 && REGNO (XVECEXP (*op0, 0, 0)) == CC_REGNUM
1699 && *op1 == const0_rtx)
1700 {
1701 enum rtx_code new_code = UNKNOWN;
1702 switch (*code)
1703 {
1704 case EQ: new_code = EQ; break;
1705 case NE: new_code = NE; break;
1706 case LT: new_code = GTU; break;
1707 case GT: new_code = LTU; break;
1708 case LE: new_code = GEU; break;
1709 case GE: new_code = LEU; break;
1710 default: break;
1711 }
1712
1713 if (new_code != UNKNOWN)
1714 {
1715 *op0 = XVECEXP (*op0, 0, 0);
1716 *code = new_code;
1717 }
1718 }
1719
1720 /* Remove redundant UNSPEC_CC_TO_INT conversions if possible. */
1721 if (GET_CODE (*op0) == UNSPEC
1722 && XINT (*op0, 1) == UNSPEC_CC_TO_INT
1723 && XVECLEN (*op0, 0) == 1
1724 && GET_CODE (XVECEXP (*op0, 0, 0)) == REG
1725 && REGNO (XVECEXP (*op0, 0, 0)) == CC_REGNUM
1726 && CONST_INT_P (*op1))
1727 {
1728 enum rtx_code new_code = UNKNOWN;
1729 switch (GET_MODE (XVECEXP (*op0, 0, 0)))
1730 {
1731 case E_CCZmode:
1732 case E_CCRAWmode:
1733 switch (*code)
1734 {
1735 case EQ: new_code = EQ; break;
1736 case NE: new_code = NE; break;
1737 default: break;
1738 }
1739 break;
1740 default: break;
1741 }
1742
1743 if (new_code != UNKNOWN)
1744 {
1745 /* For CCRAWmode put the required cc mask into the second
1746 operand. */
1747 if (GET_MODE (XVECEXP (*op0, 0, 0)) == CCRAWmode
1748 && INTVAL (*op1) >= 0 && INTVAL (*op1) <= 3)
1749 *op1 = gen_rtx_CONST_INT (VOIDmode, 1 << (3 - INTVAL (*op1)));
1750 *op0 = XVECEXP (*op0, 0, 0);
1751 *code = new_code;
1752 }
1753 }
1754
1755 /* Simplify cascaded EQ, NE with const0_rtx. */
1756 if ((*code == NE || *code == EQ)
1757 && (GET_CODE (*op0) == EQ || GET_CODE (*op0) == NE)
1758 && GET_MODE (*op0) == SImode
1759 && GET_MODE (XEXP (*op0, 0)) == CCZ1mode
1760 && REG_P (XEXP (*op0, 0))
1761 && XEXP (*op0, 1) == const0_rtx
1762 && *op1 == const0_rtx)
1763 {
1764 if ((*code == EQ && GET_CODE (*op0) == NE)
1765 || (*code == NE && GET_CODE (*op0) == EQ))
1766 *code = EQ;
1767 else
1768 *code = NE;
1769 *op0 = XEXP (*op0, 0);
1770 }
1771
1772 /* Prefer register over memory as first operand. */
1773 if (MEM_P (*op0) && REG_P (*op1))
1774 {
1775 rtx tem = *op0; *op0 = *op1; *op1 = tem;
1776 *code = (int)swap_condition ((enum rtx_code)*code);
1777 }
1778
1779 /* A comparison result is compared against zero. Replace it with
1780 the (perhaps inverted) original comparison.
1781 This probably should be done by simplify_relational_operation. */
1782 if ((*code == EQ || *code == NE)
1783 && *op1 == const0_rtx
1784 && COMPARISON_P (*op0)
1785 && CC_REG_P (XEXP (*op0, 0)))
1786 {
1787 enum rtx_code new_code;
1788
1789 if (*code == EQ)
1790 new_code = reversed_comparison_code_parts (GET_CODE (*op0),
1791 XEXP (*op0, 0),
1792 XEXP (*op1, 0), NULL);
1793 else
1794 new_code = GET_CODE (*op0);
1795
1796 if (new_code != UNKNOWN)
1797 {
1798 *code = new_code;
1799 *op1 = XEXP (*op0, 1);
1800 *op0 = XEXP (*op0, 0);
1801 }
1802 }
1803 }
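/* Worked example (editorial): for the ZERO_EXTRACT -> AND rewrite above,
   take an SImode INNER (modesize 32) with len = 2 and pos = 4.  Then
   block = ((HOST_WIDE_INT_1U << 2) - 1) << (32 - 4 - 2) = 3 << 26
   = 0x0c000000, so the comparison becomes (and:SI inner 0x0c000000)
   against zero, which the TM patterns can pick up.  */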
1804
1805
1806 /* Emit a compare instruction suitable to implement the comparison
1807 OP0 CODE OP1. Return the correct condition RTL to be placed in
1808 the IF_THEN_ELSE of the conditional branch testing the result. */
1809
1810 rtx
1811 s390_emit_compare (enum rtx_code code, rtx op0, rtx op1)
1812 {
1813 machine_mode mode = s390_select_ccmode (code, op0, op1);
1814 rtx cc;
1815
1816 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
1817 {
1818 /* Do not output a redundant compare instruction if a
1819 compare_and_swap pattern already computed the result and the
1820 machine modes are compatible. */
1821 gcc_assert (s390_cc_modes_compatible (GET_MODE (op0), mode)
1822 == GET_MODE (op0));
1823 cc = op0;
1824 }
1825 else
1826 {
1827 cc = gen_rtx_REG (mode, CC_REGNUM);
1828 emit_insn (gen_rtx_SET (cc, gen_rtx_COMPARE (mode, op0, op1)));
1829 }
1830
1831 return gen_rtx_fmt_ee (code, VOIDmode, cc, const0_rtx);
1832 }
1833
1834 /* Emit a SImode compare and swap instruction setting MEM to NEW_RTX if OLD
1835 matches CMP.
1836 Return the correct condition RTL to be placed in the IF_THEN_ELSE of the
1837 conditional branch testing the result. */
1838
1839 static rtx
1840 s390_emit_compare_and_swap (enum rtx_code code, rtx old, rtx mem,
1841 rtx cmp, rtx new_rtx, machine_mode ccmode)
1842 {
1843 rtx cc;
1844
1845 cc = gen_rtx_REG (ccmode, CC_REGNUM);
1846 switch (GET_MODE (mem))
1847 {
1848 case E_SImode:
1849 emit_insn (gen_atomic_compare_and_swapsi_internal (old, mem, cmp,
1850 new_rtx, cc));
1851 break;
1852 case E_DImode:
1853 emit_insn (gen_atomic_compare_and_swapdi_internal (old, mem, cmp,
1854 new_rtx, cc));
1855 break;
1856 case E_TImode:
1857 emit_insn (gen_atomic_compare_and_swapti_internal (old, mem, cmp,
1858 new_rtx, cc));
1859 break;
1860 case E_QImode:
1861 case E_HImode:
1862 default:
1863 gcc_unreachable ();
1864 }
1865 return s390_emit_compare (code, cc, const0_rtx);
1866 }
1867
1868 /* Emit a jump instruction to TARGET and return it. If COND is
1869 NULL_RTX, emit an unconditional jump, else a conditional jump under
1870 condition COND. */
1871
1872 rtx_insn *
1873 s390_emit_jump (rtx target, rtx cond)
1874 {
1875 rtx insn;
1876
1877 target = gen_rtx_LABEL_REF (VOIDmode, target);
1878 if (cond)
1879 target = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, target, pc_rtx);
1880
1881 insn = gen_rtx_SET (pc_rtx, target);
1882 return emit_jump_insn (insn);
1883 }
1884
1885 /* Return branch condition mask to implement a branch
1886 specified by CODE. Return -1 for invalid comparisons. */
1887
1888 int
1889 s390_branch_condition_mask (rtx code)
1890 {
1891 const int CC0 = 1 << 3;
1892 const int CC1 = 1 << 2;
1893 const int CC2 = 1 << 1;
1894 const int CC3 = 1 << 0;
1895
1896 gcc_assert (GET_CODE (XEXP (code, 0)) == REG);
1897 gcc_assert (REGNO (XEXP (code, 0)) == CC_REGNUM);
1898 gcc_assert (XEXP (code, 1) == const0_rtx
1899 || (GET_MODE (XEXP (code, 0)) == CCRAWmode
1900 && CONST_INT_P (XEXP (code, 1))));
1901
1902
1903 switch (GET_MODE (XEXP (code, 0)))
1904 {
1905 case E_CCZmode:
1906 case E_CCZ1mode:
1907 switch (GET_CODE (code))
1908 {
1909 case EQ: return CC0;
1910 case NE: return CC1 | CC2 | CC3;
1911 default: return -1;
1912 }
1913 break;
1914
1915 case E_CCT1mode:
1916 switch (GET_CODE (code))
1917 {
1918 case EQ: return CC1;
1919 case NE: return CC0 | CC2 | CC3;
1920 default: return -1;
1921 }
1922 break;
1923
1924 case E_CCT2mode:
1925 switch (GET_CODE (code))
1926 {
1927 case EQ: return CC2;
1928 case NE: return CC0 | CC1 | CC3;
1929 default: return -1;
1930 }
1931 break;
1932
1933 case E_CCT3mode:
1934 switch (GET_CODE (code))
1935 {
1936 case EQ: return CC3;
1937 case NE: return CC0 | CC1 | CC2;
1938 default: return -1;
1939 }
1940 break;
1941
1942 case E_CCLmode:
1943 switch (GET_CODE (code))
1944 {
1945 case EQ: return CC0 | CC2;
1946 case NE: return CC1 | CC3;
1947 default: return -1;
1948 }
1949 break;
1950
1951 case E_CCL1mode:
1952 switch (GET_CODE (code))
1953 {
1954 case LTU: return CC2 | CC3; /* carry */
1955 case GEU: return CC0 | CC1; /* no carry */
1956 default: return -1;
1957 }
1958 break;
1959
1960 case E_CCL2mode:
1961 switch (GET_CODE (code))
1962 {
1963 case GTU: return CC0 | CC1; /* borrow */
1964 case LEU: return CC2 | CC3; /* no borrow */
1965 default: return -1;
1966 }
1967 break;
1968
1969 case E_CCL3mode:
1970 switch (GET_CODE (code))
1971 {
1972 case EQ: return CC0 | CC2;
1973 case NE: return CC1 | CC3;
1974 case LTU: return CC1;
1975 case GTU: return CC3;
1976 case LEU: return CC1 | CC2;
1977 case GEU: return CC2 | CC3;
1978 default: return -1;
1979 }
1980
1981 case E_CCUmode:
1982 switch (GET_CODE (code))
1983 {
1984 case EQ: return CC0;
1985 case NE: return CC1 | CC2 | CC3;
1986 case LTU: return CC1;
1987 case GTU: return CC2;
1988 case LEU: return CC0 | CC1;
1989 case GEU: return CC0 | CC2;
1990 default: return -1;
1991 }
1992 break;
1993
1994 case E_CCURmode:
1995 switch (GET_CODE (code))
1996 {
1997 case EQ: return CC0;
1998 case NE: return CC2 | CC1 | CC3;
1999 case LTU: return CC2;
2000 case GTU: return CC1;
2001 case LEU: return CC0 | CC2;
2002 case GEU: return CC0 | CC1;
2003 default: return -1;
2004 }
2005 break;
2006
2007 case E_CCAPmode:
2008 switch (GET_CODE (code))
2009 {
2010 case EQ: return CC0;
2011 case NE: return CC1 | CC2 | CC3;
2012 case LT: return CC1 | CC3;
2013 case GT: return CC2;
2014 case LE: return CC0 | CC1 | CC3;
2015 case GE: return CC0 | CC2;
2016 default: return -1;
2017 }
2018 break;
2019
2020 case E_CCANmode:
2021 switch (GET_CODE (code))
2022 {
2023 case EQ: return CC0;
2024 case NE: return CC1 | CC2 | CC3;
2025 case LT: return CC1;
2026 case GT: return CC2 | CC3;
2027 case LE: return CC0 | CC1;
2028 case GE: return CC0 | CC2 | CC3;
2029 default: return -1;
2030 }
2031 break;
2032
2033 case E_CCSmode:
2034 switch (GET_CODE (code))
2035 {
2036 case EQ: return CC0;
2037 case NE: return CC1 | CC2 | CC3;
2038 case LT: return CC1;
2039 case GT: return CC2;
2040 case LE: return CC0 | CC1;
2041 case GE: return CC0 | CC2;
2042 case UNORDERED: return CC3;
2043 case ORDERED: return CC0 | CC1 | CC2;
2044 case UNEQ: return CC0 | CC3;
2045 case UNLT: return CC1 | CC3;
2046 case UNGT: return CC2 | CC3;
2047 case UNLE: return CC0 | CC1 | CC3;
2048 case UNGE: return CC0 | CC2 | CC3;
2049 case LTGT: return CC1 | CC2;
2050 default: return -1;
2051 }
2052 break;
2053
2054 case E_CCSRmode:
2055 switch (GET_CODE (code))
2056 {
2057 case EQ: return CC0;
2058 case NE: return CC2 | CC1 | CC3;
2059 case LT: return CC2;
2060 case GT: return CC1;
2061 case LE: return CC0 | CC2;
2062 case GE: return CC0 | CC1;
2063 case UNORDERED: return CC3;
2064 case ORDERED: return CC0 | CC2 | CC1;
2065 case UNEQ: return CC0 | CC3;
2066 case UNLT: return CC2 | CC3;
2067 case UNGT: return CC1 | CC3;
2068 case UNLE: return CC0 | CC2 | CC3;
2069 case UNGE: return CC0 | CC1 | CC3;
2070 case LTGT: return CC2 | CC1;
2071 default: return -1;
2072 }
2073 break;
2074
2075 /* Vector comparison modes. */
2076     /* CC2 will never be set.  It is, however, part of the negated
2077        masks.  */
2078 case E_CCVIALLmode:
2079 switch (GET_CODE (code))
2080 {
2081 case EQ:
2082 case GTU:
2083 case GT:
2084 case GE: return CC0;
2085 /* The inverted modes are in fact *any* modes. */
2086 case NE:
2087 case LEU:
2088 case LE:
2089 case LT: return CC3 | CC1 | CC2;
2090 default: return -1;
2091 }
2092
2093 case E_CCVIANYmode:
2094 switch (GET_CODE (code))
2095 {
2096 case EQ:
2097 case GTU:
2098 case GT:
2099 case GE: return CC0 | CC1;
2100 /* The inverted modes are in fact *all* modes. */
2101 case NE:
2102 case LEU:
2103 case LE:
2104 case LT: return CC3 | CC2;
2105 default: return -1;
2106 }
2107 case E_CCVFALLmode:
2108 switch (GET_CODE (code))
2109 {
2110 case EQ:
2111 case GT:
2112 case GE: return CC0;
2113 /* The inverted modes are in fact *any* modes. */
2114 case NE:
2115 case UNLE:
2116 case UNLT: return CC3 | CC1 | CC2;
2117 default: return -1;
2118 }
2119
2120 case E_CCVFANYmode:
2121 switch (GET_CODE (code))
2122 {
2123 case EQ:
2124 case GT:
2125 case GE: return CC0 | CC1;
2126 /* The inverted modes are in fact *all* modes. */
2127 case NE:
2128 case UNLE:
2129 case UNLT: return CC3 | CC2;
2130 default: return -1;
2131 }
2132
2133 case E_CCRAWmode:
2134 switch (GET_CODE (code))
2135 {
2136 case EQ:
2137 return INTVAL (XEXP (code, 1));
2138 case NE:
2139 return (INTVAL (XEXP (code, 1))) ^ 0xf;
2140 default:
2141 gcc_unreachable ();
2142 }
2143
2144 default:
2145 return -1;
2146 }
2147 }
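
/* Illustrative sketch, kept out of the build: a minimal host-side model of
   the 4-bit condition-code mask constructed above.  Condition code value CC
   (0..3) contributes bit 1 << (3 - CC), and a branch mask is the OR of the
   bits for every CC under which the branch is taken.  The helper names below
   are invented for this example only.  */
#if 0
static int
cc_bit (int cc)
{
  return 1 << (3 - cc);
}

static void
cc_mask_example (void)
{
  /* CCSmode LE is taken on CC0 (equal) or CC1 (less than).  */
  int mask_le = cc_bit (0) | cc_bit (1);	/* 0b1100 == 12 */
  /* The inverted branch condition is the complement within 4 bits
     (cf. the mask ^= 15 in s390_branch_condition_mnemonic below).  */
  int mask_not_le = mask_le ^ 0xf;		/* CC2 | CC3 == 3 */
  gcc_assert (mask_le == 12 && mask_not_le == 3);
}
#endif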
2148
2149
2150 /* Return branch condition mask to implement a compare and branch
2151 specified by CODE. Return -1 for invalid comparisons. */
2152
2153 int
2154 s390_compare_and_branch_condition_mask (rtx code)
2155 {
2156 const int CC0 = 1 << 3;
2157 const int CC1 = 1 << 2;
2158 const int CC2 = 1 << 1;
2159
2160 switch (GET_CODE (code))
2161 {
2162 case EQ:
2163 return CC0;
2164 case NE:
2165 return CC1 | CC2;
2166 case LT:
2167 case LTU:
2168 return CC1;
2169 case GT:
2170 case GTU:
2171 return CC2;
2172 case LE:
2173 case LEU:
2174 return CC0 | CC1;
2175 case GE:
2176 case GEU:
2177 return CC0 | CC2;
2178 default:
2179 gcc_unreachable ();
2180 }
2181 return -1;
2182 }
2183
2184 /* If INV is false, return assembler mnemonic string to implement
2185 a branch specified by CODE. If INV is true, return mnemonic
2186 for the corresponding inverted branch. */
2187
2188 static const char *
2189 s390_branch_condition_mnemonic (rtx code, int inv)
2190 {
2191 int mask;
2192
2193 static const char *const mnemonic[16] =
2194 {
2195 NULL, "o", "h", "nle",
2196 "l", "nhe", "lh", "ne",
2197 "e", "nlh", "he", "nl",
2198 "le", "nh", "no", NULL
2199 };
2200
2201 if (GET_CODE (XEXP (code, 0)) == REG
2202 && REGNO (XEXP (code, 0)) == CC_REGNUM
2203 && (XEXP (code, 1) == const0_rtx
2204 || (GET_MODE (XEXP (code, 0)) == CCRAWmode
2205 && CONST_INT_P (XEXP (code, 1)))))
2206 mask = s390_branch_condition_mask (code);
2207 else
2208 mask = s390_compare_and_branch_condition_mask (code);
2209
2210 gcc_assert (mask >= 0);
2211
2212 if (inv)
2213 mask ^= 15;
2214
2215 gcc_assert (mask >= 1 && mask <= 14);
2216
2217 return mnemonic[mask];
2218 }
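
/* Illustrative sketch, kept out of the build: the mnemonic table above is
   indexed directly by the 4-bit CC mask, and an inverted branch simply uses
   the complemented mask.  The function name below is invented for this
   example only; the table is a copy of the one above.  */
#if 0
static void
branch_mnemonic_example (void)
{
  static const char *const mnem[16] =
    { NULL, "o", "h", "nle", "l", "nhe", "lh", "ne",
      "e", "nlh", "he", "nl", "le", "nh", "no", NULL };

  int mask = 1 << 3;	/* EQ in CCZmode: branch on CC0 only.  */
  gcc_assert (strcmp (mnem[mask], "e") == 0);

  /* The inverted branch complements the mask within 4 bits.  */
  gcc_assert (strcmp (mnem[mask ^ 15], "ne") == 0);
}
#endif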
2219
2220 /* Return the part of OP that has a value different from DEF.
2221    The size of the part is determined by MODE.
2222    Use this function only if you already know that OP really
2223    contains such a part.  */
2224
2225 unsigned HOST_WIDE_INT
2226 s390_extract_part (rtx op, machine_mode mode, int def)
2227 {
2228 unsigned HOST_WIDE_INT value = 0;
2229 int max_parts = HOST_BITS_PER_WIDE_INT / GET_MODE_BITSIZE (mode);
2230 int part_bits = GET_MODE_BITSIZE (mode);
2231 unsigned HOST_WIDE_INT part_mask = (HOST_WIDE_INT_1U << part_bits) - 1;
2232 int i;
2233
2234 for (i = 0; i < max_parts; i++)
2235 {
2236 if (i == 0)
2237 value = UINTVAL (op);
2238 else
2239 value >>= part_bits;
2240
2241 if ((value & part_mask) != (def & part_mask))
2242 return value & part_mask;
2243 }
2244
2245 gcc_unreachable ();
2246 }
2247
2248 /* If OP is an integer constant of mode MODE with exactly one
2249 part of mode PART_MODE unequal to DEF, return the number of that
2250 part. Otherwise, return -1. */
2251
2252 int
2253 s390_single_part (rtx op,
2254 machine_mode mode,
2255 machine_mode part_mode,
2256 int def)
2257 {
2258 unsigned HOST_WIDE_INT value = 0;
2259 int n_parts = GET_MODE_SIZE (mode) / GET_MODE_SIZE (part_mode);
2260 unsigned HOST_WIDE_INT part_mask
2261 = (HOST_WIDE_INT_1U << GET_MODE_BITSIZE (part_mode)) - 1;
2262 int i, part = -1;
2263
2264 if (GET_CODE (op) != CONST_INT)
2265 return -1;
2266
2267 for (i = 0; i < n_parts; i++)
2268 {
2269 if (i == 0)
2270 value = UINTVAL (op);
2271 else
2272 value >>= GET_MODE_BITSIZE (part_mode);
2273
2274 if ((value & part_mask) != (def & part_mask))
2275 {
2276 if (part != -1)
2277 return -1;
2278 else
2279 part = i;
2280 }
2281 }
2282 return part == -1 ? -1 : n_parts - 1 - part;
2283 }
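
/* Illustrative sketch, kept out of the build: a host-side model of
   s390_single_part for a 64-bit value split into 16-bit (HImode-sized)
   parts with DEF == 0.  The helper names below are invented for this
   example only.  */
#if 0
static int
single_hi_part_example (unsigned long long value)
{
  int part = -1;

  for (int i = 0; i < 4; i++)
    {
      unsigned long long chunk = (value >> (16 * i)) & 0xffff;
      if (chunk != 0)
	{
	  if (part != -1)
	    return -1;		/* More than one part differs from 0.  */
	  part = i;
	}
    }
  /* As above, the result numbers the parts from the most significant end.  */
  return part == -1 ? -1 : 3 - part;
}

static void
single_part_example (void)
{
  /* Only the halfword covering bits 32..47 is non-zero: part 1.  */
  gcc_assert (single_hi_part_example (0x0000123400000000ULL) == 1);
  /* Two non-zero halfwords: no single part.  */
  gcc_assert (single_hi_part_example (0x0000123400005678ULL) == -1);
}
#endif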
2284
2285 /* Return true if IN contains a contiguous bitfield in the lower SIZE
2286 bits and no other bits are set in (the lower SIZE bits of) IN.
2287
2288 PSTART and PEND can be used to obtain the start and end
2289 position (inclusive) of the bitfield relative to 64
2290 bits. *PSTART / *PEND gives the position of the first/last bit
2291 of the bitfield counting from the highest order bit starting
2292 with zero. */
2293
2294 bool
2295 s390_contiguous_bitmask_nowrap_p (unsigned HOST_WIDE_INT in, int size,
2296 int *pstart, int *pend)
2297 {
2298 int start;
2299 int end = -1;
2300 int lowbit = HOST_BITS_PER_WIDE_INT - 1;
2301 int highbit = HOST_BITS_PER_WIDE_INT - size;
2302 unsigned HOST_WIDE_INT bitmask = HOST_WIDE_INT_1U;
2303
2304 gcc_assert (!!pstart == !!pend);
2305 for (start = lowbit; start >= highbit; bitmask <<= 1, start--)
2306 if (end == -1)
2307 {
2308 /* Look for the rightmost bit of a contiguous range of ones. */
2309 if (bitmask & in)
2310 /* Found it. */
2311 end = start;
2312 }
2313 else
2314 {
2315 	/* Look for the first zero bit after the range of ones.  */
2316 if (! (bitmask & in))
2317 /* Found it. */
2318 break;
2319 }
2320 /* We're one past the last one-bit. */
2321 start++;
2322
2323 if (end == -1)
2324 /* No one bits found. */
2325 return false;
2326
2327 if (start > highbit)
2328 {
2329 unsigned HOST_WIDE_INT mask;
2330
2331 /* Calculate a mask for all bits beyond the contiguous bits. */
2332 mask = ((~HOST_WIDE_INT_0U >> highbit)
2333 & (~HOST_WIDE_INT_0U << (lowbit - start + 1)));
2334 if (mask & in)
2335 /* There are more bits set beyond the first range of one bits. */
2336 return false;
2337 }
2338
2339 if (pstart)
2340 {
2341 *pstart = start;
2342 *pend = end;
2343 }
2344
2345 return true;
2346 }
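
/* Illustrative sketch, kept out of the build: an example of the MSB-based
   numbering returned above (the most significant bit is position 0).  The
   function name below is invented for this example only.  */
#if 0
static void
contiguous_bitmask_example (void)
{
  int start, end;

  /* Bits 8..23 in LSB numbering are set, i.e. positions 40..55 counted
     from the most significant bit.  */
  gcc_assert (s390_contiguous_bitmask_nowrap_p (0x0000000000ffff00ULL, 64,
						&start, &end));
  gcc_assert (start == 40 && end == 55);

  /* 0b101 is not a contiguous range of ones.  */
  gcc_assert (!s390_contiguous_bitmask_nowrap_p (0x5ULL, 64, &start, &end));
}
#endif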
2347
2348 /* Same as s390_contiguous_bitmask_nowrap_p but also returns true
2349 if ~IN contains a contiguous bitfield. In that case, *END is <
2350 *START.
2351
2352 If WRAP_P is true, a bitmask that wraps around is also tested.
2353    When a wraparound occurs, *START is greater than *END (in
2354 non-null pointers), and the uppermost (64 - SIZE) bits are thus
2355 part of the range. If WRAP_P is false, no wraparound is
2356 tested. */
2357
2358 bool
2359 s390_contiguous_bitmask_p (unsigned HOST_WIDE_INT in, bool wrap_p,
2360 int size, int *start, int *end)
2361 {
2362 int bs = HOST_BITS_PER_WIDE_INT;
2363 bool b;
2364
2365 gcc_assert (!!start == !!end);
2366 if ((in & ((~HOST_WIDE_INT_0U) >> (bs - size))) == 0)
2367 /* This cannot be expressed as a contiguous bitmask. Exit early because
2368 the second call of s390_contiguous_bitmask_nowrap_p would accept this as
2369 a valid bitmask. */
2370 return false;
2371 b = s390_contiguous_bitmask_nowrap_p (in, size, start, end);
2372 if (b)
2373 return true;
2374 if (! wrap_p)
2375 return false;
2376 b = s390_contiguous_bitmask_nowrap_p (~in, size, start, end);
2377 if (b && start)
2378 {
2379 int s = *start;
2380 int e = *end;
2381
2382 gcc_assert (s >= 1);
2383 *start = ((e + 1) & (bs - 1));
2384 *end = ((s - 1 + bs) & (bs - 1));
2385 }
2386
2387 return b;
2388 }
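
/* Illustrative sketch, kept out of the build: a wrapping bitmask as accepted
   above.  With the top four and bottom four bits set, the range is contiguous
   only when wraparound is allowed, and *START > *END reports the wrap.  The
   function name below is invented for this example only.  */
#if 0
static void
wrapping_bitmask_example (void)
{
  int start, end;

  gcc_assert (!s390_contiguous_bitmask_p (0xf00000000000000fULL, false, 64,
					  &start, &end));
  gcc_assert (s390_contiguous_bitmask_p (0xf00000000000000fULL, true, 64,
					 &start, &end));
  /* Positions 60..63 wrapping around to 0..3 in MSB-based numbering.  */
  gcc_assert (start == 60 && end == 3);
}
#endif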
2389
2390 /* Return true if OP contains the same contiguous bitfield in *all*
2391 its elements. START and END can be used to obtain the start and
2392 end position of the bitfield.
2393
2394    START/END give the position of the first/last bit of the bitfield
2395 counting from the lowest order bit starting with zero. In order to
2396 use these values for S/390 instructions this has to be converted to
2397 "bits big endian" style. */
2398
2399 bool
2400 s390_contiguous_bitmask_vector_p (rtx op, int *start, int *end)
2401 {
2402 unsigned HOST_WIDE_INT mask;
2403 int size;
2404 rtx elt;
2405 bool b;
2406
2407 gcc_assert (!!start == !!end);
2408 if (!const_vec_duplicate_p (op, &elt)
2409 || !CONST_INT_P (elt))
2410 return false;
2411
2412 size = GET_MODE_UNIT_BITSIZE (GET_MODE (op));
2413
2414 /* We cannot deal with V1TI/V1TF. This would require a vgmq. */
2415 if (size > 64)
2416 return false;
2417
2418 mask = UINTVAL (elt);
2419
2420 b = s390_contiguous_bitmask_p (mask, true, size, start, end);
2421 if (b)
2422 {
2423 if (start)
2424 {
2425 *start -= (HOST_BITS_PER_WIDE_INT - size);
2426 *end -= (HOST_BITS_PER_WIDE_INT - size);
2427 }
2428 return true;
2429 }
2430 else
2431 return false;
2432 }
2433
2434 /* Return true if OP consists only of byte chunks that are either 0 or
2435    0xff.  If MASK is non-NULL, a byte mask is generated which is
2436    appropriate for the vector generate byte mask instruction.  */
2437
2438 bool
2439 s390_bytemask_vector_p (rtx op, unsigned *mask)
2440 {
2441 int i;
2442 unsigned tmp_mask = 0;
2443 int nunit, unit_size;
2444
2445 if (!VECTOR_MODE_P (GET_MODE (op))
2446 || GET_CODE (op) != CONST_VECTOR
2447 || !CONST_INT_P (XVECEXP (op, 0, 0)))
2448 return false;
2449
2450 nunit = GET_MODE_NUNITS (GET_MODE (op));
2451 unit_size = GET_MODE_UNIT_SIZE (GET_MODE (op));
2452
2453 for (i = 0; i < nunit; i++)
2454 {
2455 unsigned HOST_WIDE_INT c;
2456 int j;
2457
2458 if (!CONST_INT_P (XVECEXP (op, 0, i)))
2459 return false;
2460
2461 c = UINTVAL (XVECEXP (op, 0, i));
2462 for (j = 0; j < unit_size; j++)
2463 {
2464 if ((c & 0xff) != 0 && (c & 0xff) != 0xff)
2465 return false;
2466 tmp_mask |= (c & 1) << ((nunit - 1 - i) * unit_size + j);
2467 c = c >> BITS_PER_UNIT;
2468 }
2469 }
2470
2471 if (mask != NULL)
2472 *mask = tmp_mask;
2473
2474 return true;
2475 }
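
/* Illustrative sketch, kept out of the build: a host-side model of the byte
   mask computed above for a two-element vector of 64-bit constants.  Byte J
   (counted from the least significant byte) of element I sets mask bit
   (nunit - 1 - i) * unit_size + j.  The helper names below are invented for
   this example only.  */
#if 0
static unsigned
bytemask_v2di_example (unsigned long long elt0, unsigned long long elt1)
{
  unsigned long long elts[2] = { elt0, elt1 };
  unsigned mask = 0;

  for (int i = 0; i < 2; i++)
    {
      unsigned long long c = elts[i];
      for (int j = 0; j < 8; j++)
	{
	  /* Every byte must be 0x00 or 0xff for the pattern to qualify.  */
	  mask |= (unsigned) (c & 1) << ((1 - i) * 8 + j);
	  c >>= 8;
	}
    }
  return mask;
}

static void
bytemask_example (void)
{
  gcc_assert (bytemask_v2di_example (0x00000000ffffffffULL,
				     0xffff000000000000ULL) == 0x0fc0);
}
#endif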
2476
2477 /* Check whether a rotate of ROTL followed by an AND of CONTIG is
2478 equivalent to a shift followed by the AND. In particular, CONTIG
2479 should not overlap the (rotated) bit 0/bit 63 gap. Negative values
2480 for ROTL indicate a rotate to the right. */
2481
2482 bool
2483 s390_extzv_shift_ok (int bitsize, int rotl, unsigned HOST_WIDE_INT contig)
2484 {
2485 int start, end;
2486 bool ok;
2487
2488 ok = s390_contiguous_bitmask_nowrap_p (contig, bitsize, &start, &end);
2489 gcc_assert (ok);
2490
2491 if (rotl >= 0)
2492 return (64 - end >= rotl);
2493 else
2494 {
2495 /* Translate "- rotate right" in BITSIZE mode to "rotate left" in
2496 	 DImode.  */
2497 rotl = -rotl + (64 - bitsize);
2498 return (start >= rotl);
2499 }
2500 }
2501
2502 /* Check whether we can (and want to) split a double-word
2503 move in mode MODE from SRC to DST into two single-word
2504 moves, moving the subword FIRST_SUBWORD first. */
2505
2506 bool
2507 s390_split_ok_p (rtx dst, rtx src, machine_mode mode, int first_subword)
2508 {
2509 /* Floating point and vector registers cannot be split. */
2510 if (FP_REG_P (src) || FP_REG_P (dst) || VECTOR_REG_P (src) || VECTOR_REG_P (dst))
2511 return false;
2512
2513 /* Non-offsettable memory references cannot be split. */
2514 if ((GET_CODE (src) == MEM && !offsettable_memref_p (src))
2515 || (GET_CODE (dst) == MEM && !offsettable_memref_p (dst)))
2516 return false;
2517
2518 /* Moving the first subword must not clobber a register
2519 needed to move the second subword. */
2520 if (register_operand (dst, mode))
2521 {
2522 rtx subreg = operand_subword (dst, first_subword, 0, mode);
2523 if (reg_overlap_mentioned_p (subreg, src))
2524 return false;
2525 }
2526
2527 return true;
2528 }
2529
2530 /* Return true if it can be proven that [MEM1, MEM1 + SIZE]
2531 and [MEM2, MEM2 + SIZE] do overlap and false
2532 otherwise. */
2533
2534 bool
2535 s390_overlap_p (rtx mem1, rtx mem2, HOST_WIDE_INT size)
2536 {
2537 rtx addr1, addr2, addr_delta;
2538 HOST_WIDE_INT delta;
2539
2540 if (GET_CODE (mem1) != MEM || GET_CODE (mem2) != MEM)
2541 return true;
2542
2543 if (size == 0)
2544 return false;
2545
2546 addr1 = XEXP (mem1, 0);
2547 addr2 = XEXP (mem2, 0);
2548
2549 addr_delta = simplify_binary_operation (MINUS, Pmode, addr2, addr1);
2550
2551 /* This overlapping check is used by peepholes merging memory block operations.
2552 Overlapping operations would otherwise be recognized by the S/390 hardware
2553 and would fall back to a slower implementation. Allowing overlapping
2554 operations would lead to slow code but not to wrong code. Therefore we are
2555 somewhat optimistic if we cannot prove that the memory blocks are
2556 overlapping.
2557 That's why we return false here although this may accept operations on
2558 overlapping memory areas. */
2559 if (!addr_delta || GET_CODE (addr_delta) != CONST_INT)
2560 return false;
2561
2562 delta = INTVAL (addr_delta);
2563
2564 if (delta == 0
2565 || (delta > 0 && delta < size)
2566 || (delta < 0 && -delta < size))
2567 return true;
2568
2569 return false;
2570 }
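
/* Illustrative sketch, kept out of the build: the constant-delta test above
   in isolation.  Two blocks of SIZE bytes whose start addresses differ by
   DELTA overlap exactly when the absolute distance is smaller than SIZE.
   The helper names below are invented for this example only.  */
#if 0
static bool
blocks_overlap_example_p (long long delta, long long size)
{
  return (delta == 0
	  || (delta > 0 && delta < size)
	  || (delta < 0 && -delta < size));
}

static void
overlap_example (void)
{
  gcc_assert (!blocks_overlap_example_p (16, 16));	/* adjacent blocks */
  gcc_assert (blocks_overlap_example_p (16, 24));	/* partial overlap */
  gcc_assert (blocks_overlap_example_p (-8, 16));	/* partial overlap */
}
#endif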
2571
2572 /* Check whether the address of memory reference MEM2 equals exactly
2573 the address of memory reference MEM1 plus DELTA. Return true if
2574 we can prove this to be the case, false otherwise. */
2575
2576 bool
2577 s390_offset_p (rtx mem1, rtx mem2, rtx delta)
2578 {
2579 rtx addr1, addr2, addr_delta;
2580
2581 if (GET_CODE (mem1) != MEM || GET_CODE (mem2) != MEM)
2582 return false;
2583
2584 addr1 = XEXP (mem1, 0);
2585 addr2 = XEXP (mem2, 0);
2586
2587 addr_delta = simplify_binary_operation (MINUS, Pmode, addr2, addr1);
2588 if (!addr_delta || !rtx_equal_p (addr_delta, delta))
2589 return false;
2590
2591 return true;
2592 }
2593
2594 /* Expand logical operator CODE in mode MODE with operands OPERANDS. */
2595
2596 void
2597 s390_expand_logical_operator (enum rtx_code code, machine_mode mode,
2598 rtx *operands)
2599 {
2600 machine_mode wmode = mode;
2601 rtx dst = operands[0];
2602 rtx src1 = operands[1];
2603 rtx src2 = operands[2];
2604 rtx op, clob, tem;
2605
2606 /* If we cannot handle the operation directly, use a temp register. */
2607 if (!s390_logical_operator_ok_p (operands))
2608 dst = gen_reg_rtx (mode);
2609
2610 /* QImode and HImode patterns make sense only if we have a destination
2611 in memory. Otherwise perform the operation in SImode. */
2612 if ((mode == QImode || mode == HImode) && GET_CODE (dst) != MEM)
2613 wmode = SImode;
2614
2615 /* Widen operands if required. */
2616 if (mode != wmode)
2617 {
2618 if (GET_CODE (dst) == SUBREG
2619 && (tem = simplify_subreg (wmode, dst, mode, 0)) != 0)
2620 dst = tem;
2621 else if (REG_P (dst))
2622 dst = gen_rtx_SUBREG (wmode, dst, 0);
2623 else
2624 dst = gen_reg_rtx (wmode);
2625
2626 if (GET_CODE (src1) == SUBREG
2627 && (tem = simplify_subreg (wmode, src1, mode, 0)) != 0)
2628 src1 = tem;
2629 else if (GET_MODE (src1) != VOIDmode)
2630 src1 = gen_rtx_SUBREG (wmode, force_reg (mode, src1), 0);
2631
2632 if (GET_CODE (src2) == SUBREG
2633 && (tem = simplify_subreg (wmode, src2, mode, 0)) != 0)
2634 src2 = tem;
2635 else if (GET_MODE (src2) != VOIDmode)
2636 src2 = gen_rtx_SUBREG (wmode, force_reg (mode, src2), 0);
2637 }
2638
2639 /* Emit the instruction. */
2640 op = gen_rtx_SET (dst, gen_rtx_fmt_ee (code, wmode, src1, src2));
2641 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
2642 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
2643
2644 /* Fix up the destination if needed. */
2645 if (dst != operands[0])
2646 emit_move_insn (operands[0], gen_lowpart (mode, dst));
2647 }
2648
2649 /* Check whether OPERANDS are OK for a logical operation (AND, IOR, XOR). */
2650
2651 bool
2652 s390_logical_operator_ok_p (rtx *operands)
2653 {
2654 /* If the destination operand is in memory, it needs to coincide
2655 with one of the source operands. After reload, it has to be
2656 the first source operand. */
2657 if (GET_CODE (operands[0]) == MEM)
2658 return rtx_equal_p (operands[0], operands[1])
2659 || (!reload_completed && rtx_equal_p (operands[0], operands[2]));
2660
2661 return true;
2662 }
2663
2664 /* Narrow logical operation CODE of memory operand MEMOP with immediate
2665 operand IMMOP to switch from SS to SI type instructions. */
2666
2667 void
2668 s390_narrow_logical_operator (enum rtx_code code, rtx *memop, rtx *immop)
2669 {
2670 int def = code == AND ? -1 : 0;
2671 HOST_WIDE_INT mask;
2672 int part;
2673
2674 gcc_assert (GET_CODE (*memop) == MEM);
2675 gcc_assert (!MEM_VOLATILE_P (*memop));
2676
2677 mask = s390_extract_part (*immop, QImode, def);
2678 part = s390_single_part (*immop, GET_MODE (*memop), QImode, def);
2679 gcc_assert (part >= 0);
2680
2681 *memop = adjust_address (*memop, QImode, part);
2682 *immop = gen_int_mode (mask, QImode);
2683 }
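
/* Illustrative sketch, kept out of the build: a host-side model of the
   narrowing above for CODE == AND.  If exactly one byte of the 64-bit
   immediate differs from 0xff, the 8-byte AND can be replaced by a single
   byte-sized operation at the offset of that byte (counted from the most
   significant byte).  The helper names below are invented for this example
   only.  */
#if 0
static int
narrow_and_byte_example (unsigned long long imm, unsigned char *byte_mask)
{
  int part = -1;

  for (int i = 0; i < 8; i++)
    {
      unsigned char b = (imm >> (8 * i)) & 0xff;
      if (b != 0xff)
	{
	  if (part != -1)
	    return -1;		/* More than one byte differs.  */
	  part = i;
	  *byte_mask = b;
	}
    }
  return part == -1 ? -1 : 7 - part;
}

static void
narrow_logical_example (void)
{
  unsigned char m;

  /* Only the byte at offset 3 is changed by the AND.  */
  gcc_assert (narrow_and_byte_example (0xffffff00ffffffffULL, &m) == 3
	      && m == 0x00);
}
#endif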
2684
2685
2686 /* How to allocate a 'struct machine_function'. */
2687
2688 static struct machine_function *
2689 s390_init_machine_status (void)
2690 {
2691 return ggc_cleared_alloc<machine_function> ();
2692 }
2693
2694 /* Map each hard register number to the smallest class containing it.  */
2695
2696 const enum reg_class regclass_map[FIRST_PSEUDO_REGISTER] =
2697 { GENERAL_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS, /* 0 */
2698 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS, /* 4 */
2699 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS, /* 8 */
2700 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS, /* 12 */
2701 FP_REGS, FP_REGS, FP_REGS, FP_REGS, /* 16 */
2702 FP_REGS, FP_REGS, FP_REGS, FP_REGS, /* 20 */
2703 FP_REGS, FP_REGS, FP_REGS, FP_REGS, /* 24 */
2704 FP_REGS, FP_REGS, FP_REGS, FP_REGS, /* 28 */
2705 ADDR_REGS, CC_REGS, ADDR_REGS, ADDR_REGS, /* 32 */
2706 ACCESS_REGS, ACCESS_REGS, VEC_REGS, VEC_REGS, /* 36 */
2707 VEC_REGS, VEC_REGS, VEC_REGS, VEC_REGS, /* 40 */
2708 VEC_REGS, VEC_REGS, VEC_REGS, VEC_REGS, /* 44 */
2709 VEC_REGS, VEC_REGS, VEC_REGS, VEC_REGS, /* 48 */
2710 VEC_REGS, VEC_REGS /* 52 */
2711 };
2712
2713 /* Return attribute type of insn. */
2714
2715 static enum attr_type
2716 s390_safe_attr_type (rtx_insn *insn)
2717 {
2718 if (recog_memoized (insn) >= 0)
2719 return get_attr_type (insn);
2720 else
2721 return TYPE_NONE;
2722 }
2723
2724 /* Return true if DISP is a valid short displacement. */
2725
2726 static bool
2727 s390_short_displacement (rtx disp)
2728 {
2729 /* No displacement is OK. */
2730 if (!disp)
2731 return true;
2732
2733 /* Without the long displacement facility we don't need to
2734      distinguish between long and short displacements.  */
2735 if (!TARGET_LONG_DISPLACEMENT)
2736 return true;
2737
2738 /* Integer displacement in range. */
2739 if (GET_CODE (disp) == CONST_INT)
2740 return INTVAL (disp) >= 0 && INTVAL (disp) < 4096;
2741
2742 /* GOT offset is not OK, the GOT can be large. */
2743 if (GET_CODE (disp) == CONST
2744 && GET_CODE (XEXP (disp, 0)) == UNSPEC
2745 && (XINT (XEXP (disp, 0), 1) == UNSPEC_GOT
2746 || XINT (XEXP (disp, 0), 1) == UNSPEC_GOTNTPOFF))
2747 return false;
2748
2749 /* All other symbolic constants are literal pool references,
2750 which are OK as the literal pool must be small. */
2751 if (GET_CODE (disp) == CONST)
2752 return true;
2753
2754 return false;
2755 }
2756
2757 /* Decompose an RTL expression ADDR for a memory address into
2758 its components, returned in OUT.
2759
2760 Returns false if ADDR is not a valid memory address, true
2761 otherwise. If OUT is NULL, don't return the components,
2762 but check for validity only.
2763
2764 Note: Only addresses in canonical form are recognized.
2765 LEGITIMIZE_ADDRESS should convert non-canonical forms to the
2766 canonical form so that they will be recognized. */
2767
2768 static int
2769 s390_decompose_address (rtx addr, struct s390_address *out)
2770 {
2771 HOST_WIDE_INT offset = 0;
2772 rtx base = NULL_RTX;
2773 rtx indx = NULL_RTX;
2774 rtx disp = NULL_RTX;
2775 rtx orig_disp;
2776 bool pointer = false;
2777 bool base_ptr = false;
2778 bool indx_ptr = false;
2779 bool literal_pool = false;
2780
2781 /* We may need to substitute the literal pool base register into the address
2782 below. However, at this point we do not know which register is going to
2783 be used as base, so we substitute the arg pointer register. This is going
2784 to be treated as holding a pointer below -- it shouldn't be used for any
2785 other purpose. */
2786 rtx fake_pool_base = gen_rtx_REG (Pmode, ARG_POINTER_REGNUM);
2787
2788 /* Decompose address into base + index + displacement. */
2789
2790 if (GET_CODE (addr) == REG || GET_CODE (addr) == UNSPEC)
2791 base = addr;
2792
2793 else if (GET_CODE (addr) == PLUS)
2794 {
2795 rtx op0 = XEXP (addr, 0);
2796 rtx op1 = XEXP (addr, 1);
2797 enum rtx_code code0 = GET_CODE (op0);
2798 enum rtx_code code1 = GET_CODE (op1);
2799
2800 if (code0 == REG || code0 == UNSPEC)
2801 {
2802 if (code1 == REG || code1 == UNSPEC)
2803 {
2804 indx = op0; /* index + base */
2805 base = op1;
2806 }
2807
2808 else
2809 {
2810 base = op0; /* base + displacement */
2811 disp = op1;
2812 }
2813 }
2814
2815 else if (code0 == PLUS)
2816 {
2817 indx = XEXP (op0, 0); /* index + base + disp */
2818 base = XEXP (op0, 1);
2819 disp = op1;
2820 }
2821
2822 else
2823 {
2824 return false;
2825 }
2826 }
2827
2828 else
2829 disp = addr; /* displacement */
2830
2831 /* Extract integer part of displacement. */
2832 orig_disp = disp;
2833 if (disp)
2834 {
2835 if (GET_CODE (disp) == CONST_INT)
2836 {
2837 offset = INTVAL (disp);
2838 disp = NULL_RTX;
2839 }
2840 else if (GET_CODE (disp) == CONST
2841 && GET_CODE (XEXP (disp, 0)) == PLUS
2842 && GET_CODE (XEXP (XEXP (disp, 0), 1)) == CONST_INT)
2843 {
2844 offset = INTVAL (XEXP (XEXP (disp, 0), 1));
2845 disp = XEXP (XEXP (disp, 0), 0);
2846 }
2847 }
2848
2849 /* Strip off CONST here to avoid special case tests later. */
2850 if (disp && GET_CODE (disp) == CONST)
2851 disp = XEXP (disp, 0);
2852
2853 /* We can convert literal pool addresses to
2854 displacements by basing them off the base register. */
2855 if (disp && GET_CODE (disp) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (disp))
2856 {
2857 if (base || indx)
2858 return false;
2859
2860 base = fake_pool_base, literal_pool = true;
2861
2862 /* Mark up the displacement. */
2863 disp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, disp),
2864 UNSPEC_LTREL_OFFSET);
2865 }
2866
2867 /* Validate base register. */
2868 if (base)
2869 {
2870 if (GET_CODE (base) == UNSPEC)
2871 switch (XINT (base, 1))
2872 {
2873 case UNSPEC_LTREF:
2874 if (!disp)
2875 disp = gen_rtx_UNSPEC (Pmode,
2876 gen_rtvec (1, XVECEXP (base, 0, 0)),
2877 UNSPEC_LTREL_OFFSET);
2878 else
2879 return false;
2880
2881 base = XVECEXP (base, 0, 1);
2882 break;
2883
2884 case UNSPEC_LTREL_BASE:
2885 if (XVECLEN (base, 0) == 1)
2886 base = fake_pool_base, literal_pool = true;
2887 else
2888 base = XVECEXP (base, 0, 1);
2889 break;
2890
2891 default:
2892 return false;
2893 }
2894
2895 if (!REG_P (base) || GET_MODE (base) != Pmode)
2896 return false;
2897
2898 if (REGNO (base) == STACK_POINTER_REGNUM
2899 || REGNO (base) == FRAME_POINTER_REGNUM
2900 || ((reload_completed || reload_in_progress)
2901 && frame_pointer_needed
2902 && REGNO (base) == HARD_FRAME_POINTER_REGNUM)
2903 || REGNO (base) == ARG_POINTER_REGNUM
2904 || (flag_pic
2905 && REGNO (base) == PIC_OFFSET_TABLE_REGNUM))
2906 pointer = base_ptr = true;
2907
2908 if ((reload_completed || reload_in_progress)
2909 && base == cfun->machine->base_reg)
2910 pointer = base_ptr = literal_pool = true;
2911 }
2912
2913 /* Validate index register. */
2914 if (indx)
2915 {
2916 if (GET_CODE (indx) == UNSPEC)
2917 switch (XINT (indx, 1))
2918 {
2919 case UNSPEC_LTREF:
2920 if (!disp)
2921 disp = gen_rtx_UNSPEC (Pmode,
2922 gen_rtvec (1, XVECEXP (indx, 0, 0)),
2923 UNSPEC_LTREL_OFFSET);
2924 else
2925 return false;
2926
2927 indx = XVECEXP (indx, 0, 1);
2928 break;
2929
2930 case UNSPEC_LTREL_BASE:
2931 if (XVECLEN (indx, 0) == 1)
2932 indx = fake_pool_base, literal_pool = true;
2933 else
2934 indx = XVECEXP (indx, 0, 1);
2935 break;
2936
2937 default:
2938 return false;
2939 }
2940
2941 if (!REG_P (indx) || GET_MODE (indx) != Pmode)
2942 return false;
2943
2944 if (REGNO (indx) == STACK_POINTER_REGNUM
2945 || REGNO (indx) == FRAME_POINTER_REGNUM
2946 || ((reload_completed || reload_in_progress)
2947 && frame_pointer_needed
2948 && REGNO (indx) == HARD_FRAME_POINTER_REGNUM)
2949 || REGNO (indx) == ARG_POINTER_REGNUM
2950 || (flag_pic
2951 && REGNO (indx) == PIC_OFFSET_TABLE_REGNUM))
2952 pointer = indx_ptr = true;
2953
2954 if ((reload_completed || reload_in_progress)
2955 && indx == cfun->machine->base_reg)
2956 pointer = indx_ptr = literal_pool = true;
2957 }
2958
2959 /* Prefer to use pointer as base, not index. */
2960 if (base && indx && !base_ptr
2961 && (indx_ptr || (!REG_POINTER (base) && REG_POINTER (indx))))
2962 {
2963 rtx tmp = base;
2964 base = indx;
2965 indx = tmp;
2966 }
2967
2968 /* Validate displacement. */
2969 if (!disp)
2970 {
2971 /* If virtual registers are involved, the displacement will change later
2972 anyway as the virtual registers get eliminated. This could make a
2973 valid displacement invalid, but it is more likely to make an invalid
2974 displacement valid, because we sometimes access the register save area
2975 via negative offsets to one of those registers.
2976 Thus we don't check the displacement for validity here. If after
2977 elimination the displacement turns out to be invalid after all,
2978 this is fixed up by reload in any case. */
2979       /* LRA always keeps displacements up to date, and we need to
2980 	 know that the displacement is valid throughout LRA, not only at
2981 	 the final elimination.  */
2982 if (lra_in_progress
2983 || (base != arg_pointer_rtx
2984 && indx != arg_pointer_rtx
2985 && base != return_address_pointer_rtx
2986 && indx != return_address_pointer_rtx
2987 && base != frame_pointer_rtx
2988 && indx != frame_pointer_rtx
2989 && base != virtual_stack_vars_rtx
2990 && indx != virtual_stack_vars_rtx))
2991 if (!DISP_IN_RANGE (offset))
2992 return false;
2993 }
2994 else
2995 {
2996 /* All the special cases are pointers. */
2997 pointer = true;
2998
2999 /* In the small-PIC case, the linker converts @GOT
3000 and @GOTNTPOFF offsets to possible displacements. */
3001 if (GET_CODE (disp) == UNSPEC
3002 && (XINT (disp, 1) == UNSPEC_GOT
3003 || XINT (disp, 1) == UNSPEC_GOTNTPOFF)
3004 && flag_pic == 1)
3005 {
3006 ;
3007 }
3008
3009 /* Accept pool label offsets. */
3010 else if (GET_CODE (disp) == UNSPEC
3011 && XINT (disp, 1) == UNSPEC_POOL_OFFSET)
3012 ;
3013
3014 /* Accept literal pool references. */
3015 else if (GET_CODE (disp) == UNSPEC
3016 && XINT (disp, 1) == UNSPEC_LTREL_OFFSET)
3017 {
3018 	  /* In case CSE pulled a non-literal-pool reference out of
3019 	     the pool, we have to reject the address.  This is
3020 	     especially important when loading the GOT pointer on
3021 	     non-zarch CPUs.  In this case the literal pool contains an
3022 	     lt-relative offset to the _GLOBAL_OFFSET_TABLE_ label which
3023 	     will most likely exceed the displacement range.  */
3024 if (GET_CODE (XVECEXP (disp, 0, 0)) != SYMBOL_REF
3025 || !CONSTANT_POOL_ADDRESS_P (XVECEXP (disp, 0, 0)))
3026 return false;
3027
3028 orig_disp = gen_rtx_CONST (Pmode, disp);
3029 if (offset)
3030 {
3031 /* If we have an offset, make sure it does not
3032 exceed the size of the constant pool entry. */
3033 rtx sym = XVECEXP (disp, 0, 0);
3034 if (offset >= GET_MODE_SIZE (get_pool_mode (sym)))
3035 return false;
3036
3037 orig_disp = plus_constant (Pmode, orig_disp, offset);
3038 }
3039 }
3040
3041 else
3042 return false;
3043 }
3044
3045 if (!base && !indx)
3046 pointer = true;
3047
3048 if (out)
3049 {
3050 out->base = base;
3051 out->indx = indx;
3052 out->disp = orig_disp;
3053 out->pointer = pointer;
3054 out->literal_pool = literal_pool;
3055 }
3056
3057 return true;
3058 }
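
/* Illustrative sketch, kept out of the build: the canonical forms recognized
   above are REG, REG + CONST_INT, REG + REG and (REG + REG) + CONST_INT
   (plus the literal pool special cases).  The function name and the register
   number below are invented for this example only.  */
#if 0
static void
decompose_address_example (void)
{
  struct s390_address ad;

  /* Canonical base + displacement form: (plus (reg) (const_int 4000)).  */
  rtx base = gen_rtx_REG (Pmode, 2);
  rtx addr = gen_rtx_PLUS (Pmode, base, GEN_INT (4000));

  gcc_assert (s390_decompose_address (addr, &ad));
  gcc_assert (ad.base == base && ad.indx == NULL_RTX);
  gcc_assert (CONST_INT_P (ad.disp) && INTVAL (ad.disp) == 4000);

  /* Non-canonical forms such as (plus (const_int) (reg)) are rejected.  */
  gcc_assert (!s390_decompose_address (gen_rtx_PLUS (Pmode, GEN_INT (8),
						     base), NULL));
}
#endif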
3059
3060 /* Decompose an RTL expression OP for an address-style operand into its
3061 components, and return the base register in BASE and the offset in
3062 OFFSET. While OP looks like an address it is never supposed to be
3063 used as such.
3064
3065 Return true if OP is a valid address operand, false if not. */
3066
3067 bool
3068 s390_decompose_addrstyle_without_index (rtx op, rtx *base,
3069 HOST_WIDE_INT *offset)
3070 {
3071 rtx off = NULL_RTX;
3072
3073 /* We can have an integer constant, an address register,
3074 or a sum of the two. */
3075 if (CONST_SCALAR_INT_P (op))
3076 {
3077 off = op;
3078 op = NULL_RTX;
3079 }
3080 if (op && GET_CODE (op) == PLUS && CONST_SCALAR_INT_P (XEXP (op, 1)))
3081 {
3082 off = XEXP (op, 1);
3083 op = XEXP (op, 0);
3084 }
3085 while (op && GET_CODE (op) == SUBREG)
3086 op = SUBREG_REG (op);
3087
3088 if (op && GET_CODE (op) != REG)
3089 return false;
3090
3091 if (offset)
3092 {
3093 if (off == NULL_RTX)
3094 *offset = 0;
3095 else if (CONST_INT_P (off))
3096 *offset = INTVAL (off);
3097 else if (CONST_WIDE_INT_P (off))
3098 	/* The offset will be truncated to 12 bits anyway, so just take
3099 	   the lowest-order chunk of the wide int.  */
3100 *offset = CONST_WIDE_INT_ELT (off, 0);
3101 else
3102 gcc_unreachable ();
3103 }
3104 if (base)
3105 *base = op;
3106
3107 return true;
3108 }
3109
3110
3111 /* Return true if OP is an operand whose address contains no index register.  */
3112
3113 bool
3114 s390_legitimate_address_without_index_p (rtx op)
3115 {
3116 struct s390_address addr;
3117
3118 if (!s390_decompose_address (XEXP (op, 0), &addr))
3119 return false;
3120 if (addr.indx)
3121 return false;
3122
3123 return true;
3124 }
3125
3126
3127 /* Return TRUE if ADDR is an operand valid for a load/store relative
3128 instruction. Be aware that the alignment of the operand needs to
3129 be checked separately.
3130 Valid addresses are single references or a sum of a reference and a
3131 constant integer. Return these parts in SYMREF and ADDEND. You can
3132    pass NULL in SYMREF and/or ADDEND if you are not interested in these
3133 values. Literal pool references are *not* considered symbol
3134 references. */
3135
3136 static bool
3137 s390_loadrelative_operand_p (rtx addr, rtx *symref, HOST_WIDE_INT *addend)
3138 {
3139 HOST_WIDE_INT tmpaddend = 0;
3140
3141 if (GET_CODE (addr) == CONST)
3142 addr = XEXP (addr, 0);
3143
3144 if (GET_CODE (addr) == PLUS)
3145 {
3146 if (!CONST_INT_P (XEXP (addr, 1)))
3147 return false;
3148
3149 tmpaddend = INTVAL (XEXP (addr, 1));
3150 addr = XEXP (addr, 0);
3151 }
3152
3153 if ((GET_CODE (addr) == SYMBOL_REF && !CONSTANT_POOL_ADDRESS_P (addr))
3154 || (GET_CODE (addr) == UNSPEC
3155 && (XINT (addr, 1) == UNSPEC_GOTENT
3156 || (TARGET_CPU_ZARCH && XINT (addr, 1) == UNSPEC_PLT))))
3157 {
3158 if (symref)
3159 *symref = addr;
3160 if (addend)
3161 *addend = tmpaddend;
3162
3163 return true;
3164 }
3165 return false;
3166 }
3167
3168 /* Return true if the address in OP is valid for constraint letter C
3169    if wrapped in a MEM rtx.  Set LIT_POOL_OK to true if literal
3170    pool MEMs should be accepted.  Only the Q, R, S, T constraint
3171 letters are allowed for C. */
3172
3173 static int
3174 s390_check_qrst_address (char c, rtx op, bool lit_pool_ok)
3175 {
3176 struct s390_address addr;
3177 bool decomposed = false;
3178
3179 if (!address_operand (op, GET_MODE (op)))
3180 return 0;
3181
3182   /* This check makes sure that no symbolic addresses (except literal
3183      pool references) are accepted by the R or T constraints.  */
3184 if (s390_loadrelative_operand_p (op, NULL, NULL))
3185 return 0;
3186
3187 /* Ensure literal pool references are only accepted if LIT_POOL_OK. */
3188 if (!lit_pool_ok)
3189 {
3190 if (!s390_decompose_address (op, &addr))
3191 return 0;
3192 if (addr.literal_pool)
3193 return 0;
3194 decomposed = true;
3195 }
3196
3197 /* With reload, we sometimes get intermediate address forms that are
3198 actually invalid as-is, but we need to accept them in the most
3199 generic cases below ('R' or 'T'), since reload will in fact fix
3200 them up. LRA behaves differently here; we never see such forms,
3201 but on the other hand, we need to strictly reject every invalid
3202 address form. Perform this check right up front. */
3203 if (lra_in_progress)
3204 {
3205 if (!decomposed && !s390_decompose_address (op, &addr))
3206 return 0;
3207 decomposed = true;
3208 }
3209
3210 switch (c)
3211 {
3212 case 'Q': /* no index short displacement */
3213 if (!decomposed && !s390_decompose_address (op, &addr))
3214 return 0;
3215 if (addr.indx)
3216 return 0;
3217 if (!s390_short_displacement (addr.disp))
3218 return 0;
3219 break;
3220
3221 case 'R': /* with index short displacement */
3222 if (TARGET_LONG_DISPLACEMENT)
3223 {
3224 if (!decomposed && !s390_decompose_address (op, &addr))
3225 return 0;
3226 if (!s390_short_displacement (addr.disp))
3227 return 0;
3228 }
3229 /* Any invalid address here will be fixed up by reload,
3230 so accept it for the most generic constraint. */
3231 break;
3232
3233 case 'S': /* no index long displacement */
3234 if (!decomposed && !s390_decompose_address (op, &addr))
3235 return 0;
3236 if (addr.indx)
3237 return 0;
3238 break;
3239
3240 case 'T': /* with index long displacement */
3241 /* Any invalid address here will be fixed up by reload,
3242 so accept it for the most generic constraint. */
3243 break;
3244
3245 default:
3246 return 0;
3247 }
3248 return 1;
3249 }
3250
3251
3252 /* Evaluates constraint strings described by the regular expression
3253 ([A|B|Z](Q|R|S|T))|Y and returns 1 if OP is a valid operand for
3254 the constraint given in STR, or 0 else. */
3255
3256 int
3257 s390_mem_constraint (const char *str, rtx op)
3258 {
3259 char c = str[0];
3260
3261 switch (c)
3262 {
3263 case 'A':
3264 /* Check for offsettable variants of memory constraints. */
3265 if (!MEM_P (op) || MEM_VOLATILE_P (op))
3266 return 0;
3267 if ((reload_completed || reload_in_progress)
3268 ? !offsettable_memref_p (op) : !offsettable_nonstrict_memref_p (op))
3269 return 0;
3270 return s390_check_qrst_address (str[1], XEXP (op, 0), true);
3271 case 'B':
3272 /* Check for non-literal-pool variants of memory constraints. */
3273 if (!MEM_P (op))
3274 return 0;
3275 return s390_check_qrst_address (str[1], XEXP (op, 0), false);
3276 case 'Q':
3277 case 'R':
3278 case 'S':
3279 case 'T':
3280 if (GET_CODE (op) != MEM)
3281 return 0;
3282 return s390_check_qrst_address (c, XEXP (op, 0), true);
3283 case 'Y':
3284 /* Simply check for the basic form of a shift count. Reload will
3285 take care of making sure we have a proper base register. */
3286 if (!s390_decompose_addrstyle_without_index (op, NULL, NULL))
3287 return 0;
3288 break;
3289 case 'Z':
3290 return s390_check_qrst_address (str[1], op, true);
3291 default:
3292 return 0;
3293 }
3294 return 1;
3295 }
3296
3297
3298 /* Evaluates constraint strings starting with letter O. Input
3299    parameter C is the letter following the "O" in the constraint
3300 string. Returns 1 if VALUE meets the respective constraint and 0
3301 otherwise. */
3302
3303 int
3304 s390_O_constraint_str (const char c, HOST_WIDE_INT value)
3305 {
3306 if (!TARGET_EXTIMM)
3307 return 0;
3308
3309 switch (c)
3310 {
3311 case 's':
3312 return trunc_int_for_mode (value, SImode) == value;
3313
3314 case 'p':
3315 return value == 0
3316 || s390_single_part (GEN_INT (value), DImode, SImode, 0) == 1;
3317
3318 case 'n':
3319 return s390_single_part (GEN_INT (value - 1), DImode, SImode, -1) == 1;
3320
3321 default:
3322 gcc_unreachable ();
3323 }
3324 }
3325
3326
3327 /* Evaluates constraint strings starting with letter N. Parameter STR
3328 contains the letters following letter "N" in the constraint string.
3329 Returns true if VALUE matches the constraint. */
3330
3331 int
3332 s390_N_constraint_str (const char *str, HOST_WIDE_INT value)
3333 {
3334 machine_mode mode, part_mode;
3335 int def;
3336 int part, part_goal;
3337
3338
3339 if (str[0] == 'x')
3340 part_goal = -1;
3341 else
3342 part_goal = str[0] - '0';
3343
3344 switch (str[1])
3345 {
3346 case 'Q':
3347 part_mode = QImode;
3348 break;
3349 case 'H':
3350 part_mode = HImode;
3351 break;
3352 case 'S':
3353 part_mode = SImode;
3354 break;
3355 default:
3356 return 0;
3357 }
3358
3359 switch (str[2])
3360 {
3361 case 'H':
3362 mode = HImode;
3363 break;
3364 case 'S':
3365 mode = SImode;
3366 break;
3367 case 'D':
3368 mode = DImode;
3369 break;
3370 default:
3371 return 0;
3372 }
3373
3374 switch (str[3])
3375 {
3376 case '0':
3377 def = 0;
3378 break;
3379 case 'F':
3380 def = -1;
3381 break;
3382 default:
3383 return 0;
3384 }
3385
3386 if (GET_MODE_SIZE (mode) <= GET_MODE_SIZE (part_mode))
3387 return 0;
3388
3389 part = s390_single_part (GEN_INT (value), mode, part_mode, def);
3390 if (part < 0)
3391 return 0;
3392 if (part_goal != -1 && part_goal != part)
3393 return 0;
3394
3395 return 1;
3396 }
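
/* Illustrative sketch, kept out of the build: decoding of the letters that
   follow "N".  "3HD0" requires a DImode constant whose only HImode part
   different from 0 is part 3, i.e. the least significant halfword when the
   parts are numbered from the most significant end.  The function name below
   is invented for this example only.  */
#if 0
static void
n_constraint_example (void)
{
  /* 0x1234 has its only non-zero halfword in part 3.  */
  gcc_assert (s390_N_constraint_str ("3HD0", 0x1234) == 1);

  /* The same value fails when a different part number is requested.  */
  gcc_assert (s390_N_constraint_str ("0HD0", 0x1234) == 0);
}
#endif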
3397
3398
3399 /* Returns true if the input parameter VALUE is a float zero. */
3400
3401 int
3402 s390_float_const_zero_p (rtx value)
3403 {
3404 return (GET_MODE_CLASS (GET_MODE (value)) == MODE_FLOAT
3405 && value == CONST0_RTX (GET_MODE (value)));
3406 }
3407
3408 /* Implement TARGET_REGISTER_MOVE_COST. */
3409
3410 static int
3411 s390_register_move_cost (machine_mode mode,
3412 reg_class_t from, reg_class_t to)
3413 {
3414 /* On s390, copy between fprs and gprs is expensive. */
3415
3416 /* It becomes somewhat faster having ldgr/lgdr. */
3417 if (TARGET_Z10 && GET_MODE_SIZE (mode) == 8)
3418 {
3419 /* ldgr is single cycle. */
3420 if (reg_classes_intersect_p (from, GENERAL_REGS)
3421 && reg_classes_intersect_p (to, FP_REGS))
3422 return 1;
3423 /* lgdr needs 3 cycles. */
3424 if (reg_classes_intersect_p (to, GENERAL_REGS)
3425 && reg_classes_intersect_p (from, FP_REGS))
3426 return 3;
3427 }
3428
3429 /* Otherwise copying is done via memory. */
3430 if ((reg_classes_intersect_p (from, GENERAL_REGS)
3431 && reg_classes_intersect_p (to, FP_REGS))
3432 || (reg_classes_intersect_p (from, FP_REGS)
3433 && reg_classes_intersect_p (to, GENERAL_REGS)))
3434 return 10;
3435
3436 return 1;
3437 }
3438
3439 /* Implement TARGET_MEMORY_MOVE_COST. */
3440
3441 static int
3442 s390_memory_move_cost (machine_mode mode ATTRIBUTE_UNUSED,
3443 reg_class_t rclass ATTRIBUTE_UNUSED,
3444 bool in ATTRIBUTE_UNUSED)
3445 {
3446 return 2;
3447 }
3448
3449 /* Compute a (partial) cost for rtx X. Return true if the complete
3450 cost has been computed, and false if subexpressions should be
3451 scanned. In either case, *TOTAL contains the cost result. The
3452 initial value of *TOTAL is the default value computed by
3453 rtx_cost. It may be left unmodified. OUTER_CODE contains the
3454 code of the superexpression of x. */
3455
3456 static bool
3457 s390_rtx_costs (rtx x, machine_mode mode, int outer_code,
3458 int opno ATTRIBUTE_UNUSED,
3459 int *total, bool speed ATTRIBUTE_UNUSED)
3460 {
3461 int code = GET_CODE (x);
3462 switch (code)
3463 {
3464 case CONST:
3465 case CONST_INT:
3466 case LABEL_REF:
3467 case SYMBOL_REF:
3468 case CONST_DOUBLE:
3469 case CONST_WIDE_INT:
3470 case MEM:
3471 *total = 0;
3472 return true;
3473
3474 case SET:
3475 {
3476 /* Without this a conditional move instruction would be
3477 accounted as 3 * COSTS_N_INSNS (set, if_then_else,
3478 comparison operator). That's a bit pessimistic. */
3479
3480 if (!TARGET_Z196 || GET_CODE (SET_SRC (x)) != IF_THEN_ELSE)
3481 return false;
3482
3483 rtx cond = XEXP (SET_SRC (x), 0);
3484
3485 if (!CC_REG_P (XEXP (cond, 0)) || !CONST_INT_P (XEXP (cond, 1)))
3486 return false;
3487
3488 /* It is going to be a load/store on condition. Make it
3489 slightly more expensive than a normal load. */
3490 *total = COSTS_N_INSNS (1) + 1;
3491
3492 rtx dst = SET_DEST (x);
3493 rtx then = XEXP (SET_SRC (x), 1);
3494 rtx els = XEXP (SET_SRC (x), 2);
3495
3496 /* It is a real IF-THEN-ELSE. An additional move will be
3497 needed to implement that. */
3498 if (reload_completed
3499 && !rtx_equal_p (dst, then)
3500 && !rtx_equal_p (dst, els))
3501 *total += COSTS_N_INSNS (1) / 2;
3502
3503 /* A minor penalty for constants we cannot directly handle. */
3504 if ((CONST_INT_P (then) || CONST_INT_P (els))
3505 && (!TARGET_Z13 || MEM_P (dst)
3506 || (CONST_INT_P (then) && !satisfies_constraint_K (then))
3507 || (CONST_INT_P (els) && !satisfies_constraint_K (els))))
3508 *total += COSTS_N_INSNS (1) / 2;
3509
3510 /* A store on condition can only handle register src operands. */
3511 if (MEM_P (dst) && (!REG_P (then) || !REG_P (els)))
3512 *total += COSTS_N_INSNS (1) / 2;
3513
3514 return true;
3515 }
3516 case IOR:
3517 /* risbg */
3518 if (GET_CODE (XEXP (x, 0)) == AND
3519 && GET_CODE (XEXP (x, 1)) == ASHIFT
3520 && REG_P (XEXP (XEXP (x, 0), 0))
3521 && REG_P (XEXP (XEXP (x, 1), 0))
3522 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
3523 && CONST_INT_P (XEXP (XEXP (x, 1), 1))
3524 && (UINTVAL (XEXP (XEXP (x, 0), 1)) ==
3525 (HOST_WIDE_INT_1U << UINTVAL (XEXP (XEXP (x, 1), 1))) - 1))
3526 {
3527 *total = COSTS_N_INSNS (2);
3528 return true;
3529 }
3530
3531 /* ~AND on a 128 bit mode. This can be done using a vector
3532 instruction. */
3533 if (TARGET_VXE
3534 && GET_CODE (XEXP (x, 0)) == NOT
3535 && GET_CODE (XEXP (x, 1)) == NOT
3536 && REG_P (XEXP (XEXP (x, 0), 0))
3537 && REG_P (XEXP (XEXP (x, 1), 0))
3538 && GET_MODE_SIZE (GET_MODE (XEXP (XEXP (x, 0), 0))) == 16
3539 && s390_hard_regno_mode_ok (VR0_REGNUM,
3540 GET_MODE (XEXP (XEXP (x, 0), 0))))
3541 {
3542 *total = COSTS_N_INSNS (1);
3543 return true;
3544 }
3545 /* fallthrough */
3546 case ASHIFT:
3547 case ASHIFTRT:
3548 case LSHIFTRT:
3549 case ROTATE:
3550 case ROTATERT:
3551 case AND:
3552 case XOR:
3553 case NEG:
3554 case NOT:
3555 *total = COSTS_N_INSNS (1);
3556 return false;
3557
3558 case PLUS:
3559 case MINUS:
3560 *total = COSTS_N_INSNS (1);
3561 return false;
3562
3563 case MULT:
3564 switch (mode)
3565 {
3566 case E_SImode:
3567 {
3568 rtx left = XEXP (x, 0);
3569 rtx right = XEXP (x, 1);
3570 if (GET_CODE (right) == CONST_INT
3571 && CONST_OK_FOR_K (INTVAL (right)))
3572 *total = s390_cost->mhi;
3573 else if (GET_CODE (left) == SIGN_EXTEND)
3574 *total = s390_cost->mh;
3575 else
3576 *total = s390_cost->ms; /* msr, ms, msy */
3577 break;
3578 }
3579 case E_DImode:
3580 {
3581 rtx left = XEXP (x, 0);
3582 rtx right = XEXP (x, 1);
3583 if (TARGET_ZARCH)
3584 {
3585 if (GET_CODE (right) == CONST_INT
3586 && CONST_OK_FOR_K (INTVAL (right)))
3587 *total = s390_cost->mghi;
3588 else if (GET_CODE (left) == SIGN_EXTEND)
3589 *total = s390_cost->msgf;
3590 else
3591 *total = s390_cost->msg; /* msgr, msg */
3592 }
3593 else /* TARGET_31BIT */
3594 {
3595 if (GET_CODE (left) == SIGN_EXTEND
3596 && GET_CODE (right) == SIGN_EXTEND)
3597 /* mulsidi case: mr, m */
3598 *total = s390_cost->m;
3599 else if (GET_CODE (left) == ZERO_EXTEND
3600 && GET_CODE (right) == ZERO_EXTEND
3601 && TARGET_CPU_ZARCH)
3602 /* umulsidi case: ml, mlr */
3603 *total = s390_cost->ml;
3604 else
3605 /* Complex calculation is required. */
3606 *total = COSTS_N_INSNS (40);
3607 }
3608 break;
3609 }
3610 case E_SFmode:
3611 case E_DFmode:
3612 *total = s390_cost->mult_df;
3613 break;
3614 case E_TFmode:
3615 *total = s390_cost->mxbr;
3616 break;
3617 default:
3618 return false;
3619 }
3620 return false;
3621
3622 case FMA:
3623 switch (mode)
3624 {
3625 case E_DFmode:
3626 *total = s390_cost->madbr;
3627 break;
3628 case E_SFmode:
3629 *total = s390_cost->maebr;
3630 break;
3631 default:
3632 return false;
3633 }
3634       /* A negation of the third argument is free: FMSUB.  */
3635 if (GET_CODE (XEXP (x, 2)) == NEG)
3636 {
3637 *total += (rtx_cost (XEXP (x, 0), mode, FMA, 0, speed)
3638 + rtx_cost (XEXP (x, 1), mode, FMA, 1, speed)
3639 + rtx_cost (XEXP (XEXP (x, 2), 0), mode, FMA, 2, speed));
3640 return true;
3641 }
3642 return false;
3643
3644 case UDIV:
3645 case UMOD:
3646 if (mode == TImode) /* 128 bit division */
3647 *total = s390_cost->dlgr;
3648 else if (mode == DImode)
3649 {
3650 rtx right = XEXP (x, 1);
3651 if (GET_CODE (right) == ZERO_EXTEND) /* 64 by 32 bit division */
3652 *total = s390_cost->dlr;
3653 else /* 64 by 64 bit division */
3654 *total = s390_cost->dlgr;
3655 }
3656 else if (mode == SImode) /* 32 bit division */
3657 *total = s390_cost->dlr;
3658 return false;
3659
3660 case DIV:
3661 case MOD:
3662 if (mode == DImode)
3663 {
3664 rtx right = XEXP (x, 1);
3665 if (GET_CODE (right) == ZERO_EXTEND) /* 64 by 32 bit division */
3666 if (TARGET_ZARCH)
3667 *total = s390_cost->dsgfr;
3668 else
3669 *total = s390_cost->dr;
3670 else /* 64 by 64 bit division */
3671 *total = s390_cost->dsgr;
3672 }
3673 else if (mode == SImode) /* 32 bit division */
3674 *total = s390_cost->dlr;
3675 else if (mode == SFmode)
3676 {
3677 *total = s390_cost->debr;
3678 }
3679 else if (mode == DFmode)
3680 {
3681 *total = s390_cost->ddbr;
3682 }
3683 else if (mode == TFmode)
3684 {
3685 *total = s390_cost->dxbr;
3686 }
3687 return false;
3688
3689 case SQRT:
3690 if (mode == SFmode)
3691 *total = s390_cost->sqebr;
3692 else if (mode == DFmode)
3693 *total = s390_cost->sqdbr;
3694 else /* TFmode */
3695 *total = s390_cost->sqxbr;
3696 return false;
3697
3698 case SIGN_EXTEND:
3699 case ZERO_EXTEND:
3700 if (outer_code == MULT || outer_code == DIV || outer_code == MOD
3701 || outer_code == PLUS || outer_code == MINUS
3702 || outer_code == COMPARE)
3703 *total = 0;
3704 return false;
3705
3706 case COMPARE:
3707 *total = COSTS_N_INSNS (1);
3708 if (GET_CODE (XEXP (x, 0)) == AND
3709 && GET_CODE (XEXP (x, 1)) == CONST_INT
3710 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
3711 {
3712 rtx op0 = XEXP (XEXP (x, 0), 0);
3713 rtx op1 = XEXP (XEXP (x, 0), 1);
3714 rtx op2 = XEXP (x, 1);
3715
3716 if (memory_operand (op0, GET_MODE (op0))
3717 && s390_tm_ccmode (op1, op2, 0) != VOIDmode)
3718 return true;
3719 if (register_operand (op0, GET_MODE (op0))
3720 && s390_tm_ccmode (op1, op2, 1) != VOIDmode)
3721 return true;
3722 }
3723 return false;
3724
3725 default:
3726 return false;
3727 }
3728 }
3729
3730 /* Return the cost of an address rtx ADDR. */
3731
3732 static int
3733 s390_address_cost (rtx addr, machine_mode mode ATTRIBUTE_UNUSED,
3734 addr_space_t as ATTRIBUTE_UNUSED,
3735 bool speed ATTRIBUTE_UNUSED)
3736 {
3737 struct s390_address ad;
3738 if (!s390_decompose_address (addr, &ad))
3739 return 1000;
3740
3741 return ad.indx? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (1);
3742 }
3743
3744 /* Implement targetm.vectorize.builtin_vectorization_cost. */
3745 static int
3746 s390_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
3747 tree vectype,
3748 int misalign ATTRIBUTE_UNUSED)
3749 {
3750 switch (type_of_cost)
3751 {
3752 case scalar_stmt:
3753 case scalar_load:
3754 case scalar_store:
3755 case vector_stmt:
3756 case vector_load:
3757 case vector_store:
3758 case vector_gather_load:
3759 case vector_scatter_store:
3760 case vec_to_scalar:
3761 case scalar_to_vec:
3762 case cond_branch_not_taken:
3763 case vec_perm:
3764 case vec_promote_demote:
3765 case unaligned_load:
3766 case unaligned_store:
3767 return 1;
3768
3769 case cond_branch_taken:
3770 return 3;
3771
3772 case vec_construct:
3773 return TYPE_VECTOR_SUBPARTS (vectype) - 1;
3774
3775 default:
3776 gcc_unreachable ();
3777 }
3778 }
3779
3780 /* If OP is a SYMBOL_REF of a thread-local symbol, return its TLS mode,
3781 otherwise return 0. */
3782
3783 int
3784 tls_symbolic_operand (rtx op)
3785 {
3786 if (GET_CODE (op) != SYMBOL_REF)
3787 return 0;
3788 return SYMBOL_REF_TLS_MODEL (op);
3789 }
3790 \f
3791 /* Split DImode access register reference REG (on 64-bit) into its constituent
3792 low and high parts, and store them into LO and HI. Note that gen_lowpart/
3793 gen_highpart cannot be used as they assume all registers are word-sized,
3794 while our access registers have only half that size. */
3795
3796 void
3797 s390_split_access_reg (rtx reg, rtx *lo, rtx *hi)
3798 {
3799 gcc_assert (TARGET_64BIT);
3800 gcc_assert (ACCESS_REG_P (reg));
3801 gcc_assert (GET_MODE (reg) == DImode);
3802 gcc_assert (!(REGNO (reg) & 1));
3803
3804 *lo = gen_rtx_REG (SImode, REGNO (reg) + 1);
3805 *hi = gen_rtx_REG (SImode, REGNO (reg));
3806 }
3807
3808 /* Return true if OP contains a symbol reference */
3809
3810 bool
3811 symbolic_reference_mentioned_p (rtx op)
3812 {
3813 const char *fmt;
3814 int i;
3815
3816 if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
3817 return 1;
3818
3819 fmt = GET_RTX_FORMAT (GET_CODE (op));
3820 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
3821 {
3822 if (fmt[i] == 'E')
3823 {
3824 int j;
3825
3826 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
3827 if (symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
3828 return 1;
3829 }
3830
3831 else if (fmt[i] == 'e' && symbolic_reference_mentioned_p (XEXP (op, i)))
3832 return 1;
3833 }
3834
3835 return 0;
3836 }
3837
3838 /* Return true if OP contains a reference to a thread-local symbol. */
3839
3840 bool
3841 tls_symbolic_reference_mentioned_p (rtx op)
3842 {
3843 const char *fmt;
3844 int i;
3845
3846 if (GET_CODE (op) == SYMBOL_REF)
3847 return tls_symbolic_operand (op);
3848
3849 fmt = GET_RTX_FORMAT (GET_CODE (op));
3850 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
3851 {
3852 if (fmt[i] == 'E')
3853 {
3854 int j;
3855
3856 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
3857 if (tls_symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
3858 return true;
3859 }
3860
3861 else if (fmt[i] == 'e' && tls_symbolic_reference_mentioned_p (XEXP (op, i)))
3862 return true;
3863 }
3864
3865 return false;
3866 }
3867
3868
3869 /* Return true if OP is a legitimate general operand when
3870 generating PIC code. It is given that flag_pic is on
3871 and that OP satisfies CONSTANT_P. */
3872
3873 int
3874 legitimate_pic_operand_p (rtx op)
3875 {
3876 /* Accept all non-symbolic constants. */
3877 if (!SYMBOLIC_CONST (op))
3878 return 1;
3879
3880 /* Reject everything else; must be handled
3881 via emit_symbolic_move. */
3882 return 0;
3883 }
3884
3885 /* Returns true if the constant value OP is a legitimate general operand.
3886 It is given that OP satisfies CONSTANT_P. */
3887
3888 static bool
3889 s390_legitimate_constant_p (machine_mode mode, rtx op)
3890 {
3891 if (TARGET_VX && VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
3892 {
3893 if (GET_MODE_SIZE (mode) != 16)
3894 return 0;
3895
3896 if (!satisfies_constraint_j00 (op)
3897 && !satisfies_constraint_jm1 (op)
3898 && !satisfies_constraint_jKK (op)
3899 && !satisfies_constraint_jxx (op)
3900 && !satisfies_constraint_jyy (op))
3901 return 0;
3902 }
3903
3904 /* Accept all non-symbolic constants. */
3905 if (!SYMBOLIC_CONST (op))
3906 return 1;
3907
3908 /* Accept immediate LARL operands. */
3909 if (TARGET_CPU_ZARCH && larl_operand (op, mode))
3910 return 1;
3911
3912 /* Thread-local symbols are never legal constants. This is
3913 so that emit_call knows that computing such addresses
3914 might require a function call. */
3915 if (TLS_SYMBOLIC_CONST (op))
3916 return 0;
3917
3918 /* In the PIC case, symbolic constants must *not* be
3919 forced into the literal pool. We accept them here,
3920 so that they will be handled by emit_symbolic_move. */
3921 if (flag_pic)
3922 return 1;
3923
3924 /* All remaining non-PIC symbolic constants are
3925 forced into the literal pool. */
3926 return 0;
3927 }
3928
3929 /* Determine if it's legal to put X into the constant pool. This
3930 is not possible if X contains the address of a symbol that is
3931 not constant (TLS) or not known at final link time (PIC). */
3932
3933 static bool
3934 s390_cannot_force_const_mem (machine_mode mode, rtx x)
3935 {
3936 switch (GET_CODE (x))
3937 {
3938 case CONST_INT:
3939 case CONST_DOUBLE:
3940 case CONST_WIDE_INT:
3941 case CONST_VECTOR:
3942 /* Accept all non-symbolic constants. */
3943 return false;
3944
3945 case LABEL_REF:
3946 /* Labels are OK iff we are non-PIC. */
3947 return flag_pic != 0;
3948
3949 case SYMBOL_REF:
3950 /* 'Naked' TLS symbol references are never OK,
3951 non-TLS symbols are OK iff we are non-PIC. */
3952 if (tls_symbolic_operand (x))
3953 return true;
3954 else
3955 return flag_pic != 0;
3956
3957 case CONST:
3958 return s390_cannot_force_const_mem (mode, XEXP (x, 0));
3959 case PLUS:
3960 case MINUS:
3961 return s390_cannot_force_const_mem (mode, XEXP (x, 0))
3962 || s390_cannot_force_const_mem (mode, XEXP (x, 1));
3963
3964 case UNSPEC:
3965 switch (XINT (x, 1))
3966 {
3967 /* Only lt-relative or GOT-relative UNSPECs are OK. */
3968 case UNSPEC_LTREL_OFFSET:
3969 case UNSPEC_GOT:
3970 case UNSPEC_GOTOFF:
3971 case UNSPEC_PLTOFF:
3972 case UNSPEC_TLSGD:
3973 case UNSPEC_TLSLDM:
3974 case UNSPEC_NTPOFF:
3975 case UNSPEC_DTPOFF:
3976 case UNSPEC_GOTNTPOFF:
3977 case UNSPEC_INDNTPOFF:
3978 return false;
3979
3980 	  /* If the literal pool shares the code section, execute
3981 	     template placeholders may be put into the pool as well.  */
3982 case UNSPEC_INSN:
3983 return TARGET_CPU_ZARCH;
3984
3985 default:
3986 return true;
3987 }
3988 break;
3989
3990 default:
3991 gcc_unreachable ();
3992 }
3993 }
3994
3995 /* Returns true if the constant value OP is a legitimate general
3996 operand during and after reload. The difference to
3997 legitimate_constant_p is that this function will not accept
3998 a constant that would need to be forced to the literal pool
3999 before it can be used as operand.
4000 This function accepts all constants which can be loaded directly
4001 into a GPR. */
4002
4003 bool
4004 legitimate_reload_constant_p (rtx op)
4005 {
4006 /* Accept la(y) operands. */
4007 if (GET_CODE (op) == CONST_INT
4008 && DISP_IN_RANGE (INTVAL (op)))
4009 return true;
4010
4011 /* Accept l(g)hi/l(g)fi operands. */
4012 if (GET_CODE (op) == CONST_INT
4013 && (CONST_OK_FOR_K (INTVAL (op)) || CONST_OK_FOR_Os (INTVAL (op))))
4014 return true;
4015
4016 /* Accept lliXX operands. */
4017 if (TARGET_ZARCH
4018 && GET_CODE (op) == CONST_INT
4019 && trunc_int_for_mode (INTVAL (op), word_mode) == INTVAL (op)
4020 && s390_single_part (op, word_mode, HImode, 0) >= 0)
4021 return true;
4022
4023 if (TARGET_EXTIMM
4024 && GET_CODE (op) == CONST_INT
4025 && trunc_int_for_mode (INTVAL (op), word_mode) == INTVAL (op)
4026 && s390_single_part (op, word_mode, SImode, 0) >= 0)
4027 return true;
4028
4029 /* Accept larl operands. */
4030 if (TARGET_CPU_ZARCH
4031 && larl_operand (op, VOIDmode))
4032 return true;
4033
4034 /* Accept floating-point zero operands that fit into a single GPR. */
4035 if (GET_CODE (op) == CONST_DOUBLE
4036 && s390_float_const_zero_p (op)
4037 && GET_MODE_SIZE (GET_MODE (op)) <= UNITS_PER_WORD)
4038 return true;
4039
4040 /* Accept double-word operands that can be split. */
4041 if (GET_CODE (op) == CONST_WIDE_INT
4042 || (GET_CODE (op) == CONST_INT
4043 && trunc_int_for_mode (INTVAL (op), word_mode) != INTVAL (op)))
4044 {
4045 machine_mode dword_mode = word_mode == SImode ? DImode : TImode;
4046 rtx hi = operand_subword (op, 0, 0, dword_mode);
4047 rtx lo = operand_subword (op, 1, 0, dword_mode);
4048 return legitimate_reload_constant_p (hi)
4049 && legitimate_reload_constant_p (lo);
4050 }
4051
4052 /* Everything else cannot be handled without reload. */
4053 return false;
4054 }
4055
4056 /* Returns true if the constant value OP is a legitimate fp operand
4057 during and after reload.
4058 This function accepts all constants which can be loaded directly
4059 into an FPR. */
4060
4061 static bool
4062 legitimate_reload_fp_constant_p (rtx op)
4063 {
4064 /* Accept floating-point zero operands if the load zero instruction
4065 can be used. Prior to z196 the load fp zero instruction caused a
4066 performance penalty if the result is used as BFP number. */
4067 if (TARGET_Z196
4068 && GET_CODE (op) == CONST_DOUBLE
4069 && s390_float_const_zero_p (op))
4070 return true;
4071
4072 return false;
4073 }
4074
4075 /* Returns true if the constant value OP is a legitimate vector operand
4076 during and after reload.
4077 This function accepts all constants which can be loaded directly
4078    into a VR.  */
4079
4080 static bool
4081 legitimate_reload_vector_constant_p (rtx op)
4082 {
4083 if (TARGET_VX && GET_MODE_SIZE (GET_MODE (op)) == 16
4084 && (satisfies_constraint_j00 (op)
4085 || satisfies_constraint_jm1 (op)
4086 || satisfies_constraint_jKK (op)
4087 || satisfies_constraint_jxx (op)
4088 || satisfies_constraint_jyy (op)))
4089 return true;
4090
4091 return false;
4092 }
4093
4094 /* Given an rtx OP being reloaded into a reg required to be in class RCLASS,
4095 return the class of reg to actually use. */
4096
4097 static reg_class_t
4098 s390_preferred_reload_class (rtx op, reg_class_t rclass)
4099 {
4100 switch (GET_CODE (op))
4101 {
4102 /* Constants we cannot reload into general registers
4103 must be forced into the literal pool. */
4104 case CONST_VECTOR:
4105 case CONST_DOUBLE:
4106 case CONST_INT:
4107 case CONST_WIDE_INT:
4108 if (reg_class_subset_p (GENERAL_REGS, rclass)
4109 && legitimate_reload_constant_p (op))
4110 return GENERAL_REGS;
4111 else if (reg_class_subset_p (ADDR_REGS, rclass)
4112 && legitimate_reload_constant_p (op))
4113 return ADDR_REGS;
4114 else if (reg_class_subset_p (FP_REGS, rclass)
4115 && legitimate_reload_fp_constant_p (op))
4116 return FP_REGS;
4117 else if (reg_class_subset_p (VEC_REGS, rclass)
4118 && legitimate_reload_vector_constant_p (op))
4119 return VEC_REGS;
4120
4121 return NO_REGS;
4122
4123 /* If a symbolic constant or a PLUS is reloaded,
4124 it is most likely being used as an address, so
4125 	 prefer ADDR_REGS.  If RCLASS is not a superset
4126 of ADDR_REGS, e.g. FP_REGS, reject this reload. */
4127 case CONST:
4128 /* Symrefs cannot be pushed into the literal pool with -fPIC
4129 so we *MUST NOT* return NO_REGS for these cases
4130 (s390_cannot_force_const_mem will return true).
4131
4132 On the other hand we MUST return NO_REGS for symrefs with
4133 invalid addend which might have been pushed to the literal
4134 pool (no -fPIC). Usually we would expect them to be
4135 handled via secondary reload but this does not happen if
4136 they are used as literal pool slot replacement in reload
4137 inheritance (see emit_input_reload_insns). */
4138 if (TARGET_CPU_ZARCH
4139 && GET_CODE (XEXP (op, 0)) == PLUS
4140 && GET_CODE (XEXP (XEXP(op, 0), 0)) == SYMBOL_REF
4141 && GET_CODE (XEXP (XEXP(op, 0), 1)) == CONST_INT)
4142 {
4143 if (flag_pic && reg_class_subset_p (ADDR_REGS, rclass))
4144 return ADDR_REGS;
4145 else
4146 return NO_REGS;
4147 }
4148 /* fallthrough */
4149 case LABEL_REF:
4150 case SYMBOL_REF:
4151 if (!legitimate_reload_constant_p (op))
4152 return NO_REGS;
4153 /* fallthrough */
4154 case PLUS:
4155 /* load address will be used. */
4156 if (reg_class_subset_p (ADDR_REGS, rclass))
4157 return ADDR_REGS;
4158 else
4159 return NO_REGS;
4160
4161 default:
4162 break;
4163 }
4164
4165 return rclass;
4166 }
4167
4168 /* Return true if ADDR is SYMBOL_REF + addend with addend being a
4169 multiple of ALIGNMENT and the SYMBOL_REF being naturally
4170 aligned. */
4171
4172 bool
4173 s390_check_symref_alignment (rtx addr, HOST_WIDE_INT alignment)
4174 {
4175 HOST_WIDE_INT addend;
4176 rtx symref;
4177
4178 /* The "required alignment" might be 0 (e.g. for certain structs
4179 accessed via BLKmode). Early abort in this case, as well as when
4180 an alignment > 8 is required. */
4181 if (alignment < 2 || alignment > 8)
4182 return false;
4183
4184 if (!s390_loadrelative_operand_p (addr, &symref, &addend))
4185 return false;
4186
4187 if (addend & (alignment - 1))
4188 return false;
4189
4190 if (GET_CODE (symref) == SYMBOL_REF)
4191 {
4192 /* We have load-relative instructions for 2-byte, 4-byte, and
4193 8-byte alignment so allow only these. */
4194 switch (alignment)
4195 {
4196 case 8: return !SYMBOL_FLAG_NOTALIGN8_P (symref);
4197 case 4: return !SYMBOL_FLAG_NOTALIGN4_P (symref);
4198 case 2: return !SYMBOL_FLAG_NOTALIGN2_P (symref);
4199 default: return false;
4200 }
4201 }
4202
4203 if (GET_CODE (symref) == UNSPEC
4204 && alignment <= UNITS_PER_LONG)
4205 return true;
4206
4207 return false;
4208 }
4209
4210 /* ADDR is moved into REG using larl.  If ADDR isn't a valid larl
4211    operand, SCRATCH is used to load the even part of the address,
4212    which is then incremented by one into REG.  */
4213
4214 void
4215 s390_reload_larl_operand (rtx reg, rtx addr, rtx scratch)
4216 {
4217 HOST_WIDE_INT addend;
4218 rtx symref;
4219
4220 if (!s390_loadrelative_operand_p (addr, &symref, &addend))
4221 gcc_unreachable ();
4222
4223 if (!(addend & 1))
4224 /* Easy case. The addend is even so larl will do fine. */
4225 emit_move_insn (reg, addr);
4226 else
4227 {
4228 /* We can leave the scratch register untouched if the target
4229 register is a valid base register. */
4230 if (REGNO (reg) < FIRST_PSEUDO_REGISTER
4231 && REGNO_REG_CLASS (REGNO (reg)) == ADDR_REGS)
4232 scratch = reg;
4233
4234 gcc_assert (REGNO (scratch) < FIRST_PSEUDO_REGISTER);
4235 gcc_assert (REGNO_REG_CLASS (REGNO (scratch)) == ADDR_REGS);
4236
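      /* For example, for sym+5 we load the even address sym+4 (a valid
	 larl operand) into SCRATCH and add 1 via la below.  */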
4237 if (addend != 1)
4238 emit_move_insn (scratch,
4239 gen_rtx_CONST (Pmode,
4240 gen_rtx_PLUS (Pmode, symref,
4241 GEN_INT (addend - 1))));
4242 else
4243 emit_move_insn (scratch, symref);
4244
4245 /* Increment the address using la in order to avoid clobbering cc. */
4246 s390_load_address (reg, gen_rtx_PLUS (Pmode, scratch, const1_rtx));
4247 }
4248 }
4249
4250 /* Generate what is necessary to move between REG and MEM using
4251 SCRATCH. The direction is given by TOMEM. */
4252
4253 void
4254 s390_reload_symref_address (rtx reg, rtx mem, rtx scratch, bool tomem)
4255 {
4256 /* Reload might have pulled a constant out of the literal pool.
4257 Force it back in. */
4258 if (CONST_INT_P (mem) || GET_CODE (mem) == CONST_DOUBLE
4259 || GET_CODE (mem) == CONST_WIDE_INT
4260 || GET_CODE (mem) == CONST_VECTOR
4261 || GET_CODE (mem) == CONST)
4262 mem = force_const_mem (GET_MODE (reg), mem);
4263
4264 gcc_assert (MEM_P (mem));
4265
4266 /* For a load from memory we can leave the scratch register
4267 untouched if the target register is a valid base register. */
4268 if (!tomem
4269 && REGNO (reg) < FIRST_PSEUDO_REGISTER
4270 && REGNO_REG_CLASS (REGNO (reg)) == ADDR_REGS
4271 && GET_MODE (reg) == GET_MODE (scratch))
4272 scratch = reg;
4273
4274 /* Load address into scratch register. Since we can't have a
4275 secondary reload for a secondary reload we have to cover the case
4276 where larl would need a secondary reload here as well. */
4277 s390_reload_larl_operand (scratch, XEXP (mem, 0), scratch);
4278
4279 /* Now we can use a standard load/store to do the move. */
4280 if (tomem)
4281 emit_move_insn (replace_equiv_address (mem, scratch), reg);
4282 else
4283 emit_move_insn (reg, replace_equiv_address (mem, scratch));
4284 }
4285
4286 /* Inform reload about cases where moving X with a mode MODE to a register in
4287 RCLASS requires an extra scratch or immediate register. Return the class
4288 needed for the immediate register. */
4289
4290 static reg_class_t
4291 s390_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
4292 machine_mode mode, secondary_reload_info *sri)
4293 {
4294 enum reg_class rclass = (enum reg_class) rclass_i;
4295
4296 /* Intermediate register needed. */
4297 if (reg_classes_intersect_p (CC_REGS, rclass))
4298 return GENERAL_REGS;
4299
4300 if (TARGET_VX)
4301 {
4302 /* The vst/vl vector move instructions allow only for short
4303 displacements. */
4304 if (MEM_P (x)
4305 && GET_CODE (XEXP (x, 0)) == PLUS
4306 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
4307 && !SHORT_DISP_IN_RANGE(INTVAL (XEXP (XEXP (x, 0), 1)))
4308 && reg_class_subset_p (rclass, VEC_REGS)
4309 && (!reg_class_subset_p (rclass, FP_REGS)
4310 || (GET_MODE_SIZE (mode) > 8
4311 && s390_class_max_nregs (FP_REGS, mode) == 1)))
4312 {
4313 if (in_p)
4314 sri->icode = (TARGET_64BIT ?
4315 CODE_FOR_reloaddi_la_in :
4316 CODE_FOR_reloadsi_la_in);
4317 else
4318 sri->icode = (TARGET_64BIT ?
4319 CODE_FOR_reloaddi_la_out :
4320 CODE_FOR_reloadsi_la_out);
4321 }
4322 }
4323
4324 if (TARGET_Z10)
4325 {
4326 HOST_WIDE_INT offset;
4327 rtx symref;
4328
4329 /* On z10 several optimizer steps may generate larl operands with
4330 an odd addend. */
4331 if (in_p
4332 && s390_loadrelative_operand_p (x, &symref, &offset)
4333 && mode == Pmode
4334 && !SYMBOL_FLAG_NOTALIGN2_P (symref)
4335 && (offset & 1) == 1)
4336 sri->icode = ((mode == DImode) ? CODE_FOR_reloaddi_larl_odd_addend_z10
4337 : CODE_FOR_reloadsi_larl_odd_addend_z10);
4338
4339 /* Handle all the (mem (symref)) accesses we cannot use the z10
4340 instructions for. */
4341 if (MEM_P (x)
4342 && s390_loadrelative_operand_p (XEXP (x, 0), NULL, NULL)
4343 && (mode == QImode
4344 || !reg_class_subset_p (rclass, GENERAL_REGS)
4345 || GET_MODE_SIZE (mode) > UNITS_PER_WORD
4346 || !s390_check_symref_alignment (XEXP (x, 0),
4347 GET_MODE_SIZE (mode))))
4348 {
4349 #define __SECONDARY_RELOAD_CASE(M,m) \
4350 case E_##M##mode: \
4351 if (TARGET_64BIT) \
4352 sri->icode = in_p ? CODE_FOR_reload##m##di_toreg_z10 : \
4353 CODE_FOR_reload##m##di_tomem_z10; \
4354 else \
4355 sri->icode = in_p ? CODE_FOR_reload##m##si_toreg_z10 : \
4356 CODE_FOR_reload##m##si_tomem_z10; \
4357 break;
4358
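	  /* Dispatch on the mode of X to the matching z10 secondary
	     reload pattern.  */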
4359 switch (GET_MODE (x))
4360 {
4361 __SECONDARY_RELOAD_CASE (QI, qi);
4362 __SECONDARY_RELOAD_CASE (HI, hi);
4363 __SECONDARY_RELOAD_CASE (SI, si);
4364 __SECONDARY_RELOAD_CASE (DI, di);
4365 __SECONDARY_RELOAD_CASE (TI, ti);
4366 __SECONDARY_RELOAD_CASE (SF, sf);
4367 __SECONDARY_RELOAD_CASE (DF, df);
4368 __SECONDARY_RELOAD_CASE (TF, tf);
4369 __SECONDARY_RELOAD_CASE (SD, sd);
4370 __SECONDARY_RELOAD_CASE (DD, dd);
4371 __SECONDARY_RELOAD_CASE (TD, td);
4372 __SECONDARY_RELOAD_CASE (V1QI, v1qi);
4373 __SECONDARY_RELOAD_CASE (V2QI, v2qi);
4374 __SECONDARY_RELOAD_CASE (V4QI, v4qi);
4375 __SECONDARY_RELOAD_CASE (V8QI, v8qi);
4376 __SECONDARY_RELOAD_CASE (V16QI, v16qi);
4377 __SECONDARY_RELOAD_CASE (V1HI, v1hi);
4378 __SECONDARY_RELOAD_CASE (V2HI, v2hi);
4379 __SECONDARY_RELOAD_CASE (V4HI, v4hi);
4380 __SECONDARY_RELOAD_CASE (V8HI, v8hi);
4381 __SECONDARY_RELOAD_CASE (V1SI, v1si);
4382 __SECONDARY_RELOAD_CASE (V2SI, v2si);
4383 __SECONDARY_RELOAD_CASE (V4SI, v4si);
4384 __SECONDARY_RELOAD_CASE (V1DI, v1di);
4385 __SECONDARY_RELOAD_CASE (V2DI, v2di);
4386 __SECONDARY_RELOAD_CASE (V1TI, v1ti);
4387 __SECONDARY_RELOAD_CASE (V1SF, v1sf);
4388 __SECONDARY_RELOAD_CASE (V2SF, v2sf);
4389 __SECONDARY_RELOAD_CASE (V4SF, v4sf);
4390 __SECONDARY_RELOAD_CASE (V1DF, v1df);
4391 __SECONDARY_RELOAD_CASE (V2DF, v2df);
4392 __SECONDARY_RELOAD_CASE (V1TF, v1tf);
4393 default:
4394 gcc_unreachable ();
4395 }
4396 #undef __SECONDARY_RELOAD_CASE
4397 }
4398 }
4399
4400 /* We need a scratch register when loading a PLUS expression which
4401 is not a legitimate operand of the LOAD ADDRESS instruction. */
4402   /* LRA can deal with the transformation of a PLUS op very well --
4403      so we don't need to prompt LRA in this case.  */
4404 if (! lra_in_progress && in_p && s390_plus_operand (x, mode))
4405 sri->icode = (TARGET_64BIT ?
4406 CODE_FOR_reloaddi_plus : CODE_FOR_reloadsi_plus);
4407
4408   /* When performing a multiword move from or to memory, we have to make
4409      sure the second chunk in memory is addressable without causing a
4410      displacement overflow.  If that would be the case, we calculate the
4411      address in a scratch register.  */
4412 if (MEM_P (x)
4413 && GET_CODE (XEXP (x, 0)) == PLUS
4414 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
4415 && !DISP_IN_RANGE (INTVAL (XEXP (XEXP (x, 0), 1))
4416 + GET_MODE_SIZE (mode) - 1))
4417 {
4418 /* For GENERAL_REGS a displacement overflow is no problem if occurring
4419 	 in an s_operand address since we may fall back to lm/stm.  So we only
4420 have to care about overflows in the b+i+d case. */
4421 if ((reg_classes_intersect_p (GENERAL_REGS, rclass)
4422 && s390_class_max_nregs (GENERAL_REGS, mode) > 1
4423 && GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS)
4424 /* For FP_REGS no lm/stm is available so this check is triggered
4425 for displacement overflows in b+i+d and b+d like addresses. */
4426 || (reg_classes_intersect_p (FP_REGS, rclass)
4427 && s390_class_max_nregs (FP_REGS, mode) > 1))
4428 {
4429 if (in_p)
4430 sri->icode = (TARGET_64BIT ?
4431 CODE_FOR_reloaddi_la_in :
4432 CODE_FOR_reloadsi_la_in);
4433 else
4434 sri->icode = (TARGET_64BIT ?
4435 CODE_FOR_reloaddi_la_out :
4436 CODE_FOR_reloadsi_la_out);
4437 }
4438 }
4439
4440 /* A scratch address register is needed when a symbolic constant is
4441    copied to r0 when compiling with -fPIC.  In other cases the target
4442 register might be used as temporary (see legitimize_pic_address). */
4443 if (in_p && SYMBOLIC_CONST (x) && flag_pic == 2 && rclass != ADDR_REGS)
4444 sri->icode = (TARGET_64BIT ?
4445 CODE_FOR_reloaddi_PIC_addr :
4446 CODE_FOR_reloadsi_PIC_addr);
4447
4448 /* Either scratch or no register needed. */
4449 return NO_REGS;
4450 }
4451
4452 /* Implement TARGET_SECONDARY_MEMORY_NEEDED.
4453
4454 We need secondary memory to move data between GPRs and FPRs.
4455
4456    - With DFP the ldgr/lgdr instructions are available.  Due to the
4457      different alignment we cannot use them for SFmode.  For 31 bit a
4458      64 bit value in a GPR would be a register pair, so here we still
4459      need to go via memory.
4460
4461    - With z13 we can do the SF/SImode moves with vlgvf.  Due to the
4462      overlapping of FPRs and VRs we still disallow TF/TD modes to be
4463      in full VRs, so as before we also do these moves via memory
4464      on z13.
4465
4466 FIXME: Should we try splitting it into two vlgvg's/vlvg's instead? */
4467
4468 static bool
4469 s390_secondary_memory_needed (machine_mode mode,
4470 reg_class_t class1, reg_class_t class2)
4471 {
4472 return (((reg_classes_intersect_p (class1, VEC_REGS)
4473 && reg_classes_intersect_p (class2, GENERAL_REGS))
4474 || (reg_classes_intersect_p (class1, GENERAL_REGS)
4475 && reg_classes_intersect_p (class2, VEC_REGS)))
4476 && (!TARGET_DFP || !TARGET_64BIT || GET_MODE_SIZE (mode) != 8)
4477 && (!TARGET_VX || (SCALAR_FLOAT_MODE_P (mode)
4478 && GET_MODE_SIZE (mode) > 8)));
4479 }
4480
4481 /* Implement TARGET_SECONDARY_MEMORY_NEEDED_MODE.
4482
4483 get_secondary_mem widens its argument to BITS_PER_WORD which loses on 64bit
4484 because the movsi and movsf patterns don't handle r/f moves. */
4485
4486 static machine_mode
4487 s390_secondary_memory_needed_mode (machine_mode mode)
4488 {
4489 if (GET_MODE_BITSIZE (mode) < 32)
4490 return mode_for_size (32, GET_MODE_CLASS (mode), 0).require ();
4491 return mode;
4492 }
4493
4494 /* Generate code to load SRC, which is PLUS that is not a
4495 legitimate operand for the LA instruction, into TARGET.
4496 SCRATCH may be used as scratch register. */
4497
4498 void
4499 s390_expand_plus_operand (rtx target, rtx src,
4500 rtx scratch)
4501 {
4502 rtx sum1, sum2;
4503 struct s390_address ad;
4504
4505 /* src must be a PLUS; get its two operands. */
4506 gcc_assert (GET_CODE (src) == PLUS);
4507 gcc_assert (GET_MODE (src) == Pmode);
4508
4509 /* Check if any of the two operands is already scheduled
4510 for replacement by reload. This can happen e.g. when
4511 float registers occur in an address. */
4512 sum1 = find_replacement (&XEXP (src, 0));
4513 sum2 = find_replacement (&XEXP (src, 1));
4514 src = gen_rtx_PLUS (Pmode, sum1, sum2);
4515
4516 /* If the address is already strictly valid, there's nothing to do. */
4517 if (!s390_decompose_address (src, &ad)
4518 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
4519 || (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx))))
4520 {
4521 /* Otherwise, one of the operands cannot be an address register;
4522 we reload its value into the scratch register. */
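      /* Hard registers 1 .. 15 are the general registers that can serve
	 as base or index; r0 cannot be used as an address register.  */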
4523 if (true_regnum (sum1) < 1 || true_regnum (sum1) > 15)
4524 {
4525 emit_move_insn (scratch, sum1);
4526 sum1 = scratch;
4527 }
4528 if (true_regnum (sum2) < 1 || true_regnum (sum2) > 15)
4529 {
4530 emit_move_insn (scratch, sum2);
4531 sum2 = scratch;
4532 }
4533
4534 /* According to the way these invalid addresses are generated
4535 in reload.c, it should never happen (at least on s390) that
4536 *neither* of the PLUS components, after find_replacements
4537 was applied, is an address register. */
4538 if (sum1 == scratch && sum2 == scratch)
4539 {
4540 debug_rtx (src);
4541 gcc_unreachable ();
4542 }
4543
4544 src = gen_rtx_PLUS (Pmode, sum1, sum2);
4545 }
4546
4547 /* Emit the LOAD ADDRESS pattern. Note that reload of PLUS
4548 is only ever performed on addresses, so we can mark the
4549 sum as legitimate for LA in any case. */
4550 s390_load_address (target, src);
4551 }
4552
4553
4554 /* Return true if ADDR is a valid memory address.
4555 STRICT specifies whether strict register checking applies. */
4556
4557 static bool
4558 s390_legitimate_address_p (machine_mode mode, rtx addr, bool strict)
4559 {
4560 struct s390_address ad;
4561
4562 if (TARGET_Z10
4563 && larl_operand (addr, VOIDmode)
4564 && (mode == VOIDmode
4565 || s390_check_symref_alignment (addr, GET_MODE_SIZE (mode))))
4566 return true;
4567
4568 if (!s390_decompose_address (addr, &ad))
4569 return false;
4570
4571 if (strict)
4572 {
4573 if (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
4574 return false;
4575
4576 if (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx)))
4577 return false;
4578 }
4579 else
4580 {
4581 if (ad.base
4582 && !(REGNO (ad.base) >= FIRST_PSEUDO_REGISTER
4583 || REGNO_REG_CLASS (REGNO (ad.base)) == ADDR_REGS))
4584 return false;
4585
4586 if (ad.indx
4587 && !(REGNO (ad.indx) >= FIRST_PSEUDO_REGISTER
4588 || REGNO_REG_CLASS (REGNO (ad.indx)) == ADDR_REGS))
4589 return false;
4590 }
4591 return true;
4592 }
4593
4594 /* Return true if OP is a valid operand for the LA instruction.
4595 In 31-bit, we need to prove that the result is used as an
4596 address, as LA performs only a 31-bit addition. */
4597
4598 bool
4599 legitimate_la_operand_p (rtx op)
4600 {
4601 struct s390_address addr;
4602 if (!s390_decompose_address (op, &addr))
4603 return false;
4604
4605 return (TARGET_64BIT || addr.pointer);
4606 }
4607
4608 /* Return true if it is valid *and* preferable to use LA to
4609 compute the sum of OP1 and OP2. */
4610
4611 bool
4612 preferred_la_operand_p (rtx op1, rtx op2)
4613 {
4614 struct s390_address addr;
4615
4616 if (op2 != const0_rtx)
4617 op1 = gen_rtx_PLUS (Pmode, op1, op2);
4618
4619 if (!s390_decompose_address (op1, &addr))
4620 return false;
4621 if (addr.base && !REGNO_OK_FOR_BASE_P (REGNO (addr.base)))
4622 return false;
4623 if (addr.indx && !REGNO_OK_FOR_INDEX_P (REGNO (addr.indx)))
4624 return false;
4625
4626 /* Avoid LA instructions with index register on z196; it is
4627 preferable to use regular add instructions when possible.
4628 Starting with zEC12 the la with index register is "uncracked"
4629 again. */
4630 if (addr.indx && s390_tune == PROCESSOR_2817_Z196)
4631 return false;
4632
4633 if (!TARGET_64BIT && !addr.pointer)
4634 return false;
4635
4636 if (addr.pointer)
4637 return true;
4638
4639 if ((addr.base && REG_P (addr.base) && REG_POINTER (addr.base))
4640 || (addr.indx && REG_P (addr.indx) && REG_POINTER (addr.indx)))
4641 return true;
4642
4643 return false;
4644 }
4645
4646 /* Emit a forced load-address operation to load SRC into DST.
4647 This will use the LOAD ADDRESS instruction even in situations
4648 where legitimate_la_operand_p (SRC) returns false. */
4649
4650 void
4651 s390_load_address (rtx dst, rtx src)
4652 {
4653 if (TARGET_64BIT)
4654 emit_move_insn (dst, src);
4655 else
4656 emit_insn (gen_force_la_31 (dst, src));
4657 }
4658
4659 /* Return true if it is ok to use SYMBOL_REF in a relative address.  */
4660
4661 bool
4662 s390_rel_address_ok_p (rtx symbol_ref)
4663 {
4664 tree decl;
4665
4666 if (symbol_ref == s390_got_symbol () || CONSTANT_POOL_ADDRESS_P (symbol_ref))
4667 return true;
4668
4669 decl = SYMBOL_REF_DECL (symbol_ref);
4670
4671 if (!flag_pic || SYMBOL_REF_LOCAL_P (symbol_ref))
4672 return (s390_pic_data_is_text_relative
4673 || (decl
4674 && TREE_CODE (decl) == FUNCTION_DECL));
4675
4676 return false;
4677 }
4678
4679 /* Return a legitimate reference for ORIG (an address) using the
4680 register REG. If REG is 0, a new pseudo is generated.
4681
4682 There are two types of references that must be handled:
4683
4684 1. Global data references must load the address from the GOT, via
4685 the PIC reg. An insn is emitted to do this load, and the reg is
4686 returned.
4687
4688 2. Static data references, constant pool addresses, and code labels
4689 compute the address as an offset from the GOT, whose base is in
4690 the PIC reg. Static data objects have SYMBOL_FLAG_LOCAL set to
4691 differentiate them from global data objects. The returned
4692 address is the PIC reg + an unspec constant.
4693
4694 TARGET_LEGITIMIZE_ADDRESS_P rejects symbolic references unless the PIC
4695 reg also appears in the address. */
4696
4697 rtx
4698 legitimize_pic_address (rtx orig, rtx reg)
4699 {
4700 rtx addr = orig;
4701 rtx addend = const0_rtx;
4702 rtx new_rtx = orig;
4703
4704 gcc_assert (!TLS_SYMBOLIC_CONST (addr));
4705
4706 if (GET_CODE (addr) == CONST)
4707 addr = XEXP (addr, 0);
4708
4709 if (GET_CODE (addr) == PLUS)
4710 {
4711 addend = XEXP (addr, 1);
4712 addr = XEXP (addr, 0);
4713 }
4714
4715 if ((GET_CODE (addr) == LABEL_REF
4716 || (SYMBOL_REF_P (addr) && s390_rel_address_ok_p (addr))
4717 || (GET_CODE (addr) == UNSPEC &&
4718 (XINT (addr, 1) == UNSPEC_GOTENT
4719 || (TARGET_CPU_ZARCH && XINT (addr, 1) == UNSPEC_PLT))))
4720 && GET_CODE (addend) == CONST_INT)
4721 {
4722 /* This can be locally addressed. */
4723
4724 /* larl_operand requires UNSPECs to be wrapped in a const rtx. */
4725 rtx const_addr = (GET_CODE (addr) == UNSPEC ?
4726 gen_rtx_CONST (Pmode, addr) : addr);
4727
4728 if (TARGET_CPU_ZARCH
4729 && larl_operand (const_addr, VOIDmode)
4730 && INTVAL (addend) < HOST_WIDE_INT_1 << 31
4731 && INTVAL (addend) >= -(HOST_WIDE_INT_1 << 31))
4732 {
4733 if (INTVAL (addend) & 1)
4734 {
4735 /* LARL can't handle odd offsets, so emit a pair of LARL
4736 and LA. */
4737 rtx temp = reg? reg : gen_reg_rtx (Pmode);
4738
4739 if (!DISP_IN_RANGE (INTVAL (addend)))
4740 {
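		  /* Fold the even part of the addend into the larl
		     target so that the remaining addend of 1 fits the
		     la displacement used below.  */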
4741 HOST_WIDE_INT even = INTVAL (addend) - 1;
4742 addr = gen_rtx_PLUS (Pmode, addr, GEN_INT (even));
4743 addr = gen_rtx_CONST (Pmode, addr);
4744 addend = const1_rtx;
4745 }
4746
4747 emit_move_insn (temp, addr);
4748 new_rtx = gen_rtx_PLUS (Pmode, temp, addend);
4749
4750 if (reg != 0)
4751 {
4752 s390_load_address (reg, new_rtx);
4753 new_rtx = reg;
4754 }
4755 }
4756 else
4757 {
4758 /* If the offset is even, we can just use LARL. This
4759 will happen automatically. */
4760 }
4761 }
4762 else
4763 {
4764 /* No larl - Access local symbols relative to the GOT. */
4765
4766 rtx temp = reg? reg : gen_reg_rtx (Pmode);
4767
4768 if (reload_in_progress || reload_completed)
4769 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
4770
4771 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
4772 if (addend != const0_rtx)
4773 addr = gen_rtx_PLUS (Pmode, addr, addend);
4774 addr = gen_rtx_CONST (Pmode, addr);
4775 addr = force_const_mem (Pmode, addr);
4776 emit_move_insn (temp, addr);
4777
4778 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
4779 if (reg != 0)
4780 {
4781 s390_load_address (reg, new_rtx);
4782 new_rtx = reg;
4783 }
4784 }
4785 }
4786 else if (GET_CODE (addr) == SYMBOL_REF && addend == const0_rtx)
4787 {
4788 /* A non-local symbol reference without addend.
4789
4790 The symbol ref is wrapped into an UNSPEC to make sure the
4791 proper operand modifier (@GOT or @GOTENT) will be emitted.
4792 This will tell the linker to put the symbol into the GOT.
4793
4794 Additionally the code dereferencing the GOT slot is emitted here.
4795
4796 An addend to the symref needs to be added afterwards.
4797 legitimize_pic_address calls itself recursively to handle
4798 that case. So no need to do it here. */
4799
4800 if (reg == 0)
4801 reg = gen_reg_rtx (Pmode);
4802
4803 if (TARGET_Z10)
4804 {
4805 /* Use load relative if possible.
4806 lgrl <target>, sym@GOTENT */
4807 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTENT);
4808 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4809 new_rtx = gen_const_mem (GET_MODE (reg), new_rtx);
4810
4811 emit_move_insn (reg, new_rtx);
4812 new_rtx = reg;
4813 }
4814 else if (flag_pic == 1)
4815 {
4816 /* Assume GOT offset is a valid displacement operand (< 4k
4817 or < 512k with z990). This is handled the same way in
4818 both 31- and 64-bit code (@GOT).
4819 lg <target>, sym@GOT(r12) */
4820
4821 if (reload_in_progress || reload_completed)
4822 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
4823
4824 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
4825 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4826 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
4827 new_rtx = gen_const_mem (Pmode, new_rtx);
4828 emit_move_insn (reg, new_rtx);
4829 new_rtx = reg;
4830 }
4831 else if (TARGET_CPU_ZARCH)
4832 {
4833 /* If the GOT offset might be >= 4k, we determine the position
4834 of the GOT entry via a PC-relative LARL (@GOTENT).
4835 larl temp, sym@GOTENT
4836 lg <target>, 0(temp) */
4837
4838 rtx temp = reg ? reg : gen_reg_rtx (Pmode);
4839
4840 gcc_assert (REGNO (temp) >= FIRST_PSEUDO_REGISTER
4841 || REGNO_REG_CLASS (REGNO (temp)) == ADDR_REGS);
4842
4843 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTENT);
4844 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4845 emit_move_insn (temp, new_rtx);
4846
4847 new_rtx = gen_const_mem (Pmode, temp);
4848 emit_move_insn (reg, new_rtx);
4849
4850 new_rtx = reg;
4851 }
4852 else
4853 {
4854 /* If the GOT offset might be >= 4k, we have to load it
4855 from the literal pool (@GOT).
4856
4857 lg temp, lit-litbase(r13)
4858 lg <target>, 0(temp)
4859 lit: .long sym@GOT */
4860
4861 rtx temp = reg ? reg : gen_reg_rtx (Pmode);
4862
4863 gcc_assert (REGNO (temp) >= FIRST_PSEUDO_REGISTER
4864 || REGNO_REG_CLASS (REGNO (temp)) == ADDR_REGS);
4865
4866 if (reload_in_progress || reload_completed)
4867 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
4868
4869 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
4870 addr = gen_rtx_CONST (Pmode, addr);
4871 addr = force_const_mem (Pmode, addr);
4872 emit_move_insn (temp, addr);
4873
4874 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
4875 new_rtx = gen_const_mem (Pmode, new_rtx);
4876 emit_move_insn (reg, new_rtx);
4877 new_rtx = reg;
4878 }
4879 }
4880 else if (GET_CODE (addr) == UNSPEC && GET_CODE (addend) == CONST_INT)
4881 {
4882 gcc_assert (XVECLEN (addr, 0) == 1);
4883 switch (XINT (addr, 1))
4884 {
4885 	/* These UNSPECs address symbols (or PLT slots) relative to the
4886 	   GOT (not GOT slots!).  In general this will exceed the
4887 	   displacement range, so these values belong in the literal
4888 	   pool.  */
4889 case UNSPEC_GOTOFF:
4890 case UNSPEC_PLTOFF:
4891 new_rtx = force_const_mem (Pmode, orig);
4892 break;
4893
4894 /* For -fPIC the GOT size might exceed the displacement
4895 range so make sure the value is in the literal pool. */
4896 case UNSPEC_GOT:
4897 if (flag_pic == 2)
4898 new_rtx = force_const_mem (Pmode, orig);
4899 break;
4900
4901 /* For @GOTENT larl is used. This is handled like local
4902 symbol refs. */
4903 case UNSPEC_GOTENT:
4904 gcc_unreachable ();
4905 break;
4906
4907 	/* @PLT is OK as is on 64-bit but must be converted to
4908 GOT-relative @PLTOFF on 31-bit. */
4909 case UNSPEC_PLT:
4910 if (!TARGET_CPU_ZARCH)
4911 {
4912 rtx temp = reg? reg : gen_reg_rtx (Pmode);
4913
4914 if (reload_in_progress || reload_completed)
4915 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
4916
4917 addr = XVECEXP (addr, 0, 0);
4918 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr),
4919 UNSPEC_PLTOFF);
4920 if (addend != const0_rtx)
4921 addr = gen_rtx_PLUS (Pmode, addr, addend);
4922 addr = gen_rtx_CONST (Pmode, addr);
4923 addr = force_const_mem (Pmode, addr);
4924 emit_move_insn (temp, addr);
4925
4926 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
4927 if (reg != 0)
4928 {
4929 s390_load_address (reg, new_rtx);
4930 new_rtx = reg;
4931 }
4932 }
4933 else
4934 /* On 64 bit larl can be used. This case is handled like
4935 local symbol refs. */
4936 gcc_unreachable ();
4937 break;
4938
4939 /* Everything else cannot happen. */
4940 default:
4941 gcc_unreachable ();
4942 }
4943 }
4944 else if (addend != const0_rtx)
4945 {
4946 /* Otherwise, compute the sum. */
4947
4948 rtx base = legitimize_pic_address (addr, reg);
4949 new_rtx = legitimize_pic_address (addend,
4950 base == reg ? NULL_RTX : reg);
4951 if (GET_CODE (new_rtx) == CONST_INT)
4952 new_rtx = plus_constant (Pmode, base, INTVAL (new_rtx));
4953 else
4954 {
4955 if (GET_CODE (new_rtx) == PLUS && CONSTANT_P (XEXP (new_rtx, 1)))
4956 {
4957 base = gen_rtx_PLUS (Pmode, base, XEXP (new_rtx, 0));
4958 new_rtx = XEXP (new_rtx, 1);
4959 }
4960 new_rtx = gen_rtx_PLUS (Pmode, base, new_rtx);
4961 }
4962
4963 if (GET_CODE (new_rtx) == CONST)
4964 new_rtx = XEXP (new_rtx, 0);
4965 new_rtx = force_operand (new_rtx, 0);
4966 }
4967
4968 return new_rtx;
4969 }
4970
4971 /* Load the thread pointer into a register. */
4972
4973 rtx
4974 s390_get_thread_pointer (void)
4975 {
4976 rtx tp = gen_reg_rtx (Pmode);
4977
4978 emit_move_insn (tp, gen_rtx_REG (Pmode, TP_REGNUM));
4979 mark_reg_pointer (tp, BITS_PER_WORD);
4980
4981 return tp;
4982 }
4983
4984 /* Emit a tls call insn. The call target is the SYMBOL_REF stored
4985    in s390_tls_symbol, which always refers to __tls_get_offset.
4986    The returned offset is written to RESULT_REG and a USE rtx is
4987 generated for TLS_CALL. */
4988
4989 static GTY(()) rtx s390_tls_symbol;
4990
4991 static void
4992 s390_emit_tls_call_insn (rtx result_reg, rtx tls_call)
4993 {
4994 rtx insn;
4995
4996 if (!flag_pic)
4997 emit_insn (s390_load_got ());
4998
4999 if (!s390_tls_symbol)
5000 s390_tls_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tls_get_offset");
5001
5002 insn = s390_emit_call (s390_tls_symbol, tls_call, result_reg,
5003 gen_rtx_REG (Pmode, RETURN_REGNUM));
5004
5005 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), result_reg);
5006 RTL_CONST_CALL_P (insn) = 1;
5007 }
5008
5009 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
5010 this (thread-local) address. REG may be used as temporary. */
5011
5012 static rtx
5013 legitimize_tls_address (rtx addr, rtx reg)
5014 {
5015 rtx new_rtx, tls_call, temp, base, r2;
5016 rtx_insn *insn;
5017
5018 if (GET_CODE (addr) == SYMBOL_REF)
5019 switch (tls_symbolic_operand (addr))
5020 {
5021 case TLS_MODEL_GLOBAL_DYNAMIC:
5022 start_sequence ();
5023 r2 = gen_rtx_REG (Pmode, 2);
5024 tls_call = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_TLSGD);
5025 new_rtx = gen_rtx_CONST (Pmode, tls_call);
5026 new_rtx = force_const_mem (Pmode, new_rtx);
5027 emit_move_insn (r2, new_rtx);
5028 s390_emit_tls_call_insn (r2, tls_call);
5029 insn = get_insns ();
5030 end_sequence ();
5031
5032 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_NTPOFF);
5033 temp = gen_reg_rtx (Pmode);
5034 emit_libcall_block (insn, temp, r2, new_rtx);
5035
5036 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
5037 if (reg != 0)
5038 {
5039 s390_load_address (reg, new_rtx);
5040 new_rtx = reg;
5041 }
5042 break;
5043
5044 case TLS_MODEL_LOCAL_DYNAMIC:
5045 start_sequence ();
5046 r2 = gen_rtx_REG (Pmode, 2);
5047 tls_call = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TLSLDM);
5048 new_rtx = gen_rtx_CONST (Pmode, tls_call);
5049 new_rtx = force_const_mem (Pmode, new_rtx);
5050 emit_move_insn (r2, new_rtx);
5051 s390_emit_tls_call_insn (r2, tls_call);
5052 insn = get_insns ();
5053 end_sequence ();
5054
5055 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TLSLDM_NTPOFF);
5056 temp = gen_reg_rtx (Pmode);
5057 emit_libcall_block (insn, temp, r2, new_rtx);
5058
5059 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
5060 base = gen_reg_rtx (Pmode);
5061 s390_load_address (base, new_rtx);
5062
5063 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_DTPOFF);
5064 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
5065 new_rtx = force_const_mem (Pmode, new_rtx);
5066 temp = gen_reg_rtx (Pmode);
5067 emit_move_insn (temp, new_rtx);
5068
5069 new_rtx = gen_rtx_PLUS (Pmode, base, temp);
5070 if (reg != 0)
5071 {
5072 s390_load_address (reg, new_rtx);
5073 new_rtx = reg;
5074 }
5075 break;
5076
5077 case TLS_MODEL_INITIAL_EXEC:
5078 if (flag_pic == 1)
5079 {
5080 /* Assume GOT offset < 4k. This is handled the same way
5081 in both 31- and 64-bit code. */
5082
5083 if (reload_in_progress || reload_completed)
5084 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
5085
5086 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTNTPOFF);
5087 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
5088 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
5089 new_rtx = gen_const_mem (Pmode, new_rtx);
5090 temp = gen_reg_rtx (Pmode);
5091 emit_move_insn (temp, new_rtx);
5092 }
5093 else if (TARGET_CPU_ZARCH)
5094 {
5095 /* If the GOT offset might be >= 4k, we determine the position
5096 of the GOT entry via a PC-relative LARL. */
5097
5098 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_INDNTPOFF);
5099 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
5100 temp = gen_reg_rtx (Pmode);
5101 emit_move_insn (temp, new_rtx);
5102
5103 new_rtx = gen_const_mem (Pmode, temp);
5104 temp = gen_reg_rtx (Pmode);
5105 emit_move_insn (temp, new_rtx);
5106 }
5107 else if (flag_pic)
5108 {
5109 /* If the GOT offset might be >= 4k, we have to load it
5110 from the literal pool. */
5111
5112 if (reload_in_progress || reload_completed)
5113 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
5114
5115 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTNTPOFF);
5116 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
5117 new_rtx = force_const_mem (Pmode, new_rtx);
5118 temp = gen_reg_rtx (Pmode);
5119 emit_move_insn (temp, new_rtx);
5120
5121 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
5122 new_rtx = gen_const_mem (Pmode, new_rtx);
5123
5124 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, new_rtx, addr), UNSPEC_TLS_LOAD);
5125 temp = gen_reg_rtx (Pmode);
5126 emit_insn (gen_rtx_SET (temp, new_rtx));
5127 }
5128 else
5129 {
5130 /* In position-dependent code, load the absolute address of
5131 the GOT entry from the literal pool. */
5132
5133 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_INDNTPOFF);
5134 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
5135 new_rtx = force_const_mem (Pmode, new_rtx);
5136 temp = gen_reg_rtx (Pmode);
5137 emit_move_insn (temp, new_rtx);
5138
5139 new_rtx = temp;
5140 new_rtx = gen_const_mem (Pmode, new_rtx);
5141 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, new_rtx, addr), UNSPEC_TLS_LOAD);
5142 temp = gen_reg_rtx (Pmode);
5143 emit_insn (gen_rtx_SET (temp, new_rtx));
5144 }
5145
5146 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
5147 if (reg != 0)
5148 {
5149 s390_load_address (reg, new_rtx);
5150 new_rtx = reg;
5151 }
5152 break;
5153
5154 case TLS_MODEL_LOCAL_EXEC:
5155 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_NTPOFF);
5156 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
5157 new_rtx = force_const_mem (Pmode, new_rtx);
5158 temp = gen_reg_rtx (Pmode);
5159 emit_move_insn (temp, new_rtx);
5160
5161 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
5162 if (reg != 0)
5163 {
5164 s390_load_address (reg, new_rtx);
5165 new_rtx = reg;
5166 }
5167 break;
5168
5169 default:
5170 gcc_unreachable ();
5171 }
5172
5173 else if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == UNSPEC)
5174 {
5175 switch (XINT (XEXP (addr, 0), 1))
5176 {
5177 case UNSPEC_INDNTPOFF:
5178 gcc_assert (TARGET_CPU_ZARCH);
5179 new_rtx = addr;
5180 break;
5181
5182 default:
5183 gcc_unreachable ();
5184 }
5185 }
5186
5187 else if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == PLUS
5188 && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
5189 {
5190 new_rtx = XEXP (XEXP (addr, 0), 0);
5191 if (GET_CODE (new_rtx) != SYMBOL_REF)
5192 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
5193
5194 new_rtx = legitimize_tls_address (new_rtx, reg);
5195 new_rtx = plus_constant (Pmode, new_rtx,
5196 INTVAL (XEXP (XEXP (addr, 0), 1)));
5197 new_rtx = force_operand (new_rtx, 0);
5198 }
5199
5200 else
5201 gcc_unreachable (); /* for now ... */
5202
5203 return new_rtx;
5204 }
5205
5206 /* Emit insns making the address in operands[1] valid for a standard
5207 move to operands[0]. operands[1] is replaced by an address which
5208 should be used instead of the former RTX to emit the move
5209 pattern. */
5210
5211 void
5212 emit_symbolic_move (rtx *operands)
5213 {
5214 rtx temp = !can_create_pseudo_p () ? operands[0] : gen_reg_rtx (Pmode);
5215
5216 if (GET_CODE (operands[0]) == MEM)
5217 operands[1] = force_reg (Pmode, operands[1]);
5218 else if (TLS_SYMBOLIC_CONST (operands[1]))
5219 operands[1] = legitimize_tls_address (operands[1], temp);
5220 else if (flag_pic)
5221 operands[1] = legitimize_pic_address (operands[1], temp);
5222 }
5223
5224 /* Try machine-dependent ways of modifying an illegitimate address X
5225 to be legitimate. If we find one, return the new, valid address.
5226
5227 OLDX is the address as it was before break_out_memory_refs was called.
5228 In some cases it is useful to look at this to decide what needs to be done.
5229
5230 MODE is the mode of the operand pointed to by X.
5231
5232 When -fpic is used, special handling is needed for symbolic references.
5233 See comments by legitimize_pic_address for details. */
5234
5235 static rtx
5236 s390_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
5237 machine_mode mode ATTRIBUTE_UNUSED)
5238 {
5239 rtx constant_term = const0_rtx;
5240
5241 if (TLS_SYMBOLIC_CONST (x))
5242 {
5243 x = legitimize_tls_address (x, 0);
5244
5245 if (s390_legitimate_address_p (mode, x, FALSE))
5246 return x;
5247 }
5248 else if (GET_CODE (x) == PLUS
5249 && (TLS_SYMBOLIC_CONST (XEXP (x, 0))
5250 || TLS_SYMBOLIC_CONST (XEXP (x, 1))))
5251 {
5252 return x;
5253 }
5254 else if (flag_pic)
5255 {
5256 if (SYMBOLIC_CONST (x)
5257 || (GET_CODE (x) == PLUS
5258 && (SYMBOLIC_CONST (XEXP (x, 0))
5259 || SYMBOLIC_CONST (XEXP (x, 1)))))
5260 x = legitimize_pic_address (x, 0);
5261
5262 if (s390_legitimate_address_p (mode, x, FALSE))
5263 return x;
5264 }
5265
5266 x = eliminate_constant_term (x, &constant_term);
5267
5268 /* Optimize loading of large displacements by splitting them
5269 into the multiple of 4K and the rest; this allows the
5270 former to be CSE'd if possible.
5271
5272 Don't do this if the displacement is added to a register
5273 pointing into the stack frame, as the offsets will
5274 change later anyway. */
5275
5276 if (GET_CODE (constant_term) == CONST_INT
5277 && !TARGET_LONG_DISPLACEMENT
5278 && !DISP_IN_RANGE (INTVAL (constant_term))
5279 && !(REG_P (x) && REGNO_PTR_FRAME_P (REGNO (x))))
5280 {
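      /* E.g. a displacement of 0x12345 is split into upper = 0x12000,
	 which is loaded into a register (and may be CSE'd), and
	 lower = 0x345, which remains an in-range displacement.  */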
5281 HOST_WIDE_INT lower = INTVAL (constant_term) & 0xfff;
5282 HOST_WIDE_INT upper = INTVAL (constant_term) ^ lower;
5283
5284 rtx temp = gen_reg_rtx (Pmode);
5285 rtx val = force_operand (GEN_INT (upper), temp);
5286 if (val != temp)
5287 emit_move_insn (temp, val);
5288
5289 x = gen_rtx_PLUS (Pmode, x, temp);
5290 constant_term = GEN_INT (lower);
5291 }
5292
5293 if (GET_CODE (x) == PLUS)
5294 {
5295 if (GET_CODE (XEXP (x, 0)) == REG)
5296 {
5297 rtx temp = gen_reg_rtx (Pmode);
5298 rtx val = force_operand (XEXP (x, 1), temp);
5299 if (val != temp)
5300 emit_move_insn (temp, val);
5301
5302 x = gen_rtx_PLUS (Pmode, XEXP (x, 0), temp);
5303 }
5304
5305 else if (GET_CODE (XEXP (x, 1)) == REG)
5306 {
5307 rtx temp = gen_reg_rtx (Pmode);
5308 rtx val = force_operand (XEXP (x, 0), temp);
5309 if (val != temp)
5310 emit_move_insn (temp, val);
5311
5312 x = gen_rtx_PLUS (Pmode, temp, XEXP (x, 1));
5313 }
5314 }
5315
5316 if (constant_term != const0_rtx)
5317 x = gen_rtx_PLUS (Pmode, x, constant_term);
5318
5319 return x;
5320 }
5321
5322 /* Try a machine-dependent way of reloading an illegitimate address AD
5323 operand. If we find one, push the reload and return the new address.
5324
5325 MODE is the mode of the enclosing MEM. OPNUM is the operand number
5326 and TYPE is the reload type of the current reload. */
5327
5328 rtx
5329 legitimize_reload_address (rtx ad, machine_mode mode ATTRIBUTE_UNUSED,
5330 int opnum, int type)
5331 {
5332 if (!optimize || TARGET_LONG_DISPLACEMENT)
5333 return NULL_RTX;
5334
5335 if (GET_CODE (ad) == PLUS)
5336 {
5337 rtx tem = simplify_binary_operation (PLUS, Pmode,
5338 XEXP (ad, 0), XEXP (ad, 1));
5339 if (tem)
5340 ad = tem;
5341 }
5342
5343 if (GET_CODE (ad) == PLUS
5344 && GET_CODE (XEXP (ad, 0)) == REG
5345 && GET_CODE (XEXP (ad, 1)) == CONST_INT
5346 && !DISP_IN_RANGE (INTVAL (XEXP (ad, 1))))
5347 {
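      /* Split the out-of-range displacement: the low 12 bits stay as
	 displacement while the remainder is reloaded into the base,
	 e.g. base + 0x12345 becomes (base + 0x12000) + 0x345.  */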
5348 HOST_WIDE_INT lower = INTVAL (XEXP (ad, 1)) & 0xfff;
5349 HOST_WIDE_INT upper = INTVAL (XEXP (ad, 1)) ^ lower;
5350 rtx cst, tem, new_rtx;
5351
5352 cst = GEN_INT (upper);
5353 if (!legitimate_reload_constant_p (cst))
5354 cst = force_const_mem (Pmode, cst);
5355
5356 tem = gen_rtx_PLUS (Pmode, XEXP (ad, 0), cst);
5357 new_rtx = gen_rtx_PLUS (Pmode, tem, GEN_INT (lower));
5358
5359 push_reload (XEXP (tem, 1), 0, &XEXP (tem, 1), 0,
5360 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
5361 opnum, (enum reload_type) type);
5362 return new_rtx;
5363 }
5364
5365 return NULL_RTX;
5366 }
5367
5368 /* Emit code to copy LEN bytes from SRC to DST.  */
5369
5370 bool
5371 s390_expand_movmem (rtx dst, rtx src, rtx len)
5372 {
5373 /* When tuning for z10 or higher we rely on the Glibc functions to
5374    do the right thing.  Only for constant lengths below 64k do we
5375 generate inline code. */
5376 if (s390_tune >= PROCESSOR_2097_Z10
5377 && (GET_CODE (len) != CONST_INT || INTVAL (len) > (1<<16)))
5378 return false;
5379
5380 /* Expand memcpy for constant length operands without a loop if it
5381 is shorter that way.
5382
5383 With a constant length argument a
5384 memcpy loop (without pfd) is 36 bytes -> 6 * mvc */
5385 if (GET_CODE (len) == CONST_INT
5386 && INTVAL (len) >= 0
5387 && INTVAL (len) <= 256 * 6
5388 && (!TARGET_MVCLE || INTVAL (len) <= 256))
5389 {
5390 HOST_WIDE_INT o, l;
5391
5392 for (l = INTVAL (len), o = 0; l > 0; l -= 256, o += 256)
5393 {
5394 rtx newdst = adjust_address (dst, BLKmode, o);
5395 rtx newsrc = adjust_address (src, BLKmode, o);
5396 emit_insn (gen_movmem_short (newdst, newsrc,
5397 GEN_INT (l > 256 ? 255 : l - 1)));
5398 }
5399 }
5400
5401 else if (TARGET_MVCLE)
5402 {
5403 emit_insn (gen_movmem_long (dst, src, convert_to_mode (Pmode, len, 1)));
5404 }
5405
5406 else
5407 {
5408 rtx dst_addr, src_addr, count, blocks, temp;
5409 rtx_code_label *loop_start_label = gen_label_rtx ();
5410 rtx_code_label *loop_end_label = gen_label_rtx ();
5411 rtx_code_label *end_label = gen_label_rtx ();
5412 machine_mode mode;
5413
5414 mode = GET_MODE (len);
5415 if (mode == VOIDmode)
5416 mode = Pmode;
5417
5418 dst_addr = gen_reg_rtx (Pmode);
5419 src_addr = gen_reg_rtx (Pmode);
5420 count = gen_reg_rtx (mode);
5421 blocks = gen_reg_rtx (mode);
5422
5423 convert_move (count, len, 1);
5424 emit_cmp_and_jump_insns (count, const0_rtx,
5425 EQ, NULL_RTX, mode, 1, end_label);
5426
5427 emit_move_insn (dst_addr, force_operand (XEXP (dst, 0), NULL_RTX));
5428 emit_move_insn (src_addr, force_operand (XEXP (src, 0), NULL_RTX));
5429 dst = change_address (dst, VOIDmode, dst_addr);
5430 src = change_address (src, VOIDmode, src_addr);
5431
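      /* COUNT is set to LEN - 1: its upper bits give the number of full
	 256 byte blocks handled by the loop, while its low 8 bits select
	 the length of the trailing mvc emitted after the loop.  */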
5432 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
5433 OPTAB_DIRECT);
5434 if (temp != count)
5435 emit_move_insn (count, temp);
5436
5437 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
5438 OPTAB_DIRECT);
5439 if (temp != blocks)
5440 emit_move_insn (blocks, temp);
5441
5442 emit_cmp_and_jump_insns (blocks, const0_rtx,
5443 EQ, NULL_RTX, mode, 1, loop_end_label);
5444
5445 emit_label (loop_start_label);
5446
5447 if (TARGET_Z10
5448 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 768))
5449 {
5450 rtx prefetch;
5451
5452 /* Issue a read prefetch for the +3 cache line. */
5453 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, src_addr, GEN_INT (768)),
5454 const0_rtx, const0_rtx);
5455 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
5456 emit_insn (prefetch);
5457
5458 /* Issue a write prefetch for the +3 cache line. */
5459 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (768)),
5460 const1_rtx, const0_rtx);
5461 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
5462 emit_insn (prefetch);
5463 }
5464
5465 emit_insn (gen_movmem_short (dst, src, GEN_INT (255)));
5466 s390_load_address (dst_addr,
5467 gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (256)));
5468 s390_load_address (src_addr,
5469 gen_rtx_PLUS (Pmode, src_addr, GEN_INT (256)));
5470
5471 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
5472 OPTAB_DIRECT);
5473 if (temp != blocks)
5474 emit_move_insn (blocks, temp);
5475
5476 emit_cmp_and_jump_insns (blocks, const0_rtx,
5477 EQ, NULL_RTX, mode, 1, loop_end_label);
5478
5479 emit_jump (loop_start_label);
5480 emit_label (loop_end_label);
5481
5482 emit_insn (gen_movmem_short (dst, src,
5483 convert_to_mode (Pmode, count, 1)));
5484 emit_label (end_label);
5485 }
5486 return true;
5487 }
5488
5489 /* Emit code to set LEN bytes at DST to VAL.
5490 Make use of clrmem if VAL is zero. */
5491
5492 void
5493 s390_expand_setmem (rtx dst, rtx len, rtx val)
5494 {
5495 if (GET_CODE (len) == CONST_INT && INTVAL (len) <= 0)
5496 return;
5497
5498 gcc_assert (GET_CODE (val) == CONST_INT || GET_MODE (val) == QImode);
5499
5500 /* Expand setmem/clrmem for a constant length operand without a
5501 loop if it will be shorter that way.
5502 With a constant length and without pfd argument a
5503 clrmem loop is 32 bytes -> 5.3 * xc
5504 setmem loop is 36 bytes -> 3.6 * (mvi/stc + mvc) */
5505 if (GET_CODE (len) == CONST_INT
5506 && ((INTVAL (len) <= 256 * 5 && val == const0_rtx)
5507 || INTVAL (len) <= 257 * 3)
5508 && (!TARGET_MVCLE || INTVAL (len) <= 256))
5509 {
5510 HOST_WIDE_INT o, l;
5511
5512 if (val == const0_rtx)
5513 /* clrmem: emit 256 byte blockwise XCs. */
5514 for (l = INTVAL (len), o = 0; l > 0; l -= 256, o += 256)
5515 {
5516 rtx newdst = adjust_address (dst, BLKmode, o);
5517 emit_insn (gen_clrmem_short (newdst,
5518 GEN_INT (l > 256 ? 255 : l - 1)));
5519 }
5520 else
5521 /* setmem: emit 1(mvi) + 256(mvc) byte blockwise memsets by
5522 setting first byte to val and using a 256 byte mvc with one
5523 byte overlap to propagate the byte. */
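	/* mvc copies left to right one byte at a time, so with a one
	   byte overlap each copied byte re-reads the value just stored,
	   replicating VAL across the whole block.  */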
5524 for (l = INTVAL (len), o = 0; l > 0; l -= 257, o += 257)
5525 {
5526 rtx newdst = adjust_address (dst, BLKmode, o);
5527 emit_move_insn (adjust_address (dst, QImode, o), val);
5528 if (l > 1)
5529 {
5530 rtx newdstp1 = adjust_address (dst, BLKmode, o + 1);
5531 emit_insn (gen_movmem_short (newdstp1, newdst,
5532 GEN_INT (l > 257 ? 255 : l - 2)));
5533 }
5534 }
5535 }
5536
5537 else if (TARGET_MVCLE)
5538 {
5539 val = force_not_mem (convert_modes (Pmode, QImode, val, 1));
5540 if (TARGET_64BIT)
5541 emit_insn (gen_setmem_long_di (dst, convert_to_mode (Pmode, len, 1),
5542 val));
5543 else
5544 emit_insn (gen_setmem_long_si (dst, convert_to_mode (Pmode, len, 1),
5545 val));
5546 }
5547
5548 else
5549 {
5550 rtx dst_addr, count, blocks, temp, dstp1 = NULL_RTX;
5551 rtx_code_label *loop_start_label = gen_label_rtx ();
5552 rtx_code_label *onebyte_end_label = gen_label_rtx ();
5553 rtx_code_label *zerobyte_end_label = gen_label_rtx ();
5554 rtx_code_label *restbyte_end_label = gen_label_rtx ();
5555 machine_mode mode;
5556
5557 mode = GET_MODE (len);
5558 if (mode == VOIDmode)
5559 mode = Pmode;
5560
5561 dst_addr = gen_reg_rtx (Pmode);
5562 count = gen_reg_rtx (mode);
5563 blocks = gen_reg_rtx (mode);
5564
5565 convert_move (count, len, 1);
5566 emit_cmp_and_jump_insns (count, const0_rtx,
5567 EQ, NULL_RTX, mode, 1, zerobyte_end_label,
5568 profile_probability::very_unlikely ());
5569
5570 /* We need to make a copy of the target address since memset is
5571 supposed to return it unmodified. We have to make it here
5572 already since the new reg is used at onebyte_end_label. */
5573 emit_move_insn (dst_addr, force_operand (XEXP (dst, 0), NULL_RTX));
5574 dst = change_address (dst, VOIDmode, dst_addr);
5575
5576 if (val != const0_rtx)
5577 {
5578 /* When using the overlapping mvc the original target
5579 address is only accessed as single byte entity (even by
5580 the mvc reading this value). */
5581 set_mem_size (dst, 1);
5582 dstp1 = adjust_address (dst, VOIDmode, 1);
5583 emit_cmp_and_jump_insns (count,
5584 const1_rtx, EQ, NULL_RTX, mode, 1,
5585 onebyte_end_label,
5586 profile_probability::very_unlikely ());
5587 }
5588
5589 /* There is one unconditional (mvi+mvc)/xc after the loop
5590 	 dealing with the rest of the bytes; subtracting two (mvi+mvc)
5591 or one (xc) here leaves this number of bytes to be handled by
5592 it. */
5593 temp = expand_binop (mode, add_optab, count,
5594 val == const0_rtx ? constm1_rtx : GEN_INT (-2),
5595 count, 1, OPTAB_DIRECT);
5596 if (temp != count)
5597 emit_move_insn (count, temp);
5598
5599 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
5600 OPTAB_DIRECT);
5601 if (temp != blocks)
5602 emit_move_insn (blocks, temp);
5603
5604 emit_cmp_and_jump_insns (blocks, const0_rtx,
5605 EQ, NULL_RTX, mode, 1, restbyte_end_label);
5606
5607 emit_jump (loop_start_label);
5608
5609 if (val != const0_rtx)
5610 {
5611 /* The 1 byte != 0 special case. Not handled efficiently
5612 since we require two jumps for that. However, this
5613 should be very rare. */
5614 emit_label (onebyte_end_label);
5615 emit_move_insn (adjust_address (dst, QImode, 0), val);
5616 emit_jump (zerobyte_end_label);
5617 }
5618
5619 emit_label (loop_start_label);
5620
5621 if (TARGET_Z10
5622 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 1024))
5623 {
5624 /* Issue a write prefetch for the +4 cache line. */
5625 rtx prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, dst_addr,
5626 GEN_INT (1024)),
5627 const1_rtx, const0_rtx);
5628 emit_insn (prefetch);
5629 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
5630 }
5631
5632 if (val == const0_rtx)
5633 emit_insn (gen_clrmem_short (dst, GEN_INT (255)));
5634 else
5635 {
5636 /* Set the first byte in the block to the value and use an
5637 overlapping mvc for the block. */
5638 emit_move_insn (adjust_address (dst, QImode, 0), val);
5639 emit_insn (gen_movmem_short (dstp1, dst, GEN_INT (254)));
5640 }
5641 s390_load_address (dst_addr,
5642 gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (256)));
5643
5644 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
5645 OPTAB_DIRECT);
5646 if (temp != blocks)
5647 emit_move_insn (blocks, temp);
5648
5649 emit_cmp_and_jump_insns (blocks, const0_rtx,
5650 NE, NULL_RTX, mode, 1, loop_start_label);
5651
5652 emit_label (restbyte_end_label);
5653
5654 if (val == const0_rtx)
5655 emit_insn (gen_clrmem_short (dst, convert_to_mode (Pmode, count, 1)));
5656 else
5657 {
5658 /* Set the first byte in the block to the value and use an
5659 overlapping mvc for the block. */
5660 emit_move_insn (adjust_address (dst, QImode, 0), val);
5661 	  /* execute only uses the lowest 8 bits of count, which is
5662 	     exactly what we need here.  */
5663 emit_insn (gen_movmem_short (dstp1, dst,
5664 convert_to_mode (Pmode, count, 1)));
5665 }
5666
5667 emit_label (zerobyte_end_label);
5668 }
5669 }
5670
5671 /* Emit code to compare LEN bytes at OP0 with those at OP1,
5672 and return the result in TARGET. */
5673
5674 bool
5675 s390_expand_cmpmem (rtx target, rtx op0, rtx op1, rtx len)
5676 {
5677 rtx ccreg = gen_rtx_REG (CCUmode, CC_REGNUM);
5678 rtx tmp;
5679
5680 /* When tuning for z10 or higher we rely on the Glibc functions to
5681    do the right thing.  Only for constant lengths below 64k do we
5682 generate inline code. */
5683 if (s390_tune >= PROCESSOR_2097_Z10
5684 && (GET_CODE (len) != CONST_INT || INTVAL (len) > (1<<16)))
5685 return false;
5686
5687 /* As the result of CMPINT is inverted compared to what we need,
5688 we have to swap the operands. */
5689 tmp = op0; op0 = op1; op1 = tmp;
5690
5691 if (GET_CODE (len) == CONST_INT && INTVAL (len) >= 0 && INTVAL (len) <= 256)
5692 {
5693 if (INTVAL (len) > 0)
5694 {
5695 emit_insn (gen_cmpmem_short (op0, op1, GEN_INT (INTVAL (len) - 1)));
5696 emit_insn (gen_cmpint (target, ccreg));
5697 }
5698 else
5699 emit_move_insn (target, const0_rtx);
5700 }
5701 else if (TARGET_MVCLE)
5702 {
5703 emit_insn (gen_cmpmem_long (op0, op1, convert_to_mode (Pmode, len, 1)));
5704 emit_insn (gen_cmpint (target, ccreg));
5705 }
5706 else
5707 {
5708 rtx addr0, addr1, count, blocks, temp;
5709 rtx_code_label *loop_start_label = gen_label_rtx ();
5710 rtx_code_label *loop_end_label = gen_label_rtx ();
5711 rtx_code_label *end_label = gen_label_rtx ();
5712 machine_mode mode;
5713
5714 mode = GET_MODE (len);
5715 if (mode == VOIDmode)
5716 mode = Pmode;
5717
5718 addr0 = gen_reg_rtx (Pmode);
5719 addr1 = gen_reg_rtx (Pmode);
5720 count = gen_reg_rtx (mode);
5721 blocks = gen_reg_rtx (mode);
5722
5723 convert_move (count, len, 1);
5724 emit_cmp_and_jump_insns (count, const0_rtx,
5725 EQ, NULL_RTX, mode, 1, end_label);
5726
5727 emit_move_insn (addr0, force_operand (XEXP (op0, 0), NULL_RTX));
5728 emit_move_insn (addr1, force_operand (XEXP (op1, 0), NULL_RTX));
5729 op0 = change_address (op0, VOIDmode, addr0);
5730 op1 = change_address (op1, VOIDmode, addr1);
5731
5732 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
5733 OPTAB_DIRECT);
5734 if (temp != count)
5735 emit_move_insn (count, temp);
5736
5737 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
5738 OPTAB_DIRECT);
5739 if (temp != blocks)
5740 emit_move_insn (blocks, temp);
5741
5742 emit_cmp_and_jump_insns (blocks, const0_rtx,
5743 EQ, NULL_RTX, mode, 1, loop_end_label);
5744
5745 emit_label (loop_start_label);
5746
5747 if (TARGET_Z10
5748 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 512))
5749 {
5750 rtx prefetch;
5751
5752 /* Issue a read prefetch for the +2 cache line of operand 1. */
5753 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, addr0, GEN_INT (512)),
5754 const0_rtx, const0_rtx);
5755 emit_insn (prefetch);
5756 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
5757
5758 /* Issue a read prefetch for the +2 cache line of operand 2. */
5759 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, addr1, GEN_INT (512)),
5760 const0_rtx, const0_rtx);
5761 emit_insn (prefetch);
5762 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
5763 }
5764
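/* Compare one 256 byte block; a non-zero CC means a difference was
   found, in which case we leave the loop early and let cmpint turn
   the CC into the result.  */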
5765 emit_insn (gen_cmpmem_short (op0, op1, GEN_INT (255)));
5766 temp = gen_rtx_NE (VOIDmode, ccreg, const0_rtx);
5767 temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
5768 gen_rtx_LABEL_REF (VOIDmode, end_label), pc_rtx);
5769 temp = gen_rtx_SET (pc_rtx, temp);
5770 emit_jump_insn (temp);
5771
5772 s390_load_address (addr0,
5773 gen_rtx_PLUS (Pmode, addr0, GEN_INT (256)));
5774 s390_load_address (addr1,
5775 gen_rtx_PLUS (Pmode, addr1, GEN_INT (256)));
5776
5777 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
5778 OPTAB_DIRECT);
5779 if (temp != blocks)
5780 emit_move_insn (blocks, temp);
5781
5782 emit_cmp_and_jump_insns (blocks, const0_rtx,
5783 EQ, NULL_RTX, mode, 1, loop_end_label);
5784
5785 emit_jump (loop_start_label);
5786 emit_label (loop_end_label);
5787
5788 emit_insn (gen_cmpmem_short (op0, op1,
5789 convert_to_mode (Pmode, count, 1)));
5790 emit_label (end_label);
5791
5792 emit_insn (gen_cmpint (target, ccreg));
5793 }
5794 return true;
5795 }
5796
5797 /* Emit a conditional jump to LABEL for condition code mask MASK using
5798 comparison operator COMPARISON. Return the emitted jump insn. */
5799
5800 static rtx_insn *
5801 s390_emit_ccraw_jump (HOST_WIDE_INT mask, enum rtx_code comparison, rtx label)
5802 {
5803 rtx temp;
5804
5805 gcc_assert (comparison == EQ || comparison == NE);
5806 gcc_assert (mask > 0 && mask < 15);
5807
5808 temp = gen_rtx_fmt_ee (comparison, VOIDmode,
5809 gen_rtx_REG (CCRAWmode, CC_REGNUM), GEN_INT (mask));
5810 temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
5811 gen_rtx_LABEL_REF (VOIDmode, label), pc_rtx);
5812 temp = gen_rtx_SET (pc_rtx, temp);
5813 return emit_jump_insn (temp);
5814 }
5815
5816 /* Emit the instructions to implement strlen of STRING and store the
5817 result in TARGET. The string has the known ALIGNMENT. This
5818 version uses vector instructions and is therefore not appropriate
5819 for targets prior to z13. */
5820
5821 void
5822 s390_expand_vec_strlen (rtx target, rtx string, rtx alignment)
5823 {
5824 rtx highest_index_to_load_reg = gen_reg_rtx (Pmode);
5825 rtx str_reg = gen_reg_rtx (V16QImode);
5826 rtx str_addr_base_reg = gen_reg_rtx (Pmode);
5827 rtx str_idx_reg = gen_reg_rtx (Pmode);
5828 rtx result_reg = gen_reg_rtx (V16QImode);
5829 rtx is_aligned_label = gen_label_rtx ();
5830 rtx into_loop_label = NULL_RTX;
5831 rtx loop_start_label = gen_label_rtx ();
5832 rtx temp;
5833 rtx len = gen_reg_rtx (QImode);
5834 rtx cond;
5835
5836 s390_load_address (str_addr_base_reg, XEXP (string, 0));
5837 emit_move_insn (str_idx_reg, const0_rtx);
5838
5839 if (INTVAL (alignment) < 16)
5840 {
5841 /* Check whether the address happens to be aligned properly, in
5842 which case we can jump directly to the aligned loop. */
5843 emit_cmp_and_jump_insns (gen_rtx_AND (Pmode,
5844 str_addr_base_reg, GEN_INT (15)),
5845 const0_rtx, EQ, NULL_RTX,
5846 Pmode, 1, is_aligned_label);
5847
5848 temp = gen_reg_rtx (Pmode);
5849 temp = expand_binop (Pmode, and_optab, str_addr_base_reg,
5850 GEN_INT (15), temp, 1, OPTAB_DIRECT);
5851 gcc_assert (REG_P (temp));
5852 highest_index_to_load_reg =
5853 expand_binop (Pmode, sub_optab, GEN_INT (15), temp,
5854 highest_index_to_load_reg, 1, OPTAB_DIRECT);
5855 gcc_assert (REG_P (highest_index_to_load_reg));
5856 emit_insn (gen_vllv16qi (str_reg,
5857 convert_to_mode (SImode, highest_index_to_load_reg, 1),
5858 gen_rtx_MEM (BLKmode, str_addr_base_reg)));
5859
5860 into_loop_label = gen_label_rtx ();
5861 s390_emit_jump (into_loop_label, NULL_RTX);
5862 emit_barrier ();
5863 }
5864
5865 emit_label (is_aligned_label);
5866 LABEL_NUSES (is_aligned_label) = INTVAL (alignment) < 16 ? 2 : 1;
5867
5868 /* Reaching this point we only perform 16-byte aligned
5869 loads. */
5870 emit_move_insn (highest_index_to_load_reg, GEN_INT (15));
5871
5872 emit_label (loop_start_label);
5873 LABEL_NUSES (loop_start_label) = 1;
5874
5875 /* Load 16 bytes of the string into VR. */
5876 emit_move_insn (str_reg,
5877 gen_rtx_MEM (V16QImode,
5878 gen_rtx_PLUS (Pmode, str_idx_reg,
5879 str_addr_base_reg)));
5880 if (into_loop_label != NULL_RTX)
5881 {
5882 emit_label (into_loop_label);
5883 LABEL_NUSES (into_loop_label) = 1;
5884 }
5885
5886 /* Increment string index by 16 bytes. */
5887 expand_binop (Pmode, add_optab, str_idx_reg, GEN_INT (16),
5888 str_idx_reg, 1, OPTAB_DIRECT);
5889
5890 emit_insn (gen_vec_vfenesv16qi (result_reg, str_reg, str_reg,
5891 GEN_INT (VSTRING_FLAG_ZS | VSTRING_FLAG_CS)));
5892
5893 add_int_reg_note (s390_emit_ccraw_jump (8, NE, loop_start_label),
5894 REG_BR_PROB,
5895 profile_probability::very_likely ().to_reg_br_prob_note ());
5896 emit_insn (gen_vec_extractv16qiqi (len, result_reg, GEN_INT (7)));
5897
5898 /* If the string pointer wasn't aligned we have loaded less than 16
5899 bytes and the remaining bytes got filled with zeros (by vll).
5900 Now we have to check whether the resulting index lies within the
5901 bytes that are actually part of the string. */
5902
5903 cond = s390_emit_compare (GT, convert_to_mode (Pmode, len, 1),
5904 highest_index_to_load_reg);
5905 s390_load_address (highest_index_to_load_reg,
5906 gen_rtx_PLUS (Pmode, highest_index_to_load_reg,
5907 const1_rtx));
5908 if (TARGET_64BIT)
5909 emit_insn (gen_movdicc (str_idx_reg, cond,
5910 highest_index_to_load_reg, str_idx_reg));
5911 else
5912 emit_insn (gen_movsicc (str_idx_reg, cond,
5913 highest_index_to_load_reg, str_idx_reg));
5914
5915 add_reg_br_prob_note (s390_emit_jump (is_aligned_label, cond),
5916 profile_probability::very_unlikely ());
5917
5918 expand_binop (Pmode, add_optab, str_idx_reg,
5919 GEN_INT (-16), str_idx_reg, 1, OPTAB_DIRECT);
5920 /* FIXME: len is already zero extended - so avoid the llgcr emitted
5921 here. */
5922 temp = expand_binop (Pmode, add_optab, str_idx_reg,
5923 convert_to_mode (Pmode, len, 1),
5924 target, 1, OPTAB_DIRECT);
5925 if (temp != target)
5926 emit_move_insn (target, temp);
5927 }
5928
5929 void
5930 s390_expand_vec_movstr (rtx result, rtx dst, rtx src)
5931 {
5932 rtx temp = gen_reg_rtx (Pmode);
5933 rtx src_addr = XEXP (src, 0);
5934 rtx dst_addr = XEXP (dst, 0);
5935 rtx src_addr_reg = gen_reg_rtx (Pmode);
5936 rtx dst_addr_reg = gen_reg_rtx (Pmode);
5937 rtx offset = gen_reg_rtx (Pmode);
5938 rtx vsrc = gen_reg_rtx (V16QImode);
5939 rtx vpos = gen_reg_rtx (V16QImode);
5940 rtx loadlen = gen_reg_rtx (SImode);
5941 rtx gpos_qi = gen_reg_rtx(QImode);
5942 rtx gpos = gen_reg_rtx (SImode);
5943 rtx done_label = gen_label_rtx ();
5944 rtx loop_label = gen_label_rtx ();
5945 rtx exit_label = gen_label_rtx ();
5946 rtx full_label = gen_label_rtx ();
5947
5948 /* Perform a quick check for string ending on the first up to 16
5949 bytes and exit early if successful. */
5950
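/* vlbb with block boundary code 6 never loads beyond the current 4K
   page, so the speculative load cannot fault; lcbb yields the number
   of bytes actually loaded up to that boundary (at most 16).  */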
5951 emit_insn (gen_vlbb (vsrc, src, GEN_INT (6)));
5952 emit_insn (gen_lcbb (loadlen, src_addr, GEN_INT (6)));
5953 emit_insn (gen_vfenezv16qi (vpos, vsrc, vsrc));
5954 emit_insn (gen_vec_extractv16qiqi (gpos_qi, vpos, GEN_INT (7)));
5955 emit_move_insn (gpos, gen_rtx_SUBREG (SImode, gpos_qi, 0));
5956 /* gpos is the byte index if a zero was found and 16 otherwise.
5957 So if it is lower than the number of loaded bytes we have a hit. */
5958 emit_cmp_and_jump_insns (gpos, loadlen, GE, NULL_RTX, SImode, 1,
5959 full_label);
5960 emit_insn (gen_vstlv16qi (vsrc, gpos, dst));
5961
5962 force_expand_binop (Pmode, add_optab, dst_addr, gpos, result,
5963 1, OPTAB_DIRECT);
5964 emit_jump (exit_label);
5965 emit_barrier ();
5966
5967 emit_label (full_label);
5968 LABEL_NUSES (full_label) = 1;
5969
5970 /* Calculate `offset' so that src + offset points to the last byte
5971 before 16 byte alignment. */
5972
5973 /* temp = src_addr & 0xf */
5974 force_expand_binop (Pmode, and_optab, src_addr, GEN_INT (15), temp,
5975 1, OPTAB_DIRECT);
5976
5977 /* offset = 0xf - temp */
5978 emit_move_insn (offset, GEN_INT (15));
5979 force_expand_binop (Pmode, sub_optab, offset, temp, offset,
5980 1, OPTAB_DIRECT);
5981
5982 /* Store `offset' bytes in the destination string. The quick check
5983 has loaded at least `offset' bytes into vsrc. */
5984
5985 emit_insn (gen_vstlv16qi (vsrc, gen_lowpart (SImode, offset), dst));
5986
5987 /* Advance to the next byte to be loaded. */
5988 force_expand_binop (Pmode, add_optab, offset, const1_rtx, offset,
5989 1, OPTAB_DIRECT);
5990
5991 /* Make sure the addresses are single regs which can be used as a
5992 base. */
5993 emit_move_insn (src_addr_reg, src_addr);
5994 emit_move_insn (dst_addr_reg, dst_addr);
5995
5996 /* MAIN LOOP */
5997
5998 emit_label (loop_label);
5999 LABEL_NUSES (loop_label) = 1;
6000
6001 emit_move_insn (vsrc,
6002 gen_rtx_MEM (V16QImode,
6003 gen_rtx_PLUS (Pmode, src_addr_reg, offset)));
6004
6005 emit_insn (gen_vec_vfenesv16qi (vpos, vsrc, vsrc,
6006 GEN_INT (VSTRING_FLAG_ZS | VSTRING_FLAG_CS)));
6007 add_int_reg_note (s390_emit_ccraw_jump (8, EQ, done_label),
6008 REG_BR_PROB, profile_probability::very_unlikely ()
6009 .to_reg_br_prob_note ());
6010
6011 emit_move_insn (gen_rtx_MEM (V16QImode,
6012 gen_rtx_PLUS (Pmode, dst_addr_reg, offset)),
6013 vsrc);
6014 /* offset += 16 */
6015 force_expand_binop (Pmode, add_optab, offset, GEN_INT (16),
6016 offset, 1, OPTAB_DIRECT);
6017
6018 emit_jump (loop_label);
6019 emit_barrier ();
6020
6021 /* REGULAR EXIT */
6022
6023 /* We are done. Add the offset of the zero character to the dst_addr
6024 pointer to get the result. */
6025
6026 emit_label (done_label);
6027 LABEL_NUSES (done_label) = 1;
6028
6029 force_expand_binop (Pmode, add_optab, dst_addr_reg, offset, dst_addr_reg,
6030 1, OPTAB_DIRECT);
6031
6032 emit_insn (gen_vec_extractv16qiqi (gpos_qi, vpos, GEN_INT (7)));
6033 emit_move_insn (gpos, gen_rtx_SUBREG (SImode, gpos_qi, 0));
6034
6035 emit_insn (gen_vstlv16qi (vsrc, gpos, gen_rtx_MEM (BLKmode, dst_addr_reg)));
6036
6037 force_expand_binop (Pmode, add_optab, dst_addr_reg, gpos, result,
6038 1, OPTAB_DIRECT);
6039
6040 /* EARLY EXIT */
6041
6042 emit_label (exit_label);
6043 LABEL_NUSES (exit_label) = 1;
6044 }
6045
6046
6047 /* Expand conditional increment or decrement using alc/slb instructions.
6048 Should generate code setting DST to either SRC or SRC + INCREMENT,
6049 depending on the result of the comparison CMP_OP0 CMP_CODE CMP_OP1.
6050 Returns true if successful, false otherwise.
6051
6052 That makes it possible to implement some if-constructs without jumps e.g.:
6053 (borrow = CC0 | CC1 and carry = CC2 | CC3)
6054 unsigned int a, b, c;
6055 if (a < b) c++; -> CCU b > a -> CC2; c += carry;
6056 if (a < b) c--; -> CCL3 a - b -> borrow; c -= borrow;
6057 if (a <= b) c++; -> CCL3 b - a -> borrow; c += carry;
6058 if (a <= b) c--; -> CCU a <= b -> borrow; c -= borrow;
6059
6060 Checks for EQ and NE with a nonzero value need an additional xor e.g.:
6061 if (a == b) c++; -> CCL3 a ^= b; 0 - a -> borrow; c += carry;
6062 if (a == b) c--; -> CCU a ^= b; a <= 0 -> CC0 | CC1; c -= borrow;
6063 if (a != b) c++; -> CCU a ^= b; a > 0 -> CC2; c += carry;
6064 if (a != b) c--; -> CCL3 a ^= b; 0 - a -> borrow; c -= borrow; */
6065
6066 bool
6067 s390_expand_addcc (enum rtx_code cmp_code, rtx cmp_op0, rtx cmp_op1,
6068 rtx dst, rtx src, rtx increment)
6069 {
6070 machine_mode cmp_mode;
6071 machine_mode cc_mode;
6072 rtx op_res;
6073 rtx insn;
6074 rtvec p;
6075 int ret;
6076
6077 if ((GET_MODE (cmp_op0) == SImode || GET_MODE (cmp_op0) == VOIDmode)
6078 && (GET_MODE (cmp_op1) == SImode || GET_MODE (cmp_op1) == VOIDmode))
6079 cmp_mode = SImode;
6080 else if ((GET_MODE (cmp_op0) == DImode || GET_MODE (cmp_op0) == VOIDmode)
6081 && (GET_MODE (cmp_op1) == DImode || GET_MODE (cmp_op1) == VOIDmode))
6082 cmp_mode = DImode;
6083 else
6084 return false;
6085
6086 /* Try ADD LOGICAL WITH CARRY. */
6087 if (increment == const1_rtx)
6088 {
6089 /* Determine CC mode to use. */
6090 if (cmp_code == EQ || cmp_code == NE)
6091 {
6092 if (cmp_op1 != const0_rtx)
6093 {
6094 cmp_op0 = expand_simple_binop (cmp_mode, XOR, cmp_op0, cmp_op1,
6095 NULL_RTX, 0, OPTAB_WIDEN);
6096 cmp_op1 = const0_rtx;
6097 }
6098
6099 cmp_code = cmp_code == EQ ? LEU : GTU;
6100 }
6101
6102 if (cmp_code == LTU || cmp_code == LEU)
6103 {
6104 rtx tem = cmp_op0;
6105 cmp_op0 = cmp_op1;
6106 cmp_op1 = tem;
6107 cmp_code = swap_condition (cmp_code);
6108 }
6109
6110 switch (cmp_code)
6111 {
6112 case GTU:
6113 cc_mode = CCUmode;
6114 break;
6115
6116 case GEU:
6117 cc_mode = CCL3mode;
6118 break;
6119
6120 default:
6121 return false;
6122 }
6123
6124 /* Emit comparison instruction pattern. */
6125 if (!register_operand (cmp_op0, cmp_mode))
6126 cmp_op0 = force_reg (cmp_mode, cmp_op0);
6127
6128 insn = gen_rtx_SET (gen_rtx_REG (cc_mode, CC_REGNUM),
6129 gen_rtx_COMPARE (cc_mode, cmp_op0, cmp_op1));
6130 /* We use insn_invalid_p here to add clobbers if required. */
6131 ret = insn_invalid_p (emit_insn (insn), false);
6132 gcc_assert (!ret);
6133
6134 /* Emit ALC instruction pattern. */
6135 op_res = gen_rtx_fmt_ee (cmp_code, GET_MODE (dst),
6136 gen_rtx_REG (cc_mode, CC_REGNUM),
6137 const0_rtx);
6138
6139 if (src != const0_rtx)
6140 {
6141 if (!register_operand (src, GET_MODE (dst)))
6142 src = force_reg (GET_MODE (dst), src);
6143
6144 op_res = gen_rtx_PLUS (GET_MODE (dst), op_res, src);
6145 op_res = gen_rtx_PLUS (GET_MODE (dst), op_res, const0_rtx);
6146 }
6147
6148 p = rtvec_alloc (2);
6149 RTVEC_ELT (p, 0) =
6150 gen_rtx_SET (dst, op_res);
6151 RTVEC_ELT (p, 1) =
6152 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
6153 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
6154
6155 return true;
6156 }
6157
6158 /* Try SUBTRACT LOGICAL WITH BORROW. */
6159 if (increment == constm1_rtx)
6160 {
6161 /* Determine CC mode to use. */
6162 if (cmp_code == EQ || cmp_code == NE)
6163 {
6164 if (cmp_op1 != const0_rtx)
6165 {
6166 cmp_op0 = expand_simple_binop (cmp_mode, XOR, cmp_op0, cmp_op1,
6167 NULL_RTX, 0, OPTAB_WIDEN);
6168 cmp_op1 = const0_rtx;
6169 }
6170
6171 cmp_code = cmp_code == EQ ? LEU : GTU;
6172 }
6173
6174 if (cmp_code == GTU || cmp_code == GEU)
6175 {
6176 rtx tem = cmp_op0;
6177 cmp_op0 = cmp_op1;
6178 cmp_op1 = tem;
6179 cmp_code = swap_condition (cmp_code);
6180 }
6181
6182 switch (cmp_code)
6183 {
6184 case LEU:
6185 cc_mode = CCUmode;
6186 break;
6187
6188 case LTU:
6189 cc_mode = CCL3mode;
6190 break;
6191
6192 default:
6193 return false;
6194 }
6195
6196 /* Emit comparison instruction pattern. */
6197 if (!register_operand (cmp_op0, cmp_mode))
6198 cmp_op0 = force_reg (cmp_mode, cmp_op0);
6199
6200 insn = gen_rtx_SET (gen_rtx_REG (cc_mode, CC_REGNUM),
6201 gen_rtx_COMPARE (cc_mode, cmp_op0, cmp_op1));
6202 /* We use insn_invalid_p here to add clobbers if required. */
6203 ret = insn_invalid_p (emit_insn (insn), false);
6204 gcc_assert (!ret);
6205
6206 /* Emit SLB instruction pattern. */
6207 if (!register_operand (src, GET_MODE (dst)))
6208 src = force_reg (GET_MODE (dst), src);
6209
6210 op_res = gen_rtx_MINUS (GET_MODE (dst),
6211 gen_rtx_MINUS (GET_MODE (dst), src, const0_rtx),
6212 gen_rtx_fmt_ee (cmp_code, GET_MODE (dst),
6213 gen_rtx_REG (cc_mode, CC_REGNUM),
6214 const0_rtx));
6215 p = rtvec_alloc (2);
6216 RTVEC_ELT (p, 0) =
6217 gen_rtx_SET (dst, op_res);
6218 RTVEC_ELT (p, 1) =
6219 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
6220 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
6221
6222 return true;
6223 }
6224
6225 return false;
6226 }
6227
6228 /* Expand code for the insv template. Return true if successful. */
6229
6230 bool
6231 s390_expand_insv (rtx dest, rtx op1, rtx op2, rtx src)
6232 {
6233 int bitsize = INTVAL (op1);
6234 int bitpos = INTVAL (op2);
6235 machine_mode mode = GET_MODE (dest);
6236 machine_mode smode;
6237 int smode_bsize, mode_bsize;
6238 rtx op, clobber;
6239
6240 if (bitsize + bitpos > GET_MODE_BITSIZE (mode))
6241 return false;
6242
6243 /* Generate INSERT IMMEDIATE (IILL et al). */
6244 /* (set (ze (reg)) (const_int)). */
6245 if (TARGET_ZARCH
6246 && register_operand (dest, word_mode)
6247 && (bitpos % 16) == 0
6248 && (bitsize % 16) == 0
6249 && const_int_operand (src, VOIDmode))
6250 {
6251 HOST_WIDE_INT val = INTVAL (src);
6252 int regpos = bitpos + bitsize;
6253
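/* Insert the constant piecewise: 16 bits at a time (iill et al.), or
   32 bits at a time (iilf/iihf) when the z9 extended-immediate
   facility is available.  */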
6254 while (regpos > bitpos)
6255 {
6256 machine_mode putmode;
6257 int putsize;
6258
6259 if (TARGET_EXTIMM && (regpos % 32 == 0) && (regpos >= bitpos + 32))
6260 putmode = SImode;
6261 else
6262 putmode = HImode;
6263
6264 putsize = GET_MODE_BITSIZE (putmode);
6265 regpos -= putsize;
6266 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest,
6267 GEN_INT (putsize),
6268 GEN_INT (regpos)),
6269 gen_int_mode (val, putmode));
6270 val >>= putsize;
6271 }
6272 gcc_assert (regpos == bitpos);
6273 return true;
6274 }
6275
6276 smode = smallest_int_mode_for_size (bitsize);
6277 smode_bsize = GET_MODE_BITSIZE (smode);
6278 mode_bsize = GET_MODE_BITSIZE (mode);
6279
6280 /* Generate STORE CHARACTERS UNDER MASK (STCM et al). */
6281 if (bitpos == 0
6282 && (bitsize % BITS_PER_UNIT) == 0
6283 && MEM_P (dest)
6284 && (register_operand (src, word_mode)
6285 || const_int_operand (src, VOIDmode)))
6286 {
6287 /* Emit standard pattern if possible. */
6288 if (smode_bsize == bitsize)
6289 {
6290 emit_move_insn (adjust_address (dest, smode, 0),
6291 gen_lowpart (smode, src));
6292 return true;
6293 }
6294
6295 /* (set (ze (mem)) (const_int)). */
6296 else if (const_int_operand (src, VOIDmode))
6297 {
6298 int size = bitsize / BITS_PER_UNIT;
6299 rtx src_mem = adjust_address (force_const_mem (word_mode, src),
6300 BLKmode,
6301 UNITS_PER_WORD - size);
6302
6303 dest = adjust_address (dest, BLKmode, 0);
6304 set_mem_size (dest, size);
6305 s390_expand_movmem (dest, src_mem, GEN_INT (size));
6306 return true;
6307 }
6308
6309 /* (set (ze (mem)) (reg)). */
6310 else if (register_operand (src, word_mode))
6311 {
6312 if (bitsize <= 32)
6313 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest, op1,
6314 const0_rtx), src);
6315 else
6316 {
6317 /* Emit st,stcmh sequence. */
6318 int stcmh_width = bitsize - 32;
6319 int size = stcmh_width / BITS_PER_UNIT;
6320
6321 emit_move_insn (adjust_address (dest, SImode, size),
6322 gen_lowpart (SImode, src));
6323 set_mem_size (dest, size);
6324 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest,
6325 GEN_INT (stcmh_width),
6326 const0_rtx),
6327 gen_rtx_LSHIFTRT (word_mode, src, GEN_INT (32)));
6328 }
6329 return true;
6330 }
6331 }
6332
6333 /* Generate INSERT CHARACTERS UNDER MASK (IC, ICM et al). */
6334 if ((bitpos % BITS_PER_UNIT) == 0
6335 && (bitsize % BITS_PER_UNIT) == 0
6336 && (bitpos & 32) == ((bitpos + bitsize - 1) & 32)
6337 && MEM_P (src)
6338 && (mode == DImode || mode == SImode)
6339 && register_operand (dest, mode))
6340 {
6341 /* Emit a strict_low_part pattern if possible. */
6342 if (smode_bsize == bitsize && bitpos == mode_bsize - smode_bsize)
6343 {
6344 op = gen_rtx_STRICT_LOW_PART (VOIDmode, gen_lowpart (smode, dest));
6345 op = gen_rtx_SET (op, gen_lowpart (smode, src));
6346 clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
6347 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clobber)));
6348 return true;
6349 }
6350
6351 /* ??? There are more powerful versions of ICM that are not
6352 completely represented in the md file. */
6353 }
6354
6355 /* For z10, generate ROTATE THEN INSERT SELECTED BITS (RISBG et al). */
6356 if (TARGET_Z10 && (mode == DImode || mode == SImode))
6357 {
6358 machine_mode mode_s = GET_MODE (src);
6359
6360 if (CONSTANT_P (src))
6361 {
6362 /* For constant zero values the representation with AND
6363 appears to be folded in more situations than the (set
6364 (zero_extract) ...).
6365 We only do this when the start and end of the bitfield
6366 remain in the same SImode chunk. That way nihf or nilf
6367 can be used.
6368 The AND patterns might still generate a risbg for this. */
6369 if (src == const0_rtx && bitpos / 32 == (bitpos + bitsize - 1) / 32)
6370 return false;
6371 else
6372 src = force_reg (mode, src);
6373 }
6374 else if (mode_s != mode)
6375 {
6376 gcc_assert (GET_MODE_BITSIZE (mode_s) >= bitsize);
6377 src = force_reg (mode_s, src);
6378 src = gen_lowpart (mode, src);
6379 }
6380
6381 op = gen_rtx_ZERO_EXTRACT (mode, dest, op1, op2),
6382 op = gen_rtx_SET (op, src);
6383
6384 if (!TARGET_ZEC12)
6385 {
6386 clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
6387 op = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clobber));
6388 }
6389 emit_insn (op);
6390
6391 return true;
6392 }
6393
6394 return false;
6395 }
6396
6397 /* A subroutine of s390_expand_cs_hqi and s390_expand_atomic which returns a
6398 register that holds VAL of mode MODE shifted by COUNT bits. */
6399
6400 static inline rtx
6401 s390_expand_mask_and_shift (rtx val, machine_mode mode, rtx count)
6402 {
6403 val = expand_simple_binop (SImode, AND, val, GEN_INT (GET_MODE_MASK (mode)),
6404 NULL_RTX, 1, OPTAB_DIRECT);
6405 return expand_simple_binop (SImode, ASHIFT, val, count,
6406 NULL_RTX, 1, OPTAB_DIRECT);
6407 }
6408
6409 /* Generate a vector comparison COND of CMP_OP1 and CMP_OP2 and store
6410 the result in TARGET. */
6411
6412 void
6413 s390_expand_vec_compare (rtx target, enum rtx_code cond,
6414 rtx cmp_op1, rtx cmp_op2)
6415 {
6416 machine_mode mode = GET_MODE (target);
6417 bool neg_p = false, swap_p = false;
6418 rtx tmp;
6419
6420 if (GET_MODE_CLASS (GET_MODE (cmp_op1)) == MODE_VECTOR_FLOAT)
6421 {
6422 switch (cond)
6423 {
6424 /* NE: a != b -> !(a == b) */
6425 case NE: cond = EQ; neg_p = true; break;
6426 /* UNGT a u> b -> !(b >= a) */
6427 case UNGT: cond = GE; neg_p = true; swap_p = true; break;
6428 /* UNGE a u>= b -> !(b > a) */
6429 case UNGE: cond = GT; neg_p = true; swap_p = true; break;
6430 /* LE: a <= b -> b >= a */
6431 case LE: cond = GE; swap_p = true; break;
6432 /* UNLE: a u<= b -> !(a > b) */
6433 case UNLE: cond = GT; neg_p = true; break;
6434 /* LT: a < b -> b > a */
6435 case LT: cond = GT; swap_p = true; break;
6436 /* UNLT: a u< b -> !(a >= b) */
6437 case UNLT: cond = GE; neg_p = true; break;
6438 case UNEQ:
6439 emit_insn (gen_vec_cmpuneq (target, cmp_op1, cmp_op2));
6440 return;
6441 case LTGT:
6442 emit_insn (gen_vec_cmpltgt (target, cmp_op1, cmp_op2));
6443 return;
6444 case ORDERED:
6445 emit_insn (gen_vec_ordered (target, cmp_op1, cmp_op2));
6446 return;
6447 case UNORDERED:
6448 emit_insn (gen_vec_unordered (target, cmp_op1, cmp_op2));
6449 return;
6450 default: break;
6451 }
6452 }
6453 else
6454 {
6455 switch (cond)
6456 {
6457 /* NE: a != b -> !(a == b) */
6458 case NE: cond = EQ; neg_p = true; break;
6459 /* GE: a >= b -> !(b > a) */
6460 case GE: cond = GT; neg_p = true; swap_p = true; break;
6461 /* GEU: a >= b -> !(b > a) */
6462 case GEU: cond = GTU; neg_p = true; swap_p = true; break;
6463 /* LE: a <= b -> !(a > b) */
6464 case LE: cond = GT; neg_p = true; break;
6465 /* LEU: a <= b -> !(a > b) */
6466 case LEU: cond = GTU; neg_p = true; break;
6467 /* LT: a < b -> b > a */
6468 case LT: cond = GT; swap_p = true; break;
6469 /* LTU: a < b -> b > a */
6470 case LTU: cond = GTU; swap_p = true; break;
6471 default: break;
6472 }
6473 }
6474
6475 if (swap_p)
6476 {
6477 tmp = cmp_op1; cmp_op1 = cmp_op2; cmp_op2 = tmp;
6478 }
6479
6480 emit_insn (gen_rtx_SET (target, gen_rtx_fmt_ee (cond,
6481 mode,
6482 cmp_op1, cmp_op2)));
6483 if (neg_p)
6484 emit_insn (gen_rtx_SET (target, gen_rtx_NOT (mode, target)));
6485 }
6486
6487 /* Expand the comparison CODE of CMP1 and CMP2 and copy 1 or 0 into
6488 TARGET if either all (ALL_P is true) or any (ALL_P is false) of the
6489 elements in CMP1 and CMP2 fulfill the comparison.
6490 This function is only used to emit patterns for the vx builtins and
6491 therefore only handles comparison codes required by the
6492 builtins. */
6493 void
6494 s390_expand_vec_compare_cc (rtx target, enum rtx_code code,
6495 rtx cmp1, rtx cmp2, bool all_p)
6496 {
6497 machine_mode cc_producer_mode, cc_consumer_mode, scratch_mode;
6498 rtx tmp_reg = gen_reg_rtx (SImode);
6499 bool swap_p = false;
6500
6501 if (GET_MODE_CLASS (GET_MODE (cmp1)) == MODE_VECTOR_INT)
6502 {
6503 switch (code)
6504 {
6505 case EQ:
6506 case NE:
6507 cc_producer_mode = CCVEQmode;
6508 break;
6509 case GE:
6510 case LT:
6511 code = swap_condition (code);
6512 swap_p = true;
6513 /* fallthrough */
6514 case GT:
6515 case LE:
6516 cc_producer_mode = CCVIHmode;
6517 break;
6518 case GEU:
6519 case LTU:
6520 code = swap_condition (code);
6521 swap_p = true;
6522 /* fallthrough */
6523 case GTU:
6524 case LEU:
6525 cc_producer_mode = CCVIHUmode;
6526 break;
6527 default:
6528 gcc_unreachable ();
6529 }
6530
6531 scratch_mode = GET_MODE (cmp1);
6532 /* These codes represent inverted CC interpretations. Inverting
6533 an ALL CC mode results in an ANY CC mode and the other way
6534 around. Invert the all_p flag here to compensate for
6535 that. */
6536 if (code == NE || code == LE || code == LEU)
6537 all_p = !all_p;
6538
6539 cc_consumer_mode = all_p ? CCVIALLmode : CCVIANYmode;
6540 }
6541 else if (GET_MODE_CLASS (GET_MODE (cmp1)) == MODE_VECTOR_FLOAT)
6542 {
6543 bool inv_p = false;
6544
6545 switch (code)
6546 {
6547 case EQ: cc_producer_mode = CCVEQmode; break;
6548 case NE: cc_producer_mode = CCVEQmode; inv_p = true; break;
6549 case GT: cc_producer_mode = CCVFHmode; break;
6550 case GE: cc_producer_mode = CCVFHEmode; break;
6551 case UNLE: cc_producer_mode = CCVFHmode; inv_p = true; break;
6552 case UNLT: cc_producer_mode = CCVFHEmode; inv_p = true; break;
6553 case LT: cc_producer_mode = CCVFHmode; code = GT; swap_p = true; break;
6554 case LE: cc_producer_mode = CCVFHEmode; code = GE; swap_p = true; break;
6555 default: gcc_unreachable ();
6556 }
6557 scratch_mode = mode_for_int_vector (GET_MODE (cmp1)).require ();
6558
6559 if (inv_p)
6560 all_p = !all_p;
6561
6562 cc_consumer_mode = all_p ? CCVFALLmode : CCVFANYmode;
6563 }
6564 else
6565 gcc_unreachable ();
6566
6567 if (swap_p)
6568 {
6569 rtx tmp = cmp2;
6570 cmp2 = cmp1;
6571 cmp1 = tmp;
6572 }
6573
6574 emit_insn (gen_rtx_PARALLEL (VOIDmode,
6575 gen_rtvec (2, gen_rtx_SET (
6576 gen_rtx_REG (cc_producer_mode, CC_REGNUM),
6577 gen_rtx_COMPARE (cc_producer_mode, cmp1, cmp2)),
6578 gen_rtx_CLOBBER (VOIDmode,
6579 gen_rtx_SCRATCH (scratch_mode)))));
6580 emit_move_insn (target, const0_rtx);
6581 emit_move_insn (tmp_reg, const1_rtx);
6582
6583 emit_move_insn (target,
6584 gen_rtx_IF_THEN_ELSE (SImode,
6585 gen_rtx_fmt_ee (code, VOIDmode,
6586 gen_rtx_REG (cc_consumer_mode, CC_REGNUM),
6587 const0_rtx),
6588 tmp_reg, target));
6589 }
6590
6591 /* Invert the comparison CODE applied to a CC mode. This is only safe
6592 if we know whether the result was created by a floating point
6593 compare or not. For the CCV modes this is encoded as part of the
6594 mode. */
6595 enum rtx_code
6596 s390_reverse_condition (machine_mode mode, enum rtx_code code)
6597 {
6598 /* Reversal of FP compares needs care -- an ordered compare
6599 becomes an unordered compare and vice versa. */
6600 if (mode == CCVFALLmode || mode == CCVFANYmode)
6601 return reverse_condition_maybe_unordered (code);
6602 else if (mode == CCVIALLmode || mode == CCVIANYmode)
6603 return reverse_condition (code);
6604 else
6605 gcc_unreachable ();
6606 }
6607
6608 /* Generate a vector comparison expression loading either elements of
6609 THEN or ELS into TARGET depending on the comparison COND of CMP_OP1
6610 and CMP_OP2. */
6611
6612 void
6613 s390_expand_vcond (rtx target, rtx then, rtx els,
6614 enum rtx_code cond, rtx cmp_op1, rtx cmp_op2)
6615 {
6616 rtx tmp;
6617 machine_mode result_mode;
6618 rtx result_target;
6619
6620 machine_mode target_mode = GET_MODE (target);
6621 machine_mode cmp_mode = GET_MODE (cmp_op1);
6622 rtx op = (cond == LT) ? els : then;
6623
6624 /* Try to optimize x < 0 ? -1 : 0 into (signed) x >> 31
6625 and x < 0 ? 1 : 0 into (unsigned) x >> 31. Likewise
6626 for short and byte (x >> 15 and x >> 7 respectively). */
6627 if ((cond == LT || cond == GE)
6628 && target_mode == cmp_mode
6629 && cmp_op2 == CONST0_RTX (cmp_mode)
6630 && op == CONST0_RTX (target_mode)
6631 && s390_vector_mode_supported_p (target_mode)
6632 && GET_MODE_CLASS (target_mode) == MODE_VECTOR_INT)
6633 {
6634 rtx negop = (cond == LT) ? then : els;
6635
6636 int shift = GET_MODE_BITSIZE (GET_MODE_INNER (target_mode)) - 1;
6637
6638 /* if x < 0 ? 1 : 0 or if x >= 0 ? 0 : 1 */
6639 if (negop == CONST1_RTX (target_mode))
6640 {
6641 rtx res = expand_simple_binop (cmp_mode, LSHIFTRT, cmp_op1,
6642 GEN_INT (shift), target,
6643 1, OPTAB_DIRECT);
6644 if (res != target)
6645 emit_move_insn (target, res);
6646 return;
6647 }
6648
6649 /* if x < 0 ? -1 : 0 or if x >= 0 ? 0 : -1 */
6650 else if (all_ones_operand (negop, target_mode))
6651 {
6652 rtx res = expand_simple_binop (cmp_mode, ASHIFTRT, cmp_op1,
6653 GEN_INT (shift), target,
6654 0, OPTAB_DIRECT);
6655 if (res != target)
6656 emit_move_insn (target, res);
6657 return;
6658 }
6659 }
6660
6661 /* We always use an integral type vector to hold the comparison
6662 result. */
6663 result_mode = mode_for_int_vector (cmp_mode).require ();
6664 result_target = gen_reg_rtx (result_mode);
6665
6666 /* We allow vector immediates as comparison operands that
6667 can be handled by the optimization above but not by the
6668 following code. Hence, force them into registers here. */
6669 if (!REG_P (cmp_op1))
6670 cmp_op1 = force_reg (GET_MODE (cmp_op1), cmp_op1);
6671
6672 if (!REG_P (cmp_op2))
6673 cmp_op2 = force_reg (GET_MODE (cmp_op2), cmp_op2);
6674
6675 s390_expand_vec_compare (result_target, cond,
6676 cmp_op1, cmp_op2);
6677
6678 /* If the results are supposed to be either -1 or 0 we are done
6679 since this is what our compare instructions generate anyway. */
6680 if (all_ones_operand (then, GET_MODE (then))
6681 && const0_operand (els, GET_MODE (els)))
6682 {
6683 emit_move_insn (target, gen_rtx_SUBREG (target_mode,
6684 result_target, 0));
6685 return;
6686 }
6687
6688 /* Otherwise we will do a vsel afterwards. */
6689 /* This gets triggered e.g.
6690 with gcc.c-torture/compile/pr53410-1.c */
6691 if (!REG_P (then))
6692 then = force_reg (target_mode, then);
6693
6694 if (!REG_P (els))
6695 els = force_reg (target_mode, els);
6696
6697 tmp = gen_rtx_fmt_ee (EQ, VOIDmode,
6698 result_target,
6699 CONST0_RTX (result_mode));
6700
6701 /* We compared the result against zero above so we have to swap then
6702 and els here. */
6703 tmp = gen_rtx_IF_THEN_ELSE (target_mode, tmp, els, then);
6704
6705 gcc_assert (target_mode == GET_MODE (then));
6706 emit_insn (gen_rtx_SET (target, tmp));
6707 }
6708
6709 /* Emit the RTX necessary to initialize the vector TARGET with values
6710 in VALS. */
6711 void
6712 s390_expand_vec_init (rtx target, rtx vals)
6713 {
6714 machine_mode mode = GET_MODE (target);
6715 machine_mode inner_mode = GET_MODE_INNER (mode);
6716 int n_elts = GET_MODE_NUNITS (mode);
6717 bool all_same = true, all_regs = true, all_const_int = true;
6718 rtx x;
6719 int i;
6720
6721 for (i = 0; i < n_elts; ++i)
6722 {
6723 x = XVECEXP (vals, 0, i);
6724
6725 if (!CONST_INT_P (x))
6726 all_const_int = false;
6727
6728 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
6729 all_same = false;
6730
6731 if (!REG_P (x))
6732 all_regs = false;
6733 }
6734
6735 /* Use vector gen mask or vector gen byte mask if possible. */
6736 if (all_same && all_const_int
6737 && (XVECEXP (vals, 0, 0) == const0_rtx
6738 || s390_contiguous_bitmask_vector_p (XVECEXP (vals, 0, 0),
6739 NULL, NULL)
6740 || s390_bytemask_vector_p (XVECEXP (vals, 0, 0), NULL)))
6741 {
6742 emit_insn (gen_rtx_SET (target,
6743 gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0))));
6744 return;
6745 }
6746
6747 if (all_same)
6748 {
6749 emit_insn (gen_rtx_SET (target,
6750 gen_rtx_VEC_DUPLICATE (mode,
6751 XVECEXP (vals, 0, 0))));
6752 return;
6753 }
6754
6755 if (all_regs
6756 && REG_P (target)
6757 && n_elts == 2
6758 && GET_MODE_SIZE (inner_mode) == 8)
6759 {
6760 /* Use vector load pair. */
6761 emit_insn (gen_rtx_SET (target,
6762 gen_rtx_VEC_CONCAT (mode,
6763 XVECEXP (vals, 0, 0),
6764 XVECEXP (vals, 0, 1))));
6765 return;
6766 }
6767
6768 /* Use vector load logical element and zero. */
6769 if (TARGET_VXE && (mode == V4SImode || mode == V4SFmode))
6770 {
6771 bool found = true;
6772
6773 x = XVECEXP (vals, 0, 0);
6774 if (memory_operand (x, inner_mode))
6775 {
6776 for (i = 1; i < n_elts; ++i)
6777 found = found && XVECEXP (vals, 0, i) == const0_rtx;
6778
6779 if (found)
6780 {
6781 machine_mode half_mode = (inner_mode == SFmode
6782 ? V2SFmode : V2SImode);
6783 emit_insn (gen_rtx_SET (target,
6784 gen_rtx_VEC_CONCAT (mode,
6785 gen_rtx_VEC_CONCAT (half_mode,
6786 x,
6787 const0_rtx),
6788 gen_rtx_VEC_CONCAT (half_mode,
6789 const0_rtx,
6790 const0_rtx))));
6791 return;
6792 }
6793 }
6794 }
6795
6796 /* We are about to set the vector elements one by one. Zero out the
6797 full register first in order to help the data flow framework to
6798 detect it as a full VR set. */
6799 emit_insn (gen_rtx_SET (target, CONST0_RTX (mode)));
6800
6801 /* Unfortunately the vec_init expander is not allowed to fail. So
6802 we have to implement the fallback ourselves. */
6803 for (i = 0; i < n_elts; i++)
6804 {
6805 rtx elem = XVECEXP (vals, 0, i);
6806 if (!general_operand (elem, GET_MODE (elem)))
6807 elem = force_reg (inner_mode, elem);
6808
6809 emit_insn (gen_rtx_SET (target,
6810 gen_rtx_UNSPEC (mode,
6811 gen_rtvec (3, elem,
6812 GEN_INT (i), target),
6813 UNSPEC_VEC_SET)));
6814 }
6815 }
6816
6817 /* Structure to hold the initial parameters for a compare_and_swap operation
6818 in HImode and QImode. */
6819
6820 struct alignment_context
6821 {
6822 rtx memsi; /* SI aligned memory location. */
6823 rtx shift; /* Bit offset with regard to lsb. */
6824 rtx modemask; /* Mask of the HQImode shifted by SHIFT bits. */
6825 rtx modemaski; /* ~modemask */
6826 bool aligned; /* True if memory is aligned, false else. */
6827 };
6828
6829 /* A subroutine of s390_expand_cs_hqi and s390_expand_atomic to initialize
6830 structure AC for transparent simplification, if the memory alignment is known
6831 to be at least 32 bit. MEM is the memory location for the actual operation
6832 and MODE its mode. */
6833
6834 static void
6835 init_alignment_context (struct alignment_context *ac, rtx mem,
6836 machine_mode mode)
6837 {
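/* The target is big-endian: a HI/QImode value at byte offset 0 of its
   SImode word occupies the leftmost bytes, so its distance from the
   word's LSB is the size difference in bytes; the unaligned case
   adjusts this by the actual byte offset below.  */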
6838 ac->shift = GEN_INT (GET_MODE_SIZE (SImode) - GET_MODE_SIZE (mode));
6839 ac->aligned = (MEM_ALIGN (mem) >= GET_MODE_BITSIZE (SImode));
6840
6841 if (ac->aligned)
6842 ac->memsi = adjust_address (mem, SImode, 0); /* Memory is aligned. */
6843 else
6844 {
6845 /* Alignment is unknown. */
6846 rtx byteoffset, addr, align;
6847
6848 /* Force the address into a register. */
6849 addr = force_reg (Pmode, XEXP (mem, 0));
6850
6851 /* Align it to SImode. */
6852 align = expand_simple_binop (Pmode, AND, addr,
6853 GEN_INT (-GET_MODE_SIZE (SImode)),
6854 NULL_RTX, 1, OPTAB_DIRECT);
6855 /* Generate MEM. */
6856 ac->memsi = gen_rtx_MEM (SImode, align);
6857 MEM_VOLATILE_P (ac->memsi) = MEM_VOLATILE_P (mem);
6858 set_mem_alias_set (ac->memsi, ALIAS_SET_MEMORY_BARRIER);
6859 set_mem_align (ac->memsi, GET_MODE_BITSIZE (SImode));
6860
6861 /* Calculate shiftcount. */
6862 byteoffset = expand_simple_binop (Pmode, AND, addr,
6863 GEN_INT (GET_MODE_SIZE (SImode) - 1),
6864 NULL_RTX, 1, OPTAB_DIRECT);
6865 /* As we already have some offset, evaluate the remaining distance. */
6866 ac->shift = expand_simple_binop (SImode, MINUS, ac->shift, byteoffset,
6867 NULL_RTX, 1, OPTAB_DIRECT);
6868 }
6869
6870 /* Shift is the byte count, but we need the bitcount. */
6871 ac->shift = expand_simple_binop (SImode, ASHIFT, ac->shift, GEN_INT (3),
6872 NULL_RTX, 1, OPTAB_DIRECT);
6873
6874 /* Calculate masks. */
6875 ac->modemask = expand_simple_binop (SImode, ASHIFT,
6876 GEN_INT (GET_MODE_MASK (mode)),
6877 ac->shift, NULL_RTX, 1, OPTAB_DIRECT);
6878 ac->modemaski = expand_simple_unop (SImode, NOT, ac->modemask,
6879 NULL_RTX, 1);
6880 }
6881
6882 /* A subroutine of s390_expand_cs_hqi. Insert INS into VAL. If possible,
6883 use a single insv insn into SEQ2. Otherwise, put prep insns in SEQ1 and
6884 perform the merge in SEQ2. */
6885
6886 static rtx
6887 s390_two_part_insv (struct alignment_context *ac, rtx *seq1, rtx *seq2,
6888 machine_mode mode, rtx val, rtx ins)
6889 {
6890 rtx tmp;
6891
6892 if (ac->aligned)
6893 {
6894 start_sequence ();
6895 tmp = copy_to_mode_reg (SImode, val);
6896 if (s390_expand_insv (tmp, GEN_INT (GET_MODE_BITSIZE (mode)),
6897 const0_rtx, ins))
6898 {
6899 *seq1 = NULL;
6900 *seq2 = get_insns ();
6901 end_sequence ();
6902 return tmp;
6903 }
6904 end_sequence ();
6905 }
6906
6907 /* Failed to use insv. Generate a two part shift and mask. */
6908 start_sequence ();
6909 tmp = s390_expand_mask_and_shift (ins, mode, ac->shift);
6910 *seq1 = get_insns ();
6911 end_sequence ();
6912
6913 start_sequence ();
6914 tmp = expand_simple_binop (SImode, IOR, tmp, val, NULL_RTX, 1, OPTAB_DIRECT);
6915 *seq2 = get_insns ();
6916 end_sequence ();
6917
6918 return tmp;
6919 }
6920
6921 /* Expand an atomic compare and swap operation for HImode and QImode. MEM is
6922 the memory location, CMP the old value to compare MEM with and NEW_RTX the
6923 value to set if CMP == MEM. */
6924
6925 static void
6926 s390_expand_cs_hqi (machine_mode mode, rtx btarget, rtx vtarget, rtx mem,
6927 rtx cmp, rtx new_rtx, bool is_weak)
6928 {
6929 struct alignment_context ac;
6930 rtx cmpv, newv, val, cc, seq0, seq1, seq2, seq3;
6931 rtx res = gen_reg_rtx (SImode);
6932 rtx_code_label *csloop = NULL, *csend = NULL;
6933
6934 gcc_assert (MEM_P (mem));
6935
6936 init_alignment_context (&ac, mem, mode);
6937
6938 /* Load full word. Subsequent loads are performed by CS. */
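/* The AND with modemaski clears the HQImode field inside the word;
   cmp and new_rtx are inserted into that hole (via insv or
   shift + or) below.  */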
6939 val = expand_simple_binop (SImode, AND, ac.memsi, ac.modemaski,
6940 NULL_RTX, 1, OPTAB_DIRECT);
6941
6942 /* Prepare insertions of cmp and new_rtx into the loaded value. When
6943 possible, we try to use insv to make this happen efficiently. If
6944 that fails we'll generate code both inside and outside the loop. */
6945 cmpv = s390_two_part_insv (&ac, &seq0, &seq2, mode, val, cmp);
6946 newv = s390_two_part_insv (&ac, &seq1, &seq3, mode, val, new_rtx);
6947
6948 if (seq0)
6949 emit_insn (seq0);
6950 if (seq1)
6951 emit_insn (seq1);
6952
6953 /* Start CS loop. */
6954 if (!is_weak)
6955 {
6956 /* Begin assuming success. */
6957 emit_move_insn (btarget, const1_rtx);
6958
6959 csloop = gen_label_rtx ();
6960 csend = gen_label_rtx ();
6961 emit_label (csloop);
6962 }
6963
6964 /* val = "<mem>00..0<mem>"
6965 * cmp = "00..0<cmp>00..0"
6966 * new = "00..0<new>00..0"
6967 */
6968
6969 emit_insn (seq2);
6970 emit_insn (seq3);
6971
6972 cc = s390_emit_compare_and_swap (EQ, res, ac.memsi, cmpv, newv, CCZ1mode);
6973 if (is_weak)
6974 emit_insn (gen_cstorecc4 (btarget, cc, XEXP (cc, 0), XEXP (cc, 1)));
6975 else
6976 {
6977 rtx tmp;
6978
6979 /* Jump to end if we're done (likely?). */
6980 s390_emit_jump (csend, cc);
6981
6982 /* Check for changes outside mode, and loop internally if so.
6983 Arrange the moves so that the compare is adjacent to the
6984 branch so that we can generate CRJ. */
6985 tmp = copy_to_reg (val);
6986 force_expand_binop (SImode, and_optab, res, ac.modemaski, val,
6987 1, OPTAB_DIRECT);
6988 cc = s390_emit_compare (NE, val, tmp);
6989 s390_emit_jump (csloop, cc);
6990
6991 /* Failed. */
6992 emit_move_insn (btarget, const0_rtx);
6993 emit_label (csend);
6994 }
6995
6996 /* Return the correct part of the bitfield. */
6997 convert_move (vtarget, expand_simple_binop (SImode, LSHIFTRT, res, ac.shift,
6998 NULL_RTX, 1, OPTAB_DIRECT), 1);
6999 }
7000
7001 /* Variant of s390_expand_cs for SI, DI and TI modes. */
7002 static void
7003 s390_expand_cs_tdsi (machine_mode mode, rtx btarget, rtx vtarget, rtx mem,
7004 rtx cmp, rtx new_rtx, bool is_weak)
7005 {
7006 rtx output = vtarget;
7007 rtx_code_label *skip_cs_label = NULL;
7008 bool do_const_opt = false;
7009
7010 if (!register_operand (output, mode))
7011 output = gen_reg_rtx (mode);
7012
7013 /* If IS_WEAK is true and the INPUT value is a constant, compare the memory
7014 with the constant first and skip the compare_and_swap because it is very
7015 expensive and likely to fail anyway.
7016 Note 1: This is done only for IS_WEAK. C11 allows optimizations that may
7017 cause spurious failures in that case.
7018 Note 2: It may be useful to do this also for non-constant INPUT.
7019 Note 3: Currently only targets with "load on condition" are supported
7020 (z196 and newer). */
7021
7022 if (TARGET_Z196
7023 && (mode == SImode || mode == DImode))
7024 do_const_opt = (is_weak && CONST_INT_P (cmp));
7025
7026 if (do_const_opt)
7027 {
7028 rtx cc = gen_rtx_REG (CCZmode, CC_REGNUM);
7029
7030 skip_cs_label = gen_label_rtx ();
7031 emit_move_insn (btarget, const0_rtx);
7032 if (CONST_INT_P (cmp) && INTVAL (cmp) == 0)
7033 {
7034 rtvec lt = rtvec_alloc (2);
7035
7036 /* Load-and-test + conditional jump. */
7037 RTVEC_ELT (lt, 0)
7038 = gen_rtx_SET (cc, gen_rtx_COMPARE (CCZmode, mem, cmp));
7039 RTVEC_ELT (lt, 1) = gen_rtx_SET (output, mem);
7040 emit_insn (gen_rtx_PARALLEL (VOIDmode, lt));
7041 }
7042 else
7043 {
7044 emit_move_insn (output, mem);
7045 emit_insn (gen_rtx_SET (cc, gen_rtx_COMPARE (CCZmode, output, cmp)));
7046 }
7047 s390_emit_jump (skip_cs_label, gen_rtx_NE (VOIDmode, cc, const0_rtx));
7048 add_reg_br_prob_note (get_last_insn (),
7049 profile_probability::very_unlikely ());
7050 /* If the jump is not taken, OUTPUT is the expected value. */
7051 cmp = output;
7052 /* Reload newval to a register manually, *after* the compare and jump
7053 above. Otherwise Reload might place it before the jump. */
7054 }
7055 else
7056 cmp = force_reg (mode, cmp);
7057 new_rtx = force_reg (mode, new_rtx);
7058 s390_emit_compare_and_swap (EQ, output, mem, cmp, new_rtx,
7059 (do_const_opt) ? CCZmode : CCZ1mode);
7060 if (skip_cs_label != NULL)
7061 emit_label (skip_cs_label);
7062
7063 /* We deliberately accept non-register operands in the predicate
7064 to ensure the write back to the output operand happens *before*
7065 the store-flags code below. This makes it easier for combine
7066 to merge the store-flags code with a potential test-and-branch
7067 pattern following (immediately!) afterwards. */
7068 if (output != vtarget)
7069 emit_move_insn (vtarget, output);
7070
7071 if (do_const_opt)
7072 {
7073 rtx cc, cond, ite;
7074
7075 /* Do not use gen_cstorecc4 here because it writes either 1 or 0, but
7076 btarget has already been initialized with 0 above. */
7077 cc = gen_rtx_REG (CCZmode, CC_REGNUM);
7078 cond = gen_rtx_EQ (VOIDmode, cc, const0_rtx);
7079 ite = gen_rtx_IF_THEN_ELSE (SImode, cond, const1_rtx, btarget);
7080 emit_insn (gen_rtx_SET (btarget, ite));
7081 }
7082 else
7083 {
7084 rtx cc, cond;
7085
7086 cc = gen_rtx_REG (CCZ1mode, CC_REGNUM);
7087 cond = gen_rtx_EQ (SImode, cc, const0_rtx);
7088 emit_insn (gen_cstorecc4 (btarget, cond, cc, const0_rtx));
7089 }
7090 }
7091
7092 /* Expand an atomic compare and swap operation. MEM is the memory location,
7093 CMP the old value to compare MEM with and NEW_RTX the value to set if
7094 CMP == MEM. */
7095
7096 void
7097 s390_expand_cs (machine_mode mode, rtx btarget, rtx vtarget, rtx mem,
7098 rtx cmp, rtx new_rtx, bool is_weak)
7099 {
7100 switch (mode)
7101 {
7102 case E_TImode:
7103 case E_DImode:
7104 case E_SImode:
7105 s390_expand_cs_tdsi (mode, btarget, vtarget, mem, cmp, new_rtx, is_weak);
7106 break;
7107 case E_HImode:
7108 case E_QImode:
7109 s390_expand_cs_hqi (mode, btarget, vtarget, mem, cmp, new_rtx, is_weak);
7110 break;
7111 default:
7112 gcc_unreachable ();
7113 }
7114 }
7115
7116 /* Expand an atomic_exchange operation simulated with a compare-and-swap loop.
7117 The memory location MEM is set to INPUT. OUTPUT is set to the previous value
7118 of MEM. */
7119
7120 void
7121 s390_expand_atomic_exchange_tdsi (rtx output, rtx mem, rtx input)
7122 {
7123 machine_mode mode = GET_MODE (mem);
7124 rtx_code_label *csloop;
7125
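/* Exchanging with the constant zero does not need a cs loop: a single
   interlocked load-and-and (lan/lang, available from z196) with zero
   clears the memory location and returns its previous contents.  */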
7126 if (TARGET_Z196
7127 && (mode == DImode || mode == SImode)
7128 && CONST_INT_P (input) && INTVAL (input) == 0)
7129 {
7130 emit_move_insn (output, const0_rtx);
7131 if (mode == DImode)
7132 emit_insn (gen_atomic_fetch_anddi (output, mem, const0_rtx, input));
7133 else
7134 emit_insn (gen_atomic_fetch_andsi (output, mem, const0_rtx, input));
7135 return;
7136 }
7137
7138 input = force_reg (mode, input);
7139 emit_move_insn (output, mem);
7140 csloop = gen_label_rtx ();
7141 emit_label (csloop);
7142 s390_emit_jump (csloop, s390_emit_compare_and_swap (NE, output, mem, output,
7143 input, CCZ1mode));
7144 }
7145
7146 /* Expand an atomic operation CODE of mode MODE. MEM is the memory location
7147 and VAL the value to play with. If AFTER is true then store the value
7148 MEM holds after the operation, if AFTER is false then store the value MEM
7149 holds before the operation. If TARGET is zero then discard that value, else
7150 store it to TARGET. */
7151
7152 void
7153 s390_expand_atomic (machine_mode mode, enum rtx_code code,
7154 rtx target, rtx mem, rtx val, bool after)
7155 {
7156 struct alignment_context ac;
7157 rtx cmp;
7158 rtx new_rtx = gen_reg_rtx (SImode);
7159 rtx orig = gen_reg_rtx (SImode);
7160 rtx_code_label *csloop = gen_label_rtx ();
7161
7162 gcc_assert (!target || register_operand (target, VOIDmode));
7163 gcc_assert (MEM_P (mem));
7164
7165 init_alignment_context (&ac, mem, mode);
7166
7167 /* Shift val to the correct bit positions.
7168 Preserve "icm", but prevent "ex icm". */
7169 if (!(ac.aligned && code == SET && MEM_P (val)))
7170 val = s390_expand_mask_and_shift (val, mode, ac.shift);
7171
7172 /* Further preparation insns. */
7173 if (code == PLUS || code == MINUS)
7174 emit_move_insn (orig, val);
7175 else if (code == MULT || code == AND) /* val = "11..1<val>11..1" */
7176 val = expand_simple_binop (SImode, XOR, val, ac.modemaski,
7177 NULL_RTX, 1, OPTAB_DIRECT);
7178
7179 /* Load full word. Subsequent loads are performed by CS. */
7180 cmp = force_reg (SImode, ac.memsi);
7181
7182 /* Start CS loop. */
7183 emit_label (csloop);
7184 emit_move_insn (new_rtx, cmp);
7185
7186 /* Patch new with val at correct position. */
7187 switch (code)
7188 {
7189 case PLUS:
7190 case MINUS:
7191 val = expand_simple_binop (SImode, code, new_rtx, orig,
7192 NULL_RTX, 1, OPTAB_DIRECT);
7193 val = expand_simple_binop (SImode, AND, val, ac.modemask,
7194 NULL_RTX, 1, OPTAB_DIRECT);
7195 /* FALLTHRU */
7196 case SET:
7197 if (ac.aligned && MEM_P (val))
7198 store_bit_field (new_rtx, GET_MODE_BITSIZE (mode), 0,
7199 0, 0, SImode, val, false);
7200 else
7201 {
7202 new_rtx = expand_simple_binop (SImode, AND, new_rtx, ac.modemaski,
7203 NULL_RTX, 1, OPTAB_DIRECT);
7204 new_rtx = expand_simple_binop (SImode, IOR, new_rtx, val,
7205 NULL_RTX, 1, OPTAB_DIRECT);
7206 }
7207 break;
7208 case AND:
7209 case IOR:
7210 case XOR:
7211 new_rtx = expand_simple_binop (SImode, code, new_rtx, val,
7212 NULL_RTX, 1, OPTAB_DIRECT);
7213 break;
7214 case MULT: /* NAND */
7215 new_rtx = expand_simple_binop (SImode, AND, new_rtx, val,
7216 NULL_RTX, 1, OPTAB_DIRECT);
7217 new_rtx = expand_simple_binop (SImode, XOR, new_rtx, ac.modemask,
7218 NULL_RTX, 1, OPTAB_DIRECT);
7219 break;
7220 default:
7221 gcc_unreachable ();
7222 }
7223
7224 s390_emit_jump (csloop, s390_emit_compare_and_swap (NE, cmp,
7225 ac.memsi, cmp, new_rtx,
7226 CCZ1mode));
7227
7228 /* Return the correct part of the bitfield. */
7229 if (target)
7230 convert_move (target, expand_simple_binop (SImode, LSHIFTRT,
7231 after ? new_rtx : cmp, ac.shift,
7232 NULL_RTX, 1, OPTAB_DIRECT), 1);
7233 }
7234
7235 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
7236 We need to emit DTP-relative relocations. */
7237
7238 static void s390_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
7239
7240 static void
7241 s390_output_dwarf_dtprel (FILE *file, int size, rtx x)
7242 {
7243 switch (size)
7244 {
7245 case 4:
7246 fputs ("\t.long\t", file);
7247 break;
7248 case 8:
7249 fputs ("\t.quad\t", file);
7250 break;
7251 default:
7252 gcc_unreachable ();
7253 }
7254 output_addr_const (file, x);
7255 fputs ("@DTPOFF", file);
7256 }
7257
7258 /* Return the proper mode for REGNO being represented in the dwarf
7259 unwind table. */
7260 machine_mode
7261 s390_dwarf_frame_reg_mode (int regno)
7262 {
7263 machine_mode save_mode = default_dwarf_frame_reg_mode (regno);
7264
7265 /* Make sure not to return DImode for any GPR with -m31 -mzarch. */
7266 if (GENERAL_REGNO_P (regno))
7267 save_mode = Pmode;
7268
7269 /* The rightmost 64 bits of vector registers are call-clobbered. */
7270 if (GET_MODE_SIZE (save_mode) > 8)
7271 save_mode = DImode;
7272
7273 return save_mode;
7274 }
7275
7276 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
7277 /* Implement TARGET_MANGLE_TYPE. */
7278
7279 static const char *
7280 s390_mangle_type (const_tree type)
7281 {
7282 type = TYPE_MAIN_VARIANT (type);
7283
7284 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
7285 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
7286 return NULL;
7287
7288 if (type == s390_builtin_types[BT_BV16QI]) return "U6__boolc";
7289 if (type == s390_builtin_types[BT_BV8HI]) return "U6__bools";
7290 if (type == s390_builtin_types[BT_BV4SI]) return "U6__booli";
7291 if (type == s390_builtin_types[BT_BV2DI]) return "U6__booll";
7292
7293 if (TYPE_MAIN_VARIANT (type) == long_double_type_node
7294 && TARGET_LONG_DOUBLE_128)
7295 return "g";
7296
7297 /* For all other types, use normal C++ mangling. */
7298 return NULL;
7299 }
7300 #endif
7301
7302 /* In the name of slightly smaller debug output, and to cater to
7303 general assembler lossage, recognize various UNSPEC sequences
7304 and turn them back into a direct symbol reference. */
7305
7306 static rtx
7307 s390_delegitimize_address (rtx orig_x)
7308 {
7309 rtx x, y;
7310
7311 orig_x = delegitimize_mem_from_attrs (orig_x);
7312 x = orig_x;
7313
7314 /* Extract the symbol ref from:
7315 (plus:SI (reg:SI 12 %r12)
7316 (const:SI (unspec:SI [(symbol_ref/f:SI ("*.LC0"))]
7317 UNSPEC_GOTOFF/PLTOFF)))
7318 and
7319 (plus:SI (reg:SI 12 %r12)
7320 (const:SI (plus:SI (unspec:SI [(symbol_ref:SI ("L"))]
7321 UNSPEC_GOTOFF/PLTOFF)
7322 (const_int 4 [0x4])))) */
7323 if (GET_CODE (x) == PLUS
7324 && REG_P (XEXP (x, 0))
7325 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM
7326 && GET_CODE (XEXP (x, 1)) == CONST)
7327 {
7328 HOST_WIDE_INT offset = 0;
7329
7330 /* The const operand. */
7331 y = XEXP (XEXP (x, 1), 0);
7332
7333 if (GET_CODE (y) == PLUS
7334 && GET_CODE (XEXP (y, 1)) == CONST_INT)
7335 {
7336 offset = INTVAL (XEXP (y, 1));
7337 y = XEXP (y, 0);
7338 }
7339
7340 if (GET_CODE (y) == UNSPEC
7341 && (XINT (y, 1) == UNSPEC_GOTOFF
7342 || XINT (y, 1) == UNSPEC_PLTOFF))
7343 return plus_constant (Pmode, XVECEXP (y, 0, 0), offset);
7344 }
7345
7346 if (GET_CODE (x) != MEM)
7347 return orig_x;
7348
7349 x = XEXP (x, 0);
7350 if (GET_CODE (x) == PLUS
7351 && GET_CODE (XEXP (x, 1)) == CONST
7352 && GET_CODE (XEXP (x, 0)) == REG
7353 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM)
7354 {
7355 y = XEXP (XEXP (x, 1), 0);
7356 if (GET_CODE (y) == UNSPEC
7357 && XINT (y, 1) == UNSPEC_GOT)
7358 y = XVECEXP (y, 0, 0);
7359 else
7360 return orig_x;
7361 }
7362 else if (GET_CODE (x) == CONST)
7363 {
7364 /* Extract the symbol ref from:
7365 (mem:QI (const:DI (unspec:DI [(symbol_ref:DI ("foo"))]
7366 UNSPEC_PLT/GOTENT))) */
7367
7368 y = XEXP (x, 0);
7369 if (GET_CODE (y) == UNSPEC
7370 && (XINT (y, 1) == UNSPEC_GOTENT
7371 || XINT (y, 1) == UNSPEC_PLT))
7372 y = XVECEXP (y, 0, 0);
7373 else
7374 return orig_x;
7375 }
7376 else
7377 return orig_x;
7378
7379 if (GET_MODE (orig_x) != Pmode)
7380 {
7381 if (GET_MODE (orig_x) == BLKmode)
7382 return orig_x;
7383 y = lowpart_subreg (GET_MODE (orig_x), y, Pmode);
7384 if (y == NULL_RTX)
7385 return orig_x;
7386 }
7387 return y;
7388 }
7389
7390 /* Output operand OP to stdio stream FILE.
7391 OP is an address (register + offset) which is not used to address data;
7392 instead the rightmost bits are interpreted as the value. */
7393
7394 static void
7395 print_addrstyle_operand (FILE *file, rtx op)
7396 {
7397 HOST_WIDE_INT offset;
7398 rtx base;
7399
7400 /* Extract base register and offset. */
7401 if (!s390_decompose_addrstyle_without_index (op, &base, &offset))
7402 gcc_unreachable ();
7403
7404 /* Sanity check. */
7405 if (base)
7406 {
7407 gcc_assert (GET_CODE (base) == REG);
7408 gcc_assert (REGNO (base) < FIRST_PSEUDO_REGISTER);
7409 gcc_assert (REGNO_REG_CLASS (REGNO (base)) == ADDR_REGS);
7410 }
7411
7412 /* Offsets are restricted to twelve bits. */
7413 fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset & ((1 << 12) - 1));
7414 if (base)
7415 fprintf (file, "(%s)", reg_names[REGNO (base)]);
7416 }
7417
7418 /* Assigns the number of NOP halfwords to be emitted before and after the
7419 function label to *HW_BEFORE and *HW_AFTER. Both pointers must not be NULL.
7420 If hotpatching is disabled for the function, the values are set to zero.
7421 */
7422
7423 static void
7424 s390_function_num_hotpatch_hw (tree decl,
7425 int *hw_before,
7426 int *hw_after)
7427 {
7428 tree attr;
7429
7430 attr = lookup_attribute ("hotpatch", DECL_ATTRIBUTES (decl));
7431
7432 /* Handle the arguments of the hotpatch attribute. The values
7433 specified via attribute might override the cmdline argument
7434 values. */
7435 if (attr)
7436 {
7437 tree args = TREE_VALUE (attr);
7438
7439 *hw_before = TREE_INT_CST_LOW (TREE_VALUE (args));
7440 *hw_after = TREE_INT_CST_LOW (TREE_VALUE (TREE_CHAIN (args)));
7441 }
7442 else
7443 {
7444 /* Use the values specified by the cmdline arguments. */
7445 *hw_before = s390_hotpatch_hw_before_label;
7446 *hw_after = s390_hotpatch_hw_after_label;
7447 }
7448 }
7449
7450 /* Write the current .machine and .machinemode specification to the assembler
7451 file. */
7452
7453 #ifdef HAVE_AS_MACHINE_MACHINEMODE
7454 static void
7455 s390_asm_output_machine_for_arch (FILE *asm_out_file)
7456 {
7457 fprintf (asm_out_file, "\t.machinemode %s\n",
7458 (TARGET_ZARCH) ? "zarch" : "esa");
7459 fprintf (asm_out_file, "\t.machine \"%s",
7460 processor_table[s390_arch].binutils_name);
7461 if (S390_USE_ARCHITECTURE_MODIFIERS)
7462 {
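/* Append facility modifiers whenever the HTM/VX settings in effect
   differ from what the plain architecture name implies.  */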
7463 int cpu_flags;
7464
7465 cpu_flags = processor_flags_table[(int) s390_arch];
7466 if (TARGET_HTM && !(cpu_flags & PF_TX))
7467 fprintf (asm_out_file, "+htm");
7468 else if (!TARGET_HTM && (cpu_flags & PF_TX))
7469 fprintf (asm_out_file, "+nohtm");
7470 if (TARGET_VX && !(cpu_flags & PF_VX))
7471 fprintf (asm_out_file, "+vx");
7472 else if (!TARGET_VX && (cpu_flags & PF_VX))
7473 fprintf (asm_out_file, "+novx");
7474 }
7475 fprintf (asm_out_file, "\"\n");
7476 }
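
/* Illustrative output, assuming a -march=z13 -mno-vx compilation, that
"z13" is the binutils name of the selected arch, and that the assembler
supports architecture modifiers:
.machinemode zarch
.machine "z13+novx"
The "+vx"/"+novx" and "+htm"/"+nohtm" suffixes are only emitted when the
enabled facilities differ from what the architecture level implies. */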
7477
7478 /* Write an extra function header before the very start of the function. */
7479
7480 void
7481 s390_asm_output_function_prefix (FILE *asm_out_file,
7482 const char *fnname ATTRIBUTE_UNUSED)
7483 {
7484 if (DECL_FUNCTION_SPECIFIC_TARGET (current_function_decl) == NULL)
7485 return;
7486 /* Since only the function-specific options are saved, but not which options
7487 were explicitly set, it's too much work here to figure out which options
7488 have actually changed. Thus, generate .machine and .machinemode whenever a
7489 function has the target attribute or pragma. */
7490 fprintf (asm_out_file, "\t.machinemode push\n");
7491 fprintf (asm_out_file, "\t.machine push\n");
7492 s390_asm_output_machine_for_arch (asm_out_file);
7493 }
7494
7495 /* Write an extra function footer after the very end of the function. */
7496
7497 void
7498 s390_asm_declare_function_size (FILE *asm_out_file,
7499 const char *fnname, tree decl)
7500 {
7501 if (!flag_inhibit_size_directive)
7502 ASM_OUTPUT_MEASURED_SIZE (asm_out_file, fnname);
7503 if (DECL_FUNCTION_SPECIFIC_TARGET (decl) == NULL)
7504 return;
7505 fprintf (asm_out_file, "\t.machine pop\n");
7506 fprintf (asm_out_file, "\t.machinemode pop\n");
7507 }
7508 #endif
7509
7510 /* Write the extra assembler code needed to declare a function properly. */
7511
7512 void
7513 s390_asm_output_function_label (FILE *asm_out_file, const char *fname,
7514 tree decl)
7515 {
7516 int hw_before, hw_after;
7517
7518 s390_function_num_hotpatch_hw (decl, &hw_before, &hw_after);
7519 if (hw_before > 0)
7520 {
7521 unsigned int function_alignment;
7522 int i;
7523
7524 /* Add a trampoline code area before the function label and initialize it
7525 with two-byte nop instructions. This area can be overwritten with code
7526 that jumps to a patched version of the function. */
7527 asm_fprintf (asm_out_file, "\tnopr\t%%r0"
7528 "\t# pre-label NOPs for hotpatch (%d halfwords)\n",
7529 hw_before);
7530 for (i = 1; i < hw_before; i++)
7531 fputs ("\tnopr\t%r0\n", asm_out_file);
7532
7533 /* Note: The function label must be aligned so that (a) the bytes of the
7534 following nop do not cross a cacheline boundary, and (b) a jump address
7535 (eight bytes for 64-bit targets, four bytes for 32-bit targets) can be
7536 stored directly before the label without crossing a cacheline
7537 boundary. All this is necessary to make sure the trampoline code can
7538 be changed atomically.
7539 This alignment is done automatically using FUNCTION_BOUNDARY, but
7540 if there are NOPs before the function label, the alignment is placed
7541 before them. So it is necessary to duplicate the alignment after the
7542 NOPs. */
7543 function_alignment = MAX (8, DECL_ALIGN (decl) / BITS_PER_UNIT);
7544 if (! DECL_USER_ALIGN (decl))
7545 function_alignment
7546 = MAX (function_alignment,
7547 (unsigned int) align_functions.levels[0].get_value ());
7548 fputs ("\t# alignment for hotpatch\n", asm_out_file);
7549 ASM_OUTPUT_ALIGN (asm_out_file, align_functions.levels[0].log);
7550 }
7551
7552 if (S390_USE_TARGET_ATTRIBUTE && TARGET_DEBUG_ARG)
7553 {
7554 asm_fprintf (asm_out_file, "\t# fn:%s ar%d\n", fname, s390_arch);
7555 asm_fprintf (asm_out_file, "\t# fn:%s tu%d\n", fname, s390_tune);
7556 asm_fprintf (asm_out_file, "\t# fn:%s sg%d\n", fname, s390_stack_guard);
7557 asm_fprintf (asm_out_file, "\t# fn:%s ss%d\n", fname, s390_stack_size);
7558 asm_fprintf (asm_out_file, "\t# fn:%s bc%d\n", fname, s390_branch_cost);
7559 asm_fprintf (asm_out_file, "\t# fn:%s wf%d\n", fname,
7560 s390_warn_framesize);
7561 asm_fprintf (asm_out_file, "\t# fn:%s ba%d\n", fname, TARGET_BACKCHAIN);
7562 asm_fprintf (asm_out_file, "\t# fn:%s hd%d\n", fname, TARGET_HARD_DFP);
7563 asm_fprintf (asm_out_file, "\t# fn:%s hf%d\n", fname, !TARGET_SOFT_FLOAT);
7564 asm_fprintf (asm_out_file, "\t# fn:%s ht%d\n", fname, TARGET_OPT_HTM);
7565 asm_fprintf (asm_out_file, "\t# fn:%s vx%d\n", fname, TARGET_OPT_VX);
7566 asm_fprintf (asm_out_file, "\t# fn:%s ps%d\n", fname,
7567 TARGET_PACKED_STACK);
7568 asm_fprintf (asm_out_file, "\t# fn:%s se%d\n", fname, TARGET_SMALL_EXEC);
7569 asm_fprintf (asm_out_file, "\t# fn:%s mv%d\n", fname, TARGET_MVCLE);
7570 asm_fprintf (asm_out_file, "\t# fn:%s zv%d\n", fname, TARGET_ZVECTOR);
7571 asm_fprintf (asm_out_file, "\t# fn:%s wd%d\n", fname,
7572 s390_warn_dynamicstack_p);
7573 }
7574 ASM_OUTPUT_LABEL (asm_out_file, fname);
7575 if (hw_after > 0)
7576 asm_fprintf (asm_out_file,
7577 "\t# post-label NOPs for hotpatch (%d halfwords)\n",
7578 hw_after);
7579 }
7580
7581 /* Output machine-dependent UNSPECs occurring in address constant X
7582 in assembler syntax to stdio stream FILE. Returns true if the
7583 constant X could be recognized, false otherwise. */
7584
7585 static bool
7586 s390_output_addr_const_extra (FILE *file, rtx x)
7587 {
7588 if (GET_CODE (x) == UNSPEC && XVECLEN (x, 0) == 1)
7589 switch (XINT (x, 1))
7590 {
7591 case UNSPEC_GOTENT:
7592 output_addr_const (file, XVECEXP (x, 0, 0));
7593 fprintf (file, "@GOTENT");
7594 return true;
7595 case UNSPEC_GOT:
7596 output_addr_const (file, XVECEXP (x, 0, 0));
7597 fprintf (file, "@GOT");
7598 return true;
7599 case UNSPEC_GOTOFF:
7600 output_addr_const (file, XVECEXP (x, 0, 0));
7601 fprintf (file, "@GOTOFF");
7602 return true;
7603 case UNSPEC_PLT:
7604 output_addr_const (file, XVECEXP (x, 0, 0));
7605 fprintf (file, "@PLT");
7606 return true;
7607 case UNSPEC_PLTOFF:
7608 output_addr_const (file, XVECEXP (x, 0, 0));
7609 fprintf (file, "@PLTOFF");
7610 return true;
7611 case UNSPEC_TLSGD:
7612 output_addr_const (file, XVECEXP (x, 0, 0));
7613 fprintf (file, "@TLSGD");
7614 return true;
7615 case UNSPEC_TLSLDM:
7616 assemble_name (file, get_some_local_dynamic_name ());
7617 fprintf (file, "@TLSLDM");
7618 return true;
7619 case UNSPEC_DTPOFF:
7620 output_addr_const (file, XVECEXP (x, 0, 0));
7621 fprintf (file, "@DTPOFF");
7622 return true;
7623 case UNSPEC_NTPOFF:
7624 output_addr_const (file, XVECEXP (x, 0, 0));
7625 fprintf (file, "@NTPOFF");
7626 return true;
7627 case UNSPEC_GOTNTPOFF:
7628 output_addr_const (file, XVECEXP (x, 0, 0));
7629 fprintf (file, "@GOTNTPOFF");
7630 return true;
7631 case UNSPEC_INDNTPOFF:
7632 output_addr_const (file, XVECEXP (x, 0, 0));
7633 fprintf (file, "@INDNTPOFF");
7634 return true;
7635 }
7636
7637 if (GET_CODE (x) == UNSPEC && XVECLEN (x, 0) == 2)
7638 switch (XINT (x, 1))
7639 {
7640 case UNSPEC_POOL_OFFSET:
7641 x = gen_rtx_MINUS (GET_MODE (x), XVECEXP (x, 0, 0), XVECEXP (x, 0, 1));
7642 output_addr_const (file, x);
7643 return true;
7644 }
7645 return false;
7646 }
7647
7648 /* Output address operand ADDR in assembler syntax to
7649 stdio stream FILE. */
7650
7651 void
7652 print_operand_address (FILE *file, rtx addr)
7653 {
7654 struct s390_address ad;
7655 memset (&ad, 0, sizeof (s390_address));
7656
7657 if (s390_loadrelative_operand_p (addr, NULL, NULL))
7658 {
7659 if (!TARGET_Z10)
7660 {
7661 output_operand_lossage ("symbolic memory references are "
7662 "only supported on z10 or later");
7663 return;
7664 }
7665 output_addr_const (file, addr);
7666 return;
7667 }
7668
7669 if (!s390_decompose_address (addr, &ad)
7670 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
7671 || (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx))))
7672 output_operand_lossage ("cannot decompose address");
7673
7674 if (ad.disp)
7675 output_addr_const (file, ad.disp);
7676 else
7677 fprintf (file, "0");
7678
7679 if (ad.base && ad.indx)
7680 fprintf (file, "(%s,%s)", reg_names[REGNO (ad.indx)],
7681 reg_names[REGNO (ad.base)]);
7682 else if (ad.base)
7683 fprintf (file, "(%s)", reg_names[REGNO (ad.base)]);
7684 }
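
/* For example (illustrative only): an address with base register %r2,
index register %r1 and displacement 8 is printed as "8(%r1,%r2)"; a
base-only address with no displacement is printed as "0(%r2)". */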
7685
7686 /* Output operand X in assembler syntax to stdio stream FILE.
7687 CODE specifies the format flag. The following format flags
7688 are recognized:
7689
7690 'C': print opcode suffix for branch condition.
7691 'D': print opcode suffix for inverse branch condition.
7692 'E': print opcode suffix for branch on index instruction.
7693 'G': print the size of the operand in bytes.
7694 'J': print tls_load/tls_gdcall/tls_ldcall suffix
7695 'M': print the second word of a TImode operand.
7696 'N': print the second word of a DImode operand.
7697 'O': print only the displacement of a memory reference or address.
7698 'R': print only the base register of a memory reference or address.
7699 'S': print S-type memory reference (base+displacement).
7700 'Y': print address style operand without index (e.g. shift count or setmem
7701 operand).
7702
7703 'b': print integer X as if it's an unsigned byte.
7704 'c': print integer X as if it's a signed byte.
7705 'e': "end" contiguous bitmask X in either DImode or vector inner mode.
7706 'f': "end" contiguous bitmask X in SImode.
7707 'h': print integer X as if it's a signed halfword.
7708 'i': print the first nonzero HImode part of X.
7709 'j': print the first HImode part unequal to -1 of X.
7710 'k': print the first nonzero SImode part of X.
7711 'm': print the first SImode part unequal to -1 of X.
7712 'o': print integer X as if it's an unsigned 32-bit word.
7713 's': "start" of contiguous bitmask X in either DImode or vector inner mode.
7714 't': CONST_INT: "start" of contiguous bitmask X in SImode.
7715 CONST_VECTOR: Generate a bitmask for vgbm instruction.
7716 'x': print integer X as if it's an unsigned halfword.
7717 'v': print register number as vector register (v1 instead of f1).
7718 */
7719
7720 void
7721 print_operand (FILE *file, rtx x, int code)
7722 {
7723 HOST_WIDE_INT ival;
7724
7725 switch (code)
7726 {
7727 case 'C':
7728 fprintf (file, s390_branch_condition_mnemonic (x, FALSE));
7729 return;
7730
7731 case 'D':
7732 fprintf (file, s390_branch_condition_mnemonic (x, TRUE));
7733 return;
7734
7735 case 'E':
7736 if (GET_CODE (x) == LE)
7737 fprintf (file, "l");
7738 else if (GET_CODE (x) == GT)
7739 fprintf (file, "h");
7740 else
7741 output_operand_lossage ("invalid comparison operator "
7742 "for 'E' output modifier");
7743 return;
7744
7745 case 'J':
7746 if (GET_CODE (x) == SYMBOL_REF)
7747 {
7748 fprintf (file, "%s", ":tls_load:");
7749 output_addr_const (file, x);
7750 }
7751 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD)
7752 {
7753 fprintf (file, "%s", ":tls_gdcall:");
7754 output_addr_const (file, XVECEXP (x, 0, 0));
7755 }
7756 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSLDM)
7757 {
7758 fprintf (file, "%s", ":tls_ldcall:");
7759 const char *name = get_some_local_dynamic_name ();
7760 gcc_assert (name);
7761 assemble_name (file, name);
7762 }
7763 else
7764 output_operand_lossage ("invalid reference for 'J' output modifier");
7765 return;
7766
7767 case 'G':
7768 fprintf (file, "%u", GET_MODE_SIZE (GET_MODE (x)));
7769 return;
7770
7771 case 'O':
7772 {
7773 struct s390_address ad;
7774 int ret;
7775
7776 ret = s390_decompose_address (MEM_P (x) ? XEXP (x, 0) : x, &ad);
7777
7778 if (!ret
7779 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
7780 || ad.indx)
7781 {
7782 output_operand_lossage ("invalid address for 'O' output modifier");
7783 return;
7784 }
7785
7786 if (ad.disp)
7787 output_addr_const (file, ad.disp);
7788 else
7789 fprintf (file, "0");
7790 }
7791 return;
7792
7793 case 'R':
7794 {
7795 struct s390_address ad;
7796 int ret;
7797
7798 ret = s390_decompose_address (MEM_P (x) ? XEXP (x, 0) : x, &ad);
7799
7800 if (!ret
7801 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
7802 || ad.indx)
7803 {
7804 output_operand_lossage ("invalid address for 'R' output modifier");
7805 return;
7806 }
7807
7808 if (ad.base)
7809 fprintf (file, "%s", reg_names[REGNO (ad.base)]);
7810 else
7811 fprintf (file, "0");
7812 }
7813 return;
7814
7815 case 'S':
7816 {
7817 struct s390_address ad;
7818 int ret;
7819
7820 if (!MEM_P (x))
7821 {
7822 output_operand_lossage ("memory reference expected for "
7823 "'S' output modifier");
7824 return;
7825 }
7826 ret = s390_decompose_address (XEXP (x, 0), &ad);
7827
7828 if (!ret
7829 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
7830 || ad.indx)
7831 {
7832 output_operand_lossage ("invalid address for 'S' output modifier");
7833 return;
7834 }
7835
7836 if (ad.disp)
7837 output_addr_const (file, ad.disp);
7838 else
7839 fprintf (file, "0");
7840
7841 if (ad.base)
7842 fprintf (file, "(%s)", reg_names[REGNO (ad.base)]);
7843 }
7844 return;
7845
7846 case 'N':
7847 if (GET_CODE (x) == REG)
7848 x = gen_rtx_REG (GET_MODE (x), REGNO (x) + 1);
7849 else if (GET_CODE (x) == MEM)
7850 x = change_address (x, VOIDmode,
7851 plus_constant (Pmode, XEXP (x, 0), 4));
7852 else
7853 output_operand_lossage ("register or memory expression expected "
7854 "for 'N' output modifier");
7855 break;
7856
7857 case 'M':
7858 if (GET_CODE (x) == REG)
7859 x = gen_rtx_REG (GET_MODE (x), REGNO (x) + 1);
7860 else if (GET_CODE (x) == MEM)
7861 x = change_address (x, VOIDmode,
7862 plus_constant (Pmode, XEXP (x, 0), 8));
7863 else
7864 output_operand_lossage ("register or memory expression expected "
7865 "for 'M' output modifier");
7866 break;
7867
7868 case 'Y':
7869 print_addrstyle_operand (file, x);
7870 return;
7871 }
7872
7873 switch (GET_CODE (x))
7874 {
7875 case REG:
7876 /* Print FP regs as fx instead of vx when they are accessed
7877 through non-vector mode. */
7878 if (code == 'v'
7879 || VECTOR_NOFP_REG_P (x)
7880 || (FP_REG_P (x) && VECTOR_MODE_P (GET_MODE (x)))
7881 || (VECTOR_REG_P (x)
7882 && (GET_MODE_SIZE (GET_MODE (x)) /
7883 s390_class_max_nregs (FP_REGS, GET_MODE (x))) > 8))
7884 fprintf (file, "%%v%s", reg_names[REGNO (x)] + 2);
7885 else
7886 fprintf (file, "%s", reg_names[REGNO (x)]);
7887 break;
7888
7889 case MEM:
7890 output_address (GET_MODE (x), XEXP (x, 0));
7891 break;
7892
7893 case CONST:
7894 case CODE_LABEL:
7895 case LABEL_REF:
7896 case SYMBOL_REF:
7897 output_addr_const (file, x);
7898 break;
7899
7900 case CONST_INT:
7901 ival = INTVAL (x);
7902 switch (code)
7903 {
7904 case 0:
7905 break;
7906 case 'b':
7907 ival &= 0xff;
7908 break;
7909 case 'c':
7910 ival = ((ival & 0xff) ^ 0x80) - 0x80;
7911 break;
7912 case 'x':
7913 ival &= 0xffff;
7914 break;
7915 case 'h':
7916 ival = ((ival & 0xffff) ^ 0x8000) - 0x8000;
7917 break;
7918 case 'i':
7919 ival = s390_extract_part (x, HImode, 0);
7920 break;
7921 case 'j':
7922 ival = s390_extract_part (x, HImode, -1);
7923 break;
7924 case 'k':
7925 ival = s390_extract_part (x, SImode, 0);
7926 break;
7927 case 'm':
7928 ival = s390_extract_part (x, SImode, -1);
7929 break;
7930 case 'o':
7931 ival &= 0xffffffff;
7932 break;
7933 case 'e': case 'f':
7934 case 's': case 't':
7935 {
7936 int start, end;
7937 int len;
7938 bool ok;
7939
7940 len = (code == 's' || code == 'e' ? 64 : 32);
7941 ok = s390_contiguous_bitmask_p (ival, true, len, &start, &end);
7942 gcc_assert (ok);
7943 if (code == 's' || code == 't')
7944 ival = start;
7945 else
7946 ival = end;
7947 }
7948 break;
7949 default:
7950 output_operand_lossage ("invalid constant for output modifier '%c'", code);
7951 }
7952 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
7953 break;
7954
7955 case CONST_WIDE_INT:
7956 if (code == 'b')
7957 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
7958 CONST_WIDE_INT_ELT (x, 0) & 0xff);
7959 else if (code == 'x')
7960 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
7961 CONST_WIDE_INT_ELT (x, 0) & 0xffff);
7962 else if (code == 'h')
7963 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
7964 ((CONST_WIDE_INT_ELT (x, 0) & 0xffff) ^ 0x8000) - 0x8000);
7965 else
7966 {
7967 if (code == 0)
7968 output_operand_lossage ("invalid constant - try using "
7969 "an output modifier");
7970 else
7971 output_operand_lossage ("invalid constant for output modifier '%c'",
7972 code);
7973 }
7974 break;
7975 case CONST_VECTOR:
7976 switch (code)
7977 {
7978 case 'h':
7979 gcc_assert (const_vec_duplicate_p (x));
7980 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
7981 ((INTVAL (XVECEXP (x, 0, 0)) & 0xffff) ^ 0x8000) - 0x8000);
7982 break;
7983 case 'e':
7984 case 's':
7985 {
7986 int start, end;
7987 bool ok;
7988
7989 ok = s390_contiguous_bitmask_vector_p (x, &start, &end);
7990 gcc_assert (ok);
7991 ival = (code == 's') ? start : end;
7992 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
7993 }
7994 break;
7995 case 't':
7996 {
7997 unsigned mask;
7998 bool ok = s390_bytemask_vector_p (x, &mask);
7999 gcc_assert (ok);
8000 fprintf (file, "%u", mask);
8001 }
8002 break;
8003
8004 default:
8005 output_operand_lossage ("invalid constant vector for output "
8006 "modifier '%c'", code);
8007 }
8008 break;
8009
8010 default:
8011 if (code == 0)
8012 output_operand_lossage ("invalid expression - try using "
8013 "an output modifier");
8014 else
8015 output_operand_lossage ("invalid expression for output "
8016 "modifier '%c'", code);
8017 break;
8018 }
8019 }
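
/* Worked example for the CONST_INT modifiers (illustrative only): for
(const_int 65534) the 'x' modifier prints 65534 (the low 16 bits), while
'h' prints -2, since ((0xfffe & 0xffff) ^ 0x8000) - 0x8000 = 0x7ffe - 0x8000
= -2, i.e. the value is reinterpreted as a signed halfword. */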
8020
8021 /* Target hook for assembling integer objects. We need to define it
8022 here to work around a bug in some versions of GAS, which couldn't
8023 handle values smaller than INT_MIN when printed in decimal. */
8024
8025 static bool
8026 s390_assemble_integer (rtx x, unsigned int size, int aligned_p)
8027 {
8028 if (size == 8 && aligned_p
8029 && GET_CODE (x) == CONST_INT && INTVAL (x) < INT_MIN)
8030 {
8031 fprintf (asm_out_file, "\t.quad\t" HOST_WIDE_INT_PRINT_HEX "\n",
8032 INTVAL (x));
8033 return true;
8034 }
8035 return default_assemble_integer (x, size, aligned_p);
8036 }
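
/* For example, an aligned 8-byte CONST_INT with value -9223372036854775808
(smaller than INT_MIN) is emitted as
.quad 0x8000000000000000
instead of the decimal form that the affected GAS versions mishandled. */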
8037
8038 /* Returns true if register REGNO is used for forming
8039 a memory address in expression X. */
8040
8041 static bool
8042 reg_used_in_mem_p (int regno, rtx x)
8043 {
8044 enum rtx_code code = GET_CODE (x);
8045 int i, j;
8046 const char *fmt;
8047
8048 if (code == MEM)
8049 {
8050 if (refers_to_regno_p (regno, XEXP (x, 0)))
8051 return true;
8052 }
8053 else if (code == SET
8054 && GET_CODE (SET_DEST (x)) == PC)
8055 {
8056 if (refers_to_regno_p (regno, SET_SRC (x)))
8057 return true;
8058 }
8059
8060 fmt = GET_RTX_FORMAT (code);
8061 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
8062 {
8063 if (fmt[i] == 'e'
8064 && reg_used_in_mem_p (regno, XEXP (x, i)))
8065 return true;
8066
8067 else if (fmt[i] == 'E')
8068 for (j = 0; j < XVECLEN (x, i); j++)
8069 if (reg_used_in_mem_p (regno, XVECEXP (x, i, j)))
8070 return true;
8071 }
8072 return false;
8073 }
8074
8075 /* Returns true if expression DEP_RTX sets an address register
8076 used by instruction INSN to address memory. */
8077
8078 static bool
8079 addr_generation_dependency_p (rtx dep_rtx, rtx_insn *insn)
8080 {
8081 rtx target, pat;
8082
8083 if (NONJUMP_INSN_P (dep_rtx))
8084 dep_rtx = PATTERN (dep_rtx);
8085
8086 if (GET_CODE (dep_rtx) == SET)
8087 {
8088 target = SET_DEST (dep_rtx);
8089 if (GET_CODE (target) == STRICT_LOW_PART)
8090 target = XEXP (target, 0);
8091 while (GET_CODE (target) == SUBREG)
8092 target = SUBREG_REG (target);
8093
8094 if (GET_CODE (target) == REG)
8095 {
8096 int regno = REGNO (target);
8097
8098 if (s390_safe_attr_type (insn) == TYPE_LA)
8099 {
8100 pat = PATTERN (insn);
8101 if (GET_CODE (pat) == PARALLEL)
8102 {
8103 gcc_assert (XVECLEN (pat, 0) == 2);
8104 pat = XVECEXP (pat, 0, 0);
8105 }
8106 gcc_assert (GET_CODE (pat) == SET);
8107 return refers_to_regno_p (regno, SET_SRC (pat));
8108 }
8109 else if (get_attr_atype (insn) == ATYPE_AGEN)
8110 return reg_used_in_mem_p (regno, PATTERN (insn));
8111 }
8112 }
8113 return false;
8114 }
8115
8116 /* Return 1 if DEP_INSN sets a register used by INSN in the agen unit. */
8117
8118 int
8119 s390_agen_dep_p (rtx_insn *dep_insn, rtx_insn *insn)
8120 {
8121 rtx dep_rtx = PATTERN (dep_insn);
8122 int i;
8123
8124 if (GET_CODE (dep_rtx) == SET
8125 && addr_generation_dependency_p (dep_rtx, insn))
8126 return 1;
8127 else if (GET_CODE (dep_rtx) == PARALLEL)
8128 {
8129 for (i = 0; i < XVECLEN (dep_rtx, 0); i++)
8130 {
8131 if (addr_generation_dependency_p (XVECEXP (dep_rtx, 0, i), insn))
8132 return 1;
8133 }
8134 }
8135 return 0;
8136 }
8137
8138
8139 /* A C statement (sans semicolon) to update the integer scheduling priority
8140 INSN_PRIORITY (INSN). Increase the priority to execute the INSN earlier,
8141 reduce the priority to execute INSN later. Do not define this macro if
8142 you do not need to adjust the scheduling priorities of insns.
8143
8144 A STD instruction should be scheduled earlier,
8145 in order to use the bypass. */
8146 static int
8147 s390_adjust_priority (rtx_insn *insn, int priority)
8148 {
8149 if (! INSN_P (insn))
8150 return priority;
8151
8152 if (s390_tune <= PROCESSOR_2064_Z900)
8153 return priority;
8154
8155 switch (s390_safe_attr_type (insn))
8156 {
8157 case TYPE_FSTOREDF:
8158 case TYPE_FSTORESF:
8159 priority = priority << 3;
8160 break;
8161 case TYPE_STORE:
8162 case TYPE_STM:
8163 priority = priority << 1;
8164 break;
8165 default:
8166 break;
8167 }
8168 return priority;
8169 }
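
/* For example, an FSTOREDF/FSTORESF insn with priority 4 is boosted to 32
(<< 3) and a STORE/STM insn to 8 (<< 1), so the scheduler prefers to issue
stores early enough for the bypass to be used. */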
8170
8171
8172 /* The number of instructions that can be issued per cycle. */
8173
8174 static int
8175 s390_issue_rate (void)
8176 {
8177 switch (s390_tune)
8178 {
8179 case PROCESSOR_2084_Z990:
8180 case PROCESSOR_2094_Z9_109:
8181 case PROCESSOR_2094_Z9_EC:
8182 case PROCESSOR_2817_Z196:
8183 return 3;
8184 case PROCESSOR_2097_Z10:
8185 return 2;
8186 case PROCESSOR_9672_G5:
8187 case PROCESSOR_9672_G6:
8188 case PROCESSOR_2064_Z900:
8189 /* Starting with EC12 we use the sched_reorder hook to take care
8190 of instruction dispatch constraints. The algorithm only
8191 picks the best instruction and assumes only a single
8192 instruction gets issued per cycle. */
8193 case PROCESSOR_2827_ZEC12:
8194 case PROCESSOR_2964_Z13:
8195 case PROCESSOR_3906_Z14:
8196 default:
8197 return 1;
8198 }
8199 }
8200
8201 static int
8202 s390_first_cycle_multipass_dfa_lookahead (void)
8203 {
8204 return 4;
8205 }
8206
8207 /* Annotate every literal pool reference in X by an UNSPEC_LTREF expression.
8208 Fix up MEMs as required. */
8209
8210 static void
8211 annotate_constant_pool_refs (rtx *x)
8212 {
8213 int i, j;
8214 const char *fmt;
8215
8216 gcc_assert (GET_CODE (*x) != SYMBOL_REF
8217 || !CONSTANT_POOL_ADDRESS_P (*x));
8218
8219 /* Literal pool references can only occur inside a MEM ... */
8220 if (GET_CODE (*x) == MEM)
8221 {
8222 rtx memref = XEXP (*x, 0);
8223
8224 if (GET_CODE (memref) == SYMBOL_REF
8225 && CONSTANT_POOL_ADDRESS_P (memref))
8226 {
8227 rtx base = cfun->machine->base_reg;
8228 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, memref, base),
8229 UNSPEC_LTREF);
8230
8231 *x = replace_equiv_address (*x, addr);
8232 return;
8233 }
8234
8235 if (GET_CODE (memref) == CONST
8236 && GET_CODE (XEXP (memref, 0)) == PLUS
8237 && GET_CODE (XEXP (XEXP (memref, 0), 1)) == CONST_INT
8238 && GET_CODE (XEXP (XEXP (memref, 0), 0)) == SYMBOL_REF
8239 && CONSTANT_POOL_ADDRESS_P (XEXP (XEXP (memref, 0), 0)))
8240 {
8241 HOST_WIDE_INT off = INTVAL (XEXP (XEXP (memref, 0), 1));
8242 rtx sym = XEXP (XEXP (memref, 0), 0);
8243 rtx base = cfun->machine->base_reg;
8244 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, sym, base),
8245 UNSPEC_LTREF);
8246
8247 *x = replace_equiv_address (*x, plus_constant (Pmode, addr, off));
8248 return;
8249 }
8250 }
8251
8252 /* ... or a load-address type pattern. */
8253 if (GET_CODE (*x) == SET)
8254 {
8255 rtx addrref = SET_SRC (*x);
8256
8257 if (GET_CODE (addrref) == SYMBOL_REF
8258 && CONSTANT_POOL_ADDRESS_P (addrref))
8259 {
8260 rtx base = cfun->machine->base_reg;
8261 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, addrref, base),
8262 UNSPEC_LTREF);
8263
8264 SET_SRC (*x) = addr;
8265 return;
8266 }
8267
8268 if (GET_CODE (addrref) == CONST
8269 && GET_CODE (XEXP (addrref, 0)) == PLUS
8270 && GET_CODE (XEXP (XEXP (addrref, 0), 1)) == CONST_INT
8271 && GET_CODE (XEXP (XEXP (addrref, 0), 0)) == SYMBOL_REF
8272 && CONSTANT_POOL_ADDRESS_P (XEXP (XEXP (addrref, 0), 0)))
8273 {
8274 HOST_WIDE_INT off = INTVAL (XEXP (XEXP (addrref, 0), 1));
8275 rtx sym = XEXP (XEXP (addrref, 0), 0);
8276 rtx base = cfun->machine->base_reg;
8277 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, sym, base),
8278 UNSPEC_LTREF);
8279
8280 SET_SRC (*x) = plus_constant (Pmode, addr, off);
8281 return;
8282 }
8283 }
8284
8285 /* Annotate LTREL_BASE as well. */
8286 if (GET_CODE (*x) == UNSPEC
8287 && XINT (*x, 1) == UNSPEC_LTREL_BASE)
8288 {
8289 rtx base = cfun->machine->base_reg;
8290 *x = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, XVECEXP (*x, 0, 0), base),
8291 UNSPEC_LTREL_BASE);
8292 return;
8293 }
8294
8295 fmt = GET_RTX_FORMAT (GET_CODE (*x));
8296 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
8297 {
8298 if (fmt[i] == 'e')
8299 {
8300 annotate_constant_pool_refs (&XEXP (*x, i));
8301 }
8302 else if (fmt[i] == 'E')
8303 {
8304 for (j = 0; j < XVECLEN (*x, i); j++)
8305 annotate_constant_pool_refs (&XVECEXP (*x, i, j));
8306 }
8307 }
8308 }
8309
8310 /* Split all branches that exceed the maximum distance.
8311 Returns true if this created a new literal pool entry. */
8312
8313 static int
8314 s390_split_branches (void)
8315 {
8316 rtx temp_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
8317 int new_literal = 0, ret;
8318 rtx_insn *insn;
8319 rtx pat, target;
8320 rtx *label;
8321
8322 /* We need correct insn addresses. */
8323
8324 shorten_branches (get_insns ());
8325
8326 /* Find all branches that exceed 64KB, and split them. */
8327
8328 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8329 {
8330 if (! JUMP_P (insn) || tablejump_p (insn, NULL, NULL))
8331 continue;
8332
8333 pat = PATTERN (insn);
8334 if (GET_CODE (pat) == PARALLEL)
8335 pat = XVECEXP (pat, 0, 0);
8336 if (GET_CODE (pat) != SET || SET_DEST (pat) != pc_rtx)
8337 continue;
8338
8339 if (GET_CODE (SET_SRC (pat)) == LABEL_REF)
8340 {
8341 label = &SET_SRC (pat);
8342 }
8343 else if (GET_CODE (SET_SRC (pat)) == IF_THEN_ELSE)
8344 {
8345 if (GET_CODE (XEXP (SET_SRC (pat), 1)) == LABEL_REF)
8346 label = &XEXP (SET_SRC (pat), 1);
8347 else if (GET_CODE (XEXP (SET_SRC (pat), 2)) == LABEL_REF)
8348 label = &XEXP (SET_SRC (pat), 2);
8349 else
8350 continue;
8351 }
8352 else
8353 continue;
8354
8355 if (get_attr_length (insn) <= 4)
8356 continue;
8357
8358 /* We are going to use the return register as scratch register,
8359 make sure it will be saved/restored by the prologue/epilogue. */
8360 cfun_frame_layout.save_return_addr_p = 1;
8361
8362 if (!flag_pic)
8363 {
8364 new_literal = 1;
8365 rtx mem = force_const_mem (Pmode, *label);
8366 rtx_insn *set_insn = emit_insn_before (gen_rtx_SET (temp_reg, mem),
8367 insn);
8368 INSN_ADDRESSES_NEW (set_insn, -1);
8369 annotate_constant_pool_refs (&PATTERN (set_insn));
8370
8371 target = temp_reg;
8372 }
8373 else
8374 {
8375 new_literal = 1;
8376 target = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, *label),
8377 UNSPEC_LTREL_OFFSET);
8378 target = gen_rtx_CONST (Pmode, target);
8379 target = force_const_mem (Pmode, target);
8380 rtx_insn *set_insn = emit_insn_before (gen_rtx_SET (temp_reg, target),
8381 insn);
8382 INSN_ADDRESSES_NEW (set_insn, -1);
8383 annotate_constant_pool_refs (&PATTERN (set_insn));
8384
8385 target = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, XEXP (target, 0),
8386 cfun->machine->base_reg),
8387 UNSPEC_LTREL_BASE);
8388 target = gen_rtx_PLUS (Pmode, temp_reg, target);
8389 }
8390
8391 ret = validate_change (insn, label, target, 0);
8392 gcc_assert (ret);
8393 }
8394
8395 return new_literal;
8396 }
8397
8398
8399 /* Find an annotated literal pool symbol referenced in RTX X,
8400 and store it at REF. Will abort if X contains references to
8401 more than one such pool symbol; multiple references to the same
8402 symbol are allowed, however.
8403
8404 The rtx pointed to by REF must be initialized to NULL_RTX
8405 by the caller before calling this routine. */
8406
8407 static void
8408 find_constant_pool_ref (rtx x, rtx *ref)
8409 {
8410 int i, j;
8411 const char *fmt;
8412
8413 /* Ignore LTREL_BASE references. */
8414 if (GET_CODE (x) == UNSPEC
8415 && XINT (x, 1) == UNSPEC_LTREL_BASE)
8416 return;
8417 /* Likewise POOL_ENTRY insns. */
8418 if (GET_CODE (x) == UNSPEC_VOLATILE
8419 && XINT (x, 1) == UNSPECV_POOL_ENTRY)
8420 return;
8421
8422 gcc_assert (GET_CODE (x) != SYMBOL_REF
8423 || !CONSTANT_POOL_ADDRESS_P (x));
8424
8425 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_LTREF)
8426 {
8427 rtx sym = XVECEXP (x, 0, 0);
8428 gcc_assert (GET_CODE (sym) == SYMBOL_REF
8429 && CONSTANT_POOL_ADDRESS_P (sym));
8430
8431 if (*ref == NULL_RTX)
8432 *ref = sym;
8433 else
8434 gcc_assert (*ref == sym);
8435
8436 return;
8437 }
8438
8439 fmt = GET_RTX_FORMAT (GET_CODE (x));
8440 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
8441 {
8442 if (fmt[i] == 'e')
8443 {
8444 find_constant_pool_ref (XEXP (x, i), ref);
8445 }
8446 else if (fmt[i] == 'E')
8447 {
8448 for (j = 0; j < XVECLEN (x, i); j++)
8449 find_constant_pool_ref (XVECEXP (x, i, j), ref);
8450 }
8451 }
8452 }
8453
8454 /* Replace every reference to the annotated literal pool
8455 symbol REF in X by its base plus OFFSET. */
8456
8457 static void
8458 replace_constant_pool_ref (rtx *x, rtx ref, rtx offset)
8459 {
8460 int i, j;
8461 const char *fmt;
8462
8463 gcc_assert (*x != ref);
8464
8465 if (GET_CODE (*x) == UNSPEC
8466 && XINT (*x, 1) == UNSPEC_LTREF
8467 && XVECEXP (*x, 0, 0) == ref)
8468 {
8469 *x = gen_rtx_PLUS (Pmode, XVECEXP (*x, 0, 1), offset);
8470 return;
8471 }
8472
8473 if (GET_CODE (*x) == PLUS
8474 && GET_CODE (XEXP (*x, 1)) == CONST_INT
8475 && GET_CODE (XEXP (*x, 0)) == UNSPEC
8476 && XINT (XEXP (*x, 0), 1) == UNSPEC_LTREF
8477 && XVECEXP (XEXP (*x, 0), 0, 0) == ref)
8478 {
8479 rtx addr = gen_rtx_PLUS (Pmode, XVECEXP (XEXP (*x, 0), 0, 1), offset);
8480 *x = plus_constant (Pmode, addr, INTVAL (XEXP (*x, 1)));
8481 return;
8482 }
8483
8484 fmt = GET_RTX_FORMAT (GET_CODE (*x));
8485 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
8486 {
8487 if (fmt[i] == 'e')
8488 {
8489 replace_constant_pool_ref (&XEXP (*x, i), ref, offset);
8490 }
8491 else if (fmt[i] == 'E')
8492 {
8493 for (j = 0; j < XVECLEN (*x, i); j++)
8494 replace_constant_pool_ref (&XVECEXP (*x, i, j), ref, offset);
8495 }
8496 }
8497 }
8498
8499 /* Check whether X contains an UNSPEC_LTREL_BASE.
8500 Return its constant pool symbol if found, NULL_RTX otherwise. */
8501
8502 static rtx
8503 find_ltrel_base (rtx x)
8504 {
8505 int i, j;
8506 const char *fmt;
8507
8508 if (GET_CODE (x) == UNSPEC
8509 && XINT (x, 1) == UNSPEC_LTREL_BASE)
8510 return XVECEXP (x, 0, 0);
8511
8512 fmt = GET_RTX_FORMAT (GET_CODE (x));
8513 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
8514 {
8515 if (fmt[i] == 'e')
8516 {
8517 rtx fnd = find_ltrel_base (XEXP (x, i));
8518 if (fnd)
8519 return fnd;
8520 }
8521 else if (fmt[i] == 'E')
8522 {
8523 for (j = 0; j < XVECLEN (x, i); j++)
8524 {
8525 rtx fnd = find_ltrel_base (XVECEXP (x, i, j));
8526 if (fnd)
8527 return fnd;
8528 }
8529 }
8530 }
8531
8532 return NULL_RTX;
8533 }
8534
8535 /* Replace any occurrence of UNSPEC_LTREL_BASE in X with its base. */
8536
8537 static void
8538 replace_ltrel_base (rtx *x)
8539 {
8540 int i, j;
8541 const char *fmt;
8542
8543 if (GET_CODE (*x) == UNSPEC
8544 && XINT (*x, 1) == UNSPEC_LTREL_BASE)
8545 {
8546 *x = XVECEXP (*x, 0, 1);
8547 return;
8548 }
8549
8550 fmt = GET_RTX_FORMAT (GET_CODE (*x));
8551 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
8552 {
8553 if (fmt[i] == 'e')
8554 {
8555 replace_ltrel_base (&XEXP (*x, i));
8556 }
8557 else if (fmt[i] == 'E')
8558 {
8559 for (j = 0; j < XVECLEN (*x, i); j++)
8560 replace_ltrel_base (&XVECEXP (*x, i, j));
8561 }
8562 }
8563 }
8564
8565
8566 /* We keep a list of constants which we have to add to internal
8567 constant tables in the middle of large functions. */
8568
8569 #define NR_C_MODES 32
8570 machine_mode constant_modes[NR_C_MODES] =
8571 {
8572 TFmode, TImode, TDmode,
8573 V16QImode, V8HImode, V4SImode, V2DImode, V1TImode,
8574 V4SFmode, V2DFmode, V1TFmode,
8575 DFmode, DImode, DDmode,
8576 V8QImode, V4HImode, V2SImode, V1DImode, V2SFmode, V1DFmode,
8577 SFmode, SImode, SDmode,
8578 V4QImode, V2HImode, V1SImode, V1SFmode,
8579 HImode,
8580 V2QImode, V1HImode,
8581 QImode,
8582 V1QImode
8583 };
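
/* The 32 entries above are ordered from the largest/most strictly aligned
modes down to QImode, so that s390_dump_pool below can emit the constants
in descending alignment order simply by walking the array. */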
8584
8585 struct constant
8586 {
8587 struct constant *next;
8588 rtx value;
8589 rtx_code_label *label;
8590 };
8591
8592 struct constant_pool
8593 {
8594 struct constant_pool *next;
8595 rtx_insn *first_insn;
8596 rtx_insn *pool_insn;
8597 bitmap insns;
8598 rtx_insn *emit_pool_after;
8599
8600 struct constant *constants[NR_C_MODES];
8601 struct constant *execute;
8602 rtx_code_label *label;
8603 int size;
8604 };
8605
8606 /* Allocate new constant_pool structure. */
8607
8608 static struct constant_pool *
8609 s390_alloc_pool (void)
8610 {
8611 struct constant_pool *pool;
8612 int i;
8613
8614 pool = (struct constant_pool *) xmalloc (sizeof *pool);
8615 pool->next = NULL;
8616 for (i = 0; i < NR_C_MODES; i++)
8617 pool->constants[i] = NULL;
8618
8619 pool->execute = NULL;
8620 pool->label = gen_label_rtx ();
8621 pool->first_insn = NULL;
8622 pool->pool_insn = NULL;
8623 pool->insns = BITMAP_ALLOC (NULL);
8624 pool->size = 0;
8625 pool->emit_pool_after = NULL;
8626
8627 return pool;
8628 }
8629
8630 /* Create new constant pool covering instructions starting at INSN
8631 and chain it to the end of POOL_LIST. */
8632
8633 static struct constant_pool *
8634 s390_start_pool (struct constant_pool **pool_list, rtx_insn *insn)
8635 {
8636 struct constant_pool *pool, **prev;
8637
8638 pool = s390_alloc_pool ();
8639 pool->first_insn = insn;
8640
8641 for (prev = pool_list; *prev; prev = &(*prev)->next)
8642 ;
8643 *prev = pool;
8644
8645 return pool;
8646 }
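
/* For example (illustrative only): the constant
(unspec [(symbol_ref "foo")] UNSPEC_GOTENT)
is printed as "foo@GOTENT", and a two-element UNSPEC_POOL_OFFSET is printed
as the difference of its two operands. */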
8647
8648 /* End range of instructions covered by POOL at INSN and emit
8649 placeholder insn representing the pool. */
8650
8651 static void
8652 s390_end_pool (struct constant_pool *pool, rtx_insn *insn)
8653 {
8654 rtx pool_size = GEN_INT (pool->size + 8 /* alignment slop */);
8655
8656 if (!insn)
8657 insn = get_last_insn ();
8658
8659 pool->pool_insn = emit_insn_after (gen_pool (pool_size), insn);
8660 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
8661 }
8662
8663 /* Add INSN to the list of insns covered by POOL. */
8664
8665 static void
8666 s390_add_pool_insn (struct constant_pool *pool, rtx insn)
8667 {
8668 bitmap_set_bit (pool->insns, INSN_UID (insn));
8669 }
8670
8671 /* Return pool out of POOL_LIST that covers INSN. */
8672
8673 static struct constant_pool *
8674 s390_find_pool (struct constant_pool *pool_list, rtx insn)
8675 {
8676 struct constant_pool *pool;
8677
8678 for (pool = pool_list; pool; pool = pool->next)
8679 if (bitmap_bit_p (pool->insns, INSN_UID (insn)))
8680 break;
8681
8682 return pool;
8683 }
8684
8685 /* Add constant VAL of mode MODE to the constant pool POOL. */
8686
8687 static void
8688 s390_add_constant (struct constant_pool *pool, rtx val, machine_mode mode)
8689 {
8690 struct constant *c;
8691 int i;
8692
8693 for (i = 0; i < NR_C_MODES; i++)
8694 if (constant_modes[i] == mode)
8695 break;
8696 gcc_assert (i != NR_C_MODES);
8697
8698 for (c = pool->constants[i]; c != NULL; c = c->next)
8699 if (rtx_equal_p (val, c->value))
8700 break;
8701
8702 if (c == NULL)
8703 {
8704 c = (struct constant *) xmalloc (sizeof *c);
8705 c->value = val;
8706 c->label = gen_label_rtx ();
8707 c->next = pool->constants[i];
8708 pool->constants[i] = c;
8709 pool->size += GET_MODE_SIZE (mode);
8710 }
8711 }
8712
8713 /* Return an rtx that represents the offset of X from the start of
8714 pool POOL. */
8715
8716 static rtx
8717 s390_pool_offset (struct constant_pool *pool, rtx x)
8718 {
8719 rtx label;
8720
8721 label = gen_rtx_LABEL_REF (GET_MODE (x), pool->label);
8722 x = gen_rtx_UNSPEC (GET_MODE (x), gen_rtvec (2, x, label),
8723 UNSPEC_POOL_OFFSET);
8724 return gen_rtx_CONST (GET_MODE (x), x);
8725 }
8726
8727 /* Find constant VAL of mode MODE in the constant pool POOL.
8728 Return an RTX describing the distance from the start of
8729 the pool to the location of the new constant. */
8730
8731 static rtx
8732 s390_find_constant (struct constant_pool *pool, rtx val,
8733 machine_mode mode)
8734 {
8735 struct constant *c;
8736 int i;
8737
8738 for (i = 0; i < NR_C_MODES; i++)
8739 if (constant_modes[i] == mode)
8740 break;
8741 gcc_assert (i != NR_C_MODES);
8742
8743 for (c = pool->constants[i]; c != NULL; c = c->next)
8744 if (rtx_equal_p (val, c->value))
8745 break;
8746
8747 gcc_assert (c);
8748
8749 return s390_pool_offset (pool, gen_rtx_LABEL_REF (Pmode, c->label));
8750 }
8751
8752 /* Check whether INSN is an execute. Return the label_ref to its
8753 execute target template if so, NULL_RTX otherwise. */
8754
8755 static rtx
8756 s390_execute_label (rtx insn)
8757 {
8758 if (INSN_P (insn)
8759 && GET_CODE (PATTERN (insn)) == PARALLEL
8760 && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == UNSPEC
8761 && (XINT (XVECEXP (PATTERN (insn), 0, 0), 1) == UNSPEC_EXECUTE
8762 || XINT (XVECEXP (PATTERN (insn), 0, 0), 1) == UNSPEC_EXECUTE_JUMP))
8763 {
8764 if (XINT (XVECEXP (PATTERN (insn), 0, 0), 1) == UNSPEC_EXECUTE)
8765 return XVECEXP (XVECEXP (PATTERN (insn), 0, 0), 0, 2);
8766 else
8767 {
8768 gcc_assert (JUMP_P (insn));
8769 /* For jump insns as execute target:
8770 - There is one operand less in the parallel (the
8771 modification register of the execute is always 0).
8772 - The execute target label is wrapped into an
8773 if_then_else in order to hide it from jump analysis. */
8774 return XEXP (XVECEXP (XVECEXP (PATTERN (insn), 0, 0), 0, 0), 0);
8775 }
8776 }
8777
8778 return NULL_RTX;
8779 }
8780
8781 /* Add execute target for INSN to the constant pool POOL. */
8782
8783 static void
8784 s390_add_execute (struct constant_pool *pool, rtx insn)
8785 {
8786 struct constant *c;
8787
8788 for (c = pool->execute; c != NULL; c = c->next)
8789 if (INSN_UID (insn) == INSN_UID (c->value))
8790 break;
8791
8792 if (c == NULL)
8793 {
8794 c = (struct constant *) xmalloc (sizeof *c);
8795 c->value = insn;
8796 c->label = gen_label_rtx ();
8797 c->next = pool->execute;
8798 pool->execute = c;
8799 pool->size += 6;
8800 }
8801 }
8802
8803 /* Find execute target for INSN in the constant pool POOL.
8804 Return an RTX describing the distance from the start of
8805 the pool to the location of the execute target. */
8806
8807 static rtx
8808 s390_find_execute (struct constant_pool *pool, rtx insn)
8809 {
8810 struct constant *c;
8811
8812 for (c = pool->execute; c != NULL; c = c->next)
8813 if (INSN_UID (insn) == INSN_UID (c->value))
8814 break;
8815
8816 gcc_assert (c);
8817
8818 return s390_pool_offset (pool, gen_rtx_LABEL_REF (Pmode, c->label));
8819 }
8820
8821 /* For an execute INSN, extract the execute target template. */
8822
8823 static rtx
8824 s390_execute_target (rtx insn)
8825 {
8826 rtx pattern = PATTERN (insn);
8827 gcc_assert (s390_execute_label (insn));
8828
8829 if (XVECLEN (pattern, 0) == 2)
8830 {
8831 pattern = copy_rtx (XVECEXP (pattern, 0, 1));
8832 }
8833 else
8834 {
8835 rtvec vec = rtvec_alloc (XVECLEN (pattern, 0) - 1);
8836 int i;
8837
8838 for (i = 0; i < XVECLEN (pattern, 0) - 1; i++)
8839 RTVEC_ELT (vec, i) = copy_rtx (XVECEXP (pattern, 0, i + 1));
8840
8841 pattern = gen_rtx_PARALLEL (VOIDmode, vec);
8842 }
8843
8844 return pattern;
8845 }
8846
8847 /* Indicate that INSN cannot be duplicated. This is the case for
8848 execute insns that carry a unique label. */
8849
8850 static bool
8851 s390_cannot_copy_insn_p (rtx_insn *insn)
8852 {
8853 rtx label = s390_execute_label (insn);
8854 return label && label != const0_rtx;
8855 }
8856
8857 /* Dump out the constants in POOL. If REMOTE_LABEL is true,
8858 do not emit the pool base label. */
8859
8860 static void
8861 s390_dump_pool (struct constant_pool *pool, bool remote_label)
8862 {
8863 struct constant *c;
8864 rtx_insn *insn = pool->pool_insn;
8865 int i;
8866
8867 /* Switch to rodata section. */
8868 if (TARGET_CPU_ZARCH)
8869 {
8870 insn = emit_insn_after (gen_pool_section_start (), insn);
8871 INSN_ADDRESSES_NEW (insn, -1);
8872 }
8873
8874 /* Ensure minimum pool alignment. */
8875 if (TARGET_CPU_ZARCH)
8876 insn = emit_insn_after (gen_pool_align (GEN_INT (8)), insn);
8877 else
8878 insn = emit_insn_after (gen_pool_align (GEN_INT (4)), insn);
8879 INSN_ADDRESSES_NEW (insn, -1);
8880
8881 /* Emit pool base label. */
8882 if (!remote_label)
8883 {
8884 insn = emit_label_after (pool->label, insn);
8885 INSN_ADDRESSES_NEW (insn, -1);
8886 }
8887
8888 /* Dump constants in descending alignment requirement order,
8889 ensuring proper alignment for every constant. */
8890 for (i = 0; i < NR_C_MODES; i++)
8891 for (c = pool->constants[i]; c; c = c->next)
8892 {
8893 /* Convert UNSPEC_LTREL_OFFSET unspecs to pool-relative references. */
8894 rtx value = copy_rtx (c->value);
8895 if (GET_CODE (value) == CONST
8896 && GET_CODE (XEXP (value, 0)) == UNSPEC
8897 && XINT (XEXP (value, 0), 1) == UNSPEC_LTREL_OFFSET
8898 && XVECLEN (XEXP (value, 0), 0) == 1)
8899 value = s390_pool_offset (pool, XVECEXP (XEXP (value, 0), 0, 0));
8900
8901 insn = emit_label_after (c->label, insn);
8902 INSN_ADDRESSES_NEW (insn, -1);
8903
8904 value = gen_rtx_UNSPEC_VOLATILE (constant_modes[i],
8905 gen_rtvec (1, value),
8906 UNSPECV_POOL_ENTRY);
8907 insn = emit_insn_after (value, insn);
8908 INSN_ADDRESSES_NEW (insn, -1);
8909 }
8910
8911 /* Ensure minimum alignment for instructions. */
8912 insn = emit_insn_after (gen_pool_align (GEN_INT (2)), insn);
8913 INSN_ADDRESSES_NEW (insn, -1);
8914
8915 /* Output in-pool execute template insns. */
8916 for (c = pool->execute; c; c = c->next)
8917 {
8918 insn = emit_label_after (c->label, insn);
8919 INSN_ADDRESSES_NEW (insn, -1);
8920
8921 insn = emit_insn_after (s390_execute_target (c->value), insn);
8922 INSN_ADDRESSES_NEW (insn, -1);
8923 }
8924
8925 /* Switch back to previous section. */
8926 if (TARGET_CPU_ZARCH)
8927 {
8928 insn = emit_insn_after (gen_pool_section_end (), insn);
8929 INSN_ADDRESSES_NEW (insn, -1);
8930 }
8931
8932 insn = emit_barrier_after (insn);
8933 INSN_ADDRESSES_NEW (insn, -1);
8934
8935 /* Remove placeholder insn. */
8936 remove_insn (pool->pool_insn);
8937 }
8938
8939 /* Free all memory used by POOL. */
8940
8941 static void
8942 s390_free_pool (struct constant_pool *pool)
8943 {
8944 struct constant *c, *next;
8945 int i;
8946
8947 for (i = 0; i < NR_C_MODES; i++)
8948 for (c = pool->constants[i]; c; c = next)
8949 {
8950 next = c->next;
8951 free (c);
8952 }
8953
8954 for (c = pool->execute; c; c = next)
8955 {
8956 next = c->next;
8957 free (c);
8958 }
8959
8960 BITMAP_FREE (pool->insns);
8961 free (pool);
8962 }
8963
8964
8965 /* Collect main literal pool. Return NULL on overflow. */
8966
8967 static struct constant_pool *
8968 s390_mainpool_start (void)
8969 {
8970 struct constant_pool *pool;
8971 rtx_insn *insn;
8972
8973 pool = s390_alloc_pool ();
8974
8975 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8976 {
8977 if (NONJUMP_INSN_P (insn)
8978 && GET_CODE (PATTERN (insn)) == SET
8979 && GET_CODE (SET_SRC (PATTERN (insn))) == UNSPEC_VOLATILE
8980 && XINT (SET_SRC (PATTERN (insn)), 1) == UNSPECV_MAIN_POOL)
8981 {
8982 /* There might be two main_pool instructions if base_reg
8983 is call-clobbered; one for shrink-wrapped code and one
8984 for the rest. We want to keep the first. */
8985 if (pool->pool_insn)
8986 {
8987 insn = PREV_INSN (insn);
8988 delete_insn (NEXT_INSN (insn));
8989 continue;
8990 }
8991 pool->pool_insn = insn;
8992 }
8993
8994 if (!TARGET_CPU_ZARCH && s390_execute_label (insn))
8995 {
8996 s390_add_execute (pool, insn);
8997 }
8998 else if (NONJUMP_INSN_P (insn) || CALL_P (insn))
8999 {
9000 rtx pool_ref = NULL_RTX;
9001 find_constant_pool_ref (PATTERN (insn), &pool_ref);
9002 if (pool_ref)
9003 {
9004 rtx constant = get_pool_constant (pool_ref);
9005 machine_mode mode = get_pool_mode (pool_ref);
9006 s390_add_constant (pool, constant, mode);
9007 }
9008 }
9009
9010 /* If hot/cold partitioning is enabled we have to make sure that
9011 the literal pool is emitted in the same section where the
9012 initialization of the literal pool base pointer takes place.
9013 emit_pool_after is only used in the non-overflow case on
9014 non-zSeries CPUs, where we can emit the literal pool at the end of the
9015 function body within the text section. */
9016 if (NOTE_P (insn)
9017 && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS
9018 && !pool->emit_pool_after)
9019 pool->emit_pool_after = PREV_INSN (insn);
9020 }
9021
9022 gcc_assert (pool->pool_insn || pool->size == 0);
9023
9024 if (pool->size >= 4096)
9025 {
9026 /* We're going to chunkify the pool, so remove the main
9027 pool placeholder insn. */
9028 remove_insn (pool->pool_insn);
9029
9030 s390_free_pool (pool);
9031 pool = NULL;
9032 }
9033
9034 /* If the function ends with the section where the literal pool
9035 should be emitted, set the marker to its end. */
9036 if (pool && !pool->emit_pool_after)
9037 pool->emit_pool_after = get_last_insn ();
9038
9039 return pool;
9040 }
9041
9042 /* POOL holds the main literal pool as collected by s390_mainpool_start.
9043 Modify the current function to output the pool constants as well as
9044 the pool register setup instruction. */
9045
9046 static void
9047 s390_mainpool_finish (struct constant_pool *pool)
9048 {
9049 rtx base_reg = cfun->machine->base_reg;
9050
9051 /* If the pool is empty, we're done. */
9052 if (pool->size == 0)
9053 {
9054 /* We don't actually need a base register after all. */
9055 cfun->machine->base_reg = NULL_RTX;
9056
9057 if (pool->pool_insn)
9058 remove_insn (pool->pool_insn);
9059 s390_free_pool (pool);
9060 return;
9061 }
9062
9063 /* We need correct insn addresses. */
9064 shorten_branches (get_insns ());
9065
9066 /* On zSeries, we use a LARL to load the pool register. The pool is
9067 located in the .rodata section, so we emit it after the function. */
9068 if (TARGET_CPU_ZARCH)
9069 {
9070 rtx set = gen_main_base_64 (base_reg, pool->label);
9071 rtx_insn *insn = emit_insn_after (set, pool->pool_insn);
9072 INSN_ADDRESSES_NEW (insn, -1);
9073 remove_insn (pool->pool_insn);
9074
9075 insn = get_last_insn ();
9076 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
9077 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
9078
9079 s390_dump_pool (pool, 0);
9080 }
9081
9082 /* On S/390, if the total size of the function's code plus literal pool
9083 does not exceed 4096 bytes, we use BASR to set up a function base
9084 pointer, and emit the literal pool at the end of the function. */
9085 else if (INSN_ADDRESSES (INSN_UID (pool->emit_pool_after))
9086 + pool->size + 8 /* alignment slop */ < 4096)
9087 {
9088 rtx set = gen_main_base_31_small (base_reg, pool->label);
9089 rtx_insn *insn = emit_insn_after (set, pool->pool_insn);
9090 INSN_ADDRESSES_NEW (insn, -1);
9091 remove_insn (pool->pool_insn);
9092
9093 insn = emit_label_after (pool->label, insn);
9094 INSN_ADDRESSES_NEW (insn, -1);
9095
9096 /* emit_pool_after will be set by s390_mainpool_start to the
9097 last insn of the section where the literal pool should be
9098 emitted. */
9099 insn = pool->emit_pool_after;
9100
9101 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
9102 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
9103
9104 s390_dump_pool (pool, 1);
9105 }
9106
9107 /* Otherwise, we emit an inline literal pool and use BASR to branch
9108 over it, setting up the pool register at the same time. */
9109 else
9110 {
9111 rtx_code_label *pool_end = gen_label_rtx ();
9112
9113 rtx pat = gen_main_base_31_large (base_reg, pool->label, pool_end);
9114 rtx_insn *insn = emit_jump_insn_after (pat, pool->pool_insn);
9115 JUMP_LABEL (insn) = pool_end;
9116 INSN_ADDRESSES_NEW (insn, -1);
9117 remove_insn (pool->pool_insn);
9118
9119 insn = emit_label_after (pool->label, insn);
9120 INSN_ADDRESSES_NEW (insn, -1);
9121
9122 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
9123 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
9124
9125 insn = emit_label_after (pool_end, pool->pool_insn);
9126 INSN_ADDRESSES_NEW (insn, -1);
9127
9128 s390_dump_pool (pool, 1);
9129 }
9130
9131
9132 /* Replace all literal pool references. */
9133
9134 for (rtx_insn *insn = get_insns (); insn; insn = NEXT_INSN (insn))
9135 {
9136 if (INSN_P (insn))
9137 replace_ltrel_base (&PATTERN (insn));
9138
9139 if (NONJUMP_INSN_P (insn) || CALL_P (insn))
9140 {
9141 rtx addr, pool_ref = NULL_RTX;
9142 find_constant_pool_ref (PATTERN (insn), &pool_ref);
9143 if (pool_ref)
9144 {
9145 if (s390_execute_label (insn))
9146 addr = s390_find_execute (pool, insn);
9147 else
9148 addr = s390_find_constant (pool, get_pool_constant (pool_ref),
9149 get_pool_mode (pool_ref));
9150
9151 replace_constant_pool_ref (&PATTERN (insn), pool_ref, addr);
9152 INSN_CODE (insn) = -1;
9153 }
9154 }
9155 }
9156
9157
9158 /* Free the pool. */
9159 s390_free_pool (pool);
9160 }
9161
9162 /* POOL holds the main literal pool as collected by s390_mainpool_start.
9163 We have decided we cannot use this pool, so revert all changes
9164 to the current function that were done by s390_mainpool_start. */
9165 static void
9166 s390_mainpool_cancel (struct constant_pool *pool)
9167 {
9168 /* We didn't actually change the instruction stream, so simply
9169 free the pool memory. */
9170 s390_free_pool (pool);
9171 }
9172
9173
9174 /* Chunkify the literal pool. */
9175
9176 #define S390_POOL_CHUNK_MIN 0xc00
9177 #define S390_POOL_CHUNK_MAX 0xe00
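
/* Both limits stay well below the 4096-byte range reachable with a 12-bit
displacement (0xc00 = 3072, 0xe00 = 3584), leaving slack for alignment
padding and for the base-register reload insns whose size is accounted
for via extra_size below. */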
9178
9179 static struct constant_pool *
9180 s390_chunkify_start (void)
9181 {
9182 struct constant_pool *curr_pool = NULL, *pool_list = NULL;
9183 int extra_size = 0;
9184 bitmap far_labels;
9185 rtx pending_ltrel = NULL_RTX;
9186 rtx_insn *insn;
9187
9188 rtx (*gen_reload_base) (rtx, rtx) =
9189 TARGET_CPU_ZARCH? gen_reload_base_64 : gen_reload_base_31;
9190
9191
9192 /* We need correct insn addresses. */
9193
9194 shorten_branches (get_insns ());
9195
9196 /* Scan all insns and move literals to pool chunks. */
9197
9198 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
9199 {
9200 bool section_switch_p = false;
9201
9202 /* Check for pending LTREL_BASE. */
9203 if (INSN_P (insn))
9204 {
9205 rtx ltrel_base = find_ltrel_base (PATTERN (insn));
9206 if (ltrel_base)
9207 {
9208 gcc_assert (ltrel_base == pending_ltrel);
9209 pending_ltrel = NULL_RTX;
9210 }
9211 }
9212
9213 if (!TARGET_CPU_ZARCH && s390_execute_label (insn))
9214 {
9215 if (!curr_pool)
9216 curr_pool = s390_start_pool (&pool_list, insn);
9217
9218 s390_add_execute (curr_pool, insn);
9219 s390_add_pool_insn (curr_pool, insn);
9220 }
9221 else if (NONJUMP_INSN_P (insn) || CALL_P (insn))
9222 {
9223 rtx pool_ref = NULL_RTX;
9224 find_constant_pool_ref (PATTERN (insn), &pool_ref);
9225 if (pool_ref)
9226 {
9227 rtx constant = get_pool_constant (pool_ref);
9228 machine_mode mode = get_pool_mode (pool_ref);
9229
9230 if (!curr_pool)
9231 curr_pool = s390_start_pool (&pool_list, insn);
9232
9233 s390_add_constant (curr_pool, constant, mode);
9234 s390_add_pool_insn (curr_pool, insn);
9235
9236 /* Don't split the pool chunk between a LTREL_OFFSET load
9237 and the corresponding LTREL_BASE. */
9238 if (GET_CODE (constant) == CONST
9239 && GET_CODE (XEXP (constant, 0)) == UNSPEC
9240 && XINT (XEXP (constant, 0), 1) == UNSPEC_LTREL_OFFSET)
9241 {
9242 gcc_assert (!pending_ltrel);
9243 pending_ltrel = pool_ref;
9244 }
9245 }
9246 }
9247
9248 if (JUMP_P (insn) || JUMP_TABLE_DATA_P (insn) || LABEL_P (insn))
9249 {
9250 if (curr_pool)
9251 s390_add_pool_insn (curr_pool, insn);
9252 /* An LTREL_BASE must follow within the same basic block. */
9253 gcc_assert (!pending_ltrel);
9254 }
9255
9256 if (NOTE_P (insn))
9257 switch (NOTE_KIND (insn))
9258 {
9259 case NOTE_INSN_SWITCH_TEXT_SECTIONS:
9260 section_switch_p = true;
9261 break;
9262 case NOTE_INSN_VAR_LOCATION:
9263 continue;
9264 default:
9265 break;
9266 }
9267
9268 if (!curr_pool
9269 || INSN_ADDRESSES_SIZE () <= (size_t) INSN_UID (insn)
9270 || INSN_ADDRESSES (INSN_UID (insn)) == -1)
9271 continue;
9272
9273 if (TARGET_CPU_ZARCH)
9274 {
9275 if (curr_pool->size < S390_POOL_CHUNK_MAX)
9276 continue;
9277
9278 s390_end_pool (curr_pool, NULL);
9279 curr_pool = NULL;
9280 }
9281 else
9282 {
9283 int chunk_size = INSN_ADDRESSES (INSN_UID (insn))
9284 - INSN_ADDRESSES (INSN_UID (curr_pool->first_insn))
9285 + extra_size;
9286
9287 /* We will later have to insert base register reload insns.
9288 Those will have an effect on code size, which we need to
9289 consider here. This calculation makes rather pessimistic
9290 worst-case assumptions. */
9291 if (LABEL_P (insn))
9292 extra_size += 6;
9293
9294 if (chunk_size < S390_POOL_CHUNK_MIN
9295 && curr_pool->size < S390_POOL_CHUNK_MIN
9296 && !section_switch_p)
9297 continue;
9298
9299 /* Pool chunks can only be inserted after BARRIERs ... */
9300 if (BARRIER_P (insn))
9301 {
9302 s390_end_pool (curr_pool, insn);
9303 curr_pool = NULL;
9304 extra_size = 0;
9305 }
9306
9307 /* ... so if we don't find one in time, create one. */
9308 else if (chunk_size > S390_POOL_CHUNK_MAX
9309 || curr_pool->size > S390_POOL_CHUNK_MAX
9310 || section_switch_p)
9311 {
9312 rtx_insn *label, *jump, *barrier, *next, *prev;
9313
9314 if (!section_switch_p)
9315 {
9316 /* We can insert the barrier only after a 'real' insn. */
9317 if (! NONJUMP_INSN_P (insn) && ! CALL_P (insn))
9318 continue;
9319 if (get_attr_length (insn) == 0)
9320 continue;
9321 /* Don't separate LTREL_BASE from the corresponding
9322 LTREL_OFFSET load. */
9323 if (pending_ltrel)
9324 continue;
9325 next = insn;
9326 do
9327 {
9328 insn = next;
9329 next = NEXT_INSN (insn);
9330 }
9331 while (next
9332 && NOTE_P (next)
9333 && NOTE_KIND (next) == NOTE_INSN_VAR_LOCATION);
9334 }
9335 else
9336 {
9337 gcc_assert (!pending_ltrel);
9338
9339 /* The old pool has to end before the section switch
9340 note in order to make it part of the current
9341 section. */
9342 insn = PREV_INSN (insn);
9343 }
9344
9345 label = gen_label_rtx ();
9346 prev = insn;
9347 if (prev && NOTE_P (prev))
9348 prev = prev_nonnote_insn (prev);
9349 if (prev)
9350 jump = emit_jump_insn_after_setloc (gen_jump (label), insn,
9351 INSN_LOCATION (prev));
9352 else
9353 jump = emit_jump_insn_after_noloc (gen_jump (label), insn);
9354 barrier = emit_barrier_after (jump);
9355 insn = emit_label_after (label, barrier);
9356 JUMP_LABEL (jump) = label;
9357 LABEL_NUSES (label) = 1;
9358
9359 INSN_ADDRESSES_NEW (jump, -1);
9360 INSN_ADDRESSES_NEW (barrier, -1);
9361 INSN_ADDRESSES_NEW (insn, -1);
9362
9363 s390_end_pool (curr_pool, barrier);
9364 curr_pool = NULL;
9365 extra_size = 0;
9366 }
9367 }
9368 }
9369
9370 if (curr_pool)
9371 s390_end_pool (curr_pool, NULL);
9372 gcc_assert (!pending_ltrel);
9373
9374 /* Find all labels that are branched into
9375 from an insn belonging to a different chunk. */
9376
9377 far_labels = BITMAP_ALLOC (NULL);
9378
9379 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
9380 {
9381 rtx_jump_table_data *table;
9382
9383 /* Labels marked with LABEL_PRESERVE_P can be the target
9384 of non-local jumps, so we have to mark them.
9385 The same holds for named labels.
9386
9387 Don't do that, however, if it is the label before
9388 a jump table. */
9389
9390 if (LABEL_P (insn)
9391 && (LABEL_PRESERVE_P (insn) || LABEL_NAME (insn)))
9392 {
9393 rtx_insn *vec_insn = NEXT_INSN (insn);
9394 if (! vec_insn || ! JUMP_TABLE_DATA_P (vec_insn))
9395 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (insn));
9396 }
9397 /* Check potential targets in a table jump (casesi_jump). */
9398 else if (tablejump_p (insn, NULL, &table))
9399 {
9400 rtx vec_pat = PATTERN (table);
9401 int i, diff_p = GET_CODE (vec_pat) == ADDR_DIFF_VEC;
9402
9403 for (i = 0; i < XVECLEN (vec_pat, diff_p); i++)
9404 {
9405 rtx label = XEXP (XVECEXP (vec_pat, diff_p, i), 0);
9406
9407 if (s390_find_pool (pool_list, label)
9408 != s390_find_pool (pool_list, insn))
9409 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (label));
9410 }
9411 }
9412 /* If we have a direct jump (conditional or unconditional),
9413 check all potential targets. */
9414 else if (JUMP_P (insn))
9415 {
9416 rtx pat = PATTERN (insn);
9417
9418 if (GET_CODE (pat) == PARALLEL)
9419 pat = XVECEXP (pat, 0, 0);
9420
9421 if (GET_CODE (pat) == SET)
9422 {
9423 rtx label = JUMP_LABEL (insn);
9424 if (label && !ANY_RETURN_P (label))
9425 {
9426 if (s390_find_pool (pool_list, label)
9427 != s390_find_pool (pool_list, insn))
9428 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (label));
9429 }
9430 }
9431 }
9432 }
9433
9434 /* Insert base register reload insns before every pool. */
9435
9436 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
9437 {
9438 rtx new_insn = gen_reload_base (cfun->machine->base_reg,
9439 curr_pool->label);
9440 rtx_insn *insn = curr_pool->first_insn;
9441 INSN_ADDRESSES_NEW (emit_insn_before (new_insn, insn), -1);
9442 }
9443
9444 /* Insert base register reload insns at every far label. */
9445
9446 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
9447 if (LABEL_P (insn)
9448 && bitmap_bit_p (far_labels, CODE_LABEL_NUMBER (insn)))
9449 {
9450 struct constant_pool *pool = s390_find_pool (pool_list, insn);
9451 if (pool)
9452 {
9453 rtx new_insn = gen_reload_base (cfun->machine->base_reg,
9454 pool->label);
9455 INSN_ADDRESSES_NEW (emit_insn_after (new_insn, insn), -1);
9456 }
9457 }
9458
9459
9460 BITMAP_FREE (far_labels);
9461
9462
9463 /* Recompute insn addresses. */
9464
9465 init_insn_lengths ();
9466 shorten_branches (get_insns ());
9467
9468 return pool_list;
9469 }
9470
9471 /* POOL_LIST is a chunk list as prepared by s390_chunkify_start.
9472 After we have decided to use this list, finish implementing
9473 all changes to the current function as required. */
9474
9475 static void
9476 s390_chunkify_finish (struct constant_pool *pool_list)
9477 {
9478 struct constant_pool *curr_pool = NULL;
9479 rtx_insn *insn;
9480
9481
9482 /* Replace all literal pool references. */
9483
9484 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
9485 {
9486 if (INSN_P (insn))
9487 replace_ltrel_base (&PATTERN (insn));
9488
9489 curr_pool = s390_find_pool (pool_list, insn);
9490 if (!curr_pool)
9491 continue;
9492
9493 if (NONJUMP_INSN_P (insn) || CALL_P (insn))
9494 {
9495 rtx addr, pool_ref = NULL_RTX;
9496 find_constant_pool_ref (PATTERN (insn), &pool_ref);
9497 if (pool_ref)
9498 {
9499 if (s390_execute_label (insn))
9500 addr = s390_find_execute (curr_pool, insn);
9501 else
9502 addr = s390_find_constant (curr_pool,
9503 get_pool_constant (pool_ref),
9504 get_pool_mode (pool_ref));
9505
9506 replace_constant_pool_ref (&PATTERN (insn), pool_ref, addr);
9507 INSN_CODE (insn) = -1;
9508 }
9509 }
9510 }
9511
9512 /* Dump out all literal pools. */
9513
9514 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
9515 s390_dump_pool (curr_pool, 0);
9516
9517 /* Free pool list. */
9518
9519 while (pool_list)
9520 {
9521 struct constant_pool *next = pool_list->next;
9522 s390_free_pool (pool_list);
9523 pool_list = next;
9524 }
9525 }
9526
9527 /* POOL_LIST is a chunk list as prepared by s390_chunkify_start.
9528 We have decided we cannot use this list, so revert all changes
9529 to the current function that were done by s390_chunkify_start. */
9530
9531 static void
9532 s390_chunkify_cancel (struct constant_pool *pool_list)
9533 {
9534 struct constant_pool *curr_pool = NULL;
9535 rtx_insn *insn;
9536
9537 /* Remove all pool placeholder insns. */
9538
9539 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
9540 {
9541 /* Did we insert an extra barrier? Remove it. */
9542 rtx_insn *barrier = PREV_INSN (curr_pool->pool_insn);
9543 rtx_insn *jump = barrier? PREV_INSN (barrier) : NULL;
9544 rtx_insn *label = NEXT_INSN (curr_pool->pool_insn);
9545
9546 if (jump && JUMP_P (jump)
9547 && barrier && BARRIER_P (barrier)
9548 && label && LABEL_P (label)
9549 && GET_CODE (PATTERN (jump)) == SET
9550 && SET_DEST (PATTERN (jump)) == pc_rtx
9551 && GET_CODE (SET_SRC (PATTERN (jump))) == LABEL_REF
9552 && XEXP (SET_SRC (PATTERN (jump)), 0) == label)
9553 {
9554 remove_insn (jump);
9555 remove_insn (barrier);
9556 remove_insn (label);
9557 }
9558
9559 remove_insn (curr_pool->pool_insn);
9560 }
9561
9562 /* Remove all base register reload insns. */
9563
9564 for (insn = get_insns (); insn; )
9565 {
9566 rtx_insn *next_insn = NEXT_INSN (insn);
9567
9568 if (NONJUMP_INSN_P (insn)
9569 && GET_CODE (PATTERN (insn)) == SET
9570 && GET_CODE (SET_SRC (PATTERN (insn))) == UNSPEC
9571 && XINT (SET_SRC (PATTERN (insn)), 1) == UNSPEC_RELOAD_BASE)
9572 remove_insn (insn);
9573
9574 insn = next_insn;
9575 }
9576
9577 /* Free pool list. */
9578
9579 while (pool_list)
9580 {
9581 struct constant_pool *next = pool_list->next;
9582 s390_free_pool (pool_list);
9583 pool_list = next;
9584 }
9585 }
9586
9587 /* Output the constant pool entry EXP in mode MODE with alignment ALIGN. */
9588
9589 void
9590 s390_output_pool_entry (rtx exp, machine_mode mode, unsigned int align)
9591 {
9592 switch (GET_MODE_CLASS (mode))
9593 {
9594 case MODE_FLOAT:
9595 case MODE_DECIMAL_FLOAT:
9596 gcc_assert (GET_CODE (exp) == CONST_DOUBLE);
9597
9598 assemble_real (*CONST_DOUBLE_REAL_VALUE (exp),
9599 as_a <scalar_float_mode> (mode), align);
9600 break;
9601
9602 case MODE_INT:
9603 assemble_integer (exp, GET_MODE_SIZE (mode), align, 1);
9604 mark_symbol_refs_as_used (exp);
9605 break;
9606
9607 case MODE_VECTOR_INT:
9608 case MODE_VECTOR_FLOAT:
9609 {
9610 int i;
9611 machine_mode inner_mode;
9612 gcc_assert (GET_CODE (exp) == CONST_VECTOR);
9613
9614 inner_mode = GET_MODE_INNER (GET_MODE (exp));
9615 for (i = 0; i < XVECLEN (exp, 0); i++)
9616 s390_output_pool_entry (XVECEXP (exp, 0, i),
9617 inner_mode,
9618 i == 0
9619 ? align
9620 : GET_MODE_BITSIZE (inner_mode));
9621 }
9622 break;
9623
9624 default:
9625 gcc_unreachable ();
9626 }
9627 }
9628
9629
9630 /* Return an RTL expression representing the value of the return address
9631 for the frame COUNT steps up from the current frame. FRAME is the
9632 frame pointer of that frame. */
9633
9634 rtx
9635 s390_return_addr_rtx (int count, rtx frame ATTRIBUTE_UNUSED)
9636 {
9637 int offset;
9638 rtx addr;
9639
9640 /* Without backchain, we fail for all but the current frame. */
9641
9642 if (!TARGET_BACKCHAIN && count > 0)
9643 return NULL_RTX;
9644
9645 /* For the current frame, we need to make sure the initial
9646 value of RETURN_REGNUM is actually saved. */
9647
9648 if (count == 0)
9649 {
9650 /* On non-z architectures branch splitting could overwrite r14. */
9651 if (TARGET_CPU_ZARCH)
9652 return get_hard_reg_initial_val (Pmode, RETURN_REGNUM);
9653 else
9654 {
9655 cfun_frame_layout.save_return_addr_p = true;
9656 return gen_rtx_MEM (Pmode, return_address_pointer_rtx);
9657 }
9658 }
9659
9660 if (TARGET_PACKED_STACK)
9661 offset = -2 * UNITS_PER_LONG;
9662 else
9663 offset = RETURN_REGNUM * UNITS_PER_LONG;
9664
9665 addr = plus_constant (Pmode, frame, offset);
9666 addr = memory_address (Pmode, addr);
9667 return gen_rtx_MEM (Pmode, addr);
9668 }
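
/* Illustrative note (not part of the build): the hook above backs
   __builtin_return_address.  A user-level sketch:

     void *own_ra (void)     { return __builtin_return_address (0); }
     void *callers_ra (void) { return __builtin_return_address (1); }

   own_ra always works, since the initial value of r14 is forced to be
   saved; callers_ra walks the back chain via FRAME and therefore only
   yields a meaningful value when compiled with -mbackchain (otherwise
   the hook returns NULL_RTX for COUNT > 0).  */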
9669
9670 /* Return an RTL expression representing the back chain stored in
9671 the current stack frame. */
9672
9673 rtx
9674 s390_back_chain_rtx (void)
9675 {
9676 rtx chain;
9677
9678 gcc_assert (TARGET_BACKCHAIN);
9679
9680 if (TARGET_PACKED_STACK)
9681 chain = plus_constant (Pmode, stack_pointer_rtx,
9682 STACK_POINTER_OFFSET - UNITS_PER_LONG);
9683 else
9684 chain = stack_pointer_rtx;
9685
9686 chain = gen_rtx_MEM (Pmode, chain);
9687 return chain;
9688 }
9689
9690 /* Find the first call-clobbered register unused in a function.
9691    It could be used as a base register in a leaf function
9692    or for holding the return address before the epilogue.  */
9693
9694 static int
9695 find_unused_clobbered_reg (void)
9696 {
9697 int i;
9698 for (i = 0; i < 6; i++)
9699 if (!df_regs_ever_live_p (i))
9700 return i;
9701 return 0;
9702 }
9703
9704
9705 /* Helper function for s390_regs_ever_clobbered. Sets the fields in DATA for all
9706 clobbered hard regs in SETREG. */
9707
9708 static void
9709 s390_reg_clobbered_rtx (rtx setreg, const_rtx set_insn ATTRIBUTE_UNUSED, void *data)
9710 {
9711 char *regs_ever_clobbered = (char *)data;
9712 unsigned int i, regno;
9713 machine_mode mode = GET_MODE (setreg);
9714
9715 if (GET_CODE (setreg) == SUBREG)
9716 {
9717 rtx inner = SUBREG_REG (setreg);
9718 if (!GENERAL_REG_P (inner) && !FP_REG_P (inner))
9719 return;
9720 regno = subreg_regno (setreg);
9721 }
9722 else if (GENERAL_REG_P (setreg) || FP_REG_P (setreg))
9723 regno = REGNO (setreg);
9724 else
9725 return;
9726
9727 for (i = regno;
9728 i < end_hard_regno (mode, regno);
9729 i++)
9730 regs_ever_clobbered[i] = 1;
9731 }
9732
9733 /* Walks through all basic blocks of the current function looking
9734 for clobbered hard regs using s390_reg_clobbered_rtx. The fields
9735    of the passed char array REGS_EVER_CLOBBERED are set to one for
9736 each of those regs. */
9737
9738 static void
9739 s390_regs_ever_clobbered (char regs_ever_clobbered[])
9740 {
9741 basic_block cur_bb;
9742 rtx_insn *cur_insn;
9743 unsigned int i;
9744
9745 memset (regs_ever_clobbered, 0, 32);
9746
9747 /* For non-leaf functions we have to consider all call clobbered regs to be
9748 clobbered. */
9749 if (!crtl->is_leaf)
9750 {
9751 for (i = 0; i < 32; i++)
9752 regs_ever_clobbered[i] = call_really_used_regs[i];
9753 }
9754
9755 /* Make the "magic" eh_return registers live if necessary. For regs_ever_live
9756 this work is done by liveness analysis (mark_regs_live_at_end).
9757 Special care is needed for functions containing landing pads. Landing pads
9758 may use the eh registers, but the code which sets these registers is not
9759 contained in that function. Hence s390_regs_ever_clobbered is not able to
9760 deal with this automatically. */
9761 if (crtl->calls_eh_return || cfun->machine->has_landing_pad_p)
9762 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM ; i++)
9763 if (crtl->calls_eh_return
9764 || (cfun->machine->has_landing_pad_p
9765 && df_regs_ever_live_p (EH_RETURN_DATA_REGNO (i))))
9766 regs_ever_clobbered[EH_RETURN_DATA_REGNO (i)] = 1;
9767
9768 /* For nonlocal gotos all call-saved registers have to be saved.
9769 This flag is also set for the unwinding code in libgcc.
9770 See expand_builtin_unwind_init. For regs_ever_live this is done by
9771 reload. */
9772 if (crtl->saves_all_registers)
9773 for (i = 0; i < 32; i++)
9774 if (!call_really_used_regs[i])
9775 regs_ever_clobbered[i] = 1;
9776
9777 FOR_EACH_BB_FN (cur_bb, cfun)
9778 {
9779 FOR_BB_INSNS (cur_bb, cur_insn)
9780 {
9781 rtx pat;
9782
9783 if (!INSN_P (cur_insn))
9784 continue;
9785
9786 pat = PATTERN (cur_insn);
9787
9788 /* Ignore GPR restore insns. */
9789 if (epilogue_completed && RTX_FRAME_RELATED_P (cur_insn))
9790 {
9791 if (GET_CODE (pat) == SET
9792 && GENERAL_REG_P (SET_DEST (pat)))
9793 {
9794 /* lgdr */
9795 if (GET_MODE (SET_SRC (pat)) == DImode
9796 && FP_REG_P (SET_SRC (pat)))
9797 continue;
9798
9799 /* l / lg */
9800 if (GET_CODE (SET_SRC (pat)) == MEM)
9801 continue;
9802 }
9803
9804 /* lm / lmg */
9805 if (GET_CODE (pat) == PARALLEL
9806 && load_multiple_operation (pat, VOIDmode))
9807 continue;
9808 }
9809
9810 note_stores (pat,
9811 s390_reg_clobbered_rtx,
9812 regs_ever_clobbered);
9813 }
9814 }
9815 }
9816
9817 /* Determine the frame area which actually has to be accessed
9818 in the function epilogue. The values are stored at the
9819 given pointers AREA_BOTTOM (address of the lowest used stack
9820 address) and AREA_TOP (address of the first item which does
9821 not belong to the stack frame). */
9822
9823 static void
9824 s390_frame_area (int *area_bottom, int *area_top)
9825 {
9826 int b, t;
9827
9828 b = INT_MAX;
9829 t = INT_MIN;
9830
9831 if (cfun_frame_layout.first_restore_gpr != -1)
9832 {
9833 b = (cfun_frame_layout.gprs_offset
9834 + cfun_frame_layout.first_restore_gpr * UNITS_PER_LONG);
9835 t = b + (cfun_frame_layout.last_restore_gpr
9836 - cfun_frame_layout.first_restore_gpr + 1) * UNITS_PER_LONG;
9837 }
9838
9839 if (TARGET_64BIT && cfun_save_high_fprs_p)
9840 {
9841 b = MIN (b, cfun_frame_layout.f8_offset);
9842 t = MAX (t, (cfun_frame_layout.f8_offset
9843 + cfun_frame_layout.high_fprs * 8));
9844 }
9845
9846 if (!TARGET_64BIT)
9847 {
9848 if (cfun_fpr_save_p (FPR4_REGNUM))
9849 {
9850 b = MIN (b, cfun_frame_layout.f4_offset);
9851 t = MAX (t, cfun_frame_layout.f4_offset + 8);
9852 }
9853 if (cfun_fpr_save_p (FPR6_REGNUM))
9854 {
9855 b = MIN (b, cfun_frame_layout.f4_offset + 8);
9856 t = MAX (t, cfun_frame_layout.f4_offset + 16);
9857 }
9858 }
9859 *area_bottom = b;
9860 *area_top = t;
9861 }
9862 /* Update gpr_save_slots in the frame layout trying to make use of
9863 FPRs as GPR save slots.
9864 This is a helper routine of s390_register_info. */
9865
9866 static void
9867 s390_register_info_gprtofpr ()
9868 {
9869 int save_reg_slot = FPR0_REGNUM;
9870 int i, j;
9871
9872 if (!TARGET_Z10 || !TARGET_HARD_FLOAT || !crtl->is_leaf)
9873 return;
9874
9875 /* builtin_eh_return needs to be able to modify the return address
9876 on the stack. It could also adjust the FPR save slot instead but
9877 is it worth the trouble?! */
9878 if (crtl->calls_eh_return)
9879 return;
9880
9881 for (i = 15; i >= 6; i--)
9882 {
9883 if (cfun_gpr_save_slot (i) == SAVE_SLOT_NONE)
9884 continue;
9885
9886 /* Advance to the next FP register which can be used as a
9887 GPR save slot. */
9888 while ((!call_really_used_regs[save_reg_slot]
9889 || df_regs_ever_live_p (save_reg_slot)
9890 || cfun_fpr_save_p (save_reg_slot))
9891 && FP_REGNO_P (save_reg_slot))
9892 save_reg_slot++;
9893 if (!FP_REGNO_P (save_reg_slot))
9894 {
9895 /* We only want to use ldgr/lgdr if we can get rid of
9896 stm/lm entirely. So undo the gpr slot allocation in
9897 case we ran out of FPR save slots. */
9898 for (j = 6; j <= 15; j++)
9899 if (FP_REGNO_P (cfun_gpr_save_slot (j)))
9900 cfun_gpr_save_slot (j) = SAVE_SLOT_STACK;
9901 break;
9902 }
9903 cfun_gpr_save_slot (i) = save_reg_slot++;
9904 }
9905 }
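
/* Illustrative example (simplified): in a z10+ hard-float leaf function
   that only needs to save r11 and r12 and uses no FPRs, the routine
   above assigns FPR save slots instead of stack slots, so the
   prologue/epilogue become register-to-register moves, roughly

     ldgr %f0,%r12    and later    lgdr %r12,%f0
     ldgr %f2,%r11                 lgdr %r11,%f2

   The exact FPRs chosen depend on which call-clobbered FPRs are unused
   in the function.  */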
9906
9907 /* Set the bits in fpr_bitmap for FPRs which need to be saved due to
9908 stdarg.
9909 This is a helper routine for s390_register_info. */
9910
9911 static void
9912 s390_register_info_stdarg_fpr ()
9913 {
9914 int i;
9915 int min_fpr;
9916 int max_fpr;
9917
9918 /* Save the FP argument regs for stdarg. f0, f2 for 31 bit and
9919 f0-f4 for 64 bit. */
9920 if (!cfun->stdarg
9921 || !TARGET_HARD_FLOAT
9922 || !cfun->va_list_fpr_size
9923 || crtl->args.info.fprs >= FP_ARG_NUM_REG)
9924 return;
9925
9926 min_fpr = crtl->args.info.fprs;
9927 max_fpr = min_fpr + cfun->va_list_fpr_size - 1;
9928 if (max_fpr >= FP_ARG_NUM_REG)
9929 max_fpr = FP_ARG_NUM_REG - 1;
9930
9931 /* FPR argument regs start at f0. */
9932 min_fpr += FPR0_REGNUM;
9933 max_fpr += FPR0_REGNUM;
9934
9935 for (i = min_fpr; i <= max_fpr; i++)
9936 cfun_set_fpr_save (i);
9937 }
9938
9939 /* Reserve the GPR save slots for GPRs which need to be saved due to
9940 stdarg.
9941 This is a helper routine for s390_register_info. */
9942
9943 static void
9944 s390_register_info_stdarg_gpr ()
9945 {
9946 int i;
9947 int min_gpr;
9948 int max_gpr;
9949
9950 if (!cfun->stdarg
9951 || !cfun->va_list_gpr_size
9952 || crtl->args.info.gprs >= GP_ARG_NUM_REG)
9953 return;
9954
9955 min_gpr = crtl->args.info.gprs;
9956 max_gpr = min_gpr + cfun->va_list_gpr_size - 1;
9957 if (max_gpr >= GP_ARG_NUM_REG)
9958 max_gpr = GP_ARG_NUM_REG - 1;
9959
9960 /* GPR argument regs start at r2. */
9961 min_gpr += GPR2_REGNUM;
9962 max_gpr += GPR2_REGNUM;
9963
9964 /* If r6 was supposed to be saved into an FPR and now needs to go to
9965      the stack for varargs we have to adjust the restore range to make
9966      sure that the restore is done from the stack as well.  */
9967 if (FP_REGNO_P (cfun_gpr_save_slot (GPR6_REGNUM))
9968 && min_gpr <= GPR6_REGNUM
9969 && max_gpr >= GPR6_REGNUM)
9970 {
9971 if (cfun_frame_layout.first_restore_gpr == -1
9972 || cfun_frame_layout.first_restore_gpr > GPR6_REGNUM)
9973 cfun_frame_layout.first_restore_gpr = GPR6_REGNUM;
9974 if (cfun_frame_layout.last_restore_gpr == -1
9975 || cfun_frame_layout.last_restore_gpr < GPR6_REGNUM)
9976 cfun_frame_layout.last_restore_gpr = GPR6_REGNUM;
9977 }
9978
9979 if (cfun_frame_layout.first_save_gpr == -1
9980 || cfun_frame_layout.first_save_gpr > min_gpr)
9981 cfun_frame_layout.first_save_gpr = min_gpr;
9982
9983 if (cfun_frame_layout.last_save_gpr == -1
9984 || cfun_frame_layout.last_save_gpr < max_gpr)
9985 cfun_frame_layout.last_save_gpr = max_gpr;
9986
9987 for (i = min_gpr; i <= max_gpr; i++)
9988 cfun_gpr_save_slot (i) = SAVE_SLOT_STACK;
9989 }
9990
9991 /* Calculate the save and restore ranges for stm(g) and lm(g) in the
9992 prologue and epilogue. */
9993
9994 static void
9995 s390_register_info_set_ranges ()
9996 {
9997 int i, j;
9998
9999 /* Find the first and the last save slot supposed to use the stack
10000 to set the restore range.
10001 Vararg regs might be marked as save to stack but only the
10002 call-saved regs really need restoring (i.e. r6). This code
10003 assumes that the vararg regs have not yet been recorded in
10004 cfun_gpr_save_slot. */
10005 for (i = 0; i < 16 && cfun_gpr_save_slot (i) != SAVE_SLOT_STACK; i++);
10006 for (j = 15; j > i && cfun_gpr_save_slot (j) != SAVE_SLOT_STACK; j--);
10007 cfun_frame_layout.first_restore_gpr = (i == 16) ? -1 : i;
10008 cfun_frame_layout.last_restore_gpr = (i == 16) ? -1 : j;
10009 cfun_frame_layout.first_save_gpr = (i == 16) ? -1 : i;
10010 cfun_frame_layout.last_save_gpr = (i == 16) ? -1 : j;
10011 }
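
/* Worked example (illustrative): if only r6 and r12..r15 were assigned
   SAVE_SLOT_STACK, the scans above yield i == 6 and j == 15, so both
   the save and the restore range become r6..r15.  The prologue and
   epilogue can then use a single stmg/lmg covering that range; the
   registers in between are simply stored and reloaded as well, which
   is harmless.  */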
10012
10013 /* The GPR and FPR save slots in cfun->machine->frame_layout are set
10014 for registers which need to be saved in function prologue.
10015 This function can be used until the insns emitted for save/restore
10016 of the regs are visible in the RTL stream. */
10017
10018 static void
10019 s390_register_info ()
10020 {
10021 int i;
10022 char clobbered_regs[32];
10023
10024 gcc_assert (!epilogue_completed);
10025
10026 if (reload_completed)
10027 /* After reload we rely on our own routine to determine which
10028 registers need saving. */
10029 s390_regs_ever_clobbered (clobbered_regs);
10030 else
10031 /* During reload we use regs_ever_live as a base since reload
10032 does changes in there which we otherwise would not be aware
10033 of. */
10034 for (i = 0; i < 32; i++)
10035 clobbered_regs[i] = df_regs_ever_live_p (i);
10036
10037 for (i = 0; i < 32; i++)
10038 clobbered_regs[i] = clobbered_regs[i] && !global_regs[i];
10039
10040 /* Mark the call-saved FPRs which need to be saved.
10041 This needs to be done before checking the special GPRs since the
10042 stack pointer usage depends on whether high FPRs have to be saved
10043 or not. */
10044 cfun_frame_layout.fpr_bitmap = 0;
10045 cfun_frame_layout.high_fprs = 0;
10046 for (i = FPR0_REGNUM; i <= FPR15_REGNUM; i++)
10047 if (clobbered_regs[i] && !call_really_used_regs[i])
10048 {
10049 cfun_set_fpr_save (i);
10050 if (i >= FPR8_REGNUM)
10051 cfun_frame_layout.high_fprs++;
10052 }
10053
10054 /* Register 12 is used for GOT address, but also as temp in prologue
10055 for split-stack stdarg functions (unless r14 is available). */
10056 clobbered_regs[12]
10057 |= ((flag_pic && df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM))
10058 || (flag_split_stack && cfun->stdarg
10059 && (crtl->is_leaf || TARGET_TPF_PROFILING
10060 || has_hard_reg_initial_val (Pmode, RETURN_REGNUM))));
10061
10062 clobbered_regs[BASE_REGNUM]
10063 |= (cfun->machine->base_reg
10064 && REGNO (cfun->machine->base_reg) == BASE_REGNUM);
10065
10066 clobbered_regs[HARD_FRAME_POINTER_REGNUM]
10067 |= !!frame_pointer_needed;
10068
10069   /* On pre-z900 machines this decision might have to wait until the
10070      machine dependent reorg pass.
10071 save_return_addr_p will only be set on non-zarch machines so
10072 there is no risk that r14 goes into an FPR instead of a stack
10073 slot. */
10074 clobbered_regs[RETURN_REGNUM]
10075 |= (!crtl->is_leaf
10076 || TARGET_TPF_PROFILING
10077 || cfun->machine->split_branches_pending_p
10078 || cfun_frame_layout.save_return_addr_p
10079 || crtl->calls_eh_return);
10080
10081 clobbered_regs[STACK_POINTER_REGNUM]
10082 |= (!crtl->is_leaf
10083 || TARGET_TPF_PROFILING
10084 || cfun_save_high_fprs_p
10085 || get_frame_size () > 0
10086 || (reload_completed && cfun_frame_layout.frame_size > 0)
10087 || cfun->calls_alloca);
10088
10089 memset (cfun_frame_layout.gpr_save_slots, SAVE_SLOT_NONE, 16);
10090
10091 for (i = 6; i < 16; i++)
10092 if (clobbered_regs[i])
10093 cfun_gpr_save_slot (i) = SAVE_SLOT_STACK;
10094
10095 s390_register_info_stdarg_fpr ();
10096 s390_register_info_gprtofpr ();
10097 s390_register_info_set_ranges ();
10098 /* stdarg functions might need to save GPRs 2 to 6. This might
10099 override the GPR->FPR save decision made by
10100 s390_register_info_gprtofpr for r6 since vararg regs must go to
10101 the stack. */
10102 s390_register_info_stdarg_gpr ();
10103 }
10104
10105 /* This function is called by s390_optimize_prologue in order to get
10106 rid of unnecessary GPR save/restore instructions. The register info
10107 for the GPRs is re-computed and the ranges are re-calculated. */
10108
10109 static void
10110 s390_optimize_register_info ()
10111 {
10112 char clobbered_regs[32];
10113 int i;
10114
10115 gcc_assert (epilogue_completed);
10116 gcc_assert (!cfun->machine->split_branches_pending_p);
10117
10118 s390_regs_ever_clobbered (clobbered_regs);
10119
10120 for (i = 0; i < 32; i++)
10121 clobbered_regs[i] = clobbered_regs[i] && !global_regs[i];
10122
10123 /* There is still special treatment needed for cases invisible to
10124 s390_regs_ever_clobbered. */
10125 clobbered_regs[RETURN_REGNUM]
10126 |= (TARGET_TPF_PROFILING
10127 /* When expanding builtin_return_addr in ESA mode we do not
10128 know whether r14 will later be needed as scratch reg when
10129 doing branch splitting. So the builtin always accesses the
10130 r14 save slot and we need to stick to the save/restore
10131 decision for r14 even if it turns out that it didn't get
10132 clobbered. */
10133 || cfun_frame_layout.save_return_addr_p
10134 || crtl->calls_eh_return);
10135
10136 memset (cfun_frame_layout.gpr_save_slots, SAVE_SLOT_NONE, 6);
10137
10138 for (i = 6; i < 16; i++)
10139 if (!clobbered_regs[i])
10140 cfun_gpr_save_slot (i) = SAVE_SLOT_NONE;
10141
10142 s390_register_info_set_ranges ();
10143 s390_register_info_stdarg_gpr ();
10144 }
10145
10146 /* Fill cfun->machine with info about the frame of the current function.  */
10147
10148 static void
10149 s390_frame_info (void)
10150 {
10151 HOST_WIDE_INT lowest_offset;
10152
10153 cfun_frame_layout.first_save_gpr_slot = cfun_frame_layout.first_save_gpr;
10154 cfun_frame_layout.last_save_gpr_slot = cfun_frame_layout.last_save_gpr;
10155
10156 /* The va_arg builtin uses a constant distance of 16 *
10157 UNITS_PER_LONG (r0-r15) to reach the FPRs from the reg_save_area
10158 pointer. So even if we are going to save the stack pointer in an
10159 FPR we need the stack space in order to keep the offsets
10160 correct. */
10161 if (cfun->stdarg && cfun_save_arg_fprs_p)
10162 {
10163 cfun_frame_layout.last_save_gpr_slot = STACK_POINTER_REGNUM;
10164
10165 if (cfun_frame_layout.first_save_gpr_slot == -1)
10166 cfun_frame_layout.first_save_gpr_slot = STACK_POINTER_REGNUM;
10167 }
10168
10169 cfun_frame_layout.frame_size = get_frame_size ();
10170 if (!TARGET_64BIT && cfun_frame_layout.frame_size > 0x7fff0000)
10171 fatal_error (input_location,
10172 "total size of local variables exceeds architecture limit");
10173
10174 if (!TARGET_PACKED_STACK)
10175 {
10176 /* Fixed stack layout. */
10177 cfun_frame_layout.backchain_offset = 0;
10178 cfun_frame_layout.f0_offset = 16 * UNITS_PER_LONG;
10179 cfun_frame_layout.f4_offset = cfun_frame_layout.f0_offset + 2 * 8;
10180 cfun_frame_layout.f8_offset = -cfun_frame_layout.high_fprs * 8;
10181 cfun_frame_layout.gprs_offset = (cfun_frame_layout.first_save_gpr_slot
10182 * UNITS_PER_LONG);
10183 }
10184 else if (TARGET_BACKCHAIN)
10185 {
10186 /* Kernel stack layout - packed stack, backchain, no float */
10187 gcc_assert (TARGET_SOFT_FLOAT);
10188 cfun_frame_layout.backchain_offset = (STACK_POINTER_OFFSET
10189 - UNITS_PER_LONG);
10190
10191 /* The distance between the backchain and the return address
10192 save slot must not change. So we always need a slot for the
10193 stack pointer which resides in between. */
10194 cfun_frame_layout.last_save_gpr_slot = STACK_POINTER_REGNUM;
10195
10196 cfun_frame_layout.gprs_offset
10197 = cfun_frame_layout.backchain_offset - cfun_gprs_save_area_size;
10198
10199 /* FPRs will not be saved. Nevertheless pick sane values to
10200 keep area calculations valid. */
10201 cfun_frame_layout.f0_offset =
10202 cfun_frame_layout.f4_offset =
10203 cfun_frame_layout.f8_offset = cfun_frame_layout.gprs_offset;
10204 }
10205 else
10206 {
10207 int num_fprs;
10208
10209 /* Packed stack layout without backchain. */
10210
10211 /* With stdarg FPRs need their dedicated slots. */
10212 num_fprs = (TARGET_64BIT && cfun->stdarg ? 2
10213 : (cfun_fpr_save_p (FPR4_REGNUM) +
10214 cfun_fpr_save_p (FPR6_REGNUM)));
10215 cfun_frame_layout.f4_offset = STACK_POINTER_OFFSET - 8 * num_fprs;
10216
10217 num_fprs = (cfun->stdarg ? 2
10218 : (cfun_fpr_save_p (FPR0_REGNUM)
10219 + cfun_fpr_save_p (FPR2_REGNUM)));
10220 cfun_frame_layout.f0_offset = cfun_frame_layout.f4_offset - 8 * num_fprs;
10221
10222 cfun_frame_layout.gprs_offset
10223 = cfun_frame_layout.f0_offset - cfun_gprs_save_area_size;
10224
10225 cfun_frame_layout.f8_offset = (cfun_frame_layout.gprs_offset
10226 - cfun_frame_layout.high_fprs * 8);
10227 }
10228
10229 if (cfun_save_high_fprs_p)
10230 cfun_frame_layout.frame_size += cfun_frame_layout.high_fprs * 8;
10231
10232 if (!crtl->is_leaf)
10233 cfun_frame_layout.frame_size += crtl->outgoing_args_size;
10234
10235 /* In the following cases we have to allocate a STACK_POINTER_OFFSET
10236 sized area at the bottom of the stack. This is required also for
10237 leaf functions. When GCC generates a local stack reference it
10238 will always add STACK_POINTER_OFFSET to all these references. */
10239 if (crtl->is_leaf
10240 && !TARGET_TPF_PROFILING
10241 && cfun_frame_layout.frame_size == 0
10242 && !cfun->calls_alloca)
10243 return;
10244
10245 /* Calculate the number of bytes we have used in our own register
10246 save area. With the packed stack layout we can re-use the
10247 remaining bytes for normal stack elements. */
10248
10249 if (TARGET_PACKED_STACK)
10250 lowest_offset = MIN (MIN (cfun_frame_layout.f0_offset,
10251 cfun_frame_layout.f4_offset),
10252 cfun_frame_layout.gprs_offset);
10253 else
10254 lowest_offset = 0;
10255
10256 if (TARGET_BACKCHAIN)
10257 lowest_offset = MIN (lowest_offset, cfun_frame_layout.backchain_offset);
10258
10259 cfun_frame_layout.frame_size += STACK_POINTER_OFFSET - lowest_offset;
10260
10261   /* Under the 31-bit ABI, if an odd number of GPRs has to be saved we
10262      have to adjust the frame size to maintain the 8-byte alignment of
10263      stack frames.  */
10264 cfun_frame_layout.frame_size = ((cfun_frame_layout.frame_size +
10265 STACK_BOUNDARY / BITS_PER_UNIT - 1)
10266 & ~(STACK_BOUNDARY / BITS_PER_UNIT - 1));
10267 }
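
/* Worked example (illustrative) for the final alignment step above:
   STACK_BOUNDARY / BITS_PER_UNIT is 8 on both ABIs, so a raw frame
   size of, say, 92 bytes under the 31-bit ABI is rounded up to
   (92 + 7) & ~7 == 96, keeping stack frames 8-byte aligned even when
   an odd number of 4-byte GPR slots is in use.  */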
10268
10269 /* Generate frame layout. Fills in register and frame data for the current
10270 function in cfun->machine. This routine can be called multiple times;
10271 it will re-do the complete frame layout every time. */
10272
10273 static void
10274 s390_init_frame_layout (void)
10275 {
10276 HOST_WIDE_INT frame_size;
10277 int base_used;
10278
10279 /* After LRA the frame layout is supposed to be read-only and should
10280 not be re-computed. */
10281 if (reload_completed)
10282 return;
10283
10284 /* On S/390 machines, we may need to perform branch splitting, which
10285 will require both base and return address register. We have no
10286 choice but to assume we're going to need them until right at the
10287 end of the machine dependent reorg phase. */
10288 if (!TARGET_CPU_ZARCH)
10289 cfun->machine->split_branches_pending_p = true;
10290
10291 do
10292 {
10293 frame_size = cfun_frame_layout.frame_size;
10294
10295 /* Try to predict whether we'll need the base register. */
10296 base_used = cfun->machine->split_branches_pending_p
10297 || crtl->uses_const_pool
10298 || (!DISP_IN_RANGE (frame_size)
10299 && !CONST_OK_FOR_K (frame_size));
10300
10301 /* Decide which register to use as literal pool base. In small
10302 leaf functions, try to use an unused call-clobbered register
10303 as base register to avoid save/restore overhead. */
10304 if (!base_used)
10305 cfun->machine->base_reg = NULL_RTX;
10306 else
10307 {
10308 int br = 0;
10309
10310 if (crtl->is_leaf)
10311 /* Prefer r5 (most likely to be free). */
10312 for (br = 5; br >= 2 && df_regs_ever_live_p (br); br--)
10313 ;
10314 cfun->machine->base_reg =
10315 gen_rtx_REG (Pmode, (br >= 2) ? br : BASE_REGNUM);
10316 }
10317
10318 s390_register_info ();
10319 s390_frame_info ();
10320 }
10321 while (frame_size != cfun_frame_layout.frame_size);
10322 }
10323
10324 /* Remove the FPR clobbers from a tbegin insn if it can be proven that
10325 the TX is nonescaping. A transaction is considered escaping if
10326 there is at least one path from tbegin returning CC0 to the
10327    function exit block without a tend.
10328
10329 The check so far has some limitations:
10330 - only single tbegin/tend BBs are supported
10331 - the first cond jump after tbegin must separate the CC0 path from ~CC0
10332 - when CC is copied to a GPR and the CC0 check is done with the GPR
10333 this is not supported
10334 */
10335
10336 static void
10337 s390_optimize_nonescaping_tx (void)
10338 {
10339 const unsigned int CC0 = 1 << 3;
10340 basic_block tbegin_bb = NULL;
10341 basic_block tend_bb = NULL;
10342 basic_block bb;
10343 rtx_insn *insn;
10344 bool result = true;
10345 int bb_index;
10346 rtx_insn *tbegin_insn = NULL;
10347
10348 if (!cfun->machine->tbegin_p)
10349 return;
10350
10351 for (bb_index = 0; bb_index < n_basic_blocks_for_fn (cfun); bb_index++)
10352 {
10353 bb = BASIC_BLOCK_FOR_FN (cfun, bb_index);
10354
10355 if (!bb)
10356 continue;
10357
10358 FOR_BB_INSNS (bb, insn)
10359 {
10360 rtx ite, cc, pat, target;
10361 unsigned HOST_WIDE_INT mask;
10362
10363 if (!INSN_P (insn) || INSN_CODE (insn) <= 0)
10364 continue;
10365
10366 pat = PATTERN (insn);
10367
10368 if (GET_CODE (pat) == PARALLEL)
10369 pat = XVECEXP (pat, 0, 0);
10370
10371 if (GET_CODE (pat) != SET
10372 || GET_CODE (SET_SRC (pat)) != UNSPEC_VOLATILE)
10373 continue;
10374
10375 if (XINT (SET_SRC (pat), 1) == UNSPECV_TBEGIN)
10376 {
10377 rtx_insn *tmp;
10378
10379 tbegin_insn = insn;
10380
10381 /* Just return if the tbegin doesn't have clobbers. */
10382 if (GET_CODE (PATTERN (insn)) != PARALLEL)
10383 return;
10384
10385 if (tbegin_bb != NULL)
10386 return;
10387
10388 /* Find the next conditional jump. */
10389 for (tmp = NEXT_INSN (insn);
10390 tmp != NULL_RTX;
10391 tmp = NEXT_INSN (tmp))
10392 {
10393 if (reg_set_p (gen_rtx_REG (CCmode, CC_REGNUM), tmp))
10394 return;
10395 if (!JUMP_P (tmp))
10396 continue;
10397
10398 ite = SET_SRC (PATTERN (tmp));
10399 if (GET_CODE (ite) != IF_THEN_ELSE)
10400 continue;
10401
10402 cc = XEXP (XEXP (ite, 0), 0);
10403 if (!REG_P (cc) || !CC_REGNO_P (REGNO (cc))
10404 || GET_MODE (cc) != CCRAWmode
10405 || GET_CODE (XEXP (XEXP (ite, 0), 1)) != CONST_INT)
10406 return;
10407
10408 if (bb->succs->length () != 2)
10409 return;
10410
10411 mask = INTVAL (XEXP (XEXP (ite, 0), 1));
10412 if (GET_CODE (XEXP (ite, 0)) == NE)
10413 mask ^= 0xf;
10414
10415 if (mask == CC0)
10416 target = XEXP (ite, 1);
10417 else if (mask == (CC0 ^ 0xf))
10418 target = XEXP (ite, 2);
10419 else
10420 return;
10421
10422 {
10423 edge_iterator ei;
10424 edge e1, e2;
10425
10426 ei = ei_start (bb->succs);
10427 e1 = ei_safe_edge (ei);
10428 ei_next (&ei);
10429 e2 = ei_safe_edge (ei);
10430
10431 if (e2->flags & EDGE_FALLTHRU)
10432 {
10433 e2 = e1;
10434 e1 = ei_safe_edge (ei);
10435 }
10436
10437 if (!(e1->flags & EDGE_FALLTHRU))
10438 return;
10439
10440 tbegin_bb = (target == pc_rtx) ? e1->dest : e2->dest;
10441 }
10442 if (tmp == BB_END (bb))
10443 break;
10444 }
10445 }
10446
10447 if (XINT (SET_SRC (pat), 1) == UNSPECV_TEND)
10448 {
10449 if (tend_bb != NULL)
10450 return;
10451 tend_bb = bb;
10452 }
10453 }
10454 }
10455
10456 /* Either we successfully remove the FPR clobbers here or we are not
10457 able to do anything for this TX. Both cases don't qualify for
10458 another look. */
10459 cfun->machine->tbegin_p = false;
10460
10461 if (tbegin_bb == NULL || tend_bb == NULL)
10462 return;
10463
10464 calculate_dominance_info (CDI_POST_DOMINATORS);
10465 result = dominated_by_p (CDI_POST_DOMINATORS, tbegin_bb, tend_bb);
10466 free_dominance_info (CDI_POST_DOMINATORS);
10467
10468 if (!result)
10469 return;
10470
10471 PATTERN (tbegin_insn) = gen_rtx_PARALLEL (VOIDmode,
10472 gen_rtvec (2,
10473 XVECEXP (PATTERN (tbegin_insn), 0, 0),
10474 XVECEXP (PATTERN (tbegin_insn), 0, 1)));
10475 INSN_CODE (tbegin_insn) = -1;
10476 df_insn_rescan (tbegin_insn);
10477
10478 return;
10479 }
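
/* Illustrative source-level sketch (requires -mhtm, not part of the
   build) of a transaction the routine above can optimize:

     if (__builtin_tbegin (0) == 0)      <- CC0: transaction started
       {
         counter++;                      <- transactional work, no FPRs
         __builtin_tend ();
       }

   Every CC0 path from the tbegin reaches a tend before the function
   exit block, so the FPR clobbers can be dropped from the tbegin
   pattern and no FPR save/restore code has to be emitted for it.  */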
10480
10481 /* Implement TARGET_HARD_REGNO_NREGS. Because all registers in a class
10482 have the same size, this is equivalent to CLASS_MAX_NREGS. */
10483
10484 static unsigned int
10485 s390_hard_regno_nregs (unsigned int regno, machine_mode mode)
10486 {
10487 return s390_class_max_nregs (REGNO_REG_CLASS (regno), mode);
10488 }
10489
10490 /* Implement TARGET_HARD_REGNO_MODE_OK.
10491
10492 Integer modes <= word size fit into any GPR.
10493 Integer modes > word size fit into successive GPRs, starting with
10494 an even-numbered register.
10495 SImode and DImode fit into FPRs as well.
10496
10497 Floating point modes <= word size fit into any FPR or GPR.
10498 Floating point modes > word size (i.e. DFmode on 32-bit) fit
10499 into any FPR, or an even-odd GPR pair.
10500 TFmode fits only into an even-odd FPR pair.
10501
10502 Complex floating point modes fit either into two FPRs, or into
10503 successive GPRs (again starting with an even number).
10504 TCmode fits only into two successive even-odd FPR pairs.
10505
10506 Condition code modes fit only into the CC register. */
10507
10508 static bool
10509 s390_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
10510 {
10511 if (!TARGET_VX && VECTOR_NOFP_REGNO_P (regno))
10512 return false;
10513
10514 switch (REGNO_REG_CLASS (regno))
10515 {
10516 case VEC_REGS:
10517 return ((GET_MODE_CLASS (mode) == MODE_INT
10518 && s390_class_max_nregs (VEC_REGS, mode) == 1)
10519 || mode == DFmode
10520 || (TARGET_VXE && mode == SFmode)
10521 || s390_vector_mode_supported_p (mode));
10522 break;
10523 case FP_REGS:
10524 if (TARGET_VX
10525 && ((GET_MODE_CLASS (mode) == MODE_INT
10526 && s390_class_max_nregs (FP_REGS, mode) == 1)
10527 || mode == DFmode
10528 || s390_vector_mode_supported_p (mode)))
10529 return true;
10530
10531 if (REGNO_PAIR_OK (regno, mode))
10532 {
10533 if (mode == SImode || mode == DImode)
10534 return true;
10535
10536 if (FLOAT_MODE_P (mode) && GET_MODE_CLASS (mode) != MODE_VECTOR_FLOAT)
10537 return true;
10538 }
10539 break;
10540 case ADDR_REGS:
10541 if (FRAME_REGNO_P (regno) && mode == Pmode)
10542 return true;
10543
10544 /* fallthrough */
10545 case GENERAL_REGS:
10546 if (REGNO_PAIR_OK (regno, mode))
10547 {
10548 if (TARGET_ZARCH
10549 || (mode != TFmode && mode != TCmode && mode != TDmode))
10550 return true;
10551 }
10552 break;
10553 case CC_REGS:
10554 if (GET_MODE_CLASS (mode) == MODE_CC)
10555 return true;
10556 break;
10557 case ACCESS_REGS:
10558 if (REGNO_PAIR_OK (regno, mode))
10559 {
10560 if (mode == SImode || mode == Pmode)
10561 return true;
10562 }
10563 break;
10564 default:
10565 return false;
10566 }
10567
10568 return false;
10569 }
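
/* A few illustrative consequences of the rules above: DImode fits in
   any single GPR on 64-bit, while under the 31-bit ABI it needs a GPR
   pair starting at an even register (r2/r3 is fine, r3/r4 is not);
   SImode and DImode are also accepted in a single FPR; CCmode is
   accepted only in the CC register; vector modes require the vector
   facility.  */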
10570
10571 /* Implement TARGET_MODES_TIEABLE_P. */
10572
10573 static bool
10574 s390_modes_tieable_p (machine_mode mode1, machine_mode mode2)
10575 {
10576 return ((mode1 == SFmode || mode1 == DFmode)
10577 == (mode2 == SFmode || mode2 == DFmode));
10578 }
10579
10580 /* Return nonzero if register OLD_REG can be renamed to register NEW_REG. */
10581
10582 bool
10583 s390_hard_regno_rename_ok (unsigned int old_reg, unsigned int new_reg)
10584 {
10585 /* Once we've decided upon a register to use as base register, it must
10586 no longer be used for any other purpose. */
10587 if (cfun->machine->base_reg)
10588 if (REGNO (cfun->machine->base_reg) == old_reg
10589 || REGNO (cfun->machine->base_reg) == new_reg)
10590 return false;
10591
10592 /* Prevent regrename from using call-saved regs which haven't
10593 actually been saved. This is necessary since regrename assumes
10594 the backend save/restore decisions are based on
10595 df_regs_ever_live. Since we have our own routine we have to tell
10596 regrename manually about it. */
10597 if (GENERAL_REGNO_P (new_reg)
10598 && !call_really_used_regs[new_reg]
10599 && cfun_gpr_save_slot (new_reg) == SAVE_SLOT_NONE)
10600 return false;
10601
10602 return true;
10603 }
10604
10605 /* Return nonzero if register REGNO can be used as a scratch register
10606 in peephole2. */
10607
10608 static bool
10609 s390_hard_regno_scratch_ok (unsigned int regno)
10610 {
10611 /* See s390_hard_regno_rename_ok. */
10612 if (GENERAL_REGNO_P (regno)
10613 && !call_really_used_regs[regno]
10614 && cfun_gpr_save_slot (regno) == SAVE_SLOT_NONE)
10615 return false;
10616
10617 return true;
10618 }
10619
10620 /* Implement TARGET_HARD_REGNO_CALL_PART_CLOBBERED. When generating
10621 code that runs in z/Architecture mode, but conforms to the 31-bit
10622 ABI, GPRs can hold 8 bytes; the ABI guarantees only that the lower 4
10623 bytes are saved across calls, however. */
10624
10625 static bool
10626 s390_hard_regno_call_part_clobbered (unsigned int regno, machine_mode mode)
10627 {
10628 if (!TARGET_64BIT
10629 && TARGET_ZARCH
10630 && GET_MODE_SIZE (mode) > 4
10631 && ((regno >= 6 && regno <= 15) || regno == 32))
10632 return true;
10633
10634 if (TARGET_VX
10635 && GET_MODE_SIZE (mode) > 8
10636 && (((TARGET_64BIT && regno >= 24 && regno <= 31))
10637 || (!TARGET_64BIT && (regno == 18 || regno == 19))))
10638 return true;
10639
10640 return false;
10641 }
10642
10643 /* Maximum number of registers to represent a value of mode MODE
10644 in a register of class RCLASS. */
10645
10646 int
10647 s390_class_max_nregs (enum reg_class rclass, machine_mode mode)
10648 {
10649 int reg_size;
10650 bool reg_pair_required_p = false;
10651
10652 switch (rclass)
10653 {
10654 case FP_REGS:
10655 case VEC_REGS:
10656 reg_size = TARGET_VX ? 16 : 8;
10657
10658 /* TF and TD modes would fit into a VR but we put them into a
10659 register pair since we do not have 128bit FP instructions on
10660 full VRs. */
10661 if (TARGET_VX
10662 && SCALAR_FLOAT_MODE_P (mode)
10663 && GET_MODE_SIZE (mode) >= 16)
10664 reg_pair_required_p = true;
10665
10666 /* Even if complex types would fit into a single FPR/VR we force
10667 them into a register pair to deal with the parts more easily.
10668 (FIXME: What about complex ints?) */
10669 if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
10670 reg_pair_required_p = true;
10671 break;
10672 case ACCESS_REGS:
10673 reg_size = 4;
10674 break;
10675 default:
10676 reg_size = UNITS_PER_WORD;
10677 break;
10678 }
10679
10680 if (reg_pair_required_p)
10681 return 2 * ((GET_MODE_SIZE (mode) / 2 + reg_size - 1) / reg_size);
10682
10683 return (GET_MODE_SIZE (mode) + reg_size - 1) / reg_size;
10684 }
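
/* Worked example (illustrative): for TFmode (16 bytes) in FP_REGS the
   function above yields 2 either way.  Without the vector facility
   reg_size is 8, so (16 + 7) / 8 == 2; with it reg_size is 16 but
   reg_pair_required_p forces a pair: 2 * ((16 / 2 + 15) / 16) == 2.  */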
10685
10686 /* Implement TARGET_CAN_CHANGE_MODE_CLASS. */
10687
10688 static bool
10689 s390_can_change_mode_class (machine_mode from_mode,
10690 machine_mode to_mode,
10691 reg_class_t rclass)
10692 {
10693 machine_mode small_mode;
10694 machine_mode big_mode;
10695
10696 /* V1TF and TF have different representations in vector
10697 registers. */
10698 if (reg_classes_intersect_p (VEC_REGS, rclass)
10699 && ((from_mode == V1TFmode && to_mode == TFmode)
10700 || (from_mode == TFmode && to_mode == V1TFmode)))
10701 return false;
10702
10703 if (GET_MODE_SIZE (from_mode) == GET_MODE_SIZE (to_mode))
10704 return true;
10705
10706 if (GET_MODE_SIZE (from_mode) < GET_MODE_SIZE (to_mode))
10707 {
10708 small_mode = from_mode;
10709 big_mode = to_mode;
10710 }
10711 else
10712 {
10713 small_mode = to_mode;
10714 big_mode = from_mode;
10715 }
10716
10717 /* Values residing in VRs are little-endian style. All modes are
10718      placed left-aligned in a VR.  This means that we cannot allow
10719 switching between modes with differing sizes. Also if the vector
10720 facility is available we still place TFmode values in VR register
10721      pairs, since the instructions we have operating on TFmode deal
10722      only with register pairs.  Therefore we have to allow DFmode
10723 subregs of TFmodes to enable the TFmode splitters. */
10724 if (reg_classes_intersect_p (VEC_REGS, rclass)
10725 && (GET_MODE_SIZE (small_mode) < 8
10726 || s390_class_max_nregs (VEC_REGS, big_mode) == 1))
10727 return false;
10728
10729 /* Likewise for access registers, since they have only half the
10730 word size on 64-bit. */
10731 if (reg_classes_intersect_p (ACCESS_REGS, rclass))
10732 return false;
10733
10734 return true;
10735 }
10736
10737 /* Return true if we use LRA instead of reload pass. */
10738 static bool
10739 s390_lra_p (void)
10740 {
10741 return s390_lra_flag;
10742 }
10743
10744 /* Return true if register FROM can be eliminated via register TO. */
10745
10746 static bool
10747 s390_can_eliminate (const int from, const int to)
10748 {
10749 /* On zSeries machines, we have not marked the base register as fixed.
10750 Instead, we have an elimination rule BASE_REGNUM -> BASE_REGNUM.
10751 If a function requires the base register, we say here that this
10752 elimination cannot be performed. This will cause reload to free
10753 up the base register (as if it were fixed). On the other hand,
10754 if the current function does *not* require the base register, we
10755 say here the elimination succeeds, which in turn allows reload
10756 to allocate the base register for any other purpose. */
10757 if (from == BASE_REGNUM && to == BASE_REGNUM)
10758 {
10759 if (TARGET_CPU_ZARCH)
10760 {
10761 s390_init_frame_layout ();
10762 return cfun->machine->base_reg == NULL_RTX;
10763 }
10764
10765 return false;
10766 }
10767
10768 /* Everything else must point into the stack frame. */
10769 gcc_assert (to == STACK_POINTER_REGNUM
10770 || to == HARD_FRAME_POINTER_REGNUM);
10771
10772 gcc_assert (from == FRAME_POINTER_REGNUM
10773 || from == ARG_POINTER_REGNUM
10774 || from == RETURN_ADDRESS_POINTER_REGNUM);
10775
10776 /* Make sure we actually saved the return address. */
10777 if (from == RETURN_ADDRESS_POINTER_REGNUM)
10778 if (!crtl->calls_eh_return
10779 && !cfun->stdarg
10780 && !cfun_frame_layout.save_return_addr_p)
10781 return false;
10782
10783 return true;
10784 }
10785
10786 /* Return the offset between registers FROM and TO immediately after the prologue.  */
10787
10788 HOST_WIDE_INT
10789 s390_initial_elimination_offset (int from, int to)
10790 {
10791 HOST_WIDE_INT offset;
10792
10793 /* ??? Why are we called for non-eliminable pairs? */
10794 if (!s390_can_eliminate (from, to))
10795 return 0;
10796
10797 switch (from)
10798 {
10799 case FRAME_POINTER_REGNUM:
10800 offset = (get_frame_size()
10801 + STACK_POINTER_OFFSET
10802 + crtl->outgoing_args_size);
10803 break;
10804
10805 case ARG_POINTER_REGNUM:
10806 s390_init_frame_layout ();
10807 offset = cfun_frame_layout.frame_size + STACK_POINTER_OFFSET;
10808 break;
10809
10810 case RETURN_ADDRESS_POINTER_REGNUM:
10811 s390_init_frame_layout ();
10812
10813 if (cfun_frame_layout.first_save_gpr_slot == -1)
10814 {
10815 /* If it turns out that for stdarg nothing went into the reg
10816 save area we also do not need the return address
10817 pointer. */
10818 if (cfun->stdarg && !cfun_save_arg_fprs_p)
10819 return 0;
10820
10821 gcc_unreachable ();
10822 }
10823
10824       /* For the following to work it is not necessary that r14 itself
10825 	 has a save slot; it is sufficient that some other GPR got one.
10826 	 Since the GPRs are always stored without gaps we
10827 are able to calculate where the r14 save slot would
10828 reside. */
10829 offset = (cfun_frame_layout.frame_size + cfun_frame_layout.gprs_offset +
10830 (RETURN_REGNUM - cfun_frame_layout.first_save_gpr_slot) *
10831 UNITS_PER_LONG);
10832 break;
10833
10834 case BASE_REGNUM:
10835 offset = 0;
10836 break;
10837
10838 default:
10839 gcc_unreachable ();
10840 }
10841
10842 return offset;
10843 }
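
/* Worked example (illustrative) for the RETURN_ADDRESS_POINTER_REGNUM
   case above: with the 64-bit ABI (UNITS_PER_LONG == 8), a frame size
   of 160, gprs_offset == 48 and r6 as the first GPR save slot, the
   return address pointer is eliminated to the stack pointer plus
   160 + 48 + (14 - 6) * 8 == 272, the address of the r14 save slot.  */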
10844
10845 /* Emit insn to save fpr REGNUM at offset OFFSET relative
10846 to register BASE. Return generated insn. */
10847
10848 static rtx
10849 save_fpr (rtx base, int offset, int regnum)
10850 {
10851 rtx addr;
10852 addr = gen_rtx_MEM (DFmode, plus_constant (Pmode, base, offset));
10853
10854 if (regnum >= 16 && regnum <= (16 + FP_ARG_NUM_REG))
10855 set_mem_alias_set (addr, get_varargs_alias_set ());
10856 else
10857 set_mem_alias_set (addr, get_frame_alias_set ());
10858
10859 return emit_move_insn (addr, gen_rtx_REG (DFmode, regnum));
10860 }
10861
10862 /* Emit insn to restore fpr REGNUM from offset OFFSET relative
10863 to register BASE. Return generated insn. */
10864
10865 static rtx
10866 restore_fpr (rtx base, int offset, int regnum)
10867 {
10868 rtx addr;
10869 addr = gen_rtx_MEM (DFmode, plus_constant (Pmode, base, offset));
10870 set_mem_alias_set (addr, get_frame_alias_set ());
10871
10872 return emit_move_insn (gen_rtx_REG (DFmode, regnum), addr);
10873 }
10874
10875 /* Return true if REGNO is a global register, but not one
10876    of the special ones that need to be saved/restored anyway.  */
10877
10878 static inline bool
10879 global_not_special_regno_p (int regno)
10880 {
10881 return (global_regs[regno]
10882 /* These registers are special and need to be
10883 restored in any case. */
10884 && !(regno == STACK_POINTER_REGNUM
10885 || regno == RETURN_REGNUM
10886 || regno == BASE_REGNUM
10887 || (flag_pic && regno == (int)PIC_OFFSET_TABLE_REGNUM)));
10888 }
10889
10890 /* Generate insn to save registers FIRST to LAST into
10891 the register save area located at offset OFFSET
10892 relative to register BASE. */
10893
10894 static rtx
10895 save_gprs (rtx base, int offset, int first, int last)
10896 {
10897 rtx addr, insn, note;
10898 int i;
10899
10900 addr = plus_constant (Pmode, base, offset);
10901 addr = gen_rtx_MEM (Pmode, addr);
10902
10903 set_mem_alias_set (addr, get_frame_alias_set ());
10904
10905 /* Special-case single register. */
10906 if (first == last)
10907 {
10908 if (TARGET_64BIT)
10909 insn = gen_movdi (addr, gen_rtx_REG (Pmode, first));
10910 else
10911 insn = gen_movsi (addr, gen_rtx_REG (Pmode, first));
10912
10913 if (!global_not_special_regno_p (first))
10914 RTX_FRAME_RELATED_P (insn) = 1;
10915 return insn;
10916 }
10917
10918
10919 insn = gen_store_multiple (addr,
10920 gen_rtx_REG (Pmode, first),
10921 GEN_INT (last - first + 1));
10922
10923 if (first <= 6 && cfun->stdarg)
10924 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
10925 {
10926 rtx mem = XEXP (XVECEXP (PATTERN (insn), 0, i), 0);
10927
10928 if (first + i <= 6)
10929 set_mem_alias_set (mem, get_varargs_alias_set ());
10930 }
10931
10932 /* We need to set the FRAME_RELATED flag on all SETs
10933 inside the store-multiple pattern.
10934
10935 However, we must not emit DWARF records for registers 2..5
10936 if they are stored for use by variable arguments ...
10937
10938      ??? Unfortunately, it is not enough to simply not set the
10939 FRAME_RELATED flags for those SETs, because the first SET
10940 of the PARALLEL is always treated as if it had the flag
10941 set, even if it does not. Therefore we emit a new pattern
10942 without those registers as REG_FRAME_RELATED_EXPR note. */
10943
10944 if (first >= 6 && !global_not_special_regno_p (first))
10945 {
10946 rtx pat = PATTERN (insn);
10947
10948 for (i = 0; i < XVECLEN (pat, 0); i++)
10949 if (GET_CODE (XVECEXP (pat, 0, i)) == SET
10950 && !global_not_special_regno_p (REGNO (SET_SRC (XVECEXP (pat,
10951 0, i)))))
10952 RTX_FRAME_RELATED_P (XVECEXP (pat, 0, i)) = 1;
10953
10954 RTX_FRAME_RELATED_P (insn) = 1;
10955 }
10956 else if (last >= 6)
10957 {
10958 int start;
10959
10960 for (start = first >= 6 ? first : 6; start <= last; start++)
10961 if (!global_not_special_regno_p (start))
10962 break;
10963
10964 if (start > last)
10965 return insn;
10966
10967 addr = plus_constant (Pmode, base,
10968 offset + (start - first) * UNITS_PER_LONG);
10969
10970 if (start == last)
10971 {
10972 if (TARGET_64BIT)
10973 note = gen_movdi (gen_rtx_MEM (Pmode, addr),
10974 gen_rtx_REG (Pmode, start));
10975 else
10976 note = gen_movsi (gen_rtx_MEM (Pmode, addr),
10977 gen_rtx_REG (Pmode, start));
10978 note = PATTERN (note);
10979
10980 add_reg_note (insn, REG_FRAME_RELATED_EXPR, note);
10981 RTX_FRAME_RELATED_P (insn) = 1;
10982
10983 return insn;
10984 }
10985
10986 note = gen_store_multiple (gen_rtx_MEM (Pmode, addr),
10987 gen_rtx_REG (Pmode, start),
10988 GEN_INT (last - start + 1));
10989 note = PATTERN (note);
10990
10991 add_reg_note (insn, REG_FRAME_RELATED_EXPR, note);
10992
10993 for (i = 0; i < XVECLEN (note, 0); i++)
10994 if (GET_CODE (XVECEXP (note, 0, i)) == SET
10995 && !global_not_special_regno_p (REGNO (SET_SRC (XVECEXP (note,
10996 0, i)))))
10997 RTX_FRAME_RELATED_P (XVECEXP (note, 0, i)) = 1;
10998
10999 RTX_FRAME_RELATED_P (insn) = 1;
11000 }
11001
11002 return insn;
11003 }
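
/* Illustrative example (simplified): for a 64-bit stdarg function that
   has to save r2..r15 the code above emits a single

     stmg %r2,%r15,16(%r15)

   but attaches a REG_FRAME_RELATED_EXPR note describing only the
   stores of r6..r15, so no CFI is produced for the vararg registers
   r2..r5.  */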
11004
11005 /* Generate insn to restore registers FIRST to LAST from
11006 the register save area located at offset OFFSET
11007 relative to register BASE. */
11008
11009 static rtx
11010 restore_gprs (rtx base, int offset, int first, int last)
11011 {
11012 rtx addr, insn;
11013
11014 addr = plus_constant (Pmode, base, offset);
11015 addr = gen_rtx_MEM (Pmode, addr);
11016 set_mem_alias_set (addr, get_frame_alias_set ());
11017
11018 /* Special-case single register. */
11019 if (first == last)
11020 {
11021 if (TARGET_64BIT)
11022 insn = gen_movdi (gen_rtx_REG (Pmode, first), addr);
11023 else
11024 insn = gen_movsi (gen_rtx_REG (Pmode, first), addr);
11025
11026 RTX_FRAME_RELATED_P (insn) = 1;
11027 return insn;
11028 }
11029
11030 insn = gen_load_multiple (gen_rtx_REG (Pmode, first),
11031 addr,
11032 GEN_INT (last - first + 1));
11033 RTX_FRAME_RELATED_P (insn) = 1;
11034 return insn;
11035 }
11036
11037 /* Return insn sequence to load the GOT register. */
11038
11039 rtx_insn *
11040 s390_load_got (void)
11041 {
11042 rtx_insn *insns;
11043
11044   /* We cannot use pic_offset_table_rtx here, since this function is
11045      also used for non-PIC code when __tls_get_offset is called; in
11046      that case neither PIC_OFFSET_TABLE_REGNUM nor pic_offset_table_rtx
11047      is usable.  */
11048 rtx got_rtx = gen_rtx_REG (Pmode, 12);
11049
11050 start_sequence ();
11051
11052 if (TARGET_CPU_ZARCH)
11053 {
11054 emit_move_insn (got_rtx, s390_got_symbol ());
11055 }
11056 else
11057 {
11058 rtx offset;
11059
11060 offset = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, s390_got_symbol ()),
11061 UNSPEC_LTREL_OFFSET);
11062 offset = gen_rtx_CONST (Pmode, offset);
11063 offset = force_const_mem (Pmode, offset);
11064
11065 emit_move_insn (got_rtx, offset);
11066
11067 offset = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (offset, 0)),
11068 UNSPEC_LTREL_BASE);
11069 offset = gen_rtx_PLUS (Pmode, got_rtx, offset);
11070
11071 emit_move_insn (got_rtx, offset);
11072 }
11073
11074 insns = get_insns ();
11075 end_sequence ();
11076 return insns;
11077 }
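
/* Illustrative example: on z/Architecture the sequence above amounts
   to a single

     larl %r12,_GLOBAL_OFFSET_TABLE_

   while on pre-z900 (31-bit ESA) machines the GOT offset is loaded
   from the literal pool and added to the literal pool base, which is
   what the UNSPEC_LTREL_OFFSET / UNSPEC_LTREL_BASE pair expresses.  */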
11078
11079 /* This ties together stack memory (MEM with an alias set of frame_alias_set)
11080 and the change to the stack pointer. */
11081
11082 static void
11083 s390_emit_stack_tie (void)
11084 {
11085 rtx mem = gen_frame_mem (BLKmode,
11086 gen_rtx_REG (Pmode, STACK_POINTER_REGNUM));
11087
11088 emit_insn (gen_stack_tie (mem));
11089 }
11090
11091 /* Copy GPRS into FPR save slots. */
11092
11093 static void
11094 s390_save_gprs_to_fprs (void)
11095 {
11096 int i;
11097
11098 if (!TARGET_Z10 || !TARGET_HARD_FLOAT || !crtl->is_leaf)
11099 return;
11100
11101 for (i = 6; i < 16; i++)
11102 {
11103 if (FP_REGNO_P (cfun_gpr_save_slot (i)))
11104 {
11105 rtx_insn *insn =
11106 emit_move_insn (gen_rtx_REG (DImode, cfun_gpr_save_slot (i)),
11107 gen_rtx_REG (DImode, i));
11108 RTX_FRAME_RELATED_P (insn) = 1;
11109 	  /* This prevents dwarf2cfi from interpreting the set.  If it
11110 	     did, it might emit def_cfa_register notes setting an FPR as
11111 	     the new CFA.  */
11112 add_reg_note (insn, REG_CFA_REGISTER, copy_rtx (PATTERN (insn)));
11113 }
11114 }
11115 }
11116
11117 /* Restore GPRs from FPR save slots. */
11118
11119 static void
11120 s390_restore_gprs_from_fprs (void)
11121 {
11122 int i;
11123
11124 if (!TARGET_Z10 || !TARGET_HARD_FLOAT || !crtl->is_leaf)
11125 return;
11126
11127 for (i = 6; i < 16; i++)
11128 {
11129 rtx_insn *insn;
11130
11131 if (!FP_REGNO_P (cfun_gpr_save_slot (i)))
11132 continue;
11133
11134 rtx fpr = gen_rtx_REG (DImode, cfun_gpr_save_slot (i));
11135
11136 if (i == STACK_POINTER_REGNUM)
11137 insn = emit_insn (gen_stack_restore_from_fpr (fpr));
11138 else
11139 insn = emit_move_insn (gen_rtx_REG (DImode, i), fpr);
11140
11141 df_set_regs_ever_live (i, true);
11142 add_reg_note (insn, REG_CFA_RESTORE, gen_rtx_REG (DImode, i));
11143 if (i == STACK_POINTER_REGNUM)
11144 add_reg_note (insn, REG_CFA_DEF_CFA,
11145 plus_constant (Pmode, stack_pointer_rtx,
11146 STACK_POINTER_OFFSET));
11147 RTX_FRAME_RELATED_P (insn) = 1;
11148 }
11149 }
11150
11151
11152 /* A pass run immediately before shrink-wrapping and prologue and epilogue
11153 generation. */
11154
11155 namespace {
11156
11157 const pass_data pass_data_s390_early_mach =
11158 {
11159 RTL_PASS, /* type */
11160 "early_mach", /* name */
11161 OPTGROUP_NONE, /* optinfo_flags */
11162 TV_MACH_DEP, /* tv_id */
11163 0, /* properties_required */
11164 0, /* properties_provided */
11165 0, /* properties_destroyed */
11166 0, /* todo_flags_start */
11167 ( TODO_df_verify | TODO_df_finish ), /* todo_flags_finish */
11168 };
11169
11170 class pass_s390_early_mach : public rtl_opt_pass
11171 {
11172 public:
11173 pass_s390_early_mach (gcc::context *ctxt)
11174 : rtl_opt_pass (pass_data_s390_early_mach, ctxt)
11175 {}
11176
11177 /* opt_pass methods: */
11178 virtual unsigned int execute (function *);
11179
11180 }; // class pass_s390_early_mach
11181
11182 unsigned int
11183 pass_s390_early_mach::execute (function *fun)
11184 {
11185 rtx_insn *insn;
11186
11187 /* Try to get rid of the FPR clobbers. */
11188 s390_optimize_nonescaping_tx ();
11189
11190 /* Re-compute register info. */
11191 s390_register_info ();
11192
11193 /* If we're using a base register, ensure that it is always valid for
11194 the first non-prologue instruction. */
11195 if (fun->machine->base_reg)
11196 emit_insn_at_entry (gen_main_pool (fun->machine->base_reg));
11197
11198 /* Annotate all constant pool references to let the scheduler know
11199 they implicitly use the base register. */
11200 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
11201 if (INSN_P (insn))
11202 {
11203 annotate_constant_pool_refs (&PATTERN (insn));
11204 df_insn_rescan (insn);
11205 }
11206 return 0;
11207 }
11208
11209 } // anon namespace
11210
11211 /* Calculate TARGET = REG + OFFSET as s390_emit_prologue would do it.
11212    - push immediates that are too big to the literal pool and annotate the refs
11213    - emit frame-related notes for stack pointer changes.  */
11214
11215 static rtx
11216 s390_prologue_plus_offset (rtx target, rtx reg, rtx offset, bool frame_related_p)
11217 {
11218 rtx insn;
11219 rtx orig_offset = offset;
11220
11221 gcc_assert (REG_P (target));
11222 gcc_assert (REG_P (reg));
11223 gcc_assert (CONST_INT_P (offset));
11224
11225 if (offset == const0_rtx) /* lr/lgr */
11226 {
11227 insn = emit_move_insn (target, reg);
11228 }
11229 else if (DISP_IN_RANGE (INTVAL (offset))) /* la */
11230 {
11231 insn = emit_move_insn (target, gen_rtx_PLUS (Pmode, reg,
11232 offset));
11233 }
11234 else
11235 {
11236 if (!satisfies_constraint_K (offset) /* ahi/aghi */
11237 && (!TARGET_EXTIMM
11238 || (!satisfies_constraint_Op (offset) /* alfi/algfi */
11239 && !satisfies_constraint_On (offset)))) /* slfi/slgfi */
11240 offset = force_const_mem (Pmode, offset);
11241
11242 if (target != reg)
11243 {
11244 insn = emit_move_insn (target, reg);
11245 RTX_FRAME_RELATED_P (insn) = frame_related_p ? 1 : 0;
11246 }
11247
11248 insn = emit_insn (gen_add2_insn (target, offset));
11249
11250 if (!CONST_INT_P (offset))
11251 {
11252 annotate_constant_pool_refs (&PATTERN (insn));
11253
11254 if (frame_related_p)
11255 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
11256 gen_rtx_SET (target,
11257 gen_rtx_PLUS (Pmode, target,
11258 orig_offset)));
11259 }
11260 }
11261
11262 RTX_FRAME_RELATED_P (insn) = frame_related_p ? 1 : 0;
11263
11264 /* If this is a stack adjustment and we are generating a stack clash
11265 prologue, then add a REG_STACK_CHECK note to signal that this insn
11266 should be left alone. */
11267 if (flag_stack_clash_protection && target == stack_pointer_rtx)
11268 add_reg_note (insn, REG_STACK_CHECK, const0_rtx);
11269
11270 return insn;
11271 }
11272
11273 /* Emit a compare instruction with a volatile memory access as stack
11274 probe. It does not waste store tags and does not clobber any
11275 registers apart from the condition code. */
11276 static void
11277 s390_emit_stack_probe (rtx addr)
11278 {
11279 rtx tmp = gen_rtx_MEM (Pmode, addr);
11280 MEM_VOLATILE_P (tmp) = 1;
11281 s390_emit_compare (EQ, gen_rtx_REG (Pmode, 0), tmp);
11282 emit_insn (gen_blockage ());
11283 }
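
/* Illustrative example: for a 64-bit target the probe emitted above is
   just a compare of r0 against the probed word, e.g.

     cg %r0,4088(%r15)

   when probing 4088 bytes above the stack pointer; nothing is stored
   and only the condition code is clobbered.  */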
11284
11285 /* Use a runtime loop if we have to emit more probes than this. */
11286 #define MIN_UNROLL_PROBES 3
11287
11288 /* Allocate SIZE bytes of stack space, using TEMP_REG as a temporary
11289 if necessary. LAST_PROBE_OFFSET contains the offset of the closest
11290 probe relative to the stack pointer.
11291
11292 Note that SIZE is negative.
11293
11294 The return value is true if TEMP_REG has been clobbered. */
11295 static bool
11296 allocate_stack_space (rtx size, HOST_WIDE_INT last_probe_offset,
11297 rtx temp_reg)
11298 {
11299 bool temp_reg_clobbered_p = false;
11300 HOST_WIDE_INT probe_interval
11301 = 1 << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_PROBE_INTERVAL);
11302 HOST_WIDE_INT guard_size
11303 = 1 << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_GUARD_SIZE);
11304
11305 if (flag_stack_clash_protection)
11306 {
11307 if (last_probe_offset + -INTVAL (size) < guard_size)
11308 dump_stack_clash_frame_info (NO_PROBE_SMALL_FRAME, true);
11309 else
11310 {
11311 rtx offset = GEN_INT (probe_interval - UNITS_PER_LONG);
11312 HOST_WIDE_INT rounded_size = -INTVAL (size) & -probe_interval;
11313 HOST_WIDE_INT num_probes = rounded_size / probe_interval;
11314 HOST_WIDE_INT residual = -INTVAL (size) - rounded_size;
11315
11316 if (num_probes < MIN_UNROLL_PROBES)
11317 {
11318 /* Emit unrolled probe statements. */
11319
11320 for (unsigned int i = 0; i < num_probes; i++)
11321 {
11322 s390_prologue_plus_offset (stack_pointer_rtx,
11323 stack_pointer_rtx,
11324 GEN_INT (-probe_interval), true);
11325 s390_emit_stack_probe (gen_rtx_PLUS (Pmode,
11326 stack_pointer_rtx,
11327 offset));
11328 }
11329 dump_stack_clash_frame_info (PROBE_INLINE, residual != 0);
11330 }
11331 else
11332 {
11333 /* Emit a loop probing the pages. */
11334
11335 rtx_code_label *loop_start_label = gen_label_rtx ();
11336
11337 /* From now on temp_reg will be the CFA register. */
11338 s390_prologue_plus_offset (temp_reg, stack_pointer_rtx,
11339 GEN_INT (-rounded_size), true);
11340 emit_label (loop_start_label);
11341
11342 s390_prologue_plus_offset (stack_pointer_rtx,
11343 stack_pointer_rtx,
11344 GEN_INT (-probe_interval), false);
11345 s390_emit_stack_probe (gen_rtx_PLUS (Pmode,
11346 stack_pointer_rtx,
11347 offset));
11348 emit_cmp_and_jump_insns (stack_pointer_rtx, temp_reg,
11349 GT, NULL_RTX,
11350 Pmode, 1, loop_start_label);
11351
11352 /* Without this, make_edges ICEs. */
11353 JUMP_LABEL (get_last_insn ()) = loop_start_label;
11354 LABEL_NUSES (loop_start_label) = 1;
11355
11356 /* That's going to be a NOP since stack pointer and
11357 temp_reg are supposed to be the same here. We just
11358 emit it to set the CFA reg back to r15. */
11359 s390_prologue_plus_offset (stack_pointer_rtx, temp_reg,
11360 const0_rtx, true);
11361 temp_reg_clobbered_p = true;
11362 dump_stack_clash_frame_info (PROBE_LOOP, residual != 0);
11363 }
11364
11365 /* Handle any residual allocation request. */
11366 s390_prologue_plus_offset (stack_pointer_rtx,
11367 stack_pointer_rtx,
11368 GEN_INT (-residual), true);
11369 last_probe_offset += residual;
11370 if (last_probe_offset >= probe_interval)
11371 s390_emit_stack_probe (gen_rtx_PLUS (Pmode,
11372 stack_pointer_rtx,
11373 GEN_INT (residual
11374 - UNITS_PER_LONG)));
11375
11376 return temp_reg_clobbered_p;
11377 }
11378 }
11379
11380 /* Subtract frame size from stack pointer. */
11381 s390_prologue_plus_offset (stack_pointer_rtx,
11382 stack_pointer_rtx,
11383 size, true);
11384
11385 return temp_reg_clobbered_p;
11386 }
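
/* Editor's sketch of the two probing strategies above, assuming a 4 KiB
   probe interval (an example value, not read from the params here):

   8 KiB frame  -> 2 probes, emitted unrolled:

     aghi %r15,-4096
     cg   %r0,4088(%r15)          (twice)

   64 KiB frame -> probing loop, with TEMP_REG as the loop bound:

     lay  %rT,-65536(%r15)
   .Lprobe:
     aghi %r15,-4096
     cg   %r0,4088(%r15)
     cgrjh %r15,%rT,.Lprobe

   followed by the residual allocation, if any.  The mnemonics are an
   assumption; the structure (unrolled vs. loop, residual handling, the
   final CFA reset through TEMP_REG) mirrors the code above.  */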
11387
11388 /* Expand the prologue into a bunch of separate insns. */
11389
11390 void
11391 s390_emit_prologue (void)
11392 {
11393 rtx insn, addr;
11394 rtx temp_reg;
11395 int i;
11396 int offset;
11397 int next_fpr = 0;
11398
11399 /* Choose the best register to use as a temporary within the prologue.
11400 TPF with profiling must avoid register 14 - the tracing function
11401 needs the original contents of r14 to be preserved. */
11402
11403 if (!has_hard_reg_initial_val (Pmode, RETURN_REGNUM)
11404 && !crtl->is_leaf
11405 && !TARGET_TPF_PROFILING)
11406 temp_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
11407 else if (flag_split_stack && cfun->stdarg)
11408 temp_reg = gen_rtx_REG (Pmode, 12);
11409 else
11410 temp_reg = gen_rtx_REG (Pmode, 1);
11411
11412 /* When probing for stack-clash mitigation, we have to track the distance
11413 between the stack pointer and closest known reference.
11414
11415 Most of the time we have to make a worst case assumption. The
11416 only exception is when TARGET_BACKCHAIN is active, in which case
11417 we know *sp (offset 0) was written. */
11418 HOST_WIDE_INT probe_interval
11419 = 1 << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_PROBE_INTERVAL);
11420 HOST_WIDE_INT last_probe_offset
11421 = (TARGET_BACKCHAIN
11422 ? (TARGET_PACKED_STACK ? STACK_POINTER_OFFSET - UNITS_PER_LONG : 0)
11423 : probe_interval - (STACK_BOUNDARY / UNITS_PER_WORD));
11424
11425 s390_save_gprs_to_fprs ();
11426
11427 /* Save call saved gprs. */
11428 if (cfun_frame_layout.first_save_gpr != -1)
11429 {
11430 insn = save_gprs (stack_pointer_rtx,
11431 cfun_frame_layout.gprs_offset +
11432 UNITS_PER_LONG * (cfun_frame_layout.first_save_gpr
11433 - cfun_frame_layout.first_save_gpr_slot),
11434 cfun_frame_layout.first_save_gpr,
11435 cfun_frame_layout.last_save_gpr);
11436
11437 /* This is not 100% correct. If we have more than one register saved,
11438 then LAST_PROBE_OFFSET can move even closer to sp. */
11439 last_probe_offset
11440 = (cfun_frame_layout.gprs_offset +
11441 UNITS_PER_LONG * (cfun_frame_layout.first_save_gpr
11442 - cfun_frame_layout.first_save_gpr_slot));
11443
11444 emit_insn (insn);
11445 }
11446
11447 /* Dummy insn to mark literal pool slot. */
11448
11449 if (cfun->machine->base_reg)
11450 emit_insn (gen_main_pool (cfun->machine->base_reg));
11451
11452 offset = cfun_frame_layout.f0_offset;
11453
11454 /* Save f0 and f2. */
11455 for (i = FPR0_REGNUM; i <= FPR0_REGNUM + 1; i++)
11456 {
11457 if (cfun_fpr_save_p (i))
11458 {
11459 save_fpr (stack_pointer_rtx, offset, i);
11460 if (offset < last_probe_offset)
11461 last_probe_offset = offset;
11462 offset += 8;
11463 }
11464 else if (!TARGET_PACKED_STACK || cfun->stdarg)
11465 offset += 8;
11466 }
11467
11468 /* Save f4 and f6. */
11469 offset = cfun_frame_layout.f4_offset;
11470 for (i = FPR4_REGNUM; i <= FPR4_REGNUM + 1; i++)
11471 {
11472 if (cfun_fpr_save_p (i))
11473 {
11474 insn = save_fpr (stack_pointer_rtx, offset, i);
11475 if (offset < last_probe_offset)
11476 last_probe_offset = offset;
11477 offset += 8;
11478
11479 /* If f4 and f6 are call clobbered, they are saved due to
11480 stdarg and therefore are not frame related. */
11481 if (!call_really_used_regs[i])
11482 RTX_FRAME_RELATED_P (insn) = 1;
11483 }
11484 else if (!TARGET_PACKED_STACK || call_really_used_regs[i])
11485 offset += 8;
11486 }
11487
11488 if (TARGET_PACKED_STACK
11489 && cfun_save_high_fprs_p
11490 && cfun_frame_layout.f8_offset + cfun_frame_layout.high_fprs * 8 > 0)
11491 {
11492 offset = (cfun_frame_layout.f8_offset
11493 + (cfun_frame_layout.high_fprs - 1) * 8);
11494
11495 for (i = FPR15_REGNUM; i >= FPR8_REGNUM && offset >= 0; i--)
11496 if (cfun_fpr_save_p (i))
11497 {
11498 insn = save_fpr (stack_pointer_rtx, offset, i);
11499 if (offset < last_probe_offset)
11500 last_probe_offset = offset;
11501
11502 RTX_FRAME_RELATED_P (insn) = 1;
11503 offset -= 8;
11504 }
11505 if (offset >= cfun_frame_layout.f8_offset)
11506 next_fpr = i;
11507 }
11508
11509 if (!TARGET_PACKED_STACK)
11510 next_fpr = cfun_save_high_fprs_p ? FPR15_REGNUM : 0;
11511
11512 if (flag_stack_usage_info)
11513 current_function_static_stack_size = cfun_frame_layout.frame_size;
11514
11515 /* Decrement stack pointer. */
11516
11517 if (cfun_frame_layout.frame_size > 0)
11518 {
11519 rtx frame_off = GEN_INT (-cfun_frame_layout.frame_size);
11520 rtx_insn *stack_pointer_backup_loc;
11521 bool temp_reg_clobbered_p;
11522
11523 if (s390_stack_size)
11524 {
11525 HOST_WIDE_INT stack_guard;
11526
11527 if (s390_stack_guard)
11528 stack_guard = s390_stack_guard;
11529 else
11530 {
11531 /* If no value for the stack guard is provided, the smallest power of 2
11532 larger than the current frame size is chosen. */
11533 stack_guard = 1;
11534 while (stack_guard < cfun_frame_layout.frame_size)
11535 stack_guard <<= 1;
11536 }
11537
11538 if (cfun_frame_layout.frame_size >= s390_stack_size)
11539 {
11540 warning (0, "frame size of function %qs is %wd"
11541 " bytes exceeding user provided stack limit of "
11542 "%d bytes. "
11543 "An unconditional trap is added.",
11544 current_function_name(), cfun_frame_layout.frame_size,
11545 s390_stack_size);
11546 emit_insn (gen_trap ());
11547 emit_barrier ();
11548 }
11549 else
11550 {
11551 /* stack_guard has to be smaller than s390_stack_size.
11552 Otherwise we would emit an AND with zero which would
11553 not match the test under mask pattern. */
11554 if (stack_guard >= s390_stack_size)
11555 {
11556 warning (0, "frame size of function %qs is %wd"
11557 " bytes which is more than half the stack size. "
11558 "The dynamic check would not be reliable. "
11559 "No check emitted for this function.",
11560 current_function_name(),
11561 cfun_frame_layout.frame_size);
11562 }
11563 else
11564 {
11565 HOST_WIDE_INT stack_check_mask = ((s390_stack_size - 1)
11566 & ~(stack_guard - 1));
11567
11568 rtx t = gen_rtx_AND (Pmode, stack_pointer_rtx,
11569 GEN_INT (stack_check_mask));
11570 if (TARGET_64BIT)
11571 emit_insn (gen_ctrapdi4 (gen_rtx_EQ (VOIDmode,
11572 t, const0_rtx),
11573 t, const0_rtx, const0_rtx));
11574 else
11575 emit_insn (gen_ctrapsi4 (gen_rtx_EQ (VOIDmode,
11576 t, const0_rtx),
11577 t, const0_rtx, const0_rtx));
11578 }
11579 }
11580 }
11581
11582 if (s390_warn_framesize > 0
11583 && cfun_frame_layout.frame_size >= s390_warn_framesize)
11584 warning (0, "frame size of %qs is %wd bytes",
11585 current_function_name (), cfun_frame_layout.frame_size);
11586
11587 if (s390_warn_dynamicstack_p && cfun->calls_alloca)
11588 warning (0, "%qs uses dynamic stack allocation", current_function_name ());
11589
11590 /* Save the location where we could backup the incoming stack
11591 pointer. */
11592 stack_pointer_backup_loc = get_last_insn ();
11593
11594 temp_reg_clobbered_p = allocate_stack_space (frame_off, last_probe_offset,
11595 temp_reg);
11596
11597 if (TARGET_BACKCHAIN || next_fpr)
11598 {
11599 if (temp_reg_clobbered_p)
11600 {
11601 /* allocate_stack_space had to make use of temp_reg and
11602 we need it to hold a backup of the incoming stack
11603 pointer. Calculate back that value from the current
11604 stack pointer. */
11605 s390_prologue_plus_offset (temp_reg, stack_pointer_rtx,
11606 GEN_INT (cfun_frame_layout.frame_size),
11607 false);
11608 }
11609 else
11610 {
11611 /* allocate_stack_space didn't actually require
11612 temp_reg. Insert the stack pointer backup insn
11613 before the stack pointer decrement code - knowing now
11614 that the value will survive. */
11615 emit_insn_after (gen_move_insn (temp_reg, stack_pointer_rtx),
11616 stack_pointer_backup_loc);
11617 }
11618 }
11619
11620 /* Set backchain. */
11621
11622 if (TARGET_BACKCHAIN)
11623 {
11624 if (cfun_frame_layout.backchain_offset)
11625 addr = gen_rtx_MEM (Pmode,
11626 plus_constant (Pmode, stack_pointer_rtx,
11627 cfun_frame_layout.backchain_offset));
11628 else
11629 addr = gen_rtx_MEM (Pmode, stack_pointer_rtx);
11630 set_mem_alias_set (addr, get_frame_alias_set ());
11631 insn = emit_insn (gen_move_insn (addr, temp_reg));
11632 }
11633
11634 /* If we support non-call exceptions (e.g. for Java),
11635 we need to make sure the backchain pointer is set up
11636 before any possibly trapping memory access. */
11637 if (TARGET_BACKCHAIN && cfun->can_throw_non_call_exceptions)
11638 {
11639 addr = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (VOIDmode));
11640 emit_clobber (addr);
11641 }
11642 }
11643 else if (flag_stack_clash_protection)
11644 dump_stack_clash_frame_info (NO_PROBE_NO_FRAME, false);
11645
11646 /* Save fprs 8 - 15 (64 bit ABI). */
11647
11648 if (cfun_save_high_fprs_p && next_fpr)
11649 {
11650 /* If the stack might be accessed through a different register
11651 we have to make sure that the stack pointer decrement is not
11652 moved below the use of the stack slots. */
11653 s390_emit_stack_tie ();
11654
11655 insn = emit_insn (gen_add2_insn (temp_reg,
11656 GEN_INT (cfun_frame_layout.f8_offset)));
11657
11658 offset = 0;
11659
11660 for (i = FPR8_REGNUM; i <= next_fpr; i++)
11661 if (cfun_fpr_save_p (i))
11662 {
11663 rtx addr = plus_constant (Pmode, stack_pointer_rtx,
11664 cfun_frame_layout.frame_size
11665 + cfun_frame_layout.f8_offset
11666 + offset);
11667
11668 insn = save_fpr (temp_reg, offset, i);
11669 offset += 8;
11670 RTX_FRAME_RELATED_P (insn) = 1;
11671 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
11672 gen_rtx_SET (gen_rtx_MEM (DFmode, addr),
11673 gen_rtx_REG (DFmode, i)));
11674 }
11675 }
11676
11677 /* Set frame pointer, if needed. */
11678
11679 if (frame_pointer_needed)
11680 {
11681 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
11682 RTX_FRAME_RELATED_P (insn) = 1;
11683 }
11684
11685 /* Set up got pointer, if needed. */
11686
11687 if (flag_pic && df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM))
11688 {
11689 rtx_insn *insns = s390_load_got ();
11690
11691 for (rtx_insn *insn = insns; insn; insn = NEXT_INSN (insn))
11692 annotate_constant_pool_refs (&PATTERN (insn));
11693
11694 emit_insn (insns);
11695 }
11696
11697 if (TARGET_TPF_PROFILING)
11698 {
11699 /* Generate a BAS instruction to serve as a function
11700 entry intercept to facilitate the use of tracing
11701 algorithms located at the branch target. */
11702 emit_insn (gen_prologue_tpf ());
11703
11704 /* Emit a blockage here so that all code
11705 lies between the profiling mechanisms. */
11706 emit_insn (gen_blockage ());
11707 }
11708 }
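
/* A concrete, editor-supplied illustration: for a simple non-leaf 64-bit
   function with the minimal 160 byte frame and no FPR saves, the expansion
   above typically reduces to

     stmg %r14,%r15,112(%r15)   # save_gprs for the call-saved GPRs used
     aghi %r15,-160             # allocate_stack_space

   plus the frame-related notes attached along the way.  Which registers
   and offsets appear depends on the frame layout computed earlier; the
   numbers here are just a plausible example.  */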
11709
11710 /* Expand the epilogue into a bunch of separate insns. */
11711
11712 void
11713 s390_emit_epilogue (bool sibcall)
11714 {
11715 rtx frame_pointer, return_reg, cfa_restores = NULL_RTX;
11716 int area_bottom, area_top, offset = 0;
11717 int next_offset;
11718 int i;
11719
11720 if (TARGET_TPF_PROFILING)
11721 {
11722
11723 /* Generate a BAS instruction to serve as a function
11724 exit intercept to facilitate the use of tracing
11725 algorithms located at the branch target. */
11726
11727 /* Emit a blockage here so that all code
11728 lies between the profiling mechanisms. */
11729 emit_insn (gen_blockage ());
11730
11731 emit_insn (gen_epilogue_tpf ());
11732 }
11733
11734 /* Check whether to use frame or stack pointer for restore. */
11735
11736 frame_pointer = (frame_pointer_needed
11737 ? hard_frame_pointer_rtx : stack_pointer_rtx);
11738
11739 s390_frame_area (&area_bottom, &area_top);
11740
11741 /* Check whether we can access the register save area.
11742 If not, increment the frame pointer as required. */
11743
11744 if (area_top <= area_bottom)
11745 {
11746 /* Nothing to restore. */
11747 }
11748 else if (DISP_IN_RANGE (cfun_frame_layout.frame_size + area_bottom)
11749 && DISP_IN_RANGE (cfun_frame_layout.frame_size + area_top - 1))
11750 {
11751 /* Area is in range. */
11752 offset = cfun_frame_layout.frame_size;
11753 }
11754 else
11755 {
11756 rtx insn, frame_off, cfa;
11757
11758 offset = area_bottom < 0 ? -area_bottom : 0;
11759 frame_off = GEN_INT (cfun_frame_layout.frame_size - offset);
11760
11761 cfa = gen_rtx_SET (frame_pointer,
11762 gen_rtx_PLUS (Pmode, frame_pointer, frame_off));
11763 if (DISP_IN_RANGE (INTVAL (frame_off)))
11764 {
11765 insn = gen_rtx_SET (frame_pointer,
11766 gen_rtx_PLUS (Pmode, frame_pointer, frame_off));
11767 insn = emit_insn (insn);
11768 }
11769 else
11770 {
11771 if (!CONST_OK_FOR_K (INTVAL (frame_off)))
11772 frame_off = force_const_mem (Pmode, frame_off);
11773
11774 insn = emit_insn (gen_add2_insn (frame_pointer, frame_off));
11775 annotate_constant_pool_refs (&PATTERN (insn));
11776 }
11777 add_reg_note (insn, REG_CFA_ADJUST_CFA, cfa);
11778 RTX_FRAME_RELATED_P (insn) = 1;
11779 }
11780
11781 /* Restore call saved fprs. */
11782
11783 if (TARGET_64BIT)
11784 {
11785 if (cfun_save_high_fprs_p)
11786 {
11787 next_offset = cfun_frame_layout.f8_offset;
11788 for (i = FPR8_REGNUM; i <= FPR15_REGNUM; i++)
11789 {
11790 if (cfun_fpr_save_p (i))
11791 {
11792 restore_fpr (frame_pointer,
11793 offset + next_offset, i);
11794 cfa_restores
11795 = alloc_reg_note (REG_CFA_RESTORE,
11796 gen_rtx_REG (DFmode, i), cfa_restores);
11797 next_offset += 8;
11798 }
11799 }
11800 }
11801
11802 }
11803 else
11804 {
11805 next_offset = cfun_frame_layout.f4_offset;
11806 /* f4, f6 */
11807 for (i = FPR4_REGNUM; i <= FPR4_REGNUM + 1; i++)
11808 {
11809 if (cfun_fpr_save_p (i))
11810 {
11811 restore_fpr (frame_pointer,
11812 offset + next_offset, i);
11813 cfa_restores
11814 = alloc_reg_note (REG_CFA_RESTORE,
11815 gen_rtx_REG (DFmode, i), cfa_restores);
11816 next_offset += 8;
11817 }
11818 else if (!TARGET_PACKED_STACK)
11819 next_offset += 8;
11820 }
11821
11822 }
11823
11824 /* Return register. */
11825
11826 return_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
11827
11828 /* Restore call saved gprs. */
11829
11830 if (cfun_frame_layout.first_restore_gpr != -1)
11831 {
11832 rtx insn, addr;
11833 int i;
11834
11835 /* Check for global registers and save them
11836 to the stack locations from which they get restored. */
11837
11838 for (i = cfun_frame_layout.first_restore_gpr;
11839 i <= cfun_frame_layout.last_restore_gpr;
11840 i++)
11841 {
11842 if (global_not_special_regno_p (i))
11843 {
11844 addr = plus_constant (Pmode, frame_pointer,
11845 offset + cfun_frame_layout.gprs_offset
11846 + (i - cfun_frame_layout.first_save_gpr_slot)
11847 * UNITS_PER_LONG);
11848 addr = gen_rtx_MEM (Pmode, addr);
11849 set_mem_alias_set (addr, get_frame_alias_set ());
11850 emit_move_insn (addr, gen_rtx_REG (Pmode, i));
11851 }
11852 else
11853 cfa_restores
11854 = alloc_reg_note (REG_CFA_RESTORE,
11855 gen_rtx_REG (Pmode, i), cfa_restores);
11856 }
11857
11858 /* Fetch the return address from the stack before the load multiple;
11859 this is good for scheduling.
11860
11861 Only do this if we already decided that r14 needs to be
11862 saved to a stack slot. (And not just because r14 happens to
11863 be in between two GPRs which need saving.) Otherwise it
11864 would be difficult to take that decision back in
11865 s390_optimize_prologue.
11866
11867 This optimization is only helpful on in-order machines. */
11868 if (! sibcall
11869 && cfun_gpr_save_slot (RETURN_REGNUM) == SAVE_SLOT_STACK
11870 && s390_tune <= PROCESSOR_2097_Z10)
11871 {
11872 int return_regnum = find_unused_clobbered_reg();
11873 if (!return_regnum
11874 || (TARGET_INDIRECT_BRANCH_NOBP_RET_OPTION
11875 && !TARGET_CPU_Z10
11876 && return_regnum == INDIRECT_BRANCH_THUNK_REGNUM))
11877 {
11878 gcc_assert (INDIRECT_BRANCH_THUNK_REGNUM != 4);
11879 return_regnum = 4;
11880 }
11881 return_reg = gen_rtx_REG (Pmode, return_regnum);
11882
11883 addr = plus_constant (Pmode, frame_pointer,
11884 offset + cfun_frame_layout.gprs_offset
11885 + (RETURN_REGNUM
11886 - cfun_frame_layout.first_save_gpr_slot)
11887 * UNITS_PER_LONG);
11888 addr = gen_rtx_MEM (Pmode, addr);
11889 set_mem_alias_set (addr, get_frame_alias_set ());
11890 emit_move_insn (return_reg, addr);
11891
11892 /* Once we did that optimization we have to make sure
11893 s390_optimize_prologue does not try to remove the store
11894 of r14 since we will not be able to find the load issued
11895 here. */
11896 cfun_frame_layout.save_return_addr_p = true;
11897 }
11898
11899 insn = restore_gprs (frame_pointer,
11900 offset + cfun_frame_layout.gprs_offset
11901 + (cfun_frame_layout.first_restore_gpr
11902 - cfun_frame_layout.first_save_gpr_slot)
11903 * UNITS_PER_LONG,
11904 cfun_frame_layout.first_restore_gpr,
11905 cfun_frame_layout.last_restore_gpr);
11906 insn = emit_insn (insn);
11907 REG_NOTES (insn) = cfa_restores;
11908 add_reg_note (insn, REG_CFA_DEF_CFA,
11909 plus_constant (Pmode, stack_pointer_rtx,
11910 STACK_POINTER_OFFSET));
11911 RTX_FRAME_RELATED_P (insn) = 1;
11912 }
11913
11914 s390_restore_gprs_from_fprs ();
11915
11916 if (! sibcall)
11917 emit_jump_insn (gen_return_use (return_reg));
11918 }
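
/* Editor's illustration, matching the prologue example further up: for the
   same 160 byte frame the epilogue typically reduces to

     lmg %r14,%r15,272(%r15)    # restore_gprs, 160 + 112 = 272
     br  %r14                   # gen_return_use

   On in-order CPUs the optimization above first loads the saved return
   address into a scratch register with a separate lg and branches through
   that register instead.  Offsets are an example only.  */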
11919
11920 /* Implement TARGET_SET_UP_BY_PROLOGUE. */
11921
11922 static void
11923 s300_set_up_by_prologue (hard_reg_set_container *regs)
11924 {
11925 if (cfun->machine->base_reg
11926 && !call_really_used_regs[REGNO (cfun->machine->base_reg)])
11927 SET_HARD_REG_BIT (regs->set, REGNO (cfun->machine->base_reg));
11928 }
11929
11930 /* -fsplit-stack support. */
11931
11932 /* A SYMBOL_REF for __morestack. */
11933 static GTY(()) rtx morestack_ref;
11934
11935 /* When using -fsplit-stack, the allocation routines set a field in
11936 the TCB to the bottom of the stack plus this much space, measured
11937 in bytes. */
11938
11939 #define SPLIT_STACK_AVAILABLE 1024
11940
11941 /* Emit -fsplit-stack prologue, which goes before the regular function
11942 prologue. */
11943
11944 void
11945 s390_expand_split_stack_prologue (void)
11946 {
11947 rtx r1, guard, cc = NULL;
11948 rtx_insn *insn;
11949 /* Offset from thread pointer to __private_ss. */
11950 int psso = TARGET_64BIT ? 0x38 : 0x20;
11951 /* Pointer size in bytes. */
11952 /* Frame size and argument size - the two parameters to __morestack. */
11953 HOST_WIDE_INT frame_size = cfun_frame_layout.frame_size;
11954 /* Align argument size to 8 bytes - simplifies __morestack code. */
11955 HOST_WIDE_INT args_size = crtl->args.size >= 0
11956 ? ((crtl->args.size + 7) & ~7)
11957 : 0;
11958 /* Label to be called by __morestack. */
11959 rtx_code_label *call_done = NULL;
11960 rtx_code_label *parm_base = NULL;
11961 rtx tmp;
11962
11963 gcc_assert (flag_split_stack && reload_completed);
11964 if (!TARGET_CPU_ZARCH)
11965 {
11966 sorry ("CPUs older than z900 are not supported for -fsplit-stack");
11967 return;
11968 }
11969
11970 r1 = gen_rtx_REG (Pmode, 1);
11971
11972 /* If no stack frame will be allocated, don't do anything. */
11973 if (!frame_size)
11974 {
11975 if (cfun->machine->split_stack_varargs_pointer != NULL_RTX)
11976 {
11977 /* If va_start is used, just use r15. */
11978 emit_move_insn (r1,
11979 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
11980 GEN_INT (STACK_POINTER_OFFSET)));
11981
11982 }
11983 return;
11984 }
11985
11986 if (morestack_ref == NULL_RTX)
11987 {
11988 morestack_ref = gen_rtx_SYMBOL_REF (Pmode, "__morestack");
11989 SYMBOL_REF_FLAGS (morestack_ref) |= (SYMBOL_FLAG_LOCAL
11990 | SYMBOL_FLAG_FUNCTION);
11991 }
11992
11993 if (CONST_OK_FOR_K (frame_size) || CONST_OK_FOR_Op (frame_size))
11994 {
11995 /* If frame_size will fit in an add instruction, do a stack space
11996 check, and only call __morestack if there's not enough space. */
11997
11998 /* Get thread pointer. r1 is the only register we can always destroy - r0
11999 could contain a static chain (and cannot be used to address memory
12000 anyway), r2-r6 can contain parameters, and r6-r15 are callee-saved. */
12001 emit_move_insn (r1, gen_rtx_REG (Pmode, TP_REGNUM));
12002 /* Aim at __private_ss. */
12003 guard = gen_rtx_MEM (Pmode, plus_constant (Pmode, r1, psso));
12004
12005 /* If less than 1kiB is used, skip the addition and compare directly
12006 with __private_ss. */
12007 if (frame_size > SPLIT_STACK_AVAILABLE)
12008 {
12009 emit_move_insn (r1, guard);
12010 if (TARGET_64BIT)
12011 emit_insn (gen_adddi3 (r1, r1, GEN_INT (frame_size)));
12012 else
12013 emit_insn (gen_addsi3 (r1, r1, GEN_INT (frame_size)));
12014 guard = r1;
12015 }
12016
12017 /* Compare the (maybe adjusted) guard with the stack pointer. */
12018 cc = s390_emit_compare (LT, stack_pointer_rtx, guard);
12019 }
12020
12021 call_done = gen_label_rtx ();
12022 parm_base = gen_label_rtx ();
12023
12024 /* Emit the parameter block. */
12025 tmp = gen_split_stack_data (parm_base, call_done,
12026 GEN_INT (frame_size),
12027 GEN_INT (args_size));
12028 insn = emit_insn (tmp);
12029 add_reg_note (insn, REG_LABEL_OPERAND, call_done);
12030 LABEL_NUSES (call_done)++;
12031 add_reg_note (insn, REG_LABEL_OPERAND, parm_base);
12032 LABEL_NUSES (parm_base)++;
12033
12034 /* %r1 = litbase. */
12035 insn = emit_move_insn (r1, gen_rtx_LABEL_REF (VOIDmode, parm_base));
12036 add_reg_note (insn, REG_LABEL_OPERAND, parm_base);
12037 LABEL_NUSES (parm_base)++;
12038
12039 /* Now we need to call __morestack. It has very special calling
12040 conventions: it preserves the param/return/static chain registers for
12041 calling the main function body, and looks for its own parameters at %r1. */
12042
12043 if (cc != NULL)
12044 {
12045 tmp = gen_split_stack_cond_call (morestack_ref, cc, call_done);
12046
12047 insn = emit_jump_insn (tmp);
12048 JUMP_LABEL (insn) = call_done;
12049 LABEL_NUSES (call_done)++;
12050
12051 /* Mark the jump as very unlikely to be taken. */
12052 add_reg_br_prob_note (insn,
12053 profile_probability::very_unlikely ());
12054
12055 if (cfun->machine->split_stack_varargs_pointer != NULL_RTX)
12056 {
12057 /* If va_start is used, and __morestack was not called, just use
12058 r15. */
12059 emit_move_insn (r1,
12060 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
12061 GEN_INT (STACK_POINTER_OFFSET)));
12062 }
12063 }
12064 else
12065 {
12066 tmp = gen_split_stack_call (morestack_ref, call_done);
12067 insn = emit_jump_insn (tmp);
12068 JUMP_LABEL (insn) = call_done;
12069 LABEL_NUSES (call_done)++;
12070 emit_barrier ();
12071 }
12072
12073 /* __morestack will call us here. */
12074
12075 emit_label (call_done);
12076 }
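
/* Editor's summary of the protocol implemented above for the cc != NULL
   case (FRAME_SIZE fits an add instruction), in pseudo code; the actual
   insns come from the split_stack_* patterns:

     guard = *(thread_pointer + psso);        // __private_ss from the TCB
     if (frame_size > SPLIT_STACK_AVAILABLE)
       guard += frame_size;
     if (%r15 < guard)                        // unlikely: not enough stack
       call __morestack;                      // %r1 -> parameter block;
                                              // returns at call_done
     else if (va_start is used)
       %r1 = %r15 + STACK_POINTER_OFFSET;     // incoming argument area
   call_done:
     ...                                      // regular prologue follows

   This is a sketch of the intent, not a literal transcription of the
   emitted RTL.  */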
12077
12078 /* We may have to tell the dataflow pass that the split stack prologue
12079 is initializing a register. */
12080
12081 static void
12082 s390_live_on_entry (bitmap regs)
12083 {
12084 if (cfun->machine->split_stack_varargs_pointer != NULL_RTX)
12085 {
12086 gcc_assert (flag_split_stack);
12087 bitmap_set_bit (regs, 1);
12088 }
12089 }
12090
12091 /* Return true if the function can use simple_return to return outside
12092 of a shrink-wrapped region. At present shrink-wrapping is supported
12093 in all cases. */
12094
12095 bool
12096 s390_can_use_simple_return_insn (void)
12097 {
12098 return true;
12099 }
12100
12101 /* Return true if the epilogue is guaranteed to contain only a return
12102 instruction and if a direct return can therefore be used instead.
12103 One of the main advantages of using direct return instructions
12104 is that we can then use conditional returns. */
12105
12106 bool
12107 s390_can_use_return_insn (void)
12108 {
12109 int i;
12110
12111 if (!reload_completed)
12112 return false;
12113
12114 if (crtl->profile)
12115 return false;
12116
12117 if (TARGET_TPF_PROFILING)
12118 return false;
12119
12120 for (i = 0; i < 16; i++)
12121 if (cfun_gpr_save_slot (i) != SAVE_SLOT_NONE)
12122 return false;
12123
12124 /* For 31 bit this is not covered by the frame_size check below
12125 since f4, f6 are saved in the register save area without needing
12126 additional stack space. */
12127 if (!TARGET_64BIT
12128 && (cfun_fpr_save_p (FPR4_REGNUM) || cfun_fpr_save_p (FPR6_REGNUM)))
12129 return false;
12130
12131 if (cfun->machine->base_reg
12132 && !call_really_used_regs[REGNO (cfun->machine->base_reg)])
12133 return false;
12134
12135 return cfun_frame_layout.frame_size == 0;
12136 }
12137
12138 /* The VX ABI differs for vararg functions. Therefore we need the
12139 prototype of the callee to be available when passing vector type
12140 values. */
12141 static const char *
12142 s390_invalid_arg_for_unprototyped_fn (const_tree typelist, const_tree funcdecl, const_tree val)
12143 {
12144 return ((TARGET_VX_ABI
12145 && typelist == 0
12146 && VECTOR_TYPE_P (TREE_TYPE (val))
12147 && (funcdecl == NULL_TREE
12148 || (TREE_CODE (funcdecl) == FUNCTION_DECL
12149 && DECL_BUILT_IN_CLASS (funcdecl) != BUILT_IN_MD)))
12150 ? N_("vector argument passed to unprototyped function")
12151 : NULL);
12152 }
12153
12154
12155 /* Return the size in bytes of a function argument of
12156 type TYPE and/or mode MODE. At least one of TYPE or
12157 MODE must be specified. */
12158
12159 static int
12160 s390_function_arg_size (machine_mode mode, const_tree type)
12161 {
12162 if (type)
12163 return int_size_in_bytes (type);
12164
12165 /* No type info available for some library calls ... */
12166 if (mode != BLKmode)
12167 return GET_MODE_SIZE (mode);
12168
12169 /* If we have neither type nor mode, abort. */
12170 gcc_unreachable ();
12171 }
12172
12173 /* Return true if a function argument of type TYPE and mode MODE
12174 is to be passed in a vector register, if available. */
12175
12176 bool
12177 s390_function_arg_vector (machine_mode mode, const_tree type)
12178 {
12179 if (!TARGET_VX_ABI)
12180 return false;
12181
12182 if (s390_function_arg_size (mode, type) > 16)
12183 return false;
12184
12185 /* No type info available for some library calls ... */
12186 if (!type)
12187 return VECTOR_MODE_P (mode);
12188
12189 /* The ABI says that record types with a single member are treated
12190 just like that member would be. */
12191 while (TREE_CODE (type) == RECORD_TYPE)
12192 {
12193 tree field, single = NULL_TREE;
12194
12195 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
12196 {
12197 if (TREE_CODE (field) != FIELD_DECL)
12198 continue;
12199
12200 if (single == NULL_TREE)
12201 single = TREE_TYPE (field);
12202 else
12203 return false;
12204 }
12205
12206 if (single == NULL_TREE)
12207 return false;
12208 else
12209 {
12210 /* If the field declaration adds extra bytes due to
12211 e.g. padding, this is not accepted as a vector type. */
12212 if (int_size_in_bytes (single) <= 0
12213 || int_size_in_bytes (single) != int_size_in_bytes (type))
12214 return false;
12215 type = single;
12216 }
12217 }
12218
12219 return VECTOR_TYPE_P (type);
12220 }
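
/* Editor's example of the single-member-record rule above:

     typedef int v4si __attribute__ ((vector_size (16)));
     struct wrapped { v4si v; };         // passed like v4si, i.e. in a VR
     struct padded  { v4si v; int i; };  // falls back to the normal rules

   assuming the VX ABI is in effect; otherwise neither is passed in a
   vector register.  */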
12221
12222 /* Return true if a function argument of type TYPE and mode MODE
12223 is to be passed in a floating-point register, if available. */
12224
12225 static bool
12226 s390_function_arg_float (machine_mode mode, const_tree type)
12227 {
12228 if (s390_function_arg_size (mode, type) > 8)
12229 return false;
12230
12231 /* Soft-float changes the ABI: no floating-point registers are used. */
12232 if (TARGET_SOFT_FLOAT)
12233 return false;
12234
12235 /* No type info available for some library calls ... */
12236 if (!type)
12237 return mode == SFmode || mode == DFmode || mode == SDmode || mode == DDmode;
12238
12239 /* The ABI says that record types with a single member are treated
12240 just like that member would be. */
12241 while (TREE_CODE (type) == RECORD_TYPE)
12242 {
12243 tree field, single = NULL_TREE;
12244
12245 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
12246 {
12247 if (TREE_CODE (field) != FIELD_DECL)
12248 continue;
12249
12250 if (single == NULL_TREE)
12251 single = TREE_TYPE (field);
12252 else
12253 return false;
12254 }
12255
12256 if (single == NULL_TREE)
12257 return false;
12258 else
12259 type = single;
12260 }
12261
12262 return TREE_CODE (type) == REAL_TYPE;
12263 }
12264
12265 /* Return true if a function argument of type TYPE and mode MODE
12266 is to be passed in an integer register, or a pair of integer
12267 registers, if available. */
12268
12269 static bool
12270 s390_function_arg_integer (machine_mode mode, const_tree type)
12271 {
12272 int size = s390_function_arg_size (mode, type);
12273 if (size > 8)
12274 return false;
12275
12276 /* No type info available for some library calls ... */
12277 if (!type)
12278 return GET_MODE_CLASS (mode) == MODE_INT
12279 || (TARGET_SOFT_FLOAT && SCALAR_FLOAT_MODE_P (mode));
12280
12281 /* We accept small integral (and similar) types. */
12282 if (INTEGRAL_TYPE_P (type)
12283 || POINTER_TYPE_P (type)
12284 || TREE_CODE (type) == NULLPTR_TYPE
12285 || TREE_CODE (type) == OFFSET_TYPE
12286 || (TARGET_SOFT_FLOAT && TREE_CODE (type) == REAL_TYPE))
12287 return true;
12288
12289 /* We also accept structs of size 1, 2, 4, 8 that are not
12290 passed in floating-point registers. */
12291 if (AGGREGATE_TYPE_P (type)
12292 && exact_log2 (size) >= 0
12293 && !s390_function_arg_float (mode, type))
12294 return true;
12295
12296 return false;
12297 }
12298
12299 /* Return 1 if a function argument of type TYPE and mode MODE
12300 is to be passed by reference. The ABI specifies that only
12301 structures of size 1, 2, 4, or 8 bytes are passed by value,
12302 all other structures (and complex numbers) are passed by
12303 reference. */
12304
12305 static bool
12306 s390_pass_by_reference (cumulative_args_t ca ATTRIBUTE_UNUSED,
12307 machine_mode mode, const_tree type,
12308 bool named ATTRIBUTE_UNUSED)
12309 {
12310 int size = s390_function_arg_size (mode, type);
12311
12312 if (s390_function_arg_vector (mode, type))
12313 return false;
12314
12315 if (size > 8)
12316 return true;
12317
12318 if (type)
12319 {
12320 if (AGGREGATE_TYPE_P (type) && exact_log2 (size) < 0)
12321 return true;
12322
12323 if (TREE_CODE (type) == COMPLEX_TYPE
12324 || TREE_CODE (type) == VECTOR_TYPE)
12325 return true;
12326 }
12327
12328 return false;
12329 }
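
/* Editor's example of the rule described above:

     struct s8  { char c[8];  };   // size 8, a power of two -> by value
     struct s12 { char c[12]; };   // size 12                -> by reference
     _Complex double z;            // complex                -> by reference

   "By reference" means the caller passes the address of a copy; only
   1, 2, 4 and 8 byte aggregates travel by value.  */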
12330
12331 /* Update the data in CUM to advance over an argument of mode MODE and
12332 data type TYPE. (TYPE is null for libcalls where that information
12333 may not be available.) The boolean NAMED specifies whether the
12334 argument is a named argument (as opposed to an unnamed argument
12335 matching an ellipsis). */
12336
12337 static void
12338 s390_function_arg_advance (cumulative_args_t cum_v, machine_mode mode,
12339 const_tree type, bool named)
12340 {
12341 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
12342
12343 if (s390_function_arg_vector (mode, type))
12344 {
12345 /* We are called for unnamed vector stdarg arguments which are
12346 passed on the stack. In this case this hook does not have to
12347 do anything since stack arguments are tracked by common
12348 code. */
12349 if (!named)
12350 return;
12351 cum->vrs += 1;
12352 }
12353 else if (s390_function_arg_float (mode, type))
12354 {
12355 cum->fprs += 1;
12356 }
12357 else if (s390_function_arg_integer (mode, type))
12358 {
12359 int size = s390_function_arg_size (mode, type);
12360 cum->gprs += ((size + UNITS_PER_LONG - 1) / UNITS_PER_LONG);
12361 }
12362 else
12363 gcc_unreachable ();
12364 }
12365
12366 /* Define where to put the arguments to a function.
12367 Value is zero to push the argument on the stack,
12368 or a hard register in which to store the argument.
12369
12370 MODE is the argument's machine mode.
12371 TYPE is the data type of the argument (as a tree).
12372 This is null for libcalls where that information may
12373 not be available.
12374 CUM is a variable of type CUMULATIVE_ARGS which gives info about
12375 the preceding args and about the function being called.
12376 NAMED is nonzero if this argument is a named parameter
12377 (otherwise it is an extra parameter matching an ellipsis).
12378
12379 On S/390, we use general purpose registers 2 through 6 to
12380 pass integer, pointer, and certain structure arguments, and
12381 floating point registers 0 and 2 (0, 2, 4, and 6 on 64-bit)
12382 to pass floating point arguments. All remaining arguments
12383 are pushed to the stack. */
12384
12385 static rtx
12386 s390_function_arg (cumulative_args_t cum_v, machine_mode mode,
12387 const_tree type, bool named)
12388 {
12389 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
12390
12391 if (!named)
12392 s390_check_type_for_vector_abi (type, true, false);
12393
12394 if (s390_function_arg_vector (mode, type))
12395 {
12396 /* Vector arguments being part of the ellipsis are passed on the
12397 stack. */
12398 if (!named || (cum->vrs + 1 > VEC_ARG_NUM_REG))
12399 return NULL_RTX;
12400
12401 return gen_rtx_REG (mode, cum->vrs + FIRST_VEC_ARG_REGNO);
12402 }
12403 else if (s390_function_arg_float (mode, type))
12404 {
12405 if (cum->fprs + 1 > FP_ARG_NUM_REG)
12406 return NULL_RTX;
12407 else
12408 return gen_rtx_REG (mode, cum->fprs + 16);
12409 }
12410 else if (s390_function_arg_integer (mode, type))
12411 {
12412 int size = s390_function_arg_size (mode, type);
12413 int n_gprs = (size + UNITS_PER_LONG - 1) / UNITS_PER_LONG;
12414
12415 if (cum->gprs + n_gprs > GP_ARG_NUM_REG)
12416 return NULL_RTX;
12417 else if (n_gprs == 1 || UNITS_PER_WORD == UNITS_PER_LONG)
12418 return gen_rtx_REG (mode, cum->gprs + 2);
12419 else if (n_gprs == 2)
12420 {
12421 rtvec p = rtvec_alloc (2);
12422
12423 RTVEC_ELT (p, 0)
12424 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, cum->gprs + 2),
12425 const0_rtx);
12426 RTVEC_ELT (p, 1)
12427 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, cum->gprs + 3),
12428 GEN_INT (4));
12429
12430 return gen_rtx_PARALLEL (mode, p);
12431 }
12432 }
12433
12434 /* After the real arguments, expand_call calls us once again
12435 with a void_type_node type. Whatever we return here is
12436 passed as operand 2 to the call expanders.
12437
12438 We don't need this feature ... */
12439 else if (type == void_type_node)
12440 return const0_rtx;
12441
12442 gcc_unreachable ();
12443 }
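
/* A worked example (editor's note) of the assignment done above for a
   31-bit (-m31) call

     void f (int a, long long b, double d);

     a -> %r2                 (single GPR)
     b -> %r3/%r4             (the two-GPR PARALLEL case, n_gprs == 2)
     d -> %f0                 (first FP argument register)

   On 64 bit the long long occupies a single GPR instead.  This is an
   illustration of the code above, not an ABI statement of its own.  */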
12444
12445 /* Implement TARGET_FUNCTION_ARG_PADDING. Vector arguments are
12446 left-justified when placed on the stack during parameter passing. */
12447
12448 static pad_direction
12449 s390_function_arg_padding (machine_mode mode, const_tree type)
12450 {
12451 if (s390_function_arg_vector (mode, type))
12452 return PAD_UPWARD;
12453
12454 return default_function_arg_padding (mode, type);
12455 }
12456
12457 /* Return true if return values of type TYPE should be returned
12458 in a memory buffer whose address is passed by the caller as
12459 hidden first argument. */
12460
12461 static bool
12462 s390_return_in_memory (const_tree type, const_tree fundecl ATTRIBUTE_UNUSED)
12463 {
12464 /* We accept small integral (and similar) types. */
12465 if (INTEGRAL_TYPE_P (type)
12466 || POINTER_TYPE_P (type)
12467 || TREE_CODE (type) == OFFSET_TYPE
12468 || TREE_CODE (type) == REAL_TYPE)
12469 return int_size_in_bytes (type) > 8;
12470
12471 /* Vector types which fit into a VR. */
12472 if (TARGET_VX_ABI
12473 && VECTOR_TYPE_P (type)
12474 && int_size_in_bytes (type) <= 16)
12475 return false;
12476
12477 /* Aggregates and similar constructs are always returned
12478 in memory. */
12479 if (AGGREGATE_TYPE_P (type)
12480 || TREE_CODE (type) == COMPLEX_TYPE
12481 || VECTOR_TYPE_P (type))
12482 return true;
12483
12484 /* ??? We get called on all sorts of random stuff from
12485 aggregate_value_p. We can't abort, but it's not clear
12486 what's safe to return. Pretend it's a struct I guess. */
12487 return true;
12488 }
12489
12490 /* Function arguments and return values are promoted to word size. */
12491
12492 static machine_mode
12493 s390_promote_function_mode (const_tree type, machine_mode mode,
12494 int *punsignedp,
12495 const_tree fntype ATTRIBUTE_UNUSED,
12496 int for_return ATTRIBUTE_UNUSED)
12497 {
12498 if (INTEGRAL_MODE_P (mode)
12499 && GET_MODE_SIZE (mode) < UNITS_PER_LONG)
12500 {
12501 if (type != NULL_TREE && POINTER_TYPE_P (type))
12502 *punsignedp = POINTERS_EXTEND_UNSIGNED;
12503 return Pmode;
12504 }
12505
12506 return mode;
12507 }
12508
12509 /* Define where to return a (scalar) value of type RET_TYPE.
12510 If RET_TYPE is null, define where to return a (scalar)
12511 value of mode MODE from a libcall. */
12512
12513 static rtx
12514 s390_function_and_libcall_value (machine_mode mode,
12515 const_tree ret_type,
12516 const_tree fntype_or_decl,
12517 bool outgoing ATTRIBUTE_UNUSED)
12518 {
12519 /* For vector return types it is important to use the RET_TYPE
12520 argument whenever available since the middle-end might have
12521 changed the mode to a scalar mode. */
12522 bool vector_ret_type_p = ((ret_type && VECTOR_TYPE_P (ret_type))
12523 || (!ret_type && VECTOR_MODE_P (mode)));
12524
12525 /* For normal functions perform the promotion as
12526 promote_function_mode would do. */
12527 if (ret_type)
12528 {
12529 int unsignedp = TYPE_UNSIGNED (ret_type);
12530 mode = promote_function_mode (ret_type, mode, &unsignedp,
12531 fntype_or_decl, 1);
12532 }
12533
12534 gcc_assert (GET_MODE_CLASS (mode) == MODE_INT
12535 || SCALAR_FLOAT_MODE_P (mode)
12536 || (TARGET_VX_ABI && vector_ret_type_p));
12537 gcc_assert (GET_MODE_SIZE (mode) <= (TARGET_VX_ABI ? 16 : 8));
12538
12539 if (TARGET_VX_ABI && vector_ret_type_p)
12540 return gen_rtx_REG (mode, FIRST_VEC_ARG_REGNO);
12541 else if (TARGET_HARD_FLOAT && SCALAR_FLOAT_MODE_P (mode))
12542 return gen_rtx_REG (mode, 16);
12543 else if (GET_MODE_SIZE (mode) <= UNITS_PER_LONG
12544 || UNITS_PER_LONG == UNITS_PER_WORD)
12545 return gen_rtx_REG (mode, 2);
12546 else if (GET_MODE_SIZE (mode) == 2 * UNITS_PER_LONG)
12547 {
12548 /* This case is triggered when returning a 64 bit value with
12549 -m31 -mzarch. Although the value would fit into a single
12550 register it has to be forced into a 32 bit register pair in
12551 order to match the ABI. */
12552 rtvec p = rtvec_alloc (2);
12553
12554 RTVEC_ELT (p, 0)
12555 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, 2), const0_rtx);
12556 RTVEC_ELT (p, 1)
12557 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, 3), GEN_INT (4));
12558
12559 return gen_rtx_PARALLEL (mode, p);
12560 }
12561
12562 gcc_unreachable ();
12563 }
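
/* Editor's summary of the return value placement above:

     integral/pointer values           -> %r2
     float/double with hardware FP     -> %f0 (hard register 16)
     vector types under the VX ABI     -> FIRST_VEC_ARG_REGNO
     64-bit value with -m31 -mzarch    -> %r2/%r3 pair (the PARALLEL case)

   Larger values are returned in memory instead; see s390_return_in_memory
   above.  */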
12564
12565 /* Define where to return a scalar return value of type RET_TYPE. */
12566
12567 static rtx
12568 s390_function_value (const_tree ret_type, const_tree fn_decl_or_type,
12569 bool outgoing)
12570 {
12571 return s390_function_and_libcall_value (TYPE_MODE (ret_type), ret_type,
12572 fn_decl_or_type, outgoing);
12573 }
12574
12575 /* Define where to return a scalar libcall return value of mode
12576 MODE. */
12577
12578 static rtx
12579 s390_libcall_value (machine_mode mode, const_rtx fun ATTRIBUTE_UNUSED)
12580 {
12581 return s390_function_and_libcall_value (mode, NULL_TREE,
12582 NULL_TREE, true);
12583 }
12584
12585
12586 /* Create and return the va_list datatype.
12587
12588 On S/390, va_list is an array type equivalent to
12589
12590 typedef struct __va_list_tag
12591 {
12592 long __gpr;
12593 long __fpr;
12594 void *__overflow_arg_area;
12595 void *__reg_save_area;
12596 } va_list[1];
12597
12598 where __gpr and __fpr hold the number of general purpose
12599 or floating point arguments used up to now, respectively,
12600 __overflow_arg_area points to the stack location of the
12601 next argument passed on the stack, and __reg_save_area
12602 always points to the start of the register area in the
12603 call frame of the current function. The function prologue
12604 saves all registers used for argument passing into this
12605 area if the function uses variable arguments. */
12606
12607 static tree
12608 s390_build_builtin_va_list (void)
12609 {
12610 tree f_gpr, f_fpr, f_ovf, f_sav, record, type_decl;
12611
12612 record = lang_hooks.types.make_type (RECORD_TYPE);
12613
12614 type_decl =
12615 build_decl (BUILTINS_LOCATION,
12616 TYPE_DECL, get_identifier ("__va_list_tag"), record);
12617
12618 f_gpr = build_decl (BUILTINS_LOCATION,
12619 FIELD_DECL, get_identifier ("__gpr"),
12620 long_integer_type_node);
12621 f_fpr = build_decl (BUILTINS_LOCATION,
12622 FIELD_DECL, get_identifier ("__fpr"),
12623 long_integer_type_node);
12624 f_ovf = build_decl (BUILTINS_LOCATION,
12625 FIELD_DECL, get_identifier ("__overflow_arg_area"),
12626 ptr_type_node);
12627 f_sav = build_decl (BUILTINS_LOCATION,
12628 FIELD_DECL, get_identifier ("__reg_save_area"),
12629 ptr_type_node);
12630
12631 va_list_gpr_counter_field = f_gpr;
12632 va_list_fpr_counter_field = f_fpr;
12633
12634 DECL_FIELD_CONTEXT (f_gpr) = record;
12635 DECL_FIELD_CONTEXT (f_fpr) = record;
12636 DECL_FIELD_CONTEXT (f_ovf) = record;
12637 DECL_FIELD_CONTEXT (f_sav) = record;
12638
12639 TYPE_STUB_DECL (record) = type_decl;
12640 TYPE_NAME (record) = type_decl;
12641 TYPE_FIELDS (record) = f_gpr;
12642 DECL_CHAIN (f_gpr) = f_fpr;
12643 DECL_CHAIN (f_fpr) = f_ovf;
12644 DECL_CHAIN (f_ovf) = f_sav;
12645
12646 layout_type (record);
12647
12648 /* The correct type is an array type of one element. */
12649 return build_array_type (record, build_index_type (size_zero_node));
12650 }
12651
12652 /* Implement va_start by filling the va_list structure VALIST.
12653 STDARG_P is always true, and ignored.
12654 NEXTARG points to the first anonymous stack argument.
12655
12656 The following global variables are used to initialize
12657 the va_list structure:
12658
12659 crtl->args.info:
12660 holds number of gprs and fprs used for named arguments.
12661 crtl->args.arg_offset_rtx:
12662 holds the offset of the first anonymous stack argument
12663 (relative to the virtual arg pointer). */
12664
12665 static void
12666 s390_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
12667 {
12668 HOST_WIDE_INT n_gpr, n_fpr;
12669 int off;
12670 tree f_gpr, f_fpr, f_ovf, f_sav;
12671 tree gpr, fpr, ovf, sav, t;
12672
12673 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
12674 f_fpr = DECL_CHAIN (f_gpr);
12675 f_ovf = DECL_CHAIN (f_fpr);
12676 f_sav = DECL_CHAIN (f_ovf);
12677
12678 valist = build_simple_mem_ref (valist);
12679 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
12680 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
12681 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
12682 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
12683
12684 /* Count number of gp and fp argument registers used. */
12685
12686 n_gpr = crtl->args.info.gprs;
12687 n_fpr = crtl->args.info.fprs;
12688
12689 if (cfun->va_list_gpr_size)
12690 {
12691 t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
12692 build_int_cst (NULL_TREE, n_gpr));
12693 TREE_SIDE_EFFECTS (t) = 1;
12694 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12695 }
12696
12697 if (cfun->va_list_fpr_size)
12698 {
12699 t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
12700 build_int_cst (NULL_TREE, n_fpr));
12701 TREE_SIDE_EFFECTS (t) = 1;
12702 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12703 }
12704
12705 if (flag_split_stack
12706 && (lookup_attribute ("no_split_stack", DECL_ATTRIBUTES (cfun->decl))
12707 == NULL)
12708 && cfun->machine->split_stack_varargs_pointer == NULL_RTX)
12709 {
12710 rtx reg;
12711 rtx_insn *seq;
12712
12713 reg = gen_reg_rtx (Pmode);
12714 cfun->machine->split_stack_varargs_pointer = reg;
12715
12716 start_sequence ();
12717 emit_move_insn (reg, gen_rtx_REG (Pmode, 1));
12718 seq = get_insns ();
12719 end_sequence ();
12720
12721 push_topmost_sequence ();
12722 emit_insn_after (seq, entry_of_function ());
12723 pop_topmost_sequence ();
12724 }
12725
12726 /* Find the overflow area.
12727 FIXME: This currently is too pessimistic when the vector ABI is
12728 enabled. In that case we *always* set up the overflow area
12729 pointer. */
12730 if (n_gpr + cfun->va_list_gpr_size > GP_ARG_NUM_REG
12731 || n_fpr + cfun->va_list_fpr_size > FP_ARG_NUM_REG
12732 || TARGET_VX_ABI)
12733 {
12734 if (cfun->machine->split_stack_varargs_pointer == NULL_RTX)
12735 t = make_tree (TREE_TYPE (ovf), virtual_incoming_args_rtx);
12736 else
12737 t = make_tree (TREE_TYPE (ovf), cfun->machine->split_stack_varargs_pointer);
12738
12739 off = INTVAL (crtl->args.arg_offset_rtx);
12740 off = off < 0 ? 0 : off;
12741 if (TARGET_DEBUG_ARG)
12742 fprintf (stderr, "va_start: n_gpr = %d, n_fpr = %d off %d\n",
12743 (int)n_gpr, (int)n_fpr, off);
12744
12745 t = fold_build_pointer_plus_hwi (t, off);
12746
12747 t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
12748 TREE_SIDE_EFFECTS (t) = 1;
12749 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12750 }
12751
12752 /* Find the register save area. */
12753 if ((cfun->va_list_gpr_size && n_gpr < GP_ARG_NUM_REG)
12754 || (cfun->va_list_fpr_size && n_fpr < FP_ARG_NUM_REG))
12755 {
12756 t = make_tree (TREE_TYPE (sav), return_address_pointer_rtx);
12757 t = fold_build_pointer_plus_hwi (t, -RETURN_REGNUM * UNITS_PER_LONG);
12758
12759 t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
12760 TREE_SIDE_EFFECTS (t) = 1;
12761 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12762 }
12763 }
12764
12765 /* Implement va_arg by updating the va_list structure
12766 VALIST as required to retrieve an argument of type
12767 TYPE, and returning that argument.
12768
12769 Generates code equivalent to:
12770
12771 if (integral value) {
12772 if (size <= 4 && args.gpr < 5 ||
12773 size > 4 && args.gpr < 4 )
12774 ret = args.reg_save_area[args.gpr+8]
12775 else
12776 ret = *args.overflow_arg_area++;
12777 } else if (vector value) {
12778 ret = *args.overflow_arg_area;
12779 args.overflow_arg_area += size / 8;
12780 } else if (float value) {
12781 if (args.fpr < 2)
12782 ret = args.reg_save_area[args.fpr+64]
12783 else
12784 ret = *args.overflow_arg_area++;
12785 } else if (aggregate value) {
12786 if (args.gpr < 5)
12787 ret = *args.reg_save_area[args.gpr]
12788 else
12789 ret = **args.overflow_arg_area++;
12790 } */
12791
12792 static tree
12793 s390_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
12794 gimple_seq *post_p ATTRIBUTE_UNUSED)
12795 {
12796 tree f_gpr, f_fpr, f_ovf, f_sav;
12797 tree gpr, fpr, ovf, sav, reg, t, u;
12798 int indirect_p, size, n_reg, sav_ofs, sav_scale, max_reg;
12799 tree lab_false, lab_over = NULL_TREE;
12800 tree addr = create_tmp_var (ptr_type_node, "addr");
12801 bool left_align_p; /* How a value < UNITS_PER_LONG is aligned within
12802 a stack slot. */
12803
12804 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
12805 f_fpr = DECL_CHAIN (f_gpr);
12806 f_ovf = DECL_CHAIN (f_fpr);
12807 f_sav = DECL_CHAIN (f_ovf);
12808
12809 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
12810 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
12811 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
12812
12813 /* The tree for args* cannot be shared between gpr/fpr and ovf since
12814 both appear on a lhs. */
12815 valist = unshare_expr (valist);
12816 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
12817
12818 size = int_size_in_bytes (type);
12819
12820 s390_check_type_for_vector_abi (type, true, false);
12821
12822 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
12823 {
12824 if (TARGET_DEBUG_ARG)
12825 {
12826 fprintf (stderr, "va_arg: aggregate type");
12827 debug_tree (type);
12828 }
12829
12830 /* Aggregates are passed by reference. */
12831 indirect_p = 1;
12832 reg = gpr;
12833 n_reg = 1;
12834
12835 /* Kernel stack layout on 31 bit: it is assumed here that no padding
12836 will be added by s390_frame_info because for va_args an even
12837 number of gprs always has to be saved (r15-r2 = 14 regs). */
12838 sav_ofs = 2 * UNITS_PER_LONG;
12839 sav_scale = UNITS_PER_LONG;
12840 size = UNITS_PER_LONG;
12841 max_reg = GP_ARG_NUM_REG - n_reg;
12842 left_align_p = false;
12843 }
12844 else if (s390_function_arg_vector (TYPE_MODE (type), type))
12845 {
12846 if (TARGET_DEBUG_ARG)
12847 {
12848 fprintf (stderr, "va_arg: vector type");
12849 debug_tree (type);
12850 }
12851
12852 indirect_p = 0;
12853 reg = NULL_TREE;
12854 n_reg = 0;
12855 sav_ofs = 0;
12856 sav_scale = 8;
12857 max_reg = 0;
12858 left_align_p = true;
12859 }
12860 else if (s390_function_arg_float (TYPE_MODE (type), type))
12861 {
12862 if (TARGET_DEBUG_ARG)
12863 {
12864 fprintf (stderr, "va_arg: float type");
12865 debug_tree (type);
12866 }
12867
12868 /* FP args go in FP registers, if present. */
12869 indirect_p = 0;
12870 reg = fpr;
12871 n_reg = 1;
12872 sav_ofs = 16 * UNITS_PER_LONG;
12873 sav_scale = 8;
12874 max_reg = FP_ARG_NUM_REG - n_reg;
12875 left_align_p = false;
12876 }
12877 else
12878 {
12879 if (TARGET_DEBUG_ARG)
12880 {
12881 fprintf (stderr, "va_arg: other type");
12882 debug_tree (type);
12883 }
12884
12885 /* Otherwise into GP registers. */
12886 indirect_p = 0;
12887 reg = gpr;
12888 n_reg = (size + UNITS_PER_LONG - 1) / UNITS_PER_LONG;
12889
12890 /* Kernel stack layout on 31 bit: it is assumed here that no padding
12891 will be added by s390_frame_info because for va_args an even
12892 number of gprs always has to be saved (r15-r2 = 14 regs). */
12893 sav_ofs = 2 * UNITS_PER_LONG;
12894
12895 if (size < UNITS_PER_LONG)
12896 sav_ofs += UNITS_PER_LONG - size;
12897
12898 sav_scale = UNITS_PER_LONG;
12899 max_reg = GP_ARG_NUM_REG - n_reg;
12900 left_align_p = false;
12901 }
12902
12903 /* Pull the value out of the saved registers ... */
12904
12905 if (reg != NULL_TREE)
12906 {
12907 /*
12908 if (reg > ((typeof (reg))max_reg))
12909 goto lab_false;
12910
12911 addr = sav + sav_ofs + reg * sav_scale;
12912
12913 goto lab_over;
12914
12915 lab_false:
12916 */
12917
12918 lab_false = create_artificial_label (UNKNOWN_LOCATION);
12919 lab_over = create_artificial_label (UNKNOWN_LOCATION);
12920
12921 t = fold_convert (TREE_TYPE (reg), size_int (max_reg));
12922 t = build2 (GT_EXPR, boolean_type_node, reg, t);
12923 u = build1 (GOTO_EXPR, void_type_node, lab_false);
12924 t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
12925 gimplify_and_add (t, pre_p);
12926
12927 t = fold_build_pointer_plus_hwi (sav, sav_ofs);
12928 u = build2 (MULT_EXPR, TREE_TYPE (reg), reg,
12929 fold_convert (TREE_TYPE (reg), size_int (sav_scale)));
12930 t = fold_build_pointer_plus (t, u);
12931
12932 gimplify_assign (addr, t, pre_p);
12933
12934 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
12935
12936 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_false));
12937 }
12938
12939 /* ... Otherwise out of the overflow area. */
12940
12941 t = ovf;
12942 if (size < UNITS_PER_LONG && !left_align_p)
12943 t = fold_build_pointer_plus_hwi (t, UNITS_PER_LONG - size);
12944
12945 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
12946
12947 gimplify_assign (addr, t, pre_p);
12948
12949 if (size < UNITS_PER_LONG && left_align_p)
12950 t = fold_build_pointer_plus_hwi (t, UNITS_PER_LONG);
12951 else
12952 t = fold_build_pointer_plus_hwi (t, size);
12953
12954 gimplify_assign (ovf, t, pre_p);
12955
12956 if (reg != NULL_TREE)
12957 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_over));
12958
12959
12960 /* Increment register save count. */
12961
12962 if (n_reg > 0)
12963 {
12964 u = build2 (PREINCREMENT_EXPR, TREE_TYPE (reg), reg,
12965 fold_convert (TREE_TYPE (reg), size_int (n_reg)));
12966 gimplify_and_add (u, pre_p);
12967 }
12968
12969 if (indirect_p)
12970 {
12971 t = build_pointer_type_for_mode (build_pointer_type (type),
12972 ptr_mode, true);
12973 addr = fold_convert (t, addr);
12974 addr = build_va_arg_indirect_ref (addr);
12975 }
12976 else
12977 {
12978 t = build_pointer_type_for_mode (type, ptr_mode, true);
12979 addr = fold_convert (t, addr);
12980 }
12981
12982 return build_va_arg_indirect_ref (addr);
12983 }
12984
12985 /* Emit rtl for the tbegin or tbegin_retry (RETRY != NULL_RTX)
12986 expanders.
12987 DEST - Register location where CC will be stored.
12988 TDB - Pointer to a 256 byte area where to store the transaction
12989 diagnostic block. NULL if TDB is not needed.
12990 RETRY - Retry count value. If non-NULL, a retry loop for CC2
12991 is emitted.
12992 CLOBBER_FPRS_P - If true clobbers for all FPRs are emitted as part
12993 of the tbegin instruction pattern. */
12994
12995 void
12996 s390_expand_tbegin (rtx dest, rtx tdb, rtx retry, bool clobber_fprs_p)
12997 {
12998 rtx retry_plus_two = gen_reg_rtx (SImode);
12999 rtx retry_reg = gen_reg_rtx (SImode);
13000 rtx_code_label *retry_label = NULL;
13001
13002 if (retry != NULL_RTX)
13003 {
13004 emit_move_insn (retry_reg, retry);
13005 emit_insn (gen_addsi3 (retry_plus_two, retry_reg, const2_rtx));
13006 emit_insn (gen_addsi3 (retry_reg, retry_reg, const1_rtx));
13007 retry_label = gen_label_rtx ();
13008 emit_label (retry_label);
13009 }
13010
13011 if (clobber_fprs_p)
13012 {
13013 if (TARGET_VX)
13014 emit_insn (gen_tbegin_1_z13 (gen_rtx_CONST_INT (VOIDmode, TBEGIN_MASK),
13015 tdb));
13016 else
13017 emit_insn (gen_tbegin_1 (gen_rtx_CONST_INT (VOIDmode, TBEGIN_MASK),
13018 tdb));
13019 }
13020 else
13021 emit_insn (gen_tbegin_nofloat_1 (gen_rtx_CONST_INT (VOIDmode, TBEGIN_MASK),
13022 tdb));
13023
13024 emit_move_insn (dest, gen_rtx_UNSPEC (SImode,
13025 gen_rtvec (1, gen_rtx_REG (CCRAWmode,
13026 CC_REGNUM)),
13027 UNSPEC_CC_TO_INT));
13028 if (retry != NULL_RTX)
13029 {
13030 const int CC0 = 1 << 3;
13031 const int CC1 = 1 << 2;
13032 const int CC3 = 1 << 0;
13033 rtx jump;
13034 rtx count = gen_reg_rtx (SImode);
13035 rtx_code_label *leave_label = gen_label_rtx ();
13036
13037 /* Exit for success and permanent failures. */
13038 jump = s390_emit_jump (leave_label,
13039 gen_rtx_EQ (VOIDmode,
13040 gen_rtx_REG (CCRAWmode, CC_REGNUM),
13041 gen_rtx_CONST_INT (VOIDmode, CC0 | CC1 | CC3)));
13042 LABEL_NUSES (leave_label) = 1;
13043
13044 /* CC2 - transient failure. Perform retry with ppa. */
13045 emit_move_insn (count, retry_plus_two);
13046 emit_insn (gen_subsi3 (count, count, retry_reg));
13047 emit_insn (gen_tx_assist (count));
13048 jump = emit_jump_insn (gen_doloop_si64 (retry_label,
13049 retry_reg,
13050 retry_reg));
13051 JUMP_LABEL (jump) = retry_label;
13052 LABEL_NUSES (retry_label) = 1;
13053 emit_label (leave_label);
13054 }
13055 }
13056
13057
13058 /* Return the decl for the target specific builtin with the function
13059 code FCODE. */
13060
13061 static tree
13062 s390_builtin_decl (unsigned fcode, bool initialized_p ATTRIBUTE_UNUSED)
13063 {
13064 if (fcode >= S390_BUILTIN_MAX)
13065 return error_mark_node;
13066
13067 return s390_builtin_decls[fcode];
13068 }
13069
13070 /* We call mcount before the function prologue. So a profiled leaf
13071 function should stay a leaf function. */
13072
13073 static bool
13074 s390_keep_leaf_when_profiled ()
13075 {
13076 return true;
13077 }
13078
13079 /* Output assembly code for the trampoline template to
13080 stdio stream FILE.
13081
13082 On S/390, we use gpr 1 internally in the trampoline code;
13083 gpr 0 is used to hold the static chain. */
13084
13085 static void
13086 s390_asm_trampoline_template (FILE *file)
13087 {
13088 rtx op[2];
13089 op[0] = gen_rtx_REG (Pmode, 0);
13090 op[1] = gen_rtx_REG (Pmode, 1);
13091
13092 if (TARGET_64BIT)
13093 {
13094 output_asm_insn ("basr\t%1,0", op); /* 2 byte */
13095 output_asm_insn ("lmg\t%0,%1,14(%1)", op); /* 6 byte */
13096 output_asm_insn ("br\t%1", op); /* 2 byte */
13097 ASM_OUTPUT_SKIP (file, (HOST_WIDE_INT)(TRAMPOLINE_SIZE - 10));
13098 }
13099 else
13100 {
13101 output_asm_insn ("basr\t%1,0", op); /* 2 byte */
13102 output_asm_insn ("lm\t%0,%1,6(%1)", op); /* 4 byte */
13103 output_asm_insn ("br\t%1", op); /* 2 byte */
13104 ASM_OUTPUT_SKIP (file, (HOST_WIDE_INT)(TRAMPOLINE_SIZE - 8));
13105 }
13106 }
13107
13108 /* Emit RTL insns to initialize the variable parts of a trampoline.
13109 FNADDR is an RTX for the address of the function's pure code.
13110 CXT is an RTX for the static chain value for the function. */
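/* For reference, the resulting trampoline layout in the 64-bit case
   (UNITS_PER_LONG == 8, offsets in bytes) is

     0   basr %r1,0             r1 := address of the next insn (offset 2)
     2   lmg  %r0,%r1,14(%r1)   r0 := word at offset 16, r1 := word at 24
     8   br   %r1
     16  <static chain value>   stored below via adjust_address
     24  <function address>     stored below via adjust_address

   The 31-bit layout is analogous with 4-byte data slots at offsets 8
   and 12.  */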
13111
13112 static void
13113 s390_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
13114 {
13115 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
13116 rtx mem;
13117
13118 emit_block_move (m_tramp, assemble_trampoline_template (),
13119 GEN_INT (2 * UNITS_PER_LONG), BLOCK_OP_NORMAL);
13120
13121 mem = adjust_address (m_tramp, Pmode, 2 * UNITS_PER_LONG);
13122 emit_move_insn (mem, cxt);
13123 mem = adjust_address (m_tramp, Pmode, 3 * UNITS_PER_LONG);
13124 emit_move_insn (mem, fnaddr);
13125 }
13126
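/* Emit HW halfwords (2-byte units) worth of NOPs on behalf of USER.
   The encodings used are brcl 0,0 (6 bytes), bc 0,0 (4 bytes) and
   bcr 0,0 (2 bytes); the 6-byte form is only available on
   z/Architecture CPUs.  */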
13127 static void
13128 output_asm_nops (const char *user, int hw)
13129 {
13130 asm_fprintf (asm_out_file, "\t# NOPs for %s (%d halfwords)\n", user, hw);
13131 while (hw > 0)
13132 {
13133 if (TARGET_CPU_ZARCH && hw >= 3)
13134 {
13135 output_asm_insn ("brcl\t0,0", NULL);
13136 hw -= 3;
13137 }
13138 else if (hw >= 2)
13139 {
13140 output_asm_insn ("bc\t0,0", NULL);
13141 hw -= 2;
13142 }
13143 else
13144 {
13145 output_asm_insn ("bcr\t0,0", NULL);
13146 hw -= 1;
13147 }
13148 }
13149 }
13150
13151 /* Output assembler code to FILE to increment profiler label # LABELNO
13152 for profiling a function entry. */
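/* For the common 64-bit, non-fentry case the emitted sequence is
   roughly

       stg    %r14,8(%r15)
       larl   %r1,.LPn
       brasl  %r14,_mcount        (_mcount@PLT when compiling PIC)
       lg     %r14,8(%r15)

   i.e. the return address register is spilled around the call and %r1
   is loaded with the address of the per-call-site label.  */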
13153
13154 void
13155 s390_function_profiler (FILE *file, int labelno)
13156 {
13157 rtx op[7];
13158
13159 char label[128];
13160 ASM_GENERATE_INTERNAL_LABEL (label, "LP", labelno);
13161
13162 fprintf (file, "# function profiler \n");
13163
13164 op[0] = gen_rtx_REG (Pmode, RETURN_REGNUM);
13165 op[1] = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
13166 op[1] = gen_rtx_MEM (Pmode, plus_constant (Pmode, op[1], UNITS_PER_LONG));
13167
13168 op[2] = gen_rtx_REG (Pmode, 1);
13169 op[3] = gen_rtx_SYMBOL_REF (Pmode, label);
13170 SYMBOL_REF_FLAGS (op[3]) = SYMBOL_FLAG_LOCAL;
13171
13172 op[4] = gen_rtx_SYMBOL_REF (Pmode, flag_fentry ? "__fentry__" : "_mcount");
13173 if (flag_pic)
13174 {
13175 op[4] = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op[4]), UNSPEC_PLT);
13176 op[4] = gen_rtx_CONST (Pmode, op[4]);
13177 }
13178
13179 if (flag_record_mcount)
13180 fprintf (file, "1:\n");
13181
13182 if (flag_fentry)
13183 {
13184 if (flag_nop_mcount)
13185 output_asm_nops ("-mnop-mcount", /* brasl */ 3);
13186 else if (cfun->static_chain_decl)
13187 warning (OPT_Wcannot_profile, "nested functions cannot be profiled "
13188 "with -mfentry on s390");
13189 else
13190 output_asm_insn ("brasl\t0,%4", op);
13191 }
13192 else if (TARGET_64BIT)
13193 {
13194 if (flag_nop_mcount)
13195 output_asm_nops ("-mnop-mcount", /* stg */ 3 + /* larl */ 3 +
13196 /* brasl */ 3 + /* lg */ 3);
13197 else
13198 {
13199 output_asm_insn ("stg\t%0,%1", op);
13200 output_asm_insn ("larl\t%2,%3", op);
13201 output_asm_insn ("brasl\t%0,%4", op);
13202 output_asm_insn ("lg\t%0,%1", op);
13203 }
13204 }
13205 else if (TARGET_CPU_ZARCH)
13206 {
13207 if (flag_nop_mcount)
13208 output_asm_nops ("-mnop-mcount", /* st */ 2 + /* larl */ 3 +
13209 /* brasl */ 3 + /* l */ 2);
13210 else
13211 {
13212 output_asm_insn ("st\t%0,%1", op);
13213 output_asm_insn ("larl\t%2,%3", op);
13214 output_asm_insn ("brasl\t%0,%4", op);
13215 output_asm_insn ("l\t%0,%1", op);
13216 }
13217 }
13218 else if (!flag_pic)
13219 {
13220 op[6] = gen_label_rtx ();
13221
13222 if (flag_nop_mcount)
13223 output_asm_nops ("-mnop-mcount", /* st */ 2 + /* bras */ 2 +
13224 /* .long */ 2 + /* .long */ 2 + /* l */ 2 +
13225 /* l */ 2 + /* basr */ 1 + /* l */ 2);
13226 else
13227 {
13228 output_asm_insn ("st\t%0,%1", op);
13229 output_asm_insn ("bras\t%2,%l6", op);
13230 output_asm_insn (".long\t%4", op);
13231 output_asm_insn (".long\t%3", op);
13232 targetm.asm_out.internal_label (file, "L",
13233 CODE_LABEL_NUMBER (op[6]));
13234 output_asm_insn ("l\t%0,0(%2)", op);
13235 output_asm_insn ("l\t%2,4(%2)", op);
13236 output_asm_insn ("basr\t%0,%0", op);
13237 output_asm_insn ("l\t%0,%1", op);
13238 }
13239 }
13240 else
13241 {
13242 op[5] = gen_label_rtx ();
13243 op[6] = gen_label_rtx ();
13244
13245 if (flag_nop_mcount)
13246 output_asm_nops ("-mnop-mcount", /* st */ 2 + /* bras */ 2 +
13247 /* .long */ 2 + /* .long */ 2 + /* lr */ 1 +
13248 /* a */ 2 + /* a */ 2 + /* basr */ 1 + /* l */ 2);
13249 else
13250 {
13251 output_asm_insn ("st\t%0,%1", op);
13252 output_asm_insn ("bras\t%2,%l6", op);
13253 targetm.asm_out.internal_label (file, "L",
13254 CODE_LABEL_NUMBER (op[5]));
13255 output_asm_insn (".long\t%4-%l5", op);
13256 output_asm_insn (".long\t%3-%l5", op);
13257 targetm.asm_out.internal_label (file, "L",
13258 CODE_LABEL_NUMBER (op[6]));
13259 output_asm_insn ("lr\t%0,%2", op);
13260 output_asm_insn ("a\t%0,0(%2)", op);
13261 output_asm_insn ("a\t%2,4(%2)", op);
13262 output_asm_insn ("basr\t%0,%0", op);
13263 output_asm_insn ("l\t%0,%1", op);
13264 }
13265 }
13266
13267 if (flag_record_mcount)
13268 {
13269 fprintf (file, "\t.section __mcount_loc, \"a\",@progbits\n");
13270 fprintf (file, "\t.%s 1b\n", TARGET_64BIT ? "quad" : "long");
13271 fprintf (file, "\t.previous\n");
13272 }
13273 }
13274
13275 /* Encode symbol attributes (local vs. global, tls model) of a SYMBOL_REF
13276 into its SYMBOL_REF_FLAGS. */
13277
13278 static void
13279 s390_encode_section_info (tree decl, rtx rtl, int first)
13280 {
13281 default_encode_section_info (decl, rtl, first);
13282
13283 if (TREE_CODE (decl) == VAR_DECL)
13284 {
13285 /* Store the alignment to be able to check if we can use
13286 a larl/load-relative instruction. We only handle the cases
13287 that can go wrong (i.e. no FUNC_DECLs). */
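/* Note that DECL_ALIGN is measured in bits, so e.g. DECL_ALIGN % 32
   catches everything not known to be at least 4-byte aligned.  */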
13288 if (DECL_ALIGN (decl) == 0 || DECL_ALIGN (decl) % 16)
13289 SYMBOL_FLAG_SET_NOTALIGN2 (XEXP (rtl, 0));
13290 else if (DECL_ALIGN (decl) % 32)
13291 SYMBOL_FLAG_SET_NOTALIGN4 (XEXP (rtl, 0));
13292 else if (DECL_ALIGN (decl) % 64)
13293 SYMBOL_FLAG_SET_NOTALIGN8 (XEXP (rtl, 0));
13294 }
13295
13296 /* Literal pool references don't have a decl so they are handled
13297 differently here. We rely on the information in the MEM_ALIGN
13298 entry to decide upon the alignment. */
13299 if (MEM_P (rtl)
13300 && GET_CODE (XEXP (rtl, 0)) == SYMBOL_REF
13301 && TREE_CONSTANT_POOL_ADDRESS_P (XEXP (rtl, 0)))
13302 {
13303 if (MEM_ALIGN (rtl) == 0 || MEM_ALIGN (rtl) % 16)
13304 SYMBOL_FLAG_SET_NOTALIGN2 (XEXP (rtl, 0));
13305 else if (MEM_ALIGN (rtl) % 32)
13306 SYMBOL_FLAG_SET_NOTALIGN4 (XEXP (rtl, 0));
13307 else if (MEM_ALIGN (rtl) % 64)
13308 SYMBOL_FLAG_SET_NOTALIGN8 (XEXP (rtl, 0));
13309 }
13310 }
13311
13312 /* Output thunk to FILE that implements a C++ virtual function call (with
13313 multiple inheritance) to FUNCTION. The thunk adjusts the this pointer
13314 by DELTA, and unless VCALL_OFFSET is zero, applies an additional adjustment
13315 stored at VCALL_OFFSET in the vtable whose address is located at offset 0
13316 relative to the resulting this pointer. */
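/* As an illustration, with a small positive DELTA, a zero VCALL_OFFSET
   and a local target the 64-bit code below degenerates to just

       la   %r2,<delta>(%r2)
       jg   <function>

   (%r3 instead of %r2 when the target returns an aggregate).  */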
13317
13318 static void
13319 s390_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
13320 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
13321 tree function)
13322 {
13323 rtx op[10];
13324 int nonlocal = 0;
13325
13326 /* Make sure unwind info is emitted for the thunk if needed. */
13327 final_start_function (emit_barrier (), file, 1);
13328
13329 /* Operand 0 is the target function. */
13330 op[0] = XEXP (DECL_RTL (function), 0);
13331 if (flag_pic && !SYMBOL_REF_LOCAL_P (op[0]))
13332 {
13333 nonlocal = 1;
13334 op[0] = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op[0]),
13335 TARGET_64BIT ? UNSPEC_PLT : UNSPEC_GOT);
13336 op[0] = gen_rtx_CONST (Pmode, op[0]);
13337 }
13338
13339 /* Operand 1 is the 'this' pointer. */
13340 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
13341 op[1] = gen_rtx_REG (Pmode, 3);
13342 else
13343 op[1] = gen_rtx_REG (Pmode, 2);
13344
13345 /* Operand 2 is the delta. */
13346 op[2] = GEN_INT (delta);
13347
13348 /* Operand 3 is the vcall_offset. */
13349 op[3] = GEN_INT (vcall_offset);
13350
13351 /* Operand 4 is the temporary register. */
13352 op[4] = gen_rtx_REG (Pmode, 1);
13353
13354 /* Operands 5 to 8 can be used as labels. */
13355 op[5] = NULL_RTX;
13356 op[6] = NULL_RTX;
13357 op[7] = NULL_RTX;
13358 op[8] = NULL_RTX;
13359
13360 /* Operand 9 can be used for temporary register. */
13361 op[9] = NULL_RTX;
13362
13363 /* Generate code. */
13364 if (TARGET_64BIT)
13365 {
13366 /* Setup literal pool pointer if required. */
13367 if ((!DISP_IN_RANGE (delta)
13368 && !CONST_OK_FOR_K (delta)
13369 && !CONST_OK_FOR_Os (delta))
13370 || (!DISP_IN_RANGE (vcall_offset)
13371 && !CONST_OK_FOR_K (vcall_offset)
13372 && !CONST_OK_FOR_Os (vcall_offset)))
13373 {
13374 op[5] = gen_label_rtx ();
13375 output_asm_insn ("larl\t%4,%5", op);
13376 }
13377
13378 /* Add DELTA to this pointer. */
13379 if (delta)
13380 {
13381 if (CONST_OK_FOR_J (delta))
13382 output_asm_insn ("la\t%1,%2(%1)", op);
13383 else if (DISP_IN_RANGE (delta))
13384 output_asm_insn ("lay\t%1,%2(%1)", op);
13385 else if (CONST_OK_FOR_K (delta))
13386 output_asm_insn ("aghi\t%1,%2", op);
13387 else if (CONST_OK_FOR_Os (delta))
13388 output_asm_insn ("agfi\t%1,%2", op);
13389 else
13390 {
13391 op[6] = gen_label_rtx ();
13392 output_asm_insn ("agf\t%1,%6-%5(%4)", op);
13393 }
13394 }
13395
13396 /* Perform vcall adjustment. */
13397 if (vcall_offset)
13398 {
13399 if (DISP_IN_RANGE (vcall_offset))
13400 {
13401 output_asm_insn ("lg\t%4,0(%1)", op);
13402 output_asm_insn ("ag\t%1,%3(%4)", op);
13403 }
13404 else if (CONST_OK_FOR_K (vcall_offset))
13405 {
13406 output_asm_insn ("lghi\t%4,%3", op);
13407 output_asm_insn ("ag\t%4,0(%1)", op);
13408 output_asm_insn ("ag\t%1,0(%4)", op);
13409 }
13410 else if (CONST_OK_FOR_Os (vcall_offset))
13411 {
13412 output_asm_insn ("lgfi\t%4,%3", op);
13413 output_asm_insn ("ag\t%4,0(%1)", op);
13414 output_asm_insn ("ag\t%1,0(%4)", op);
13415 }
13416 else
13417 {
13418 op[7] = gen_label_rtx ();
13419 output_asm_insn ("llgf\t%4,%7-%5(%4)", op);
13420 output_asm_insn ("ag\t%4,0(%1)", op);
13421 output_asm_insn ("ag\t%1,0(%4)", op);
13422 }
13423 }
13424
13425 /* Jump to target. */
13426 output_asm_insn ("jg\t%0", op);
13427
13428 /* Output literal pool if required. */
13429 if (op[5])
13430 {
13431 output_asm_insn (".align\t4", op);
13432 targetm.asm_out.internal_label (file, "L",
13433 CODE_LABEL_NUMBER (op[5]));
13434 }
13435 if (op[6])
13436 {
13437 targetm.asm_out.internal_label (file, "L",
13438 CODE_LABEL_NUMBER (op[6]));
13439 output_asm_insn (".long\t%2", op);
13440 }
13441 if (op[7])
13442 {
13443 targetm.asm_out.internal_label (file, "L",
13444 CODE_LABEL_NUMBER (op[7]));
13445 output_asm_insn (".long\t%3", op);
13446 }
13447 }
13448 else
13449 {
13450 /* Setup base pointer if required. */
13451 if (!vcall_offset
13452 || (!DISP_IN_RANGE (delta)
13453 && !CONST_OK_FOR_K (delta)
13454 && !CONST_OK_FOR_Os (delta))
13455 || (!DISP_IN_RANGE (delta)
13456 && !CONST_OK_FOR_K (vcall_offset)
13457 && !CONST_OK_FOR_Os (vcall_offset)))
13458 {
13459 op[5] = gen_label_rtx ();
13460 output_asm_insn ("basr\t%4,0", op);
13461 targetm.asm_out.internal_label (file, "L",
13462 CODE_LABEL_NUMBER (op[5]));
13463 }
13464
13465 /* Add DELTA to this pointer. */
13466 if (delta)
13467 {
13468 if (CONST_OK_FOR_J (delta))
13469 output_asm_insn ("la\t%1,%2(%1)", op);
13470 else if (DISP_IN_RANGE (delta))
13471 output_asm_insn ("lay\t%1,%2(%1)", op);
13472 else if (CONST_OK_FOR_K (delta))
13473 output_asm_insn ("ahi\t%1,%2", op);
13474 else if (CONST_OK_FOR_Os (delta))
13475 output_asm_insn ("afi\t%1,%2", op);
13476 else
13477 {
13478 op[6] = gen_label_rtx ();
13479 output_asm_insn ("a\t%1,%6-%5(%4)", op);
13480 }
13481 }
13482
13483 /* Perform vcall adjustment. */
13484 if (vcall_offset)
13485 {
13486 if (CONST_OK_FOR_J (vcall_offset))
13487 {
13488 output_asm_insn ("l\t%4,0(%1)", op);
13489 output_asm_insn ("a\t%1,%3(%4)", op);
13490 }
13491 else if (DISP_IN_RANGE (vcall_offset))
13492 {
13493 output_asm_insn ("l\t%4,0(%1)", op);
13494 output_asm_insn ("ay\t%1,%3(%4)", op);
13495 }
13496 else if (CONST_OK_FOR_K (vcall_offset))
13497 {
13498 output_asm_insn ("lhi\t%4,%3", op);
13499 output_asm_insn ("a\t%4,0(%1)", op);
13500 output_asm_insn ("a\t%1,0(%4)", op);
13501 }
13502 else if (CONST_OK_FOR_Os (vcall_offset))
13503 {
13504 output_asm_insn ("iilf\t%4,%3", op);
13505 output_asm_insn ("a\t%4,0(%1)", op);
13506 output_asm_insn ("a\t%1,0(%4)", op);
13507 }
13508 else
13509 {
13510 op[7] = gen_label_rtx ();
13511 output_asm_insn ("l\t%4,%7-%5(%4)", op);
13512 output_asm_insn ("a\t%4,0(%1)", op);
13513 output_asm_insn ("a\t%1,0(%4)", op);
13514 }
13515
13516 /* We had to clobber the base pointer register.
13517 Re-setup the base pointer (with a different base). */
13518 op[5] = gen_label_rtx ();
13519 output_asm_insn ("basr\t%4,0", op);
13520 targetm.asm_out.internal_label (file, "L",
13521 CODE_LABEL_NUMBER (op[5]));
13522 }
13523
13524 /* Jump to target. */
13525 op[8] = gen_label_rtx ();
13526
13527 if (!flag_pic)
13528 output_asm_insn ("l\t%4,%8-%5(%4)", op);
13529 else if (!nonlocal)
13530 output_asm_insn ("a\t%4,%8-%5(%4)", op);
13531 /* We cannot call through .plt, since .plt requires %r12 loaded. */
13532 else if (flag_pic == 1)
13533 {
13534 output_asm_insn ("a\t%4,%8-%5(%4)", op);
13535 output_asm_insn ("l\t%4,%0(%4)", op);
13536 }
13537 else if (flag_pic == 2)
13538 {
13539 op[9] = gen_rtx_REG (Pmode, 0);
13540 output_asm_insn ("l\t%9,%8-4-%5(%4)", op);
13541 output_asm_insn ("a\t%4,%8-%5(%4)", op);
13542 output_asm_insn ("ar\t%4,%9", op);
13543 output_asm_insn ("l\t%4,0(%4)", op);
13544 }
13545
13546 output_asm_insn ("br\t%4", op);
13547
13548 /* Output literal pool. */
13549 output_asm_insn (".align\t4", op);
13550
13551 if (nonlocal && flag_pic == 2)
13552 output_asm_insn (".long\t%0", op);
13553 if (nonlocal)
13554 {
13555 op[0] = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
13556 SYMBOL_REF_FLAGS (op[0]) = SYMBOL_FLAG_LOCAL;
13557 }
13558
13559 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[8]));
13560 if (!flag_pic)
13561 output_asm_insn (".long\t%0", op);
13562 else
13563 output_asm_insn (".long\t%0-%5", op);
13564
13565 if (op[6])
13566 {
13567 targetm.asm_out.internal_label (file, "L",
13568 CODE_LABEL_NUMBER (op[6]));
13569 output_asm_insn (".long\t%2", op);
13570 }
13571 if (op[7])
13572 {
13573 targetm.asm_out.internal_label (file, "L",
13574 CODE_LABEL_NUMBER (op[7]));
13575 output_asm_insn (".long\t%3", op);
13576 }
13577 }
13578 final_end_function ();
13579 }
13580
13581 /* Output either an indirect jump or an indirect call
13582 (RETURN_ADDR_REGNO != INVALID_REGNUM) with target register REGNO
13583 using a branch trampoline disabling branch target prediction. */
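/* In other words, instead of an indirect basr/br through REGNO the code
   below emits a direct brasl (for calls) or jg (for jumps and returns,
   optionally made conditional via the 'C' operand modifier) to an
   out-of-line thunk, which then performs the actual branch through the
   register, using exrl on z10 and later and a larl/ex pair otherwise.  */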
13584
13585 void
13586 s390_indirect_branch_via_thunk (unsigned int regno,
13587 unsigned int return_addr_regno,
13588 rtx comparison_operator,
13589 enum s390_indirect_branch_type type)
13590 {
13591 enum s390_indirect_branch_option option;
13592
13593 if (type == s390_indirect_branch_type_return)
13594 {
13595 if (s390_return_addr_from_memory ())
13596 option = s390_opt_function_return_mem;
13597 else
13598 option = s390_opt_function_return_reg;
13599 }
13600 else if (type == s390_indirect_branch_type_jump)
13601 option = s390_opt_indirect_branch_jump;
13602 else if (type == s390_indirect_branch_type_call)
13603 option = s390_opt_indirect_branch_call;
13604 else
13605 gcc_unreachable ();
13606
13607 if (TARGET_INDIRECT_BRANCH_TABLE)
13608 {
13609 char label[32];
13610
13611 ASM_GENERATE_INTERNAL_LABEL (label,
13612 indirect_branch_table_label[option],
13613 indirect_branch_table_label_no[option]++);
13614 ASM_OUTPUT_LABEL (asm_out_file, label);
13615 }
13616
13617 if (return_addr_regno != INVALID_REGNUM)
13618 {
13619 gcc_assert (comparison_operator == NULL_RTX);
13620 fprintf (asm_out_file, " \tbrasl\t%%r%d,", return_addr_regno);
13621 }
13622 else
13623 {
13624 fputs (" \tjg", asm_out_file);
13625 if (comparison_operator != NULL_RTX)
13626 print_operand (asm_out_file, comparison_operator, 'C');
13627
13628 fputs ("\t", asm_out_file);
13629 }
13630
13631 if (TARGET_CPU_Z10)
13632 fprintf (asm_out_file,
13633 TARGET_INDIRECT_BRANCH_THUNK_NAME_EXRL "\n",
13634 regno);
13635 else
13636 fprintf (asm_out_file,
13637 TARGET_INDIRECT_BRANCH_THUNK_NAME_EX "\n",
13638 INDIRECT_BRANCH_THUNK_REGNUM, regno);
13639
13640 if ((option == s390_opt_indirect_branch_jump
13641 && cfun->machine->indirect_branch_jump == indirect_branch_thunk)
13642 || (option == s390_opt_indirect_branch_call
13643 && cfun->machine->indirect_branch_call == indirect_branch_thunk)
13644 || (option == s390_opt_function_return_reg
13645 && cfun->machine->function_return_reg == indirect_branch_thunk)
13646 || (option == s390_opt_function_return_mem
13647 && cfun->machine->function_return_mem == indirect_branch_thunk))
13648 {
13649 if (TARGET_CPU_Z10)
13650 indirect_branch_z10thunk_mask |= (1 << regno);
13651 else
13652 indirect_branch_prez10thunk_mask |= (1 << regno);
13653 }
13654 }
13655
13656 /* Output an inline thunk for indirect jumps. EXECUTE_TARGET can
13657 either be an address register or a label pointing to the location
13658 of the jump instruction. */
13659
13660 void
13661 s390_indirect_branch_via_inline_thunk (rtx execute_target)
13662 {
13663 if (TARGET_INDIRECT_BRANCH_TABLE)
13664 {
13665 char label[32];
13666
13667 ASM_GENERATE_INTERNAL_LABEL (label,
13668 indirect_branch_table_label[s390_opt_indirect_branch_jump],
13669 indirect_branch_table_label_no[s390_opt_indirect_branch_jump]++);
13670 ASM_OUTPUT_LABEL (asm_out_file, label);
13671 }
13672
13673 if (!TARGET_ZARCH)
13674 fputs ("\t.machinemode zarch\n", asm_out_file);
13675
13676 if (REG_P (execute_target))
13677 fprintf (asm_out_file, "\tex\t%%r0,0(%%r%d)\n", REGNO (execute_target));
13678 else
13679 output_asm_insn ("\texrl\t%%r0,%0", &execute_target);
13680
13681 if (!TARGET_ZARCH)
13682 fputs ("\t.machinemode esa\n", asm_out_file);
13683
13684 fputs ("0:\tj\t0b\n", asm_out_file);
13685 }
13686
13687 static bool
13688 s390_valid_pointer_mode (scalar_int_mode mode)
13689 {
13690 return (mode == SImode || (TARGET_64BIT && mode == DImode));
13691 }
13692
13693 /* Checks whether the given CALL_EXPR would use a call-saved
13694 register.  This is used to decide whether sibling call
13695 optimization could be performed on the respective function
13696 call. */
13697
13698 static bool
13699 s390_call_saved_register_used (tree call_expr)
13700 {
13701 CUMULATIVE_ARGS cum_v;
13702 cumulative_args_t cum;
13703 tree parameter;
13704 machine_mode mode;
13705 tree type;
13706 rtx parm_rtx;
13707 int reg, i;
13708
13709 INIT_CUMULATIVE_ARGS (cum_v, NULL, NULL, 0, 0);
13710 cum = pack_cumulative_args (&cum_v);
13711
13712 for (i = 0; i < call_expr_nargs (call_expr); i++)
13713 {
13714 parameter = CALL_EXPR_ARG (call_expr, i);
13715 gcc_assert (parameter);
13716
13717 /* For an undeclared variable passed as parameter we will get
13718 an ERROR_MARK node here. */
13719 if (TREE_CODE (parameter) == ERROR_MARK)
13720 return true;
13721
13722 type = TREE_TYPE (parameter);
13723 gcc_assert (type);
13724
13725 mode = TYPE_MODE (type);
13726 gcc_assert (mode);
13727
13728 /* We assume that in the target function all parameters are
13729 named. This only has an impact on vector argument register
13730 usage, none of which is call-saved. */
13731 if (pass_by_reference (&cum_v, mode, type, true))
13732 {
13733 mode = Pmode;
13734 type = build_pointer_type (type);
13735 }
13736
13737 parm_rtx = s390_function_arg (cum, mode, type, true);
13738
13739 s390_function_arg_advance (cum, mode, type, true);
13740
13741 if (!parm_rtx)
13742 continue;
13743
13744 if (REG_P (parm_rtx))
13745 {
13746 for (reg = 0; reg < REG_NREGS (parm_rtx); reg++)
13747 if (!call_used_regs[reg + REGNO (parm_rtx)])
13748 return true;
13749 }
13750
13751 if (GET_CODE (parm_rtx) == PARALLEL)
13752 {
13753 int i;
13754
13755 for (i = 0; i < XVECLEN (parm_rtx, 0); i++)
13756 {
13757 rtx r = XEXP (XVECEXP (parm_rtx, 0, i), 0);
13758
13759 gcc_assert (REG_P (r));
13760
13761 for (reg = 0; reg < REG_NREGS (r); reg++)
13762 if (!call_used_regs[reg + REGNO (r)])
13763 return true;
13764 }
13765 }
13766
13767 }
13768 return false;
13769 }
13770
13771 /* Return true if the given call expression can be
13772 turned into a sibling call.
13773 DECL holds the declaration of the function to be called whereas
13774 EXP is the call expression itself. */
13775
13776 static bool
13777 s390_function_ok_for_sibcall (tree decl, tree exp)
13778 {
13779 /* The TPF epilogue uses register 1. */
13780 if (TARGET_TPF_PROFILING)
13781 return false;
13782
13783 /* The 31 bit PLT code uses register 12 (GOT pointer - caller saved)
13784 which would have to be restored before the sibcall. */
13785 if (!TARGET_64BIT && flag_pic && decl && !targetm.binds_local_p (decl))
13786 return false;
13787
13788 /* The thunks for indirect branches require r1 if no exrl is
13789 available. r1 might not be available when doing a sibling
13790 call. */
13791 if (TARGET_INDIRECT_BRANCH_NOBP_CALL
13792 && !TARGET_CPU_Z10
13793 && !decl)
13794 return false;
13795
13796 /* Register 6 on s390 is available as an argument register but unfortunately
13797 is call-saved.  This makes functions needing this register for arguments
13798 not suitable for sibcalls. */
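/* For example, a function whose fifth integer or pointer argument is
   passed in %r6 cannot be sibcalled for this reason.  */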
13799 return !s390_call_saved_register_used (exp);
13800 }
13801
13802 /* Return the fixed registers used for condition codes. */
13803
13804 static bool
13805 s390_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
13806 {
13807 *p1 = CC_REGNUM;
13808 *p2 = INVALID_REGNUM;
13809
13810 return true;
13811 }
13812
13813 /* This function is used by the call expanders of the machine description.
13814 It emits the call insn itself together with the necessary operations
13815 to adjust the target address and returns the emitted insn.
13816 ADDR_LOCATION is the target address rtx
13817 TLS_CALL the location of the thread-local symbol
13818 RESULT_REG the register where the result of the call should be stored
13819 RETADDR_REG the register where the return address should be stored
13820 If this parameter is NULL_RTX the call is considered
13821 to be a sibling call. */
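/* Schematically, the call insn emitted below has the pattern

     (parallel [(set RESULT_REG (call (mem ADDR_LOCATION) 0))
                (clobber RETADDR_REG)
                (use TLS_CALL)
                (clobber <thunk scratch register>)])

   where every element besides the call itself is present only when the
   corresponding argument was given (or, for the last clobber, when a
   pre-z10 indirect branch thunk needs a scratch register).  */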
13822
13823 rtx_insn *
13824 s390_emit_call (rtx addr_location, rtx tls_call, rtx result_reg,
13825 rtx retaddr_reg)
13826 {
13827 bool plt_call = false;
13828 rtx_insn *insn;
13829 rtx vec[4] = { NULL_RTX };
13830 int elts = 0;
13831 rtx *call = &vec[0];
13832 rtx *clobber_ret_reg = &vec[1];
13833 rtx *use = &vec[2];
13834 rtx *clobber_thunk_reg = &vec[3];
13835 int i;
13836
13837 /* Direct function calls need special treatment. */
13838 if (GET_CODE (addr_location) == SYMBOL_REF)
13839 {
13840 /* When calling a global routine in PIC mode, we must
13841 replace the symbol itself with the PLT stub. */
13842 if (flag_pic && !SYMBOL_REF_LOCAL_P (addr_location))
13843 {
13844 if (TARGET_64BIT || retaddr_reg != NULL_RTX)
13845 {
13846 addr_location = gen_rtx_UNSPEC (Pmode,
13847 gen_rtvec (1, addr_location),
13848 UNSPEC_PLT);
13849 addr_location = gen_rtx_CONST (Pmode, addr_location);
13850 plt_call = true;
13851 }
13852 else
13853 /* For -fpic code the PLT entries might use r12 which is
13854 call-saved. Therefore we cannot do a sibcall when
13855 calling directly using a symbol ref. When reaching
13856 this point we decided (in s390_function_ok_for_sibcall)
13857 to do a sibcall for a function pointer but one of the
13858 optimizers was able to get rid of the function pointer
13859 by propagating the symbol ref into the call. This
13860 optimization is illegal for S/390 so we turn the direct
13861 call into an indirect call again. */
13862 addr_location = force_reg (Pmode, addr_location);
13863 }
13864
13865 /* Unless we can use the bras(l) insn, force the
13866 routine address into a register. */
13867 if (!TARGET_SMALL_EXEC && !TARGET_CPU_ZARCH)
13868 {
13869 if (flag_pic)
13870 addr_location = legitimize_pic_address (addr_location, 0);
13871 else
13872 addr_location = force_reg (Pmode, addr_location);
13873 }
13874 }
13875
13876 /* If it is already an indirect call or the code above moved the
13877 SYMBOL_REF to somewhere else, make sure the address can be found in
13878 register 1. */
13879 if (retaddr_reg == NULL_RTX
13880 && GET_CODE (addr_location) != SYMBOL_REF
13881 && !plt_call)
13882 {
13883 emit_move_insn (gen_rtx_REG (Pmode, SIBCALL_REGNUM), addr_location);
13884 addr_location = gen_rtx_REG (Pmode, SIBCALL_REGNUM);
13885 }
13886
13887 if (TARGET_INDIRECT_BRANCH_NOBP_CALL
13888 && GET_CODE (addr_location) != SYMBOL_REF
13889 && !plt_call)
13890 {
13891 /* Indirect branch thunks require the target to be a single GPR. */
13892 addr_location = force_reg (Pmode, addr_location);
13893
13894 /* Without exrl the indirect branch thunks need an additional
13895 register for larl;ex.  */
13896 if (!TARGET_CPU_Z10)
13897 {
13898 *clobber_thunk_reg = gen_rtx_REG (Pmode, INDIRECT_BRANCH_THUNK_REGNUM);
13899 *clobber_thunk_reg = gen_rtx_CLOBBER (VOIDmode, *clobber_thunk_reg);
13900 }
13901 }
13902
13903 addr_location = gen_rtx_MEM (QImode, addr_location);
13904 *call = gen_rtx_CALL (VOIDmode, addr_location, const0_rtx);
13905
13906 if (result_reg != NULL_RTX)
13907 *call = gen_rtx_SET (result_reg, *call);
13908
13909 if (retaddr_reg != NULL_RTX)
13910 {
13911 *clobber_ret_reg = gen_rtx_CLOBBER (VOIDmode, retaddr_reg);
13912
13913 if (tls_call != NULL_RTX)
13914 *use = gen_rtx_USE (VOIDmode, tls_call);
13915 }
13916
13917
13918 for (i = 0; i < 4; i++)
13919 if (vec[i] != NULL_RTX)
13920 elts++;
13921
13922 if (elts > 1)
13923 {
13924 rtvec v;
13925 int e = 0;
13926
13927 v = rtvec_alloc (elts);
13928 for (i = 0; i < 4; i++)
13929 if (vec[i] != NULL_RTX)
13930 {
13931 RTVEC_ELT (v, e) = vec[i];
13932 e++;
13933 }
13934
13935 *call = gen_rtx_PARALLEL (VOIDmode, v);
13936 }
13937
13938 insn = emit_call_insn (*call);
13939
13940 /* 31-bit PLT stubs and tls calls use the GOT register implicitly. */
13941 if ((!TARGET_64BIT && plt_call) || tls_call != NULL_RTX)
13942 {
13943 /* s390_function_ok_for_sibcall should
13944 have denied sibcalls in this case. */
13945 gcc_assert (retaddr_reg != NULL_RTX);
13946 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), gen_rtx_REG (Pmode, 12));
13947 }
13948 return insn;
13949 }
13950
13951 /* Implement TARGET_CONDITIONAL_REGISTER_USAGE. */
13952
13953 static void
13954 s390_conditional_register_usage (void)
13955 {
13956 int i;
13957
13958 if (flag_pic)
13959 {
13960 fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
13961 call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
13962 }
13963 if (TARGET_CPU_ZARCH)
13964 {
13965 fixed_regs[BASE_REGNUM] = 0;
13966 call_used_regs[BASE_REGNUM] = 0;
13967 fixed_regs[RETURN_REGNUM] = 0;
13968 call_used_regs[RETURN_REGNUM] = 0;
13969 }
13970 if (TARGET_64BIT)
13971 {
13972 for (i = FPR8_REGNUM; i <= FPR15_REGNUM; i++)
13973 call_used_regs[i] = call_really_used_regs[i] = 0;
13974 }
13975 else
13976 {
13977 call_used_regs[FPR4_REGNUM] = call_really_used_regs[FPR4_REGNUM] = 0;
13978 call_used_regs[FPR6_REGNUM] = call_really_used_regs[FPR6_REGNUM] = 0;
13979 }
13980
13981 if (TARGET_SOFT_FLOAT)
13982 {
13983 for (i = FPR0_REGNUM; i <= FPR15_REGNUM; i++)
13984 call_used_regs[i] = fixed_regs[i] = 1;
13985 }
13986
13987 /* Disable v16 - v31 for non-vector target. */
13988 if (!TARGET_VX)
13989 {
13990 for (i = VR16_REGNUM; i <= VR31_REGNUM; i++)
13991 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
13992 }
13993 }
13994
13995 /* Corresponding function to eh_return expander. */
13996
13997 static GTY(()) rtx s390_tpf_eh_return_symbol;
13998 void
13999 s390_emit_tpf_eh_return (rtx target)
14000 {
14001 rtx_insn *insn;
14002 rtx reg, orig_ra;
14003
14004 if (!s390_tpf_eh_return_symbol)
14005 s390_tpf_eh_return_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tpf_eh_return");
14006
14007 reg = gen_rtx_REG (Pmode, 2);
14008 orig_ra = gen_rtx_REG (Pmode, 3);
14009
14010 emit_move_insn (reg, target);
14011 emit_move_insn (orig_ra, get_hard_reg_initial_val (Pmode, RETURN_REGNUM));
14012 insn = s390_emit_call (s390_tpf_eh_return_symbol, NULL_RTX, reg,
14013 gen_rtx_REG (Pmode, RETURN_REGNUM));
14014 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), reg);
14015 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), orig_ra);
14016
14017 emit_move_insn (EH_RETURN_HANDLER_RTX, reg);
14018 }
14019
14020 /* Rework the prologue/epilogue to avoid saving/restoring
14021 registers unnecessarily. */
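/* For instance, a prologue store multiple like "stmg %r6,%r15,..."
   emitted under worst-case assumptions is narrowed here to cover only
   the GPRs that actually need saving, or removed entirely, and likewise
   for the corresponding load multiple in the epilogue.  */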
14022
14023 static void
14024 s390_optimize_prologue (void)
14025 {
14026 rtx_insn *insn, *new_insn, *next_insn;
14027
14028 /* Do a final recompute of the frame-related data. */
14029 s390_optimize_register_info ();
14030
14031 /* If all special registers are in fact used, there's nothing we
14032 can do, so no point in walking the insn list. */
14033
14034 if (cfun_frame_layout.first_save_gpr <= BASE_REGNUM
14035 && cfun_frame_layout.last_save_gpr >= BASE_REGNUM
14036 && (TARGET_CPU_ZARCH
14037 || (cfun_frame_layout.first_save_gpr <= RETURN_REGNUM
14038 && cfun_frame_layout.last_save_gpr >= RETURN_REGNUM)))
14039 return;
14040
14041 /* Search for prologue/epilogue insns and replace them. */
14042
14043 for (insn = get_insns (); insn; insn = next_insn)
14044 {
14045 int first, last, off;
14046 rtx set, base, offset;
14047 rtx pat;
14048
14049 next_insn = NEXT_INSN (insn);
14050
14051 if (! NONJUMP_INSN_P (insn) || ! RTX_FRAME_RELATED_P (insn))
14052 continue;
14053
14054 pat = PATTERN (insn);
14055
14056 /* Remove ldgr/lgdr instructions used for saving and restoring
14057 GPRs if possible. */
14058 if (TARGET_Z10)
14059 {
14060 rtx tmp_pat = pat;
14061
14062 if (INSN_CODE (insn) == CODE_FOR_stack_restore_from_fpr)
14063 tmp_pat = XVECEXP (pat, 0, 0);
14064
14065 if (GET_CODE (tmp_pat) == SET
14066 && GET_MODE (SET_SRC (tmp_pat)) == DImode
14067 && REG_P (SET_SRC (tmp_pat))
14068 && REG_P (SET_DEST (tmp_pat)))
14069 {
14070 int src_regno = REGNO (SET_SRC (tmp_pat));
14071 int dest_regno = REGNO (SET_DEST (tmp_pat));
14072 int gpr_regno;
14073 int fpr_regno;
14074
14075 if (!((GENERAL_REGNO_P (src_regno)
14076 && FP_REGNO_P (dest_regno))
14077 || (FP_REGNO_P (src_regno)
14078 && GENERAL_REGNO_P (dest_regno))))
14079 continue;
14080
14081 gpr_regno = GENERAL_REGNO_P (src_regno) ? src_regno : dest_regno;
14082 fpr_regno = FP_REGNO_P (src_regno) ? src_regno : dest_regno;
14083
14084 /* GPR must be call-saved, FPR must be call-clobbered. */
14085 if (!call_really_used_regs[fpr_regno]
14086 || call_really_used_regs[gpr_regno])
14087 continue;
14088
14089 /* It must not happen that what we once saved in an FPR now
14090 needs a stack slot. */
14091 gcc_assert (cfun_gpr_save_slot (gpr_regno) != SAVE_SLOT_STACK);
14092
14093 if (cfun_gpr_save_slot (gpr_regno) == SAVE_SLOT_NONE)
14094 {
14095 remove_insn (insn);
14096 continue;
14097 }
14098 }
14099 }
14100
14101 if (GET_CODE (pat) == PARALLEL
14102 && store_multiple_operation (pat, VOIDmode))
14103 {
14104 set = XVECEXP (pat, 0, 0);
14105 first = REGNO (SET_SRC (set));
14106 last = first + XVECLEN (pat, 0) - 1;
14107 offset = const0_rtx;
14108 base = eliminate_constant_term (XEXP (SET_DEST (set), 0), &offset);
14109 off = INTVAL (offset);
14110
14111 if (GET_CODE (base) != REG || off < 0)
14112 continue;
14113 if (cfun_frame_layout.first_save_gpr != -1
14114 && (cfun_frame_layout.first_save_gpr < first
14115 || cfun_frame_layout.last_save_gpr > last))
14116 continue;
14117 if (REGNO (base) != STACK_POINTER_REGNUM
14118 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
14119 continue;
14120 if (first > BASE_REGNUM || last < BASE_REGNUM)
14121 continue;
14122
14123 if (cfun_frame_layout.first_save_gpr != -1)
14124 {
14125 rtx s_pat = save_gprs (base,
14126 off + (cfun_frame_layout.first_save_gpr
14127 - first) * UNITS_PER_LONG,
14128 cfun_frame_layout.first_save_gpr,
14129 cfun_frame_layout.last_save_gpr);
14130 new_insn = emit_insn_before (s_pat, insn);
14131 INSN_ADDRESSES_NEW (new_insn, -1);
14132 }
14133
14134 remove_insn (insn);
14135 continue;
14136 }
14137
14138 if (cfun_frame_layout.first_save_gpr == -1
14139 && GET_CODE (pat) == SET
14140 && GENERAL_REG_P (SET_SRC (pat))
14141 && GET_CODE (SET_DEST (pat)) == MEM)
14142 {
14143 set = pat;
14144 first = REGNO (SET_SRC (set));
14145 offset = const0_rtx;
14146 base = eliminate_constant_term (XEXP (SET_DEST (set), 0), &offset);
14147 off = INTVAL (offset);
14148
14149 if (GET_CODE (base) != REG || off < 0)
14150 continue;
14151 if (REGNO (base) != STACK_POINTER_REGNUM
14152 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
14153 continue;
14154
14155 remove_insn (insn);
14156 continue;
14157 }
14158
14159 if (GET_CODE (pat) == PARALLEL
14160 && load_multiple_operation (pat, VOIDmode))
14161 {
14162 set = XVECEXP (pat, 0, 0);
14163 first = REGNO (SET_DEST (set));
14164 last = first + XVECLEN (pat, 0) - 1;
14165 offset = const0_rtx;
14166 base = eliminate_constant_term (XEXP (SET_SRC (set), 0), &offset);
14167 off = INTVAL (offset);
14168
14169 if (GET_CODE (base) != REG || off < 0)
14170 continue;
14171
14172 if (cfun_frame_layout.first_restore_gpr != -1
14173 && (cfun_frame_layout.first_restore_gpr < first
14174 || cfun_frame_layout.last_restore_gpr > last))
14175 continue;
14176 if (REGNO (base) != STACK_POINTER_REGNUM
14177 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
14178 continue;
14179 if (first > BASE_REGNUM || last < BASE_REGNUM)
14180 continue;
14181
14182 if (cfun_frame_layout.first_restore_gpr != -1)
14183 {
14184 rtx rpat = restore_gprs (base,
14185 off + (cfun_frame_layout.first_restore_gpr
14186 - first) * UNITS_PER_LONG,
14187 cfun_frame_layout.first_restore_gpr,
14188 cfun_frame_layout.last_restore_gpr);
14189
14190 /* Remove REG_CFA_RESTOREs for registers that we no
14191 longer need to save. */
14192 REG_NOTES (rpat) = REG_NOTES (insn);
14193 for (rtx *ptr = &REG_NOTES (rpat); *ptr; )
14194 if (REG_NOTE_KIND (*ptr) == REG_CFA_RESTORE
14195 && ((int) REGNO (XEXP (*ptr, 0))
14196 < cfun_frame_layout.first_restore_gpr))
14197 *ptr = XEXP (*ptr, 1);
14198 else
14199 ptr = &XEXP (*ptr, 1);
14200 new_insn = emit_insn_before (rpat, insn);
14201 RTX_FRAME_RELATED_P (new_insn) = 1;
14202 INSN_ADDRESSES_NEW (new_insn, -1);
14203 }
14204
14205 remove_insn (insn);
14206 continue;
14207 }
14208
14209 if (cfun_frame_layout.first_restore_gpr == -1
14210 && GET_CODE (pat) == SET
14211 && GENERAL_REG_P (SET_DEST (pat))
14212 && GET_CODE (SET_SRC (pat)) == MEM)
14213 {
14214 set = pat;
14215 first = REGNO (SET_DEST (set));
14216 offset = const0_rtx;
14217 base = eliminate_constant_term (XEXP (SET_SRC (set), 0), &offset);
14218 off = INTVAL (offset);
14219
14220 if (GET_CODE (base) != REG || off < 0)
14221 continue;
14222
14223 if (REGNO (base) != STACK_POINTER_REGNUM
14224 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
14225 continue;
14226
14227 remove_insn (insn);
14228 continue;
14229 }
14230 }
14231 }
14232
14233 /* On z10 and later the dynamic branch prediction must see the
14234 backward jump within a certain window.  If not, it falls back to
14235 the static prediction. This function rearranges the loop backward
14236 branch in a way which makes the static prediction always correct.
14237 The function returns true if it added an instruction. */
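/* Schematically:

       start:                        start:
         ...                           ...
         jCC   start        ==>        jNCC  new_label
                                       j     start
                                   new_label:

   i.e. the conditional backward branch is replaced by a short forward
   branch around an unconditional backward jump, both of which the
   static prediction handles correctly.  */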
14238 static bool
14239 s390_fix_long_loop_prediction (rtx_insn *insn)
14240 {
14241 rtx set = single_set (insn);
14242 rtx code_label, label_ref;
14243 rtx_insn *uncond_jump;
14244 rtx_insn *cur_insn;
14245 rtx tmp;
14246 int distance;
14247
14248 /* This will exclude branch on count and branch on index patterns
14249 since these are correctly statically predicted. */
14250 if (!set
14251 || SET_DEST (set) != pc_rtx
14252 || GET_CODE (SET_SRC(set)) != IF_THEN_ELSE)
14253 return false;
14254
14255 /* Skip conditional returns. */
14256 if (ANY_RETURN_P (XEXP (SET_SRC (set), 1))
14257 && XEXP (SET_SRC (set), 2) == pc_rtx)
14258 return false;
14259
14260 label_ref = (GET_CODE (XEXP (SET_SRC (set), 1)) == LABEL_REF ?
14261 XEXP (SET_SRC (set), 1) : XEXP (SET_SRC (set), 2));
14262
14263 gcc_assert (GET_CODE (label_ref) == LABEL_REF);
14264
14265 code_label = XEXP (label_ref, 0);
14266
14267 if (INSN_ADDRESSES (INSN_UID (code_label)) == -1
14268 || INSN_ADDRESSES (INSN_UID (insn)) == -1
14269 || (INSN_ADDRESSES (INSN_UID (insn))
14270 - INSN_ADDRESSES (INSN_UID (code_label)) < PREDICT_DISTANCE))
14271 return false;
14272
14273 for (distance = 0, cur_insn = PREV_INSN (insn);
14274 distance < PREDICT_DISTANCE - 6;
14275 distance += get_attr_length (cur_insn), cur_insn = PREV_INSN (cur_insn))
14276 if (!cur_insn || JUMP_P (cur_insn) || LABEL_P (cur_insn))
14277 return false;
14278
14279 rtx_code_label *new_label = gen_label_rtx ();
14280 uncond_jump = emit_jump_insn_after (
14281 gen_rtx_SET (pc_rtx,
14282 gen_rtx_LABEL_REF (VOIDmode, code_label)),
14283 insn);
14284 emit_label_after (new_label, uncond_jump);
14285
14286 tmp = XEXP (SET_SRC (set), 1);
14287 XEXP (SET_SRC (set), 1) = XEXP (SET_SRC (set), 2);
14288 XEXP (SET_SRC (set), 2) = tmp;
14289 INSN_CODE (insn) = -1;
14290
14291 XEXP (label_ref, 0) = new_label;
14292 JUMP_LABEL (insn) = new_label;
14293 JUMP_LABEL (uncond_jump) = code_label;
14294
14295 return true;
14296 }
14297
14298 /* Returns 1 if INSN reads the value of REG for purposes not related
14299 to addressing of memory, and 0 otherwise. */
14300 static int
14301 s390_non_addr_reg_read_p (rtx reg, rtx_insn *insn)
14302 {
14303 return reg_referenced_p (reg, PATTERN (insn))
14304 && !reg_used_in_mem_p (REGNO (reg), PATTERN (insn));
14305 }
14306
14307 /* Starting from INSN find_cond_jump looks downwards in the insn
14308 stream for a single jump insn which is the last user of the
14309 condition code set in INSN. */
14310 static rtx_insn *
14311 find_cond_jump (rtx_insn *insn)
14312 {
14313 for (; insn; insn = NEXT_INSN (insn))
14314 {
14315 rtx ite, cc;
14316
14317 if (LABEL_P (insn))
14318 break;
14319
14320 if (!JUMP_P (insn))
14321 {
14322 if (reg_mentioned_p (gen_rtx_REG (CCmode, CC_REGNUM), insn))
14323 break;
14324 continue;
14325 }
14326
14327 /* This will be triggered by a return. */
14328 if (GET_CODE (PATTERN (insn)) != SET)
14329 break;
14330
14331 gcc_assert (SET_DEST (PATTERN (insn)) == pc_rtx);
14332 ite = SET_SRC (PATTERN (insn));
14333
14334 if (GET_CODE (ite) != IF_THEN_ELSE)
14335 break;
14336
14337 cc = XEXP (XEXP (ite, 0), 0);
14338 if (!REG_P (cc) || !CC_REGNO_P (REGNO (cc)))
14339 break;
14340
14341 if (find_reg_note (insn, REG_DEAD, cc))
14342 return insn;
14343 break;
14344 }
14345
14346 return NULL;
14347 }
14348
14349 /* Swap the condition in COND and the operands in OP0 and OP1 so that
14350 the semantics does not change. If NULL_RTX is passed as COND the
14351 function tries to find the conditional jump starting with INSN. */
14352 static void
14353 s390_swap_cmp (rtx cond, rtx *op0, rtx *op1, rtx_insn *insn)
14354 {
14355 rtx tmp = *op0;
14356
14357 if (cond == NULL_RTX)
14358 {
14359 rtx_insn *jump = find_cond_jump (NEXT_INSN (insn));
14360 rtx set = jump ? single_set (jump) : NULL_RTX;
14361
14362 if (set == NULL_RTX)
14363 return;
14364
14365 cond = XEXP (SET_SRC (set), 0);
14366 }
14367
14368 *op0 = *op1;
14369 *op1 = tmp;
14370 PUT_CODE (cond, swap_condition (GET_CODE (cond)));
14371 }
14372
14373 /* On z10, instructions of the compare-and-branch family have the
14374 property of accessing the register occurring as the second operand with
14375 its bits complemented. If such a compare is grouped with a second
14376 instruction that accesses the same register non-complemented, and
14377 if that register's value is delivered via a bypass, then the
14378 pipeline recycles, thereby causing significant performance decline.
14379 This function locates such situations and exchanges the two
14380 operands of the compare.  The function returns true whenever it
14381 added an insn. */
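/* For example, when a compare-and-branch on (%r1, %r2) is grouped with
   a following insn that also reads %r2, the compare operands and the
   condition are swapped if possible; otherwise a NOP is inserted after
   the compare so that the two insns end up in different groups.  */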
14382 static bool
14383 s390_z10_optimize_cmp (rtx_insn *insn)
14384 {
14385 rtx_insn *prev_insn, *next_insn;
14386 bool insn_added_p = false;
14387 rtx cond, *op0, *op1;
14388
14389 if (GET_CODE (PATTERN (insn)) == PARALLEL)
14390 {
14391 /* Handle compare and branch and branch on count
14392 instructions. */
14393 rtx pattern = single_set (insn);
14394
14395 if (!pattern
14396 || SET_DEST (pattern) != pc_rtx
14397 || GET_CODE (SET_SRC (pattern)) != IF_THEN_ELSE)
14398 return false;
14399
14400 cond = XEXP (SET_SRC (pattern), 0);
14401 op0 = &XEXP (cond, 0);
14402 op1 = &XEXP (cond, 1);
14403 }
14404 else if (GET_CODE (PATTERN (insn)) == SET)
14405 {
14406 rtx src, dest;
14407
14408 /* Handle normal compare instructions. */
14409 src = SET_SRC (PATTERN (insn));
14410 dest = SET_DEST (PATTERN (insn));
14411
14412 if (!REG_P (dest)
14413 || !CC_REGNO_P (REGNO (dest))
14414 || GET_CODE (src) != COMPARE)
14415 return false;
14416
14417 /* s390_swap_cmp will try to find the conditional
14418 jump when passing NULL_RTX as condition. */
14419 cond = NULL_RTX;
14420 op0 = &XEXP (src, 0);
14421 op1 = &XEXP (src, 1);
14422 }
14423 else
14424 return false;
14425
14426 if (!REG_P (*op0) || !REG_P (*op1))
14427 return false;
14428
14429 if (GET_MODE_CLASS (GET_MODE (*op0)) != MODE_INT)
14430 return false;
14431
14432 /* Swap the COMPARE arguments and its mask if there is a
14433 conflicting access in the previous insn. */
14434 prev_insn = prev_active_insn (insn);
14435 if (prev_insn != NULL_RTX && INSN_P (prev_insn)
14436 && reg_referenced_p (*op1, PATTERN (prev_insn)))
14437 s390_swap_cmp (cond, op0, op1, insn);
14438
14439 /* Check if there is a conflict with the next insn. If there
14440 was no conflict with the previous insn, then swap the
14441 COMPARE arguments and its mask. If we already swapped
14442 the operands, or if swapping them would cause a conflict
14443 with the previous insn, issue a NOP after the COMPARE in
14444 order to separate the two instructions. */
14445 next_insn = next_active_insn (insn);
14446 if (next_insn != NULL_RTX && INSN_P (next_insn)
14447 && s390_non_addr_reg_read_p (*op1, next_insn))
14448 {
14449 if (prev_insn != NULL_RTX && INSN_P (prev_insn)
14450 && s390_non_addr_reg_read_p (*op0, prev_insn))
14451 {
14452 if (REGNO (*op1) == 0)
14453 emit_insn_after (gen_nop_lr1 (), insn);
14454 else
14455 emit_insn_after (gen_nop_lr0 (), insn);
14456 insn_added_p = true;
14457 }
14458 else
14459 s390_swap_cmp (cond, op0, op1, insn);
14460 }
14461 return insn_added_p;
14462 }
14463
14464 /* Number of INSNs to be scanned backward in the last BB of the loop
14465 and forward in the first BB of the loop. This usually should be a
14466 bit more than the number of INSNs which could go into one
14467 group. */
14468 #define S390_OSC_SCAN_INSN_NUM 5
14469
14470 /* Scan LOOP for static OSC collisions and return true if an osc_break
14471 should be issued for this loop. */
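/* OSC refers to the operand-store-compare hazard, i.e. a load hitting a
   store that is still in flight.  The scan below looks for a store near
   the end of the loop and a load at the start of the next iteration
   which use the same base/index/displacement expression while the
   address registers get modified in between; for such loops the caller
   inserts an osc_break.  */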
14472 static bool
14473 s390_adjust_loop_scan_osc (struct loop* loop)
14474
14475 {
14476 HARD_REG_SET modregs, newregs;
14477 rtx_insn *insn, *store_insn = NULL;
14478 rtx set;
14479 struct s390_address addr_store, addr_load;
14480 subrtx_iterator::array_type array;
14481 int insn_count;
14482
14483 CLEAR_HARD_REG_SET (modregs);
14484
14485 insn_count = 0;
14486 FOR_BB_INSNS_REVERSE (loop->latch, insn)
14487 {
14488 if (!INSN_P (insn) || INSN_CODE (insn) <= 0)
14489 continue;
14490
14491 insn_count++;
14492 if (insn_count > S390_OSC_SCAN_INSN_NUM)
14493 return false;
14494
14495 find_all_hard_reg_sets (insn, &newregs, true);
14496 IOR_HARD_REG_SET (modregs, newregs);
14497
14498 set = single_set (insn);
14499 if (!set)
14500 continue;
14501
14502 if (MEM_P (SET_DEST (set))
14503 && s390_decompose_address (XEXP (SET_DEST (set), 0), &addr_store))
14504 {
14505 store_insn = insn;
14506 break;
14507 }
14508 }
14509
14510 if (store_insn == NULL_RTX)
14511 return false;
14512
14513 insn_count = 0;
14514 FOR_BB_INSNS (loop->header, insn)
14515 {
14516 if (!INSN_P (insn) || INSN_CODE (insn) <= 0)
14517 continue;
14518
14519 if (insn == store_insn)
14520 return false;
14521
14522 insn_count++;
14523 if (insn_count > S390_OSC_SCAN_INSN_NUM)
14524 return false;
14525
14526 find_all_hard_reg_sets (insn, &newregs, true);
14527 IOR_HARD_REG_SET (modregs, newregs);
14528
14529 set = single_set (insn);
14530 if (!set)
14531 continue;
14532
14533 /* An intermediate store disrupts static OSC checking
14534 anyway. */
14535 if (MEM_P (SET_DEST (set))
14536 && s390_decompose_address (XEXP (SET_DEST (set), 0), NULL))
14537 return false;
14538
14539 FOR_EACH_SUBRTX (iter, array, SET_SRC (set), NONCONST)
14540 if (MEM_P (*iter)
14541 && s390_decompose_address (XEXP (*iter, 0), &addr_load)
14542 && rtx_equal_p (addr_load.base, addr_store.base)
14543 && rtx_equal_p (addr_load.indx, addr_store.indx)
14544 && rtx_equal_p (addr_load.disp, addr_store.disp))
14545 {
14546 if ((addr_load.base != NULL_RTX
14547 && TEST_HARD_REG_BIT (modregs, REGNO (addr_load.base)))
14548 || (addr_load.indx != NULL_RTX
14549 && TEST_HARD_REG_BIT (modregs, REGNO (addr_load.indx))))
14550 return true;
14551 }
14552 }
14553 return false;
14554 }
14555
14556 /* Look for adjustments which can be done on simple innermost
14557 loops. */
14558 static void
14559 s390_adjust_loops ()
14560 {
14561 struct loop *loop = NULL;
14562
14563 df_analyze ();
14564 compute_bb_for_insn ();
14565
14566 /* Find the loops. */
14567 loop_optimizer_init (AVOID_CFG_MODIFICATIONS);
14568
14569 FOR_EACH_LOOP (loop, LI_ONLY_INNERMOST)
14570 {
14571 if (dump_file)
14572 {
14573 flow_loop_dump (loop, dump_file, NULL, 0);
14574 fprintf (dump_file, ";; OSC loop scan Loop: ");
14575 }
14576 if (loop->latch == NULL
14577 || pc_set (BB_END (loop->latch)) == NULL_RTX
14578 || !s390_adjust_loop_scan_osc (loop))
14579 {
14580 if (dump_file)
14581 {
14582 if (loop->latch == NULL)
14583 fprintf (dump_file, " multiple backward jumps\n");
14584 else
14585 {
14586 fprintf (dump_file, " header insn: %d latch insn: %d ",
14587 INSN_UID (BB_HEAD (loop->header)),
14588 INSN_UID (BB_END (loop->latch)));
14589 if (pc_set (BB_END (loop->latch)) == NULL_RTX)
14590 fprintf (dump_file, " loop does not end with jump\n");
14591 else
14592 fprintf (dump_file, " not instrumented\n");
14593 }
14594 }
14595 }
14596 else
14597 {
14598 rtx_insn *new_insn;
14599
14600 if (dump_file)
14601 fprintf (dump_file, " adding OSC break insn: ");
14602 new_insn = emit_insn_before (gen_osc_break (),
14603 BB_END (loop->latch));
14604 INSN_ADDRESSES_NEW (new_insn, -1);
14605 }
14606 }
14607
14608 loop_optimizer_finalize ();
14609
14610 df_finish_pass (false);
14611 }
14612
14613 /* Perform machine-dependent processing. */
14614
14615 static void
14616 s390_reorg (void)
14617 {
14618 bool pool_overflow = false;
14619 int hw_before, hw_after;
14620
14621 if (s390_tune == PROCESSOR_2964_Z13)
14622 s390_adjust_loops ();
14623
14624 /* Make sure all splits have been performed; splits after
14625 machine_dependent_reorg might confuse insn length counts. */
14626 split_all_insns_noflow ();
14627
14628 /* Install the main literal pool and the associated base
14629 register load insns.
14630
14631 In addition, there are two problematic situations we need
14632 to correct:
14633
14634 - the literal pool might be > 4096 bytes in size, so that
14635 some of its elements cannot be directly accessed
14636
14637 - a branch target might be > 64K away from the branch, so that
14638 it is not possible to use a PC-relative instruction.
14639
14640 To fix those, we split the single literal pool into multiple
14641 pool chunks, reloading the pool base register at various
14642 points throughout the function to ensure it always points to
14643 the pool chunk the following code expects, and / or replace
14644 PC-relative branches by absolute branches.
14645
14646 However, the two problems are interdependent: splitting the
14647 literal pool can move a branch further away from its target,
14648 causing the 64K limit to overflow, and on the other hand,
14649 replacing a PC-relative branch by an absolute branch means
14650 we need to put the branch target address into the literal
14651 pool, possibly causing it to overflow.
14652
14653 So, we loop trying to fix up both problems until we manage
14654 to satisfy both conditions at the same time. Note that the
14655 loop is guaranteed to terminate as every pass of the loop
14656 strictly decreases the total number of PC-relative branches
14657 in the function. (This is not completely true as there
14658 might be branch-over-pool insns introduced by chunkify_start.
14659 Those never need to be split however.) */
14660
14661 for (;;)
14662 {
14663 struct constant_pool *pool = NULL;
14664
14665 /* Collect the literal pool. */
14666 if (!pool_overflow)
14667 {
14668 pool = s390_mainpool_start ();
14669 if (!pool)
14670 pool_overflow = true;
14671 }
14672
14673 /* If literal pool overflowed, start to chunkify it. */
14674 if (pool_overflow)
14675 pool = s390_chunkify_start ();
14676
14677 /* Split out-of-range branches. If this has created new
14678 literal pool entries, cancel current chunk list and
14679 recompute it. zSeries machines have large branch
14680 instructions, so we never need to split a branch. */
14681 if (!TARGET_CPU_ZARCH && s390_split_branches ())
14682 {
14683 if (pool_overflow)
14684 s390_chunkify_cancel (pool);
14685 else
14686 s390_mainpool_cancel (pool);
14687
14688 continue;
14689 }
14690
14691 /* If we made it up to here, both conditions are satisfied.
14692 Finish up literal pool related changes. */
14693 if (pool_overflow)
14694 s390_chunkify_finish (pool);
14695 else
14696 s390_mainpool_finish (pool);
14697
14698 /* We're done splitting branches. */
14699 cfun->machine->split_branches_pending_p = false;
14700 break;
14701 }
14702
14703 /* Generate out-of-pool execute target insns. */
14704 if (TARGET_CPU_ZARCH)
14705 {
14706 rtx_insn *insn, *target;
14707 rtx label;
14708
14709 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
14710 {
14711 label = s390_execute_label (insn);
14712 if (!label)
14713 continue;
14714
14715 gcc_assert (label != const0_rtx);
14716
14717 target = emit_label (XEXP (label, 0));
14718 INSN_ADDRESSES_NEW (target, -1);
14719
14720 if (JUMP_P (insn))
14721 {
14722 target = emit_jump_insn (s390_execute_target (insn));
14723 /* This is important in order to keep a table jump
14724 pointing at the jump table label.  Only then is it
14725 recognized as a table jump.  */
14726 JUMP_LABEL (target) = JUMP_LABEL (insn);
14727 }
14728 else
14729 target = emit_insn (s390_execute_target (insn));
14730 INSN_ADDRESSES_NEW (target, -1);
14731 }
14732 }
14733
14734 /* Try to optimize prologue and epilogue further. */
14735 s390_optimize_prologue ();
14736
14737 /* Walk over the insns and do some >=z10 specific changes. */
14738 if (s390_tune >= PROCESSOR_2097_Z10)
14739 {
14740 rtx_insn *insn;
14741 bool insn_added_p = false;
14742
14743 /* The insn lengths and addresses have to be up to date for the
14744 following manipulations. */
14745 shorten_branches (get_insns ());
14746
14747 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
14748 {
14749 if (!INSN_P (insn) || INSN_CODE (insn) <= 0)
14750 continue;
14751
14752 if (JUMP_P (insn))
14753 insn_added_p |= s390_fix_long_loop_prediction (insn);
14754
14755 if ((GET_CODE (PATTERN (insn)) == PARALLEL
14756 || GET_CODE (PATTERN (insn)) == SET)
14757 && s390_tune == PROCESSOR_2097_Z10)
14758 insn_added_p |= s390_z10_optimize_cmp (insn);
14759 }
14760
14761 /* Adjust branches if we added new instructions. */
14762 if (insn_added_p)
14763 shorten_branches (get_insns ());
14764 }
14765
14766 s390_function_num_hotpatch_hw (current_function_decl, &hw_before, &hw_after);
14767 if (hw_after > 0)
14768 {
14769 rtx_insn *insn;
14770
14771 /* Insert NOPs for hotpatching. */
14772 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
14773 /* Emit NOPs
14774 1. inside the area covered by debug information to allow setting
14775 breakpoints at the NOPs,
14776 2. before any insn which results in an asm instruction,
14777 3. before in-function labels to avoid jumping to the NOPs, for
14778 example as part of a loop,
14779 4. before any barrier in case the function is completely empty
14780 (__builtin_unreachable ()) and has neither internal labels nor
14781 active insns.
14782 */
14783 if (active_insn_p (insn) || BARRIER_P (insn) || LABEL_P (insn))
14784 break;
14785 /* Output a series of NOPs before the first active insn. */
14786 while (insn && hw_after > 0)
14787 {
14788 if (hw_after >= 3 && TARGET_CPU_ZARCH)
14789 {
14790 emit_insn_before (gen_nop_6_byte (), insn);
14791 hw_after -= 3;
14792 }
14793 else if (hw_after >= 2)
14794 {
14795 emit_insn_before (gen_nop_4_byte (), insn);
14796 hw_after -= 2;
14797 }
14798 else
14799 {
14800 emit_insn_before (gen_nop_2_byte (), insn);
14801 hw_after -= 1;
14802 }
14803 }
14804 }
14805 }
14806
14807 /* Return true if INSN is a fp load insn writing register REGNO. */
14808 static inline bool
14809 s390_fpload_toreg (rtx_insn *insn, unsigned int regno)
14810 {
14811 rtx set;
14812 enum attr_type flag = s390_safe_attr_type (insn);
14813
14814 if (flag != TYPE_FLOADSF && flag != TYPE_FLOADDF)
14815 return false;
14816
14817 set = single_set (insn);
14818
14819 if (set == NULL_RTX)
14820 return false;
14821
14822 if (!REG_P (SET_DEST (set)) || !MEM_P (SET_SRC (set)))
14823 return false;
14824
14825 if (REGNO (SET_DEST (set)) != regno)
14826 return false;
14827
14828 return true;
14829 }
14830
14831 /* This value describes the distance to be avoided between an
14832 arithmetic fp instruction and an fp load writing the same register.
14833 Both Z10_EARLYLOAD_DISTANCE - 1 and Z10_EARLYLOAD_DISTANCE + 1 are
14834 fine, but the exact value has to be avoided.  Otherwise the FP
14835 pipeline will throw an exception causing a major penalty. */
14836 #define Z10_EARLYLOAD_DISTANCE 7
14837
14838 /* Rearrange the ready list in order to avoid the situation described
14839 for Z10_EARLYLOAD_DISTANCE. A problematic load instruction is
14840 moved to the very end of the ready list. */
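/* Note that the scheduler issues insns from the end of the ready list
   (ready[*nready_p - 1] is taken first), so moving the offending load
   to ready[0] delays it for as long as possible.  */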
14841 static void
14842 s390_z10_prevent_earlyload_conflicts (rtx_insn **ready, int *nready_p)
14843 {
14844 unsigned int regno;
14845 int nready = *nready_p;
14846 rtx_insn *tmp;
14847 int i;
14848 rtx_insn *insn;
14849 rtx set;
14850 enum attr_type flag;
14851 int distance;
14852
14853 /* Skip DISTANCE - 1 active insns. */
14854 for (insn = last_scheduled_insn, distance = Z10_EARLYLOAD_DISTANCE - 1;
14855 distance > 0 && insn != NULL_RTX;
14856 distance--, insn = prev_active_insn (insn))
14857 if (CALL_P (insn) || JUMP_P (insn))
14858 return;
14859
14860 if (insn == NULL_RTX)
14861 return;
14862
14863 set = single_set (insn);
14864
14865 if (set == NULL_RTX || !REG_P (SET_DEST (set))
14866 || GET_MODE_CLASS (GET_MODE (SET_DEST (set))) != MODE_FLOAT)
14867 return;
14868
14869 flag = s390_safe_attr_type (insn);
14870
14871 if (flag == TYPE_FLOADSF || flag == TYPE_FLOADDF)
14872 return;
14873
14874 regno = REGNO (SET_DEST (set));
14875 i = nready - 1;
14876
14877 while (!s390_fpload_toreg (ready[i], regno) && i > 0)
14878 i--;
14879
14880 if (!i)
14881 return;
14882
14883 tmp = ready[i];
14884 memmove (&ready[1], &ready[0], sizeof (rtx_insn *) * i);
14885 ready[0] = tmp;
14886 }
14887
14888 /* Returns TRUE if BB is entered via a fallthru edge and all other
14889 incoming edges are less than unlikely. */
14890 static bool
14891 s390_bb_fallthru_entry_likely (basic_block bb)
14892 {
14893 edge e, fallthru_edge;
14894 edge_iterator ei;
14895
14896 if (!bb)
14897 return false;
14898
14899 fallthru_edge = find_fallthru_edge (bb->preds);
14900 if (!fallthru_edge)
14901 return false;
14902
14903 FOR_EACH_EDGE (e, ei, bb->preds)
14904 if (e != fallthru_edge
14905 && e->probability >= profile_probability::unlikely ())
14906 return false;
14907
14908 return true;
14909 }
14910
14911 /* The s390_sched_state variable tracks the state of the current or
14912 the last instruction group.
14913
14914 0,1,2 number of instructions scheduled in the current group
14915 3 the last group is complete - normal insns
14916 4 the last group was a cracked/expanded insn */
14917
14918 static int s390_sched_state = 0;
14919
14920 #define S390_SCHED_STATE_NORMAL 3
14921 #define S390_SCHED_STATE_CRACKED 4
14922
14923 #define S390_SCHED_ATTR_MASK_CRACKED 0x1
14924 #define S390_SCHED_ATTR_MASK_EXPANDED 0x2
14925 #define S390_SCHED_ATTR_MASK_ENDGROUP 0x4
14926 #define S390_SCHED_ATTR_MASK_GROUPALONE 0x8
14927
14928 static unsigned int
14929 s390_get_sched_attrmask (rtx_insn *insn)
14930 {
14931 unsigned int mask = 0;
14932
14933 switch (s390_tune)
14934 {
14935 case PROCESSOR_2827_ZEC12:
14936 if (get_attr_zEC12_cracked (insn))
14937 mask |= S390_SCHED_ATTR_MASK_CRACKED;
14938 if (get_attr_zEC12_expanded (insn))
14939 mask |= S390_SCHED_ATTR_MASK_EXPANDED;
14940 if (get_attr_zEC12_endgroup (insn))
14941 mask |= S390_SCHED_ATTR_MASK_ENDGROUP;
14942 if (get_attr_zEC12_groupalone (insn))
14943 mask |= S390_SCHED_ATTR_MASK_GROUPALONE;
14944 break;
14945 case PROCESSOR_2964_Z13:
14946 case PROCESSOR_3906_Z14:
14947 if (get_attr_z13_cracked (insn))
14948 mask |= S390_SCHED_ATTR_MASK_CRACKED;
14949 if (get_attr_z13_expanded (insn))
14950 mask |= S390_SCHED_ATTR_MASK_EXPANDED;
14951 if (get_attr_z13_endgroup (insn))
14952 mask |= S390_SCHED_ATTR_MASK_ENDGROUP;
14953 if (get_attr_z13_groupalone (insn))
14954 mask |= S390_SCHED_ATTR_MASK_GROUPALONE;
14955 break;
14956 default:
14957 gcc_unreachable ();
14958 }
14959 return mask;
14960 }
14961
14962 static unsigned int
14963 s390_get_unit_mask (rtx_insn *insn, int *units)
14964 {
14965 unsigned int mask = 0;
14966
14967 switch (s390_tune)
14968 {
14969 case PROCESSOR_2964_Z13:
14970 case PROCESSOR_3906_Z14:
14971 *units = 3;
14972 if (get_attr_z13_unit_lsu (insn))
14973 mask |= 1 << 0;
14974 if (get_attr_z13_unit_fxu (insn))
14975 mask |= 1 << 1;
14976 if (get_attr_z13_unit_vfu (insn))
14977 mask |= 1 << 2;
14978 break;
14979 default:
14980 gcc_unreachable ();
14981 }
14982 return mask;
14983 }
14984
14985 /* Return the scheduling score for INSN. The higher the score the
14986 better. The score is calculated from the OOO scheduling attributes
14987 of INSN and the scheduling state s390_sched_state. */
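/* Illustrative example: in state 1 (one insn already issued into the
   current group) an insn with none of the OOO attribute bits set scores
   10 + 5 = 15, while an insn with only the cracked bit set scores 5, so
   grouping-friendly insns are preferred for the remaining slots.  */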
14988 static int
14989 s390_sched_score (rtx_insn *insn)
14990 {
14991 unsigned int mask = s390_get_sched_attrmask (insn);
14992 int score = 0;
14993
14994 switch (s390_sched_state)
14995 {
14996 case 0:
14997 /* Try to put insns into the first slot which would otherwise
14998 break a group. */
14999 if ((mask & S390_SCHED_ATTR_MASK_CRACKED) != 0
15000 || (mask & S390_SCHED_ATTR_MASK_EXPANDED) != 0)
15001 score += 5;
15002 if ((mask & S390_SCHED_ATTR_MASK_GROUPALONE) != 0)
15003 score += 10;
15004 /* fallthrough */
15005 case 1:
15006 /* Prefer not cracked insns while trying to put together a
15007 group. */
15008 if ((mask & S390_SCHED_ATTR_MASK_CRACKED) == 0
15009 && (mask & S390_SCHED_ATTR_MASK_EXPANDED) == 0
15010 && (mask & S390_SCHED_ATTR_MASK_GROUPALONE) == 0)
15011 score += 10;
15012 if ((mask & S390_SCHED_ATTR_MASK_ENDGROUP) == 0)
15013 score += 5;
15014 break;
15015 case 2:
15016 /* Prefer not cracked insns while trying to put together a
15017 group. */
15018 if ((mask & S390_SCHED_ATTR_MASK_CRACKED) == 0
15019 && (mask & S390_SCHED_ATTR_MASK_EXPANDED) == 0
15020 && (mask & S390_SCHED_ATTR_MASK_GROUPALONE) == 0)
15021 score += 10;
15022 /* Prefer endgroup insns in the last slot. */
15023 if ((mask & S390_SCHED_ATTR_MASK_ENDGROUP) != 0)
15024 score += 10;
15025 break;
15026 case S390_SCHED_STATE_NORMAL:
15027 /* Prefer not cracked insns if the last was not cracked. */
15028 if ((mask & S390_SCHED_ATTR_MASK_CRACKED) == 0
15029 && (mask & S390_SCHED_ATTR_MASK_EXPANDED) == 0)
15030 score += 5;
15031 if ((mask & S390_SCHED_ATTR_MASK_GROUPALONE) != 0)
15032 score += 10;
15033 break;
15034 case S390_SCHED_STATE_CRACKED:
15035 /* Try to keep cracked insns together to prevent them from
15036 interrupting groups. */
15037 if ((mask & S390_SCHED_ATTR_MASK_CRACKED) != 0
15038 || (mask & S390_SCHED_ATTR_MASK_EXPANDED) != 0)
15039 score += 5;
15040 break;
15041 }
15042
15043 if (s390_tune >= PROCESSOR_2964_Z13)
15044 {
15045 int units, i;
15046 unsigned unit_mask, m = 1;
15047
15048 unit_mask = s390_get_unit_mask (insn, &units);
15049 gcc_assert (units <= MAX_SCHED_UNITS);
15050
15051 /* Add a score in range 0..MAX_SCHED_MIX_SCORE depending on how long
15052 ago the last insn of this unit type got scheduled. This is
15053 supposed to help provide a proper instruction mix to the
15054 CPU. */
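	 /* Note: last_scheduled_unit_distance[i] is capped at
	    MAX_SCHED_MIX_DISTANCE (see s390_sched_variable_issue), so a unit
	    that has been idle for that long contributes the full
	    MAX_SCHED_MIX_SCORE.  */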
15055 for (i = 0; i < units; i++, m <<= 1)
15056 if (m & unit_mask)
15057 score += (last_scheduled_unit_distance[i] * MAX_SCHED_MIX_SCORE /
15058 MAX_SCHED_MIX_DISTANCE);
15059
15060 unsigned latency = insn_default_latency (insn);
15061
15062 int other_side = 1 - current_side;
15063
15064 /* Try to delay long-running insns when side is busy. */
15065 if (latency > LONGRUNNING_THRESHOLD)
15066 {
15067 if (get_attr_z13_unit_fxu (insn) && fxu_longrunning[current_side]
15068 && fxu_longrunning[other_side] <= fxu_longrunning[current_side])
15069 score = MAX (0, score - 10);
15070
15071 if (get_attr_z13_unit_vfu (insn) && vfu_longrunning[current_side]
15072 && vfu_longrunning[other_side] <= vfu_longrunning[current_side])
15073 score = MAX (0, score - 10);
15074 }
15075 }
15076
15077 return score;
15078 }
15079
15080 /* This function is called via hook TARGET_SCHED_REORDER before
15081 issuing one insn from list READY which contains *NREADYP entries.
15082 For target z10 it reorders load instructions to avoid early load
15083 conflicts in the floating point pipeline.  */
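/* Note that the scheduler issues the insn at ready[*nreadyp - 1] first;
   "moving an insn to the top of the list" below therefore means moving
   it to the highest index, while s390_z10_prevent_earlyload_conflicts
   parks a problematic load at index 0, the very end of the list.  */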
15084 static int
15085 s390_sched_reorder (FILE *file, int verbose,
15086 rtx_insn **ready, int *nreadyp, int clock ATTRIBUTE_UNUSED)
15087 {
15088 if (s390_tune == PROCESSOR_2097_Z10
15089 && reload_completed
15090 && *nreadyp > 1)
15091 s390_z10_prevent_earlyload_conflicts (ready, nreadyp);
15092
15093 if (s390_tune >= PROCESSOR_2827_ZEC12
15094 && reload_completed
15095 && *nreadyp > 1)
15096 {
15097 int i;
15098 int last_index = *nreadyp - 1;
15099 int max_index = -1;
15100 int max_score = -1;
15101 rtx_insn *tmp;
15102
15103 /* Just move the insn with the highest score to the top (the
15104 end) of the list. A full sort is not needed since a conflict
15105 in the hazard recognition cannot happen. So the top insn in
15106 the ready list will always be taken. */
15107 for (i = last_index; i >= 0; i--)
15108 {
15109 int score;
15110
15111 if (recog_memoized (ready[i]) < 0)
15112 continue;
15113
15114 score = s390_sched_score (ready[i]);
15115 if (score > max_score)
15116 {
15117 max_score = score;
15118 max_index = i;
15119 }
15120 }
15121
15122 if (max_index != -1)
15123 {
15124 if (max_index != last_index)
15125 {
15126 tmp = ready[max_index];
15127 ready[max_index] = ready[last_index];
15128 ready[last_index] = tmp;
15129
15130 if (verbose > 5)
15131 fprintf (file,
15132 ";;\t\tBACKEND: move insn %d to the top of list\n",
15133 INSN_UID (ready[last_index]));
15134 }
15135 else if (verbose > 5)
15136 fprintf (file,
15137 ";;\t\tBACKEND: best insn %d already on top\n",
15138 INSN_UID (ready[last_index]));
15139 }
15140
15141 if (verbose > 5)
15142 {
15143 fprintf (file, "ready list ooo attributes - sched state: %d\n",
15144 s390_sched_state);
15145
15146 for (i = last_index; i >= 0; i--)
15147 {
15148 unsigned int sched_mask;
15149 rtx_insn *insn = ready[i];
15150
15151 if (recog_memoized (insn) < 0)
15152 continue;
15153
15154 sched_mask = s390_get_sched_attrmask (insn);
15155 fprintf (file, ";;\t\tBACKEND: insn %d score: %d: ",
15156 INSN_UID (insn),
15157 s390_sched_score (insn));
15158 #define PRINT_SCHED_ATTR(M, ATTR) fprintf (file, "%s ",\
15159 ((M) & sched_mask) ? #ATTR : "");
15160 PRINT_SCHED_ATTR (S390_SCHED_ATTR_MASK_CRACKED, cracked);
15161 PRINT_SCHED_ATTR (S390_SCHED_ATTR_MASK_EXPANDED, expanded);
15162 PRINT_SCHED_ATTR (S390_SCHED_ATTR_MASK_ENDGROUP, endgroup);
15163 PRINT_SCHED_ATTR (S390_SCHED_ATTR_MASK_GROUPALONE, groupalone);
15164 #undef PRINT_SCHED_ATTR
15165 if (s390_tune >= PROCESSOR_2964_Z13)
15166 {
15167 unsigned int unit_mask, m = 1;
15168 int units, j;
15169
15170 unit_mask = s390_get_unit_mask (insn, &units);
15171 fprintf (file, "(units:");
15172 for (j = 0; j < units; j++, m <<= 1)
15173 if (m & unit_mask)
15174 fprintf (file, " u%d", j);
15175 fprintf (file, ")");
15176 }
15177 fprintf (file, "\n");
15178 }
15179 }
15180 }
15181
15182 return s390_issue_rate ();
15183 }
15184
15185
15186 /* This function is called via hook TARGET_SCHED_VARIABLE_ISSUE after
15187 the scheduler has issued INSN. It stores the last issued insn into
15188 last_scheduled_insn in order to make it available for
15189 s390_sched_reorder. */
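/* State machine example: starting from s390_sched_state == 0, three
   consecutive normal insns (attribute mask 0) advance the state
   1 -> 2 -> 3 (S390_SCHED_STATE_NORMAL), i.e. a complete group; a
   cracked or expanded insn instead forces S390_SCHED_STATE_CRACKED.  */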
15190 static int
15191 s390_sched_variable_issue (FILE *file, int verbose, rtx_insn *insn, int more)
15192 {
15193 last_scheduled_insn = insn;
15194
15195 bool starts_group = false;
15196
15197 if (s390_tune >= PROCESSOR_2827_ZEC12
15198 && reload_completed
15199 && recog_memoized (insn) >= 0)
15200 {
15201 unsigned int mask = s390_get_sched_attrmask (insn);
15202
15203 if ((mask & S390_SCHED_ATTR_MASK_CRACKED) != 0
15204 || (mask & S390_SCHED_ATTR_MASK_EXPANDED) != 0
15205 || (mask & S390_SCHED_ATTR_MASK_GROUPALONE) != 0)
15206 starts_group = true;
15207
15208 if ((mask & S390_SCHED_ATTR_MASK_CRACKED) != 0
15209 || (mask & S390_SCHED_ATTR_MASK_EXPANDED) != 0)
15210 s390_sched_state = S390_SCHED_STATE_CRACKED;
15211 else if ((mask & S390_SCHED_ATTR_MASK_ENDGROUP) != 0
15212 || (mask & S390_SCHED_ATTR_MASK_GROUPALONE) != 0)
15213 s390_sched_state = S390_SCHED_STATE_NORMAL;
15214 else
15215 {
15216 /* Only normal insns are left (mask == 0). */
15217 switch (s390_sched_state)
15218 {
15219 case 0:
15220 starts_group = true;
15221 /* fallthrough */
15222 case 1:
15223 case 2:
15224 s390_sched_state++;
15225 break;
15226 case S390_SCHED_STATE_NORMAL:
15227 starts_group = true;
15228 s390_sched_state = 1;
15229 break;
15230 case S390_SCHED_STATE_CRACKED:
15231 s390_sched_state = S390_SCHED_STATE_NORMAL;
15232 break;
15233 }
15234 }
15235
15236 if (s390_tune >= PROCESSOR_2964_Z13)
15237 {
15238 int units, i;
15239 unsigned unit_mask, m = 1;
15240
15241 unit_mask = s390_get_unit_mask (insn, &units);
15242 gcc_assert (units <= MAX_SCHED_UNITS);
15243
15244 for (i = 0; i < units; i++, m <<= 1)
15245 if (m & unit_mask)
15246 last_scheduled_unit_distance[i] = 0;
15247 else if (last_scheduled_unit_distance[i] < MAX_SCHED_MIX_DISTANCE)
15248 last_scheduled_unit_distance[i]++;
15249 }
15250
15251 /* If this insn started a new group, the side flipped. */
15252 if (starts_group)
15253 current_side = current_side ? 0 : 1;
15254
15255 for (int i = 0; i < 2; i++)
15256 {
15257 if (fxu_longrunning[i] >= 1)
15258 fxu_longrunning[i] -= 1;
15259 if (vfu_longrunning[i] >= 1)
15260 vfu_longrunning[i] -= 1;
15261 }
15262
15263 unsigned latency = insn_default_latency (insn);
15264 if (latency > LONGRUNNING_THRESHOLD)
15265 {
15266 if (get_attr_z13_unit_fxu (insn))
15267 fxu_longrunning[current_side] = latency * LATENCY_FACTOR;
15268 else
15269 vfu_longrunning[current_side] = latency * LATENCY_FACTOR;
15270 }
15271
15272 if (verbose > 5)
15273 {
15274 unsigned int sched_mask;
15275
15276 sched_mask = s390_get_sched_attrmask (insn);
15277
15278 fprintf (file, ";;\t\tBACKEND: insn %d: ", INSN_UID (insn));
15279 #define PRINT_SCHED_ATTR(M, ATTR) fprintf (file, "%s ", ((M) & sched_mask) ? #ATTR : "");
15280 PRINT_SCHED_ATTR (S390_SCHED_ATTR_MASK_CRACKED, cracked);
15281 PRINT_SCHED_ATTR (S390_SCHED_ATTR_MASK_EXPANDED, expanded);
15282 PRINT_SCHED_ATTR (S390_SCHED_ATTR_MASK_ENDGROUP, endgroup);
15283 PRINT_SCHED_ATTR (S390_SCHED_ATTR_MASK_GROUPALONE, groupalone);
15284 #undef PRINT_SCHED_ATTR
15285
15286 if (s390_tune >= PROCESSOR_2964_Z13)
15287 {
15288 unsigned int unit_mask, m = 1;
15289 int units, j;
15290
15291 unit_mask = s390_get_unit_mask (insn, &units);
15292 fprintf (file, "(units:");
15293 for (j = 0; j < units; j++, m <<= 1)
15294 if (m & unit_mask)
15295 fprintf (file, " %d", j);
15296 fprintf (file, ")");
15297 }
15298 fprintf (file, " sched state: %d\n", s390_sched_state);
15299
15300 if (s390_tune >= PROCESSOR_2964_Z13)
15301 {
15302 int units, j;
15303
15304 s390_get_unit_mask (insn, &units);
15305
15306 fprintf (file, ";;\t\tBACKEND: units unused for: ");
15307 for (j = 0; j < units; j++)
15308 fprintf (file, "%d:%d ", j, last_scheduled_unit_distance[j]);
15309 fprintf (file, "\n");
15310 }
15311 }
15312 }
15313
15314 if (GET_CODE (PATTERN (insn)) != USE
15315 && GET_CODE (PATTERN (insn)) != CLOBBER)
15316 return more - 1;
15317 else
15318 return more;
15319 }
15320
15321 static void
15322 s390_sched_init (FILE *file ATTRIBUTE_UNUSED,
15323 int verbose ATTRIBUTE_UNUSED,
15324 int max_ready ATTRIBUTE_UNUSED)
15325 {
15326 last_scheduled_insn = NULL;
15327 memset (last_scheduled_unit_distance, 0, MAX_SCHED_UNITS * sizeof (int));
15328
15329 /* If the next basic block is most likely entered via a fallthru edge
15330 we keep the last sched state. Otherwise we start a new group.
15331 The scheduler traverses basic blocks in "instruction stream" ordering
15332 so if we see a fallthru edge here, s390_sched_state will be that of
15333 its source block.
15334
15335 current_sched_info->prev_head is the insn before the first insn of the
15336 block of insns to be scheduled.
15337 */
15338 rtx_insn *insn = current_sched_info->prev_head
15339 ? NEXT_INSN (current_sched_info->prev_head) : NULL;
15340 basic_block bb = insn ? BLOCK_FOR_INSN (insn) : NULL;
15341 if (s390_tune < PROCESSOR_2964_Z13 || !s390_bb_fallthru_entry_likely (bb))
15342 s390_sched_state = 0;
15343 }
15344
15345 /* This target hook implementation for TARGET_LOOP_UNROLL_ADJUST calculates
15346 the number of times struct loop *loop should be unrolled when tuned for
15347 cpus with a built-in stride prefetcher.
15348 The loop body is analyzed for memory accesses by walking each rtx of
15349 the loop. Depending on the loop_depth and the number of memory
15350 accesses a number <= nunroll is returned to improve the
15351 behavior of the hardware prefetch unit. */
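/* For example, a loop of depth 1 whose body contains 7 MEM references
   is limited to MIN (nunroll, 28 / 7), i.e. at most 4 unroll copies.  */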
15352 static unsigned
15353 s390_loop_unroll_adjust (unsigned nunroll, struct loop *loop)
15354 {
15355 basic_block *bbs;
15356 rtx_insn *insn;
15357 unsigned i;
15358 unsigned mem_count = 0;
15359
15360 if (s390_tune < PROCESSOR_2097_Z10)
15361 return nunroll;
15362
15363 /* Count the number of memory references within the loop body. */
15364 bbs = get_loop_body (loop);
15365 subrtx_iterator::array_type array;
15366 for (i = 0; i < loop->num_nodes; i++)
15367 FOR_BB_INSNS (bbs[i], insn)
15368 if (INSN_P (insn) && INSN_CODE (insn) != -1)
15369 FOR_EACH_SUBRTX (iter, array, PATTERN (insn), NONCONST)
15370 if (MEM_P (*iter))
15371 mem_count += 1;
15372 free (bbs);
15373
15374 /* Prevent division by zero; nunroll does not need to be adjusted in this case. */
15375 if (mem_count == 0)
15376 return nunroll;
15377
15378 switch (loop_depth (loop))
15379 {
15380 case 1:
15381 return MIN (nunroll, 28 / mem_count);
15382 case 2:
15383 return MIN (nunroll, 22 / mem_count);
15384 default:
15385 return MIN (nunroll, 16 / mem_count);
15386 }
15387 }
15388
15389 /* Restore the current options. This is a hook function and also called
15390 internally. */
15391
15392 static void
15393 s390_function_specific_restore (struct gcc_options *opts,
15394 struct cl_target_option *ptr ATTRIBUTE_UNUSED)
15395 {
15396 opts->x_s390_cost_pointer = (long)processor_table[opts->x_s390_tune].cost;
15397 }
15398
15399 static void
15400 s390_default_align (struct gcc_options *opts)
15401 {
15402 /* Set the default function alignment to 16 in order to get rid of
15403 some unwanted performance effects. */
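/* I.e. -falign-functions=16 becomes the default for -mtune=z13 and newer
   whenever function alignment is enabled and the user did not specify an
   explicit alignment string.  */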
15404 if (opts->x_flag_align_functions && !opts->x_str_align_functions
15405 && opts->x_s390_tune >= PROCESSOR_2964_Z13)
15406 opts->x_str_align_functions = "16";
15407 }
15408
15409 static void
15410 s390_override_options_after_change (void)
15411 {
15412 s390_default_align (&global_options);
15413 }
15414
15415 static void
15416 s390_option_override_internal (bool main_args_p,
15417 struct gcc_options *opts,
15418 const struct gcc_options *opts_set)
15419 {
15420 const char *prefix;
15421 const char *suffix;
15422
15423 /* Set up prefix/suffix so the error messages refer to either the command
15424 line argument, or the attribute(target). */
15425 if (main_args_p)
15426 {
15427 prefix = "-m";
15428 suffix = "";
15429 }
15430 else
15431 {
15432 prefix = "option(\"";
15433 suffix = "\")";
15434 }
15435
15436
15437 /* Architecture mode defaults according to ABI. */
15438 if (!(opts_set->x_target_flags & MASK_ZARCH))
15439 {
15440 if (TARGET_64BIT)
15441 opts->x_target_flags |= MASK_ZARCH;
15442 else
15443 opts->x_target_flags &= ~MASK_ZARCH;
15444 }
15445
15446 /* Set the march default in case it hasn't been specified on cmdline. */
15447 if (!opts_set->x_s390_arch)
15448 opts->x_s390_arch = PROCESSOR_2064_Z900;
15449 else if (opts->x_s390_arch == PROCESSOR_9672_G5
15450 || opts->x_s390_arch == PROCESSOR_9672_G6)
15451 warning (OPT_Wdeprecated, "%sarch=%s%s is deprecated and will be removed "
15452 "in future releases; use at least %sarch=z900%s",
15453 prefix, opts->x_s390_arch == PROCESSOR_9672_G5 ? "g5" : "g6",
15454 suffix, prefix, suffix);
15455
15456 opts->x_s390_arch_flags = processor_flags_table[(int) opts->x_s390_arch];
15457
15458 /* Determine processor to tune for. */
15459 if (!opts_set->x_s390_tune)
15460 opts->x_s390_tune = opts->x_s390_arch;
15461 else if (opts->x_s390_tune == PROCESSOR_9672_G5
15462 || opts->x_s390_tune == PROCESSOR_9672_G6)
15463 warning (OPT_Wdeprecated, "%stune=%s%s is deprecated and will be removed "
15464 "in future releases; use at least %stune=z900%s",
15465 prefix, opts->x_s390_tune == PROCESSOR_9672_G5 ? "g5" : "g6",
15466 suffix, prefix, suffix);
15467
15468 opts->x_s390_tune_flags = processor_flags_table[opts->x_s390_tune];
15469
15470 /* Sanity checks. */
15471 if (opts->x_s390_arch == PROCESSOR_NATIVE
15472 || opts->x_s390_tune == PROCESSOR_NATIVE)
15473 gcc_unreachable ();
15474 if (TARGET_ZARCH_P (opts->x_target_flags) && !TARGET_CPU_ZARCH_P (opts))
15475 error ("z/Architecture mode not supported on %s",
15476 processor_table[(int)opts->x_s390_arch].name);
15477 if (TARGET_64BIT && !TARGET_ZARCH_P (opts->x_target_flags))
15478 error ("64-bit ABI not supported in ESA/390 mode");
15479
15480 if (opts->x_s390_indirect_branch == indirect_branch_thunk_inline
15481 || opts->x_s390_indirect_branch_call == indirect_branch_thunk_inline
15482 || opts->x_s390_function_return == indirect_branch_thunk_inline
15483 || opts->x_s390_function_return_reg == indirect_branch_thunk_inline
15484 || opts->x_s390_function_return_mem == indirect_branch_thunk_inline)
15485 error ("thunk-inline is only supported with -mindirect-branch-jump");
15486
15487 if (opts->x_s390_indirect_branch != indirect_branch_keep)
15488 {
15489 if (!opts_set->x_s390_indirect_branch_call)
15490 opts->x_s390_indirect_branch_call = opts->x_s390_indirect_branch;
15491
15492 if (!opts_set->x_s390_indirect_branch_jump)
15493 opts->x_s390_indirect_branch_jump = opts->x_s390_indirect_branch;
15494 }
15495
15496 if (opts->x_s390_function_return != indirect_branch_keep)
15497 {
15498 if (!opts_set->x_s390_function_return_reg)
15499 opts->x_s390_function_return_reg = opts->x_s390_function_return;
15500
15501 if (!opts_set->x_s390_function_return_mem)
15502 opts->x_s390_function_return_mem = opts->x_s390_function_return;
15503 }
15504
15505 if (!TARGET_CPU_ZARCH)
15506 {
15507 if (opts->x_s390_indirect_branch_call != indirect_branch_keep
15508 || opts->x_s390_indirect_branch_jump != indirect_branch_keep)
15509 error ("-mindirect-branch* options require -march=z900 or higher");
15510 if (opts->x_s390_function_return_reg != indirect_branch_keep
15511 || opts->x_s390_function_return_mem != indirect_branch_keep)
15512 error ("-mfunction-return* options require -march=z900 or higher");
15513 }
15514
15515
15516 /* Enable hardware transactions if available and not explicitly
15517 disabled by user. E.g. with -m31 -march=zEC12 -mzarch */
15518 if (!TARGET_OPT_HTM_P (opts_set->x_target_flags))
15519 {
15520 if (TARGET_CPU_HTM_P (opts) && TARGET_ZARCH_P (opts->x_target_flags))
15521 opts->x_target_flags |= MASK_OPT_HTM;
15522 else
15523 opts->x_target_flags &= ~MASK_OPT_HTM;
15524 }
15525
15526 if (TARGET_OPT_VX_P (opts_set->x_target_flags))
15527 {
15528 if (TARGET_OPT_VX_P (opts->x_target_flags))
15529 {
15530 if (!TARGET_CPU_VX_P (opts))
15531 error ("hardware vector support not available on %s",
15532 processor_table[(int)opts->x_s390_arch].name);
15533 if (TARGET_SOFT_FLOAT_P (opts->x_target_flags))
15534 error ("hardware vector support not available with -msoft-float");
15535 }
15536 }
15537 else
15538 {
15539 if (TARGET_CPU_VX_P (opts))
15540 /* Enable vector support if available and not explicitly disabled
15541 by user. E.g. with -m31 -march=z13 -mzarch */
15542 opts->x_target_flags |= MASK_OPT_VX;
15543 else
15544 opts->x_target_flags &= ~MASK_OPT_VX;
15545 }
15546
15547 /* Use hardware DFP if available and not explicitly disabled by
15548 user. E.g. with -m31 -march=z10 -mzarch */
15549 if (!TARGET_HARD_DFP_P (opts_set->x_target_flags))
15550 {
15551 if (TARGET_DFP_P (opts))
15552 opts->x_target_flags |= MASK_HARD_DFP;
15553 else
15554 opts->x_target_flags &= ~MASK_HARD_DFP;
15555 }
15556
15557 if (TARGET_HARD_DFP_P (opts->x_target_flags) && !TARGET_DFP_P (opts))
15558 {
15559 if (TARGET_HARD_DFP_P (opts_set->x_target_flags))
15560 {
15561 if (!TARGET_CPU_DFP_P (opts))
15562 error ("hardware decimal floating point instructions"
15563 " not available on %s",
15564 processor_table[(int)opts->x_s390_arch].name);
15565 if (!TARGET_ZARCH_P (opts->x_target_flags))
15566 error ("hardware decimal floating point instructions"
15567 " not available in ESA/390 mode");
15568 }
15569 else
15570 opts->x_target_flags &= ~MASK_HARD_DFP;
15571 }
15572
15573 if (TARGET_SOFT_FLOAT_P (opts_set->x_target_flags)
15574 && TARGET_SOFT_FLOAT_P (opts->x_target_flags))
15575 {
15576 if (TARGET_HARD_DFP_P (opts_set->x_target_flags)
15577 && TARGET_HARD_DFP_P (opts->x_target_flags))
15578 error ("-mhard-dfp can%'t be used in conjunction with -msoft-float");
15579
15580 opts->x_target_flags &= ~MASK_HARD_DFP;
15581 }
15582
15583 if (TARGET_BACKCHAIN_P (opts->x_target_flags)
15584 && TARGET_PACKED_STACK_P (opts->x_target_flags)
15585 && TARGET_HARD_FLOAT_P (opts->x_target_flags))
15586 error ("-mbackchain -mpacked-stack -mhard-float are not supported "
15587 "in combination");
15588
15589 if (opts->x_s390_stack_size)
15590 {
15591 if (opts->x_s390_stack_guard >= opts->x_s390_stack_size)
15592 error ("stack size must be greater than the stack guard value");
15593 else if (opts->x_s390_stack_size > 1 << 16)
15594 error ("stack size must not be greater than 64k");
15595 }
15596 else if (opts->x_s390_stack_guard)
15597 error ("-mstack-guard implies use of -mstack-size");
15598
15599 #ifdef TARGET_DEFAULT_LONG_DOUBLE_128
15600 if (!TARGET_LONG_DOUBLE_128_P (opts_set->x_target_flags))
15601 opts->x_target_flags |= MASK_LONG_DOUBLE_128;
15602 #endif
15603
15604 if (opts->x_s390_tune >= PROCESSOR_2097_Z10)
15605 {
15606 maybe_set_param_value (PARAM_MAX_UNROLLED_INSNS, 100,
15607 opts->x_param_values,
15608 opts_set->x_param_values);
15609 maybe_set_param_value (PARAM_MAX_UNROLL_TIMES, 32,
15610 opts->x_param_values,
15611 opts_set->x_param_values);
15612 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEELED_INSNS, 2000,
15613 opts->x_param_values,
15614 opts_set->x_param_values);
15615 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEEL_TIMES, 64,
15616 opts->x_param_values,
15617 opts_set->x_param_values);
15618 }
15619
15620 maybe_set_param_value (PARAM_MAX_PENDING_LIST_LENGTH, 256,
15621 opts->x_param_values,
15622 opts_set->x_param_values);
15623 /* Values for loop prefetching.  */
15624 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE, 256,
15625 opts->x_param_values,
15626 opts_set->x_param_values);
15627 maybe_set_param_value (PARAM_L1_CACHE_SIZE, 128,
15628 opts->x_param_values,
15629 opts_set->x_param_values);
15630 /* s390 has more than 2 levels and the size is much larger. Since
15631 we are always running virtualized, assume that we only get a small
15632 part of the caches above l1. */
15633 maybe_set_param_value (PARAM_L2_CACHE_SIZE, 1500,
15634 opts->x_param_values,
15635 opts_set->x_param_values);
15636 maybe_set_param_value (PARAM_PREFETCH_MIN_INSN_TO_MEM_RATIO, 2,
15637 opts->x_param_values,
15638 opts_set->x_param_values);
15639 maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES, 6,
15640 opts->x_param_values,
15641 opts_set->x_param_values);
15642
15643 /* Use the alternative scheduling-pressure algorithm by default. */
15644 maybe_set_param_value (PARAM_SCHED_PRESSURE_ALGORITHM, 2,
15645 opts->x_param_values,
15646 opts_set->x_param_values);
15647
15648 maybe_set_param_value (PARAM_MIN_VECT_LOOP_BOUND, 2,
15649 opts->x_param_values,
15650 opts_set->x_param_values);
15651
15652 /* Set the default alignment. */
15653 s390_default_align (opts);
15654
15655 /* Call target specific restore function to do post-init work. At the moment,
15656 this just sets opts->x_s390_cost_pointer. */
15657 s390_function_specific_restore (opts, NULL);
15658
15659 /* Check whether -mfentry is supported. It cannot be used in 31-bit mode,
15660 because 31-bit PLT stubs assume that %r12 contains GOT address, which is
15661 not the case when the code runs before the prologue. */
15662 if (opts->x_flag_fentry && !TARGET_64BIT)
15663 error ("-mfentry is supported only for 64-bit CPUs");
15664 }
15665
15666 static void
15667 s390_option_override (void)
15668 {
15669 unsigned int i;
15670 cl_deferred_option *opt;
15671 vec<cl_deferred_option> *v =
15672 (vec<cl_deferred_option> *) s390_deferred_options;
15673
15674 if (v)
15675 FOR_EACH_VEC_ELT (*v, i, opt)
15676 {
15677 switch (opt->opt_index)
15678 {
15679 case OPT_mhotpatch_:
15680 {
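	    /* The argument has the form "n,m", e.g. -mhotpatch=1,2,
	       requesting n halfwords of hotpatch area before and m halfwords
	       of NOPs after the function label.  */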
15681 int val1;
15682 int val2;
15683 char *s = strtok (ASTRDUP (opt->arg), ",");
15684 char *t = strtok (NULL, "\0");
15685
15686 if (t != NULL)
15687 {
15688 val1 = integral_argument (s);
15689 val2 = integral_argument (t);
15690 }
15691 else
15692 {
15693 val1 = -1;
15694 val2 = -1;
15695 }
15696 if (val1 == -1 || val2 == -1)
15697 {
15698 /* Argument is not a plain number.  */
15699 error ("arguments to %qs should be non-negative integers",
15700 "-mhotpatch=n,m");
15701 break;
15702 }
15703 else if (val1 > s390_hotpatch_hw_max
15704 || val2 > s390_hotpatch_hw_max)
15705 {
15706 error ("argument to %qs is too large (max. %d)",
15707 "-mhotpatch=n,m", s390_hotpatch_hw_max);
15708 break;
15709 }
15710 s390_hotpatch_hw_before_label = val1;
15711 s390_hotpatch_hw_after_label = val2;
15712 break;
15713 }
15714 default:
15715 gcc_unreachable ();
15716 }
15717 }
15718
15719 /* Set up function hooks. */
15720 init_machine_status = s390_init_machine_status;
15721
15722 s390_option_override_internal (true, &global_options, &global_options_set);
15723
15724 /* Save the initial options in case the user does function specific
15725 options. */
15726 target_option_default_node = build_target_option_node (&global_options);
15727 target_option_current_node = target_option_default_node;
15728
15729 /* This cannot reside in s390_option_optimization_table since HAVE_prefetch
15730 requires the arch flags to be evaluated already. Since prefetching
15731 is beneficial on s390, we enable it if available. */
15732 if (flag_prefetch_loop_arrays < 0 && HAVE_prefetch && optimize >= 3)
15733 flag_prefetch_loop_arrays = 1;
15734
15735 if (!s390_pic_data_is_text_relative && !flag_pic)
15736 error ("-mno-pic-data-is-text-relative cannot be used without -fpic/-fPIC");
15737
15738 if (TARGET_TPF)
15739 {
15740 /* Don't emit DWARF3/4 unless specifically selected. The TPF
15741 debuggers do not yet support DWARF 3/4. */
15742 if (!global_options_set.x_dwarf_strict)
15743 dwarf_strict = 1;
15744 if (!global_options_set.x_dwarf_version)
15745 dwarf_version = 2;
15746 }
15747
15748 /* Register a target-specific optimization-and-lowering pass
15749 to run immediately before prologue and epilogue generation.
15750
15751 Registering the pass must be done at start up. It's
15752 convenient to do it here. */
15753 opt_pass *new_pass = new pass_s390_early_mach (g);
15754 struct register_pass_info insert_pass_s390_early_mach =
15755 {
15756 new_pass, /* pass */
15757 "pro_and_epilogue", /* reference_pass_name */
15758 1, /* ref_pass_instance_number */
15759 PASS_POS_INSERT_BEFORE /* po_op */
15760 };
15761 register_pass (&insert_pass_s390_early_mach);
15762 }
15763
15764 #if S390_USE_TARGET_ATTRIBUTE
15765 /* Inner function to process the attribute((target(...))), take an argument and
15766 set the current options from the argument. If we have a list, recursively go
15767 over the list. */
15768
15769 static bool
15770 s390_valid_target_attribute_inner_p (tree args,
15771 struct gcc_options *opts,
15772 struct gcc_options *new_opts_set,
15773 bool force_pragma)
15774 {
15775 char *next_optstr;
15776 bool ret = true;
15777
15778 #define S390_ATTRIB(S,O,A) { S, sizeof (S)-1, O, A, 0 }
15779 #define S390_PRAGMA(S,O,A) { S, sizeof (S)-1, O, A, 1 }
15780 static const struct
15781 {
15782 const char *string;
15783 size_t len;
15784 int opt;
15785 int has_arg;
15786 int only_as_pragma;
15787 } attrs[] = {
15788 /* enum options */
15789 S390_ATTRIB ("arch=", OPT_march_, 1),
15790 S390_ATTRIB ("tune=", OPT_mtune_, 1),
15791 /* uinteger options */
15792 S390_ATTRIB ("stack-guard=", OPT_mstack_guard_, 1),
15793 S390_ATTRIB ("stack-size=", OPT_mstack_size_, 1),
15794 S390_ATTRIB ("branch-cost=", OPT_mbranch_cost_, 1),
15795 S390_ATTRIB ("warn-framesize=", OPT_mwarn_framesize_, 1),
15796 /* flag options */
15797 S390_ATTRIB ("backchain", OPT_mbackchain, 0),
15798 S390_ATTRIB ("hard-dfp", OPT_mhard_dfp, 0),
15799 S390_ATTRIB ("hard-float", OPT_mhard_float, 0),
15800 S390_ATTRIB ("htm", OPT_mhtm, 0),
15801 S390_ATTRIB ("vx", OPT_mvx, 0),
15802 S390_ATTRIB ("packed-stack", OPT_mpacked_stack, 0),
15803 S390_ATTRIB ("small-exec", OPT_msmall_exec, 0),
15804 S390_ATTRIB ("soft-float", OPT_msoft_float, 0),
15805 S390_ATTRIB ("mvcle", OPT_mmvcle, 0),
15806 S390_PRAGMA ("zvector", OPT_mzvector, 0),
15807 /* boolean options */
15808 S390_ATTRIB ("warn-dynamicstack", OPT_mwarn_dynamicstack, 0),
15809 };
15810 #undef S390_ATTRIB
15811 #undef S390_PRAGMA
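/* Example: attribute ((target ("arch=z13,no-htm"))) arrives here as one
   string; it is split at the comma below, "arch=" matches an entry with
   has_arg set, and the "no-" prefix on "htm" negates that flag.  */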
15812
15813 /* If this is a list, recurse to get the options. */
15814 if (TREE_CODE (args) == TREE_LIST)
15815 {
15816 bool ret = true;
15817 int num_pragma_values;
15818 int i;
15819
15820 /* Note: attribs.c:decl_attributes prepends the values from
15821 current_target_pragma to the list of target attributes. To determine
15822 whether we're looking at a value of the attribute or the pragma we
15823 assume that the first [list_length (current_target_pragma)] values in
15824 the list are the values from the pragma. */
15825 num_pragma_values = (!force_pragma && current_target_pragma != NULL)
15826 ? list_length (current_target_pragma) : 0;
15827 for (i = 0; args; args = TREE_CHAIN (args), i++)
15828 {
15829 bool is_pragma;
15830
15831 is_pragma = (force_pragma || i < num_pragma_values);
15832 if (TREE_VALUE (args)
15833 && !s390_valid_target_attribute_inner_p (TREE_VALUE (args),
15834 opts, new_opts_set,
15835 is_pragma))
15836 {
15837 ret = false;
15838 }
15839 }
15840 return ret;
15841 }
15842
15843 else if (TREE_CODE (args) != STRING_CST)
15844 {
15845 error ("attribute %<target%> argument not a string");
15846 return false;
15847 }
15848
15849 /* Handle multiple arguments separated by commas. */
15850 next_optstr = ASTRDUP (TREE_STRING_POINTER (args));
15851
15852 while (next_optstr && *next_optstr != '\0')
15853 {
15854 char *p = next_optstr;
15855 char *orig_p = p;
15856 char *comma = strchr (next_optstr, ',');
15857 size_t len, opt_len;
15858 int opt;
15859 bool opt_set_p;
15860 char ch;
15861 unsigned i;
15862 int mask = 0;
15863 enum cl_var_type var_type;
15864 bool found;
15865
15866 if (comma)
15867 {
15868 *comma = '\0';
15869 len = comma - next_optstr;
15870 next_optstr = comma + 1;
15871 }
15872 else
15873 {
15874 len = strlen (p);
15875 next_optstr = NULL;
15876 }
15877
15878 /* Recognize no-xxx. */
15879 if (len > 3 && p[0] == 'n' && p[1] == 'o' && p[2] == '-')
15880 {
15881 opt_set_p = false;
15882 p += 3;
15883 len -= 3;
15884 }
15885 else
15886 opt_set_p = true;
15887
15888 /* Find the option. */
15889 ch = *p;
15890 found = false;
15891 for (i = 0; i < ARRAY_SIZE (attrs); i++)
15892 {
15893 opt_len = attrs[i].len;
15894 if (ch == attrs[i].string[0]
15895 && ((attrs[i].has_arg) ? len > opt_len : len == opt_len)
15896 && memcmp (p, attrs[i].string, opt_len) == 0)
15897 {
15898 opt = attrs[i].opt;
15899 if (!opt_set_p && cl_options[opt].cl_reject_negative)
15900 continue;
15901 mask = cl_options[opt].var_value;
15902 var_type = cl_options[opt].var_type;
15903 found = true;
15904 break;
15905 }
15906 }
15907
15908 /* Process the option. */
15909 if (!found)
15910 {
15911 error ("attribute(target(\"%s\")) is unknown", orig_p);
15912 return false;
15913 }
15914 else if (attrs[i].only_as_pragma && !force_pragma)
15915 {
15916 /* Value is not allowed for the target attribute. */
15917 error ("value %qs is not supported by attribute %<target%>",
15918 attrs[i].string);
15919 return false;
15920 }
15921
15922 else if (var_type == CLVC_BIT_SET || var_type == CLVC_BIT_CLEAR)
15923 {
15924 if (var_type == CLVC_BIT_CLEAR)
15925 opt_set_p = !opt_set_p;
15926
15927 if (opt_set_p)
15928 opts->x_target_flags |= mask;
15929 else
15930 opts->x_target_flags &= ~mask;
15931 new_opts_set->x_target_flags |= mask;
15932 }
15933
15934 else if (cl_options[opt].var_type == CLVC_BOOLEAN)
15935 {
15936 int value;
15937
15938 if (cl_options[opt].cl_uinteger)
15939 {
15940 /* Unsigned integer argument. Code based on the function
15941 decode_cmdline_option () in opts-common.c. */
15942 value = integral_argument (p + opt_len);
15943 }
15944 else
15945 value = (opt_set_p) ? 1 : 0;
15946
15947 if (value != -1)
15948 {
15949 struct cl_decoded_option decoded;
15950
15951 /* Value range check; only implemented for numeric and boolean
15952 options at the moment. */
15953 generate_option (opt, NULL, value, CL_TARGET, &decoded);
15954 s390_handle_option (opts, new_opts_set, &decoded, input_location);
15955 set_option (opts, new_opts_set, opt, value,
15956 p + opt_len, DK_UNSPECIFIED, input_location,
15957 global_dc);
15958 }
15959 else
15960 {
15961 error ("attribute(target(\"%s\")) is unknown", orig_p);
15962 ret = false;
15963 }
15964 }
15965
15966 else if (cl_options[opt].var_type == CLVC_ENUM)
15967 {
15968 bool arg_ok;
15969 int value;
15970
15971 arg_ok = opt_enum_arg_to_value (opt, p + opt_len, &value, CL_TARGET);
15972 if (arg_ok)
15973 set_option (opts, new_opts_set, opt, value,
15974 p + opt_len, DK_UNSPECIFIED, input_location,
15975 global_dc);
15976 else
15977 {
15978 error ("attribute(target(\"%s\")) is unknown", orig_p);
15979 ret = false;
15980 }
15981 }
15982
15983 else
15984 gcc_unreachable ();
15985 }
15986 return ret;
15987 }
15988
15989 /* Return a TARGET_OPTION_NODE tree of the target options listed or NULL. */
15990
15991 tree
15992 s390_valid_target_attribute_tree (tree args,
15993 struct gcc_options *opts,
15994 const struct gcc_options *opts_set,
15995 bool force_pragma)
15996 {
15997 tree t = NULL_TREE;
15998 struct gcc_options new_opts_set;
15999
16000 memset (&new_opts_set, 0, sizeof (new_opts_set));
16001
16002 /* Process each of the options on the chain. */
16003 if (! s390_valid_target_attribute_inner_p (args, opts, &new_opts_set,
16004 force_pragma))
16005 return error_mark_node;
16006
16007 /* If some option was set (even if it has not changed), rerun
16008 s390_option_override_internal, and then save the options away. */
16009 if (new_opts_set.x_target_flags
16010 || new_opts_set.x_s390_arch
16011 || new_opts_set.x_s390_tune
16012 || new_opts_set.x_s390_stack_guard
16013 || new_opts_set.x_s390_stack_size
16014 || new_opts_set.x_s390_branch_cost
16015 || new_opts_set.x_s390_warn_framesize
16016 || new_opts_set.x_s390_warn_dynamicstack_p)
16017 {
16018 const unsigned char *src = (const unsigned char *)opts_set;
16019 unsigned char *dest = (unsigned char *)&new_opts_set;
16020 unsigned int i;
16021
16022 /* Merge the original option flags into the new ones. */
16023 for (i = 0; i < sizeof(*opts_set); i++)
16024 dest[i] |= src[i];
16025
16026 /* Do any overrides, such as arch=xxx, or tune=xxx support. */
16027 s390_option_override_internal (false, opts, &new_opts_set);
16028 /* Save the current options unless we are validating options for
16029 #pragma. */
16030 t = build_target_option_node (opts);
16031 }
16032 return t;
16033 }
16034
16035 /* Hook to validate attribute((target("string"))). */
16036
16037 static bool
16038 s390_valid_target_attribute_p (tree fndecl,
16039 tree ARG_UNUSED (name),
16040 tree args,
16041 int ARG_UNUSED (flags))
16042 {
16043 struct gcc_options func_options;
16044 tree new_target, new_optimize;
16045 bool ret = true;
16046
16047 /* attribute((target("default"))) does nothing, beyond
16048 affecting multi-versioning. */
16049 if (TREE_VALUE (args)
16050 && TREE_CODE (TREE_VALUE (args)) == STRING_CST
16051 && TREE_CHAIN (args) == NULL_TREE
16052 && strcmp (TREE_STRING_POINTER (TREE_VALUE (args)), "default") == 0)
16053 return true;
16054
16055 tree old_optimize = build_optimization_node (&global_options);
16056
16057 /* Get the optimization options of the current function. */
16058 tree func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
16059
16060 if (!func_optimize)
16061 func_optimize = old_optimize;
16062
16063 /* Init func_options. */
16064 memset (&func_options, 0, sizeof (func_options));
16065 init_options_struct (&func_options, NULL);
16066 lang_hooks.init_options_struct (&func_options);
16067
16068 cl_optimization_restore (&func_options, TREE_OPTIMIZATION (func_optimize));
16069
16070 /* Initialize func_options to the default before its target options can
16071 be set. */
16072 cl_target_option_restore (&func_options,
16073 TREE_TARGET_OPTION (target_option_default_node));
16074
16075 new_target = s390_valid_target_attribute_tree (args, &func_options,
16076 &global_options_set,
16077 (args ==
16078 current_target_pragma));
16079 new_optimize = build_optimization_node (&func_options);
16080 if (new_target == error_mark_node)
16081 ret = false;
16082 else if (fndecl && new_target)
16083 {
16084 DECL_FUNCTION_SPECIFIC_TARGET (fndecl) = new_target;
16085 if (old_optimize != new_optimize)
16086 DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl) = new_optimize;
16087 }
16088 return ret;
16089 }
16090
16091 /* Hook to determine if one function can safely inline another. */
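/* For example, a callee carrying attribute ((target ("arch=z14"))) is not
   inlined into a caller built for -march=z13, since the caller's
   architecture level is lower than the callee's.  */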
16092
16093 static bool
16094 s390_can_inline_p (tree caller, tree callee)
16095 {
16096 tree caller_tree = DECL_FUNCTION_SPECIFIC_TARGET (caller);
16097 tree callee_tree = DECL_FUNCTION_SPECIFIC_TARGET (callee);
16098
16099 if (!callee_tree)
16100 callee_tree = target_option_default_node;
16101 if (!caller_tree)
16102 caller_tree = target_option_default_node;
16103 if (callee_tree == caller_tree)
16104 return true;
16105
16106 struct cl_target_option *caller_opts = TREE_TARGET_OPTION (caller_tree);
16107 struct cl_target_option *callee_opts = TREE_TARGET_OPTION (callee_tree);
16108 bool ret = true;
16109
16110 if ((caller_opts->x_target_flags & ~(MASK_SOFT_FLOAT | MASK_HARD_DFP))
16111 != (callee_opts->x_target_flags & ~(MASK_SOFT_FLOAT | MASK_HARD_DFP)))
16112 ret = false;
16113
16114 /* Don't inline functions to be compiled for a more recent arch into a
16115 function for an older arch. */
16116 else if (caller_opts->x_s390_arch < callee_opts->x_s390_arch)
16117 ret = false;
16118
16119 /* Inlining a hard float function into a soft float function is only
16120 allowed if the hard float function doesn't actually make use of
16121 floating point.
16122
16123 We are called from FEs for multi-versioning call optimization, so
16124 beware of ipa_fn_summaries not available. */
16125 else if (((TARGET_SOFT_FLOAT_P (caller_opts->x_target_flags)
16126 && !TARGET_SOFT_FLOAT_P (callee_opts->x_target_flags))
16127 || (!TARGET_HARD_DFP_P (caller_opts->x_target_flags)
16128 && TARGET_HARD_DFP_P (callee_opts->x_target_flags)))
16129 && (! ipa_fn_summaries
16130 || ipa_fn_summaries->get
16131 (cgraph_node::get (callee))->fp_expressions))
16132 ret = false;
16133
16134 return ret;
16135 }
16136
16137 /* Set VAL to correct enum value according to the indirect-branch or
16138 function-return attribute in ATTR. */
16139
16140 static inline void
16141 s390_indirect_branch_attrvalue (tree attr, enum indirect_branch *val)
16142 {
16143 const char *str = TREE_STRING_POINTER (TREE_VALUE (TREE_VALUE (attr)));
16144 if (strcmp (str, "keep") == 0)
16145 *val = indirect_branch_keep;
16146 else if (strcmp (str, "thunk") == 0)
16147 *val = indirect_branch_thunk;
16148 else if (strcmp (str, "thunk-inline") == 0)
16149 *val = indirect_branch_thunk_inline;
16150 else if (strcmp (str, "thunk-extern") == 0)
16151 *val = indirect_branch_thunk_extern;
16152 }
16153
16154 /* Memorize the setting for -mindirect-branch* and -mfunction-return*
16155 from either the cmdline or the function attributes in
16156 cfun->machine. */
16157
16158 static void
16159 s390_indirect_branch_settings (tree fndecl)
16160 {
16161 tree attr;
16162
16163 if (!fndecl)
16164 return;
16165
16166 /* Initialize with the cmdline options and let the attributes
16167 override it. */
16168 cfun->machine->indirect_branch_jump = s390_indirect_branch_jump;
16169 cfun->machine->indirect_branch_call = s390_indirect_branch_call;
16170
16171 cfun->machine->function_return_reg = s390_function_return_reg;
16172 cfun->machine->function_return_mem = s390_function_return_mem;
16173
16174 if ((attr = lookup_attribute ("indirect_branch",
16175 DECL_ATTRIBUTES (fndecl))))
16176 {
16177 s390_indirect_branch_attrvalue (attr,
16178 &cfun->machine->indirect_branch_jump);
16179 s390_indirect_branch_attrvalue (attr,
16180 &cfun->machine->indirect_branch_call);
16181 }
16182
16183 if ((attr = lookup_attribute ("indirect_branch_jump",
16184 DECL_ATTRIBUTES (fndecl))))
16185 s390_indirect_branch_attrvalue (attr, &cfun->machine->indirect_branch_jump);
16186
16187 if ((attr = lookup_attribute ("indirect_branch_call",
16188 DECL_ATTRIBUTES (fndecl))))
16189 s390_indirect_branch_attrvalue (attr, &cfun->machine->indirect_branch_call);
16190
16191 if ((attr = lookup_attribute ("function_return",
16192 DECL_ATTRIBUTES (fndecl))))
16193 {
16194 s390_indirect_branch_attrvalue (attr,
16195 &cfun->machine->function_return_reg);
16196 s390_indirect_branch_attrvalue (attr,
16197 &cfun->machine->function_return_mem);
16198 }
16199
16200 if ((attr = lookup_attribute ("function_return_reg",
16201 DECL_ATTRIBUTES (fndecl))))
16202 s390_indirect_branch_attrvalue (attr, &cfun->machine->function_return_reg);
16203
16204 if ((attr = lookup_attribute ("function_return_mem",
16205 DECL_ATTRIBUTES (fndecl))))
16206 s390_indirect_branch_attrvalue (attr, &cfun->machine->function_return_mem);
16207 }
16208
16209 /* Restore targets globals from NEW_TREE and invalidate s390_previous_fndecl
16210 cache. */
16211
16212 void
16213 s390_activate_target_options (tree new_tree)
16214 {
16215 cl_target_option_restore (&global_options, TREE_TARGET_OPTION (new_tree));
16216 if (TREE_TARGET_GLOBALS (new_tree))
16217 restore_target_globals (TREE_TARGET_GLOBALS (new_tree));
16218 else if (new_tree == target_option_default_node)
16219 restore_target_globals (&default_target_globals);
16220 else
16221 TREE_TARGET_GLOBALS (new_tree) = save_target_globals_default_opts ();
16222 s390_previous_fndecl = NULL_TREE;
16223 }
16224
16225 /* Establish appropriate back-end context for processing the function
16226 FNDECL. The argument might be NULL to indicate processing at top
16227 level, outside of any function scope. */
16228 static void
16229 s390_set_current_function (tree fndecl)
16230 {
16231 /* Only change the context if the function changes. This hook is called
16232 several times in the course of compiling a function, and we don't want to
16233 slow things down too much or call target_reinit when it isn't safe. */
16234 if (fndecl == s390_previous_fndecl)
16235 {
16236 s390_indirect_branch_settings (fndecl);
16237 return;
16238 }
16239
16240 tree old_tree;
16241 if (s390_previous_fndecl == NULL_TREE)
16242 old_tree = target_option_current_node;
16243 else if (DECL_FUNCTION_SPECIFIC_TARGET (s390_previous_fndecl))
16244 old_tree = DECL_FUNCTION_SPECIFIC_TARGET (s390_previous_fndecl);
16245 else
16246 old_tree = target_option_default_node;
16247
16248 if (fndecl == NULL_TREE)
16249 {
16250 if (old_tree != target_option_current_node)
16251 s390_activate_target_options (target_option_current_node);
16252 return;
16253 }
16254
16255 tree new_tree = DECL_FUNCTION_SPECIFIC_TARGET (fndecl);
16256 if (new_tree == NULL_TREE)
16257 new_tree = target_option_default_node;
16258
16259 if (old_tree != new_tree)
16260 s390_activate_target_options (new_tree);
16261 s390_previous_fndecl = fndecl;
16262
16263 s390_indirect_branch_settings (fndecl);
16264 }
16265 #endif
16266
16267 /* Implement TARGET_USE_BY_PIECES_INFRASTRUCTURE_P. */
16268
16269 static bool
16270 s390_use_by_pieces_infrastructure_p (unsigned HOST_WIDE_INT size,
16271 unsigned int align ATTRIBUTE_UNUSED,
16272 enum by_pieces_operation op ATTRIBUTE_UNUSED,
16273 bool speed_p ATTRIBUTE_UNUSED)
16274 {
16275 return (size == 1 || size == 2
16276 || size == 4 || (TARGET_ZARCH && size == 8));
16277 }
16278
16279 /* Implement TARGET_ATOMIC_ASSIGN_EXPAND_FENV hook. */
16280
16281 static void
16282 s390_atomic_assign_expand_fenv (tree *hold, tree *clear, tree *update)
16283 {
16284 tree sfpc = s390_builtin_decls[S390_BUILTIN_s390_sfpc];
16285 tree efpc = s390_builtin_decls[S390_BUILTIN_s390_efpc];
16286 tree call_efpc = build_call_expr (efpc, 0);
16287 tree fenv_var = create_tmp_var_raw (unsigned_type_node);
16288
16289 #define FPC_EXCEPTION_MASK HOST_WIDE_INT_UC (0xf8000000)
16290 #define FPC_FLAGS_MASK HOST_WIDE_INT_UC (0x00f80000)
16291 #define FPC_DXC_MASK HOST_WIDE_INT_UC (0x0000ff00)
16292 #define FPC_EXCEPTION_MASK_SHIFT HOST_WIDE_INT_UC (24)
16293 #define FPC_FLAGS_SHIFT HOST_WIDE_INT_UC (16)
16294 #define FPC_DXC_SHIFT HOST_WIDE_INT_UC (8)
16295
16296 /* Generates the equivalent of feholdexcept (&fenv_var)
16297
16298 fenv_var = __builtin_s390_efpc ();
16299 __builtin_s390_sfpc (fenv_var & mask) */
16300 tree old_fpc = build2 (MODIFY_EXPR, unsigned_type_node, fenv_var, call_efpc);
16301 tree new_fpc =
16302 build2 (BIT_AND_EXPR, unsigned_type_node, fenv_var,
16303 build_int_cst (unsigned_type_node,
16304 ~(FPC_DXC_MASK | FPC_FLAGS_MASK |
16305 FPC_EXCEPTION_MASK)));
16306 tree set_new_fpc = build_call_expr (sfpc, 1, new_fpc);
16307 *hold = build2 (COMPOUND_EXPR, void_type_node, old_fpc, set_new_fpc);
16308
16309 /* Generates the equivalent of feclearexcept (FE_ALL_EXCEPT)
16310
16311 __builtin_s390_sfpc (__builtin_s390_efpc () & mask) */
16312 new_fpc = build2 (BIT_AND_EXPR, unsigned_type_node, call_efpc,
16313 build_int_cst (unsigned_type_node,
16314 ~(FPC_DXC_MASK | FPC_FLAGS_MASK)));
16315 *clear = build_call_expr (sfpc, 1, new_fpc);
16316
16317 /* Generates the equivalent of feupdateenv (fenv_var)
16318
16319 old_fpc = __builtin_s390_efpc ();
16320 __builtin_s390_sfpc (fenv_var);
16321 __atomic_feraiseexcept ((old_fpc & FPC_FLAGS_MASK) >> FPC_FLAGS_SHIFT); */
16322
16323 old_fpc = create_tmp_var_raw (unsigned_type_node);
16324 tree store_old_fpc = build2 (MODIFY_EXPR, void_type_node,
16325 old_fpc, call_efpc);
16326
16327 set_new_fpc = build_call_expr (sfpc, 1, fenv_var);
16328
16329 tree raise_old_except = build2 (BIT_AND_EXPR, unsigned_type_node, old_fpc,
16330 build_int_cst (unsigned_type_node,
16331 FPC_FLAGS_MASK));
16332 raise_old_except = build2 (RSHIFT_EXPR, unsigned_type_node, raise_old_except,
16333 build_int_cst (unsigned_type_node,
16334 FPC_FLAGS_SHIFT));
16335 tree atomic_feraiseexcept
16336 = builtin_decl_implicit (BUILT_IN_ATOMIC_FERAISEEXCEPT);
16337 raise_old_except = build_call_expr (atomic_feraiseexcept,
16338 1, raise_old_except);
16339
16340 *update = build2 (COMPOUND_EXPR, void_type_node,
16341 build2 (COMPOUND_EXPR, void_type_node,
16342 store_old_fpc, set_new_fpc),
16343 raise_old_except);
16344
16345 #undef FPC_EXCEPTION_MASK
16346 #undef FPC_FLAGS_MASK
16347 #undef FPC_DXC_MASK
16348 #undef FPC_EXCEPTION_MASK_SHIFT
16349 #undef FPC_FLAGS_SHIFT
16350 #undef FPC_DXC_SHIFT
16351 }
16352
16353 /* Return the vector mode to be used for inner mode MODE when doing
16354 vectorization. */
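/* E.g. an SImode loop is vectorized using V4SImode (four 32-bit lanes in
   one 128-bit vector register) when vector support is enabled, while
   SFmode only gets V4SFmode with the vector enhancements facility
   (TARGET_VXE).  */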
16355 static machine_mode
16356 s390_preferred_simd_mode (scalar_mode mode)
16357 {
16358 if (TARGET_VXE)
16359 switch (mode)
16360 {
16361 case E_SFmode:
16362 return V4SFmode;
16363 default:;
16364 }
16365
16366 if (TARGET_VX)
16367 switch (mode)
16368 {
16369 case E_DFmode:
16370 return V2DFmode;
16371 case E_DImode:
16372 return V2DImode;
16373 case E_SImode:
16374 return V4SImode;
16375 case E_HImode:
16376 return V8HImode;
16377 case E_QImode:
16378 return V16QImode;
16379 default:;
16380 }
16381 return word_mode;
16382 }
16383
16384 /* Our hardware does not require vectors to be strictly aligned. */
16385 static bool
16386 s390_support_vector_misalignment (machine_mode mode ATTRIBUTE_UNUSED,
16387 const_tree type ATTRIBUTE_UNUSED,
16388 int misalignment ATTRIBUTE_UNUSED,
16389 bool is_packed ATTRIBUTE_UNUSED)
16390 {
16391 if (TARGET_VX)
16392 return true;
16393
16394 return default_builtin_support_vector_misalignment (mode, type, misalignment,
16395 is_packed);
16396 }
16397
16398 /* The vector ABI requires vector types to be aligned on an 8 byte
16399 boundary (our stack alignment). However, we allow this to be
16400 overridden by the user, although this definitely breaks the ABI. */
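/* E.g. a 16-byte (128-bit) vector type is given only 8-byte alignment by
   default (MIN (64, 128) bits), matching the stack alignment, unless the
   user requested a larger alignment explicitly.  */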
16401 static HOST_WIDE_INT
16402 s390_vector_alignment (const_tree type)
16403 {
16404 if (!TARGET_VX_ABI)
16405 return default_vector_alignment (type);
16406
16407 if (TYPE_USER_ALIGN (type))
16408 return TYPE_ALIGN (type);
16409
16410 return MIN (64, tree_to_shwi (TYPE_SIZE (type)));
16411 }
16412
16413 /* Implement TARGET_CONSTANT_ALIGNMENT. Alignment on even addresses for
16414 LARL instruction. */
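/* E.g. a string literal with byte alignment is raised to 2-byte alignment
   (16 bits) so that its address is even and thus LARL-addressable.  */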
16415
16416 static HOST_WIDE_INT
16417 s390_constant_alignment (const_tree, HOST_WIDE_INT align)
16418 {
16419 return MAX (align, 16);
16420 }
16421
16422 #ifdef HAVE_AS_MACHINE_MACHINEMODE
16423 /* Implement TARGET_ASM_FILE_START. */
16424 static void
16425 s390_asm_file_start (void)
16426 {
16427 default_file_start ();
16428 s390_asm_output_machine_for_arch (asm_out_file);
16429 }
16430 #endif
16431
16432 /* Implement TARGET_ASM_FILE_END. */
16433 static void
16434 s390_asm_file_end (void)
16435 {
16436 #ifdef HAVE_AS_GNU_ATTRIBUTE
16437 varpool_node *vnode;
16438 cgraph_node *cnode;
16439
16440 FOR_EACH_VARIABLE (vnode)
16441 if (TREE_PUBLIC (vnode->decl))
16442 s390_check_type_for_vector_abi (TREE_TYPE (vnode->decl), false, false);
16443
16444 FOR_EACH_FUNCTION (cnode)
16445 if (TREE_PUBLIC (cnode->decl))
16446 s390_check_type_for_vector_abi (TREE_TYPE (cnode->decl), false, false);
16447
16448
16449 if (s390_vector_abi != 0)
16450 fprintf (asm_out_file, "\t.gnu_attribute 8, %d\n",
16451 s390_vector_abi);
16452 #endif
16453 file_end_indicate_exec_stack ();
16454
16455 if (flag_split_stack)
16456 file_end_indicate_split_stack ();
16457 }
16458
16459 /* Return true if TYPE is a vector bool type. */
16460 static inline bool
16461 s390_vector_bool_type_p (const_tree type)
16462 {
16463 return TYPE_VECTOR_OPAQUE (type);
16464 }
16465
16466 /* Return the diagnostic message string if the binary operation OP is
16467 not permitted on TYPE1 and TYPE2, NULL otherwise. */
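/* E.g. with -mzvector, adding a vector bool int to a vector signed int is
   accepted, while multiplying or comparing the two is rejected with one
   of the messages below.  */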
16468 static const char*
16469 s390_invalid_binary_op (int op ATTRIBUTE_UNUSED, const_tree type1, const_tree type2)
16470 {
16471 bool bool1_p, bool2_p;
16472 bool plusminus_p;
16473 bool muldiv_p;
16474 bool compare_p;
16475 machine_mode mode1, mode2;
16476
16477 if (!TARGET_ZVECTOR)
16478 return NULL;
16479
16480 if (!VECTOR_TYPE_P (type1) || !VECTOR_TYPE_P (type2))
16481 return NULL;
16482
16483 bool1_p = s390_vector_bool_type_p (type1);
16484 bool2_p = s390_vector_bool_type_p (type2);
16485
16486 /* Mixing signed and unsigned types is forbidden for all
16487 operators. */
16488 if (!bool1_p && !bool2_p
16489 && TYPE_UNSIGNED (type1) != TYPE_UNSIGNED (type2))
16490 return N_("types differ in signedness");
16491
16492 plusminus_p = (op == PLUS_EXPR || op == MINUS_EXPR);
16493 muldiv_p = (op == MULT_EXPR || op == RDIV_EXPR || op == TRUNC_DIV_EXPR
16494 || op == CEIL_DIV_EXPR || op == FLOOR_DIV_EXPR
16495 || op == ROUND_DIV_EXPR);
16496 compare_p = (op == LT_EXPR || op == LE_EXPR || op == GT_EXPR || op == GE_EXPR
16497 || op == EQ_EXPR || op == NE_EXPR);
16498
16499 if (bool1_p && bool2_p && (plusminus_p || muldiv_p))
16500 return N_("binary operator does not support two vector bool operands");
16501
16502 if (bool1_p != bool2_p && (muldiv_p || compare_p))
16503 return N_("binary operator does not support vector bool operand");
16504
16505 mode1 = TYPE_MODE (type1);
16506 mode2 = TYPE_MODE (type2);
16507
16508 if (bool1_p != bool2_p && plusminus_p
16509 && (GET_MODE_CLASS (mode1) == MODE_VECTOR_FLOAT
16510 || GET_MODE_CLASS (mode2) == MODE_VECTOR_FLOAT))
16511 return N_("binary operator does not support mixing vector "
16512 "bool with floating point vector operands");
16513
16514 return NULL;
16515 }
16516
16517 /* Implement TARGET_C_EXCESS_PRECISION.
16518
16519 FIXME: For historical reasons, float_t and double_t are typedef'ed to
16520 double on s390, causing operations on float_t to operate in a higher
16521 precision than is necessary. However, it is not the case that SFmode
16522 operations have implicit excess precision, and we generate more optimal
16523 code if we let the compiler know no implicit extra precision is added.
16524
16525 That means when we are compiling with -fexcess-precision=fast, the value
16526 we set for FLT_EVAL_METHOD will be out of line with the actual precision of
16527 float_t (though they would be correct for -fexcess-precision=standard).
16528
16529 A complete fix would modify glibc to remove the unnecessary typedef
16530 of float_t to double. */
16531
16532 static enum flt_eval_method
16533 s390_excess_precision (enum excess_precision_type type)
16534 {
16535 switch (type)
16536 {
16537 case EXCESS_PRECISION_TYPE_IMPLICIT:
16538 case EXCESS_PRECISION_TYPE_FAST:
16539 /* The fastest type to promote to will always be the native type,
16540 whether that occurs with implicit excess precision or
16541 otherwise. */
16542 return FLT_EVAL_METHOD_PROMOTE_TO_FLOAT;
16543 case EXCESS_PRECISION_TYPE_STANDARD:
16544 /* Otherwise, when we are in a standards compliant mode, to
16545 ensure consistency with the implementation in glibc, report that
16546 float is evaluated to the range and precision of double. */
16547 return FLT_EVAL_METHOD_PROMOTE_TO_DOUBLE;
16548 default:
16549 gcc_unreachable ();
16550 }
16551 return FLT_EVAL_METHOD_UNPREDICTABLE;
16552 }
16553
16554 /* Implement the TARGET_ASAN_SHADOW_OFFSET hook. */
16555
16556 static unsigned HOST_WIDE_INT
16557 s390_asan_shadow_offset (void)
16558 {
16559 return TARGET_64BIT ? HOST_WIDE_INT_1U << 52 : HOST_WIDE_INT_UC (0x20000000);
16560 }
16561
16562 #ifdef HAVE_GAS_HIDDEN
16563 # define USE_HIDDEN_LINKONCE 1
16564 #else
16565 # define USE_HIDDEN_LINKONCE 0
16566 #endif
16567
16568 /* Output an indirect branch trampoline for target register REGNO. */
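/* For the z10 (EXRL) variant the emitted body is roughly

	exrl	0,1f
   0:	j	0b
   1:	br	%r<REGNO>

   i.e. the br is only ever reached via the execute instruction, and the
   branch-to-self at label 0 catches speculative fall-through.  */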
16569
16570 static void
16571 s390_output_indirect_thunk_function (unsigned int regno, bool z10_p)
16572 {
16573 tree decl;
16574 char thunk_label[32];
16575 int i;
16576
16577 if (z10_p)
16578 sprintf (thunk_label, TARGET_INDIRECT_BRANCH_THUNK_NAME_EXRL, regno);
16579 else
16580 sprintf (thunk_label, TARGET_INDIRECT_BRANCH_THUNK_NAME_EX,
16581 INDIRECT_BRANCH_THUNK_REGNUM, regno);
16582
16583 decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
16584 get_identifier (thunk_label),
16585 build_function_type_list (void_type_node, NULL_TREE));
16586 DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL,
16587 NULL_TREE, void_type_node);
16588 TREE_PUBLIC (decl) = 1;
16589 TREE_STATIC (decl) = 1;
16590 DECL_IGNORED_P (decl) = 1;
16591
16592 if (USE_HIDDEN_LINKONCE)
16593 {
16594 cgraph_node::create (decl)->set_comdat_group (DECL_ASSEMBLER_NAME (decl));
16595
16596 targetm.asm_out.unique_section (decl, 0);
16597 switch_to_section (get_named_section (decl, NULL, 0));
16598
16599 targetm.asm_out.globalize_label (asm_out_file, thunk_label);
16600 fputs ("\t.hidden\t", asm_out_file);
16601 assemble_name (asm_out_file, thunk_label);
16602 putc ('\n', asm_out_file);
16603 ASM_DECLARE_FUNCTION_NAME (asm_out_file, thunk_label, decl);
16604 }
16605 else
16606 {
16607 switch_to_section (text_section);
16608 ASM_OUTPUT_LABEL (asm_out_file, thunk_label);
16609 }
16610
16611 DECL_INITIAL (decl) = make_node (BLOCK);
16612 current_function_decl = decl;
16613 allocate_struct_function (decl, false);
16614 init_function_start (decl);
16615 cfun->is_thunk = true;
16616 first_function_block_is_cold = false;
16617 final_start_function (emit_barrier (), asm_out_file, 1);
16618
16619 /* This makes CFI at least usable for indirect jumps.
16620
16621 Stopping in the thunk: the backtrace will point to the thunk target
16622 as if it had been interrupted by a signal.  For a call this means that
16623 the call chain will be: caller->callee->thunk. */
16624 if (flag_asynchronous_unwind_tables && flag_dwarf2_cfi_asm)
16625 {
16626 fputs ("\t.cfi_signal_frame\n", asm_out_file);
16627 fprintf (asm_out_file, "\t.cfi_return_column %d\n", regno);
16628 for (i = 0; i < FPR15_REGNUM; i++)
16629 fprintf (asm_out_file, "\t.cfi_same_value %s\n", reg_names[i]);
16630 }
16631
16632 if (z10_p)
16633 {
16634 /* exrl 0,1f */
16635
16636 /* We generate a thunk for z10 compiled code even when the z10
16637 facilities are not currently enabled.  Tell the assembler to
16638 accept the instruction. */
16639 if (!TARGET_CPU_Z10)
16640 {
16641 fputs ("\t.machine push\n", asm_out_file);
16642 fputs ("\t.machine z10\n", asm_out_file);
16643 }
16644 /* We use exrl even if -mzarch hasn't been specified on the
16645 command line, so we have to tell the assembler to accept
16646 it. */
16647 if (!TARGET_ZARCH)
16648 fputs ("\t.machinemode zarch\n", asm_out_file);
16649
16650 fputs ("\texrl\t0,1f\n", asm_out_file);
16651
16652 if (!TARGET_ZARCH)
16653 fputs ("\t.machinemode esa\n", asm_out_file);
16654
16655 if (!TARGET_CPU_Z10)
16656 fputs ("\t.machine pop\n", asm_out_file);
16657 }
16658 else if (TARGET_CPU_ZARCH)
16659 {
16660 /* larl %r1,1f */
16661 fprintf (asm_out_file, "\tlarl\t%%r%d,1f\n",
16662 INDIRECT_BRANCH_THUNK_REGNUM);
16663
16664 /* ex 0,0(%r1) */
16665 fprintf (asm_out_file, "\tex\t0,0(%%r%d)\n",
16666 INDIRECT_BRANCH_THUNK_REGNUM);
16667 }
16668 else
16669 gcc_unreachable ();
16670
16671 /* 0: j 0b */
16672 fputs ("0:\tj\t0b\n", asm_out_file);
16673
16674 /* 1: br <regno> */
16675 fprintf (asm_out_file, "1:\tbr\t%%r%d\n", regno);
16676
16677 final_end_function ();
16678 init_insn_lengths ();
16679 free_after_compilation (cfun);
16680 set_cfun (NULL);
16681 current_function_decl = NULL;
16682 }
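
/* For illustration, with REGNO == 1 the z10 variant emitted above comes
   out roughly as (the thunk label is produced from
   TARGET_INDIRECT_BRANCH_THUNK_NAME_EXRL and only sketched here):

     __s390_indirect_jump_r1:
             exrl    0,1f
     0:      j       0b
     1:      br      %r1

   The exrl executes the br out of line, while the branch at label 0 is
   never reached architecturally and is meant to stop speculative
   execution from running past the thunk.  */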
16683
16684 /* Implement the asm.code_end target hook. */
16685
16686 static void
16687 s390_code_end (void)
16688 {
16689 int i;
16690
16691 for (i = 1; i < 16; i++)
16692 {
16693 if (indirect_branch_z10thunk_mask & (1 << i))
16694 s390_output_indirect_thunk_function (i, true);
16695
16696 if (indirect_branch_prez10thunk_mask & (1 << i))
16697 s390_output_indirect_thunk_function (i, false);
16698 }
16699
16700 if (TARGET_INDIRECT_BRANCH_TABLE)
16701 {
16702 int o;
16703 int i;
16704
16705 for (o = 0; o < INDIRECT_BRANCH_NUM_OPTIONS; o++)
16706 {
16707 if (indirect_branch_table_label_no[o] == 0)
16708 continue;
16709
16710 switch_to_section (get_section (indirect_branch_table_name[o],
16711 0,
16712 NULL_TREE));
16713 for (i = 0; i < indirect_branch_table_label_no[o]; i++)
16714 {
16715 char label_start[32];
16716
16717 ASM_GENERATE_INTERNAL_LABEL (label_start,
16718 indirect_branch_table_label[o], i);
16719
16720 fputs ("\t.long\t", asm_out_file);
16721 assemble_name_raw (asm_out_file, label_start);
16722 fputs ("-.\n", asm_out_file);
16723 }
16724 switch_to_section (current_function_section ());
16725 }
16726 }
16727 }
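
/* The tables emitted here have roughly this shape (one section per entry
   of indirect_branch_table_name, labels generated with
   ASM_GENERATE_INTERNAL_LABEL; the names below are only schematic):

             .section  .s390_indirect_jump
             .long     .LLABEL0-.
             .long     .LLABEL1-.
             ...

   i.e. lists of 32-bit self-relative offsets that allow external tools to
   locate the converted indirect branches.  */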
16728
16729 /* Implement the TARGET_CASE_VALUES_THRESHOLD target hook. */
16730
16731 unsigned int
16732 s390_case_values_threshold (void)
16733 {
16734 /* Disabling branch prediction for indirect jumps makes jump tables
16735 much more expensive. */
16736 if (TARGET_INDIRECT_BRANCH_NOBP_JUMP)
16737 return 20;
16738
16739 return default_case_values_threshold ();
16740 }
16741
16742 /* Initialize GCC target structure. */
16743
16744 #undef TARGET_ASM_ALIGNED_HI_OP
16745 #define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
16746 #undef TARGET_ASM_ALIGNED_DI_OP
16747 #define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
16748 #undef TARGET_ASM_INTEGER
16749 #define TARGET_ASM_INTEGER s390_assemble_integer
16750
16751 #undef TARGET_ASM_OPEN_PAREN
16752 #define TARGET_ASM_OPEN_PAREN ""
16753
16754 #undef TARGET_ASM_CLOSE_PAREN
16755 #define TARGET_ASM_CLOSE_PAREN ""
16756
16757 #undef TARGET_OPTION_OVERRIDE
16758 #define TARGET_OPTION_OVERRIDE s390_option_override
16759
16760 #ifdef TARGET_THREAD_SSP_OFFSET
16761 #undef TARGET_STACK_PROTECT_GUARD
16762 #define TARGET_STACK_PROTECT_GUARD hook_tree_void_null
16763 #endif
16764
16765 #undef TARGET_ENCODE_SECTION_INFO
16766 #define TARGET_ENCODE_SECTION_INFO s390_encode_section_info
16767
16768 #undef TARGET_SCALAR_MODE_SUPPORTED_P
16769 #define TARGET_SCALAR_MODE_SUPPORTED_P s390_scalar_mode_supported_p
16770
16771 #ifdef HAVE_AS_TLS
16772 #undef TARGET_HAVE_TLS
16773 #define TARGET_HAVE_TLS true
16774 #endif
16775 #undef TARGET_CANNOT_FORCE_CONST_MEM
16776 #define TARGET_CANNOT_FORCE_CONST_MEM s390_cannot_force_const_mem
16777
16778 #undef TARGET_DELEGITIMIZE_ADDRESS
16779 #define TARGET_DELEGITIMIZE_ADDRESS s390_delegitimize_address
16780
16781 #undef TARGET_LEGITIMIZE_ADDRESS
16782 #define TARGET_LEGITIMIZE_ADDRESS s390_legitimize_address
16783
16784 #undef TARGET_RETURN_IN_MEMORY
16785 #define TARGET_RETURN_IN_MEMORY s390_return_in_memory
16786
16787 #undef TARGET_INIT_BUILTINS
16788 #define TARGET_INIT_BUILTINS s390_init_builtins
16789 #undef TARGET_EXPAND_BUILTIN
16790 #define TARGET_EXPAND_BUILTIN s390_expand_builtin
16791 #undef TARGET_BUILTIN_DECL
16792 #define TARGET_BUILTIN_DECL s390_builtin_decl
16793
16794 #undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
16795 #define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA s390_output_addr_const_extra
16796
16797 #undef TARGET_ASM_OUTPUT_MI_THUNK
16798 #define TARGET_ASM_OUTPUT_MI_THUNK s390_output_mi_thunk
16799 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
16800 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
16801
16802 #undef TARGET_C_EXCESS_PRECISION
16803 #define TARGET_C_EXCESS_PRECISION s390_excess_precision
16804
16805 #undef TARGET_SCHED_ADJUST_PRIORITY
16806 #define TARGET_SCHED_ADJUST_PRIORITY s390_adjust_priority
16807 #undef TARGET_SCHED_ISSUE_RATE
16808 #define TARGET_SCHED_ISSUE_RATE s390_issue_rate
16809 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
16810 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD s390_first_cycle_multipass_dfa_lookahead
16811
16812 #undef TARGET_SCHED_VARIABLE_ISSUE
16813 #define TARGET_SCHED_VARIABLE_ISSUE s390_sched_variable_issue
16814 #undef TARGET_SCHED_REORDER
16815 #define TARGET_SCHED_REORDER s390_sched_reorder
16816 #undef TARGET_SCHED_INIT
16817 #define TARGET_SCHED_INIT s390_sched_init
16818
16819 #undef TARGET_CANNOT_COPY_INSN_P
16820 #define TARGET_CANNOT_COPY_INSN_P s390_cannot_copy_insn_p
16821 #undef TARGET_RTX_COSTS
16822 #define TARGET_RTX_COSTS s390_rtx_costs
16823 #undef TARGET_ADDRESS_COST
16824 #define TARGET_ADDRESS_COST s390_address_cost
16825 #undef TARGET_REGISTER_MOVE_COST
16826 #define TARGET_REGISTER_MOVE_COST s390_register_move_cost
16827 #undef TARGET_MEMORY_MOVE_COST
16828 #define TARGET_MEMORY_MOVE_COST s390_memory_move_cost
16829 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST
16830 #define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST \
16831 s390_builtin_vectorization_cost
16832
16833 #undef TARGET_MACHINE_DEPENDENT_REORG
16834 #define TARGET_MACHINE_DEPENDENT_REORG s390_reorg
16835
16836 #undef TARGET_VALID_POINTER_MODE
16837 #define TARGET_VALID_POINTER_MODE s390_valid_pointer_mode
16838
16839 #undef TARGET_BUILD_BUILTIN_VA_LIST
16840 #define TARGET_BUILD_BUILTIN_VA_LIST s390_build_builtin_va_list
16841 #undef TARGET_EXPAND_BUILTIN_VA_START
16842 #define TARGET_EXPAND_BUILTIN_VA_START s390_va_start
16843 #undef TARGET_ASAN_SHADOW_OFFSET
16844 #define TARGET_ASAN_SHADOW_OFFSET s390_asan_shadow_offset
16845 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
16846 #define TARGET_GIMPLIFY_VA_ARG_EXPR s390_gimplify_va_arg
16847
16848 #undef TARGET_PROMOTE_FUNCTION_MODE
16849 #define TARGET_PROMOTE_FUNCTION_MODE s390_promote_function_mode
16850 #undef TARGET_PASS_BY_REFERENCE
16851 #define TARGET_PASS_BY_REFERENCE s390_pass_by_reference
16852
16853 #undef TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE
16854 #define TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE s390_override_options_after_change
16855
16856 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
16857 #define TARGET_FUNCTION_OK_FOR_SIBCALL s390_function_ok_for_sibcall
16858 #undef TARGET_FUNCTION_ARG
16859 #define TARGET_FUNCTION_ARG s390_function_arg
16860 #undef TARGET_FUNCTION_ARG_ADVANCE
16861 #define TARGET_FUNCTION_ARG_ADVANCE s390_function_arg_advance
16862 #undef TARGET_FUNCTION_ARG_PADDING
16863 #define TARGET_FUNCTION_ARG_PADDING s390_function_arg_padding
16864 #undef TARGET_FUNCTION_VALUE
16865 #define TARGET_FUNCTION_VALUE s390_function_value
16866 #undef TARGET_LIBCALL_VALUE
16867 #define TARGET_LIBCALL_VALUE s390_libcall_value
16868 #undef TARGET_STRICT_ARGUMENT_NAMING
16869 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
16870
16871 #undef TARGET_KEEP_LEAF_WHEN_PROFILED
16872 #define TARGET_KEEP_LEAF_WHEN_PROFILED s390_keep_leaf_when_profiled
16873
16874 #undef TARGET_FIXED_CONDITION_CODE_REGS
16875 #define TARGET_FIXED_CONDITION_CODE_REGS s390_fixed_condition_code_regs
16876
16877 #undef TARGET_CC_MODES_COMPATIBLE
16878 #define TARGET_CC_MODES_COMPATIBLE s390_cc_modes_compatible
16879
16880 #undef TARGET_INVALID_WITHIN_DOLOOP
16881 #define TARGET_INVALID_WITHIN_DOLOOP hook_constcharptr_const_rtx_insn_null
16882
16883 #ifdef HAVE_AS_TLS
16884 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
16885 #define TARGET_ASM_OUTPUT_DWARF_DTPREL s390_output_dwarf_dtprel
16886 #endif
16887
16888 #undef TARGET_DWARF_FRAME_REG_MODE
16889 #define TARGET_DWARF_FRAME_REG_MODE s390_dwarf_frame_reg_mode
16890
16891 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
16892 #undef TARGET_MANGLE_TYPE
16893 #define TARGET_MANGLE_TYPE s390_mangle_type
16894 #endif
16895
16899 #undef TARGET_VECTOR_MODE_SUPPORTED_P
16900 #define TARGET_VECTOR_MODE_SUPPORTED_P s390_vector_mode_supported_p
16901
16902 #undef TARGET_PREFERRED_RELOAD_CLASS
16903 #define TARGET_PREFERRED_RELOAD_CLASS s390_preferred_reload_class
16904
16905 #undef TARGET_SECONDARY_RELOAD
16906 #define TARGET_SECONDARY_RELOAD s390_secondary_reload
16907 #undef TARGET_SECONDARY_MEMORY_NEEDED
16908 #define TARGET_SECONDARY_MEMORY_NEEDED s390_secondary_memory_needed
16909 #undef TARGET_SECONDARY_MEMORY_NEEDED_MODE
16910 #define TARGET_SECONDARY_MEMORY_NEEDED_MODE s390_secondary_memory_needed_mode
16911
16912 #undef TARGET_LIBGCC_CMP_RETURN_MODE
16913 #define TARGET_LIBGCC_CMP_RETURN_MODE s390_libgcc_cmp_return_mode
16914
16915 #undef TARGET_LIBGCC_SHIFT_COUNT_MODE
16916 #define TARGET_LIBGCC_SHIFT_COUNT_MODE s390_libgcc_shift_count_mode
16917
16918 #undef TARGET_LEGITIMATE_ADDRESS_P
16919 #define TARGET_LEGITIMATE_ADDRESS_P s390_legitimate_address_p
16920
16921 #undef TARGET_LEGITIMATE_CONSTANT_P
16922 #define TARGET_LEGITIMATE_CONSTANT_P s390_legitimate_constant_p
16923
16924 #undef TARGET_LRA_P
16925 #define TARGET_LRA_P s390_lra_p
16926
16927 #undef TARGET_CAN_ELIMINATE
16928 #define TARGET_CAN_ELIMINATE s390_can_eliminate
16929
16930 #undef TARGET_CONDITIONAL_REGISTER_USAGE
16931 #define TARGET_CONDITIONAL_REGISTER_USAGE s390_conditional_register_usage
16932
16933 #undef TARGET_LOOP_UNROLL_ADJUST
16934 #define TARGET_LOOP_UNROLL_ADJUST s390_loop_unroll_adjust
16935
16936 #undef TARGET_ASM_TRAMPOLINE_TEMPLATE
16937 #define TARGET_ASM_TRAMPOLINE_TEMPLATE s390_asm_trampoline_template
16938 #undef TARGET_TRAMPOLINE_INIT
16939 #define TARGET_TRAMPOLINE_INIT s390_trampoline_init
16940
16941 /* PR 79421 */
16942 #undef TARGET_CUSTOM_FUNCTION_DESCRIPTORS
16943 #define TARGET_CUSTOM_FUNCTION_DESCRIPTORS 1
16944
16945 #undef TARGET_UNWIND_WORD_MODE
16946 #define TARGET_UNWIND_WORD_MODE s390_unwind_word_mode
16947
16948 #undef TARGET_CANONICALIZE_COMPARISON
16949 #define TARGET_CANONICALIZE_COMPARISON s390_canonicalize_comparison
16950
16951 #undef TARGET_HARD_REGNO_SCRATCH_OK
16952 #define TARGET_HARD_REGNO_SCRATCH_OK s390_hard_regno_scratch_ok
16953
16954 #undef TARGET_HARD_REGNO_NREGS
16955 #define TARGET_HARD_REGNO_NREGS s390_hard_regno_nregs
16956 #undef TARGET_HARD_REGNO_MODE_OK
16957 #define TARGET_HARD_REGNO_MODE_OK s390_hard_regno_mode_ok
16958 #undef TARGET_MODES_TIEABLE_P
16959 #define TARGET_MODES_TIEABLE_P s390_modes_tieable_p
16960
16961 #undef TARGET_HARD_REGNO_CALL_PART_CLOBBERED
16962 #define TARGET_HARD_REGNO_CALL_PART_CLOBBERED \
16963 s390_hard_regno_call_part_clobbered
16964
16965 #undef TARGET_ATTRIBUTE_TABLE
16966 #define TARGET_ATTRIBUTE_TABLE s390_attribute_table
16967
16968 #undef TARGET_FUNCTION_ATTRIBUTE_INLINABLE_P
16969 #define TARGET_FUNCTION_ATTRIBUTE_INLINABLE_P hook_bool_const_tree_true
16970
16971 #undef TARGET_SET_UP_BY_PROLOGUE
16972 #define TARGET_SET_UP_BY_PROLOGUE s300_set_up_by_prologue
16973
16974 #undef TARGET_EXTRA_LIVE_ON_ENTRY
16975 #define TARGET_EXTRA_LIVE_ON_ENTRY s390_live_on_entry
16976
16977 #undef TARGET_USE_BY_PIECES_INFRASTRUCTURE_P
16978 #define TARGET_USE_BY_PIECES_INFRASTRUCTURE_P \
16979 s390_use_by_pieces_infrastructure_p
16980
16981 #undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV
16982 #define TARGET_ATOMIC_ASSIGN_EXPAND_FENV s390_atomic_assign_expand_fenv
16983
16984 #undef TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN
16985 #define TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN s390_invalid_arg_for_unprototyped_fn
16986
16987 #undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
16988 #define TARGET_VECTORIZE_PREFERRED_SIMD_MODE s390_preferred_simd_mode
16989
16990 #undef TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT
16991 #define TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT s390_support_vector_misalignment
16992
16993 #undef TARGET_VECTOR_ALIGNMENT
16994 #define TARGET_VECTOR_ALIGNMENT s390_vector_alignment
16995
16996 #undef TARGET_INVALID_BINARY_OP
16997 #define TARGET_INVALID_BINARY_OP s390_invalid_binary_op
16998
16999 #ifdef HAVE_AS_MACHINE_MACHINEMODE
17000 #undef TARGET_ASM_FILE_START
17001 #define TARGET_ASM_FILE_START s390_asm_file_start
17002 #endif
17003
17004 #undef TARGET_ASM_FILE_END
17005 #define TARGET_ASM_FILE_END s390_asm_file_end
17006
17007 #if S390_USE_TARGET_ATTRIBUTE
17008 #undef TARGET_SET_CURRENT_FUNCTION
17009 #define TARGET_SET_CURRENT_FUNCTION s390_set_current_function
17010
17011 #undef TARGET_OPTION_VALID_ATTRIBUTE_P
17012 #define TARGET_OPTION_VALID_ATTRIBUTE_P s390_valid_target_attribute_p
17013
17014 #undef TARGET_CAN_INLINE_P
17015 #define TARGET_CAN_INLINE_P s390_can_inline_p
17016 #endif
17017
17018 #undef TARGET_OPTION_RESTORE
17019 #define TARGET_OPTION_RESTORE s390_function_specific_restore
17020
17021 #undef TARGET_CAN_CHANGE_MODE_CLASS
17022 #define TARGET_CAN_CHANGE_MODE_CLASS s390_can_change_mode_class
17023
17024 #undef TARGET_CONSTANT_ALIGNMENT
17025 #define TARGET_CONSTANT_ALIGNMENT s390_constant_alignment
17026
17027 #undef TARGET_ASM_CODE_END
17028 #define TARGET_ASM_CODE_END s390_code_end
17029
17030 #undef TARGET_CASE_VALUES_THRESHOLD
17031 #define TARGET_CASE_VALUES_THRESHOLD s390_case_values_threshold
17032
17033 struct gcc_target targetm = TARGET_INITIALIZER;
17034
17035 #include "gt-s390.h"