RISC-V: Fix build error
[gcc.git] / gcc / config / riscv / riscv.c
1 /* Subroutines used for code generation for RISC-V.
2 Copyright (C) 2011-2017 Free Software Foundation, Inc.
3 Contributed by Andrew Waterman (andrew@sifive.com).
4 Based on MIPS target for GNU compiler.
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3, or (at your option)
11 any later version.
12
13 GCC is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
21
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "tm.h"
26 #include "rtl.h"
27 #include "regs.h"
28 #include "insn-config.h"
29 #include "insn-attr.h"
30 #include "recog.h"
31 #include "output.h"
32 #include "alias.h"
33 #include "tree.h"
34 #include "stringpool.h"
35 #include "attribs.h"
36 #include "varasm.h"
37 #include "stor-layout.h"
38 #include "calls.h"
39 #include "function.h"
40 #include "explow.h"
41 #include "memmodel.h"
42 #include "emit-rtl.h"
43 #include "reload.h"
44 #include "tm_p.h"
45 #include "target.h"
46 #include "target-def.h"
47 #include "basic-block.h"
48 #include "expr.h"
49 #include "optabs.h"
50 #include "bitmap.h"
51 #include "df.h"
52 #include "diagnostic.h"
53 #include "builtins.h"
54 #include "predict.h"
55
56 /* True if X is an UNSPEC wrapper around a SYMBOL_REF or LABEL_REF. */
57 #define UNSPEC_ADDRESS_P(X) \
58 (GET_CODE (X) == UNSPEC \
59 && XINT (X, 1) >= UNSPEC_ADDRESS_FIRST \
60 && XINT (X, 1) < UNSPEC_ADDRESS_FIRST + NUM_SYMBOL_TYPES)
61
62 /* Extract the symbol or label from UNSPEC wrapper X. */
63 #define UNSPEC_ADDRESS(X) \
64 XVECEXP (X, 0, 0)
65
66 /* Extract the symbol type from UNSPEC wrapper X. */
67 #define UNSPEC_ADDRESS_TYPE(X) \
68 ((enum riscv_symbol_type) (XINT (X, 1) - UNSPEC_ADDRESS_FIRST))
69
70 /* True if bit BIT is set in VALUE. */
71 #define BITSET_P(VALUE, BIT) (((VALUE) & (1ULL << (BIT))) != 0)
72
73 /* Classifies an address.
74
75 ADDRESS_REG
76 A natural register + offset address. The register satisfies
77 riscv_valid_base_register_p and the offset is a const_arith_operand.
78
79 ADDRESS_LO_SUM
80 A LO_SUM rtx. The first operand is a valid base register and
81 the second operand is a symbolic address.
82
ADDRESS_CONST_INT
A signed 12-bit constant address.

ADDRESS_SYMBOLIC
A constant symbolic address. */
88 enum riscv_address_type {
89 ADDRESS_REG,
90 ADDRESS_LO_SUM,
91 ADDRESS_CONST_INT,
92 ADDRESS_SYMBOLIC
93 };
94
95 /* Information about a function's frame layout. */
96 struct GTY(()) riscv_frame_info {
97 /* The size of the frame in bytes. */
98 HOST_WIDE_INT total_size;
99
100 /* Bit X is set if the function saves or restores GPR X. */
101 unsigned int mask;
102
103 /* Likewise FPR X. */
104 unsigned int fmask;
105
106 /* How much the GPR save/restore routines adjust sp (or 0 if unused). */
107 unsigned save_libcall_adjustment;
108
109 /* Offsets of fixed-point and floating-point save areas from frame bottom */
110 HOST_WIDE_INT gp_sp_offset;
111 HOST_WIDE_INT fp_sp_offset;
112
113 /* Offset of virtual frame pointer from stack pointer/frame bottom */
114 HOST_WIDE_INT frame_pointer_offset;
115
116 /* Offset of hard frame pointer from stack pointer/frame bottom */
117 HOST_WIDE_INT hard_frame_pointer_offset;
118
119 /* The offset of arg_pointer_rtx from the bottom of the frame. */
120 HOST_WIDE_INT arg_pointer_offset;
121 };
122
123 struct GTY(()) machine_function {
124 /* The number of extra stack bytes taken up by register varargs.
125 This area is allocated by the callee at the very top of the frame. */
126 int varargs_size;
127
128 /* Memoized return value of leaf_function_p. <0 if false, >0 if true. */
129 int is_leaf;
130
131 /* The current frame information, calculated by riscv_compute_frame_info. */
132 struct riscv_frame_info frame;
133 };
134
135 /* Information about a single argument. */
136 struct riscv_arg_info {
137 /* True if the argument is at least partially passed on the stack. */
138 bool stack_p;
139
140 /* The number of integer registers allocated to this argument. */
141 unsigned int num_gprs;
142
143 /* The offset of the first register used, provided num_gprs is nonzero.
144 If passed entirely on the stack, the value is MAX_ARGS_IN_REGISTERS. */
145 unsigned int gpr_offset;
146
147 /* The number of floating-point registers allocated to this argument. */
148 unsigned int num_fprs;
149
150 /* The offset of the first register used, provided num_fprs is nonzero. */
151 unsigned int fpr_offset;
152 };
153
154 /* Information about an address described by riscv_address_type.
155
156 ADDRESS_CONST_INT
157 No fields are used.
158
159 ADDRESS_REG
160 REG is the base register and OFFSET is the constant offset.
161
162 ADDRESS_LO_SUM
163 REG and OFFSET are the operands to the LO_SUM and SYMBOL_TYPE
164 is the type of symbol it references.
165
166 ADDRESS_SYMBOLIC
167 SYMBOL_TYPE is the type of symbol that the address references. */
168 struct riscv_address_info {
169 enum riscv_address_type type;
170 rtx reg;
171 rtx offset;
172 enum riscv_symbol_type symbol_type;
173 };
174
175 /* One stage in a constant building sequence. These sequences have
176 the form:
177
178 A = VALUE[0]
179 A = A CODE[1] VALUE[1]
180 A = A CODE[2] VALUE[2]
181 ...
182
183 where A is an accumulator, each CODE[i] is a binary rtl operation
184 and each VALUE[i] is a constant integer. CODE[0] is undefined. */
185 struct riscv_integer_op {
186 enum rtx_code code;
187 unsigned HOST_WIDE_INT value;
188 };
189
190 /* The largest number of operations needed to load an integer constant.
191 The worst case is LUI, ADDI, SLLI, ADDI, SLLI, ADDI, SLLI, ADDI. */
192 #define RISCV_MAX_INTEGER_OPS 8
193
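/* As a rough illustration, an RV64 constant with no usable structure
is built up in chunks: LUI+ADDI materialize a 32-bit seed, then each
SLLI+ADDI pair shifts the accumulator left and mixes in roughly
another 11 bits, up to the eight-operation worst case:

	lui   a0, <high chunk>
	addi  a0, a0, <chunk>
	slli  a0, a0, <shift>
	addi  a0, a0, <chunk>
	... */
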
194 /* Costs of various operations on the different architectures. */
195
196 struct riscv_tune_info
197 {
198 unsigned short fp_add[2];
199 unsigned short fp_mul[2];
200 unsigned short fp_div[2];
201 unsigned short int_mul[2];
202 unsigned short int_div[2];
203 unsigned short issue_rate;
204 unsigned short branch_cost;
205 unsigned short memory_cost;
206 bool slow_unaligned_access;
207 };
208
209 /* Information about one CPU we know about. */
210 struct riscv_cpu_info {
211 /* This CPU's canonical name. */
212 const char *name;
213
214 /* Tuning parameters for this CPU. */
215 const struct riscv_tune_info *tune_info;
216 };
217
218 /* Global variables for machine-dependent things. */
219
220 /* Whether unaligned accesses execute very slowly. */
221 bool riscv_slow_unaligned_access_p;
222
223 /* Which tuning parameters to use. */
224 static const struct riscv_tune_info *tune_info;
225
226 /* Index R is the smallest register class that contains register R. */
227 const enum reg_class riscv_regno_to_class[FIRST_PSEUDO_REGISTER] = {
228 GR_REGS, GR_REGS, GR_REGS, GR_REGS,
229 GR_REGS, GR_REGS, SIBCALL_REGS, SIBCALL_REGS,
230 JALR_REGS, JALR_REGS, JALR_REGS, JALR_REGS,
231 JALR_REGS, JALR_REGS, JALR_REGS, JALR_REGS,
232 JALR_REGS, JALR_REGS, JALR_REGS, JALR_REGS,
233 JALR_REGS, JALR_REGS, JALR_REGS, JALR_REGS,
234 JALR_REGS, JALR_REGS, JALR_REGS, JALR_REGS,
235 SIBCALL_REGS, SIBCALL_REGS, SIBCALL_REGS, SIBCALL_REGS,
236 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
237 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
238 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
239 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
240 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
241 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
242 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
243 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
244 FRAME_REGS, FRAME_REGS,
245 };
246
247 /* Costs to use when optimizing for rocket. */
248 static const struct riscv_tune_info rocket_tune_info = {
249 {COSTS_N_INSNS (4), COSTS_N_INSNS (5)}, /* fp_add */
250 {COSTS_N_INSNS (4), COSTS_N_INSNS (5)}, /* fp_mul */
251 {COSTS_N_INSNS (20), COSTS_N_INSNS (20)}, /* fp_div */
252 {COSTS_N_INSNS (4), COSTS_N_INSNS (4)}, /* int_mul */
253 {COSTS_N_INSNS (6), COSTS_N_INSNS (6)}, /* int_div */
254 1, /* issue_rate */
255 3, /* branch_cost */
256 5, /* memory_cost */
257 true, /* slow_unaligned_access */
258 };
259
260 /* Costs to use when optimizing for size. */
261 static const struct riscv_tune_info optimize_size_tune_info = {
262 {COSTS_N_INSNS (1), COSTS_N_INSNS (1)}, /* fp_add */
263 {COSTS_N_INSNS (1), COSTS_N_INSNS (1)}, /* fp_mul */
264 {COSTS_N_INSNS (1), COSTS_N_INSNS (1)}, /* fp_div */
265 {COSTS_N_INSNS (1), COSTS_N_INSNS (1)}, /* int_mul */
266 {COSTS_N_INSNS (1), COSTS_N_INSNS (1)}, /* int_div */
267 1, /* issue_rate */
268 1, /* branch_cost */
269 2, /* memory_cost */
270 false, /* slow_unaligned_access */
271 };
272
273 /* A table describing all the processors GCC knows about. */
274 static const struct riscv_cpu_info riscv_cpu_info_table[] = {
275 { "rocket", &rocket_tune_info },
276 { "size", &optimize_size_tune_info },
277 };
278
279 /* Return the riscv_cpu_info entry for the given name string. */
280
281 static const struct riscv_cpu_info *
282 riscv_parse_cpu (const char *cpu_string)
283 {
284 for (unsigned i = 0; i < ARRAY_SIZE (riscv_cpu_info_table); i++)
285 if (strcmp (riscv_cpu_info_table[i].name, cpu_string) == 0)
286 return riscv_cpu_info_table + i;
287
288 error ("unknown cpu %qs for -mtune", cpu_string);
289 return riscv_cpu_info_table;
290 }
291
292 /* Helper function for riscv_build_integer; arguments are as for
293 riscv_build_integer. */
294
295 static int
296 riscv_build_integer_1 (struct riscv_integer_op codes[RISCV_MAX_INTEGER_OPS],
297 HOST_WIDE_INT value, machine_mode mode)
298 {
299 HOST_WIDE_INT low_part = CONST_LOW_PART (value);
300 int cost = RISCV_MAX_INTEGER_OPS + 1, alt_cost;
301 struct riscv_integer_op alt_codes[RISCV_MAX_INTEGER_OPS];
302
303 if (SMALL_OPERAND (value) || LUI_OPERAND (value))
304 {
305 /* Simply ADDI or LUI. */
306 codes[0].code = UNKNOWN;
307 codes[0].value = value;
308 return 1;
309 }
310
311 /* End with ADDI. When constructing HImode constants, do not generate any
312 intermediate value that is not itself a valid HImode constant. The
313 XORI case below will handle those remaining HImode constants. */
314 if (low_part != 0
315 && (mode != HImode
316 || value - low_part <= ((1 << (GET_MODE_BITSIZE (HImode) - 1)) - 1)))
317 {
318 alt_cost = 1 + riscv_build_integer_1 (alt_codes, value - low_part, mode);
319 if (alt_cost < cost)
320 {
321 alt_codes[alt_cost-1].code = PLUS;
322 alt_codes[alt_cost-1].value = low_part;
323 memcpy (codes, alt_codes, sizeof (alt_codes));
324 cost = alt_cost;
325 }
326 }
327
328 /* End with XORI. */
329 if (cost > 2 && (low_part < 0 || mode == HImode))
330 {
331 alt_cost = 1 + riscv_build_integer_1 (alt_codes, value ^ low_part, mode);
332 if (alt_cost < cost)
333 {
334 alt_codes[alt_cost-1].code = XOR;
335 alt_codes[alt_cost-1].value = low_part;
336 memcpy (codes, alt_codes, sizeof (alt_codes));
337 cost = alt_cost;
338 }
339 }
340
341 /* Eliminate trailing zeros and end with SLLI. */
342 if (cost > 2 && (value & 1) == 0)
343 {
344 int shift = ctz_hwi (value);
345 unsigned HOST_WIDE_INT x = value;
346 x = sext_hwi (x >> shift, HOST_BITS_PER_WIDE_INT - shift);
347
348 /* Don't eliminate the lower 12 bits if LUI might apply. */
349 if (shift > IMM_BITS && !SMALL_OPERAND (x) && LUI_OPERAND (x << IMM_BITS))
350 shift -= IMM_BITS, x <<= IMM_BITS;
351
352 alt_cost = 1 + riscv_build_integer_1 (alt_codes, x, mode);
353 if (alt_cost < cost)
354 {
355 alt_codes[alt_cost-1].code = ASHIFT;
356 alt_codes[alt_cost-1].value = shift;
357 memcpy (codes, alt_codes, sizeof (alt_codes));
358 cost = alt_cost;
359 }
360 }
361
362 gcc_assert (cost <= RISCV_MAX_INTEGER_OPS);
363 return cost;
364 }
365
366 /* Fill CODES with a sequence of rtl operations to load VALUE.
367 Return the number of operations needed. */
368
369 static int
370 riscv_build_integer (struct riscv_integer_op *codes, HOST_WIDE_INT value,
371 machine_mode mode)
372 {
373 int cost = riscv_build_integer_1 (codes, value, mode);
374
375 /* Eliminate leading zeros and end with SRLI. */
376 if (value > 0 && cost > 2)
377 {
378 struct riscv_integer_op alt_codes[RISCV_MAX_INTEGER_OPS];
379 int alt_cost, shift = clz_hwi (value);
380 HOST_WIDE_INT shifted_val;
381
382 /* Try filling trailing bits with 1s. */
383 shifted_val = (value << shift) | ((((HOST_WIDE_INT) 1) << shift) - 1);
384 alt_cost = 1 + riscv_build_integer_1 (alt_codes, shifted_val, mode);
385 if (alt_cost < cost)
386 {
387 alt_codes[alt_cost-1].code = LSHIFTRT;
388 alt_codes[alt_cost-1].value = shift;
389 memcpy (codes, alt_codes, sizeof (alt_codes));
390 cost = alt_cost;
391 }
392
393 /* Try filling trailing bits with 0s. */
394 shifted_val = value << shift;
395 alt_cost = 1 + riscv_build_integer_1 (alt_codes, shifted_val, mode);
396 if (alt_cost < cost)
397 {
398 alt_codes[alt_cost-1].code = LSHIFTRT;
399 alt_codes[alt_cost-1].value = shift;
400 memcpy (codes, alt_codes, sizeof (alt_codes));
401 cost = alt_cost;
402 }
403 }
404
405 return cost;
406 }
407
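/* For example, on RV64 the mask 0xffffffff is cheapest via the
leading-zero path above: "li a0,-1" then "srli a0,a0,32", two
operations, versus three for the best ADDI/XORI-based sequence. */
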
408 /* Return the cost of constructing VAL in the event that a scratch
409 register is available. */
410
411 static int
412 riscv_split_integer_cost (HOST_WIDE_INT val)
413 {
414 int cost;
415 unsigned HOST_WIDE_INT loval = sext_hwi (val, 32);
416 unsigned HOST_WIDE_INT hival = sext_hwi ((val - loval) >> 32, 32);
417 struct riscv_integer_op codes[RISCV_MAX_INTEGER_OPS];
418
419 cost = 2 + riscv_build_integer (codes, loval, VOIDmode);
420 if (loval != hival)
421 cost += riscv_build_integer (codes, hival, VOIDmode);
422
423 return cost;
424 }
425
426 /* Return the cost of constructing the integer constant VAL. */
427
428 static int
429 riscv_integer_cost (HOST_WIDE_INT val)
430 {
431 struct riscv_integer_op codes[RISCV_MAX_INTEGER_OPS];
432 return MIN (riscv_build_integer (codes, val, VOIDmode),
433 riscv_split_integer_cost (val));
434 }
435
/* Try to split a 64-bit integer into 32-bit parts, then reassemble. */
437
438 static rtx
439 riscv_split_integer (HOST_WIDE_INT val, machine_mode mode)
440 {
441 unsigned HOST_WIDE_INT loval = sext_hwi (val, 32);
442 unsigned HOST_WIDE_INT hival = sext_hwi ((val - loval) >> 32, 32);
443 rtx hi = gen_reg_rtx (mode), lo = gen_reg_rtx (mode);
444
445 riscv_move_integer (hi, hi, hival);
446 riscv_move_integer (lo, lo, loval);
447
448 hi = gen_rtx_fmt_ee (ASHIFT, mode, hi, GEN_INT (32));
449 hi = force_reg (mode, hi);
450
451 return gen_rtx_fmt_ee (PLUS, mode, hi, lo);
452 }
453
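/* Example: splitting 0x1234567898765432 yields LOVAL = sext (0x98765432)
and HIVAL = 0x12345679; the extra 1 in HIVAL compensates for LOVAL's
negative sign extension, so (HIVAL << 32) + LOVAL reassembles the
original value. */
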
454 /* Return true if X is a thread-local symbol. */
455
456 static bool
457 riscv_tls_symbol_p (const_rtx x)
458 {
459 return SYMBOL_REF_P (x) && SYMBOL_REF_TLS_MODEL (x) != 0;
460 }
461
462 /* Return true if symbol X binds locally. */
463
464 static bool
465 riscv_symbol_binds_local_p (const_rtx x)
466 {
467 if (SYMBOL_REF_P (x))
468 return (SYMBOL_REF_DECL (x)
469 ? targetm.binds_local_p (SYMBOL_REF_DECL (x))
470 : SYMBOL_REF_LOCAL_P (x));
471 else
472 return false;
473 }
474
475 /* Return the method that should be used to access SYMBOL_REF or
476 LABEL_REF X. */
477
478 static enum riscv_symbol_type
479 riscv_classify_symbol (const_rtx x)
480 {
481 if (riscv_tls_symbol_p (x))
482 return SYMBOL_TLS;
483
484 if (GET_CODE (x) == SYMBOL_REF && flag_pic && !riscv_symbol_binds_local_p (x))
485 return SYMBOL_GOT_DISP;
486
487 return riscv_cmodel == CM_MEDLOW ? SYMBOL_ABSOLUTE : SYMBOL_PCREL;
488 }
489
490 /* Classify the base of symbolic expression X. */
491
492 enum riscv_symbol_type
493 riscv_classify_symbolic_expression (rtx x)
494 {
495 rtx offset;
496
497 split_const (x, &x, &offset);
498 if (UNSPEC_ADDRESS_P (x))
499 return UNSPEC_ADDRESS_TYPE (x);
500
501 return riscv_classify_symbol (x);
502 }
503
504 /* Return true if X is a symbolic constant. If it is, store the type of
505 the symbol in *SYMBOL_TYPE. */
506
507 bool
508 riscv_symbolic_constant_p (rtx x, enum riscv_symbol_type *symbol_type)
509 {
510 rtx offset;
511
512 split_const (x, &x, &offset);
513 if (UNSPEC_ADDRESS_P (x))
514 {
515 *symbol_type = UNSPEC_ADDRESS_TYPE (x);
516 x = UNSPEC_ADDRESS (x);
517 }
518 else if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF)
519 *symbol_type = riscv_classify_symbol (x);
520 else
521 return false;
522
523 if (offset == const0_rtx)
524 return true;
525
526 /* Nonzero offsets are only valid for references that don't use the GOT. */
527 switch (*symbol_type)
528 {
529 case SYMBOL_ABSOLUTE:
530 case SYMBOL_PCREL:
531 case SYMBOL_TLS_LE:
532 /* GAS rejects offsets outside the range [-2^31, 2^31-1]. */
533 return sext_hwi (INTVAL (offset), 32) == INTVAL (offset);
534
535 default:
536 return false;
537 }
538 }
539
540 /* Returns the number of instructions necessary to reference a symbol. */
541
542 static int riscv_symbol_insns (enum riscv_symbol_type type)
543 {
544 switch (type)
545 {
546 case SYMBOL_TLS: return 0; /* Depends on the TLS model. */
547 case SYMBOL_ABSOLUTE: return 2; /* LUI + the reference. */
548 case SYMBOL_PCREL: return 2; /* AUIPC + the reference. */
549 case SYMBOL_TLS_LE: return 3; /* LUI + ADD TP + the reference. */
550 case SYMBOL_GOT_DISP: return 3; /* AUIPC + LD GOT + the reference. */
551 default: gcc_unreachable ();
552 }
553 }
554
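/* Roughly, these counts correspond to sequences such as:

	SYMBOL_ABSOLUTE   lui t0,%hi(sym), then a %lo(sym) reference
	SYMBOL_PCREL      auipc t0,%pcrel_hi(sym), then a %pcrel_lo reference
	SYMBOL_GOT_DISP   auipc t0,%got_pcrel_hi(sym); ld t0,%pcrel_lo(..)(t0),
			  then the reference itself
	SYMBOL_TLS_LE     lui t0,%tprel_hi(sym); add t0,t0,tp,
			  then a %tprel_lo reference. */
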
555 /* Implement TARGET_LEGITIMATE_CONSTANT_P. */
556
557 static bool
558 riscv_legitimate_constant_p (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
559 {
560 return riscv_const_insns (x) > 0;
561 }
562
563 /* Implement TARGET_CANNOT_FORCE_CONST_MEM. */
564
565 static bool
566 riscv_cannot_force_const_mem (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
567 {
568 enum riscv_symbol_type type;
569 rtx base, offset;
570
571 /* There is no assembler syntax for expressing an address-sized
572 high part. */
573 if (GET_CODE (x) == HIGH)
574 return true;
575
576 split_const (x, &base, &offset);
577 if (riscv_symbolic_constant_p (base, &type))
578 {
579 /* As an optimization, don't spill symbolic constants that are as
580 cheap to rematerialize as to access in the constant pool. */
581 if (SMALL_OPERAND (INTVAL (offset)) && riscv_symbol_insns (type) > 0)
582 return true;
583
/* As an optimization, avoid needlessly generating dynamic relocations. */
585 if (flag_pic)
586 return true;
587 }
588
589 /* TLS symbols must be computed by riscv_legitimize_move. */
590 if (tls_referenced_p (x))
591 return true;
592
593 return false;
594 }
595
596 /* Return true if register REGNO is a valid base register for mode MODE.
597 STRICT_P is true if REG_OK_STRICT is in effect. */
598
599 int
600 riscv_regno_mode_ok_for_base_p (int regno,
601 machine_mode mode ATTRIBUTE_UNUSED,
602 bool strict_p)
603 {
604 if (!HARD_REGISTER_NUM_P (regno))
605 {
606 if (!strict_p)
607 return true;
608 regno = reg_renumber[regno];
609 }
610
611 /* These fake registers will be eliminated to either the stack or
612 hard frame pointer, both of which are usually valid base registers.
613 Reload deals with the cases where the eliminated form isn't valid. */
614 if (regno == ARG_POINTER_REGNUM || regno == FRAME_POINTER_REGNUM)
615 return true;
616
617 return GP_REG_P (regno);
618 }
619
620 /* Return true if X is a valid base register for mode MODE.
621 STRICT_P is true if REG_OK_STRICT is in effect. */
622
623 static bool
624 riscv_valid_base_register_p (rtx x, machine_mode mode, bool strict_p)
625 {
626 if (!strict_p && GET_CODE (x) == SUBREG)
627 x = SUBREG_REG (x);
628
629 return (REG_P (x)
630 && riscv_regno_mode_ok_for_base_p (REGNO (x), mode, strict_p));
631 }
632
633 /* Return true if, for every base register BASE_REG, (plus BASE_REG X)
634 can address a value of mode MODE. */
635
636 static bool
637 riscv_valid_offset_p (rtx x, machine_mode mode)
638 {
639 /* Check that X is a signed 12-bit number. */
640 if (!const_arith_operand (x, Pmode))
641 return false;
642
643 /* We may need to split multiword moves, so make sure that every word
644 is accessible. */
645 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
646 && !SMALL_OPERAND (INTVAL (x) + GET_MODE_SIZE (mode) - UNITS_PER_WORD))
647 return false;
648
649 return true;
650 }
651
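/* For example, on RV32 a DImode access at offset 2044 is rejected: the
move may be split into two word accesses, and the second word would sit
at offset 2048, which no longer fits in a signed 12-bit immediate. */
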
/* Should a symbol of type SYMBOL_TYPE be split in two? */
653
654 bool
655 riscv_split_symbol_type (enum riscv_symbol_type symbol_type)
656 {
657 if (symbol_type == SYMBOL_TLS_LE)
658 return true;
659
660 if (!TARGET_EXPLICIT_RELOCS)
661 return false;
662
663 return symbol_type == SYMBOL_ABSOLUTE || symbol_type == SYMBOL_PCREL;
664 }
665
666 /* Return true if a LO_SUM can address a value of mode MODE when the
667 LO_SUM symbol has type SYM_TYPE. */
668
669 static bool
670 riscv_valid_lo_sum_p (enum riscv_symbol_type sym_type, machine_mode mode)
671 {
672 /* Check that symbols of type SYMBOL_TYPE can be used to access values
673 of mode MODE. */
674 if (riscv_symbol_insns (sym_type) == 0)
675 return false;
676
677 /* Check that there is a known low-part relocation. */
678 if (!riscv_split_symbol_type (sym_type))
679 return false;
680
681 /* We may need to split multiword moves, so make sure that each word
682 can be accessed without inducing a carry. */
683 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
684 && (!TARGET_STRICT_ALIGN
685 || GET_MODE_BITSIZE (mode) > GET_MODE_ALIGNMENT (mode)))
686 return false;
687
688 return true;
689 }
690
691 /* Return true if X is a valid address for machine mode MODE. If it is,
692 fill in INFO appropriately. STRICT_P is true if REG_OK_STRICT is in
693 effect. */
694
695 static bool
696 riscv_classify_address (struct riscv_address_info *info, rtx x,
697 machine_mode mode, bool strict_p)
698 {
699 switch (GET_CODE (x))
700 {
701 case REG:
702 case SUBREG:
703 info->type = ADDRESS_REG;
704 info->reg = x;
705 info->offset = const0_rtx;
706 return riscv_valid_base_register_p (info->reg, mode, strict_p);
707
708 case PLUS:
709 info->type = ADDRESS_REG;
710 info->reg = XEXP (x, 0);
711 info->offset = XEXP (x, 1);
712 return (riscv_valid_base_register_p (info->reg, mode, strict_p)
713 && riscv_valid_offset_p (info->offset, mode));
714
715 case LO_SUM:
716 info->type = ADDRESS_LO_SUM;
717 info->reg = XEXP (x, 0);
718 info->offset = XEXP (x, 1);
719 /* We have to trust the creator of the LO_SUM to do something vaguely
720 sane. Target-independent code that creates a LO_SUM should also
721 create and verify the matching HIGH. Target-independent code that
722 adds an offset to a LO_SUM must prove that the offset will not
723 induce a carry. Failure to do either of these things would be
724 a bug, and we are not required to check for it here. The RISC-V
725 backend itself should only create LO_SUMs for valid symbolic
726 constants, with the high part being either a HIGH or a copy
727 of _gp. */
728 info->symbol_type
729 = riscv_classify_symbolic_expression (info->offset);
730 return (riscv_valid_base_register_p (info->reg, mode, strict_p)
731 && riscv_valid_lo_sum_p (info->symbol_type, mode));
732
733 case CONST_INT:
734 /* Small-integer addresses don't occur very often, but they
735 are legitimate if x0 is a valid base register. */
736 info->type = ADDRESS_CONST_INT;
737 return SMALL_OPERAND (INTVAL (x));
738
739 default:
740 return false;
741 }
742 }
743
744 /* Implement TARGET_LEGITIMATE_ADDRESS_P. */
745
746 static bool
747 riscv_legitimate_address_p (machine_mode mode, rtx x, bool strict_p)
748 {
749 struct riscv_address_info addr;
750
751 return riscv_classify_address (&addr, x, mode, strict_p);
752 }
753
754 /* Return the number of instructions needed to load or store a value
755 of mode MODE at address X. Return 0 if X isn't valid for MODE.
756 Assume that multiword moves may need to be split into word moves
757 if MIGHT_SPLIT_P, otherwise assume that a single load or store is
758 enough. */
759
760 int
761 riscv_address_insns (rtx x, machine_mode mode, bool might_split_p)
762 {
763 struct riscv_address_info addr;
764 int n = 1;
765
766 if (!riscv_classify_address (&addr, x, mode, false))
767 return 0;
768
769 /* BLKmode is used for single unaligned loads and stores and should
770 not count as a multiword mode. */
771 if (mode != BLKmode && might_split_p)
772 n += (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
773
774 if (addr.type == ADDRESS_LO_SUM)
775 n += riscv_symbol_insns (addr.symbol_type) - 1;
776
777 return n;
778 }
779
780 /* Return the number of instructions needed to load constant X.
781 Return 0 if X isn't a valid constant. */
782
783 int
784 riscv_const_insns (rtx x)
785 {
786 enum riscv_symbol_type symbol_type;
787 rtx offset;
788
789 switch (GET_CODE (x))
790 {
791 case HIGH:
792 if (!riscv_symbolic_constant_p (XEXP (x, 0), &symbol_type)
793 || !riscv_split_symbol_type (symbol_type))
794 return 0;
795
796 /* This is simply an LUI. */
797 return 1;
798
799 case CONST_INT:
800 {
801 int cost = riscv_integer_cost (INTVAL (x));
802 /* Force complicated constants to memory. */
803 return cost < 4 ? cost : 0;
804 }
805
806 case CONST_DOUBLE:
807 case CONST_VECTOR:
808 /* We can use x0 to load floating-point zero. */
809 return x == CONST0_RTX (GET_MODE (x)) ? 1 : 0;
810
811 case CONST:
812 /* See if we can refer to X directly. */
813 if (riscv_symbolic_constant_p (x, &symbol_type))
814 return riscv_symbol_insns (symbol_type);
815
816 /* Otherwise try splitting the constant into a base and offset. */
817 split_const (x, &x, &offset);
818 if (offset != 0)
819 {
820 int n = riscv_const_insns (x);
821 if (n != 0)
822 return n + riscv_integer_cost (INTVAL (offset));
823 }
824 return 0;
825
826 case SYMBOL_REF:
827 case LABEL_REF:
828 return riscv_symbol_insns (riscv_classify_symbol (x));
829
830 default:
831 return 0;
832 }
833 }
834
835 /* X is a doubleword constant that can be handled by splitting it into
836 two words and loading each word separately. Return the number of
837 instructions required to do this. */
838
839 int
840 riscv_split_const_insns (rtx x)
841 {
842 unsigned int low, high;
843
844 low = riscv_const_insns (riscv_subword (x, false));
845 high = riscv_const_insns (riscv_subword (x, true));
846 gcc_assert (low > 0 && high > 0);
847 return low + high;
848 }
849
850 /* Return the number of instructions needed to implement INSN,
851 given that it loads from or stores to MEM. */
852
853 int
854 riscv_load_store_insns (rtx mem, rtx_insn *insn)
855 {
856 machine_mode mode;
857 bool might_split_p;
858 rtx set;
859
860 gcc_assert (MEM_P (mem));
861 mode = GET_MODE (mem);
862
863 /* Try to prove that INSN does not need to be split. */
864 might_split_p = true;
865 if (GET_MODE_BITSIZE (mode) <= 32)
866 might_split_p = false;
867 else if (GET_MODE_BITSIZE (mode) == 64)
868 {
869 set = single_set (insn);
870 if (set && !riscv_split_64bit_move_p (SET_DEST (set), SET_SRC (set)))
871 might_split_p = false;
872 }
873
874 return riscv_address_insns (XEXP (mem, 0), mode, might_split_p);
875 }
876
877 /* Emit a move from SRC to DEST. Assume that the move expanders can
878 handle all moves if !can_create_pseudo_p (). The distinction is
879 important because, unlike emit_move_insn, the move expanders know
880 how to force Pmode objects into the constant pool even when the
881 constant pool address is not itself legitimate. */
882
883 rtx
884 riscv_emit_move (rtx dest, rtx src)
885 {
886 return (can_create_pseudo_p ()
887 ? emit_move_insn (dest, src)
888 : emit_move_insn_1 (dest, src));
889 }
890
891 /* Emit an instruction of the form (set TARGET SRC). */
892
893 static rtx
894 riscv_emit_set (rtx target, rtx src)
895 {
896 emit_insn (gen_rtx_SET (target, src));
897 return target;
898 }
899
900 /* Emit an instruction of the form (set DEST (CODE X Y)). */
901
902 static rtx
903 riscv_emit_binary (enum rtx_code code, rtx dest, rtx x, rtx y)
904 {
905 return riscv_emit_set (dest, gen_rtx_fmt_ee (code, GET_MODE (dest), x, y));
906 }
907
908 /* Compute (CODE X Y) and store the result in a new register
909 of mode MODE. Return that new register. */
910
911 static rtx
912 riscv_force_binary (machine_mode mode, enum rtx_code code, rtx x, rtx y)
913 {
914 return riscv_emit_binary (code, gen_reg_rtx (mode), x, y);
915 }
916
917 /* Copy VALUE to a register and return that register. If new pseudos
918 are allowed, copy it into a new register, otherwise use DEST. */
919
920 static rtx
921 riscv_force_temporary (rtx dest, rtx value)
922 {
923 if (can_create_pseudo_p ())
924 return force_reg (Pmode, value);
925 else
926 {
927 riscv_emit_move (dest, value);
928 return dest;
929 }
930 }
931
932 /* Wrap symbol or label BASE in an UNSPEC address of type SYMBOL_TYPE,
933 then add CONST_INT OFFSET to the result. */
934
935 static rtx
936 riscv_unspec_address_offset (rtx base, rtx offset,
937 enum riscv_symbol_type symbol_type)
938 {
939 base = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, base),
940 UNSPEC_ADDRESS_FIRST + symbol_type);
941 if (offset != const0_rtx)
942 base = gen_rtx_PLUS (Pmode, base, offset);
943 return gen_rtx_CONST (Pmode, base);
944 }
945
946 /* Return an UNSPEC address with underlying address ADDRESS and symbol
947 type SYMBOL_TYPE. */
948
949 rtx
950 riscv_unspec_address (rtx address, enum riscv_symbol_type symbol_type)
951 {
952 rtx base, offset;
953
954 split_const (address, &base, &offset);
955 return riscv_unspec_address_offset (base, offset, symbol_type);
956 }
957
958 /* If OP is an UNSPEC address, return the address to which it refers,
959 otherwise return OP itself. */
960
961 static rtx
962 riscv_strip_unspec_address (rtx op)
963 {
964 rtx base, offset;
965
966 split_const (op, &base, &offset);
967 if (UNSPEC_ADDRESS_P (base))
968 op = plus_constant (Pmode, UNSPEC_ADDRESS (base), INTVAL (offset));
969 return op;
970 }
971
/* Return a legitimate address for the HIGH part of
riscv_unspec_address (ADDR, SYMBOL_TYPE), forcing it into TEMP if
necessary. TEMP is as for riscv_force_temporary.

The returned expression can be used as the first operand to a LO_SUM. */
977
978 static rtx
979 riscv_unspec_offset_high (rtx temp, rtx addr, enum riscv_symbol_type symbol_type)
980 {
981 addr = gen_rtx_HIGH (Pmode, riscv_unspec_address (addr, symbol_type));
982 return riscv_force_temporary (temp, addr);
983 }
984
985 /* Load an entry from the GOT for a TLS GD access. */
986
987 static rtx riscv_got_load_tls_gd (rtx dest, rtx sym)
988 {
989 if (Pmode == DImode)
990 return gen_got_load_tls_gddi (dest, sym);
991 else
992 return gen_got_load_tls_gdsi (dest, sym);
993 }
994
995 /* Load an entry from the GOT for a TLS IE access. */
996
997 static rtx riscv_got_load_tls_ie (rtx dest, rtx sym)
998 {
999 if (Pmode == DImode)
1000 return gen_got_load_tls_iedi (dest, sym);
1001 else
1002 return gen_got_load_tls_iesi (dest, sym);
1003 }
1004
1005 /* Add in the thread pointer for a TLS LE access. */
1006
1007 static rtx riscv_tls_add_tp_le (rtx dest, rtx base, rtx sym)
1008 {
1009 rtx tp = gen_rtx_REG (Pmode, THREAD_POINTER_REGNUM);
1010 if (Pmode == DImode)
1011 return gen_tls_add_tp_ledi (dest, base, tp, sym);
1012 else
1013 return gen_tls_add_tp_lesi (dest, base, tp, sym);
1014 }
1015
1016 /* If MODE is MAX_MACHINE_MODE, ADDR appears as a move operand, otherwise
1017 it appears in a MEM of that mode. Return true if ADDR is a legitimate
1018 constant in that context and can be split into high and low parts.
1019 If so, and if LOW_OUT is nonnull, emit the high part and store the
1020 low part in *LOW_OUT. Leave *LOW_OUT unchanged otherwise.
1021
1022 TEMP is as for riscv_force_temporary and is used to load the high
1023 part into a register.
1024
1025 When MODE is MAX_MACHINE_MODE, the low part is guaranteed to be
a legitimate SET_SRC for an .md pattern, otherwise the low part
1027 is guaranteed to be a legitimate address for mode MODE. */
1028
1029 bool
1030 riscv_split_symbol (rtx temp, rtx addr, machine_mode mode, rtx *low_out)
1031 {
1032 enum riscv_symbol_type symbol_type;
1033
1034 if ((GET_CODE (addr) == HIGH && mode == MAX_MACHINE_MODE)
1035 || !riscv_symbolic_constant_p (addr, &symbol_type)
1036 || riscv_symbol_insns (symbol_type) == 0
1037 || !riscv_split_symbol_type (symbol_type))
1038 return false;
1039
1040 if (low_out)
1041 switch (symbol_type)
1042 {
1043 case SYMBOL_ABSOLUTE:
1044 {
1045 rtx high = gen_rtx_HIGH (Pmode, copy_rtx (addr));
1046 high = riscv_force_temporary (temp, high);
1047 *low_out = gen_rtx_LO_SUM (Pmode, high, addr);
1048 }
1049 break;
1050
1051 case SYMBOL_PCREL:
1052 {
1053 static unsigned seqno;
1054 char buf[32];
1055 rtx label;
1056
1057 ssize_t bytes = snprintf (buf, sizeof (buf), ".LA%u", seqno);
1058 gcc_assert ((size_t) bytes < sizeof (buf));
1059
1060 label = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
1061 SYMBOL_REF_FLAGS (label) |= SYMBOL_FLAG_LOCAL;
1062
1063 if (temp == NULL)
1064 temp = gen_reg_rtx (Pmode);
1065
1066 if (Pmode == DImode)
1067 emit_insn (gen_auipcdi (temp, copy_rtx (addr), GEN_INT (seqno)));
1068 else
1069 emit_insn (gen_auipcsi (temp, copy_rtx (addr), GEN_INT (seqno)));
1070
1071 *low_out = gen_rtx_LO_SUM (Pmode, temp, label);
1072
1073 seqno++;
1074 }
1075 break;
1076
1077 default:
1078 gcc_unreachable ();
1079 }
1080
1081 return true;
1082 }
1083
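/* For example, a SYMBOL_PCREL split ultimately assembles to the usual
medany-style pair

	.LA0:	auipc	t0, %pcrel_hi(sym)
		addi	t0, t0, %pcrel_lo(.LA0)

where the second instruction comes from the LO_SUM stored in *LOW_OUT. */
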
1084 /* Return a legitimate address for REG + OFFSET. TEMP is as for
1085 riscv_force_temporary; it is only needed when OFFSET is not a
1086 SMALL_OPERAND. */
1087
1088 static rtx
1089 riscv_add_offset (rtx temp, rtx reg, HOST_WIDE_INT offset)
1090 {
1091 if (!SMALL_OPERAND (offset))
1092 {
1093 rtx high;
1094
1095 /* Leave OFFSET as a 16-bit offset and put the excess in HIGH.
1096 The addition inside the macro CONST_HIGH_PART may cause an
1097 overflow, so we need to force a sign-extension check. */
1098 high = gen_int_mode (CONST_HIGH_PART (offset), Pmode);
1099 offset = CONST_LOW_PART (offset);
1100 high = riscv_force_temporary (temp, high);
1101 reg = riscv_force_temporary (temp, gen_rtx_PLUS (Pmode, high, reg));
1102 }
1103 return plus_constant (Pmode, reg, offset);
1104 }
1105
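/* Example: REG + 0x12345 does not fit in 12 bits, so the excess 0x12000
goes through TEMP ("lui temp,0x12" then "add temp,temp,reg") and the
residual 0x345 is left to fold into the final memory reference. */
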
/* The __tls_get_addr symbol. */
1107 static GTY(()) rtx riscv_tls_symbol;
1108
/* Return an instruction sequence that calls __tls_get_addr. SYM is
the TLS symbol we are referencing, accessed through its global-dynamic
GOT entry. RESULT is an RTX for the return value location. */
1113
1114 static rtx_insn *
1115 riscv_call_tls_get_addr (rtx sym, rtx result)
1116 {
1117 rtx a0 = gen_rtx_REG (Pmode, GP_ARG_FIRST), func;
1118 rtx_insn *insn;
1119
1120 if (!riscv_tls_symbol)
1121 riscv_tls_symbol = init_one_libfunc ("__tls_get_addr");
1122 func = gen_rtx_MEM (FUNCTION_MODE, riscv_tls_symbol);
1123
1124 start_sequence ();
1125
1126 emit_insn (riscv_got_load_tls_gd (a0, sym));
1127 insn = emit_call_insn (gen_call_value (result, func, const0_rtx, NULL));
1128 RTL_CONST_CALL_P (insn) = 1;
1129 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), a0);
1130 insn = get_insns ();
1131
1132 end_sequence ();
1133
1134 return insn;
1135 }
1136
1137 /* Generate the code to access LOC, a thread-local SYMBOL_REF, and return
1138 its address. The return value will be both a valid address and a valid
1139 SET_SRC (either a REG or a LO_SUM). */
1140
1141 static rtx
1142 riscv_legitimize_tls_address (rtx loc)
1143 {
1144 rtx dest, tp, tmp;
1145 enum tls_model model = SYMBOL_REF_TLS_MODEL (loc);
1146
1147 /* Since we support TLS copy relocs, non-PIC TLS accesses may all use LE. */
1148 if (!flag_pic)
1149 model = TLS_MODEL_LOCAL_EXEC;
1150
1151 switch (model)
1152 {
1153 case TLS_MODEL_LOCAL_DYNAMIC:
1154 /* Rely on section anchors for the optimization that LDM TLS
1155 provides. The anchor's address is loaded with GD TLS. */
1156 case TLS_MODEL_GLOBAL_DYNAMIC:
1157 tmp = gen_rtx_REG (Pmode, GP_RETURN);
1158 dest = gen_reg_rtx (Pmode);
1159 emit_libcall_block (riscv_call_tls_get_addr (loc, tmp), dest, tmp, loc);
1160 break;
1161
1162 case TLS_MODEL_INITIAL_EXEC:
1163 /* la.tls.ie; tp-relative add */
1164 tp = gen_rtx_REG (Pmode, THREAD_POINTER_REGNUM);
1165 tmp = gen_reg_rtx (Pmode);
1166 emit_insn (riscv_got_load_tls_ie (tmp, loc));
1167 dest = gen_reg_rtx (Pmode);
1168 emit_insn (gen_add3_insn (dest, tmp, tp));
1169 break;
1170
1171 case TLS_MODEL_LOCAL_EXEC:
1172 tmp = riscv_unspec_offset_high (NULL, loc, SYMBOL_TLS_LE);
1173 dest = gen_reg_rtx (Pmode);
1174 emit_insn (riscv_tls_add_tp_le (dest, tmp, loc));
1175 dest = gen_rtx_LO_SUM (Pmode, dest,
1176 riscv_unspec_address (loc, SYMBOL_TLS_LE));
1177 break;
1178
1179 default:
1180 gcc_unreachable ();
1181 }
1182 return dest;
1183 }
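
/* For reference, the local-exec case above corresponds to the canonical
sequence

	lui	t0, %tprel_hi(x)
	add	t0, t0, tp, %tprel_add(x)
	...	%tprel_lo(x)(t0)

with the final %tprel_lo reference supplied by the LO_SUM we return. */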
1184 \f
1185 /* If X is not a valid address for mode MODE, force it into a register. */
1186
1187 static rtx
1188 riscv_force_address (rtx x, machine_mode mode)
1189 {
1190 if (!riscv_legitimate_address_p (mode, x, false))
1191 x = force_reg (Pmode, x);
1192 return x;
1193 }
1194
1195 /* This function is used to implement LEGITIMIZE_ADDRESS. If X can
1196 be legitimized in a way that the generic machinery might not expect,
1197 return a new address, otherwise return NULL. MODE is the mode of
1198 the memory being accessed. */
1199
1200 static rtx
1201 riscv_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
1202 machine_mode mode)
1203 {
1204 rtx addr;
1205
1206 if (riscv_tls_symbol_p (x))
1207 return riscv_legitimize_tls_address (x);
1208
1209 /* See if the address can split into a high part and a LO_SUM. */
1210 if (riscv_split_symbol (NULL, x, mode, &addr))
1211 return riscv_force_address (addr, mode);
1212
1213 /* Handle BASE + OFFSET using riscv_add_offset. */
1214 if (GET_CODE (x) == PLUS && CONST_INT_P (XEXP (x, 1))
1215 && INTVAL (XEXP (x, 1)) != 0)
1216 {
1217 rtx base = XEXP (x, 0);
1218 HOST_WIDE_INT offset = INTVAL (XEXP (x, 1));
1219
1220 if (!riscv_valid_base_register_p (base, mode, false))
1221 base = copy_to_mode_reg (Pmode, base);
1222 addr = riscv_add_offset (NULL, base, offset);
1223 return riscv_force_address (addr, mode);
1224 }
1225
1226 return x;
1227 }
1228
1229 /* Load VALUE into DEST. TEMP is as for riscv_force_temporary. */
1230
1231 void
1232 riscv_move_integer (rtx temp, rtx dest, HOST_WIDE_INT value)
1233 {
1234 struct riscv_integer_op codes[RISCV_MAX_INTEGER_OPS];
1235 machine_mode mode;
1236 int i, num_ops;
1237 rtx x;
1238
1239 mode = GET_MODE (dest);
1240 num_ops = riscv_build_integer (codes, value, mode);
1241
1242 if (can_create_pseudo_p () && num_ops > 2 /* not a simple constant */
1243 && num_ops >= riscv_split_integer_cost (value))
1244 x = riscv_split_integer (value, mode);
1245 else
1246 {
1247 /* Apply each binary operation to X. */
1248 x = GEN_INT (codes[0].value);
1249
1250 for (i = 1; i < num_ops; i++)
1251 {
1252 if (!can_create_pseudo_p ())
1253 x = riscv_emit_set (temp, x);
1254 else
1255 x = force_reg (mode, x);
1256
1257 x = gen_rtx_fmt_ee (codes[i].code, mode, x, GEN_INT (codes[i].value));
1258 }
1259 }
1260
1261 riscv_emit_set (dest, x);
1262 }
1263
1264 /* Subroutine of riscv_legitimize_move. Move constant SRC into register
1265 DEST given that SRC satisfies immediate_operand but doesn't satisfy
1266 move_operand. */
1267
1268 static void
1269 riscv_legitimize_const_move (machine_mode mode, rtx dest, rtx src)
1270 {
1271 rtx base, offset;
1272
1273 /* Split moves of big integers into smaller pieces. */
1274 if (splittable_const_int_operand (src, mode))
1275 {
1276 riscv_move_integer (dest, dest, INTVAL (src));
1277 return;
1278 }
1279
1280 /* Split moves of symbolic constants into high/low pairs. */
1281 if (riscv_split_symbol (dest, src, MAX_MACHINE_MODE, &src))
1282 {
1283 riscv_emit_set (dest, src);
1284 return;
1285 }
1286
1287 /* Generate the appropriate access sequences for TLS symbols. */
1288 if (riscv_tls_symbol_p (src))
1289 {
1290 riscv_emit_move (dest, riscv_legitimize_tls_address (src));
1291 return;
1292 }
1293
1294 /* If we have (const (plus symbol offset)), and that expression cannot
1295 be forced into memory, load the symbol first and add in the offset. Also
1296 prefer to do this even if the constant _can_ be forced into memory, as it
1297 usually produces better code. */
1298 split_const (src, &base, &offset);
1299 if (offset != const0_rtx
1300 && (targetm.cannot_force_const_mem (mode, src) || can_create_pseudo_p ()))
1301 {
1302 base = riscv_force_temporary (dest, base);
1303 riscv_emit_move (dest, riscv_add_offset (NULL, base, INTVAL (offset)));
1304 return;
1305 }
1306
1307 src = force_const_mem (mode, src);
1308
1309 /* When using explicit relocs, constant pool references are sometimes
1310 not legitimate addresses. */
1311 riscv_split_symbol (dest, XEXP (src, 0), mode, &XEXP (src, 0));
1312 riscv_emit_move (dest, src);
1313 }
1314
1315 /* If (set DEST SRC) is not a valid move instruction, emit an equivalent
1316 sequence that is valid. */
1317
1318 bool
1319 riscv_legitimize_move (machine_mode mode, rtx dest, rtx src)
1320 {
1321 if (!register_operand (dest, mode) && !reg_or_0_operand (src, mode))
1322 {
1323 riscv_emit_move (dest, force_reg (mode, src));
1324 return true;
1325 }
1326
1327 /* We need to deal with constants that would be legitimate
1328 immediate_operands but aren't legitimate move_operands. */
1329 if (CONSTANT_P (src) && !move_operand (src, mode))
1330 {
1331 riscv_legitimize_const_move (mode, dest, src);
1332 set_unique_reg_note (get_last_insn (), REG_EQUAL, copy_rtx (src));
1333 return true;
1334 }
1335
/* RISC-V GCC may generate non-legitimate addresses: we provide patterns
that optimize access to local PIC symbols, and those patterns can lead
GCC to emit addresses that go unrecognized during later optimization.
Force such addresses back into a legitimate form. */
1339
1340 if (MEM_P (dest) && !riscv_legitimate_address_p (mode, XEXP (dest, 0),
1341 reload_completed))
1342 {
1343 XEXP (dest, 0) = riscv_force_address (XEXP (dest, 0), mode);
1344 }
1345
1346 if (MEM_P (src) && !riscv_legitimate_address_p (mode, XEXP (src, 0),
1347 reload_completed))
1348 {
1349 XEXP (src, 0) = riscv_force_address (XEXP (src, 0), mode);
1350 }
1351
1352 return false;
1353 }
1354
1355 /* Return true if there is an instruction that implements CODE and accepts
1356 X as an immediate operand. */
1357
1358 static int
1359 riscv_immediate_operand_p (int code, HOST_WIDE_INT x)
1360 {
1361 switch (code)
1362 {
1363 case ASHIFT:
1364 case ASHIFTRT:
1365 case LSHIFTRT:
1366 /* All shift counts are truncated to a valid constant. */
1367 return true;
1368
1369 case AND:
1370 case IOR:
1371 case XOR:
1372 case PLUS:
1373 case LT:
1374 case LTU:
1375 /* These instructions take 12-bit signed immediates. */
1376 return SMALL_OPERAND (x);
1377
1378 case LE:
1379 /* We add 1 to the immediate and use SLT. */
1380 return SMALL_OPERAND (x + 1);
1381
1382 case LEU:
1383 /* Likewise SLTU, but reject the always-true case. */
1384 return SMALL_OPERAND (x + 1) && x + 1 != 0;
1385
1386 case GE:
1387 case GEU:
1388 /* We can emulate an immediate of 1 by using GT/GTU against x0. */
1389 return x == 1;
1390
1391 default:
1392 /* By default assume that x0 can be used for 0. */
1393 return x == 0;
1394 }
1395 }
1396
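/* For example, "x <= 100" is accepted because it can become
"slti t0,x,101", whereas "x <= 2047" is rejected because 2048 no longer
fits in a signed 12-bit immediate. */
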
1397 /* Return the cost of binary operation X, given that the instruction
sequence for a word-sized or smaller operation takes SINGLE_INSNS
1399 instructions and that the sequence of a double-word operation takes
1400 DOUBLE_INSNS instructions. */
1401
1402 static int
1403 riscv_binary_cost (rtx x, int single_insns, int double_insns)
1404 {
1405 if (GET_MODE_SIZE (GET_MODE (x)) == UNITS_PER_WORD * 2)
1406 return COSTS_N_INSNS (double_insns);
1407 return COSTS_N_INSNS (single_insns);
1408 }
1409
1410 /* Return the cost of sign- or zero-extending OP. */
1411
1412 static int
1413 riscv_extend_cost (rtx op, bool unsigned_p)
1414 {
1415 if (MEM_P (op))
1416 return 0;
1417
1418 if (unsigned_p && GET_MODE (op) == QImode)
1419 /* We can use ANDI. */
1420 return COSTS_N_INSNS (1);
1421
1422 if (!unsigned_p && GET_MODE (op) == SImode)
1423 /* We can use SEXT.W. */
1424 return COSTS_N_INSNS (1);
1425
1426 /* We need to use a shift left and a shift right. */
1427 return COSTS_N_INSNS (2);
1428 }
1429
1430 /* Implement TARGET_RTX_COSTS. */
1431
1432 static bool
1433 riscv_rtx_costs (rtx x, machine_mode mode, int outer_code, int opno ATTRIBUTE_UNUSED,
1434 int *total, bool speed)
1435 {
1436 bool float_mode_p = FLOAT_MODE_P (mode);
1437 int cost;
1438
1439 switch (GET_CODE (x))
1440 {
1441 case CONST_INT:
1442 if (riscv_immediate_operand_p (outer_code, INTVAL (x)))
1443 {
1444 *total = 0;
1445 return true;
1446 }
1447 /* Fall through. */
1448
1449 case SYMBOL_REF:
1450 case LABEL_REF:
1451 case CONST_DOUBLE:
1452 case CONST:
1453 if ((cost = riscv_const_insns (x)) > 0)
1454 {
1455 /* If the constant is likely to be stored in a GPR, SETs of
1456 single-insn constants are as cheap as register sets; we
1457 never want to CSE them. */
1458 if (cost == 1 && outer_code == SET)
1459 *total = 0;
1460 /* When we load a constant more than once, it usually is better
1461 to duplicate the last operation in the sequence than to CSE
1462 the constant itself. */
1463 else if (outer_code == SET || GET_MODE (x) == VOIDmode)
1464 *total = COSTS_N_INSNS (1);
1465 }
1466 else /* The instruction will be fetched from the constant pool. */
1467 *total = COSTS_N_INSNS (riscv_symbol_insns (SYMBOL_ABSOLUTE));
1468 return true;
1469
1470 case MEM:
1471 /* If the address is legitimate, return the number of
1472 instructions it needs. */
1473 if ((cost = riscv_address_insns (XEXP (x, 0), mode, true)) > 0)
1474 {
1475 *total = COSTS_N_INSNS (cost + tune_info->memory_cost);
1476 return true;
1477 }
1478 /* Otherwise use the default handling. */
1479 return false;
1480
1481 case NOT:
1482 *total = COSTS_N_INSNS (GET_MODE_SIZE (mode) > UNITS_PER_WORD ? 2 : 1);
1483 return false;
1484
1485 case AND:
1486 case IOR:
1487 case XOR:
1488 /* Double-word operations use two single-word operations. */
1489 *total = riscv_binary_cost (x, 1, 2);
1490 return false;
1491
1492 case ASHIFT:
1493 case ASHIFTRT:
1494 case LSHIFTRT:
1495 *total = riscv_binary_cost (x, 1, CONSTANT_P (XEXP (x, 1)) ? 4 : 9);
1496 return false;
1497
1498 case ABS:
1499 *total = COSTS_N_INSNS (float_mode_p ? 1 : 3);
1500 return false;
1501
1502 case LO_SUM:
1503 *total = set_src_cost (XEXP (x, 0), mode, speed);
1504 return true;
1505
1506 case LT:
1507 case LTU:
1508 case LE:
1509 case LEU:
1510 case GT:
1511 case GTU:
1512 case GE:
1513 case GEU:
1514 case EQ:
1515 case NE:
1516 /* Branch comparisons have VOIDmode, so use the first operand's
1517 mode instead. */
1518 mode = GET_MODE (XEXP (x, 0));
1519 if (float_mode_p)
1520 *total = tune_info->fp_add[mode == DFmode];
1521 else
1522 *total = riscv_binary_cost (x, 1, 3);
1523 return false;
1524
1525 case UNORDERED:
1526 case ORDERED:
1527 /* (FEQ(A, A) & FEQ(B, B)) compared against 0. */
1528 mode = GET_MODE (XEXP (x, 0));
1529 *total = tune_info->fp_add[mode == DFmode] + COSTS_N_INSNS (2);
1530 return false;
1531
1532 case UNEQ:
1533 case LTGT:
1534 /* (FEQ(A, A) & FEQ(B, B)) compared against FEQ(A, B). */
1535 mode = GET_MODE (XEXP (x, 0));
1536 *total = tune_info->fp_add[mode == DFmode] + COSTS_N_INSNS (3);
1537 return false;
1538
1539 case UNGE:
1540 case UNGT:
1541 case UNLE:
1542 case UNLT:
1543 /* FLT or FLE, but guarded by an FFLAGS read and write. */
1544 mode = GET_MODE (XEXP (x, 0));
1545 *total = tune_info->fp_add[mode == DFmode] + COSTS_N_INSNS (4);
1546 return false;
1547
1548 case MINUS:
1549 case PLUS:
1550 if (float_mode_p)
1551 *total = tune_info->fp_add[mode == DFmode];
1552 else
1553 *total = riscv_binary_cost (x, 1, 4);
1554 return false;
1555
1556 case NEG:
1557 {
1558 rtx op = XEXP (x, 0);
1559 if (GET_CODE (op) == FMA && !HONOR_SIGNED_ZEROS (mode))
1560 {
1561 *total = (tune_info->fp_mul[mode == DFmode]
1562 + set_src_cost (XEXP (op, 0), mode, speed)
1563 + set_src_cost (XEXP (op, 1), mode, speed)
1564 + set_src_cost (XEXP (op, 2), mode, speed));
1565 return true;
1566 }
1567 }
1568
1569 if (float_mode_p)
1570 *total = tune_info->fp_add[mode == DFmode];
1571 else
1572 *total = COSTS_N_INSNS (GET_MODE_SIZE (mode) > UNITS_PER_WORD ? 4 : 1);
1573 return false;
1574
1575 case MULT:
1576 if (float_mode_p)
1577 *total = tune_info->fp_mul[mode == DFmode];
1578 else if (GET_MODE_SIZE (mode) > UNITS_PER_WORD)
1579 *total = 3 * tune_info->int_mul[0] + COSTS_N_INSNS (2);
1580 else if (!speed)
1581 *total = COSTS_N_INSNS (1);
1582 else
1583 *total = tune_info->int_mul[mode == DImode];
1584 return false;
1585
1586 case DIV:
1587 case SQRT:
1588 case MOD:
1589 if (float_mode_p)
1590 {
1591 *total = tune_info->fp_div[mode == DFmode];
1592 return false;
1593 }
1594 /* Fall through. */
1595
1596 case UDIV:
1597 case UMOD:
1598 if (speed)
1599 *total = tune_info->int_div[mode == DImode];
1600 else
1601 *total = COSTS_N_INSNS (1);
1602 return false;
1603
1604 case SIGN_EXTEND:
1605 case ZERO_EXTEND:
1606 *total = riscv_extend_cost (XEXP (x, 0), GET_CODE (x) == ZERO_EXTEND);
1607 return false;
1608
1609 case FLOAT:
1610 case UNSIGNED_FLOAT:
1611 case FIX:
1612 case FLOAT_EXTEND:
1613 case FLOAT_TRUNCATE:
1614 *total = tune_info->fp_add[mode == DFmode];
1615 return false;
1616
1617 case FMA:
1618 *total = (tune_info->fp_mul[mode == DFmode]
1619 + set_src_cost (XEXP (x, 0), mode, speed)
1620 + set_src_cost (XEXP (x, 1), mode, speed)
1621 + set_src_cost (XEXP (x, 2), mode, speed));
1622 return true;
1623
1624 case UNSPEC:
1625 if (XINT (x, 1) == UNSPEC_AUIPC)
1626 {
1627 /* Make AUIPC cheap to avoid spilling its result to the stack. */
1628 *total = 1;
1629 return true;
1630 }
1631 return false;
1632
1633 default:
1634 return false;
1635 }
1636 }
1637
1638 /* Implement TARGET_ADDRESS_COST. */
1639
1640 static int
1641 riscv_address_cost (rtx addr, machine_mode mode,
1642 addr_space_t as ATTRIBUTE_UNUSED,
1643 bool speed ATTRIBUTE_UNUSED)
1644 {
1645 return riscv_address_insns (addr, mode, false);
1646 }
1647
1648 /* Return one word of double-word value OP. HIGH_P is true to select the
1649 high part or false to select the low part. */
1650
1651 rtx
1652 riscv_subword (rtx op, bool high_p)
1653 {
1654 unsigned int byte = high_p ? UNITS_PER_WORD : 0;
1655 machine_mode mode = GET_MODE (op);
1656
1657 if (mode == VOIDmode)
1658 mode = TARGET_64BIT ? TImode : DImode;
1659
1660 if (MEM_P (op))
1661 return adjust_address (op, word_mode, byte);
1662
1663 if (REG_P (op))
1664 gcc_assert (!FP_REG_RTX_P (op));
1665
1666 return simplify_gen_subreg (word_mode, op, mode, byte);
1667 }
1668
1669 /* Return true if a 64-bit move from SRC to DEST should be split into two. */
1670
1671 bool
1672 riscv_split_64bit_move_p (rtx dest, rtx src)
1673 {
1674 if (TARGET_64BIT)
1675 return false;
1676
1677 /* Allow FPR <-> FPR and FPR <-> MEM moves, and permit the special case
1678 of zeroing an FPR with FCVT.D.W. */
1679 if (TARGET_DOUBLE_FLOAT
1680 && ((FP_REG_RTX_P (src) && FP_REG_RTX_P (dest))
1681 || (FP_REG_RTX_P (dest) && MEM_P (src))
1682 || (FP_REG_RTX_P (src) && MEM_P (dest))
1683 || (FP_REG_RTX_P (dest) && src == CONST0_RTX (GET_MODE (src)))))
1684 return false;
1685
1686 return true;
1687 }
1688
1689 /* Split a doubleword move from SRC to DEST. On 32-bit targets,
1690 this function handles 64-bit moves for which riscv_split_64bit_move_p
1691 holds. For 64-bit targets, this function handles 128-bit moves. */
1692
1693 void
1694 riscv_split_doubleword_move (rtx dest, rtx src)
1695 {
1696 rtx low_dest;
1697
1698 /* The operation can be split into two normal moves. Decide in
1699 which order to do them. */
1700 low_dest = riscv_subword (dest, false);
1701 if (REG_P (low_dest) && reg_overlap_mentioned_p (low_dest, src))
1702 {
1703 riscv_emit_move (riscv_subword (dest, true), riscv_subword (src, true));
1704 riscv_emit_move (low_dest, riscv_subword (src, false));
1705 }
1706 else
1707 {
1708 riscv_emit_move (low_dest, riscv_subword (src, false));
1709 riscv_emit_move (riscv_subword (dest, true), riscv_subword (src, true));
1710 }
1711 }
1712 \f
1713 /* Return the appropriate instructions to move SRC into DEST. Assume
1714 that SRC is operand 1 and DEST is operand 0. */
1715
1716 const char *
1717 riscv_output_move (rtx dest, rtx src)
1718 {
1719 enum rtx_code dest_code, src_code;
1720 machine_mode mode;
1721 bool dbl_p;
1722
1723 dest_code = GET_CODE (dest);
1724 src_code = GET_CODE (src);
1725 mode = GET_MODE (dest);
1726 dbl_p = (GET_MODE_SIZE (mode) == 8);
1727
1728 if (dbl_p && riscv_split_64bit_move_p (dest, src))
1729 return "#";
1730
1731 if (dest_code == REG && GP_REG_P (REGNO (dest)))
1732 {
1733 if (src_code == REG && FP_REG_P (REGNO (src)))
1734 return dbl_p ? "fmv.x.d\t%0,%1" : "fmv.x.s\t%0,%1";
1735
1736 if (src_code == MEM)
1737 switch (GET_MODE_SIZE (mode))
1738 {
1739 case 1: return "lbu\t%0,%1";
1740 case 2: return "lhu\t%0,%1";
1741 case 4: return "lw\t%0,%1";
1742 case 8: return "ld\t%0,%1";
1743 }
1744
1745 if (src_code == CONST_INT)
1746 return "li\t%0,%1";
1747
1748 if (src_code == HIGH)
1749 return "lui\t%0,%h1";
1750
1751 if (symbolic_operand (src, VOIDmode))
1752 switch (riscv_classify_symbolic_expression (src))
1753 {
1754 case SYMBOL_GOT_DISP: return "la\t%0,%1";
1755 case SYMBOL_ABSOLUTE: return "lla\t%0,%1";
1756 case SYMBOL_PCREL: return "lla\t%0,%1";
1757 default: gcc_unreachable ();
1758 }
1759 }
1760 if ((src_code == REG && GP_REG_P (REGNO (src)))
1761 || (src == CONST0_RTX (mode)))
1762 {
1763 if (dest_code == REG)
1764 {
1765 if (GP_REG_P (REGNO (dest)))
1766 return "mv\t%0,%z1";
1767
1768 if (FP_REG_P (REGNO (dest)))
1769 {
1770 if (!dbl_p)
1771 return "fmv.s.x\t%0,%z1";
1772 if (TARGET_64BIT)
1773 return "fmv.d.x\t%0,%z1";
/* In RV32, we can emulate fmv.d.x %0, x0 using fcvt.d.w. */
1775 gcc_assert (src == CONST0_RTX (mode));
1776 return "fcvt.d.w\t%0,x0";
1777 }
1778 }
1779 if (dest_code == MEM)
1780 switch (GET_MODE_SIZE (mode))
1781 {
1782 case 1: return "sb\t%z1,%0";
1783 case 2: return "sh\t%z1,%0";
1784 case 4: return "sw\t%z1,%0";
1785 case 8: return "sd\t%z1,%0";
1786 }
1787 }
1788 if (src_code == REG && FP_REG_P (REGNO (src)))
1789 {
1790 if (dest_code == REG && FP_REG_P (REGNO (dest)))
1791 return dbl_p ? "fmv.d\t%0,%1" : "fmv.s\t%0,%1";
1792
1793 if (dest_code == MEM)
1794 return dbl_p ? "fsd\t%1,%0" : "fsw\t%1,%0";
1795 }
1796 if (dest_code == REG && FP_REG_P (REGNO (dest)))
1797 {
1798 if (src_code == MEM)
1799 return dbl_p ? "fld\t%0,%1" : "flw\t%0,%1";
1800 }
1801 gcc_unreachable ();
1802 }
1803 \f
1804 /* Return true if CMP1 is a suitable second operand for integer ordering
1805 test CODE. See also the *sCC patterns in riscv.md. */
1806
1807 static bool
1808 riscv_int_order_operand_ok_p (enum rtx_code code, rtx cmp1)
1809 {
1810 switch (code)
1811 {
1812 case GT:
1813 case GTU:
1814 return reg_or_0_operand (cmp1, VOIDmode);
1815
1816 case GE:
1817 case GEU:
1818 return cmp1 == const1_rtx;
1819
1820 case LT:
1821 case LTU:
1822 return arith_operand (cmp1, VOIDmode);
1823
1824 case LE:
1825 return sle_operand (cmp1, VOIDmode);
1826
1827 case LEU:
1828 return sleu_operand (cmp1, VOIDmode);
1829
1830 default:
1831 gcc_unreachable ();
1832 }
1833 }
1834
1835 /* Return true if *CMP1 (of mode MODE) is a valid second operand for
1836 integer ordering test *CODE, or if an equivalent combination can
1837 be formed by adjusting *CODE and *CMP1. When returning true, update
1838 *CODE and *CMP1 with the chosen code and operand, otherwise leave
1839 them alone. */
1840
1841 static bool
1842 riscv_canonicalize_int_order_test (enum rtx_code *code, rtx *cmp1,
1843 machine_mode mode)
1844 {
1845 HOST_WIDE_INT plus_one;
1846
1847 if (riscv_int_order_operand_ok_p (*code, *cmp1))
1848 return true;
1849
1850 if (CONST_INT_P (*cmp1))
1851 switch (*code)
1852 {
1853 case LE:
1854 plus_one = trunc_int_for_mode (UINTVAL (*cmp1) + 1, mode);
1855 if (INTVAL (*cmp1) < plus_one)
1856 {
1857 *code = LT;
1858 *cmp1 = force_reg (mode, GEN_INT (plus_one));
1859 return true;
1860 }
1861 break;
1862
1863 case LEU:
1864 plus_one = trunc_int_for_mode (UINTVAL (*cmp1) + 1, mode);
1865 if (plus_one != 0)
1866 {
1867 *code = LTU;
1868 *cmp1 = force_reg (mode, GEN_INT (plus_one));
1869 return true;
1870 }
1871 break;
1872
1873 default:
1874 break;
1875 }
1876 return false;
1877 }
1878
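/* E.g. (le x 2047) has no direct SLTI encoding, so it becomes
(lt x 2048) with 2048 forced into a register for SLT. */
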
1879 /* Compare CMP0 and CMP1 using ordering test CODE and store the result
1880 in TARGET. CMP0 and TARGET are register_operands. If INVERT_PTR
1881 is nonnull, it's OK to set TARGET to the inverse of the result and
1882 flip *INVERT_PTR instead. */
1883
1884 static void
1885 riscv_emit_int_order_test (enum rtx_code code, bool *invert_ptr,
1886 rtx target, rtx cmp0, rtx cmp1)
1887 {
1888 machine_mode mode;
1889
/* First see if there is a RISC-V instruction that can do this operation.
1891 If not, try doing the same for the inverse operation. If that also
1892 fails, force CMP1 into a register and try again. */
1893 mode = GET_MODE (cmp0);
1894 if (riscv_canonicalize_int_order_test (&code, &cmp1, mode))
1895 riscv_emit_binary (code, target, cmp0, cmp1);
1896 else
1897 {
1898 enum rtx_code inv_code = reverse_condition (code);
1899 if (!riscv_canonicalize_int_order_test (&inv_code, &cmp1, mode))
1900 {
1901 cmp1 = force_reg (mode, cmp1);
1902 riscv_emit_int_order_test (code, invert_ptr, target, cmp0, cmp1);
1903 }
1904 else if (invert_ptr == 0)
1905 {
1906 rtx inv_target = riscv_force_binary (GET_MODE (target),
1907 inv_code, cmp0, cmp1);
1908 riscv_emit_binary (XOR, target, inv_target, const1_rtx);
1909 }
1910 else
1911 {
1912 *invert_ptr = !*invert_ptr;
1913 riscv_emit_binary (inv_code, target, cmp0, cmp1);
1914 }
1915 }
1916 }
1917
1918 /* Return a register that is zero iff CMP0 and CMP1 are equal.
1919 The register will have the same mode as CMP0. */
1920
1921 static rtx
1922 riscv_zero_if_equal (rtx cmp0, rtx cmp1)
1923 {
1924 if (cmp1 == const0_rtx)
1925 return cmp0;
1926
1927 return expand_binop (GET_MODE (cmp0), sub_optab,
1928 cmp0, cmp1, 0, 0, OPTAB_DIRECT);
1929 }
1930
1931 /* Sign- or zero-extend OP0 and OP1 for integer comparisons. */
1932
1933 static void
1934 riscv_extend_comparands (rtx_code code, rtx *op0, rtx *op1)
1935 {
1936 /* Comparisons consider all XLEN bits, so extend sub-XLEN values. */
1937 if (GET_MODE_SIZE (word_mode) > GET_MODE_SIZE (GET_MODE (*op0)))
1938 {
1939 /* It is more profitable to zero-extend QImode values. */
1940 if (unsigned_condition (code) == code && GET_MODE (*op0) == QImode)
1941 {
1942 *op0 = gen_rtx_ZERO_EXTEND (word_mode, *op0);
1943 if (CONST_INT_P (*op1))
1944 *op1 = GEN_INT ((uint8_t) INTVAL (*op1));
1945 else
1946 *op1 = gen_rtx_ZERO_EXTEND (word_mode, *op1);
1947 }
1948 else
1949 {
1950 *op0 = gen_rtx_SIGN_EXTEND (word_mode, *op0);
1951 if (*op1 != const0_rtx)
1952 *op1 = gen_rtx_SIGN_EXTEND (word_mode, *op1);
1953 }
1954 }
1955 }
1956
1957 /* Convert a comparison into something that can be used in a branch. On
1958 entry, *OP0 and *OP1 are the values being compared and *CODE is the code
1959 used to compare them. Update them to describe the final comparison. */
1960
1961 static void
1962 riscv_emit_int_compare (enum rtx_code *code, rtx *op0, rtx *op1)
1963 {
1964 if (splittable_const_int_operand (*op1, VOIDmode))
1965 {
1966 HOST_WIDE_INT rhs = INTVAL (*op1);
1967
1968 if (*code == EQ || *code == NE)
1969 {
1970 /* Convert e.g. OP0 == 2048 into OP0 - 2048 == 0. */
1971 if (SMALL_OPERAND (-rhs))
1972 {
1973 *op0 = riscv_force_binary (GET_MODE (*op0), PLUS, *op0,
1974 GEN_INT (-rhs));
1975 *op1 = const0_rtx;
1976 }
1977 }
1978 else
1979 {
1980 static const enum rtx_code mag_comparisons[][2] = {
1981 {LEU, LTU}, {GTU, GEU}, {LE, LT}, {GT, GE}
1982 };
1983
1984 /* Convert e.g. (OP0 <= 0xFFF) into (OP0 < 0x1000). */
1985 for (size_t i = 0; i < ARRAY_SIZE (mag_comparisons); i++)
1986 {
1987 HOST_WIDE_INT new_rhs;
1988 bool increment = *code == mag_comparisons[i][0];
1989 bool decrement = *code == mag_comparisons[i][1];
1990 if (!increment && !decrement)
1991 continue;
1992
1993 new_rhs = rhs + (increment ? 1 : -1);
1994 if (riscv_integer_cost (new_rhs) < riscv_integer_cost (rhs)
1995 && (rhs < 0) == (new_rhs < 0))
1996 {
1997 *op1 = GEN_INT (new_rhs);
1998 *code = mag_comparisons[i][increment];
1999 }
2000 break;
2001 }
2002 }
2003 }
2004
2005 riscv_extend_comparands (*code, op0, op1);
2006
2007 *op0 = force_reg (word_mode, *op0);
2008 if (*op1 != const0_rtx)
2009 *op1 = force_reg (word_mode, *op1);
2010 }
2011
2012 /* Like riscv_emit_int_compare, but for floating-point comparisons. */
2013
2014 static void
2015 riscv_emit_float_compare (enum rtx_code *code, rtx *op0, rtx *op1)
2016 {
2017 rtx tmp0, tmp1, cmp_op0 = *op0, cmp_op1 = *op1;
2018 enum rtx_code fp_code = *code;
2019 *code = NE;
2020
2021 switch (fp_code)
2022 {
2023 case UNORDERED:
2024 *code = EQ;
2025 /* Fall through. */
2026
2027 case ORDERED:
2028 /* a == a && b == b */
2029 tmp0 = riscv_force_binary (word_mode, EQ, cmp_op0, cmp_op0);
2030 tmp1 = riscv_force_binary (word_mode, EQ, cmp_op1, cmp_op1);
2031 *op0 = riscv_force_binary (word_mode, AND, tmp0, tmp1);
2032 *op1 = const0_rtx;
2033 break;
2034
2035 case UNEQ:
2036 case LTGT:
2037 /* ordered(a, b) > (a == b) */
2038 *code = fp_code == LTGT ? GTU : EQ;
2039 tmp0 = riscv_force_binary (word_mode, EQ, cmp_op0, cmp_op0);
2040 tmp1 = riscv_force_binary (word_mode, EQ, cmp_op1, cmp_op1);
2041 *op0 = riscv_force_binary (word_mode, AND, tmp0, tmp1);
2042 *op1 = riscv_force_binary (word_mode, EQ, cmp_op0, cmp_op1);
2043 break;
2044
2045 #define UNORDERED_COMPARISON(CODE, CMP) \
2046 case CODE: \
2047 *code = EQ; \
2048 *op0 = gen_reg_rtx (word_mode); \
2049 if (GET_MODE (cmp_op0) == SFmode && TARGET_64BIT) \
2050 emit_insn (gen_f##CMP##_quietsfdi4 (*op0, cmp_op0, cmp_op1)); \
2051 else if (GET_MODE (cmp_op0) == SFmode) \
2052 emit_insn (gen_f##CMP##_quietsfsi4 (*op0, cmp_op0, cmp_op1)); \
2053 else if (GET_MODE (cmp_op0) == DFmode && TARGET_64BIT) \
2054 emit_insn (gen_f##CMP##_quietdfdi4 (*op0, cmp_op0, cmp_op1)); \
2055 else if (GET_MODE (cmp_op0) == DFmode) \
2056 emit_insn (gen_f##CMP##_quietdfsi4 (*op0, cmp_op0, cmp_op1)); \
2057 else \
2058 gcc_unreachable (); \
2059 *op1 = const0_rtx; \
2060 break;
2061
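/* E.g., UNGT (a, b) is the complement of the quiet "a <= b" compare,
which the EQ-against-zero test below recovers; UNLT reuses the same
pattern after swapping the operands. */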
2062 case UNLT:
2063 std::swap (cmp_op0, cmp_op1);
2064 gcc_fallthrough ();
2065
2066 UNORDERED_COMPARISON(UNGT, le)
2067
2068 case UNLE:
2069 std::swap (cmp_op0, cmp_op1);
2070 gcc_fallthrough ();
2071
2072 UNORDERED_COMPARISON(UNGE, lt)
2073 #undef UNORDERED_COMPARISON
2074
2075 case NE:
2076 fp_code = EQ;
2077 *code = EQ;
2078 /* Fall through. */
2079
2080 case EQ:
2081 case LE:
2082 case LT:
2083 case GE:
2084 case GT:
2085 /* We have instructions for these cases. */
2086 *op0 = riscv_force_binary (word_mode, fp_code, cmp_op0, cmp_op1);
2087 *op1 = const0_rtx;
2088 break;
2089
2090 default:
2091 gcc_unreachable ();
2092 }
2093 }
2094
2095 /* CODE-compare OP0 and OP1. Store the result in TARGET. */
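/* For example, EQ is expanded as a subtraction followed by a
set-if-zero of the difference (an sltiu against 1), while NE uses a
set-if-nonzero (an sltu against x0). */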
2096
2097 void
2098 riscv_expand_int_scc (rtx target, enum rtx_code code, rtx op0, rtx op1)
2099 {
2100 riscv_extend_comparands (code, &op0, &op1);
2101 op0 = force_reg (word_mode, op0);
2102
2103 if (code == EQ || code == NE)
2104 {
2105 rtx zie = riscv_zero_if_equal (op0, op1);
2106 riscv_emit_binary (code, target, zie, const0_rtx);
2107 }
2108 else
2109 riscv_emit_int_order_test (code, 0, target, op0, op1);
2110 }
2111
2112 /* Like riscv_expand_int_scc, but for floating-point comparisons. */
2113
2114 void
2115 riscv_expand_float_scc (rtx target, enum rtx_code code, rtx op0, rtx op1)
2116 {
2117 riscv_emit_float_compare (&code, &op0, &op1);
2118
2119 rtx cmp = riscv_force_binary (word_mode, code, op0, op1);
2120 riscv_emit_set (target, lowpart_subreg (SImode, cmp, word_mode));
2121 }
2122
2123 /* Jump to LABEL if (CODE OP0 OP1) holds. */
2124
2125 void
2126 riscv_expand_conditional_branch (rtx label, rtx_code code, rtx op0, rtx op1)
2127 {
2128 if (FLOAT_MODE_P (GET_MODE (op1)))
2129 riscv_emit_float_compare (&code, &op0, &op1);
2130 else
2131 riscv_emit_int_compare (&code, &op0, &op1);
2132
2133 rtx condition = gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
2134 emit_jump_insn (gen_condjump (condition, label));
2135 }
2136
2137 /* Implement TARGET_FUNCTION_ARG_BOUNDARY. Every parameter gets at
2138 least PARM_BOUNDARY bits of alignment, but will be given anything up
2139 to STACK_BOUNDARY bits if the type requires it. */
2140
2141 static unsigned int
2142 riscv_function_arg_boundary (machine_mode mode, const_tree type)
2143 {
2144 unsigned int alignment;
2145
2146 /* Use natural alignment if the type is not an aggregate type. */
2147 if (type && !AGGREGATE_TYPE_P (type))
2148 alignment = TYPE_ALIGN (TYPE_MAIN_VARIANT (type));
2149 else
2150 alignment = type ? TYPE_ALIGN (type) : GET_MODE_ALIGNMENT (mode);
2151
2152 return MIN (STACK_BOUNDARY, MAX (PARM_BOUNDARY, alignment));
2153 }
2154
2155 /* If MODE represents an argument that can be passed or returned in
2156 floating-point registers, return the number of registers, else 0. */
2157
2158 static unsigned
2159 riscv_pass_mode_in_fpr_p (machine_mode mode)
2160 {
2161 if (GET_MODE_UNIT_SIZE (mode) <= UNITS_PER_FP_ARG)
2162 {
2163 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
2164 return 1;
2165
2166 if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
2167 return 2;
2168 }
2169
2170 return 0;
2171 }
2172
2173 typedef struct {
2174 const_tree type;
2175 HOST_WIDE_INT offset;
2176 } riscv_aggregate_field;
2177
2178 /* Identify subfields of aggregates that are candidates for passing in
2179 floating-point registers. */
2180
2181 static int
2182 riscv_flatten_aggregate_field (const_tree type,
2183 riscv_aggregate_field fields[2],
2184 int n, HOST_WIDE_INT offset)
2185 {
2186 switch (TREE_CODE (type))
2187 {
2188 case RECORD_TYPE:
2189 /* Can't handle incomplete types or sizes that are not fixed. */
2190 if (!COMPLETE_TYPE_P (type)
2191 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST
2192 || !tree_fits_uhwi_p (TYPE_SIZE (type)))
2193 return -1;
2194
2195 for (tree f = TYPE_FIELDS (type); f; f = DECL_CHAIN (f))
2196 if (TREE_CODE (f) == FIELD_DECL)
2197 {
2198 if (!TYPE_P (TREE_TYPE (f)))
2199 return -1;
2200
2201 HOST_WIDE_INT pos = offset + int_byte_position (f);
2202 n = riscv_flatten_aggregate_field (TREE_TYPE (f), fields, n, pos);
2203 if (n < 0)
2204 return -1;
2205 }
2206 return n;
2207
2208 case ARRAY_TYPE:
2209 {
2210 HOST_WIDE_INT n_elts;
2211 riscv_aggregate_field subfields[2];
2212 tree index = TYPE_DOMAIN (type);
2213 tree elt_size = TYPE_SIZE_UNIT (TREE_TYPE (type));
2214 int n_subfields = riscv_flatten_aggregate_field (TREE_TYPE (type),
2215 subfields, 0, offset);
2216
2217 /* Can't handle incomplete types or sizes that are not fixed. */
2218 if (n_subfields <= 0
2219 || !COMPLETE_TYPE_P (type)
2220 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST
2221 || !index
2222 || !TYPE_MAX_VALUE (index)
2223 || !tree_fits_uhwi_p (TYPE_MAX_VALUE (index))
2224 || !TYPE_MIN_VALUE (index)
2225 || !tree_fits_uhwi_p (TYPE_MIN_VALUE (index))
2226 || !tree_fits_uhwi_p (elt_size))
2227 return -1;
2228
2229 n_elts = 1 + tree_to_uhwi (TYPE_MAX_VALUE (index))
2230 - tree_to_uhwi (TYPE_MIN_VALUE (index));
2231 gcc_assert (n_elts >= 0);
2232
2233 for (HOST_WIDE_INT i = 0; i < n_elts; i++)
2234 for (int j = 0; j < n_subfields; j++)
2235 {
2236 if (n >= 2)
2237 return -1;
2238
2239 fields[n] = subfields[j];
2240 fields[n++].offset += i * tree_to_uhwi (elt_size);
2241 }
2242
2243 return n;
2244 }
2245
2246 case COMPLEX_TYPE:
2247 {
2248 /* A complex type needs to consume two fields, so n must be 0. */
2249 if (n != 0)
2250 return -1;
2251
2252 HOST_WIDE_INT elt_size = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (type)));
2253
2254 if (elt_size <= UNITS_PER_FP_ARG)
2255 {
2256 fields[0].type = TREE_TYPE (type);
2257 fields[0].offset = offset;
2258 fields[1].type = TREE_TYPE (type);
2259 fields[1].offset = offset + elt_size;
2260
2261 return 2;
2262 }
2263
2264 return -1;
2265 }
2266
2267 default:
2268 if (n < 2
2269 && ((SCALAR_FLOAT_TYPE_P (type)
2270 && GET_MODE_SIZE (TYPE_MODE (type)) <= UNITS_PER_FP_ARG)
2271 || (INTEGRAL_TYPE_P (type)
2272 && GET_MODE_SIZE (TYPE_MODE (type)) <= UNITS_PER_WORD)))
2273 {
2274 fields[n].type = type;
2275 fields[n].offset = offset;
2276 return n + 1;
2277 }
2278 else
2279 return -1;
2280 }
2281 }
2282
2283 /* Identify candidate aggregates for passing in floating-point registers.
2284 Candidates have at most two fields after flattening. */
2285
2286 static int
2287 riscv_flatten_aggregate_argument (const_tree type,
2288 riscv_aggregate_field fields[2])
2289 {
2290 if (!type || TREE_CODE (type) != RECORD_TYPE)
2291 return -1;
2292
2293 return riscv_flatten_aggregate_field (type, fields, 0, 0);
2294 }
2295
2296 /* See whether TYPE is a record whose fields should be returned in one or
2297 two floating-point registers. If so, populate FIELDS accordingly. */
2298
2299 static unsigned
2300 riscv_pass_aggregate_in_fpr_pair_p (const_tree type,
2301 riscv_aggregate_field fields[2])
2302 {
2303 int n = riscv_flatten_aggregate_argument (type, fields);
2304
2305 for (int i = 0; i < n; i++)
2306 if (!SCALAR_FLOAT_TYPE_P (fields[i].type))
2307 return 0;
2308
2309 return n > 0 ? n : 0;
2310 }
2311
2312 /* See whether TYPE is a record whose fields should be returned in one
2313 floating-point register and one integer register. If so, populate
2314 FIELDS accordingly. */
2315
2316 static bool
2317 riscv_pass_aggregate_in_fpr_and_gpr_p (const_tree type,
2318 riscv_aggregate_field fields[2])
2319 {
2320 unsigned num_int = 0, num_float = 0;
2321 int n = riscv_flatten_aggregate_argument (type, fields);
2322
2323 for (int i = 0; i < n; i++)
2324 {
2325 num_float += SCALAR_FLOAT_TYPE_P (fields[i].type);
2326 num_int += INTEGRAL_TYPE_P (fields[i].type);
2327 }
2328
2329 return num_int == 1 && num_float == 1;
2330 }
2331
2332 /* Return the representation of an argument passed or returned in an FPR
2333 when the value has mode VALUE_MODE and the type has TYPE_MODE. The
2334 two modes may be different for structures like:
2335
2336 struct __attribute__((packed)) foo { float f; }
2337
2338 where the SFmode value "f" is passed in REGNO but the struct itself
2339 has mode BLKmode. */
2340
2341 static rtx
2342 riscv_pass_fpr_single (machine_mode type_mode, unsigned regno,
2343 machine_mode value_mode)
2344 {
2345 rtx x = gen_rtx_REG (value_mode, regno);
2346
2347 if (type_mode != value_mode)
2348 {
2349 x = gen_rtx_EXPR_LIST (VOIDmode, x, const0_rtx);
2350 x = gen_rtx_PARALLEL (type_mode, gen_rtvec (1, x));
2351 }
2352 return x;
2353 }
2354
2355 /* Pass or return a composite value in the FPR pair REGNO and REGNO + 1.
2356 MODE is the mode of the composite. MODE1 and OFFSET1 are the mode and
2357 byte offset for the first value, likewise MODE2 and OFFSET2 for the
2358 second value. */
2359
2360 static rtx
2361 riscv_pass_fpr_pair (machine_mode mode, unsigned regno1,
2362 machine_mode mode1, HOST_WIDE_INT offset1,
2363 unsigned regno2, machine_mode mode2,
2364 HOST_WIDE_INT offset2)
2365 {
2366 return gen_rtx_PARALLEL
2367 (mode,
2368 gen_rtvec (2,
2369 gen_rtx_EXPR_LIST (VOIDmode,
2370 gen_rtx_REG (mode1, regno1),
2371 GEN_INT (offset1)),
2372 gen_rtx_EXPR_LIST (VOIDmode,
2373 gen_rtx_REG (mode2, regno2),
2374 GEN_INT (offset2))));
2375 }
2376
2377 /* Fill INFO with information about a single argument, and return an
2378 RTL pattern to pass or return the argument. CUM is the cumulative
2379 state for earlier arguments. MODE is the mode of this argument and
2380 TYPE is its type (if known). NAMED is true if this is a named
2381 (fixed) argument rather than a variable one. RETURN_P is true if
2382 returning the argument, or false if passing the argument. */
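/* For example, under a hard-float ABI a named argument of type
struct { float f; int i; } is flattened and passed with "f" in a
floating-point register and "i" in an integer register. */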
2383
2384 static rtx
2385 riscv_get_arg_info (struct riscv_arg_info *info, const CUMULATIVE_ARGS *cum,
2386 machine_mode mode, const_tree type, bool named,
2387 bool return_p)
2388 {
2389 unsigned num_bytes, num_words;
2390 unsigned fpr_base = return_p ? FP_RETURN : FP_ARG_FIRST;
2391 unsigned gpr_base = return_p ? GP_RETURN : GP_ARG_FIRST;
2392 unsigned alignment = riscv_function_arg_boundary (mode, type);
2393
2394 memset (info, 0, sizeof (*info));
2395 info->gpr_offset = cum->num_gprs;
2396 info->fpr_offset = cum->num_fprs;
2397
2398 if (named)
2399 {
2400 riscv_aggregate_field fields[2];
2401 unsigned fregno = fpr_base + info->fpr_offset;
2402 unsigned gregno = gpr_base + info->gpr_offset;
2403
2404 /* Pass one- or two-element floating-point aggregates in FPRs. */
2405 if ((info->num_fprs = riscv_pass_aggregate_in_fpr_pair_p (type, fields))
2406 && info->fpr_offset + info->num_fprs <= MAX_ARGS_IN_REGISTERS)
2407 switch (info->num_fprs)
2408 {
2409 case 1:
2410 return riscv_pass_fpr_single (mode, fregno,
2411 TYPE_MODE (fields[0].type));
2412
2413 case 2:
2414 return riscv_pass_fpr_pair (mode, fregno,
2415 TYPE_MODE (fields[0].type),
2416 fields[0].offset,
2417 fregno + 1,
2418 TYPE_MODE (fields[1].type),
2419 fields[1].offset);
2420
2421 default:
2422 gcc_unreachable ();
2423 }
2424
2425 /* Pass real and complex floating-point numbers in FPRs. */
2426 if ((info->num_fprs = riscv_pass_mode_in_fpr_p (mode))
2427 && info->fpr_offset + info->num_fprs <= MAX_ARGS_IN_REGISTERS)
2428 switch (GET_MODE_CLASS (mode))
2429 {
2430 case MODE_FLOAT:
2431 return gen_rtx_REG (mode, fregno);
2432
2433 case MODE_COMPLEX_FLOAT:
2434 return riscv_pass_fpr_pair (mode, fregno, GET_MODE_INNER (mode), 0,
2435 fregno + 1, GET_MODE_INNER (mode),
2436 GET_MODE_UNIT_SIZE (mode));
2437
2438 default:
2439 gcc_unreachable ();
2440 }
2441
2442 /* Pass structs with one float and one integer in an FPR and a GPR. */
2443 if (riscv_pass_aggregate_in_fpr_and_gpr_p (type, fields)
2444 && info->gpr_offset < MAX_ARGS_IN_REGISTERS
2445 && info->fpr_offset < MAX_ARGS_IN_REGISTERS)
2446 {
2447 info->num_gprs = 1;
2448 info->num_fprs = 1;
2449
2450 if (!SCALAR_FLOAT_TYPE_P (fields[0].type))
2451 std::swap (fregno, gregno);
2452
2453 return riscv_pass_fpr_pair (mode, fregno, TYPE_MODE (fields[0].type),
2454 fields[0].offset,
2455 gregno, TYPE_MODE (fields[1].type),
2456 fields[1].offset);
2457 }
2458 }
2459
2460 /* Work out the size of the argument. */
2461 num_bytes = type ? int_size_in_bytes (type) : GET_MODE_SIZE (mode);
2462 num_words = (num_bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
2463
2464 /* Doubleword-aligned varargs start on an even register boundary. */
2465 if (!named && num_bytes != 0 && alignment > BITS_PER_WORD)
2466 info->gpr_offset += info->gpr_offset & 1;
2467
2468 /* Partition the argument between registers and stack. */
2469 info->num_fprs = 0;
2470 info->num_gprs = MIN (num_words, MAX_ARGS_IN_REGISTERS - info->gpr_offset);
2471 info->stack_p = (num_words - info->num_gprs) != 0;
2472
2473 if (info->num_gprs || return_p)
2474 return gen_rtx_REG (mode, gpr_base + info->gpr_offset);
2475
2476 return NULL_RTX;
2477 }
2478
2479 /* Implement TARGET_FUNCTION_ARG. */
2480
2481 static rtx
2482 riscv_function_arg (cumulative_args_t cum_v, machine_mode mode,
2483 const_tree type, bool named)
2484 {
2485 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
2486 struct riscv_arg_info info;
2487
2488 if (mode == VOIDmode)
2489 return NULL;
2490
2491 return riscv_get_arg_info (&info, cum, mode, type, named, false);
2492 }
2493
2494 /* Implement TARGET_FUNCTION_ARG_ADVANCE. */
2495
2496 static void
2497 riscv_function_arg_advance (cumulative_args_t cum_v, machine_mode mode,
2498 const_tree type, bool named)
2499 {
2500 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
2501 struct riscv_arg_info info;
2502
2503 riscv_get_arg_info (&info, cum, mode, type, named, false);
2504
2505 /* Advance the register count. This has the effect of setting
2506 num_gprs to MAX_ARGS_IN_REGISTERS if a doubleword-aligned
2507 argument required us to skip the final GPR and pass the whole
2508 argument on the stack. */
2509 cum->num_fprs = info.fpr_offset + info.num_fprs;
2510 cum->num_gprs = info.gpr_offset + info.num_gprs;
2511 }
2512
2513 /* Implement TARGET_ARG_PARTIAL_BYTES. */
2514
2515 static int
2516 riscv_arg_partial_bytes (cumulative_args_t cum,
2517 machine_mode mode, tree type, bool named)
2518 {
2519 struct riscv_arg_info arg;
2520
2521 riscv_get_arg_info (&arg, get_cumulative_args (cum), mode, type, named, false);
2522 return arg.stack_p ? arg.num_gprs * UNITS_PER_WORD : 0;
2523 }
2524
2525 /* Implement FUNCTION_VALUE and LIBCALL_VALUE. For normal calls,
2526 VALTYPE is the return type and MODE is VOIDmode. For libcalls,
2527 VALTYPE is null and MODE is the mode of the return value. */
2528
2529 rtx
2530 riscv_function_value (const_tree type, const_tree func, machine_mode mode)
2531 {
2532 struct riscv_arg_info info;
2533 CUMULATIVE_ARGS args;
2534
2535 if (type)
2536 {
2537 int unsigned_p = TYPE_UNSIGNED (type);
2538
2539 mode = TYPE_MODE (type);
2540
2541 /* Since TARGET_PROMOTE_FUNCTION_MODE unconditionally promotes
2542 return values, promote the mode here too. */
2543 mode = promote_function_mode (type, mode, &unsigned_p, func, 1);
2544 }
2545
2546 memset (&args, 0, sizeof args);
2547 return riscv_get_arg_info (&info, &args, mode, type, true, true);
2548 }
2549
2550 /* Implement TARGET_PASS_BY_REFERENCE. */
2551
2552 static bool
2553 riscv_pass_by_reference (cumulative_args_t cum_v, machine_mode mode,
2554 const_tree type, bool named)
2555 {
2556 HOST_WIDE_INT size = type ? int_size_in_bytes (type) : GET_MODE_SIZE (mode);
2557 struct riscv_arg_info info;
2558 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
2559
2560 /* ??? std_gimplify_va_arg_expr passes NULL for cum. Fortunately, we
2561 never pass variadic arguments in floating-point registers, so we can
2562 avoid the call to riscv_get_arg_info in this case. */
2563 if (cum != NULL)
2564 {
2565 /* Don't pass by reference if we can use a floating-point register. */
2566 riscv_get_arg_info (&info, cum, mode, type, named, false);
2567 if (info.num_fprs)
2568 return false;
2569 }
2570
2571 /* Pass by reference if the data does not fit in two integer registers. */
2572 return !IN_RANGE (size, 0, 2 * UNITS_PER_WORD);
2573 }
2574
2575 /* Implement TARGET_RETURN_IN_MEMORY. */
2576
2577 static bool
2578 riscv_return_in_memory (const_tree type, const_tree fndecl ATTRIBUTE_UNUSED)
2579 {
2580 CUMULATIVE_ARGS args;
2581 cumulative_args_t cum = pack_cumulative_args (&args);
2582
2583 /* The rules for returning in memory are the same as for passing the
2584 first named argument by reference. */
2585 memset (&args, 0, sizeof args);
2586 return riscv_pass_by_reference (cum, TYPE_MODE (type), type, true);
2587 }
2588
2589 /* Implement TARGET_SETUP_INCOMING_VARARGS. */
2590
2591 static void
2592 riscv_setup_incoming_varargs (cumulative_args_t cum, machine_mode mode,
2593 tree type, int *pretend_size ATTRIBUTE_UNUSED,
2594 int no_rtl)
2595 {
2596 CUMULATIVE_ARGS local_cum;
2597 int gp_saved;
2598
2599 /* The caller has advanced CUM up to, but not beyond, the last named
2600 argument. Advance a local copy of CUM past the last "real" named
2601 argument, to find out how many registers are left over. */
2602 local_cum = *get_cumulative_args (cum);
2603 riscv_function_arg_advance (pack_cumulative_args (&local_cum), mode, type, 1);
2604
2605 /* Find out how many registers we need to save. */
2606 gp_saved = MAX_ARGS_IN_REGISTERS - local_cum.num_gprs;
2607
2608 if (!no_rtl && gp_saved > 0)
2609 {
2610 rtx ptr = plus_constant (Pmode, virtual_incoming_args_rtx,
2611 REG_PARM_STACK_SPACE (cfun->decl)
2612 - gp_saved * UNITS_PER_WORD);
2613 rtx mem = gen_frame_mem (BLKmode, ptr);
2614 set_mem_alias_set (mem, get_varargs_alias_set ());
2615
2616 move_block_from_reg (local_cum.num_gprs + GP_ARG_FIRST,
2617 mem, gp_saved);
2618 }
2619 if (REG_PARM_STACK_SPACE (cfun->decl) == 0)
2620 cfun->machine->varargs_size = gp_saved * UNITS_PER_WORD;
2621 }
2622
2623 /* Implement TARGET_EXPAND_BUILTIN_VA_START. */
2624
2625 static void
2626 riscv_va_start (tree valist, rtx nextarg)
2627 {
2628 nextarg = plus_constant (Pmode, nextarg, -cfun->machine->varargs_size);
2629 std_expand_builtin_va_start (valist, nextarg);
2630 }
2631
2632 /* Make ADDR suitable for use as a call or sibcall target. */
2633
2634 rtx
2635 riscv_legitimize_call_address (rtx addr)
2636 {
2637 if (!call_insn_operand (addr, VOIDmode))
2638 {
2639 rtx reg = RISCV_PROLOGUE_TEMP (Pmode);
2640 riscv_emit_move (reg, addr);
2641 return reg;
2642 }
2643 return addr;
2644 }
2645
2646 /* Emit straight-line code to move LENGTH bytes from SRC to DEST.
2647 Assume that the areas do not overlap. */
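/* E.g., a 15-byte copy between word-aligned buffers on RV64 moves one
8-byte chunk through a temporary register and hands the remaining
7 bytes to move_by_pieces. */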
2648
2649 static void
2650 riscv_block_move_straight (rtx dest, rtx src, HOST_WIDE_INT length)
2651 {
2652 HOST_WIDE_INT offset, delta;
2653 unsigned HOST_WIDE_INT bits;
2654 int i;
2655 machine_mode mode;
2656 rtx *regs;
2657
2658 bits = MAX (BITS_PER_UNIT,
2659 MIN (BITS_PER_WORD, MIN (MEM_ALIGN (src), MEM_ALIGN (dest))));
2660
2661 mode = mode_for_size (bits, MODE_INT, 0).require ();
2662 delta = bits / BITS_PER_UNIT;
2663
2664 /* Allocate a buffer for the temporary registers. */
2665 regs = XALLOCAVEC (rtx, length / delta);
2666
2667 /* Load as many BITS-sized chunks as possible into temporary registers;
2668 BITS already reflects the alignment of both the source and destination. */
2669 for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
2670 {
2671 regs[i] = gen_reg_rtx (mode);
2672 riscv_emit_move (regs[i], adjust_address (src, mode, offset));
2673 }
2674
2675 /* Copy the chunks to the destination. */
2676 for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
2677 riscv_emit_move (adjust_address (dest, mode, offset), regs[i]);
2678
2679 /* Mop up any left-over bytes. */
2680 if (offset < length)
2681 {
2682 src = adjust_address (src, BLKmode, offset);
2683 dest = adjust_address (dest, BLKmode, offset);
2684 move_by_pieces (dest, src, length - offset,
2685 MIN (MEM_ALIGN (src), MEM_ALIGN (dest)), 0);
2686 }
2687 }
2688
2689 /* Helper function for doing a loop-based block operation on memory
2690 reference MEM. Each iteration of the loop will operate on LENGTH
2691 bytes of MEM.
2692
2693 Create a new base register for use within the loop and point it to
2694 the start of MEM. Create a new memory reference that uses this
2695 register. Store them in *LOOP_REG and *LOOP_MEM respectively. */
2696
2697 static void
2698 riscv_adjust_block_mem (rtx mem, HOST_WIDE_INT length,
2699 rtx *loop_reg, rtx *loop_mem)
2700 {
2701 *loop_reg = copy_addr_to_reg (XEXP (mem, 0));
2702
2703 /* Although the new mem does not refer to a known location,
2704 it does keep up to LENGTH bytes of alignment. */
2705 *loop_mem = change_address (mem, BLKmode, *loop_reg);
2706 set_mem_align (*loop_mem, MIN (MEM_ALIGN (mem), length * BITS_PER_UNIT));
2707 }
2708
2709 /* Move LENGTH bytes from SRC to DEST using a loop that moves BYTES_PER_ITER
2710 bytes at a time. LENGTH must be at least BYTES_PER_ITER. Assume that
2711 the memory regions do not overlap. */
2712
2713 static void
2714 riscv_block_move_loop (rtx dest, rtx src, HOST_WIDE_INT length,
2715 HOST_WIDE_INT bytes_per_iter)
2716 {
2717 rtx label, src_reg, dest_reg, final_src, test;
2718 HOST_WIDE_INT leftover;
2719
2720 leftover = length % bytes_per_iter;
2721 length -= leftover;
2722
2723 /* Create registers and memory references for use within the loop. */
2724 riscv_adjust_block_mem (src, bytes_per_iter, &src_reg, &src);
2725 riscv_adjust_block_mem (dest, bytes_per_iter, &dest_reg, &dest);
2726
2727 /* Calculate the value that SRC_REG should have after the last iteration
2728 of the loop. */
2729 final_src = expand_simple_binop (Pmode, PLUS, src_reg, GEN_INT (length),
2730 0, 0, OPTAB_WIDEN);
2731
2732 /* Emit the start of the loop. */
2733 label = gen_label_rtx ();
2734 emit_label (label);
2735
2736 /* Emit the loop body. */
2737 riscv_block_move_straight (dest, src, bytes_per_iter);
2738
2739 /* Move on to the next block. */
2740 riscv_emit_move (src_reg, plus_constant (Pmode, src_reg, bytes_per_iter));
2741 riscv_emit_move (dest_reg, plus_constant (Pmode, dest_reg, bytes_per_iter));
2742
2743 /* Emit the loop condition. */
2744 test = gen_rtx_NE (VOIDmode, src_reg, final_src);
2745 if (Pmode == DImode)
2746 emit_jump_insn (gen_cbranchdi4 (test, src_reg, final_src, label));
2747 else
2748 emit_jump_insn (gen_cbranchsi4 (test, src_reg, final_src, label));
2749
2750 /* Mop up any left-over bytes. */
2751 if (leftover)
2752 riscv_block_move_straight (dest, src, leftover);
2753 else
2754 emit_insn (gen_nop ());
2755 }
2756
2757 /* Expand a movmemsi instruction, which copies LENGTH bytes from
2758 memory reference SRC to memory reference DEST. */
2759
2760 bool
2761 riscv_expand_block_move (rtx dest, rtx src, rtx length)
2762 {
2763 if (CONST_INT_P (length))
2764 {
2765 HOST_WIDE_INT factor, align;
2766
2767 align = MIN (MIN (MEM_ALIGN (src), MEM_ALIGN (dest)), BITS_PER_WORD);
2768 factor = BITS_PER_WORD / align;
2769
2770 if (optimize_function_for_size_p (cfun)
2771 && INTVAL (length) * factor * UNITS_PER_WORD > MOVE_RATIO (false))
2772 return false;
2773
2774 if (INTVAL (length) <= RISCV_MAX_MOVE_BYTES_STRAIGHT / factor)
2775 {
2776 riscv_block_move_straight (dest, src, INTVAL (length));
2777 return true;
2778 }
2779 else if (optimize && align >= BITS_PER_WORD)
2780 {
2781 unsigned min_iter_words
2782 = RISCV_MAX_MOVE_BYTES_PER_LOOP_ITER / UNITS_PER_WORD;
2783 unsigned iter_words = min_iter_words;
2784 HOST_WIDE_INT bytes = INTVAL (length), words = bytes / UNITS_PER_WORD;
2785
2786 /* Lengthen the loop body if it shortens the tail. */
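/* E.g., if the minimum body were 4 words and 10 words remained,
a 5-word body would be chosen, since 10 % 5 == 0 leaves no
tail at all. */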
2787 for (unsigned i = min_iter_words; i < min_iter_words * 2 - 1; i++)
2788 {
2789 unsigned cur_cost = iter_words + words % iter_words;
2790 unsigned new_cost = i + words % i;
2791 if (new_cost <= cur_cost)
2792 iter_words = i;
2793 }
2794
2795 riscv_block_move_loop (dest, src, bytes, iter_words * UNITS_PER_WORD);
2796 return true;
2797 }
2798 }
2799 return false;
2800 }
2801
2802 /* Print symbolic operand OP, which is part of a HIGH or LO_SUM,
2803 to FILE. HI_RELOC indicates a high-part relocation. */
2804
2805 static void
2806 riscv_print_operand_reloc (FILE *file, rtx op, bool hi_reloc)
2807 {
2808 const char *reloc;
2809
2810 switch (riscv_classify_symbolic_expression (op))
2811 {
2812 case SYMBOL_ABSOLUTE:
2813 reloc = hi_reloc ? "%hi" : "%lo";
2814 break;
2815
2816 case SYMBOL_PCREL:
2817 reloc = hi_reloc ? "%pcrel_hi" : "%pcrel_lo";
2818 break;
2819
2820 case SYMBOL_TLS_LE:
2821 reloc = hi_reloc ? "%tprel_hi" : "%tprel_lo";
2822 break;
2823
2824 default:
2825 gcc_unreachable ();
2826 }
2827
2828 fprintf (file, "%s(", reloc);
2829 output_addr_const (file, riscv_strip_unspec_address (op));
2830 fputc (')', file);
2831 }
2832
2833 /* Return true if the .AQ suffix should be added to an AMO to implement the
2834 acquire portion of memory model MODEL. */
2835
2836 static bool
2837 riscv_memmodel_needs_amo_acquire (enum memmodel model)
2838 {
2839 switch (model)
2840 {
2841 case MEMMODEL_ACQ_REL:
2842 case MEMMODEL_SEQ_CST:
2843 case MEMMODEL_SYNC_SEQ_CST:
2844 case MEMMODEL_ACQUIRE:
2845 case MEMMODEL_CONSUME:
2846 case MEMMODEL_SYNC_ACQUIRE:
2847 return true;
2848
2849 case MEMMODEL_RELEASE:
2850 case MEMMODEL_SYNC_RELEASE:
2851 case MEMMODEL_RELAXED:
2852 return false;
2853
2854 default:
2855 gcc_unreachable ();
2856 }
2857 }
2858
2859 /* Return true if a FENCE should be emitted before a memory access to
2860 implement the release portion of memory model MODEL. */
2861
2862 static bool
2863 riscv_memmodel_needs_release_fence (enum memmodel model)
2864 {
2865 switch (model)
2866 {
2867 case MEMMODEL_ACQ_REL:
2868 case MEMMODEL_SEQ_CST:
2869 case MEMMODEL_SYNC_SEQ_CST:
2870 case MEMMODEL_RELEASE:
2871 case MEMMODEL_SYNC_RELEASE:
2872 return true;
2873
2874 case MEMMODEL_ACQUIRE:
2875 case MEMMODEL_CONSUME:
2876 case MEMMODEL_SYNC_ACQUIRE:
2877 case MEMMODEL_RELAXED:
2878 return false;
2879
2880 default:
2881 gcc_unreachable ();
2882 }
2883 }
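/* A seq_cst AMO therefore receives both treatments: a leading
"fence iorw,ow" via the %F operand code below and an ".aq" suffix
via %A, since both predicates return true for MEMMODEL_SEQ_CST. */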
2884
2885 /* Implement TARGET_PRINT_OPERAND. The RISC-V-specific operand codes are:
2886
2887 'h' Print the high-part relocation associated with OP, after stripping
2888 any outermost HIGH.
2889 'R' Print the low-part relocation associated with OP.
2890 'C' Print the integer branch condition for comparison OP.
2891 'A' Print the atomic operation suffix for memory model OP.
2892 'F' Print a FENCE if the memory model requires a release.
2893 'z' Print x0 if OP is zero, otherwise print OP normally.
2894 'i' Print i if the operand is not a register. */
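/* For example, a store template such as "sb\t%z1,%0" prints x0 when
operand 1 is zero, and "add%i2\t%0,%1,%2" prints "addi" when
operand 2 is an immediate. */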
2895
2896 static void
2897 riscv_print_operand (FILE *file, rtx op, int letter)
2898 {
2899 machine_mode mode = GET_MODE (op);
2900 enum rtx_code code = GET_CODE (op);
2901
2902 switch (letter)
2903 {
2904 case 'h':
2905 if (code == HIGH)
2906 op = XEXP (op, 0);
2907 riscv_print_operand_reloc (file, op, true);
2908 break;
2909
2910 case 'R':
2911 riscv_print_operand_reloc (file, op, false);
2912 break;
2913
2914 case 'C':
2915 /* The RTL names match the instruction names. */
2916 fputs (GET_RTX_NAME (code), file);
2917 break;
2918
2919 case 'A':
2920 if (riscv_memmodel_needs_amo_acquire ((enum memmodel) INTVAL (op)))
2921 fputs (".aq", file);
2922 break;
2923
2924 case 'F':
2925 if (riscv_memmodel_needs_release_fence ((enum memmodel) INTVAL (op)))
2926 fputs ("fence iorw,ow; ", file);
2927 break;
2928
2929 case 'i':
2930 if (code != REG)
2931 fputs ("i", file);
2932 break;
2933
2934 default:
2935 switch (code)
2936 {
2937 case REG:
2938 if (letter && letter != 'z')
2939 output_operand_lossage ("invalid use of '%%%c'", letter);
2940 fprintf (file, "%s", reg_names[REGNO (op)]);
2941 break;
2942
2943 case MEM:
2944 if (letter && letter != 'z')
2945 output_operand_lossage ("invalid use of '%%%c'", letter);
2946 else
2947 output_address (mode, XEXP (op, 0));
2948 break;
2949
2950 default:
2951 if (letter == 'z' && op == CONST0_RTX (GET_MODE (op)))
2952 fputs (reg_names[GP_REG_FIRST], file);
2953 else if (letter && letter != 'z')
2954 output_operand_lossage ("invalid use of '%%%c'", letter);
2955 else
2956 output_addr_const (file, riscv_strip_unspec_address (op));
2957 break;
2958 }
2959 }
2960 }
2961
2962 /* Implement TARGET_PRINT_OPERAND_ADDRESS. */
2963
2964 static void
2965 riscv_print_operand_address (FILE *file, machine_mode mode ATTRIBUTE_UNUSED, rtx x)
2966 {
2967 struct riscv_address_info addr;
2968
2969 if (riscv_classify_address (&addr, x, word_mode, true))
2970 switch (addr.type)
2971 {
2972 case ADDRESS_REG:
2973 riscv_print_operand (file, addr.offset, 0);
2974 fprintf (file, "(%s)", reg_names[REGNO (addr.reg)]);
2975 return;
2976
2977 case ADDRESS_LO_SUM:
2978 riscv_print_operand_reloc (file, addr.offset, false);
2979 fprintf (file, "(%s)", reg_names[REGNO (addr.reg)]);
2980 return;
2981
2982 case ADDRESS_CONST_INT:
2983 output_addr_const (file, x);
2984 fprintf (file, "(%s)", reg_names[GP_REG_FIRST]);
2985 return;
2986
2987 case ADDRESS_SYMBOLIC:
2988 output_addr_const (file, riscv_strip_unspec_address (x));
2989 return;
2990 }
2991 gcc_unreachable ();
2992 }
2993
2994 static bool
2995 riscv_size_ok_for_small_data_p (int size)
2996 {
2997 return g_switch_value && IN_RANGE (size, 1, g_switch_value);
2998 }
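/* For instance, with -msmall-data-limit=8 an 8-byte variable
qualifies and is placed in .sdata or .sbss, where it can be
reached with a gp-relative access. */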
2999
3000 /* Return true if EXP should be placed in the small data section. */
3001
3002 static bool
3003 riscv_in_small_data_p (const_tree x)
3004 {
3005 if (TREE_CODE (x) == STRING_CST || TREE_CODE (x) == FUNCTION_DECL)
3006 return false;
3007
3008 if (TREE_CODE (x) == VAR_DECL && DECL_SECTION_NAME (x))
3009 {
3010 const char *sec = DECL_SECTION_NAME (x);
3011 return strcmp (sec, ".sdata") == 0 || strcmp (sec, ".sbss") == 0;
3012 }
3013
3014 return riscv_size_ok_for_small_data_p (int_size_in_bytes (TREE_TYPE (x)));
3015 }
3016
3017 /* Return a section for X, handling small data. */
3018
3019 static section *
3020 riscv_elf_select_rtx_section (machine_mode mode, rtx x,
3021 unsigned HOST_WIDE_INT align)
3022 {
3023 section *s = default_elf_select_rtx_section (mode, x, align);
3024
3025 if (riscv_size_ok_for_small_data_p (GET_MODE_SIZE (mode)))
3026 {
3027 if (strncmp (s->named.name, ".rodata.cst", strlen (".rodata.cst")) == 0)
3028 {
3029 /* Rename .rodata.cst* to .srodata.cst*. */
3030 char *name = (char *) alloca (strlen (s->named.name) + 2);
3031 sprintf (name, ".s%s", s->named.name + 1);
3032 return get_section (name, s->named.common.flags, NULL);
3033 }
3034
3035 if (s == data_section)
3036 return sdata_section;
3037 }
3038
3039 return s;
3040 }
3041
3042 /* Make the last instruction frame-related and note that it performs
3043 the operation described by FRAME_PATTERN. */
3044
3045 static void
3046 riscv_set_frame_expr (rtx frame_pattern)
3047 {
3048 rtx insn;
3049
3050 insn = get_last_insn ();
3051 RTX_FRAME_RELATED_P (insn) = 1;
3052 REG_NOTES (insn) = alloc_EXPR_LIST (REG_FRAME_RELATED_EXPR,
3053 frame_pattern,
3054 REG_NOTES (insn));
3055 }
3056
3057 /* Return a frame-related rtx that stores REG at MEM.
3058 REG must be a single register. */
3059
3060 static rtx
3061 riscv_frame_set (rtx mem, rtx reg)
3062 {
3063 rtx set = gen_rtx_SET (mem, reg);
3064 RTX_FRAME_RELATED_P (set) = 1;
3065 return set;
3066 }
3067
3068 /* Return true if the current function must save register REGNO. */
3069
3070 static bool
3071 riscv_save_reg_p (unsigned int regno)
3072 {
3073 bool call_saved = !global_regs[regno] && !call_used_regs[regno];
3074 bool might_clobber = crtl->saves_all_registers
3075 || df_regs_ever_live_p (regno);
3076
3077 if (call_saved && might_clobber)
3078 return true;
3079
3080 if (regno == HARD_FRAME_POINTER_REGNUM && frame_pointer_needed)
3081 return true;
3082
3083 if (regno == RETURN_ADDR_REGNUM && crtl->calls_eh_return)
3084 return true;
3085
3086 return false;
3087 }
3088
3089 /* Determine whether to call GPR save/restore routines. */
3090 static bool
3091 riscv_use_save_libcall (const struct riscv_frame_info *frame)
3092 {
3093 if (!TARGET_SAVE_RESTORE || crtl->calls_eh_return || frame_pointer_needed)
3094 return false;
3095
3096 return frame->save_libcall_adjustment != 0;
3097 }
3098
3099 /* Determine which GPR save/restore routine to call. */
3100
3101 static unsigned
3102 riscv_save_libcall_count (unsigned mask)
3103 {
3104 for (unsigned n = GP_REG_LAST; n > GP_REG_FIRST; n--)
3105 if (BITSET_P (mask, n))
3106 return CALLEE_SAVED_REG_NUMBER (n) + 1;
3107 abort ();
3108 }
3109
3110 /* Populate the current function's riscv_frame_info structure.
3111
3112 RISC-V stack frames grow downward. High addresses are at the top.
3113
3114 +-------------------------------+
3115 | |
3116 | incoming stack arguments |
3117 | |
3118 +-------------------------------+ <-- incoming stack pointer
3119 | |
3120 | callee-allocated save area |
3121 | for arguments that are |
3122 | split between registers and |
3123 | the stack |
3124 | |
3125 +-------------------------------+ <-- arg_pointer_rtx
3126 | |
3127 | callee-allocated save area |
3128 | for register varargs |
3129 | |
3130 +-------------------------------+ <-- hard_frame_pointer_rtx;
3131 | | stack_pointer_rtx + gp_sp_offset
3132 | GPR save area | + UNITS_PER_WORD
3133 | |
3134 +-------------------------------+ <-- stack_pointer_rtx + fp_sp_offset
3135 | | + UNITS_PER_HWVALUE
3136 | FPR save area |
3137 | |
3138 +-------------------------------+ <-- frame_pointer_rtx (virtual)
3139 | |
3140 | local variables |
3141 | |
3142 P +-------------------------------+
3143 | |
3144 | outgoing stack arguments |
3145 | |
3146 +-------------------------------+ <-- stack_pointer_rtx
3147
3148 Dynamic stack allocations such as alloca insert data at point P.
3149 They decrease stack_pointer_rtx but leave frame_pointer_rtx and
3150 hard_frame_pointer_rtx unchanged. */
3151
3152 static void
3153 riscv_compute_frame_info (void)
3154 {
3155 struct riscv_frame_info *frame;
3156 HOST_WIDE_INT offset;
3157 unsigned int regno, i, num_x_saved = 0, num_f_saved = 0;
3158
3159 frame = &cfun->machine->frame;
3160 memset (frame, 0, sizeof (*frame));
3161
3162 /* Find out which GPRs we need to save. */
3163 for (regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++)
3164 if (riscv_save_reg_p (regno))
3165 frame->mask |= 1 << (regno - GP_REG_FIRST), num_x_saved++;
3166
3167 /* If this function calls eh_return, we must also save and restore the
3168 EH data registers. */
3169 if (crtl->calls_eh_return)
3170 for (i = 0; (regno = EH_RETURN_DATA_REGNO (i)) != INVALID_REGNUM; i++)
3171 frame->mask |= 1 << (regno - GP_REG_FIRST), num_x_saved++;
3172
3173 /* Find out which FPRs we need to save. This loop must iterate over
3174 the same space as its companion in riscv_for_each_saved_reg. */
3175 if (TARGET_HARD_FLOAT)
3176 for (regno = FP_REG_FIRST; regno <= FP_REG_LAST; regno++)
3177 if (riscv_save_reg_p (regno))
3178 frame->fmask |= 1 << (regno - FP_REG_FIRST), num_f_saved++;
3179
3180 /* At the bottom of the frame are any outgoing stack arguments. */
3181 offset = crtl->outgoing_args_size;
3182 /* Next are local stack variables. */
3183 offset += RISCV_STACK_ALIGN (get_frame_size ());
3184 /* The virtual frame pointer points above the local variables. */
3185 frame->frame_pointer_offset = offset;
3186 /* Next are the callee-saved FPRs. */
3187 if (frame->fmask)
3188 offset += RISCV_STACK_ALIGN (num_f_saved * UNITS_PER_FP_REG);
3189 frame->fp_sp_offset = offset - UNITS_PER_FP_REG;
3190 /* Next are the callee-saved GPRs. */
3191 if (frame->mask)
3192 {
3193 unsigned x_save_size = RISCV_STACK_ALIGN (num_x_saved * UNITS_PER_WORD);
3194 unsigned num_save_restore = 1 + riscv_save_libcall_count (frame->mask);
3195
3196 /* Only use save/restore routines if they don't alter the stack size. */
3197 if (RISCV_STACK_ALIGN (num_save_restore * UNITS_PER_WORD) == x_save_size)
3198 frame->save_libcall_adjustment = x_save_size;
3199
3200 offset += x_save_size;
3201 }
3202 frame->gp_sp_offset = offset - UNITS_PER_WORD;
3203 /* The hard frame pointer points above the callee-saved GPRs. */
3204 frame->hard_frame_pointer_offset = offset;
3205 /* Above the hard frame pointer is the callee-allocated varargs save area. */
3206 offset += RISCV_STACK_ALIGN (cfun->machine->varargs_size);
3207 frame->arg_pointer_offset = offset;
3208 /* Next is the callee-allocated area for pretend stack arguments. */
3209 offset += crtl->args.pretend_args_size;
3210 frame->total_size = offset;
3211 /* Next come the incoming stack pointer and any incoming arguments. */
3212
3213 /* Only use save/restore routines when the GPRs are atop the frame. */
3214 if (frame->hard_frame_pointer_offset != frame->total_size)
3215 frame->save_libcall_adjustment = 0;
3216 }
3217
3218 /* Make sure that we're not trying to eliminate to the wrong hard frame
3219 pointer. */
3220
3221 static bool
3222 riscv_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
3223 {
3224 return (to == HARD_FRAME_POINTER_REGNUM || to == STACK_POINTER_REGNUM);
3225 }
3226
3227 /* Implement INITIAL_ELIMINATION_OFFSET. FROM is either the frame pointer
3228 or argument pointer. TO is either the stack pointer or hard frame
3229 pointer. */
3230
3231 HOST_WIDE_INT
3232 riscv_initial_elimination_offset (int from, int to)
3233 {
3234 HOST_WIDE_INT src, dest;
3235
3236 riscv_compute_frame_info ();
3237
3238 if (to == HARD_FRAME_POINTER_REGNUM)
3239 dest = cfun->machine->frame.hard_frame_pointer_offset;
3240 else if (to == STACK_POINTER_REGNUM)
3241 dest = 0; /* The stack pointer is the base of all offsets, hence 0. */
3242 else
3243 gcc_unreachable ();
3244
3245 if (from == FRAME_POINTER_REGNUM)
3246 src = cfun->machine->frame.frame_pointer_offset;
3247 else if (from == ARG_POINTER_REGNUM)
3248 src = cfun->machine->frame.arg_pointer_offset;
3249 else
3250 gcc_unreachable ();
3251
3252 return src - dest;
3253 }
3254
3255 /* Implement RETURN_ADDR_RTX. We do not support moving back to a
3256 previous frame. */
3257
3258 rtx
3259 riscv_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
3260 {
3261 if (count != 0)
3262 return const0_rtx;
3263
3264 return get_hard_reg_initial_val (Pmode, RETURN_ADDR_REGNUM);
3265 }
3266
3267 /* Emit code to change the current function's return address to
3268 ADDRESS. SCRATCH is available as a scratch register, if needed.
3269 ADDRESS and SCRATCH are both word-mode GPRs. */
3270
3271 void
3272 riscv_set_return_address (rtx address, rtx scratch)
3273 {
3274 rtx slot_address;
3275
3276 gcc_assert (BITSET_P (cfun->machine->frame.mask, RETURN_ADDR_REGNUM));
3277 slot_address = riscv_add_offset (scratch, stack_pointer_rtx,
3278 cfun->machine->frame.gp_sp_offset);
3279 riscv_emit_move (gen_frame_mem (GET_MODE (address), slot_address), address);
3280 }
3281
3282 /* A function to save or store a register. The first argument is the
3283 register and the second is the stack slot. */
3284 typedef void (*riscv_save_restore_fn) (rtx, rtx);
3285
3286 /* Use FN to save or restore register REGNO. MODE is the register's
3287 mode and OFFSET is the offset of its save slot from the current
3288 stack pointer. */
3289
3290 static void
3291 riscv_save_restore_reg (machine_mode mode, int regno,
3292 HOST_WIDE_INT offset, riscv_save_restore_fn fn)
3293 {
3294 rtx mem;
3295
3296 mem = gen_frame_mem (mode, plus_constant (Pmode, stack_pointer_rtx, offset));
3297 fn (gen_rtx_REG (mode, regno), mem);
3298 }
3299
3300 /* Call FN for each register that is saved by the current function.
3301 SP_OFFSET is the offset of the current stack pointer from the start
3302 of the frame. */
3303
3304 static void
3305 riscv_for_each_saved_reg (HOST_WIDE_INT sp_offset, riscv_save_restore_fn fn)
3306 {
3307 HOST_WIDE_INT offset;
3308
3309 /* Save the link register and s-registers. */
3310 offset = cfun->machine->frame.gp_sp_offset - sp_offset;
3311 for (int regno = GP_REG_FIRST; regno <= GP_REG_LAST - 1; regno++)
3312 if (BITSET_P (cfun->machine->frame.mask, regno - GP_REG_FIRST))
3313 {
3314 riscv_save_restore_reg (word_mode, regno, offset, fn);
3315 offset -= UNITS_PER_WORD;
3316 }
3317
3318 /* This loop must iterate over the same space as its companion in
3319 riscv_compute_frame_info. */
3320 offset = cfun->machine->frame.fp_sp_offset - sp_offset;
3321 for (int regno = FP_REG_FIRST; regno <= FP_REG_LAST; regno++)
3322 if (BITSET_P (cfun->machine->frame.fmask, regno - FP_REG_FIRST))
3323 {
3324 machine_mode mode = TARGET_DOUBLE_FLOAT ? DFmode : SFmode;
3325
3326 riscv_save_restore_reg (mode, regno, offset, fn);
3327 offset -= GET_MODE_SIZE (mode);
3328 }
3329 }
3330
3331 /* Save register REG to MEM. Make the instruction frame-related. */
3332
3333 static void
3334 riscv_save_reg (rtx reg, rtx mem)
3335 {
3336 riscv_emit_move (mem, reg);
3337 riscv_set_frame_expr (riscv_frame_set (mem, reg));
3338 }
3339
3340 /* Restore register REG from MEM. */
3341
3342 static void
3343 riscv_restore_reg (rtx reg, rtx mem)
3344 {
3345 rtx insn = riscv_emit_move (reg, mem);
3346 rtx dwarf = NULL_RTX;
3347 dwarf = alloc_reg_note (REG_CFA_RESTORE, reg, dwarf);
3348 REG_NOTES (insn) = dwarf;
3349
3350 RTX_FRAME_RELATED_P (insn) = 1;
3351 }
3352
3353 /* Return the code to invoke the GPR save routine. */
3354
3355 const char *
3356 riscv_output_gpr_save (unsigned mask)
3357 {
3358 static char s[32];
3359 unsigned n = riscv_save_libcall_count (mask);
3360
3361 ssize_t bytes = snprintf (s, sizeof (s), "call\tt0,__riscv_save_%u", n);
3362 gcc_assert ((size_t) bytes < sizeof (s));
3363
3364 return s;
3365 }
3366
3367 /* For stack frames that can't be allocated with a single ADDI instruction,
3368 compute the best value to initially allocate. It must at a minimum
3369 allocate enough space to spill the callee-saved registers. */
3370
3371 static HOST_WIDE_INT
3372 riscv_first_stack_step (struct riscv_frame_info *frame)
3373 {
3374 HOST_WIDE_INT min_first_step = frame->total_size - frame->fp_sp_offset;
3375 HOST_WIDE_INT max_first_step = IMM_REACH / 2 - STACK_BOUNDARY / 8;
3376
3377 if (SMALL_OPERAND (frame->total_size))
3378 return frame->total_size;
3379
3380 /* As an optimization, use the least-significant bits of the total frame
3381 size, so that the second adjustment step is just LUI + ADD. */
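/* E.g., a total size of 0x1234 would give a first step of 0x234,
leaving 0x1000 for the second step, which a single LUI can
materialize. */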
3382 if (!SMALL_OPERAND (frame->total_size - max_first_step)
3383 && frame->total_size % IMM_REACH < IMM_REACH / 2
3384 && frame->total_size % IMM_REACH >= min_first_step)
3385 return frame->total_size % IMM_REACH;
3386
3387 gcc_assert (min_first_step <= max_first_step);
3388 return max_first_step;
3389 }
3390
3391 static rtx
3392 riscv_adjust_libcall_cfi_prologue ()
3393 {
3394 rtx dwarf = NULL_RTX;
3395 rtx adjust_sp_rtx, reg, mem, insn;
3396 int saved_size = cfun->machine->frame.save_libcall_adjustment;
3397 int offset;
3398
3399 for (int regno = GP_REG_FIRST; regno <= GP_REG_LAST - 1; regno++)
3400 if (BITSET_P (cfun->machine->frame.mask, regno - GP_REG_FIRST))
3401 {
3402 /* The save order is ra, s0, s1, s2 to s11. */
3403 if (regno == RETURN_ADDR_REGNUM)
3404 offset = saved_size - UNITS_PER_WORD;
3405 else if (regno == S0_REGNUM)
3406 offset = saved_size - UNITS_PER_WORD * 2;
3407 else if (regno == S1_REGNUM)
3408 offset = saved_size - UNITS_PER_WORD * 3;
3409 else
3410 offset = saved_size - ((regno - S2_REGNUM + 4) * UNITS_PER_WORD);
3411
3412 reg = gen_rtx_REG (SImode, regno);
3413 mem = gen_frame_mem (SImode, plus_constant (Pmode,
3414 stack_pointer_rtx,
3415 offset));
3416
3417 insn = gen_rtx_SET (mem, reg);
3418 dwarf = alloc_reg_note (REG_CFA_OFFSET, insn, dwarf);
3419 }
3420
3421 /* Debug info for adjust sp. */
3422 adjust_sp_rtx = gen_add3_insn (stack_pointer_rtx,
3423 stack_pointer_rtx, GEN_INT (-saved_size));
3424 dwarf = alloc_reg_note (REG_CFA_ADJUST_CFA, adjust_sp_rtx,
3425 dwarf);
3426 return dwarf;
3427 }
3428
3429 static void
3430 riscv_emit_stack_tie (void)
3431 {
3432 if (Pmode == SImode)
3433 emit_insn (gen_stack_tiesi (stack_pointer_rtx, hard_frame_pointer_rtx));
3434 else
3435 emit_insn (gen_stack_tiedi (stack_pointer_rtx, hard_frame_pointer_rtx));
3436 }
3437
3438 /* Expand the "prologue" pattern. */
3439
3440 void
3441 riscv_expand_prologue (void)
3442 {
3443 struct riscv_frame_info *frame = &cfun->machine->frame;
3444 HOST_WIDE_INT size = frame->total_size;
3445 unsigned mask = frame->mask;
3446 rtx insn;
3447
3448 if (flag_stack_usage_info)
3449 current_function_static_stack_size = size;
3450
3451 /* When optimizing for size, call a subroutine to save the registers. */
3452 if (riscv_use_save_libcall (frame))
3453 {
3454 rtx dwarf = NULL_RTX;
3455 dwarf = riscv_adjust_libcall_cfi_prologue ();
3456
3457 frame->mask = 0; /* Temporarily fib that we need not save GPRs. */
3458 size -= frame->save_libcall_adjustment;
3459 insn = emit_insn (gen_gpr_save (GEN_INT (mask)));
3460
3461 RTX_FRAME_RELATED_P (insn) = 1;
3462 REG_NOTES (insn) = dwarf;
3463 }
3464
3465 /* Save the registers. */
3466 if ((frame->mask | frame->fmask) != 0)
3467 {
3468 HOST_WIDE_INT step1 = MIN (size, riscv_first_stack_step (frame));
3469
3470 insn = gen_add3_insn (stack_pointer_rtx,
3471 stack_pointer_rtx,
3472 GEN_INT (-step1));
3473 RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
3474 size -= step1;
3475 riscv_for_each_saved_reg (size, riscv_save_reg);
3476 }
3477
3478 frame->mask = mask; /* Undo the above fib. */
3479
3480 /* Set up the frame pointer, if we're using one. */
3481 if (frame_pointer_needed)
3482 {
3483 insn = gen_add3_insn (hard_frame_pointer_rtx, stack_pointer_rtx,
3484 GEN_INT (frame->hard_frame_pointer_offset - size));
3485 RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
3486
3487 riscv_emit_stack_tie ();
3488 }
3489
3490 /* Allocate the rest of the frame. */
3491 if (size > 0)
3492 {
3493 if (SMALL_OPERAND (-size))
3494 {
3495 insn = gen_add3_insn (stack_pointer_rtx, stack_pointer_rtx,
3496 GEN_INT (-size));
3497 RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
3498 }
3499 else
3500 {
3501 riscv_emit_move (RISCV_PROLOGUE_TEMP (Pmode), GEN_INT (-size));
3502 emit_insn (gen_add3_insn (stack_pointer_rtx,
3503 stack_pointer_rtx,
3504 RISCV_PROLOGUE_TEMP (Pmode)));
3505
3506 /* Describe the effect of the previous instructions. */
3507 insn = plus_constant (Pmode, stack_pointer_rtx, -size);
3508 insn = gen_rtx_SET (stack_pointer_rtx, insn);
3509 riscv_set_frame_expr (insn);
3510 }
3511 }
3512 }
3513
3514 static rtx
3515 riscv_adjust_libcall_cfi_epilogue ()
3516 {
3517 rtx dwarf = NULL_RTX;
3518 rtx adjust_sp_rtx, reg;
3519 int saved_size = cfun->machine->frame.save_libcall_adjustment;
3520
3521 /* Debug info for adjust sp. */
3522 adjust_sp_rtx = gen_add3_insn (stack_pointer_rtx,
3523 stack_pointer_rtx, GEN_INT (saved_size));
3524 dwarf = alloc_reg_note (REG_CFA_ADJUST_CFA, adjust_sp_rtx,
3525 dwarf);
3526
3527 for (int regno = GP_REG_FIRST; regno <= GP_REG_LAST - 1; regno++)
3528 if (BITSET_P (cfun->machine->frame.mask, regno - GP_REG_FIRST))
3529 {
3530 reg = gen_rtx_REG (SImode, regno);
3531 dwarf = alloc_reg_note (REG_CFA_RESTORE, reg, dwarf);
3532 }
3533
3534 return dwarf;
3535 }
3536
3537 /* Expand an "epilogue" or "sibcall_epilogue" pattern; SIBCALL_P
3538 says which. */
3539
3540 void
3541 riscv_expand_epilogue (bool sibcall_p)
3542 {
3543 /* Split the frame into two. STEP1 is the amount of stack we should
3544 deallocate before restoring the registers. STEP2 is the amount we
3545 should deallocate afterwards.
3546
3547 Start off by assuming that no registers need to be restored. */
3548 struct riscv_frame_info *frame = &cfun->machine->frame;
3549 unsigned mask = frame->mask;
3550 HOST_WIDE_INT step1 = frame->total_size;
3551 HOST_WIDE_INT step2 = 0;
3552 bool use_restore_libcall = !sibcall_p && riscv_use_save_libcall (frame);
3553 rtx ra = gen_rtx_REG (Pmode, RETURN_ADDR_REGNUM);
3554 rtx insn;
3555
3556 /* We need a memory barrier to prevent reads from the deallocated stack. */
3557 bool need_barrier_p = (get_frame_size ()
3558 + cfun->machine->frame.arg_pointer_offset) != 0;
3559
3560 if (!sibcall_p && riscv_can_use_return_insn ())
3561 {
3562 emit_jump_insn (gen_return ());
3563 return;
3564 }
3565
3566 /* Move past any dynamic stack allocations. */
3567 if (cfun->calls_alloca)
3568 {
3569 /* Emit a barrier to prevent loads from a deallocated stack. */
3570 riscv_emit_stack_tie ();
3571 need_barrier_p = false;
3572
3573 rtx adjust = GEN_INT (-frame->hard_frame_pointer_offset);
3574 if (!SMALL_OPERAND (INTVAL (adjust)))
3575 {
3576 riscv_emit_move (RISCV_PROLOGUE_TEMP (Pmode), adjust);
3577 adjust = RISCV_PROLOGUE_TEMP (Pmode);
3578 }
3579
3580 insn = emit_insn (
3581 gen_add3_insn (stack_pointer_rtx, hard_frame_pointer_rtx,
3582 adjust));
3583
3584 rtx dwarf = NULL_RTX;
3585 rtx cfa_adjust_value = gen_rtx_PLUS (
3586 Pmode, hard_frame_pointer_rtx,
3587 GEN_INT (-frame->hard_frame_pointer_offset));
3588 rtx cfa_adjust_rtx = gen_rtx_SET (stack_pointer_rtx, cfa_adjust_value);
3589 dwarf = alloc_reg_note (REG_CFA_ADJUST_CFA, cfa_adjust_rtx, dwarf);
3590 RTX_FRAME_RELATED_P (insn) = 1;
3591
3592 REG_NOTES (insn) = dwarf;
3593 }
3594
3595 /* If we need to restore registers, deallocate as much stack as
3596 possible in the second step without going out of range. */
3597 if ((frame->mask | frame->fmask) != 0)
3598 {
3599 step2 = riscv_first_stack_step (frame);
3600 step1 -= step2;
3601 }
3602
3603 /* Set TARGET to BASE + STEP1. */
3604 if (step1 > 0)
3605 {
3606 /* Emit a barrier to prevent loads from a deallocated stack. */
3607 riscv_emit_stack_tie ();
3608 need_barrier_p = false;
3609
3610 /* Get an rtx for STEP1 that we can add to BASE. */
3611 rtx adjust = GEN_INT (step1);
3612 if (!SMALL_OPERAND (step1))
3613 {
3614 riscv_emit_move (RISCV_PROLOGUE_TEMP (Pmode), adjust);
3615 adjust = RISCV_PROLOGUE_TEMP (Pmode);
3616 }
3617
3618 insn = emit_insn (
3619 gen_add3_insn (stack_pointer_rtx, stack_pointer_rtx, adjust));
3620
3621 rtx dwarf = NULL_RTX;
3622 rtx cfa_adjust_rtx = gen_rtx_PLUS (Pmode, stack_pointer_rtx,
3623 GEN_INT (step2));
3624
3625 dwarf = alloc_reg_note (REG_CFA_DEF_CFA, cfa_adjust_rtx, dwarf);
3626 RTX_FRAME_RELATED_P (insn) = 1;
3627
3628 REG_NOTES (insn) = dwarf;
3629 }
3630
3631 if (use_restore_libcall)
3632 frame->mask = 0; /* Temporarily fib that we need not save GPRs. */
3633
3634 /* Restore the registers. */
3635 riscv_for_each_saved_reg (frame->total_size - step2, riscv_restore_reg);
3636
3637 if (use_restore_libcall)
3638 {
3639 frame->mask = mask; /* Undo the above fib. */
3640 gcc_assert (step2 >= frame->save_libcall_adjustment);
3641 step2 -= frame->save_libcall_adjustment;
3642 }
3643
3644 if (need_barrier_p)
3645 riscv_emit_stack_tie ();
3646
3647 /* Deallocate the final bit of the frame. */
3648 if (step2 > 0)
3649 {
3650 insn = emit_insn (gen_add3_insn (stack_pointer_rtx, stack_pointer_rtx,
3651 GEN_INT (step2)));
3652
3653 rtx dwarf = NULL_RTX;
3654 rtx cfa_adjust_rtx = gen_rtx_PLUS (Pmode, stack_pointer_rtx,
3655 const0_rtx);
3656 dwarf = alloc_reg_note (REG_CFA_DEF_CFA, cfa_adjust_rtx, dwarf);
3657 RTX_FRAME_RELATED_P (insn) = 1;
3658
3659 REG_NOTES (insn) = dwarf;
3660 }
3661
3662 if (use_restore_libcall)
3663 {
3664 rtx dwarf = riscv_adjust_libcall_cfi_epilogue ();
3665 insn = emit_insn (gen_gpr_restore (GEN_INT (riscv_save_libcall_count (mask))));
3666 RTX_FRAME_RELATED_P (insn) = 1;
3667 REG_NOTES (insn) = dwarf;
3668
3669 emit_jump_insn (gen_gpr_restore_return (ra));
3670 return;
3671 }
3672
3673 /* Add in the __builtin_eh_return stack adjustment. */
3674 if (crtl->calls_eh_return)
3675 emit_insn (gen_add3_insn (stack_pointer_rtx, stack_pointer_rtx,
3676 EH_RETURN_STACKADJ_RTX));
3677
3678 if (!sibcall_p)
3679 emit_jump_insn (gen_simple_return_internal (ra));
3680 }
3681
3682 /* Return nonzero if this function is known to have a null epilogue.
3683 This allows the optimizer to omit jumps to jumps if no stack
3684 was created. */
3685
3686 bool
3687 riscv_can_use_return_insn (void)
3688 {
3689 return reload_completed && cfun->machine->frame.total_size == 0;
3690 }
3691
3692 /* Implement TARGET_SECONDARY_MEMORY_NEEDED.
3693
3694 When floating-point registers are wider than integer ones, moves between
3695 them must go through memory. */
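/* For example, on RV32 with the D extension, a DFmode value cannot be
moved directly between an FPR and a pair of GPRs, so the move is
staged through a stack slot. */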
3696
3697 static bool
3698 riscv_secondary_memory_needed (machine_mode mode, reg_class_t class1,
3699 reg_class_t class2)
3700 {
3701 return (GET_MODE_SIZE (mode) > UNITS_PER_WORD
3702 && (class1 == FP_REGS) != (class2 == FP_REGS));
3703 }
3704
3705 /* Implement TARGET_REGISTER_MOVE_COST. */
3706
3707 static int
3708 riscv_register_move_cost (machine_mode mode,
3709 reg_class_t from, reg_class_t to)
3710 {
3711 return riscv_secondary_memory_needed (mode, from, to) ? 8 : 2;
3712 }
3713
3714 /* Implement TARGET_HARD_REGNO_NREGS. */
3715
3716 static unsigned int
3717 riscv_hard_regno_nregs (unsigned int regno, machine_mode mode)
3718 {
3719 if (FP_REG_P (regno))
3720 return (GET_MODE_SIZE (mode) + UNITS_PER_FP_REG - 1) / UNITS_PER_FP_REG;
3721
3722 /* All other registers are word-sized. */
3723 return (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
3724 }
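/* Worked example for the hook above, assuming RV32 (UNITS_PER_WORD == 4)
with 64-bit FPRs (UNITS_PER_FP_REG == 8): DFmode needs
(8 + 8 - 1) / 8 == 1 FPR but (8 + 4 - 1) / 4 == 2 GPRs. */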
3725
3726 /* Implement TARGET_HARD_REGNO_MODE_OK. */
3727
3728 static bool
3729 riscv_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
3730 {
3731 unsigned int nregs = riscv_hard_regno_nregs (regno, mode);
3732
3733 if (GP_REG_P (regno))
3734 {
3735 if (!GP_REG_P (regno + nregs - 1))
3736 return false;
3737 }
3738 else if (FP_REG_P (regno))
3739 {
3740 if (!FP_REG_P (regno + nregs - 1))
3741 return false;
3742
3743 if (GET_MODE_CLASS (mode) != MODE_FLOAT
3744 && GET_MODE_CLASS (mode) != MODE_COMPLEX_FLOAT)
3745 return false;
3746
3747 /* Only use callee-saved registers if a potential callee is guaranteed
3748 to spill the requisite width. */
3749 if (GET_MODE_UNIT_SIZE (mode) > UNITS_PER_FP_REG
3750 || (!call_used_regs[regno]
3751 && GET_MODE_UNIT_SIZE (mode) > UNITS_PER_FP_ARG))
3752 return false;
3753 }
3754 else
3755 return false;
3756
3757 /* Require same callee-savedness for all registers. */
3758 for (unsigned i = 1; i < nregs; i++)
3759 if (call_used_regs[regno] != call_used_regs[regno + i])
3760 return false;
3761
3762 return true;
3763 }
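/* Illustrative consequence of the callee-saved test above: with 64-bit
FPRs but a 32-bit FP calling convention (UNITS_PER_FP_ARG == 4, e.g.
the ilp32f ABI on a D-extension target, an assumed configuration),
DFmode is rejected in callee-saved FPRs, since a callee built for that
ABI only guarantees saving the low 32 bits of each FPR. */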
3764
3765 /* Implement TARGET_MODES_TIEABLE_P.
3766
3767 Don't allow floating-point modes to be tied, since type punning of
3768 single-precision and double-precision values is implementation-defined. */
3769
3770 static bool
3771 riscv_modes_tieable_p (machine_mode mode1, machine_mode mode2)
3772 {
3773 return (mode1 == mode2
3774 || !(GET_MODE_CLASS (mode1) == MODE_FLOAT
3775 && GET_MODE_CLASS (mode2) == MODE_FLOAT));
3776 }
3777
3778 /* Implement CLASS_MAX_NREGS. */
3779
3780 static unsigned char
3781 riscv_class_max_nregs (reg_class_t rclass, machine_mode mode)
3782 {
3783 if (reg_class_subset_p (FP_REGS, rclass))
3784 return riscv_hard_regno_nregs (FP_REG_FIRST, mode);
3785
3786 if (reg_class_subset_p (GR_REGS, rclass))
3787 return riscv_hard_regno_nregs (GP_REG_FIRST, mode);
3788
3789 return 0;
3790 }
3791
3792 /* Implement TARGET_MEMORY_MOVE_COST. */
3793
3794 static int
3795 riscv_memory_move_cost (machine_mode mode, reg_class_t rclass, bool in)
3796 {
3797 return (tune_info->memory_cost
3798 + memory_move_secondary_cost (mode, rclass, in));
3799 }
3800
3801 /* Return the number of instructions that can be issued per cycle. */
3802
3803 static int
3804 riscv_issue_rate (void)
3805 {
3806 return tune_info->issue_rate;
3807 }
3808
3809 /* Implement TARGET_ASM_FILE_START. */
3810
3811 static void
3812 riscv_file_start (void)
3813 {
3814 default_file_start ();
3815
3816 /* Instruct GAS to generate position-[in]dependent code. */
3817 fprintf (asm_out_file, "\t.option %spic\n", (flag_pic ? "" : "no"));
3818 }
3819
3820 /* Implement TARGET_ASM_OUTPUT_MI_THUNK. Generate rtl rather than asm text
3821 in order to avoid duplicating too much logic from elsewhere. */
3822
3823 static void
3824 riscv_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
3825 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
3826 tree function)
3827 {
3828 rtx this_rtx, temp1, temp2, fnaddr;
3829 rtx_insn *insn;
3830
3831 /* Pretend to be a post-reload pass while generating rtl. */
3832 reload_completed = 1;
3833
3834 /* Mark the end of the (empty) prologue. */
3835 emit_note (NOTE_INSN_PROLOGUE_END);
3836
3837 /* Determine if we can use a sibcall to call FUNCTION directly. */
3838 fnaddr = gen_rtx_MEM (FUNCTION_MODE, XEXP (DECL_RTL (function), 0));
3839
3840 /* We need two temporary registers in some cases. */
3841 temp1 = gen_rtx_REG (Pmode, RISCV_PROLOGUE_TEMP_REGNUM);
3842 temp2 = gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM);
3843
3844 /* Find out which register contains the "this" pointer. */
3845 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
3846 this_rtx = gen_rtx_REG (Pmode, GP_ARG_FIRST + 1);
3847 else
3848 this_rtx = gen_rtx_REG (Pmode, GP_ARG_FIRST);
3849
3850 /* Add DELTA to THIS_RTX. */
3851 if (delta != 0)
3852 {
3853 rtx offset = GEN_INT (delta);
3854 if (!SMALL_OPERAND (delta))
3855 {
3856 riscv_emit_move (temp1, offset);
3857 offset = temp1;
3858 }
3859 emit_insn (gen_add3_insn (this_rtx, this_rtx, offset));
3860 }
3861
3862 /* If needed, add *(*THIS_RTX + VCALL_OFFSET) to THIS_RTX. */
3863 if (vcall_offset != 0)
3864 {
3865 rtx addr;
3866
3867 /* Set TEMP1 to *THIS_RTX. */
3868 riscv_emit_move (temp1, gen_rtx_MEM (Pmode, this_rtx));
3869
3870 /* Set ADDR to a legitimate address for *THIS_RTX + VCALL_OFFSET. */
3871 addr = riscv_add_offset (temp2, temp1, vcall_offset);
3872
3873 /* Load the offset and add it to THIS_RTX. */
3874 riscv_emit_move (temp1, gen_rtx_MEM (Pmode, addr));
3875 emit_insn (gen_add3_insn (this_rtx, this_rtx, temp1));
3876 }
3877
3878 /* Jump to the target function. */
3879 insn = emit_call_insn (gen_sibcall (fnaddr, const0_rtx, NULL, const0_rtx));
3880 SIBLING_CALL_P (insn) = 1;
3881
3882 /* Run just enough of rest_of_compilation. This sequence was
3883 "borrowed" from alpha.c. */
3884 insn = get_insns ();
3885 split_all_insns_noflow ();
3886 shorten_branches (insn);
3887 final_start_function (insn, file, 1);
3888 final (insn, file, 1);
3889 final_end_function ();
3890
3891 /* Clean up the vars set above. Note that final_end_function resets
3892 the global pointer for us. */
3893 reload_completed = 0;
3894 }
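/* As an illustrative sketch (not the literal output), a thunk with a
small DELTA and zero VCALL_OFFSET reduces to roughly:

   addi a0, a0, <delta>   # adjust the "this" pointer
   tail <function>        # sibcall to the target

Larger deltas and nonzero vcall offsets are routed through the two
temporaries set up above. */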
3895
3896 /* Allocate a chunk of memory for per-function machine-dependent data. */
3897
3898 static struct machine_function *
3899 riscv_init_machine_status (void)
3900 {
3901 return ggc_cleared_alloc<machine_function> ();
3902 }
3903
3904 /* Implement TARGET_OPTION_OVERRIDE. */
3905
3906 static void
3907 riscv_option_override (void)
3908 {
3909 const struct riscv_cpu_info *cpu;
3910
3911 #ifdef SUBTARGET_OVERRIDE_OPTIONS
3912 SUBTARGET_OVERRIDE_OPTIONS;
3913 #endif
3914
3915 flag_pcc_struct_return = 0;
3916
3917 if (flag_pic)
3918 g_switch_value = 0;
3919
3920 /* The presence of the M extension implies that division instructions
3921 are present, so include them unless explicitly disabled. */
3922 if (TARGET_MUL && (target_flags_explicit & MASK_DIV) == 0)
3923 target_flags |= MASK_DIV;
3924 else if (!TARGET_MUL && TARGET_DIV)
3925 error ("-mdiv requires -march to subsume the %<M%> extension");
3926
3927 /* Likewise floating-point division and square root. */
3928 if (TARGET_HARD_FLOAT && (target_flags_explicit & MASK_FDIV) == 0)
3929 target_flags |= MASK_FDIV;
3930
3931 /* Handle -mtune. */
3932 cpu = riscv_parse_cpu (riscv_tune_string ? riscv_tune_string :
3933 RISCV_TUNE_STRING_DEFAULT);
3934 tune_info = optimize_size ? &optimize_size_tune_info : cpu->tune_info;
3935
3936 /* Use -mtune's setting for slow_unaligned_access, even when optimizing
3937 for size. For architectures that trap and emulate unaligned accesses,
3938 the performance cost is too great, even for -Os. Similarly, if
3939 -m[no-]strict-align is left unspecified, heed -mtune's advice. */
3940 riscv_slow_unaligned_access_p = (cpu->tune_info->slow_unaligned_access
3941 || TARGET_STRICT_ALIGN);
3942 if ((target_flags_explicit & MASK_STRICT_ALIGN) == 0
3943 && cpu->tune_info->slow_unaligned_access)
3944 target_flags |= MASK_STRICT_ALIGN;
3945
3946 /* If the user hasn't specified a branch cost, use the processor's
3947 default. */
3948 if (riscv_branch_cost == 0)
3949 riscv_branch_cost = tune_info->branch_cost;
3950
3951 /* Function to allocate machine-dependent function status. */
3952 init_machine_status = &riscv_init_machine_status;
3953
3954 if (flag_pic)
3955 riscv_cmodel = CM_PIC;
3956
3957 /* We get better code with explicit relocs for CM_MEDLOW, but
3958 worse code for the others (for now). Pick the best default. */
3959 if ((target_flags_explicit & MASK_EXPLICIT_RELOCS) == 0)
3960 if (riscv_cmodel == CM_MEDLOW)
3961 target_flags |= MASK_EXPLICIT_RELOCS;
3962
3963 /* Require that the ISA supports the requested floating-point ABI. */
3964 if (UNITS_PER_FP_ARG > (TARGET_HARD_FLOAT ? UNITS_PER_FP_REG : 0))
3965 error ("requested ABI requires -march to subsume the %qc extension",
3966 UNITS_PER_FP_ARG > 8 ? 'Q' : (UNITS_PER_FP_ARG > 4 ? 'D' : 'F'));
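/* Illustrative trigger: -mabi=lp64d together with -march=rv64i (an
assumed command line) gives UNITS_PER_FP_ARG == 8 while
TARGET_HARD_FLOAT is false, so the check above reports that the D
extension is missing. */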
3967
3968 /* We do not yet support ILP32 on RV64. */
3969 if (BITS_PER_WORD != POINTER_SIZE)
3970 error ("ABI requires -march=rv%d", POINTER_SIZE);
3971 }
3972
3973 /* Implement TARGET_CONDITIONAL_REGISTER_USAGE. */
3974
3975 static void
3976 riscv_conditional_register_usage (void)
3977 {
3978 if (!TARGET_HARD_FLOAT)
3979 {
3980 for (int regno = FP_REG_FIRST; regno <= FP_REG_LAST; regno++)
3981 fixed_regs[regno] = call_used_regs[regno] = 1;
3982 }
3983 }
3984
3985 /* Return a register priority for hard reg REGNO. */
3986
3987 static int
3988 riscv_register_priority (int regno)
3989 {
3990 /* Favor x8-x15/f8-f15 to improve the odds of RVC instruction selection. */
3991 if (TARGET_RVC && (IN_RANGE (regno, GP_REG_FIRST + 8, GP_REG_FIRST + 15)
3992 || IN_RANGE (regno, FP_REG_FIRST + 8, FP_REG_FIRST + 15)))
3993 return 1;
3994
3995 return 0;
3996 }
3997
3998 /* Implement TARGET_TRAMPOLINE_INIT. */
3999
4000 static void
4001 riscv_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
4002 {
4003 rtx addr, end_addr, mem;
4004 uint32_t trampoline[4];
4005 unsigned int i;
4006 HOST_WIDE_INT static_chain_offset, target_function_offset;
4007
4008 /* Work out the offsets of the pointers from the start of the
4009 trampoline code. */
4010 gcc_assert (ARRAY_SIZE (trampoline) * 4 == TRAMPOLINE_CODE_SIZE);
4011
4012 /* Get pointers to the beginning and end of the code block. */
4013 addr = force_reg (Pmode, XEXP (m_tramp, 0));
4014 end_addr = riscv_force_binary (Pmode, PLUS, addr,
4015 GEN_INT (TRAMPOLINE_CODE_SIZE));
4016
4018 if (Pmode == SImode)
4019 {
4020 chain_value = force_reg (Pmode, chain_value);
4021
4022 rtx target_function = force_reg (Pmode, XEXP (DECL_RTL (fndecl), 0));
4023 /* lui t2, hi(chain)
4024 lui t1, hi(func)
4025 addi t2, t2, lo(chain)
4026 jr t1, lo(func)
4027 */
4028 unsigned HOST_WIDE_INT lui_hi_chain_code, lui_hi_func_code;
4029 unsigned HOST_WIDE_INT lo_chain_code, lo_func_code;
4030
4031 rtx uimm_mask = force_reg (SImode, gen_int_mode (-IMM_REACH, SImode));
4032
4033 /* 0xfff. */
4034 rtx imm12_mask = gen_reg_rtx (SImode);
4035 emit_insn (gen_one_cmplsi2 (imm12_mask, uimm_mask));
4036
4037 rtx fixup_value = force_reg (SImode, gen_int_mode (IMM_REACH/2, SImode));
4038
4039 /* Gen lui t2, hi(chain). */
4040 rtx hi_chain = riscv_force_binary (SImode, PLUS, chain_value,
4041 fixup_value);
4042 hi_chain = riscv_force_binary (SImode, AND, hi_chain,
4043 uimm_mask);
4044 lui_hi_chain_code = OPCODE_LUI | (STATIC_CHAIN_REGNUM << SHIFT_RD);
4045 rtx lui_hi_chain = riscv_force_binary (SImode, IOR, hi_chain,
4046 gen_int_mode (lui_hi_chain_code, SImode));
4047
4048 mem = adjust_address (m_tramp, SImode, 0);
4049 riscv_emit_move (mem, lui_hi_chain);
4050
4051 /* Gen lui t1, hi(func). */
4052 rtx hi_func = riscv_force_binary (SImode, PLUS, target_function,
4053 fixup_value);
4054 hi_func = riscv_force_binary (SImode, AND, hi_func,
4055 uimm_mask);
4056 lui_hi_func_code = OPCODE_LUI | (RISCV_PROLOGUE_TEMP_REGNUM << SHIFT_RD);
4057 rtx lui_hi_func = riscv_force_binary (SImode, IOR, hi_func,
4058 gen_int_mode (lui_hi_func_code, SImode));
4059
4060 mem = adjust_address (m_tramp, SImode, 1 * GET_MODE_SIZE (SImode));
4061 riscv_emit_move (mem, lui_hi_func);
4062
4063 /* Gen addi t2, t2, lo(chain). */
4064 rtx lo_chain = riscv_force_binary (SImode, AND, chain_value,
4065 imm12_mask);
4066 lo_chain = riscv_force_binary (SImode, ASHIFT, lo_chain, GEN_INT (20));
4067
4068 lo_chain_code = OPCODE_ADDI
4069 | (STATIC_CHAIN_REGNUM << SHIFT_RD)
4070 | (STATIC_CHAIN_REGNUM << SHIFT_RS1);
4071
4072 rtx addi_lo_chain = riscv_force_binary (SImode, IOR, lo_chain,
4073 force_reg (SImode, GEN_INT (lo_chain_code)));
4074
4075 mem = adjust_address (m_tramp, SImode, 2 * GET_MODE_SIZE (SImode));
4076 riscv_emit_move (mem, addi_lo_chain);
4077
4078 /* Gen jr r1, lo(func). */
4079 rtx lo_func = riscv_force_binary (SImode, AND, target_function,
4080 imm12_mask);
4081 lo_func = riscv_force_binary (SImode, ASHIFT, lo_func, GEN_INT (20));
4082
4083 lo_func_code = OPCODE_JALR | (RISCV_PROLOGUE_TEMP_REGNUM << SHIFT_RS1);
4084
4085 rtx jr_lo_func = riscv_force_binary (SImode, IOR, lo_func,
4086 force_reg (SImode, GEN_INT (lo_func_code)));
4087
4088 mem = adjust_address (m_tramp, SImode, 3 * GET_MODE_SIZE (SImode));
4089 riscv_emit_move (mem, jr_lo_func);
4090 }
4091 else
4092 {
4093 static_chain_offset = TRAMPOLINE_CODE_SIZE;
4094 target_function_offset = static_chain_offset + GET_MODE_SIZE (ptr_mode);
4095
4096 /* auipc t2, 0
4097 l[wd] t1, target_function_offset(t2)
4098 l[wd] t2, static_chain_offset(t2)
4099 jr t1
4100 */
4101 trampoline[0] = OPCODE_AUIPC | (STATIC_CHAIN_REGNUM << SHIFT_RD);
4102 trampoline[1] = (Pmode == DImode ? OPCODE_LD : OPCODE_LW)
4103 | (RISCV_PROLOGUE_TEMP_REGNUM << SHIFT_RD)
4104 | (STATIC_CHAIN_REGNUM << SHIFT_RS1)
4105 | (target_function_offset << SHIFT_IMM);
4106 trampoline[2] = (Pmode == DImode ? OPCODE_LD : OPCODE_LW)
4107 | (STATIC_CHAIN_REGNUM << SHIFT_RD)
4108 | (STATIC_CHAIN_REGNUM << SHIFT_RS1)
4109 | (static_chain_offset << SHIFT_IMM);
4110 trampoline[3] = OPCODE_JALR | (RISCV_PROLOGUE_TEMP_REGNUM << SHIFT_RS1);
4111
4112 /* Copy the trampoline code. */
4113 for (i = 0; i < ARRAY_SIZE (trampoline); i++)
4114 {
4115 mem = adjust_address (m_tramp, SImode, i * GET_MODE_SIZE (SImode));
4116 riscv_emit_move (mem, gen_int_mode (trampoline[i], SImode));
4117 }
4118
4119 /* Set up the static chain pointer field. */
4120 mem = adjust_address (m_tramp, ptr_mode, static_chain_offset);
4121 riscv_emit_move (mem, chain_value);
4122
4123 /* Set up the target function field. */
4124 mem = adjust_address (m_tramp, ptr_mode, target_function_offset);
4125 riscv_emit_move (mem, XEXP (DECL_RTL (fndecl), 0));
4126 }
4127
4128 /* Flush the code part of the trampoline. */
4129 emit_insn (gen_add3_insn (end_addr, addr, GEN_INT (TRAMPOLINE_SIZE)));
4130 emit_insn (gen_clear_cache (addr, end_addr));
4131 }
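/* Illustrative RV64 layout of the trampoline built above (offsets follow
from TRAMPOLINE_CODE_SIZE == 16 and an 8-byte ptr_mode):

    0: auipc t2, 0      # t2 <- trampoline address
    4: ld    t1, 24(t2) # load target function
    8: ld    t2, 16(t2) # load static chain
   12: jr    t1
   16: <static chain pointer>
   24: <target function address>  */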
4132
4133 /* Return leaf_function_p () and memoize the result. */
4134
4135 static bool
4136 riscv_leaf_function_p (void)
4137 {
4138 if (cfun->machine->is_leaf == 0)
4139 cfun->machine->is_leaf = leaf_function_p () ? 1 : -1;
4140
4141 return cfun->machine->is_leaf > 0;
4142 }
4143
4144 /* Implement TARGET_FUNCTION_OK_FOR_SIBCALL. */
4145
4146 static bool
4147 riscv_function_ok_for_sibcall (tree decl ATTRIBUTE_UNUSED,
4148 tree exp ATTRIBUTE_UNUSED)
4149 {
4150 /* When optimizing for size with -msave-restore, don't use sibcalls in non-leaf routines. */
4151 if (TARGET_SAVE_RESTORE)
4152 return riscv_leaf_function_p ();
4153
4154 return true;
4155 }
4156
4157 /* Implement TARGET_CANNOT_COPY_INSN_P. */
4158
4159 static bool
4160 riscv_cannot_copy_insn_p (rtx_insn *insn)
4161 {
4162 return recog_memoized (insn) >= 0 && get_attr_cannot_copy (insn);
4163 }
4164
4165 /* Implement TARGET_SLOW_UNALIGNED_ACCESS. */
4166
4167 static bool
4168 riscv_slow_unaligned_access (machine_mode, unsigned int)
4169 {
4170 return riscv_slow_unaligned_access_p;
4171 }
4172
4173 /* Implement TARGET_CAN_CHANGE_MODE_CLASS. */
4174
4175 static bool
4176 riscv_can_change_mode_class (machine_mode, machine_mode, reg_class_t rclass)
4177 {
4178 return !reg_classes_intersect_p (FP_REGS, rclass);
4179 }
4180
4181
4182 /* Implement TARGET_CONSTANT_ALIGNMENT. */
4183
4184 static HOST_WIDE_INT
4185 riscv_constant_alignment (const_tree exp, HOST_WIDE_INT align)
4186 {
4187 if (TREE_CODE (exp) == STRING_CST || TREE_CODE (exp) == CONSTRUCTOR)
4188 return MAX (align, BITS_PER_WORD);
4189 return align;
4190 }
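/* Example effect of the hook above: a 5-byte string constant whose
natural alignment is 1 byte is promoted to BITS_PER_WORD alignment,
which (illustratively) lets string and block operations use full-word
accesses on the constant. */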
4191
4192 /* Initialize the GCC target structure. */
4193 #undef TARGET_ASM_ALIGNED_HI_OP
4194 #define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
4195 #undef TARGET_ASM_ALIGNED_SI_OP
4196 #define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
4197 #undef TARGET_ASM_ALIGNED_DI_OP
4198 #define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t"
4199
4200 #undef TARGET_OPTION_OVERRIDE
4201 #define TARGET_OPTION_OVERRIDE riscv_option_override
4202
4203 #undef TARGET_LEGITIMIZE_ADDRESS
4204 #define TARGET_LEGITIMIZE_ADDRESS riscv_legitimize_address
4205
4206 #undef TARGET_SCHED_ISSUE_RATE
4207 #define TARGET_SCHED_ISSUE_RATE riscv_issue_rate
4208
4209 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
4210 #define TARGET_FUNCTION_OK_FOR_SIBCALL riscv_function_ok_for_sibcall
4211
4212 #undef TARGET_REGISTER_MOVE_COST
4213 #define TARGET_REGISTER_MOVE_COST riscv_register_move_cost
4214 #undef TARGET_MEMORY_MOVE_COST
4215 #define TARGET_MEMORY_MOVE_COST riscv_memory_move_cost
4216 #undef TARGET_RTX_COSTS
4217 #define TARGET_RTX_COSTS riscv_rtx_costs
4218 #undef TARGET_ADDRESS_COST
4219 #define TARGET_ADDRESS_COST riscv_address_cost
4220
4221 #undef TARGET_ASM_FILE_START
4222 #define TARGET_ASM_FILE_START riscv_file_start
4223 #undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
4224 #define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
4225
4226 #undef TARGET_EXPAND_BUILTIN_VA_START
4227 #define TARGET_EXPAND_BUILTIN_VA_START riscv_va_start
4228
4229 #undef TARGET_PROMOTE_FUNCTION_MODE
4230 #define TARGET_PROMOTE_FUNCTION_MODE default_promote_function_mode_always_promote
4231
4232 #undef TARGET_RETURN_IN_MEMORY
4233 #define TARGET_RETURN_IN_MEMORY riscv_return_in_memory
4234
4235 #undef TARGET_ASM_OUTPUT_MI_THUNK
4236 #define TARGET_ASM_OUTPUT_MI_THUNK riscv_output_mi_thunk
4237 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
4238 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
4239
4240 #undef TARGET_PRINT_OPERAND
4241 #define TARGET_PRINT_OPERAND riscv_print_operand
4242 #undef TARGET_PRINT_OPERAND_ADDRESS
4243 #define TARGET_PRINT_OPERAND_ADDRESS riscv_print_operand_address
4244
4245 #undef TARGET_SETUP_INCOMING_VARARGS
4246 #define TARGET_SETUP_INCOMING_VARARGS riscv_setup_incoming_varargs
4247 #undef TARGET_STRICT_ARGUMENT_NAMING
4248 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
4249 #undef TARGET_MUST_PASS_IN_STACK
4250 #define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
4251 #undef TARGET_PASS_BY_REFERENCE
4252 #define TARGET_PASS_BY_REFERENCE riscv_pass_by_reference
4253 #undef TARGET_ARG_PARTIAL_BYTES
4254 #define TARGET_ARG_PARTIAL_BYTES riscv_arg_partial_bytes
4255 #undef TARGET_FUNCTION_ARG
4256 #define TARGET_FUNCTION_ARG riscv_function_arg
4257 #undef TARGET_FUNCTION_ARG_ADVANCE
4258 #define TARGET_FUNCTION_ARG_ADVANCE riscv_function_arg_advance
4259 #undef TARGET_FUNCTION_ARG_BOUNDARY
4260 #define TARGET_FUNCTION_ARG_BOUNDARY riscv_function_arg_boundary
4261
4262 /* The generic ELF target does not always have TLS support. */
4263 #ifdef HAVE_AS_TLS
4264 #undef TARGET_HAVE_TLS
4265 #define TARGET_HAVE_TLS true
4266 #endif
4267
4268 #undef TARGET_CANNOT_FORCE_CONST_MEM
4269 #define TARGET_CANNOT_FORCE_CONST_MEM riscv_cannot_force_const_mem
4270
4271 #undef TARGET_LEGITIMATE_CONSTANT_P
4272 #define TARGET_LEGITIMATE_CONSTANT_P riscv_legitimate_constant_p
4273
4274 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
4275 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P hook_bool_mode_const_rtx_true
4276
4277 #undef TARGET_LEGITIMATE_ADDRESS_P
4278 #define TARGET_LEGITIMATE_ADDRESS_P riscv_legitimate_address_p
4279
4280 #undef TARGET_CAN_ELIMINATE
4281 #define TARGET_CAN_ELIMINATE riscv_can_eliminate
4282
4283 #undef TARGET_CONDITIONAL_REGISTER_USAGE
4284 #define TARGET_CONDITIONAL_REGISTER_USAGE riscv_conditional_register_usage
4285
4286 #undef TARGET_CLASS_MAX_NREGS
4287 #define TARGET_CLASS_MAX_NREGS riscv_class_max_nregs
4288
4289 #undef TARGET_TRAMPOLINE_INIT
4290 #define TARGET_TRAMPOLINE_INIT riscv_trampoline_init
4291
4292 #undef TARGET_IN_SMALL_DATA_P
4293 #define TARGET_IN_SMALL_DATA_P riscv_in_small_data_p
4294
4295 #undef TARGET_ASM_SELECT_RTX_SECTION
4296 #define TARGET_ASM_SELECT_RTX_SECTION riscv_elf_select_rtx_section
4297
4298 #undef TARGET_MIN_ANCHOR_OFFSET
4299 #define TARGET_MIN_ANCHOR_OFFSET (-IMM_REACH/2)
4300
4301 #undef TARGET_MAX_ANCHOR_OFFSET
4302 #define TARGET_MAX_ANCHOR_OFFSET (IMM_REACH/2-1)
4303
4304 #undef TARGET_REGISTER_PRIORITY
4305 #define TARGET_REGISTER_PRIORITY riscv_register_priority
4306
4307 #undef TARGET_CANNOT_COPY_INSN_P
4308 #define TARGET_CANNOT_COPY_INSN_P riscv_cannot_copy_insn_p
4309
4310 #undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV
4311 #define TARGET_ATOMIC_ASSIGN_EXPAND_FENV riscv_atomic_assign_expand_fenv
4312
4313 #undef TARGET_INIT_BUILTINS
4314 #define TARGET_INIT_BUILTINS riscv_init_builtins
4315
4316 #undef TARGET_BUILTIN_DECL
4317 #define TARGET_BUILTIN_DECL riscv_builtin_decl
4318
4319 #undef TARGET_EXPAND_BUILTIN
4320 #define TARGET_EXPAND_BUILTIN riscv_expand_builtin
4321
4322 #undef TARGET_HARD_REGNO_NREGS
4323 #define TARGET_HARD_REGNO_NREGS riscv_hard_regno_nregs
4324 #undef TARGET_HARD_REGNO_MODE_OK
4325 #define TARGET_HARD_REGNO_MODE_OK riscv_hard_regno_mode_ok
4326
4327 #undef TARGET_MODES_TIEABLE_P
4328 #define TARGET_MODES_TIEABLE_P riscv_modes_tieable_p
4329
4330 #undef TARGET_SLOW_UNALIGNED_ACCESS
4331 #define TARGET_SLOW_UNALIGNED_ACCESS riscv_slow_unaligned_access
4332
4333 #undef TARGET_SECONDARY_MEMORY_NEEDED
4334 #define TARGET_SECONDARY_MEMORY_NEEDED riscv_secondary_memory_needed
4335
4336 #undef TARGET_CAN_CHANGE_MODE_CLASS
4337 #define TARGET_CAN_CHANGE_MODE_CLASS riscv_can_change_mode_class
4338
4339 #undef TARGET_CONSTANT_ALIGNMENT
4340 #define TARGET_CONSTANT_ALIGNMENT riscv_constant_alignment
4341
4342 struct gcc_target targetm = TARGET_INITIALIZER;
4343
4344 #include "gt-riscv.h"