[gcc.git] / gcc / ira-costs.c
1 /* IRA hard register and memory cost calculation for allocnos or pseudos.
2 Copyright (C) 2006-2015 Free Software Foundation, Inc.
3 Contributed by Vladimir Makarov <vmakarov@redhat.com>.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
10 version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "hash-table.h"
26 #include "hard-reg-set.h"
27 #include "rtl.h"
28 #include "symtab.h"
29 #include "hashtab.h"
30 #include "hash-set.h"
31 #include "vec.h"
32 #include "input.h"
33 #include "function.h"
34 #include "flags.h"
35 #include "statistics.h"
36 #include "alias.h"
37 #include "inchash.h"
38 #include "tree.h"
39 #include "insn-config.h"
40 #include "expmed.h"
41 #include "dojump.h"
42 #include "explow.h"
43 #include "calls.h"
44 #include "emit-rtl.h"
45 #include "varasm.h"
46 #include "stmt.h"
47 #include "expr.h"
48 #include "tm_p.h"
49 #include "predict.h"
50 #include "dominance.h"
51 #include "cfg.h"
52 #include "basic-block.h"
53 #include "regs.h"
54 #include "addresses.h"
55 #include "recog.h"
56 #include "reload.h"
57 #include "diagnostic-core.h"
58 #include "target.h"
59 #include "params.h"
60 #include "ira-int.h"
61
62 /* The flag is set up every time we calculate pseudo register
63 classes through the function ira_set_pseudo_classes. */
64 static bool pseudo_classes_defined_p = false;
65
66 /* TRUE if we work with allocnos. Otherwise we work with pseudos. */
67 static bool allocno_p;
68
69 /* Number of elements in array `costs'. */
70 static int cost_elements_num;
71
72 /* The `costs' struct records the cost of using hard registers of each
73 class considered for the calculation and of using memory for each
74 allocno or pseudo. */
75 struct costs
76 {
77 int mem_cost;
78 /* Costs for register classes start here. We process only some
79 allocno classes. */
80 int cost[1];
81 };
82
83 #define max_struct_costs_size \
84 (this_target_ira_int->x_max_struct_costs_size)
85 #define init_cost \
86 (this_target_ira_int->x_init_cost)
87 #define temp_costs \
88 (this_target_ira_int->x_temp_costs)
89 #define op_costs \
90 (this_target_ira_int->x_op_costs)
91 #define this_op_costs \
92 (this_target_ira_int->x_this_op_costs)
93
94 /* Costs of each class for each allocno or pseudo. */
95 static struct costs *costs;
96
97 /* Accumulated costs of each class for each allocno. */
98 static struct costs *total_allocno_costs;
99
100 /* The current size of struct costs. */
101 static int struct_costs_size;
102
103 /* Return pointer to structure containing costs of allocno or pseudo
104 with given NUM in array ARR. */
105 #define COSTS(arr, num) \
106 ((struct costs *) ((char *) (arr) + (num) * struct_costs_size))
107
108 /* Return index in COSTS when processing reg with REGNO. */
109 #define COST_INDEX(regno) (allocno_p \
110 ? ALLOCNO_NUM (ira_curr_regno_allocno_map[regno]) \
111 : (int) regno)
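/* Note: struct costs ends with a one-element array, but each record is
   actually allocated with room for one cost per considered cost class
   (see how struct_costs_size is computed in find_costs_and_classes).
   Because the records are variable-sized, arrays of them cannot be
   indexed directly; COSTS does the byte arithmetic instead.  A typical
   (illustrative) use is

     struct costs *p = COSTS (costs, COST_INDEX (regno));
     p->mem_cost += add_cost;
     p->cost[k] += add_cost;

   where K indexes the pseudo's cost classes.  */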
112
113 /* Record register class preferences of each allocno or pseudo. Null
114 value means no preferences. It happens on the 1st iteration of the
115 cost calculation. */
116 static enum reg_class *pref;
117
118 /* Allocated buffer for pref. */
119 static enum reg_class *pref_buffer;
120
121 /* Record allocno class of each allocno with the same regno. */
122 static enum reg_class *regno_aclass;
123
124 /* Record cost gains for not allocating a register with an invariant
125 equivalence. */
126 static int *regno_equiv_gains;
127
128 /* Execution frequency of the current insn. */
129 static int frequency;
130
131 \f
132
133 /* Info about reg classes whose costs are calculated for a pseudo. */
134 struct cost_classes
135 {
136 /* Number of the cost classes in the subsequent array. */
137 int num;
138 /* Container of the cost classes. */
139 enum reg_class classes[N_REG_CLASSES];
140 /* Map reg class -> index of the reg class in the previous array.
141 -1 if it is not a cost class. */
142 int index[N_REG_CLASSES];
143 /* Map hard regno -> index of the first class in array CLASSES
144 containing the hard regno, -1 otherwise. */
145 int hard_regno_index[FIRST_PSEUDO_REGISTER];
146 };
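/* For illustration (with hypothetical target classes): if a pseudo's
   cost classes are { GENERAL_REGS, FLOAT_REGS }, then NUM is 2,
   CLASSES[0] is GENERAL_REGS, CLASSES[1] is FLOAT_REGS,
   INDEX[GENERAL_REGS] is 0, INDEX[FLOAT_REGS] is 1 and every other
   INDEX element is -1, while HARD_REGNO_INDEX[R] is the position (0 or
   1) of the first of those classes containing hard register R, or -1
   if neither class contains R.  */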
147
148 /* Types of pointers to the structure above. */
149 typedef struct cost_classes *cost_classes_t;
150 typedef const struct cost_classes *const_cost_classes_t;
151
152 /* Info about cost classes for each pseudo. */
153 static cost_classes_t *regno_cost_classes;
154
155 /* Helper for cost_classes hashing. */
156
157 struct cost_classes_hasher
158 {
159 typedef cost_classes *value_type;
160 typedef cost_classes *compare_type;
161 static inline hashval_t hash (const cost_classes *);
162 static inline bool equal (const cost_classes *, const cost_classes *);
163 static inline void remove (cost_classes *);
164 };
165
166 /* Returns hash value for cost classes info HV. */
167 inline hashval_t
168 cost_classes_hasher::hash (const cost_classes *hv)
169 {
170 return iterative_hash (&hv->classes, sizeof (enum reg_class) * hv->num, 0);
171 }
172
173 /* Compares cost classes info HV1 and HV2. */
174 inline bool
175 cost_classes_hasher::equal (const cost_classes *hv1, const cost_classes *hv2)
176 {
177 return (hv1->num == hv2->num
178 && memcmp (hv1->classes, hv2->classes,
179 sizeof (enum reg_class) * hv1->num) == 0);
180 }
181
182 /* Delete cost classes info V from the hash table. */
183 inline void
184 cost_classes_hasher::remove (cost_classes *v)
185 {
186 ira_free (v);
187 }
188
189 /* Hash table of unique cost classes. */
190 static hash_table<cost_classes_hasher> *cost_classes_htab;
191
192 /* Map allocno class -> cost classes for pseudo of given allocno
193 class. */
194 static cost_classes_t cost_classes_aclass_cache[N_REG_CLASSES];
195
196 /* Map mode -> cost classes for pseudo of given mode. */
197 static cost_classes_t cost_classes_mode_cache[MAX_MACHINE_MODE];
198
199 /* Cost classes that include all classes in ira_important_classes. */
200 static cost_classes all_cost_classes;
201
202 /* Use the array of classes in CLASSES_PTR to fill out the rest of
203 the structure. */
204 static void
205 complete_cost_classes (cost_classes_t classes_ptr)
206 {
207 for (int i = 0; i < N_REG_CLASSES; i++)
208 classes_ptr->index[i] = -1;
209 for (int i = 0; i < FIRST_PSEUDO_REGISTER; i++)
210 classes_ptr->hard_regno_index[i] = -1;
211 for (int i = 0; i < classes_ptr->num; i++)
212 {
213 enum reg_class cl = classes_ptr->classes[i];
214 classes_ptr->index[cl] = i;
215 for (int j = ira_class_hard_regs_num[cl] - 1; j >= 0; j--)
216 {
217 unsigned int hard_regno = ira_class_hard_regs[cl][j];
218 if (classes_ptr->hard_regno_index[hard_regno] < 0)
219 classes_ptr->hard_regno_index[hard_regno] = i;
220 }
221 }
222 }
223
224 /* Initialize info about the cost classes for each pseudo. */
225 static void
226 initiate_regno_cost_classes (void)
227 {
228 int size = sizeof (cost_classes_t) * max_reg_num ();
229
230 regno_cost_classes = (cost_classes_t *) ira_allocate (size);
231 memset (regno_cost_classes, 0, size);
232 memset (cost_classes_aclass_cache, 0,
233 sizeof (cost_classes_t) * N_REG_CLASSES);
234 memset (cost_classes_mode_cache, 0,
235 sizeof (cost_classes_t) * MAX_MACHINE_MODE);
236 cost_classes_htab = new hash_table<cost_classes_hasher> (200);
237 all_cost_classes.num = ira_important_classes_num;
238 for (int i = 0; i < ira_important_classes_num; i++)
239 all_cost_classes.classes[i] = ira_important_classes[i];
240 complete_cost_classes (&all_cost_classes);
241 }
242
243 /* Create new cost classes from cost classes FROM and set up members
244 index and hard_regno_index. Return the new classes. The function
245 implements some common code of two functions
246 setup_regno_cost_classes_by_aclass and
247 setup_regno_cost_classes_by_mode. */
248 static cost_classes_t
249 setup_cost_classes (cost_classes_t from)
250 {
251 cost_classes_t classes_ptr;
252
253 classes_ptr = (cost_classes_t) ira_allocate (sizeof (struct cost_classes));
254 classes_ptr->num = from->num;
255 for (int i = 0; i < from->num; i++)
256 classes_ptr->classes[i] = from->classes[i];
257 complete_cost_classes (classes_ptr);
258 return classes_ptr;
259 }
260
261 /* Return a version of FULL that only considers registers in REGS that are
262 valid for mode MODE. Both FULL and the returned class are globally
263 allocated. */
264 static cost_classes_t
265 restrict_cost_classes (cost_classes_t full, machine_mode mode,
266 const HARD_REG_SET &regs)
267 {
268 static struct cost_classes narrow;
269 int map[N_REG_CLASSES];
270 narrow.num = 0;
271 for (int i = 0; i < full->num; i++)
272 {
273 /* Assume that we'll drop the class. */
274 map[i] = -1;
275
276 /* Ignore classes that are too small for the mode. */
277 enum reg_class cl = full->classes[i];
278 if (!contains_reg_of_mode[cl][mode])
279 continue;
280
281 /* Calculate the set of registers in CL that belong to REGS and
282 are valid for MODE. */
283 HARD_REG_SET valid_for_cl;
284 COPY_HARD_REG_SET (valid_for_cl, reg_class_contents[cl]);
285 AND_HARD_REG_SET (valid_for_cl, regs);
286 AND_COMPL_HARD_REG_SET (valid_for_cl,
287 ira_prohibited_class_mode_regs[cl][mode]);
288 AND_COMPL_HARD_REG_SET (valid_for_cl, ira_no_alloc_regs);
289 if (hard_reg_set_empty_p (valid_for_cl))
290 continue;
291
292 /* Don't use this class if the set of valid registers is a subset
293 of an existing class. For example, suppose we have two classes
294 GR_REGS and FR_REGS and a union class GR_AND_FR_REGS. Suppose
295 that the mode changes allowed by FR_REGS are not as general as
296 the mode changes allowed by GR_REGS.
297
298 In this situation, the mode changes for GR_AND_FR_REGS could
299 either be seen as the union or the intersection of the mode
300 changes allowed by the two subclasses. The justification for
301 the union-based definition would be that, if you want a mode
302 change that's only allowed by GR_REGS, you can pick a register
303 from the GR_REGS subclass. The justification for the
304 intersection-based definition would be that every register
305 from the class would allow the mode change.
306
307 However, if we have a register that needs to be in GR_REGS,
308 using GR_AND_FR_REGS with the intersection-based definition
309 would be too pessimistic, since it would bring in restrictions
310 that only apply to FR_REGS. Conversely, if we have a register
311 that needs to be in FR_REGS, using GR_AND_FR_REGS with the
312 union-based definition would lose the extra restrictions
313 placed on FR_REGS. GR_AND_FR_REGS is therefore only useful
314 for cases where GR_REGS and FR_REGS are both valid. */
315 int pos;
316 for (pos = 0; pos < narrow.num; ++pos)
317 {
318 enum reg_class cl2 = narrow.classes[pos];
319 if (hard_reg_set_subset_p (valid_for_cl, reg_class_contents[cl2]))
320 break;
321 }
322 map[i] = pos;
323 if (pos == narrow.num)
324 {
325 /* If several classes are equivalent, prefer to use the one
326 that was chosen as the allocno class. */
327 enum reg_class cl2 = ira_allocno_class_translate[cl];
328 if (ira_class_hard_regs_num[cl] == ira_class_hard_regs_num[cl2])
329 cl = cl2;
330 narrow.classes[narrow.num++] = cl;
331 }
332 }
333 if (narrow.num == full->num)
334 return full;
335
336 cost_classes **slot = cost_classes_htab->find_slot (&narrow, INSERT);
337 if (*slot == NULL)
338 {
339 cost_classes_t classes = setup_cost_classes (&narrow);
340 /* Map equivalent classes to the representative that we chose above. */
341 for (int i = 0; i < ira_important_classes_num; i++)
342 {
343 enum reg_class cl = ira_important_classes[i];
344 int index = full->index[cl];
345 if (index >= 0)
346 classes->index[cl] = map[index];
347 }
348 *slot = classes;
349 }
350 return *slot;
351 }
352
353 /* Setup cost classes for pseudo REGNO whose allocno class is ACLASS.
354 This function is used when we already know an initial approximation
355 of the pseudo's allocno class, e.g. on the second iteration
356 of class cost calculation or after class cost calculation in
357 register-pressure sensitive insn scheduling or register-pressure
358 sensitive loop-invariant motion. */
359 static void
360 setup_regno_cost_classes_by_aclass (int regno, enum reg_class aclass)
361 {
362 static struct cost_classes classes;
363 cost_classes_t classes_ptr;
364 enum reg_class cl;
365 int i;
366 cost_classes **slot;
367 HARD_REG_SET temp, temp2;
368 bool exclude_p;
369
370 if ((classes_ptr = cost_classes_aclass_cache[aclass]) == NULL)
371 {
372 COPY_HARD_REG_SET (temp, reg_class_contents[aclass]);
373 AND_COMPL_HARD_REG_SET (temp, ira_no_alloc_regs);
374 /* We exclude from consideration classes which are subsets of
375 ACLASS only if ACLASS is a uniform class. */
376 exclude_p = ira_uniform_class_p[aclass];
377 classes.num = 0;
378 for (i = 0; i < ira_important_classes_num; i++)
379 {
380 cl = ira_important_classes[i];
381 if (exclude_p)
382 {
383 /* Exclude non-uniform classes which are subsets of
384 ACLASS. */
385 COPY_HARD_REG_SET (temp2, reg_class_contents[cl]);
386 AND_COMPL_HARD_REG_SET (temp2, ira_no_alloc_regs);
387 if (hard_reg_set_subset_p (temp2, temp) && cl != aclass)
388 continue;
389 }
390 classes.classes[classes.num++] = cl;
391 }
392 slot = cost_classes_htab->find_slot (&classes, INSERT);
393 if (*slot == NULL)
394 {
395 classes_ptr = setup_cost_classes (&classes);
396 *slot = classes_ptr;
397 }
398 classes_ptr = cost_classes_aclass_cache[aclass] = (cost_classes_t) *slot;
399 }
400 if (regno_reg_rtx[regno] != NULL_RTX)
401 {
402 /* Restrict the classes to those that are valid for REGNO's mode
403 (which might for example exclude singleton classes if the mode
404 requires two registers). Also restrict the classes to those that
405 are valid for subregs of REGNO. */
406 const HARD_REG_SET *valid_regs = valid_mode_changes_for_regno (regno);
407 if (!valid_regs)
408 valid_regs = &reg_class_contents[ALL_REGS];
409 classes_ptr = restrict_cost_classes (classes_ptr,
410 PSEUDO_REGNO_MODE (regno),
411 *valid_regs);
412 }
413 regno_cost_classes[regno] = classes_ptr;
414 }
415
416 /* Setup cost classes for pseudo REGNO with MODE. Using MODE can
417 decrease the number of cost classes for the pseudo if hard registers
418 of some important classes cannot hold a value of MODE. In that case
419 the pseudo cannot get a hard register of those classes, and computing
420 costs for them would only waste CPU
421 time. */
422 static void
423 setup_regno_cost_classes_by_mode (int regno, machine_mode mode)
424 {
425 if (const HARD_REG_SET *valid_regs = valid_mode_changes_for_regno (regno))
426 regno_cost_classes[regno] = restrict_cost_classes (&all_cost_classes,
427 mode, *valid_regs);
428 else
429 {
430 if (cost_classes_mode_cache[mode] == NULL)
431 cost_classes_mode_cache[mode]
432 = restrict_cost_classes (&all_cost_classes, mode,
433 reg_class_contents[ALL_REGS]);
434 regno_cost_classes[regno] = cost_classes_mode_cache[mode];
435 }
436 }
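/* For example, if no register of some important class can hold a value
   of MODE (contains_reg_of_mode is false for that class and MODE,
   e.g. because the class is a singleton and MODE needs two registers),
   restrict_cost_classes drops the class, and no per-class costs are
   computed for it for this pseudo.  */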
437
438 /* Finalize info about the cost classes for each pseudo. */
439 static void
440 finish_regno_cost_classes (void)
441 {
442 ira_free (regno_cost_classes);
443 delete cost_classes_htab;
444 cost_classes_htab = NULL;
445 }
446
447 \f
448
449 /* Compute the cost of loading X into (if TO_P is TRUE) or from (if
450 TO_P is FALSE) a register of class RCLASS in mode MODE. X must not
451 be a pseudo register. */
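/* (Illustration: if moving X into RCLASS requires an intermediate
   register of some secondary class, the result is the cost of getting
   X into that secondary class -- computed by the recursive call --
   plus the secondary class -> RCLASS register move cost plus any extra
   cost the target reports through secondary_reload_info.)  */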
452 static int
453 copy_cost (rtx x, machine_mode mode, reg_class_t rclass, bool to_p,
454 secondary_reload_info *prev_sri)
455 {
456 secondary_reload_info sri;
457 reg_class_t secondary_class = NO_REGS;
458
459 /* If X is a SCRATCH, there is actually nothing to move since we are
460 assuming optimal allocation. */
461 if (GET_CODE (x) == SCRATCH)
462 return 0;
463
464 /* Get the class we will actually use for a reload. */
465 rclass = targetm.preferred_reload_class (x, rclass);
466
467 /* If we need a secondary reload for an intermediate, the cost is
468 the cost of loading the input into the intermediate register plus
469 the cost of copying it. */
470 sri.prev_sri = prev_sri;
471 sri.extra_cost = 0;
472 secondary_class = targetm.secondary_reload (to_p, x, rclass, mode, &sri);
473
474 if (secondary_class != NO_REGS)
475 {
476 ira_init_register_move_cost_if_necessary (mode);
477 return (ira_register_move_cost[mode][(int) secondary_class][(int) rclass]
478 + sri.extra_cost
479 + copy_cost (x, mode, secondary_class, to_p, &sri));
480 }
481
482 /* For memory, use the memory move cost, for (hard) registers, use
483 the cost to move between the register classes, and use the cost
484 of a single insn for everything else (constants). */
485 if (MEM_P (x) || rclass == NO_REGS)
486 return sri.extra_cost
487 + ira_memory_move_cost[mode][(int) rclass][to_p != 0];
488 else if (REG_P (x))
489 {
490 reg_class_t x_class = REGNO_REG_CLASS (REGNO (x));
491
492 ira_init_register_move_cost_if_necessary (mode);
493 return (sri.extra_cost
494 + ira_register_move_cost[mode][(int) x_class][(int) rclass]);
495 }
496 else
497 /* If this is a constant, we may eventually want to call rtx_cost
498 here. */
499 return sri.extra_cost + COSTS_N_INSNS (1);
500 }
501
502 \f
503
504 /* Record the cost of using memory or hard registers of various
505 classes for the operands in INSN.
506
507 N_ALTS is the number of alternatives.
508 N_OPS is the number of operands.
509 OPS is an array of the operands.
510 MODES are the modes of the operands, in case any are VOIDmode.
511 CONSTRAINTS are the constraints to use for the operands. This array
512 is modified by this procedure.
513
514 This procedure works alternative by alternative. For each
515 alternative we assume that we will be able to allocate all allocnos
516 to their ideal register class and calculate the cost of using that
517 alternative. Then we compute, for each operand that is a
518 pseudo-register, the cost of having the allocno allocated to each
519 register class and using it in that alternative. To this cost is
520 added the cost of the alternative.
521
522 The cost of each class for this insn is its lowest cost among all
523 the alternatives. */
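/* As a simplified illustration: if a pseudo operand is matched by an
   alternative whose constraint requires class CL, then for each cost
   class RCLASS of that pseudo this pass computes the RCLASS <-> CL
   move cost (or the memory move cost when CL is NO_REGS), scales it by
   the insn's execution frequency and adds the alternative's own cost;
   the value finally recorded for RCLASS is the minimum of these sums
   over all allowed alternatives.  */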
524 static void
525 record_reg_classes (int n_alts, int n_ops, rtx *ops,
526 machine_mode *modes, const char **constraints,
527 rtx_insn *insn, enum reg_class *pref)
528 {
529 int alt;
530 int i, j, k;
531 int insn_allows_mem[MAX_RECOG_OPERANDS];
532 move_table *move_in_cost, *move_out_cost;
533 short (*mem_cost)[2];
534
535 for (i = 0; i < n_ops; i++)
536 insn_allows_mem[i] = 0;
537
538 /* Process each alternative, each time minimizing an operand's cost
539 with the cost for each operand in that alternative. */
540 alternative_mask preferred = get_preferred_alternatives (insn);
541 for (alt = 0; alt < n_alts; alt++)
542 {
543 enum reg_class classes[MAX_RECOG_OPERANDS];
544 int allows_mem[MAX_RECOG_OPERANDS];
545 enum reg_class rclass;
546 int alt_fail = 0;
547 int alt_cost = 0, op_cost_add;
548
549 if (!TEST_BIT (preferred, alt))
550 {
551 for (i = 0; i < recog_data.n_operands; i++)
552 constraints[i] = skip_alternative (constraints[i]);
553
554 continue;
555 }
556
557 for (i = 0; i < n_ops; i++)
558 {
559 unsigned char c;
560 const char *p = constraints[i];
561 rtx op = ops[i];
562 machine_mode mode = modes[i];
563 int allows_addr = 0;
564 int win = 0;
565
566 /* Initially show we know nothing about the register class. */
567 classes[i] = NO_REGS;
568 allows_mem[i] = 0;
569
570 /* If this operand has no constraints at all, we can
571 conclude nothing about it since anything is valid. */
572 if (*p == 0)
573 {
574 if (REG_P (op) && REGNO (op) >= FIRST_PSEUDO_REGISTER)
575 memset (this_op_costs[i], 0, struct_costs_size);
576 continue;
577 }
578
579 /* If this alternative is only relevant when this operand
580 matches a previous operand, we do different things
581 depending on whether this operand is an allocno-reg or not.
582 We must process any modifiers for the operand before we
583 can make this test. */
584 while (*p == '%' || *p == '=' || *p == '+' || *p == '&')
585 p++;
586
587 if (p[0] >= '0' && p[0] <= '0' + i)
588 {
589 /* Copy class and whether memory is allowed from the
590 matching alternative. Then perform any needed cost
591 computations and/or adjustments. */
592 j = p[0] - '0';
593 classes[i] = classes[j];
594 allows_mem[i] = allows_mem[j];
595 if (allows_mem[i])
596 insn_allows_mem[i] = 1;
597
598 if (! REG_P (op) || REGNO (op) < FIRST_PSEUDO_REGISTER)
599 {
600 /* If this matches the other operand, we have no
601 added cost and we win. */
602 if (rtx_equal_p (ops[j], op))
603 win = 1;
604 /* If we can put the other operand into a register,
605 add to the cost of this alternative the cost to
606 copy this operand to the register used for the
607 other operand. */
608 else if (classes[j] != NO_REGS)
609 {
610 alt_cost += copy_cost (op, mode, classes[j], 1, NULL);
611 win = 1;
612 }
613 }
614 else if (! REG_P (ops[j])
615 || REGNO (ops[j]) < FIRST_PSEUDO_REGISTER)
616 {
617 /* This op is an allocno but the one it matches is
618 not. */
619
620 /* If we can't put the other operand into a
621 register, this alternative can't be used. */
622
623 if (classes[j] == NO_REGS)
624 alt_fail = 1;
625 /* Otherwise, add to the cost of this alternative
626 the cost to copy the other operand to the hard
627 register used for this operand. */
628 else
629 alt_cost += copy_cost (ops[j], mode, classes[j], 1, NULL);
630 }
631 else
632 {
633 /* The costs of this operand are not the same as the
634 other operand since move costs are not symmetric.
635 Moreover, if we cannot tie them, this alternative
636 needs to do a copy, which is one insn. */
637 struct costs *pp = this_op_costs[i];
638 int *pp_costs = pp->cost;
639 cost_classes_t cost_classes_ptr
640 = regno_cost_classes[REGNO (op)];
641 enum reg_class *cost_classes = cost_classes_ptr->classes;
642 bool in_p = recog_data.operand_type[i] != OP_OUT;
643 bool out_p = recog_data.operand_type[i] != OP_IN;
644 enum reg_class op_class = classes[i];
645
646 ira_init_register_move_cost_if_necessary (mode);
647 if (! in_p)
648 {
649 ira_assert (out_p);
650 if (op_class == NO_REGS)
651 {
652 mem_cost = ira_memory_move_cost[mode];
653 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
654 {
655 rclass = cost_classes[k];
656 pp_costs[k] = mem_cost[rclass][0] * frequency;
657 }
658 }
659 else
660 {
661 move_out_cost = ira_may_move_out_cost[mode];
662 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
663 {
664 rclass = cost_classes[k];
665 pp_costs[k]
666 = move_out_cost[op_class][rclass] * frequency;
667 }
668 }
669 }
670 else if (! out_p)
671 {
672 ira_assert (in_p);
673 if (op_class == NO_REGS)
674 {
675 mem_cost = ira_memory_move_cost[mode];
676 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
677 {
678 rclass = cost_classes[k];
679 pp_costs[k] = mem_cost[rclass][1] * frequency;
680 }
681 }
682 else
683 {
684 move_in_cost = ira_may_move_in_cost[mode];
685 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
686 {
687 rclass = cost_classes[k];
688 pp_costs[k]
689 = move_in_cost[rclass][op_class] * frequency;
690 }
691 }
692 }
693 else
694 {
695 if (op_class == NO_REGS)
696 {
697 mem_cost = ira_memory_move_cost[mode];
698 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
699 {
700 rclass = cost_classes[k];
701 pp_costs[k] = ((mem_cost[rclass][0]
702 + mem_cost[rclass][1])
703 * frequency);
704 }
705 }
706 else
707 {
708 move_in_cost = ira_may_move_in_cost[mode];
709 move_out_cost = ira_may_move_out_cost[mode];
710 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
711 {
712 rclass = cost_classes[k];
713 pp_costs[k] = ((move_in_cost[rclass][op_class]
714 + move_out_cost[op_class][rclass])
715 * frequency);
716 }
717 }
718 }
719
720 /* If the alternative actually allows memory, make
721 things a bit cheaper since we won't need an extra
722 insn to load it. */
723 pp->mem_cost
724 = ((out_p ? ira_memory_move_cost[mode][op_class][0] : 0)
725 + (in_p ? ira_memory_move_cost[mode][op_class][1] : 0)
726 - allows_mem[i]) * frequency;
727
728 /* If we have assigned a class to this allocno in
729 our first pass, add a cost to this alternative
730 corresponding to what we would add if this
731 allocno were not in the appropriate class. */
732 if (pref)
733 {
734 enum reg_class pref_class = pref[COST_INDEX (REGNO (op))];
735
736 if (pref_class == NO_REGS)
737 alt_cost
738 += ((out_p
739 ? ira_memory_move_cost[mode][op_class][0] : 0)
740 + (in_p
741 ? ira_memory_move_cost[mode][op_class][1]
742 : 0));
743 else if (ira_reg_class_intersect
744 [pref_class][op_class] == NO_REGS)
745 alt_cost
746 += ira_register_move_cost[mode][pref_class][op_class];
747 }
748 if (REGNO (ops[i]) != REGNO (ops[j])
749 && ! find_reg_note (insn, REG_DEAD, op))
750 alt_cost += 2;
751
752 p++;
753 }
754 }
755
756 /* Scan all the constraint letters. See if the operand
757 matches any of the constraints. Collect the valid
758 register classes and see if this operand accepts
759 memory. */
760 while ((c = *p))
761 {
762 switch (c)
763 {
764 case '*':
765 /* Ignore the next letter for this pass. */
766 c = *++p;
767 break;
768
769 case '^':
770 alt_cost += 2;
771 break;
772
773 case '?':
774 alt_cost += 2;
775 break;
776
777 case 'g':
778 if (MEM_P (op)
779 || (CONSTANT_P (op)
780 && (! flag_pic || LEGITIMATE_PIC_OPERAND_P (op))))
781 win = 1;
782 insn_allows_mem[i] = allows_mem[i] = 1;
783 classes[i] = ira_reg_class_subunion[classes[i]][GENERAL_REGS];
784 break;
785
786 default:
787 enum constraint_num cn = lookup_constraint (p);
788 enum reg_class cl;
789 switch (get_constraint_type (cn))
790 {
791 case CT_REGISTER:
792 cl = reg_class_for_constraint (cn);
793 if (cl != NO_REGS)
794 classes[i] = ira_reg_class_subunion[classes[i]][cl];
795 break;
796
797 case CT_CONST_INT:
798 if (CONST_INT_P (op)
799 && insn_const_int_ok_for_constraint (INTVAL (op), cn))
800 win = 1;
801 break;
802
803 case CT_MEMORY:
804 /* Every MEM can be reloaded to fit. */
805 insn_allows_mem[i] = allows_mem[i] = 1;
806 if (MEM_P (op))
807 win = 1;
808 break;
809
810 case CT_ADDRESS:
811 /* Every address can be reloaded to fit. */
812 allows_addr = 1;
813 if (address_operand (op, GET_MODE (op))
814 || constraint_satisfied_p (op, cn))
815 win = 1;
816 /* We know this operand is an address, so we
817 want it to be allocated to a hard register
818 that can be the base of an address,
819 i.e. BASE_REG_CLASS. */
820 classes[i]
821 = ira_reg_class_subunion[classes[i]]
822 [base_reg_class (VOIDmode, ADDR_SPACE_GENERIC,
823 ADDRESS, SCRATCH)];
824 break;
825
826 case CT_FIXED_FORM:
827 if (constraint_satisfied_p (op, cn))
828 win = 1;
829 break;
830 }
831 break;
832 }
833 p += CONSTRAINT_LEN (c, p);
834 if (c == ',')
835 break;
836 }
837
838 constraints[i] = p;
839
840 /* How we account for this operand now depends on whether it
841 is a pseudo register or not. If it is, we first check if
842 any register classes are valid. If not, we ignore this
843 alternative, since we want to assume that all allocnos get
844 allocated for register preferencing. If some register
845 class is valid, compute the costs of moving the allocno
846 into that class. */
847 if (REG_P (op) && REGNO (op) >= FIRST_PSEUDO_REGISTER)
848 {
849 if (classes[i] == NO_REGS && ! allows_mem[i])
850 {
851 /* We must always fail if the operand is a REG, but
852 we did not find a suitable class and memory is
853 not allowed.
854
855 Otherwise we may perform an uninitialized read
856 from this_op_costs after the `continue' statement
857 below. */
858 alt_fail = 1;
859 }
860 else
861 {
862 unsigned int regno = REGNO (op);
863 struct costs *pp = this_op_costs[i];
864 int *pp_costs = pp->cost;
865 cost_classes_t cost_classes_ptr = regno_cost_classes[regno];
866 enum reg_class *cost_classes = cost_classes_ptr->classes;
867 bool in_p = recog_data.operand_type[i] != OP_OUT;
868 bool out_p = recog_data.operand_type[i] != OP_IN;
869 enum reg_class op_class = classes[i];
870
871 ira_init_register_move_cost_if_necessary (mode);
872 if (! in_p)
873 {
874 ira_assert (out_p);
875 if (op_class == NO_REGS)
876 {
877 mem_cost = ira_memory_move_cost[mode];
878 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
879 {
880 rclass = cost_classes[k];
881 pp_costs[k] = mem_cost[rclass][0] * frequency;
882 }
883 }
884 else
885 {
886 move_out_cost = ira_may_move_out_cost[mode];
887 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
888 {
889 rclass = cost_classes[k];
890 pp_costs[k]
891 = move_out_cost[op_class][rclass] * frequency;
892 }
893 }
894 }
895 else if (! out_p)
896 {
897 ira_assert (in_p);
898 if (op_class == NO_REGS)
899 {
900 mem_cost = ira_memory_move_cost[mode];
901 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
902 {
903 rclass = cost_classes[k];
904 pp_costs[k] = mem_cost[rclass][1] * frequency;
905 }
906 }
907 else
908 {
909 move_in_cost = ira_may_move_in_cost[mode];
910 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
911 {
912 rclass = cost_classes[k];
913 pp_costs[k]
914 = move_in_cost[rclass][op_class] * frequency;
915 }
916 }
917 }
918 else
919 {
920 if (op_class == NO_REGS)
921 {
922 mem_cost = ira_memory_move_cost[mode];
923 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
924 {
925 rclass = cost_classes[k];
926 pp_costs[k] = ((mem_cost[rclass][0]
927 + mem_cost[rclass][1])
928 * frequency);
929 }
930 }
931 else
932 {
933 move_in_cost = ira_may_move_in_cost[mode];
934 move_out_cost = ira_may_move_out_cost[mode];
935 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
936 {
937 rclass = cost_classes[k];
938 pp_costs[k] = ((move_in_cost[rclass][op_class]
939 + move_out_cost[op_class][rclass])
940 * frequency);
941 }
942 }
943 }
944
945 if (op_class == NO_REGS)
946 /* Although we don't need an insn to reload from
947 memory, accessing memory is still usually more
948 expensive than using a register. */
949 pp->mem_cost = frequency;
950 else
951 /* If the alternative actually allows memory, make
952 things a bit cheaper since we won't need an
953 extra insn to load it. */
954 pp->mem_cost
955 = ((out_p ? ira_memory_move_cost[mode][op_class][0] : 0)
956 + (in_p ? ira_memory_move_cost[mode][op_class][1] : 0)
957 - allows_mem[i]) * frequency;
958 /* If we have assigned a class to this allocno in
959 our first pass, add a cost to this alternative
960 corresponding to what we would add if this
961 allocno were not in the appropriate class. */
962 if (pref)
963 {
964 enum reg_class pref_class = pref[COST_INDEX (REGNO (op))];
965
966 if (pref_class == NO_REGS)
967 {
968 if (op_class != NO_REGS)
969 alt_cost
970 += ((out_p
971 ? ira_memory_move_cost[mode][op_class][0]
972 : 0)
973 + (in_p
974 ? ira_memory_move_cost[mode][op_class][1]
975 : 0));
976 }
977 else if (op_class == NO_REGS)
978 alt_cost
979 += ((out_p
980 ? ira_memory_move_cost[mode][pref_class][1]
981 : 0)
982 + (in_p
983 ? ira_memory_move_cost[mode][pref_class][0]
984 : 0));
985 else if (ira_reg_class_intersect[pref_class][op_class]
986 == NO_REGS)
987 alt_cost += (ira_register_move_cost
988 [mode][pref_class][op_class]);
989 }
990 }
991 }
992
993 /* Otherwise, if this alternative wins, either because we
994 have already determined that or if we have a hard
995 register of the proper class, there is no cost for this
996 alternative. */
997 else if (win || (REG_P (op)
998 && reg_fits_class_p (op, classes[i],
999 0, GET_MODE (op))))
1000 ;
1001
1002 /* If registers are valid, the cost of this alternative
1003 includes copying the object to and/or from a
1004 register. */
1005 else if (classes[i] != NO_REGS)
1006 {
1007 if (recog_data.operand_type[i] != OP_OUT)
1008 alt_cost += copy_cost (op, mode, classes[i], 1, NULL);
1009
1010 if (recog_data.operand_type[i] != OP_IN)
1011 alt_cost += copy_cost (op, mode, classes[i], 0, NULL);
1012 }
1013 /* The only other way this alternative can be used is if
1014 this is a constant that could be placed into memory. */
1015 else if (CONSTANT_P (op) && (allows_addr || allows_mem[i]))
1016 alt_cost += ira_memory_move_cost[mode][classes[i]][1];
1017 else
1018 alt_fail = 1;
1019 }
1020
1021 if (alt_fail)
1022 continue;
1023
1024 op_cost_add = alt_cost * frequency;
1025 /* Finally, update the costs with the information we've
1026 calculated about this alternative. */
1027 for (i = 0; i < n_ops; i++)
1028 if (REG_P (ops[i]) && REGNO (ops[i]) >= FIRST_PSEUDO_REGISTER)
1029 {
1030 struct costs *pp = op_costs[i], *qq = this_op_costs[i];
1031 int *pp_costs = pp->cost, *qq_costs = qq->cost;
1032 int scale = 1 + (recog_data.operand_type[i] == OP_INOUT);
1033 cost_classes_t cost_classes_ptr
1034 = regno_cost_classes[REGNO (ops[i])];
1035
1036 pp->mem_cost = MIN (pp->mem_cost,
1037 (qq->mem_cost + op_cost_add) * scale);
1038
1039 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
1040 pp_costs[k]
1041 = MIN (pp_costs[k], (qq_costs[k] + op_cost_add) * scale);
1042 }
1043 }
1044
1045 if (allocno_p)
1046 for (i = 0; i < n_ops; i++)
1047 {
1048 ira_allocno_t a;
1049 rtx op = ops[i];
1050
1051 if (! REG_P (op) || REGNO (op) < FIRST_PSEUDO_REGISTER)
1052 continue;
1053 a = ira_curr_regno_allocno_map [REGNO (op)];
1054 if (! ALLOCNO_BAD_SPILL_P (a) && insn_allows_mem[i] == 0)
1055 ALLOCNO_BAD_SPILL_P (a) = true;
1056 }
1057
1058 }
1059
1060 \f
1061
1062 /* Wrapper around REGNO_OK_FOR_INDEX_P, to allow pseudo registers. */
1063 static inline bool
1064 ok_for_index_p_nonstrict (rtx reg)
1065 {
1066 unsigned regno = REGNO (reg);
1067
1068 return regno >= FIRST_PSEUDO_REGISTER || REGNO_OK_FOR_INDEX_P (regno);
1069 }
1070
1071 /* A version of regno_ok_for_base_p for use here, when all
1072 pseudo-registers should count as OK. Arguments as for
1073 regno_ok_for_base_p. */
1074 static inline bool
1075 ok_for_base_p_nonstrict (rtx reg, machine_mode mode, addr_space_t as,
1076 enum rtx_code outer_code, enum rtx_code index_code)
1077 {
1078 unsigned regno = REGNO (reg);
1079
1080 if (regno >= FIRST_PSEUDO_REGISTER)
1081 return true;
1082 return ok_for_base_p_1 (regno, mode, as, outer_code, index_code);
1083 }
1084
1085 /* Record the pseudo registers we must reload into hard registers in a
1086 subexpression of a memory address, X.
1087
1088 If CONTEXT is 0, we are looking at the base part of an address,
1089 otherwise we are looking at the index part.
1090
1091 MODE and AS are the mode and address space of the memory reference;
1092 OUTER_CODE and INDEX_CODE give the context that the rtx appears in.
1093 These four arguments are passed down to base_reg_class.
1094
1095 SCALE is twice the amount to multiply the cost by (it is twice so
1096 we can represent half-cost adjustments). */
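/* (For example, when a PLUS address could use either operand as the
   base or the index, each of the four possibilities is recorded below
   with SCALE / 2, i.e. with half the usual weight, while
   auto-increment and auto-modify addresses use 2 * SCALE because a
   misplaced allocno there costs two extra insns.)  */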
1097 static void
1098 record_address_regs (machine_mode mode, addr_space_t as, rtx x,
1099 int context, enum rtx_code outer_code,
1100 enum rtx_code index_code, int scale)
1101 {
1102 enum rtx_code code = GET_CODE (x);
1103 enum reg_class rclass;
1104
1105 if (context == 1)
1106 rclass = INDEX_REG_CLASS;
1107 else
1108 rclass = base_reg_class (mode, as, outer_code, index_code);
1109
1110 switch (code)
1111 {
1112 case CONST_INT:
1113 case CONST:
1114 case CC0:
1115 case PC:
1116 case SYMBOL_REF:
1117 case LABEL_REF:
1118 return;
1119
1120 case PLUS:
1121 /* When we have an address that is a sum, we must determine
1122 whether registers are "base" or "index" regs. If there is a
1123 sum of two registers, we must choose one to be the "base".
1124 Luckily, we can use the REG_POINTER to make a good choice
1125 most of the time. We only need to do this on machines that
1126 can have two registers in an address and where the base and
1127 index register classes are different.
1128
1129 ??? This code used to set REGNO_POINTER_FLAG in some cases,
1130 but that seems bogus since it should only be set when we are
1131 sure the register is being used as a pointer. */
1132 {
1133 rtx arg0 = XEXP (x, 0);
1134 rtx arg1 = XEXP (x, 1);
1135 enum rtx_code code0 = GET_CODE (arg0);
1136 enum rtx_code code1 = GET_CODE (arg1);
1137
1138 /* Look inside subregs. */
1139 if (code0 == SUBREG)
1140 arg0 = SUBREG_REG (arg0), code0 = GET_CODE (arg0);
1141 if (code1 == SUBREG)
1142 arg1 = SUBREG_REG (arg1), code1 = GET_CODE (arg1);
1143
1144 /* If this machine only allows one register per address, it
1145 must be in the first operand. */
1146 if (MAX_REGS_PER_ADDRESS == 1)
1147 record_address_regs (mode, as, arg0, 0, PLUS, code1, scale);
1148
1149 /* If index and base registers are the same on this machine,
1150 just record registers in any non-constant operands. We
1151 assume here, as well as in the tests below, that all
1152 addresses are in canonical form. */
1153 else if (INDEX_REG_CLASS
1154 == base_reg_class (VOIDmode, as, PLUS, SCRATCH))
1155 {
1156 record_address_regs (mode, as, arg0, context, PLUS, code1, scale);
1157 if (! CONSTANT_P (arg1))
1158 record_address_regs (mode, as, arg1, context, PLUS, code0, scale);
1159 }
1160
1161 /* If the second operand is a constant integer, it doesn't
1162 change what class the first operand must be. */
1163 else if (CONST_SCALAR_INT_P (arg1))
1164 record_address_regs (mode, as, arg0, context, PLUS, code1, scale);
1165 /* If the second operand is a symbolic constant, the first
1166 operand must be an index register. */
1167 else if (code1 == SYMBOL_REF || code1 == CONST || code1 == LABEL_REF)
1168 record_address_regs (mode, as, arg0, 1, PLUS, code1, scale);
1169 /* If both operands are registers but one is already a hard
1170 register of index or reg-base class, give the other the
1171 class that the hard register is not. */
1172 else if (code0 == REG && code1 == REG
1173 && REGNO (arg0) < FIRST_PSEUDO_REGISTER
1174 && (ok_for_base_p_nonstrict (arg0, mode, as, PLUS, REG)
1175 || ok_for_index_p_nonstrict (arg0)))
1176 record_address_regs (mode, as, arg1,
1177 ok_for_base_p_nonstrict (arg0, mode, as,
1178 PLUS, REG) ? 1 : 0,
1179 PLUS, REG, scale);
1180 else if (code0 == REG && code1 == REG
1181 && REGNO (arg1) < FIRST_PSEUDO_REGISTER
1182 && (ok_for_base_p_nonstrict (arg1, mode, as, PLUS, REG)
1183 || ok_for_index_p_nonstrict (arg1)))
1184 record_address_regs (mode, as, arg0,
1185 ok_for_base_p_nonstrict (arg1, mode, as,
1186 PLUS, REG) ? 1 : 0,
1187 PLUS, REG, scale);
1188 /* If one operand is known to be a pointer, it must be the
1189 base with the other operand the index. Likewise if the
1190 other operand is a MULT. */
1191 else if ((code0 == REG && REG_POINTER (arg0)) || code1 == MULT)
1192 {
1193 record_address_regs (mode, as, arg0, 0, PLUS, code1, scale);
1194 record_address_regs (mode, as, arg1, 1, PLUS, code0, scale);
1195 }
1196 else if ((code1 == REG && REG_POINTER (arg1)) || code0 == MULT)
1197 {
1198 record_address_regs (mode, as, arg0, 1, PLUS, code1, scale);
1199 record_address_regs (mode, as, arg1, 0, PLUS, code0, scale);
1200 }
1201 /* Otherwise, count equal chances that each might be a base or
1202 index register. This case should be rare. */
1203 else
1204 {
1205 record_address_regs (mode, as, arg0, 0, PLUS, code1, scale / 2);
1206 record_address_regs (mode, as, arg0, 1, PLUS, code1, scale / 2);
1207 record_address_regs (mode, as, arg1, 0, PLUS, code0, scale / 2);
1208 record_address_regs (mode, as, arg1, 1, PLUS, code0, scale / 2);
1209 }
1210 }
1211 break;
1212
1213 /* Double the importance of an allocno that is incremented or
1214 decremented, since it would take two extra insns if it ends
1215 up in the wrong place. */
1216 case POST_MODIFY:
1217 case PRE_MODIFY:
1218 record_address_regs (mode, as, XEXP (x, 0), 0, code,
1219 GET_CODE (XEXP (XEXP (x, 1), 1)), 2 * scale);
1220 if (REG_P (XEXP (XEXP (x, 1), 1)))
1221 record_address_regs (mode, as, XEXP (XEXP (x, 1), 1), 1, code, REG,
1222 2 * scale);
1223 break;
1224
1225 case POST_INC:
1226 case PRE_INC:
1227 case POST_DEC:
1228 case PRE_DEC:
1229 /* Double the importance of an allocno that is incremented or
1230 decremented, since it would take two extra insns if it ends
1231 up in the wrong place. */
1232 record_address_regs (mode, as, XEXP (x, 0), 0, code, SCRATCH, 2 * scale);
1233 break;
1234
1235 case REG:
1236 {
1237 struct costs *pp;
1238 int *pp_costs;
1239 enum reg_class i;
1240 int k, regno, add_cost;
1241 cost_classes_t cost_classes_ptr;
1242 enum reg_class *cost_classes;
1243 move_table *move_in_cost;
1244
1245 if (REGNO (x) < FIRST_PSEUDO_REGISTER)
1246 break;
1247
1248 regno = REGNO (x);
1249 if (allocno_p)
1250 ALLOCNO_BAD_SPILL_P (ira_curr_regno_allocno_map[regno]) = true;
1251 pp = COSTS (costs, COST_INDEX (regno));
1252 add_cost = (ira_memory_move_cost[Pmode][rclass][1] * scale) / 2;
1253 if (INT_MAX - add_cost < pp->mem_cost)
1254 pp->mem_cost = INT_MAX;
1255 else
1256 pp->mem_cost += add_cost;
1257 cost_classes_ptr = regno_cost_classes[regno];
1258 cost_classes = cost_classes_ptr->classes;
1259 pp_costs = pp->cost;
1260 ira_init_register_move_cost_if_necessary (Pmode);
1261 move_in_cost = ira_may_move_in_cost[Pmode];
1262 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
1263 {
1264 i = cost_classes[k];
1265 add_cost = (move_in_cost[i][rclass] * scale) / 2;
1266 if (INT_MAX - add_cost < pp_costs[k])
1267 pp_costs[k] = INT_MAX;
1268 else
1269 pp_costs[k] += add_cost;
1270 }
1271 }
1272 break;
1273
1274 default:
1275 {
1276 const char *fmt = GET_RTX_FORMAT (code);
1277 int i;
1278 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1279 if (fmt[i] == 'e')
1280 record_address_regs (mode, as, XEXP (x, i), context, code, SCRATCH,
1281 scale);
1282 }
1283 }
1284 }
1285
1286 \f
1287
1288 /* Calculate the costs of insn operands. */
1289 static void
1290 record_operand_costs (rtx_insn *insn, enum reg_class *pref)
1291 {
1292 const char *constraints[MAX_RECOG_OPERANDS];
1293 machine_mode modes[MAX_RECOG_OPERANDS];
1294 rtx ops[MAX_RECOG_OPERANDS];
1295 rtx set;
1296 int i;
1297
1298 for (i = 0; i < recog_data.n_operands; i++)
1299 {
1300 constraints[i] = recog_data.constraints[i];
1301 modes[i] = recog_data.operand_mode[i];
1302 }
1303
1304 /* If we get here, we are set up to record the costs of all the
1305 operands for this insn. Start by initializing the costs. Then
1306 handle any address registers. Finally record the desired classes
1307 for any allocnos, doing it twice if some pair of operands are
1308 commutative. */
1309 for (i = 0; i < recog_data.n_operands; i++)
1310 {
1311 memcpy (op_costs[i], init_cost, struct_costs_size);
1312
1313 ops[i] = recog_data.operand[i];
1314 if (GET_CODE (recog_data.operand[i]) == SUBREG)
1315 recog_data.operand[i] = SUBREG_REG (recog_data.operand[i]);
1316
1317 if (MEM_P (recog_data.operand[i]))
1318 record_address_regs (GET_MODE (recog_data.operand[i]),
1319 MEM_ADDR_SPACE (recog_data.operand[i]),
1320 XEXP (recog_data.operand[i], 0),
1321 0, MEM, SCRATCH, frequency * 2);
1322 else if (constraints[i][0] == 'p'
1323 || (insn_extra_address_constraint
1324 (lookup_constraint (constraints[i]))))
1325 record_address_regs (VOIDmode, ADDR_SPACE_GENERIC,
1326 recog_data.operand[i], 0, ADDRESS, SCRATCH,
1327 frequency * 2);
1328 }
1329
1330 /* Check for commutative operands in a separate loop so everything
1331 will have been initialized. We must do this even if one operand is
1332 a constant--see addsi3 in m68k.md. */
1333 for (i = 0; i < (int) recog_data.n_operands - 1; i++)
1334 if (constraints[i][0] == '%')
1335 {
1336 const char *xconstraints[MAX_RECOG_OPERANDS];
1337 int j;
1338
1339 /* Handle commutative operands by swapping the constraints.
1340 We assume the modes are the same. */
1341 for (j = 0; j < recog_data.n_operands; j++)
1342 xconstraints[j] = constraints[j];
1343
1344 xconstraints[i] = constraints[i+1];
1345 xconstraints[i+1] = constraints[i];
1346 record_reg_classes (recog_data.n_alternatives, recog_data.n_operands,
1347 recog_data.operand, modes,
1348 xconstraints, insn, pref);
1349 }
1350 record_reg_classes (recog_data.n_alternatives, recog_data.n_operands,
1351 recog_data.operand, modes,
1352 constraints, insn, pref);
1353
1354 /* If this insn is a single set copying operand 1 to operand 0 and
1355 one operand is an allocno with the other a hard reg or an allocno
1356 that prefers a hard register that is in its own register class
1357 then we may want to adjust the cost of that register class to -1.
1358
1359 Avoid the adjustment if the source does not die to avoid
1360 stressing of register allocator by preferencing two colliding
1361 registers into single class.
1362
1363 Also avoid the adjustment if a copy between hard registers of the
1364 class is expensive (ten times the cost of a default copy is
1365 considered arbitrarily expensive). This avoids losing when the
1366 preferred class is very expensive as the source of a copy
1367 instruction. */
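/* (Concretely: when the conditions below hold, the cost of the hard
   register's class is set to -frequency, so tying the pseudo to that
   hard register is treated as a gain rather than merely as being
   cost-free.)  */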
1368 if ((set = single_set (insn)) != NULL_RTX
1369 /* In rare cases the single set insn might have fewer than 2
1370 operands, as the source can be a fixed special reg. */
1371 && recog_data.n_operands > 1
1372 && ops[0] == SET_DEST (set) && ops[1] == SET_SRC (set))
1373 {
1374 int regno, other_regno;
1375 rtx dest = SET_DEST (set);
1376 rtx src = SET_SRC (set);
1377
1378 if (GET_CODE (dest) == SUBREG
1379 && (GET_MODE_SIZE (GET_MODE (dest))
1380 == GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest)))))
1381 dest = SUBREG_REG (dest);
1382 if (GET_CODE (src) == SUBREG
1383 && (GET_MODE_SIZE (GET_MODE (src))
1384 == GET_MODE_SIZE (GET_MODE (SUBREG_REG (src)))))
1385 src = SUBREG_REG (src);
1386 if (REG_P (src) && REG_P (dest)
1387 && find_regno_note (insn, REG_DEAD, REGNO (src))
1388 && (((regno = REGNO (src)) >= FIRST_PSEUDO_REGISTER
1389 && (other_regno = REGNO (dest)) < FIRST_PSEUDO_REGISTER)
1390 || ((regno = REGNO (dest)) >= FIRST_PSEUDO_REGISTER
1391 && (other_regno = REGNO (src)) < FIRST_PSEUDO_REGISTER)))
1392 {
1393 machine_mode mode = GET_MODE (src);
1394 cost_classes_t cost_classes_ptr = regno_cost_classes[regno];
1395 enum reg_class *cost_classes = cost_classes_ptr->classes;
1396 reg_class_t rclass;
1397 int k, nr;
1398
1399 i = regno == (int) REGNO (src) ? 1 : 0;
1400 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
1401 {
1402 rclass = cost_classes[k];
1403 if (TEST_HARD_REG_BIT (reg_class_contents[rclass], other_regno)
1404 && (reg_class_size[(int) rclass]
1405 == ira_reg_class_max_nregs [(int) rclass][(int) mode]))
1406 {
1407 if (reg_class_size[rclass] == 1)
1408 op_costs[i]->cost[k] = -frequency;
1409 else
1410 {
1411 for (nr = 0;
1412 nr < hard_regno_nregs[other_regno][mode];
1413 nr++)
1414 if (! TEST_HARD_REG_BIT (reg_class_contents[rclass],
1415 other_regno + nr))
1416 break;
1417
1418 if (nr == hard_regno_nregs[other_regno][mode])
1419 op_costs[i]->cost[k] = -frequency;
1420 }
1421 }
1422 }
1423 }
1424 }
1425 }
1426
1427 \f
1428
1429 /* Process one insn INSN. Scan it and record each time it would save
1430 code to put a certain allocno in a certain class. Return the last
1431 insn processed, so that the scan can be continued from there. */
1432 static rtx_insn *
1433 scan_one_insn (rtx_insn *insn)
1434 {
1435 enum rtx_code pat_code;
1436 rtx set, note;
1437 int i, k;
1438 bool counted_mem;
1439
1440 if (!NONDEBUG_INSN_P (insn))
1441 return insn;
1442
1443 pat_code = GET_CODE (PATTERN (insn));
1444 if (pat_code == USE || pat_code == CLOBBER || pat_code == ASM_INPUT)
1445 return insn;
1446
1447 counted_mem = false;
1448 set = single_set (insn);
1449 extract_insn (insn);
1450
1451 /* If this insn loads a parameter from its stack slot, then it
1452 represents a savings, rather than a cost, if the parameter is
1453 stored in memory. Record this fact.
1454
1455 Similarly if we're loading other constants from memory (constant
1456 pool, TOC references, small data areas, etc) and this is the only
1457 assignment to the destination pseudo.
1458
1459 Don't do this if SET_SRC (set) isn't a general operand; if it is
1460 a memory reference requiring special instructions to load it,
1461 decreasing mem_cost might result in it being loaded using the
1462 specialized instruction into a register, then stored into the stack
1463 and loaded again from the stack. See PR52208.
1464
1465 Don't do this if SET_SRC (set) has side effects. See PR56124. */
1466 if (set != 0 && REG_P (SET_DEST (set)) && MEM_P (SET_SRC (set))
1467 && (note = find_reg_note (insn, REG_EQUIV, NULL_RTX)) != NULL_RTX
1468 && ((MEM_P (XEXP (note, 0))
1469 && !side_effects_p (SET_SRC (set)))
1470 || (CONSTANT_P (XEXP (note, 0))
1471 && targetm.legitimate_constant_p (GET_MODE (SET_DEST (set)),
1472 XEXP (note, 0))
1473 && REG_N_SETS (REGNO (SET_DEST (set))) == 1))
1474 && general_operand (SET_SRC (set), GET_MODE (SET_SRC (set))))
1475 {
1476 enum reg_class cl = GENERAL_REGS;
1477 rtx reg = SET_DEST (set);
1478 int num = COST_INDEX (REGNO (reg));
1479
1480 COSTS (costs, num)->mem_cost
1481 -= ira_memory_move_cost[GET_MODE (reg)][cl][1] * frequency;
1482 record_address_regs (GET_MODE (SET_SRC (set)),
1483 MEM_ADDR_SPACE (SET_SRC (set)),
1484 XEXP (SET_SRC (set), 0), 0, MEM, SCRATCH,
1485 frequency * 2);
1486 counted_mem = true;
1487 }
1488
1489 record_operand_costs (insn, pref);
1490
1491 /* Now add the cost for each operand to the total costs for its
1492 allocno. */
1493 for (i = 0; i < recog_data.n_operands; i++)
1494 if (REG_P (recog_data.operand[i])
1495 && REGNO (recog_data.operand[i]) >= FIRST_PSEUDO_REGISTER)
1496 {
1497 int regno = REGNO (recog_data.operand[i]);
1498 struct costs *p = COSTS (costs, COST_INDEX (regno));
1499 struct costs *q = op_costs[i];
1500 int *p_costs = p->cost, *q_costs = q->cost;
1501 cost_classes_t cost_classes_ptr = regno_cost_classes[regno];
1502 int add_cost;
1503
1504 /* If we already accounted for the memory "cost" above, don't
1505 do so again. */
1506 if (!counted_mem)
1507 {
1508 add_cost = q->mem_cost;
1509 if (add_cost > 0 && INT_MAX - add_cost < p->mem_cost)
1510 p->mem_cost = INT_MAX;
1511 else
1512 p->mem_cost += add_cost;
1513 }
1514 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
1515 {
1516 add_cost = q_costs[k];
1517 if (add_cost > 0 && INT_MAX - add_cost < p_costs[k])
1518 p_costs[k] = INT_MAX;
1519 else
1520 p_costs[k] += add_cost;
1521 }
1522 }
1523
1524 return insn;
1525 }
1526
1527 \f
1528
1529 /* Print allocno costs to file F. */
1530 static void
1531 print_allocno_costs (FILE *f)
1532 {
1533 int k;
1534 ira_allocno_t a;
1535 ira_allocno_iterator ai;
1536
1537 ira_assert (allocno_p);
1538 fprintf (f, "\n");
1539 FOR_EACH_ALLOCNO (a, ai)
1540 {
1541 int i, rclass;
1542 basic_block bb;
1543 int regno = ALLOCNO_REGNO (a);
1544 cost_classes_t cost_classes_ptr = regno_cost_classes[regno];
1545 enum reg_class *cost_classes = cost_classes_ptr->classes;
1546
1547 i = ALLOCNO_NUM (a);
1548 fprintf (f, " a%d(r%d,", i, regno);
1549 if ((bb = ALLOCNO_LOOP_TREE_NODE (a)->bb) != NULL)
1550 fprintf (f, "b%d", bb->index);
1551 else
1552 fprintf (f, "l%d", ALLOCNO_LOOP_TREE_NODE (a)->loop_num);
1553 fprintf (f, ") costs:");
1554 for (k = 0; k < cost_classes_ptr->num; k++)
1555 {
1556 rclass = cost_classes[k];
1557 fprintf (f, " %s:%d", reg_class_names[rclass],
1558 COSTS (costs, i)->cost[k]);
1559 if (flag_ira_region == IRA_REGION_ALL
1560 || flag_ira_region == IRA_REGION_MIXED)
1561 fprintf (f, ",%d", COSTS (total_allocno_costs, i)->cost[k]);
1562 }
1563 fprintf (f, " MEM:%i", COSTS (costs, i)->mem_cost);
1564 if (flag_ira_region == IRA_REGION_ALL
1565 || flag_ira_region == IRA_REGION_MIXED)
1566 fprintf (f, ",%d", COSTS (total_allocno_costs, i)->mem_cost);
1567 fprintf (f, "\n");
1568 }
1569 }
1570
1571 /* Print pseudo costs to file F. */
1572 static void
1573 print_pseudo_costs (FILE *f)
1574 {
1575 int regno, k;
1576 int rclass;
1577 cost_classes_t cost_classes_ptr;
1578 enum reg_class *cost_classes;
1579
1580 ira_assert (! allocno_p);
1581 fprintf (f, "\n");
1582 for (regno = max_reg_num () - 1; regno >= FIRST_PSEUDO_REGISTER; regno--)
1583 {
1584 if (REG_N_REFS (regno) <= 0)
1585 continue;
1586 cost_classes_ptr = regno_cost_classes[regno];
1587 cost_classes = cost_classes_ptr->classes;
1588 fprintf (f, " r%d costs:", regno);
1589 for (k = 0; k < cost_classes_ptr->num; k++)
1590 {
1591 rclass = cost_classes[k];
1592 fprintf (f, " %s:%d", reg_class_names[rclass],
1593 COSTS (costs, regno)->cost[k]);
1594 }
1595 fprintf (f, " MEM:%i\n", COSTS (costs, regno)->mem_cost);
1596 }
1597 }
1598
1599 /* Traverse the insns in basic block BB to update the allocno
1600 costs. */
1601 static void
1602 process_bb_for_costs (basic_block bb)
1603 {
1604 rtx_insn *insn;
1605
1606 frequency = REG_FREQ_FROM_BB (bb);
1607 if (frequency == 0)
1608 frequency = 1;
1609 FOR_BB_INSNS (bb, insn)
1610 insn = scan_one_insn (insn);
1611 }
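/* Every cost recorded while scanning a block is multiplied by that
   block's execution frequency (FREQUENCY above), so insns in hot
   blocks dominate the final class choice.  */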
1612
1613 /* Traverse the BB represented by LOOP_TREE_NODE to update the allocno
1614 costs. */
1615 static void
1616 process_bb_node_for_costs (ira_loop_tree_node_t loop_tree_node)
1617 {
1618 basic_block bb;
1619
1620 bb = loop_tree_node->bb;
1621 if (bb != NULL)
1622 process_bb_for_costs (bb);
1623 }
1624
1625 /* Find costs of register classes and memory for allocnos or pseudos
1626 and their best costs. Set up preferred, alternative and allocno
1627 classes for pseudos. */
1628 static void
1629 find_costs_and_classes (FILE *dump_file)
1630 {
1631 int i, k, start, max_cost_classes_num;
1632 int pass;
1633 basic_block bb;
1634 enum reg_class *regno_best_class, new_class;
1635
1636 init_recog ();
1637 regno_best_class
1638 = (enum reg_class *) ira_allocate (max_reg_num ()
1639 * sizeof (enum reg_class));
1640 for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--)
1641 regno_best_class[i] = NO_REGS;
1642 if (!resize_reg_info () && allocno_p
1643 && pseudo_classes_defined_p && flag_expensive_optimizations)
1644 {
1645 ira_allocno_t a;
1646 ira_allocno_iterator ai;
1647
1648 pref = pref_buffer;
1649 max_cost_classes_num = 1;
1650 FOR_EACH_ALLOCNO (a, ai)
1651 {
1652 pref[ALLOCNO_NUM (a)] = reg_preferred_class (ALLOCNO_REGNO (a));
1653 setup_regno_cost_classes_by_aclass
1654 (ALLOCNO_REGNO (a), pref[ALLOCNO_NUM (a)]);
1655 max_cost_classes_num
1656 = MAX (max_cost_classes_num,
1657 regno_cost_classes[ALLOCNO_REGNO (a)]->num);
1658 }
1659 start = 1;
1660 }
1661 else
1662 {
1663 pref = NULL;
1664 max_cost_classes_num = ira_important_classes_num;
1665 for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--)
1666 if (regno_reg_rtx[i] != NULL_RTX)
1667 setup_regno_cost_classes_by_mode (i, PSEUDO_REGNO_MODE (i));
1668 else
1669 setup_regno_cost_classes_by_aclass (i, ALL_REGS);
1670 start = 0;
1671 }
1672 if (allocno_p)
1673 /* Clear the flag for the next compiled function. */
1674 pseudo_classes_defined_p = false;
1675 /* Normally we scan the insns once and determine the best class to
1676 use for each allocno. However, if -fexpensive-optimizations is
1677 on, we do so twice, the second time using the tentative best
1678 classes to guide the selection. */
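/* (On the second pass each pseudo's cost classes are set up from the
   best class found on the first pass, via
   setup_regno_cost_classes_by_aclass, which typically reduces their
   number and hence struct_costs_size.)  */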
1679 for (pass = start; pass <= flag_expensive_optimizations; pass++)
1680 {
1681 if ((!allocno_p || internal_flag_ira_verbose > 0) && dump_file)
1682 fprintf (dump_file,
1683 "\nPass %i for finding pseudo/allocno costs\n\n", pass);
1684
1685 if (pass != start)
1686 {
1687 max_cost_classes_num = 1;
1688 for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--)
1689 {
1690 setup_regno_cost_classes_by_aclass (i, regno_best_class[i]);
1691 max_cost_classes_num
1692 = MAX (max_cost_classes_num, regno_cost_classes[i]->num);
1693 }
1694 }
1695
1696 struct_costs_size
1697 = sizeof (struct costs) + sizeof (int) * (max_cost_classes_num - 1);
1698 /* Zero out our accumulation of the cost of each class for each
1699 allocno. */
1700 memset (costs, 0, cost_elements_num * struct_costs_size);
1701
1702 if (allocno_p)
1703 {
1704 /* Scan the instructions and record each time it would save code
1705 to put a certain allocno in a certain class. */
1706 ira_traverse_loop_tree (true, ira_loop_tree_root,
1707 process_bb_node_for_costs, NULL);
1708
1709 memcpy (total_allocno_costs, costs,
1710 max_struct_costs_size * ira_allocnos_num);
1711 }
1712 else
1713 {
1714 basic_block bb;
1715
1716 FOR_EACH_BB_FN (bb, cfun)
1717 process_bb_for_costs (bb);
1718 }
1719
1720 if (pass == 0)
1721 pref = pref_buffer;
1722
1723 /* Now for each allocno look at how desirable each class is and
1724 find which class is preferred. */
1725 for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--)
1726 {
1727 ira_allocno_t a, parent_a;
1728 int rclass, a_num, parent_a_num, add_cost;
1729 ira_loop_tree_node_t parent;
1730 int best_cost, allocno_cost;
1731 enum reg_class best, alt_class;
1732 cost_classes_t cost_classes_ptr = regno_cost_classes[i];
1733 enum reg_class *cost_classes = cost_classes_ptr->classes;
1734 int *i_costs = temp_costs->cost;
1735 int i_mem_cost;
1736 int equiv_savings = regno_equiv_gains[i];
1737
1738 if (! allocno_p)
1739 {
1740 if (regno_reg_rtx[i] == NULL_RTX)
1741 continue;
1742 memcpy (temp_costs, COSTS (costs, i), struct_costs_size);
1743 i_mem_cost = temp_costs->mem_cost;
1744 }
1745 else
1746 {
1747 if (ira_regno_allocno_map[i] == NULL)
1748 continue;
1749 memset (temp_costs, 0, struct_costs_size);
1750 i_mem_cost = 0;
1751 	      /* Find the combined cost of all allocnos with the same regno.  */
1752 for (a = ira_regno_allocno_map[i];
1753 a != NULL;
1754 a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
1755 {
1756 int *a_costs, *p_costs;
1757
1758 a_num = ALLOCNO_NUM (a);
1759 if ((flag_ira_region == IRA_REGION_ALL
1760 || flag_ira_region == IRA_REGION_MIXED)
1761 && (parent = ALLOCNO_LOOP_TREE_NODE (a)->parent) != NULL
1762 && (parent_a = parent->regno_allocno_map[i]) != NULL
1763 /* There are no caps yet. */
1764 && bitmap_bit_p (ALLOCNO_LOOP_TREE_NODE
1765 (a)->border_allocnos,
1766 ALLOCNO_NUM (a)))
1767 {
1768 /* Propagate costs to upper levels in the region
1769 tree. */
1770 parent_a_num = ALLOCNO_NUM (parent_a);
1771 a_costs = COSTS (total_allocno_costs, a_num)->cost;
1772 p_costs = COSTS (total_allocno_costs, parent_a_num)->cost;
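		      /* Add the child allocno's accumulated costs into the
			 parent's, saturating at INT_MAX to avoid overflow.  */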
1773 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
1774 {
1775 add_cost = a_costs[k];
1776 if (add_cost > 0 && INT_MAX - add_cost < p_costs[k])
1777 p_costs[k] = INT_MAX;
1778 else
1779 p_costs[k] += add_cost;
1780 }
1781 add_cost = COSTS (total_allocno_costs, a_num)->mem_cost;
1782 if (add_cost > 0
1783 && (INT_MAX - add_cost
1784 < COSTS (total_allocno_costs,
1785 parent_a_num)->mem_cost))
1786 COSTS (total_allocno_costs, parent_a_num)->mem_cost
1787 = INT_MAX;
1788 else
1789 COSTS (total_allocno_costs, parent_a_num)->mem_cost
1790 += add_cost;
1791
1792 if (i >= first_moveable_pseudo && i < last_moveable_pseudo)
1793 COSTS (total_allocno_costs, parent_a_num)->mem_cost = 0;
1794 }
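		  /* Accumulate this allocno's own costs into the per-regno
		     totals, again saturating at INT_MAX.  */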
1795 a_costs = COSTS (costs, a_num)->cost;
1796 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
1797 {
1798 add_cost = a_costs[k];
1799 if (add_cost > 0 && INT_MAX - add_cost < i_costs[k])
1800 i_costs[k] = INT_MAX;
1801 else
1802 i_costs[k] += add_cost;
1803 }
1804 add_cost = COSTS (costs, a_num)->mem_cost;
1805 if (add_cost > 0 && INT_MAX - add_cost < i_mem_cost)
1806 i_mem_cost = INT_MAX;
1807 else
1808 i_mem_cost += add_cost;
1809 }
1810 }
1811 if (i >= first_moveable_pseudo && i < last_moveable_pseudo)
1812 i_mem_cost = 0;
1813 else if (equiv_savings < 0)
1814 i_mem_cost = -equiv_savings;
1815 else if (equiv_savings > 0)
1816 {
1817 i_mem_cost = 0;
1818 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
1819 i_costs[k] += equiv_savings;
1820 }
1821
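	  /* Start from a sentinel cost of roughly INT_MAX/2 so that any
	     ordinary class cost beats it.  */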
1822 best_cost = (1 << (HOST_BITS_PER_INT - 2)) - 1;
1823 best = ALL_REGS;
1824 alt_class = NO_REGS;
1825 	  /* Find the best common class for all allocnos with the same
1826 	     regno.  */
1827 for (k = 0; k < cost_classes_ptr->num; k++)
1828 {
1829 rclass = cost_classes[k];
1830 if (i_costs[k] < best_cost)
1831 {
1832 best_cost = i_costs[k];
1833 best = (enum reg_class) rclass;
1834 }
1835 else if (i_costs[k] == best_cost)
1836 best = ira_reg_class_subunion[best][rclass];
1837 if (pass == flag_expensive_optimizations
1838 		  /* We still prefer registers to memory even at this
1839 		     stage if their costs are the same.  The final
1840 		     decision is made when hard registers are assigned,
1841 		     once we have complete information, including more
1842 		     accurate costs that may change as hard registers are
1843 		     assigned to other pseudos, because pseudos involved
1844 		     in moves can be coalesced.  */
1845 && i_costs[k] <= i_mem_cost
1846 && (reg_class_size[reg_class_subunion[alt_class][rclass]]
1847 > reg_class_size[alt_class]))
1848 alt_class = reg_class_subunion[alt_class][rclass];
1849 }
1850 alt_class = ira_allocno_class_translate[alt_class];
1851 if (best_cost > i_mem_cost)
1852 regno_aclass[i] = NO_REGS;
1853 else if (!optimize && !targetm.class_likely_spilled_p (best))
1854 /* Registers in the alternative class are likely to need
1855 longer or slower sequences than registers in the best class.
1856 When optimizing we make some effort to use the best class
1857 over the alternative class where possible, but at -O0 we
1858 effectively give the alternative class equal weight.
1859 We then run the risk of using slower alternative registers
1860 when plenty of registers from the best class are still free.
1861 This is especially true because live ranges tend to be very
1862 short in -O0 code and so register pressure tends to be low.
1863
1864 Avoid that by ignoring the alternative class if the best
1865 class has plenty of registers. */
1866 regno_aclass[i] = best;
1867 else
1868 {
1869 	      /* Make the common class the smallest class containing both
1870 		 best and alt_class.  */
1871 regno_aclass[i]
1872 = ira_reg_class_superunion[best][alt_class];
1873 ira_assert (regno_aclass[i] != NO_REGS
1874 && ira_reg_allocno_class_p[regno_aclass[i]]);
1875 }
1876 if ((new_class
1877 = (reg_class) (targetm.ira_change_pseudo_allocno_class
1878 (i, regno_aclass[i]))) != regno_aclass[i])
1879 {
1880 regno_aclass[i] = new_class;
1881 if (hard_reg_set_subset_p (reg_class_contents[new_class],
1882 reg_class_contents[best]))
1883 best = new_class;
1884 if (hard_reg_set_subset_p (reg_class_contents[new_class],
1885 reg_class_contents[alt_class]))
1886 alt_class = new_class;
1887 }
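	  /* On the last pass, record the final preferred, alternative and
	     allocno classes for the pseudo.  */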
1888 if (pass == flag_expensive_optimizations)
1889 {
1890 if (best_cost > i_mem_cost)
1891 best = alt_class = NO_REGS;
1892 else if (best == alt_class)
1893 alt_class = NO_REGS;
1894 setup_reg_classes (i, best, alt_class, regno_aclass[i]);
1895 if ((!allocno_p || internal_flag_ira_verbose > 2)
1896 && dump_file != NULL)
1897 fprintf (dump_file,
1898 " r%d: preferred %s, alternative %s, allocno %s\n",
1899 i, reg_class_names[best], reg_class_names[alt_class],
1900 reg_class_names[regno_aclass[i]]);
1901 }
1902 regno_best_class[i] = best;
1903 if (! allocno_p)
1904 {
1905 pref[i] = best_cost > i_mem_cost ? NO_REGS : best;
1906 continue;
1907 }
1908 for (a = ira_regno_allocno_map[i];
1909 a != NULL;
1910 a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
1911 {
1912 enum reg_class aclass = regno_aclass[i];
1913 int a_num = ALLOCNO_NUM (a);
1914 int *total_a_costs = COSTS (total_allocno_costs, a_num)->cost;
1915 int *a_costs = COSTS (costs, a_num)->cost;
1916
1917 if (aclass == NO_REGS)
1918 best = NO_REGS;
1919 else
1920 {
1921 		  /* Find the best class that is a subset of the common
1922 		     class.  */
1923 best_cost = (1 << (HOST_BITS_PER_INT - 2)) - 1;
1924 allocno_cost = best_cost;
1925 best = ALL_REGS;
1926 for (k = 0; k < cost_classes_ptr->num; k++)
1927 {
1928 rclass = cost_classes[k];
1929 if (! ira_class_subset_p[rclass][aclass])
1930 continue;
1931 if (total_a_costs[k] < best_cost)
1932 {
1933 best_cost = total_a_costs[k];
1934 allocno_cost = a_costs[k];
1935 best = (enum reg_class) rclass;
1936 }
1937 else if (total_a_costs[k] == best_cost)
1938 {
1939 best = ira_reg_class_subunion[best][rclass];
1940 allocno_cost = MAX (allocno_cost, a_costs[k]);
1941 }
1942 }
1943 ALLOCNO_CLASS_COST (a) = allocno_cost;
1944 }
1945 if (internal_flag_ira_verbose > 2 && dump_file != NULL
1946 && (pass == 0 || pref[a_num] != best))
1947 {
1948 fprintf (dump_file, " a%d (r%d,", a_num, i);
1949 if ((bb = ALLOCNO_LOOP_TREE_NODE (a)->bb) != NULL)
1950 fprintf (dump_file, "b%d", bb->index);
1951 else
1952 fprintf (dump_file, "l%d",
1953 ALLOCNO_LOOP_TREE_NODE (a)->loop_num);
1954 fprintf (dump_file, ") best %s, allocno %s\n",
1955 reg_class_names[best],
1956 reg_class_names[aclass]);
1957 }
1958 pref[a_num] = best;
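	      /* If BEST has so few hard registers that a single value of
		 this mode needs all of them, the class preference is really
		 a preference for one specific hard register: record it
		 explicitly and give every cost class contained in BEST the
		 cost of the allocno class.  */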
1959 if (pass == flag_expensive_optimizations && best != aclass
1960 && ira_class_hard_regs_num[best] > 0
1961 && (ira_reg_class_max_nregs[best][ALLOCNO_MODE (a)]
1962 >= ira_class_hard_regs_num[best]))
1963 {
1964 int ind = cost_classes_ptr->index[aclass];
1965
1966 ira_assert (ind >= 0);
1967 ira_init_register_move_cost_if_necessary (ALLOCNO_MODE (a));
1968 ira_add_allocno_pref (a, ira_class_hard_regs[best][0],
1969 (a_costs[ind] - ALLOCNO_CLASS_COST (a))
1970 / (ira_register_move_cost
1971 [ALLOCNO_MODE (a)][best][aclass]));
1972 for (k = 0; k < cost_classes_ptr->num; k++)
1973 if (ira_class_subset_p[cost_classes[k]][best])
1974 a_costs[k] = a_costs[ind];
1975 }
1976 }
1977 }
1978
1979 if (internal_flag_ira_verbose > 4 && dump_file)
1980 {
1981 if (allocno_p)
1982 print_allocno_costs (dump_file);
1983 else
1984 print_pseudo_costs (dump_file);
1985 	  fprintf (dump_file, "\n");
1986 }
1987 }
1988 ira_free (regno_best_class);
1989 }
1990
1991 \f
1992
1993 /* Process moves involving hard regs to modify allocno hard
1994    register costs.  We can do this only after determining the
1995    allocno classes.  If a hard register forms a register class by
1996    itself, then moves involving that hard register are already
1997    taken into account in the class costs for the allocno.  */
1998 static void
1999 process_bb_node_for_hard_reg_moves (ira_loop_tree_node_t loop_tree_node)
2000 {
2001 int i, freq, src_regno, dst_regno, hard_regno, a_regno;
2002 bool to_p;
2003 ira_allocno_t a, curr_a;
2004 ira_loop_tree_node_t curr_loop_tree_node;
2005 enum reg_class rclass;
2006 basic_block bb;
2007 rtx_insn *insn;
2008 rtx set, src, dst;
2009
2010 bb = loop_tree_node->bb;
2011 if (bb == NULL)
2012 return;
2013 freq = REG_FREQ_FROM_BB (bb);
2014 if (freq == 0)
2015 freq = 1;
2016 FOR_BB_INSNS (bb, insn)
2017 {
2018 if (!NONDEBUG_INSN_P (insn))
2019 continue;
2020 set = single_set (insn);
2021 if (set == NULL_RTX)
2022 continue;
2023 dst = SET_DEST (set);
2024 src = SET_SRC (set);
2025 if (! REG_P (dst) || ! REG_P (src))
2026 continue;
2027 dst_regno = REGNO (dst);
2028 src_regno = REGNO (src);
2029 if (dst_regno >= FIRST_PSEUDO_REGISTER
2030 && src_regno < FIRST_PSEUDO_REGISTER)
2031 {
2032 hard_regno = src_regno;
2033 a = ira_curr_regno_allocno_map[dst_regno];
2034 to_p = true;
2035 }
2036 else if (src_regno >= FIRST_PSEUDO_REGISTER
2037 && dst_regno < FIRST_PSEUDO_REGISTER)
2038 {
2039 hard_regno = dst_regno;
2040 a = ira_curr_regno_allocno_map[src_regno];
2041 to_p = false;
2042 }
2043 else
2044 continue;
2045 rclass = ALLOCNO_CLASS (a);
2046 if (! TEST_HARD_REG_BIT (reg_class_contents[rclass], hard_regno))
2047 continue;
2048 i = ira_class_hard_reg_index[rclass][hard_regno];
2049 if (i < 0)
2050 continue;
2051 a_regno = ALLOCNO_REGNO (a);
2052 for (curr_loop_tree_node = ALLOCNO_LOOP_TREE_NODE (a);
2053 curr_loop_tree_node != NULL;
2054 curr_loop_tree_node = curr_loop_tree_node->parent)
2055 if ((curr_a = curr_loop_tree_node->regno_allocno_map[a_regno]) != NULL)
2056 ira_add_allocno_pref (curr_a, hard_regno, freq);
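      /* Decrease the hard register and conflict costs of A for this hard
	 register by the frequency-weighted cost of the register-to-register
	 move, making the hard register more attractive.  */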
2057 {
2058 int cost;
2059 enum reg_class hard_reg_class;
2060 machine_mode mode;
2061
2062 mode = ALLOCNO_MODE (a);
2063 hard_reg_class = REGNO_REG_CLASS (hard_regno);
2064 ira_init_register_move_cost_if_necessary (mode);
2065 cost = (to_p ? ira_register_move_cost[mode][hard_reg_class][rclass]
2066 : ira_register_move_cost[mode][rclass][hard_reg_class]) * freq;
2067 ira_allocate_and_set_costs (&ALLOCNO_HARD_REG_COSTS (a), rclass,
2068 ALLOCNO_CLASS_COST (a));
2069 ira_allocate_and_set_costs (&ALLOCNO_CONFLICT_HARD_REG_COSTS (a),
2070 rclass, 0);
2071 ALLOCNO_HARD_REG_COSTS (a)[i] -= cost;
2072 ALLOCNO_CONFLICT_HARD_REG_COSTS (a)[i] -= cost;
2073 ALLOCNO_CLASS_COST (a) = MIN (ALLOCNO_CLASS_COST (a),
2074 ALLOCNO_HARD_REG_COSTS (a)[i]);
2075 }
2076 }
2077 }
2078
2079 /* After we have found the hard register and memory costs for the
2080    allocnos, define their classes and adjust the hard register costs
2081    for insns that move an allocno to or from a hard register.  */
2082 static void
2083 setup_allocno_class_and_costs (void)
2084 {
2085 int i, j, n, regno, hard_regno, num;
2086 int *reg_costs;
2087 enum reg_class aclass, rclass;
2088 ira_allocno_t a;
2089 ira_allocno_iterator ai;
2090 cost_classes_t cost_classes_ptr;
2091
2092 ira_assert (allocno_p);
2093 FOR_EACH_ALLOCNO (a, ai)
2094 {
2095 i = ALLOCNO_NUM (a);
2096 regno = ALLOCNO_REGNO (a);
2097 aclass = regno_aclass[regno];
2098 cost_classes_ptr = regno_cost_classes[regno];
2099 ira_assert (pref[i] == NO_REGS || aclass != NO_REGS);
2100 ALLOCNO_MEMORY_COST (a) = COSTS (costs, i)->mem_cost;
2101 ira_set_allocno_class (a, aclass);
2102 if (aclass == NO_REGS)
2103 continue;
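      /* When the preferred class differs from the allocno class, record
	 per-hard-register costs: registers in the preferred class get the
	 class cost, the rest get the cost computed for their own register
	 class.  */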
2104 if (optimize && ALLOCNO_CLASS (a) != pref[i])
2105 {
2106 n = ira_class_hard_regs_num[aclass];
2107 ALLOCNO_HARD_REG_COSTS (a)
2108 = reg_costs = ira_allocate_cost_vector (aclass);
2109 for (j = n - 1; j >= 0; j--)
2110 {
2111 hard_regno = ira_class_hard_regs[aclass][j];
2112 if (TEST_HARD_REG_BIT (reg_class_contents[pref[i]], hard_regno))
2113 reg_costs[j] = ALLOCNO_CLASS_COST (a);
2114 else
2115 {
2116 rclass = REGNO_REG_CLASS (hard_regno);
2117 num = cost_classes_ptr->index[rclass];
2118 if (num < 0)
2119 {
2120 num = cost_classes_ptr->hard_regno_index[hard_regno];
2121 ira_assert (num >= 0);
2122 }
2123 reg_costs[j] = COSTS (costs, i)->cost[num];
2124 }
2125 }
2126 }
2127 }
2128 if (optimize)
2129 ira_traverse_loop_tree (true, ira_loop_tree_root,
2130 process_bb_node_for_hard_reg_moves, NULL);
2131 }
2132
2133 \f
2134
2135 /* Called once per compiler run to clear the cost vector pointers.  */
2136 void
2137 ira_init_costs_once (void)
2138 {
2139 int i;
2140
2141 init_cost = NULL;
2142 for (i = 0; i < MAX_RECOG_OPERANDS; i++)
2143 {
2144 op_costs[i] = NULL;
2145 this_op_costs[i] = NULL;
2146 }
2147 temp_costs = NULL;
2148 }
2149
2150 /* Free allocated temporary cost vectors. */
2151 void
2152 target_ira_int::free_ira_costs ()
2153 {
2154 int i;
2155
2156 free (x_init_cost);
2157 x_init_cost = NULL;
2158 for (i = 0; i < MAX_RECOG_OPERANDS; i++)
2159 {
2160 free (x_op_costs[i]);
2161 free (x_this_op_costs[i]);
2162 x_op_costs[i] = x_this_op_costs[i] = NULL;
2163 }
2164 free (x_temp_costs);
2165 x_temp_costs = NULL;
2166 }
2167
2168 /* This is called each time the register-related information
2169    changes.  */
2170 void
2171 ira_init_costs (void)
2172 {
2173 int i;
2174
2175 this_target_ira_int->free_ira_costs ();
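  /* struct costs already contains one cost slot, hence the -1 when
     adding room for all important classes.  */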
2176 max_struct_costs_size
2177 = sizeof (struct costs) + sizeof (int) * (ira_important_classes_num - 1);
2178 /* Don't use ira_allocate because vectors live through several IRA
2179 calls. */
2180 init_cost = (struct costs *) xmalloc (max_struct_costs_size);
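  /* Use a uniformly very large cost as the starting value.  */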
2181 init_cost->mem_cost = 1000000;
2182 for (i = 0; i < ira_important_classes_num; i++)
2183 init_cost->cost[i] = 1000000;
2184 for (i = 0; i < MAX_RECOG_OPERANDS; i++)
2185 {
2186 op_costs[i] = (struct costs *) xmalloc (max_struct_costs_size);
2187 this_op_costs[i] = (struct costs *) xmalloc (max_struct_costs_size);
2188 }
2189 temp_costs = (struct costs *) xmalloc (max_struct_costs_size);
2190 }
2191
2192 \f
2193
2194 /* Common initialization function for ira_costs and
2195 ira_set_pseudo_classes. */
2196 static void
2197 init_costs (void)
2198 {
2199 init_subregs_of_mode ();
2200 costs = (struct costs *) ira_allocate (max_struct_costs_size
2201 * cost_elements_num);
2202 pref_buffer = (enum reg_class *) ira_allocate (sizeof (enum reg_class)
2203 * cost_elements_num);
2204 regno_aclass = (enum reg_class *) ira_allocate (sizeof (enum reg_class)
2205 * max_reg_num ());
2206 regno_equiv_gains = (int *) ira_allocate (sizeof (int) * max_reg_num ());
2207 memset (regno_equiv_gains, 0, sizeof (int) * max_reg_num ());
2208 }
2209
2210 /* Common finalization function for ira_costs and
2211 ira_set_pseudo_classes. */
2212 static void
2213 finish_costs (void)
2214 {
2215 finish_subregs_of_mode ();
2216 ira_free (regno_equiv_gains);
2217 ira_free (regno_aclass);
2218 ira_free (pref_buffer);
2219 ira_free (costs);
2220 }
2221
2222 /* Entry function which determines the register class, memory cost
2223    and hard register costs for each allocno.  */
2224 void
2225 ira_costs (void)
2226 {
2227 allocno_p = true;
2228 cost_elements_num = ira_allocnos_num;
2229 init_costs ();
2230 total_allocno_costs = (struct costs *) ira_allocate (max_struct_costs_size
2231 * ira_allocnos_num);
2232 initiate_regno_cost_classes ();
2233 calculate_elim_costs_all_insns ();
2234 find_costs_and_classes (ira_dump_file);
2235 setup_allocno_class_and_costs ();
2236 finish_regno_cost_classes ();
2237 finish_costs ();
2238 ira_free (total_allocno_costs);
2239 }
2240
2241 /* Entry function which defines classes for pseudos.
2242 Set pseudo_classes_defined_p only if DEFINE_PSEUDO_CLASSES is true. */
2243 void
2244 ira_set_pseudo_classes (bool define_pseudo_classes, FILE *dump_file)
2245 {
2246 allocno_p = false;
2247 internal_flag_ira_verbose = flag_ira_verbose;
2248 cost_elements_num = max_reg_num ();
2249 init_costs ();
2250 initiate_regno_cost_classes ();
2251 find_costs_and_classes (dump_file);
2252 finish_regno_cost_classes ();
2253 if (define_pseudo_classes)
2254 pseudo_classes_defined_p = true;
2255
2256 finish_costs ();
2257 }
2258
2259 \f
2260
2261 /* Change the hard register costs for allocnos that live across
2262    function calls.  This is called only after all the calls crossed
2263    by each allocno have been found while building allocno live ranges.  */
2264 void
2265 ira_tune_allocno_costs (void)
2266 {
2267 int j, n, regno;
2268 int cost, min_cost, *reg_costs;
2269 enum reg_class aclass, rclass;
2270 machine_mode mode;
2271 ira_allocno_t a;
2272 ira_allocno_iterator ai;
2273 ira_allocno_object_iterator oi;
2274 ira_object_t obj;
2275 bool skip_p;
2276 HARD_REG_SET *crossed_calls_clobber_regs;
2277
2278 FOR_EACH_ALLOCNO (a, ai)
2279 {
2280 aclass = ALLOCNO_CLASS (a);
2281 if (aclass == NO_REGS)
2282 continue;
2283 mode = ALLOCNO_MODE (a);
2284 n = ira_class_hard_regs_num[aclass];
2285 min_cost = INT_MAX;
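      /* Penalize hard registers clobbered by the crossed calls, but only
	 when the allocno crosses calls that are not all considered cheap.  */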
2286 if (ALLOCNO_CALLS_CROSSED_NUM (a)
2287 != ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a))
2288 {
2289 ira_allocate_and_set_costs
2290 (&ALLOCNO_HARD_REG_COSTS (a), aclass,
2291 ALLOCNO_CLASS_COST (a));
2292 reg_costs = ALLOCNO_HARD_REG_COSTS (a);
2293 for (j = n - 1; j >= 0; j--)
2294 {
2295 regno = ira_class_hard_regs[aclass][j];
2296 skip_p = false;
2297 FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
2298 {
2299 if (ira_hard_reg_set_intersection_p (regno, mode,
2300 OBJECT_CONFLICT_HARD_REGS
2301 (obj)))
2302 {
2303 skip_p = true;
2304 break;
2305 }
2306 }
2307 if (skip_p)
2308 continue;
2309 rclass = REGNO_REG_CLASS (regno);
2310 cost = 0;
2311 crossed_calls_clobber_regs
2312 = &(ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (a));
2313 if (ira_hard_reg_set_intersection_p (regno, mode,
2314 *crossed_calls_clobber_regs)
2315 && (ira_hard_reg_set_intersection_p (regno, mode,
2316 call_used_reg_set)
2317 || HARD_REGNO_CALL_PART_CLOBBERED (regno, mode)))
2318 cost += (ALLOCNO_CALL_FREQ (a)
2319 * (ira_memory_move_cost[mode][rclass][0]
2320 + ira_memory_move_cost[mode][rclass][1]));
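	      /* Some targets make particular hard registers more expensive
		 to allocate; apply that extra cost, scaled by the allocno
		 frequency.  */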
2321 #ifdef IRA_HARD_REGNO_ADD_COST_MULTIPLIER
2322 cost += ((ira_memory_move_cost[mode][rclass][0]
2323 + ira_memory_move_cost[mode][rclass][1])
2324 * ALLOCNO_FREQ (a)
2325 * IRA_HARD_REGNO_ADD_COST_MULTIPLIER (regno) / 2);
2326 #endif
2327 if (INT_MAX - cost < reg_costs[j])
2328 reg_costs[j] = INT_MAX;
2329 else
2330 reg_costs[j] += cost;
2331 if (min_cost > reg_costs[j])
2332 min_cost = reg_costs[j];
2333 }
2334 }
2335 if (min_cost != INT_MAX)
2336 ALLOCNO_CLASS_COST (a) = min_cost;
2337
2338 /* Some targets allow pseudos to be allocated to unaligned sequences
2339 of hard registers. However, selecting an unaligned sequence can
2340 unnecessarily restrict later allocations. So increase the cost of
2341 unaligned hard regs to encourage the use of aligned hard regs. */
2342 {
2343 const int nregs = ira_reg_class_max_nregs[aclass][ALLOCNO_MODE (a)];
2344
2345 if (nregs > 1)
2346 {
2347 ira_allocate_and_set_costs
2348 (&ALLOCNO_HARD_REG_COSTS (a), aclass, ALLOCNO_CLASS_COST (a));
2349 reg_costs = ALLOCNO_HARD_REG_COSTS (a);
2350 for (j = n - 1; j >= 0; j--)
2351 {
2352 regno = ira_non_ordered_class_hard_regs[aclass][j];
2353 if ((regno % nregs) != 0)
2354 {
2355 int index = ira_class_hard_reg_index[aclass][regno];
2356 ira_assert (index != -1);
2357 reg_costs[index] += ALLOCNO_FREQ (a);
2358 }
2359 }
2360 }
2361 }
2362 }
2363 }
2364
2365 /* Add COST to the estimated gain for eliminating REGNO with its
2366 equivalence. If COST is zero, record that no such elimination is
2367 possible. */
2368
2369 void
2370 ira_adjust_equiv_reg_cost (unsigned regno, int cost)
2371 {
2372 if (cost == 0)
2373 regno_equiv_gains[regno] = 0;
2374 else
2375 regno_equiv_gains[regno] += cost;
2376 }
2377
2378 void
2379 ira_costs_c_finalize (void)
2380 {
2381 this_target_ira_int->free_ira_costs ();
2382 }