1 /* Integrated Register Allocator (IRA) intercommunication header file.
2 Copyright (C) 2006, 2007, 2008, 2009
3 Free Software Foundation, Inc.
4 Contributed by Vladimir Makarov <vmakarov@redhat.com>.
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
12
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
21
22 #include "cfgloop.h"
23 #include "ira.h"
24 #include "alloc-pool.h"
25
26 /* To provide consistency in naming, all IRA external variables,
27 functions, common typedefs start with prefix ira_. */
28
29 #ifdef ENABLE_CHECKING
30 #define ENABLE_IRA_CHECKING
31 #endif
32
33 #ifdef ENABLE_IRA_CHECKING
34 #define ira_assert(c) gcc_assert (c)
35 #else
36 /* Always define and include C, so that warnings for empty body in an
37 ‘if’ statement and unused variable do not occur. */
38 #define ira_assert(c) ((void)(0 && (c)))
39 #endif
40
41 /* Compute register frequency from edge frequency FREQ. It is
42    analogous to REG_FREQ_FROM_BB.  When optimizing for size, or when
43    profile-driven feedback is available and the function is never
44    executed, all frequencies are treated as equivalent (REG_FREQ_MAX).
45    Otherwise rescale the edge frequency to the register frequency range. */
46 #define REG_FREQ_FROM_EDGE_FREQ(freq) \
47 (optimize_size || (flag_branch_probabilities && !ENTRY_BLOCK_PTR->count) \
48 ? REG_FREQ_MAX : (freq * REG_FREQ_MAX / BB_FREQ_MAX) \
49 ? (freq * REG_FREQ_MAX / BB_FREQ_MAX) : 1)
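
/* For illustration (assuming the usual values REG_FREQ_MAX == 1000 and
   BB_FREQ_MAX == 10000): an edge frequency of 5000 is rescaled to a
   register frequency of 500, while an edge frequency of 0 is clamped
   to the minimum value 1.  */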
50
51 /* All natural loops. */
52 extern struct loops ira_loops;
53
54 /* A modified value of flag `-fira-verbose' used internally. */
55 extern int internal_flag_ira_verbose;
56
57 /* Dump file of the allocator if it is not NULL. */
58 extern FILE *ira_dump_file;
59
60 /* Typedefs for pointers to allocno live range, allocno, and copy of
61 allocnos. */
62 typedef struct live_range *live_range_t;
63 typedef struct ira_allocno *ira_allocno_t;
64 typedef struct ira_allocno_copy *ira_copy_t;
65
66 /* Definition of vector of allocnos and copies. */
67 DEF_VEC_P(ira_allocno_t);
68 DEF_VEC_ALLOC_P(ira_allocno_t, heap);
69 DEF_VEC_P(ira_copy_t);
70 DEF_VEC_ALLOC_P(ira_copy_t, heap);
71
72 /* Typedef for pointer to the subsequent structure. */
73 typedef struct ira_loop_tree_node *ira_loop_tree_node_t;
74
75 /* In the general case, IRA is a regional allocator.  The regions are
76    nested and form a tree.  Currently regions are natural loops.  The
77    following structure describes a loop tree node (representing a basic
78    block or a loop).  We need such a tree because the loop tree from
79    cfgloop.h is not convenient for the optimization: basic blocks are
80    not a part of the tree from cfgloop.h.  We also use the nodes for
81    storing additional information about basic blocks/loops for
82    register allocation purposes. */
83 struct ira_loop_tree_node
84 {
85 /* The node represents basic block if children == NULL. */
86 basic_block bb; /* NULL for loop. */
87 struct loop *loop; /* NULL for BB. */
88 /* NEXT/SUBLOOP_NEXT is the next node/loop-node of the same parent.
89 SUBLOOP_NEXT is always NULL for BBs. */
90 ira_loop_tree_node_t subloop_next, next;
91 /* CHILDREN/SUBLOOPS is the first node/loop-node immediately inside
92 the node. They are NULL for BBs. */
93 ira_loop_tree_node_t subloops, children;
94 /* The node immediately containing given node. */
95 ira_loop_tree_node_t parent;
96
97 /* Loop level in range [0, ira_loop_tree_height). */
98 int level;
99
100 /* All the following members are defined only for nodes representing
101 loops. */
102
103 /* True if the loop was marked for removal from the register
104 allocation. */
105 bool to_remove_p;
106
107 /* Allocnos in the loop corresponding to their regnos. If it is
108 NULL the loop does not form a separate register allocation region
109 (e.g. because it has abnormal enter/exit edges and we can not put
110 code for register shuffling on the edges if a different
111 allocation is used for a pseudo-register on different sides of
112 the edges). Caps are not in the map (remember we can have more
113      than one cap with the same regno in a region). */
114 ira_allocno_t *regno_allocno_map;
115
116 /* True if there is an entry to given loop not from its parent (or
117      grandparent) basic block.  For example, this can happen for two
118      adjacent loops inside another loop. */
119 bool entered_from_non_parent_p;
120
121 /* Maximal register pressure inside loop for given register class
122 (defined only for the cover classes). */
123 int reg_pressure[N_REG_CLASSES];
124
125 /* Numbers of allocnos referred or living in the loop node (except
126 for its subloops). */
127 bitmap all_allocnos;
128
129 /* Numbers of allocnos living at the loop borders. */
130 bitmap border_allocnos;
131
132 /* Regnos of pseudos modified in the loop node (including its
133 subloops). */
134 bitmap modified_regnos;
135
136 /* Numbers of copies referred in the corresponding loop. */
137 bitmap local_copies;
138 };
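
/* Illustrative sketch (not part of this interface): visiting the
   immediate children of a loop tree node NODE.  A child with a
   non-NULL BB member is a basic block node, otherwise it is a nested
   loop node; SUBLOOPS/SUBLOOP_NEXT can be used to visit only the
   nested loops.

     ira_loop_tree_node_t child;
     int n_bbs = 0, n_loops = 0;

     for (child = node->children; child != NULL; child = child->next)
       if (child->bb != NULL)
         n_bbs++;
       else
         n_loops++;
*/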
139
140 /* The root of the loop tree corresponding to the all function. */
141 extern ira_loop_tree_node_t ira_loop_tree_root;
142
143 /* Height of the loop tree. */
144 extern int ira_loop_tree_height;
145
146 /* All nodes representing basic blocks are referred through the
147 following array. We can not use basic block member `aux' for this
148 because it is used for insertion of insns on edges. */
149 extern ira_loop_tree_node_t ira_bb_nodes;
150
151 /* Two access macros to the nodes representing basic blocks. */
152 #if defined ENABLE_IRA_CHECKING && (GCC_VERSION >= 2007)
153 #define IRA_BB_NODE_BY_INDEX(index) __extension__ \
154 (({ ira_loop_tree_node_t _node = (&ira_bb_nodes[index]); \
155 if (_node->children != NULL || _node->loop != NULL || _node->bb == NULL)\
156 { \
157 fprintf (stderr, \
158 "\n%s: %d: error in %s: it is not a block node\n", \
159 __FILE__, __LINE__, __FUNCTION__); \
160 gcc_unreachable (); \
161 } \
162 _node; }))
163 #else
164 #define IRA_BB_NODE_BY_INDEX(index) (&ira_bb_nodes[index])
165 #endif
166
167 #define IRA_BB_NODE(bb) IRA_BB_NODE_BY_INDEX ((bb)->index)
168
169 /* All nodes representing loops are referred through the following
170 array. */
171 extern ira_loop_tree_node_t ira_loop_nodes;
172
173 /* Two access macros to the nodes representing loops. */
174 #if defined ENABLE_IRA_CHECKING && (GCC_VERSION >= 2007)
175 #define IRA_LOOP_NODE_BY_INDEX(index) __extension__ \
176 (({ ira_loop_tree_node_t const _node = (&ira_loop_nodes[index]);\
177 if (_node->children == NULL || _node->bb != NULL || _node->loop == NULL)\
178 { \
179 fprintf (stderr, \
180 "\n%s: %d: error in %s: it is not a loop node\n", \
181 __FILE__, __LINE__, __FUNCTION__); \
182 gcc_unreachable (); \
183 } \
184 _node; }))
185 #else
186 #define IRA_LOOP_NODE_BY_INDEX(index) (&ira_loop_nodes[index])
187 #endif
188
189 #define IRA_LOOP_NODE(loop) IRA_LOOP_NODE_BY_INDEX ((loop)->num)
190
191 \f
192
193 /* The structure describes program points where a given allocno lives.
194    To save memory we store allocno conflicts only for allocnos of the
195    same cover class, which is enough to assign hard registers.  To find
196    conflicts for other allocnos (e.g. to assign stack memory slots) we
197    use the live ranges.  If the live ranges of two allocnos
198    intersect, the allocnos are in conflict. */
199 struct live_range
200 {
201 /* Allocno whose live range is described by given structure. */
202 ira_allocno_t allocno;
203 /* Program point range. */
204 int start, finish;
205 /* Next structure describing program points where the allocno
206 lives. */
207 live_range_t next;
208 /* Pointer to structures with the same start/finish. */
209 live_range_t start_next, finish_next;
210 };
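
/* Illustrative sketch (not part of this interface) of the conflict
   test described above: two allocnos conflict if their live range
   lists intersect.  IRA provides ira_allocno_live_ranges_intersect_p
   for this; the loop below only shows the idea, relying on the fact
   that each list is ordered by decreasing program points.

     static bool
     ranges_intersect_p (live_range_t r1, live_range_t r2)
     {
       while (r1 != NULL && r2 != NULL)
         {
           if (r1->start > r2->finish)
             r1 = r1->next;
           else if (r2->start > r1->finish)
             r2 = r2->next;
           else
             return true;
         }
       return false;
     }
*/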
211
212 /* Program points are enumerated by numbers from range
213 0..IRA_MAX_POINT-1. There are approximately two times more program
214 points than insns. Program points are places in the program where
215    liveness info can be changed.  In the most general case (there are
216    more complicated cases too) some program points correspond to places
217    where an input operand dies and others correspond to places where
218    output operands are born. */
219 extern int ira_max_point;
220
221 /* Arrays of size IRA_MAX_POINT mapping a program point to the allocno
222 live ranges with given start/finish point. */
223 extern live_range_t *ira_start_point_ranges, *ira_finish_point_ranges;
224
225 /* A structure representing an allocno (allocation entity). Allocno
226    represents a pseudo-register in an allocation region.  If a
227    pseudo-register does not live in a region itself but lives in
228    nested regions, it is represented in the region by a special
229    allocno called a *cap*.  There may be more than one cap representing
230    the same pseudo-register in a region.  It means that the corresponding
231    pseudo-register lives in more than one non-intersecting subregion. */
232 struct ira_allocno
233 {
234   /* The allocno order number starting with 0.  Each allocno has a
235      unique number and the number is never changed for the
236 allocno. */
237 int num;
238 /* Regno for allocno or cap. */
239 int regno;
240 /* Mode of the allocno which is the mode of the corresponding
241 pseudo-register. */
242 enum machine_mode mode;
243 /* Hard register assigned to given allocno. Negative value means
244 that memory was allocated to the allocno. During the reload,
245 spilled allocno has value equal to the corresponding stack slot
246 number (0, ...) - 2. Value -1 is used for allocnos spilled by the
247 reload (at this point pseudo-register has only one allocno) which
248 did not get stack slot yet. */
249 int hard_regno;
250 /* Final rtx representation of the allocno. */
251 rtx reg;
252 /* Allocnos with the same regno are linked by the following member.
253 Allocnos corresponding to inner loops are first in the list (it
254 corresponds to depth-first traverse of the loops). */
255 ira_allocno_t next_regno_allocno;
256 /* There may be different allocnos with the same regno in different
257 regions. Allocnos are bound to the corresponding loop tree node.
258 Pseudo-register may have only one regular allocno with given loop
259 tree node but more than one cap (see comments above). */
260 ira_loop_tree_node_t loop_tree_node;
261 /* Accumulated usage references of the allocno. Here and below,
262 word 'accumulated' means info for given region and all nested
263 subregions. In this case, 'accumulated' means sum of references
264 of the corresponding pseudo-register in this region and in all
265 nested subregions recursively. */
266 int nrefs;
267 /* Accumulated frequency of usage of the allocno. */
268 int freq;
269 /* Register class which should be used for allocation for given
270 allocno. NO_REGS means that we should use memory. */
271 enum reg_class cover_class;
272   /* Minimal accumulated and updated costs of using a register of the
273      cover class for the allocno. */
274 int cover_class_cost, updated_cover_class_cost;
275 /* Minimal accumulated, and updated costs of memory for the allocno.
276 At the allocation start, the original and updated costs are
277 equal. The updated cost may be changed after finishing
278 allocation in a region and starting allocation in a subregion.
279 The change reflects the cost of spill/restore code on the
280 subregion border if we assign memory to the pseudo in the
281 subregion. */
282 int memory_cost, updated_memory_cost;
283 /* Accumulated number of points where the allocno lives and there is
284 excess pressure for its class. Excess pressure for a register
285 class at some point means that there are more allocnos of given
286      register class living at the point than the number of hard registers
287      of the class available for allocation. */
288 int excess_pressure_points_num;
289 /* Copies to other non-conflicting allocnos. The copies can
290      represent a move insn or a potential move insn, usually because of
291      two-operand insn constraints. */
292 ira_copy_t allocno_copies;
293   /* It is an allocno (cap) representing the given allocno on the upper
294      loop tree level. */
295 ira_allocno_t cap;
296 /* It is a link to allocno (cap) on lower loop level represented by
297 given cap. Null if given allocno is not a cap. */
298 ira_allocno_t cap_member;
299 /* Coalesced allocnos form a cyclic list. One allocno given by
300 FIRST_COALESCED_ALLOCNO represents all coalesced allocnos. The
301 list is chained by NEXT_COALESCED_ALLOCNO. */
302 ira_allocno_t first_coalesced_allocno;
303 ira_allocno_t next_coalesced_allocno;
304 /* Pointer to structures describing at what program point the
305      allocno lives.  We always maintain the list in such a way that *the
306      ranges in the list do not intersect and are ordered by decreasing
307      program points*. */
308 live_range_t live_ranges;
309 /* Before building conflicts the two member values are
310 correspondingly minimal and maximal points of the accumulated
311 allocno live ranges. After building conflicts the values are
312 correspondingly minimal and maximal conflict ids of allocnos with
313 which given allocno can conflict. */
314 int min, max;
315 /* Vector of accumulated conflicting allocnos with NULL end marker
316 (if CONFLICT_VEC_P is true) or conflict bit vector otherwise.
317 Only allocnos with the same cover class are in the vector or in
318 the bit vector. */
319 void *conflict_allocno_array;
320 /* The unique member value represents given allocno in conflict bit
321 vectors. */
322 int conflict_id;
323 /* Allocated size of the previous array. */
324 unsigned int conflict_allocno_array_size;
325 /* Initial and accumulated hard registers conflicting with this
326      allocno and which as a consequence can not be assigned to the allocno.
327      All non-allocatable hard regs and hard regs of cover classes
328      different from the given allocno's one are included in the sets. */
329 HARD_REG_SET conflict_hard_regs, total_conflict_hard_regs;
330 /* Number of accumulated conflicts in the vector of conflicting
331 allocnos. */
332 int conflict_allocnos_num;
333 /* Accumulated frequency of calls which given allocno
334 intersects. */
335 int call_freq;
336 /* Accumulated number of the intersected calls. */
337 int calls_crossed_num;
338   /* TRUE if the allocno assigned to memory was a destination of a
339      removed move (see ira-emit.c) at loop exit because the value of
340 the corresponding pseudo-register is not changed inside the
341 loop. */
342 unsigned int mem_optimized_dest_p : 1;
343 /* TRUE if the corresponding pseudo-register has disjoint live
344 ranges and the other allocnos of the pseudo-register except this
345 one changed REG. */
346 unsigned int somewhere_renamed_p : 1;
347 /* TRUE if allocno with the same REGNO in a subregion has been
348 renamed, in other words, got a new pseudo-register. */
349 unsigned int child_renamed_p : 1;
350 /* During the reload, value TRUE means that we should not reassign a
351      hard register to the allocno that got memory earlier.  It is set up
352      when we remove memory-memory move insns before each iteration of
353      the reload. */
354 unsigned int dont_reassign_p : 1;
355 #ifdef STACK_REGS
356 /* Set to TRUE if allocno can't be assigned to the stack hard
357 register correspondingly in this region and area including the
358 region and all its subregions recursively. */
359 unsigned int no_stack_reg_p : 1, total_no_stack_reg_p : 1;
360 #endif
361   /* TRUE value means that there is no point in spilling the allocno
362      during coloring because the spill will result in additional
363      reloads in the reload pass. */
364 unsigned int bad_spill_p : 1;
365 /* TRUE value means that the allocno was not removed yet from the
366      conflict graph during coloring. */
367 unsigned int in_graph_p : 1;
368 /* TRUE if a hard register or memory has been assigned to the
369 allocno. */
370 unsigned int assigned_p : 1;
371 /* TRUE if it is put on the stack to make other allocnos
372 colorable. */
373 unsigned int may_be_spilled_p : 1;
374 /* TRUE if the allocno was removed from the splay tree used to
375      choose allocnos for spilling (see ira-color.c). */
376 unsigned int splay_removed_p : 1;
377 /* TRUE if conflicts for given allocno are represented by vector of
378 pointers to the conflicting allocnos. Otherwise, we use a bit
379 vector where a bit with given index represents allocno with the
380 same number. */
381 unsigned int conflict_vec_p : 1;
382 /* Non NULL if we remove restoring value from given allocno to
383 MEM_OPTIMIZED_DEST at loop exit (see ira-emit.c) because the
384 allocno value is not changed inside the loop. */
385 ira_allocno_t mem_optimized_dest;
386 /* Array of usage costs (accumulated and the one updated during
387 coloring) for each hard register of the allocno cover class. The
388 member value can be NULL if all costs are the same and equal to
389 COVER_CLASS_COST. For example, the costs of two different hard
390 registers can be different if one hard register is callee-saved
391 and another one is callee-used and the allocno lives through
392      calls.  Another example can be the case when for some insn the
393 corresponding pseudo-register value should be put in specific
394 register class (e.g. AREG for x86) which is a strict subset of
395 the allocno cover class (GENERAL_REGS for x86). We have updated
396 costs to reflect the situation when the usage cost of a hard
397 register is decreased because the allocno is connected to another
398      allocno by a copy and that other allocno has been assigned to
399 the hard register. */
400 int *hard_reg_costs, *updated_hard_reg_costs;
401 /* Array of decreasing costs (accumulated and the one updated during
402 coloring) for allocnos conflicting with given allocno for hard
403 regno of the allocno cover class. The member value can be NULL
404 if all costs are the same. These costs are used to reflect
405 preferences of other allocnos not assigned yet during assigning
406 to given allocno. */
407 int *conflict_hard_reg_costs, *updated_conflict_hard_reg_costs;
408   /* Total size (in hard registers) of the allocnos of the same cover
409      class that conflict with the given allocno and still have a TRUE
410      in_graph_p value at the current point of graph coloring. */
411 int left_conflicts_size;
412 /* Number of hard registers of the allocno cover class really
413 available for the allocno allocation. */
414 int available_regs_num;
415 /* Allocnos in a bucket (used in coloring) chained by the following
416 two members. */
417 ira_allocno_t next_bucket_allocno;
418 ira_allocno_t prev_bucket_allocno;
419 /* Used for temporary purposes. */
420 int temp;
421 };
422
423 /* All members of the allocno structures should be accessed only
424 through the following macros. */
425 #define ALLOCNO_NUM(A) ((A)->num)
426 #define ALLOCNO_REGNO(A) ((A)->regno)
427 #define ALLOCNO_REG(A) ((A)->reg)
428 #define ALLOCNO_NEXT_REGNO_ALLOCNO(A) ((A)->next_regno_allocno)
429 #define ALLOCNO_LOOP_TREE_NODE(A) ((A)->loop_tree_node)
430 #define ALLOCNO_CAP(A) ((A)->cap)
431 #define ALLOCNO_CAP_MEMBER(A) ((A)->cap_member)
432 #define ALLOCNO_CONFLICT_ALLOCNO_ARRAY(A) ((A)->conflict_allocno_array)
433 #define ALLOCNO_CONFLICT_ALLOCNO_ARRAY_SIZE(A) \
434 ((A)->conflict_allocno_array_size)
435 #define ALLOCNO_CONFLICT_ALLOCNOS_NUM(A) \
436 ((A)->conflict_allocnos_num)
437 #define ALLOCNO_CONFLICT_HARD_REGS(A) ((A)->conflict_hard_regs)
438 #define ALLOCNO_TOTAL_CONFLICT_HARD_REGS(A) ((A)->total_conflict_hard_regs)
439 #define ALLOCNO_NREFS(A) ((A)->nrefs)
440 #define ALLOCNO_FREQ(A) ((A)->freq)
441 #define ALLOCNO_HARD_REGNO(A) ((A)->hard_regno)
442 #define ALLOCNO_CALL_FREQ(A) ((A)->call_freq)
443 #define ALLOCNO_CALLS_CROSSED_NUM(A) ((A)->calls_crossed_num)
444 #define ALLOCNO_MEM_OPTIMIZED_DEST(A) ((A)->mem_optimized_dest)
445 #define ALLOCNO_MEM_OPTIMIZED_DEST_P(A) ((A)->mem_optimized_dest_p)
446 #define ALLOCNO_SOMEWHERE_RENAMED_P(A) ((A)->somewhere_renamed_p)
447 #define ALLOCNO_CHILD_RENAMED_P(A) ((A)->child_renamed_p)
448 #define ALLOCNO_DONT_REASSIGN_P(A) ((A)->dont_reassign_p)
449 #ifdef STACK_REGS
450 #define ALLOCNO_NO_STACK_REG_P(A) ((A)->no_stack_reg_p)
451 #define ALLOCNO_TOTAL_NO_STACK_REG_P(A) ((A)->total_no_stack_reg_p)
452 #endif
453 #define ALLOCNO_BAD_SPILL_P(A) ((A)->bad_spill_p)
454 #define ALLOCNO_IN_GRAPH_P(A) ((A)->in_graph_p)
455 #define ALLOCNO_ASSIGNED_P(A) ((A)->assigned_p)
456 #define ALLOCNO_MAY_BE_SPILLED_P(A) ((A)->may_be_spilled_p)
457 #define ALLOCNO_SPLAY_REMOVED_P(A) ((A)->splay_removed_p)
458 #define ALLOCNO_CONFLICT_VEC_P(A) ((A)->conflict_vec_p)
459 #define ALLOCNO_MODE(A) ((A)->mode)
460 #define ALLOCNO_COPIES(A) ((A)->allocno_copies)
461 #define ALLOCNO_HARD_REG_COSTS(A) ((A)->hard_reg_costs)
462 #define ALLOCNO_UPDATED_HARD_REG_COSTS(A) ((A)->updated_hard_reg_costs)
463 #define ALLOCNO_CONFLICT_HARD_REG_COSTS(A) \
464 ((A)->conflict_hard_reg_costs)
465 #define ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS(A) \
466 ((A)->updated_conflict_hard_reg_costs)
467 #define ALLOCNO_LEFT_CONFLICTS_SIZE(A) ((A)->left_conflicts_size)
468 #define ALLOCNO_COVER_CLASS(A) ((A)->cover_class)
469 #define ALLOCNO_COVER_CLASS_COST(A) ((A)->cover_class_cost)
470 #define ALLOCNO_UPDATED_COVER_CLASS_COST(A) ((A)->updated_cover_class_cost)
471 #define ALLOCNO_MEMORY_COST(A) ((A)->memory_cost)
472 #define ALLOCNO_UPDATED_MEMORY_COST(A) ((A)->updated_memory_cost)
473 #define ALLOCNO_EXCESS_PRESSURE_POINTS_NUM(A) ((A)->excess_pressure_points_num)
474 #define ALLOCNO_AVAILABLE_REGS_NUM(A) ((A)->available_regs_num)
475 #define ALLOCNO_NEXT_BUCKET_ALLOCNO(A) ((A)->next_bucket_allocno)
476 #define ALLOCNO_PREV_BUCKET_ALLOCNO(A) ((A)->prev_bucket_allocno)
477 #define ALLOCNO_TEMP(A) ((A)->temp)
478 #define ALLOCNO_FIRST_COALESCED_ALLOCNO(A) ((A)->first_coalesced_allocno)
479 #define ALLOCNO_NEXT_COALESCED_ALLOCNO(A) ((A)->next_coalesced_allocno)
480 #define ALLOCNO_LIVE_RANGES(A) ((A)->live_ranges)
481 #define ALLOCNO_MIN(A) ((A)->min)
482 #define ALLOCNO_MAX(A) ((A)->max)
483 #define ALLOCNO_CONFLICT_ID(A) ((A)->conflict_id)
484
485 /* Map regno -> allocnos with given regno (see comments for
486 allocno member `next_regno_allocno'). */
487 extern ira_allocno_t *ira_regno_allocno_map;
488
489 /* Array of references to all allocnos. The order number of the
490 allocno corresponds to the index in the array. Removed allocnos
491 have NULL element value. */
492 extern ira_allocno_t *ira_allocnos;
493
494 /* Sizes of the previous array. */
495 extern int ira_allocnos_num;
496
497 /* Map conflict id -> allocno with given conflict id (see comments for
498 allocno member `conflict_id'). */
499 extern ira_allocno_t *ira_conflict_id_allocno_map;
500
501 /* The following structure represents a copy between two allocnos.  The
502    copies represent move insns or potential move insns, usually because
503    of two-operand insn constraints.  To remove register shuffling, we
504    also create copies between the allocno which is the output of an insn
505    and the allocno becoming dead in the insn. */
506 struct ira_allocno_copy
507 {
508 /* The unique order number of the copy node starting with 0. */
509 int num;
510 /* Allocnos connected by the copy. The first allocno should have
511 smaller order number than the second one. */
512 ira_allocno_t first, second;
513 /* Execution frequency of the copy. */
514 int freq;
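  /* Presumably true if the copy originated from insn operand
     constraints rather than from an actual or potential move insn
     (inferred from the structure comment above).  */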
515 bool constraint_p;
516 /* It is a move insn which is an origin of the copy. The member
517 value for the copy representing two operand insn constraints or
518      for the copy created to remove register shuffling is NULL.  In the last
519 case the copy frequency is smaller than the corresponding insn
520 execution frequency. */
521 rtx insn;
522 /* All copies with the same allocno as FIRST are linked by the two
523 following members. */
524 ira_copy_t prev_first_allocno_copy, next_first_allocno_copy;
525 /* All copies with the same allocno as SECOND are linked by the two
526 following members. */
527 ira_copy_t prev_second_allocno_copy, next_second_allocno_copy;
528   /* Region from which the given copy originated. */
529 ira_loop_tree_node_t loop_tree_node;
530 };
531
532 /* Array of references to all copies. The order number of the copy
533 corresponds to the index in the array. Removed copies have NULL
534 element value. */
535 extern ira_copy_t *ira_copies;
536
537 /* Size of the previous array. */
538 extern int ira_copies_num;
539
540 /* The following structure describes a stack slot used for spilled
541 pseudo-registers. */
542 struct ira_spilled_reg_stack_slot
543 {
544 /* pseudo-registers assigned to the stack slot. */
545 bitmap_head spilled_regs;
546 /* RTL representation of the stack slot. */
547 rtx mem;
548 /* Size of the stack slot. */
549 unsigned int width;
550 };
551
552 /* The number of elements in the following array. */
553 extern int ira_spilled_reg_stack_slots_num;
554
555 /* The following array contains info about spilled pseudo-registers
556 stack slots used in current function so far. */
557 extern struct ira_spilled_reg_stack_slot *ira_spilled_reg_stack_slots;
558
559 /* Correspondingly overall cost of the allocation, cost of the
560 allocnos assigned to hard-registers, cost of the allocnos assigned
561 to memory, cost of loads, stores and register move insns generated
562 for pseudo-register live range splitting (see ira-emit.c). */
563 extern int ira_overall_cost;
564 extern int ira_reg_cost, ira_mem_cost;
565 extern int ira_load_cost, ira_store_cost, ira_shuffle_cost;
566 extern int ira_move_loops_num, ira_additional_jumps_num;
567 \f
568 /* This page contains a bitset implementation called 'min/max sets' used to
569 record conflicts in IRA.
570    They are named min/max sets since we keep track of a minimum and a maximum
571 bit number for each set representing the bounds of valid elements. Otherwise,
572 the implementation resembles sbitmaps in that we store an array of integers
573 whose bits directly represent the members of the set. */
574
575 /* The type used as elements in the array, and the number of bits in
576 this type. */
577 #define IRA_INT_BITS HOST_BITS_PER_WIDE_INT
578 #define IRA_INT_TYPE HOST_WIDE_INT
579
580 /* Set, clear or test bit number I in R, a bit vector of elements with
581 minimal index and maximal index equal correspondingly to MIN and
582 MAX. */
583 #if defined ENABLE_IRA_CHECKING && (GCC_VERSION >= 2007)
584
585 #define SET_MINMAX_SET_BIT(R, I, MIN, MAX) __extension__ \
586 (({ int _min = (MIN), _max = (MAX), _i = (I); \
587 if (_i < _min || _i > _max) \
588 { \
589 fprintf (stderr, \
590 "\n%s: %d: error in %s: %d not in range [%d,%d]\n", \
591 __FILE__, __LINE__, __FUNCTION__, _i, _min, _max); \
592 gcc_unreachable (); \
593 } \
594 ((R)[(unsigned) (_i - _min) / IRA_INT_BITS] \
595 |= ((IRA_INT_TYPE) 1 << ((unsigned) (_i - _min) % IRA_INT_BITS))); }))
596
597
598 #define CLEAR_MINMAX_SET_BIT(R, I, MIN, MAX) __extension__ \
599 (({ int _min = (MIN), _max = (MAX), _i = (I); \
600 if (_i < _min || _i > _max) \
601 { \
602 fprintf (stderr, \
603 "\n%s: %d: error in %s: %d not in range [%d,%d]\n", \
604 __FILE__, __LINE__, __FUNCTION__, _i, _min, _max); \
605 gcc_unreachable (); \
606 } \
607 ((R)[(unsigned) (_i - _min) / IRA_INT_BITS] \
608 &= ~((IRA_INT_TYPE) 1 << ((unsigned) (_i - _min) % IRA_INT_BITS))); }))
609
610 #define TEST_MINMAX_SET_BIT(R, I, MIN, MAX) __extension__ \
611 (({ int _min = (MIN), _max = (MAX), _i = (I); \
612 if (_i < _min || _i > _max) \
613 { \
614 fprintf (stderr, \
615 "\n%s: %d: error in %s: %d not in range [%d,%d]\n", \
616 __FILE__, __LINE__, __FUNCTION__, _i, _min, _max); \
617 gcc_unreachable (); \
618 } \
619 ((R)[(unsigned) (_i - _min) / IRA_INT_BITS] \
620 & ((IRA_INT_TYPE) 1 << ((unsigned) (_i - _min) % IRA_INT_BITS))); }))
621
622 #else
623
624 #define SET_MINMAX_SET_BIT(R, I, MIN, MAX) \
625 ((R)[(unsigned) ((I) - (MIN)) / IRA_INT_BITS] \
626 |= ((IRA_INT_TYPE) 1 << ((unsigned) ((I) - (MIN)) % IRA_INT_BITS)))
627
628 #define CLEAR_MINMAX_SET_BIT(R, I, MIN, MAX) \
629 ((R)[(unsigned) ((I) - (MIN)) / IRA_INT_BITS] \
630 &= ~((IRA_INT_TYPE) 1 << ((unsigned) ((I) - (MIN)) % IRA_INT_BITS)))
631
632 #define TEST_MINMAX_SET_BIT(R, I, MIN, MAX) \
633 ((R)[(unsigned) ((I) - (MIN)) / IRA_INT_BITS] \
634 & ((IRA_INT_TYPE) 1 << ((unsigned) ((I) - (MIN)) % IRA_INT_BITS)))
635
636 #endif
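
/* Illustrative sketch (not part of this interface): allocating a
   min/max set for elements in [MIN, MAX] and using the macros above.
   One IRA_INT_TYPE word is needed per IRA_INT_BITS elements of the
   range.

     int min = 10, max = 42;
     int n = (max - min + IRA_INT_BITS) / IRA_INT_BITS;
     IRA_INT_TYPE *vec
       = (IRA_INT_TYPE *) ira_allocate (n * sizeof (IRA_INT_TYPE));

     memset (vec, 0, n * sizeof (IRA_INT_TYPE));
     SET_MINMAX_SET_BIT (vec, 25, min, max);
     if (TEST_MINMAX_SET_BIT (vec, 25, min, max))
       CLEAR_MINMAX_SET_BIT (vec, 25, min, max);
     ira_free (vec);
*/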
637
638 /* The iterator for min/max sets. */
639 typedef struct {
640
641 /* Array containing the bit vector. */
642 IRA_INT_TYPE *vec;
643
644 /* The number of the current element in the vector. */
645 unsigned int word_num;
646
647 /* The number of bits in the bit vector. */
648 unsigned int nel;
649
650 /* The current bit index of the bit vector. */
651 unsigned int bit_num;
652
653 /* Index corresponding to the 1st bit of the bit vector. */
654 int start_val;
655
656 /* The word of the bit vector currently visited. */
657 unsigned IRA_INT_TYPE word;
658 } minmax_set_iterator;
659
660 /* Initialize the iterator I for bit vector VEC containing minimal and
661 maximal values MIN and MAX. */
662 static inline void
663 minmax_set_iter_init (minmax_set_iterator *i, IRA_INT_TYPE *vec, int min,
664 int max)
665 {
666 i->vec = vec;
667 i->word_num = 0;
668 i->nel = max < min ? 0 : max - min + 1;
669 i->start_val = min;
670 i->bit_num = 0;
671 i->word = i->nel == 0 ? 0 : vec[0];
672 }
673
674 /* Return TRUE if we have more elements to visit, in which case *N is
675 set to the number of the element to be visited. Otherwise, return
676 FALSE. */
677 static inline bool
678 minmax_set_iter_cond (minmax_set_iterator *i, int *n)
679 {
680 /* Skip words that are zeros. */
681 for (; i->word == 0; i->word = i->vec[i->word_num])
682 {
683 i->word_num++;
684 i->bit_num = i->word_num * IRA_INT_BITS;
685
686 /* If we have reached the end, break. */
687 if (i->bit_num >= i->nel)
688 return false;
689 }
690
691 /* Skip bits that are zero. */
692 for (; (i->word & 1) == 0; i->word >>= 1)
693 i->bit_num++;
694
695 *n = (int) i->bit_num + i->start_val;
696
697 return true;
698 }
699
700 /* Advance to the next element in the set. */
701 static inline void
702 minmax_set_iter_next (minmax_set_iterator *i)
703 {
704 i->word >>= 1;
705 i->bit_num++;
706 }
707
708 /* Loop over all elements of a min/max set given by bit vector VEC and
709    their minimal and maximal values MIN and MAX.  In each iteration, N
710    is set to the number of the next element.  ITER is an instance of
711 minmax_set_iterator used to iterate over the set. */
712 #define FOR_EACH_BIT_IN_MINMAX_SET(VEC, MIN, MAX, N, ITER) \
713 for (minmax_set_iter_init (&(ITER), (VEC), (MIN), (MAX)); \
714 minmax_set_iter_cond (&(ITER), &(N)); \
715 minmax_set_iter_next (&(ITER)))
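
/* Illustrative sketch (not part of this interface): visiting every
   element of a min/max set VEC with bounds MIN and MAX (set up as in
   the sketch earlier on this page).

     int n;
     minmax_set_iterator si;

     FOR_EACH_BIT_IN_MINMAX_SET (vec, min, max, n, si)
       fprintf (stderr, "element %d is in the set\n", n);
*/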
716 \f
717 struct target_ira_int {
718 /* Initialized once. It is a maximal possible size of the allocated
719 struct costs. */
720 int x_max_struct_costs_size;
721
722 /* Allocated and initialized once, and used to initialize cost values
723 for each insn. */
724 struct costs *x_init_cost;
725
726 /* Allocated once, and used for temporary purposes. */
727 struct costs *x_temp_costs;
728
729 /* Allocated once, and used for the cost calculation. */
730 struct costs *x_op_costs[MAX_RECOG_OPERANDS];
731 struct costs *x_this_op_costs[MAX_RECOG_OPERANDS];
732
733 /* Classes used for cost calculation. They may be different on
734 different iterations of the cost calculations or in different
735 optimization modes. */
736 enum reg_class *x_cost_classes;
737
738 /* Hard registers that can not be used for the register allocator for
739 all functions of the current compilation unit. */
740 HARD_REG_SET x_no_unit_alloc_regs;
741
742 /* Map: hard regs X modes -> set of hard registers for storing value
743 of given mode starting with given hard register. */
744 HARD_REG_SET (x_ira_reg_mode_hard_regset
745 [FIRST_PSEUDO_REGISTER][NUM_MACHINE_MODES]);
746
747   /* Array based on TARGET_REGISTER_MOVE_COST.  Don't use
748      ira_register_move_cost directly.  Use the function
749      ira_get_register_move_cost instead. */
750 move_table *x_ira_register_move_cost[MAX_MACHINE_MODE];
751
752 /* Similar to may_move_in_cost but it is calculated in IRA instead of
753      regclass.  Another difference is that we take only available hard
754      registers into account to figure out whether one register class is a
755      subset of another one.  Don't use it directly.  Use the function
756      ira_get_may_move_cost instead. */
757 move_table *x_ira_may_move_in_cost[MAX_MACHINE_MODE];
758
759 /* Similar to may_move_out_cost but it is calculated in IRA instead of
760      regclass.  Another difference is that we take only available hard
761      registers into account to figure out whether one register class is a
762      subset of another one.  Don't use it directly.  Use the function
763      ira_get_may_move_cost instead. */
764 move_table *x_ira_may_move_out_cost[MAX_MACHINE_MODE];
765
766 /* Register class subset relation: TRUE if the first class is a subset
767 of the second one considering only hard registers available for the
768 allocation. */
769 int x_ira_class_subset_p[N_REG_CLASSES][N_REG_CLASSES];
770
771 /* Array of the number of hard registers of given class which are
772      available for allocation.  The order is defined by the hard
773 register numbers. */
774 short x_ira_non_ordered_class_hard_regs[N_REG_CLASSES][FIRST_PSEUDO_REGISTER];
775
776   /* Index (in ira_class_hard_regs) for given register class and hard
777      register (in general case a hard register can belong to several
778      register classes).  The index is negative for hard registers
779      unavailable for the allocation. */
780 short x_ira_class_hard_reg_index[N_REG_CLASSES][FIRST_PSEUDO_REGISTER];
781
782   /* Array whose elements are, for a given register class and mode, the
783      set of hard registers available for allocation in the class for
784      which HARD_REGNO_MODE_OK is zero for the mode. */
785 HARD_REG_SET x_prohibited_class_mode_regs[N_REG_CLASSES][NUM_MACHINE_MODES];
786
787   /* The value is the number of elements in the subsequent array. */
788 int x_ira_important_classes_num;
789
790   /* The array containing non-empty classes (including non-empty cover
791      classes) which are subclasses of cover classes.  Such classes are
792      important for calculation of the hard register usage costs. */
793 enum reg_class x_ira_important_classes[N_REG_CLASSES];
794
795 /* The biggest important class inside of intersection of the two
796 classes (that is calculated taking only hard registers available
797      for allocation into account).  If both classes contain no hard
798 registers available for allocation, the value is calculated with
799 taking all hard-registers including fixed ones into account. */
800 enum reg_class x_ira_reg_class_intersect[N_REG_CLASSES][N_REG_CLASSES];
801
802   /* True if the two classes (this is calculated taking only hard
803      registers available for allocation into account)
804      intersect. */
805 bool x_ira_reg_classes_intersect_p[N_REG_CLASSES][N_REG_CLASSES];
806
807 /* Classes with end marker LIM_REG_CLASSES which are intersected with
808      given class (the first index).  That includes the given class itself.
809 This is calculated taking only hard registers available for
810 allocation into account. */
811 enum reg_class x_ira_reg_class_super_classes[N_REG_CLASSES][N_REG_CLASSES];
812
813 /* The biggest important class inside of union of the two classes
814 (that is calculated taking only hard registers available for
815      allocation into account).  If both classes contain no hard
816 registers available for allocation, the value is calculated with
817 taking all hard-registers including fixed ones into account. In
818 other words, the value is the corresponding reg_class_subunion
819 value. */
820 enum reg_class x_ira_reg_class_union[N_REG_CLASSES][N_REG_CLASSES];
821
822 /* For each reg class, table listing all the classes contained in it
823      (excluding the class itself).  Non-allocatable registers are
824      excluded from the consideration. */
825 enum reg_class x_alloc_reg_class_subclasses[N_REG_CLASSES][N_REG_CLASSES];
826
827 /* Array whose values are hard regset of hard registers for which
828 move of the hard register in given mode into itself is
829 prohibited. */
830 HARD_REG_SET x_ira_prohibited_mode_move_regs[NUM_MACHINE_MODES];
831
832   /* Flag indicating that the above array has been initialized. */
833 bool x_ira_prohibited_mode_move_regs_initialized_p;
834 };
835
836 extern struct target_ira_int default_target_ira_int;
837 #if SWITCHABLE_TARGET
838 extern struct target_ira_int *this_target_ira_int;
839 #else
840 #define this_target_ira_int (&default_target_ira_int)
841 #endif
842
843 #define ira_reg_mode_hard_regset \
844 (this_target_ira_int->x_ira_reg_mode_hard_regset)
845 #define ira_register_move_cost \
846 (this_target_ira_int->x_ira_register_move_cost)
847 #define ira_may_move_in_cost \
848 (this_target_ira_int->x_ira_may_move_in_cost)
849 #define ira_may_move_out_cost \
850 (this_target_ira_int->x_ira_may_move_out_cost)
851 #define ira_class_subset_p \
852 (this_target_ira_int->x_ira_class_subset_p)
853 #define ira_non_ordered_class_hard_regs \
854 (this_target_ira_int->x_ira_non_ordered_class_hard_regs)
855 #define ira_class_hard_reg_index \
856 (this_target_ira_int->x_ira_class_hard_reg_index)
857 #define prohibited_class_mode_regs \
858 (this_target_ira_int->x_prohibited_class_mode_regs)
859 #define ira_important_classes_num \
860 (this_target_ira_int->x_ira_important_classes_num)
861 #define ira_important_classes \
862 (this_target_ira_int->x_ira_important_classes)
863 #define ira_reg_class_intersect \
864 (this_target_ira_int->x_ira_reg_class_intersect)
865 #define ira_reg_classes_intersect_p \
866 (this_target_ira_int->x_ira_reg_classes_intersect_p)
867 #define ira_reg_class_super_classes \
868 (this_target_ira_int->x_ira_reg_class_super_classes)
869 #define ira_reg_class_union \
870 (this_target_ira_int->x_ira_reg_class_union)
871 #define ira_prohibited_mode_move_regs \
872 (this_target_ira_int->x_ira_prohibited_mode_move_regs)
873 \f
874 /* ira.c: */
875
876 extern void *ira_allocate (size_t);
877 extern void *ira_reallocate (void *, size_t);
878 extern void ira_free (void *addr);
879 extern bitmap ira_allocate_bitmap (void);
880 extern void ira_free_bitmap (bitmap);
881 extern void ira_print_disposition (FILE *);
882 extern void ira_debug_disposition (void);
883 extern void ira_debug_class_cover (void);
884 extern void ira_init_register_move_cost (enum machine_mode);
885
886 /* The length of the two following arrays. */
887 extern int ira_reg_equiv_len;
888
889 /* The element value is TRUE if the corresponding regno value is
890 invariant. */
891 extern bool *ira_reg_equiv_invariant_p;
892
893 /* The element value is equiv constant of given pseudo-register or
894 NULL_RTX. */
895 extern rtx *ira_reg_equiv_const;
896
897 /* ira-build.c */
898
899 /* The current loop tree node and its regno allocno map. */
900 extern ira_loop_tree_node_t ira_curr_loop_tree_node;
901 extern ira_allocno_t *ira_curr_regno_allocno_map;
902
903 extern void ira_debug_copy (ira_copy_t);
904 extern void ira_debug_copies (void);
905 extern void ira_debug_allocno_copies (ira_allocno_t);
906
907 extern void ira_traverse_loop_tree (bool, ira_loop_tree_node_t,
908 void (*) (ira_loop_tree_node_t),
909 void (*) (ira_loop_tree_node_t));
910 extern ira_allocno_t ira_parent_allocno (ira_allocno_t);
911 extern ira_allocno_t ira_parent_or_cap_allocno (ira_allocno_t);
912 extern ira_allocno_t ira_create_allocno (int, bool, ira_loop_tree_node_t);
913 extern void ira_set_allocno_cover_class (ira_allocno_t, enum reg_class);
914 extern bool ira_conflict_vector_profitable_p (ira_allocno_t, int);
915 extern void ira_allocate_allocno_conflict_vec (ira_allocno_t, int);
916 extern void ira_allocate_allocno_conflicts (ira_allocno_t, int);
917 extern void ira_add_allocno_conflict (ira_allocno_t, ira_allocno_t);
918 extern void ira_print_expanded_allocno (ira_allocno_t);
919 extern live_range_t ira_create_allocno_live_range (ira_allocno_t, int, int,
920 live_range_t);
921 extern live_range_t ira_copy_allocno_live_range_list (live_range_t);
922 extern live_range_t ira_merge_allocno_live_ranges (live_range_t, live_range_t);
923 extern bool ira_allocno_live_ranges_intersect_p (live_range_t, live_range_t);
924 extern void ira_finish_allocno_live_range (live_range_t);
925 extern void ira_finish_allocno_live_range_list (live_range_t);
926 extern void ira_free_allocno_updated_costs (ira_allocno_t);
927 extern ira_copy_t ira_create_copy (ira_allocno_t, ira_allocno_t,
928 int, bool, rtx, ira_loop_tree_node_t);
929 extern void ira_add_allocno_copy_to_list (ira_copy_t);
930 extern void ira_swap_allocno_copy_ends_if_necessary (ira_copy_t);
931 extern void ira_remove_allocno_copy_from_list (ira_copy_t);
932 extern ira_copy_t ira_add_allocno_copy (ira_allocno_t, ira_allocno_t, int,
933 bool, rtx, ira_loop_tree_node_t);
934
935 extern int *ira_allocate_cost_vector (enum reg_class);
936 extern void ira_free_cost_vector (int *, enum reg_class);
937
938 extern void ira_flattening (int, int);
939 extern bool ira_build (bool);
940 extern void ira_destroy (void);
941
942 /* ira-costs.c */
943 extern void ira_init_costs_once (void);
944 extern void ira_init_costs (void);
945 extern void ira_finish_costs_once (void);
946 extern void ira_costs (void);
947 extern void ira_tune_allocno_costs_and_cover_classes (void);
948
949 /* ira-lives.c */
950
951 extern void ira_rebuild_start_finish_chains (void);
952 extern void ira_print_live_range_list (FILE *, live_range_t);
953 extern void ira_debug_live_range_list (live_range_t);
954 extern void ira_debug_allocno_live_ranges (ira_allocno_t);
955 extern void ira_debug_live_ranges (void);
956 extern void ira_create_allocno_live_ranges (void);
957 extern void ira_compress_allocno_live_ranges (void);
958 extern void ira_finish_allocno_live_ranges (void);
959
960 /* ira-conflicts.c */
961 extern void ira_debug_conflicts (bool);
962 extern void ira_build_conflicts (void);
963
964 /* ira-color.c */
965 extern int ira_loop_edge_freq (ira_loop_tree_node_t, int, bool);
966 extern void ira_reassign_conflict_allocnos (int);
967 extern void ira_initiate_assign (void);
968 extern void ira_finish_assign (void);
969 extern void ira_color (void);
970
971 /* ira-emit.c */
972 extern void ira_emit (bool);
973
974 \f
975
976 /* Return cost of moving value of MODE from register of class FROM to
977 register of class TO. */
978 static inline int
979 ira_get_register_move_cost (enum machine_mode mode,
980 enum reg_class from, enum reg_class to)
981 {
982 if (ira_register_move_cost[mode] == NULL)
983 ira_init_register_move_cost (mode);
984 return ira_register_move_cost[mode][from][to];
985 }
986
987 /* Return cost of moving value of MODE from register of class FROM to
988 register of class TO. Return zero if IN_P is true and FROM is
989 subset of TO or if IN_P is false and FROM is superset of TO. */
990 static inline int
991 ira_get_may_move_cost (enum machine_mode mode,
992 enum reg_class from, enum reg_class to,
993 bool in_p)
994 {
995 if (ira_register_move_cost[mode] == NULL)
996 ira_init_register_move_cost (mode);
997 return (in_p
998 ? ira_may_move_in_cost[mode][from][to]
999 : ira_may_move_out_cost[mode][from][to]);
1000 }
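
/* Illustrative sketch (not part of this interface): querying move
   costs through the inline helpers above; the cost tables for the
   mode are allocated lazily on the first query.  SImode and
   GENERAL_REGS are only example arguments.

     int move_cost
       = ira_get_register_move_cost (SImode, GENERAL_REGS, GENERAL_REGS);
     int load_like_cost
       = ira_get_may_move_cost (SImode, GENERAL_REGS, GENERAL_REGS, true);
*/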
1001
1002 \f
1003
1004 /* The iterator for all allocnos. */
1005 typedef struct {
1006 /* The number of the current element in IRA_ALLOCNOS. */
1007 int n;
1008 } ira_allocno_iterator;
1009
1010 /* Initialize the iterator I. */
1011 static inline void
1012 ira_allocno_iter_init (ira_allocno_iterator *i)
1013 {
1014 i->n = 0;
1015 }
1016
1017 /* Return TRUE if we have more allocnos to visit, in which case *A is
1018 set to the allocno to be visited. Otherwise, return FALSE. */
1019 static inline bool
1020 ira_allocno_iter_cond (ira_allocno_iterator *i, ira_allocno_t *a)
1021 {
1022 int n;
1023
1024 for (n = i->n; n < ira_allocnos_num; n++)
1025 if (ira_allocnos[n] != NULL)
1026 {
1027 *a = ira_allocnos[n];
1028 i->n = n + 1;
1029 return true;
1030 }
1031 return false;
1032 }
1033
1034 /* Loop over all allocnos. In each iteration, A is set to the next
1035 allocno. ITER is an instance of ira_allocno_iterator used to iterate
1036 the allocnos. */
1037 #define FOR_EACH_ALLOCNO(A, ITER) \
1038 for (ira_allocno_iter_init (&(ITER)); \
1039 ira_allocno_iter_cond (&(ITER), &(A));)
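
/* Illustrative sketch (not part of this interface): counting the
   allocnos that ended up in memory.

     ira_allocno_t a;
     ira_allocno_iterator ai;
     int n_spilled = 0;

     FOR_EACH_ALLOCNO (a, ai)
       if (ALLOCNO_HARD_REGNO (a) < 0)
         n_spilled++;
*/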
1040
1041
1042 \f
1043
1044 /* The iterator for copies. */
1045 typedef struct {
1046 /* The number of the current element in IRA_COPIES. */
1047 int n;
1048 } ira_copy_iterator;
1049
1050 /* Initialize the iterator I. */
1051 static inline void
1052 ira_copy_iter_init (ira_copy_iterator *i)
1053 {
1054 i->n = 0;
1055 }
1056
1057 /* Return TRUE if we have more copies to visit, in which case *CP is
1058 set to the copy to be visited. Otherwise, return FALSE. */
1059 static inline bool
1060 ira_copy_iter_cond (ira_copy_iterator *i, ira_copy_t *cp)
1061 {
1062 int n;
1063
1064 for (n = i->n; n < ira_copies_num; n++)
1065 if (ira_copies[n] != NULL)
1066 {
1067 *cp = ira_copies[n];
1068 i->n = n + 1;
1069 return true;
1070 }
1071 return false;
1072 }
1073
1074 /* Loop over all copies. In each iteration, C is set to the next
1075 copy. ITER is an instance of ira_copy_iterator used to iterate
1076 the copies. */
1077 #define FOR_EACH_COPY(C, ITER) \
1078 for (ira_copy_iter_init (&(ITER)); \
1079 ira_copy_iter_cond (&(ITER), &(C));)
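
/* Illustrative sketch (not part of this interface): dumping all
   copies when a dump file is available.

     ira_copy_t cp;
     ira_copy_iterator ci;

     FOR_EACH_COPY (cp, ci)
       if (ira_dump_file != NULL)
         fprintf (ira_dump_file, "copy %d: a%d-a%d, freq %d\n",
                  cp->num, ALLOCNO_NUM (cp->first),
                  ALLOCNO_NUM (cp->second), cp->freq);
*/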
1080
1081
1082 \f
1083
1084 /* The iterator for allocno conflicts. */
1085 typedef struct {
1086
1087 /* TRUE if the conflicts are represented by vector of allocnos. */
1088 bool allocno_conflict_vec_p;
1089
1090 /* The conflict vector or conflict bit vector. */
1091 void *vec;
1092
1093 /* The number of the current element in the vector (of type
1094 ira_allocno_t or IRA_INT_TYPE). */
1095 unsigned int word_num;
1096
1097 /* The bit vector size. It is defined only if
1098 ALLOCNO_CONFLICT_VEC_P is FALSE. */
1099 unsigned int size;
1100
1101 /* The current bit index of bit vector. It is defined only if
1102 ALLOCNO_CONFLICT_VEC_P is FALSE. */
1103 unsigned int bit_num;
1104
1105 /* Allocno conflict id corresponding to the 1st bit of the bit
1106 vector. It is defined only if ALLOCNO_CONFLICT_VEC_P is
1107 FALSE. */
1108 int base_conflict_id;
1109
1110 /* The word of bit vector currently visited. It is defined only if
1111 ALLOCNO_CONFLICT_VEC_P is FALSE. */
1112 unsigned IRA_INT_TYPE word;
1113 } ira_allocno_conflict_iterator;
1114
1115 /* Initialize the iterator I with ALLOCNO conflicts. */
1116 static inline void
1117 ira_allocno_conflict_iter_init (ira_allocno_conflict_iterator *i,
1118 ira_allocno_t allocno)
1119 {
1120 i->allocno_conflict_vec_p = ALLOCNO_CONFLICT_VEC_P (allocno);
1121 i->vec = ALLOCNO_CONFLICT_ALLOCNO_ARRAY (allocno);
1122 i->word_num = 0;
1123 if (i->allocno_conflict_vec_p)
1124 i->size = i->bit_num = i->base_conflict_id = i->word = 0;
1125 else
1126 {
1127 if (ALLOCNO_MIN (allocno) > ALLOCNO_MAX (allocno))
1128 i->size = 0;
1129 else
1130 i->size = ((ALLOCNO_MAX (allocno) - ALLOCNO_MIN (allocno)
1131 + IRA_INT_BITS)
1132 / IRA_INT_BITS) * sizeof (IRA_INT_TYPE);
1133 i->bit_num = 0;
1134 i->base_conflict_id = ALLOCNO_MIN (allocno);
1135 i->word = (i->size == 0 ? 0 : ((IRA_INT_TYPE *) i->vec)[0]);
1136 }
1137 }
1138
1139 /* Return TRUE if we have more conflicting allocnos to visit, in which
1140 case *A is set to the allocno to be visited. Otherwise, return
1141 FALSE. */
1142 static inline bool
1143 ira_allocno_conflict_iter_cond (ira_allocno_conflict_iterator *i,
1144 ira_allocno_t *a)
1145 {
1146 ira_allocno_t conflict_allocno;
1147
1148 if (i->allocno_conflict_vec_p)
1149 {
1150 conflict_allocno = ((ira_allocno_t *) i->vec)[i->word_num];
1151 if (conflict_allocno == NULL)
1152 return false;
1153 *a = conflict_allocno;
1154 return true;
1155 }
1156 else
1157 {
1158 /* Skip words that are zeros. */
1159 for (; i->word == 0; i->word = ((IRA_INT_TYPE *) i->vec)[i->word_num])
1160 {
1161 i->word_num++;
1162
1163 /* If we have reached the end, break. */
1164 if (i->word_num * sizeof (IRA_INT_TYPE) >= i->size)
1165 return false;
1166
1167 i->bit_num = i->word_num * IRA_INT_BITS;
1168 }
1169
1170 /* Skip bits that are zero. */
1171 for (; (i->word & 1) == 0; i->word >>= 1)
1172 i->bit_num++;
1173
1174 *a = ira_conflict_id_allocno_map[i->bit_num + i->base_conflict_id];
1175
1176 return true;
1177 }
1178 }
1179
1180 /* Advance to the next conflicting allocno. */
1181 static inline void
1182 ira_allocno_conflict_iter_next (ira_allocno_conflict_iterator *i)
1183 {
1184 if (i->allocno_conflict_vec_p)
1185 i->word_num++;
1186 else
1187 {
1188 i->word >>= 1;
1189 i->bit_num++;
1190 }
1191 }
1192
1193 /* Loop over all allocnos conflicting with ALLOCNO. In each
1194 iteration, A is set to the next conflicting allocno. ITER is an
1195 instance of ira_allocno_conflict_iterator used to iterate the
1196 conflicts. */
1197 #define FOR_EACH_ALLOCNO_CONFLICT(ALLOCNO, A, ITER) \
1198 for (ira_allocno_conflict_iter_init (&(ITER), (ALLOCNO)); \
1199 ira_allocno_conflict_iter_cond (&(ITER), &(A)); \
1200 ira_allocno_conflict_iter_next (&(ITER)))
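
/* Illustrative sketch (not part of this interface): counting the
   accumulated conflicts of an allocno A (assuming the conflicts have
   already been built), independently of whether they are stored as a
   vector or as a bit vector.

     ira_allocno_t conflict_a;
     ira_allocno_conflict_iterator aci;
     int n_conflicts = 0;

     FOR_EACH_ALLOCNO_CONFLICT (a, conflict_a, aci)
       n_conflicts++;
*/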
1201
1202 \f
1203
1204 /* The function returns TRUE if hard registers starting with
1205 HARD_REGNO and containing value of MODE are not in set
1206 HARD_REGSET. */
1207 static inline bool
1208 ira_hard_reg_not_in_set_p (int hard_regno, enum machine_mode mode,
1209 HARD_REG_SET hard_regset)
1210 {
1211 int i;
1212
1213 ira_assert (hard_regno >= 0);
1214 for (i = hard_regno_nregs[hard_regno][mode] - 1; i >= 0; i--)
1215 if (TEST_HARD_REG_BIT (hard_regset, hard_regno + i))
1216 return false;
1217 return true;
1218 }
1219
1220 \f
1221
1222 /* To save memory we use a lazy approach for allocation and
1223 initialization of the cost vectors. We do this only when it is
1224 really necessary. */
1225
1226 /* Allocate cost vector *VEC for hard registers of COVER_CLASS and
1227 initialize the elements by VAL if it is necessary */
1228 static inline void
1229 ira_allocate_and_set_costs (int **vec, enum reg_class cover_class, int val)
1230 {
1231 int i, *reg_costs;
1232 int len;
1233
1234 if (*vec != NULL)
1235 return;
1236 *vec = reg_costs = ira_allocate_cost_vector (cover_class);
1237 len = ira_class_hard_regs_num[cover_class];
1238 for (i = 0; i < len; i++)
1239 reg_costs[i] = val;
1240 }
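
/* Illustrative sketch (not part of this interface): the lazy scheme
   above in use.  The hard register cost vector of an allocno A is
   materialized only on the first call; later calls are no-ops.

     ira_allocate_and_set_costs (&ALLOCNO_HARD_REG_COSTS (a),
                                 ALLOCNO_COVER_CLASS (a),
                                 ALLOCNO_COVER_CLASS_COST (a));
*/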
1241
1242 /* Allocate cost vector *VEC for hard registers of COVER_CLASS and
1243 copy values of vector SRC into the vector if it is necessary */
1244 static inline void
1245 ira_allocate_and_copy_costs (int **vec, enum reg_class cover_class, int *src)
1246 {
1247 int len;
1248
1249 if (*vec != NULL || src == NULL)
1250 return;
1251 *vec = ira_allocate_cost_vector (cover_class);
1252 len = ira_class_hard_regs_num[cover_class];
1253 memcpy (*vec, src, sizeof (int) * len);
1254 }
1255
1256 /* Allocate cost vector *VEC for hard registers of COVER_CLASS and
1257 add values of vector SRC into the vector if it is necessary */
1258 static inline void
1259 ira_allocate_and_accumulate_costs (int **vec, enum reg_class cover_class,
1260 int *src)
1261 {
1262 int i, len;
1263
1264 if (src == NULL)
1265 return;
1266 len = ira_class_hard_regs_num[cover_class];
1267 if (*vec == NULL)
1268 {
1269 *vec = ira_allocate_cost_vector (cover_class);
1270 memset (*vec, 0, sizeof (int) * len);
1271 }
1272 for (i = 0; i < len; i++)
1273 (*vec)[i] += src[i];
1274 }
1275
1276 /* Allocate cost vector *VEC for hard registers of COVER_CLASS and
1277 copy values of vector SRC into the vector or initialize it by VAL
1278 (if SRC is null). */
1279 static inline void
1280 ira_allocate_and_set_or_copy_costs (int **vec, enum reg_class cover_class,
1281 int val, int *src)
1282 {
1283 int i, *reg_costs;
1284 int len;
1285
1286 if (*vec != NULL)
1287 return;
1288 *vec = reg_costs = ira_allocate_cost_vector (cover_class);
1289 len = ira_class_hard_regs_num[cover_class];
1290 if (src != NULL)
1291 memcpy (reg_costs, src, sizeof (int) * len);
1292 else
1293 {
1294 for (i = 0; i < len; i++)
1295 reg_costs[i] = val;
1296 }
1297 }