/*
 * Copyright © 2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */
#include "brw_eu.h"
#include "brw_fs.h"
#include "brw_cfg.h"
#include "util/register_allocate.h"

using namespace brw;

static void
assign_reg(unsigned *reg_hw_locations, fs_reg *reg)
{
   if (reg->file == VGRF) {
      reg->nr = reg_hw_locations[reg->nr] + reg->offset / REG_SIZE;
      reg->offset %= REG_SIZE;
   }
}
void
fs_visitor::assign_regs_trivial()
{
   unsigned hw_reg_mapping[this->alloc.count + 1];
   unsigned i;
   int reg_width = dispatch_width / 8;

   /* Note that compressed instructions require alignment to 2 registers. */
   hw_reg_mapping[0] = ALIGN(this->first_non_payload_grf, reg_width);
   for (i = 1; i <= this->alloc.count; i++) {
      hw_reg_mapping[i] = (hw_reg_mapping[i - 1] +
                           this->alloc.sizes[i - 1]);
   }
   this->grf_used = hw_reg_mapping[this->alloc.count];

   foreach_block_and_inst(block, fs_inst, inst, cfg) {
      assign_reg(hw_reg_mapping, &inst->dst);
      for (i = 0; i < inst->sources; i++) {
         assign_reg(hw_reg_mapping, &inst->src[i]);
      }
   }

   if (this->grf_used >= max_grf) {
      fail("Ran out of regs on trivial allocator (%d/%d)\n",
           this->grf_used, max_grf);
   } else {
      this->alloc.count = this->grf_used;
   }
}
static void
brw_alloc_reg_set(struct brw_compiler *compiler, int dispatch_width)
{
   const struct gen_device_info *devinfo = compiler->devinfo;
   int base_reg_count = BRW_MAX_GRF;
   const int index = _mesa_logbase2(dispatch_width / 8);

   if (dispatch_width > 8 && devinfo->gen >= 7) {
      /* For IVB+, we don't need the PLN hacks or the even-reg alignment in
       * SIMD16.  Therefore, we can use the exact same register sets for
       * SIMD16 as we do for SIMD8 and we don't need to recalculate them.
       */
      compiler->fs_reg_sets[index] = compiler->fs_reg_sets[0];
      return;
   }
   /* The registers used to make up almost all values handled in the compiler
    * are a scalar value occupying a single register (or 2 registers in the
    * case of SIMD16, which is handled by dividing base_reg_count by 2 and
    * multiplying allocated register numbers by 2).  Things that were
    * aggregates of scalar values at the GLSL level were split to scalar
    * values by split_virtual_grfs().
    *
    * However, texture SEND messages return a series of contiguous registers
    * to write into.  We currently always ask for 4 registers, but we may
    * convert that to use less some day.
    *
    * Additionally, on gen5 we need aligned pairs of registers for the PLN
    * instruction, and on gen4 we need 8 contiguous regs for workaround simd16
    * texturing messages.
    */
   const int class_count = MAX_VGRF_SIZE;
   int class_sizes[MAX_VGRF_SIZE];
   for (unsigned i = 0; i < MAX_VGRF_SIZE; i++)
      class_sizes[i] = i + 1;
   memset(compiler->fs_reg_sets[index].class_to_ra_reg_range, 0,
          sizeof(compiler->fs_reg_sets[index].class_to_ra_reg_range));
   int *class_to_ra_reg_range = compiler->fs_reg_sets[index].class_to_ra_reg_range;

   /* Compute the total number of registers across all classes. */
   int ra_reg_count = 0;
   for (int i = 0; i < class_count; i++) {
      if (devinfo->gen <= 5 && dispatch_width >= 16) {
         /* From the G45 PRM:
          *
          * In order to reduce the hardware complexity, the following
          * rules and restrictions apply to the compressed instruction:
          *
          * * Operand Alignment Rule: With the exceptions listed below, a
          *   source/destination operand in general should be aligned to
          *   even 256-bit physical register with a region size equal to
          *   two 256-bit physical registers.
          */
         ra_reg_count += (base_reg_count - (class_sizes[i] - 1)) / 2;
      } else {
         ra_reg_count += base_reg_count - (class_sizes[i] - 1);
      }
      /* Mark the last register.  We'll fill in the beginnings later. */
      class_to_ra_reg_range[class_sizes[i]] = ra_reg_count;
   }

   /* Fill out the rest of the range markers */
   for (int i = 1; i < 17; ++i) {
      if (class_to_ra_reg_range[i] == 0)
         class_to_ra_reg_range[i] = class_to_ra_reg_range[i - 1];
   }
   uint8_t *ra_reg_to_grf = ralloc_array(compiler, uint8_t, ra_reg_count);
   struct ra_regs *regs = ra_alloc_reg_set(compiler, ra_reg_count, false);
   if (devinfo->gen >= 6)
      ra_set_allocate_round_robin(regs);
   int *classes = ralloc_array(compiler, int, class_count);
   int aligned_pairs_class = -1;

   /* Allocate space for q values.  We allocate class_count + 1 because we
    * want to leave room for the aligned pairs class if we have it. */
   unsigned int **q_values = ralloc_array(compiler, unsigned int *,
                                          class_count + 1);
   for (int i = 0; i < class_count + 1; ++i)
      q_values[i] = ralloc_array(q_values, unsigned int, class_count + 1);
   /* Now, add the registers to their classes, and add the conflicts
    * between them and the base GRF registers (and also each other).
    */
   int reg = 0;
   int pairs_base_reg = 0;
   int pairs_reg_count = 0;
   for (int i = 0; i < class_count; i++) {
      int class_reg_count;
      if (devinfo->gen <= 5 && dispatch_width >= 16) {
         class_reg_count = (base_reg_count - (class_sizes[i] - 1)) / 2;

         /* See comment below.  The only difference here is that we are
          * dealing with pairs of registers instead of single registers.
          * Registers of odd sizes simply get rounded up. */
         for (int j = 0; j < class_count; j++)
            q_values[i][j] = (class_sizes[i] + 1) / 2 +
                             (class_sizes[j] + 1) / 2 - 1;
      } else {
         class_reg_count = base_reg_count - (class_sizes[i] - 1);
         /* From register_allocate.c:
          *
          * q(B,C) (indexed by C, B is this register class) in
          * Runeson/Nyström paper.  This is "how many registers of B could
          * the worst choice register from C conflict with".
          *
          * If we just let the register allocation algorithm compute these
          * values, it is extremely expensive.  However, since all of our
          * registers are laid out, we can very easily compute them
          * ourselves.  View the register from C as fixed starting at GRF n
          * somewhere in the middle, and the register from B as sliding back
          * and forth.  Then the first register to conflict from B is the
          * one starting at n - class_size[B] + 1 and the last register to
          * conflict will start at n + class_size[B] - 1.  Therefore, the
          * number of conflicts from B is class_size[B] + class_size[C] - 1.
          *
          *   +-+-+-+-+-+-+     +-+-+-+-+-+-+
          * B | | | | | |n| --> | | | | | | |
          *   +-+-+-+-+-+-+     +-+-+-+-+-+-+
          */
         for (int j = 0; j < class_count; j++)
            q_values[i][j] = class_sizes[i] + class_sizes[j] - 1;
      }
      classes[i] = ra_alloc_reg_class(regs);

      /* Save this off for the aligned pair class at the end. */
      if (class_sizes[i] == 2) {
         pairs_base_reg = reg;
         pairs_reg_count = class_reg_count;
      }

      if (devinfo->gen <= 5 && dispatch_width >= 16) {
         for (int j = 0; j < class_reg_count; j++) {
            ra_class_add_reg(regs, classes[i], reg);

            ra_reg_to_grf[reg] = j * 2;

            for (int base_reg = j;
                 base_reg < j + (class_sizes[i] + 1) / 2;
                 base_reg++) {
               ra_add_reg_conflict(regs, base_reg, reg);
            }

            reg++;
         }
      } else {
         for (int j = 0; j < class_reg_count; j++) {
            ra_class_add_reg(regs, classes[i], reg);

            ra_reg_to_grf[reg] = j;

            for (int base_reg = j;
                 base_reg < j + class_sizes[i];
                 base_reg++) {
               ra_add_reg_conflict(regs, base_reg, reg);
            }

            reg++;
         }
      }
   }
   assert(reg == ra_reg_count);
   /* Applying transitivity to all of the base registers gives us the
    * appropriate register conflict relationships everywhere.
    */
   for (int reg = 0; reg < base_reg_count; reg++)
      ra_make_reg_conflicts_transitive(regs, reg);

   /* Add a special class for aligned pairs, which we'll put delta_xy
    * in on Gen <= 6 so that we can do PLN.
    */
   if (devinfo->has_pln && dispatch_width == 8 && devinfo->gen <= 6) {
      aligned_pairs_class = ra_alloc_reg_class(regs);

      for (int i = 0; i < pairs_reg_count; i++) {
         if ((ra_reg_to_grf[pairs_base_reg + i] & 1) == 0) {
            ra_class_add_reg(regs, aligned_pairs_class, pairs_base_reg + i);
         }
      }

      for (int i = 0; i < class_count; i++) {
         /* These are a little counter-intuitive because the pair registers
          * are required to be aligned while the registers they are
          * potentially interfering with are not.  In the case where the
          * size is even, the worst-case is that the register is
          * odd-aligned.  In the odd-size case, it doesn't matter.
          */
         q_values[class_count][i] = class_sizes[i] / 2 + 1;
         q_values[i][class_count] = class_sizes[i] + 1;
      }
      q_values[class_count][class_count] = 1;
   }
   ra_set_finalize(regs, q_values);

   ralloc_free(q_values);

   compiler->fs_reg_sets[index].regs = regs;
   for (unsigned i = 0; i < ARRAY_SIZE(compiler->fs_reg_sets[index].classes); i++)
      compiler->fs_reg_sets[index].classes[i] = -1;
   for (int i = 0; i < class_count; i++)
      compiler->fs_reg_sets[index].classes[class_sizes[i] - 1] = classes[i];
   compiler->fs_reg_sets[index].ra_reg_to_grf = ra_reg_to_grf;
   compiler->fs_reg_sets[index].aligned_pairs_class = aligned_pairs_class;
}
void
brw_fs_alloc_reg_sets(struct brw_compiler *compiler)
{
   brw_alloc_reg_set(compiler, 8);
   brw_alloc_reg_set(compiler, 16);
   brw_alloc_reg_set(compiler, 32);
}
static int
count_to_loop_end(const bblock_t *block)
{
   if (block->end()->opcode == BRW_OPCODE_WHILE)
      return block->end_ip;

   int depth = 1;
   /* Skip the first block, since we don't want to count the DO that the
    * calling function already found.
    */
   for (block = block->next();
        depth > 0;
        block = block->next()) {
      if (block->start()->opcode == BRW_OPCODE_DO)
         depth++;
      if (block->end()->opcode == BRW_OPCODE_WHILE) {
         depth--;
         if (depth == 0)
            return block->end_ip;
      }
   }
   unreachable("not reached");
}
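/* Example (hypothetical IR): for a nest DO, DO, WHILE, WHILE, starting from
 * the block that opens the outer DO, the depth counter only returns to zero
 * at the second WHILE, so the end_ip returned is that of the outermost loop
 * end, not the inner one.
 */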
void fs_visitor::calculate_payload_ranges(int payload_node_count,
                                          int *payload_last_use_ip)
{
   int loop_depth = 0;
   int loop_end_ip = 0;

   for (int i = 0; i < payload_node_count; i++)
      payload_last_use_ip[i] = -1;

   int ip = 0;
   foreach_block_and_inst(block, fs_inst, inst, cfg) {
      switch (inst->opcode) {
      case BRW_OPCODE_DO:
         loop_depth++;

         /* Since payload regs are deffed only at the start of the shader
          * execution, any uses of the payload within a loop mean the live
          * interval extends to the end of the outermost loop.  Find the ip of
          * the end now.
          */
         if (loop_depth == 1)
            loop_end_ip = count_to_loop_end(block);
         break;
      case BRW_OPCODE_WHILE:
         loop_depth--;
         break;
      default:
         break;
      }

      int use_ip;
      if (loop_depth > 0)
         use_ip = loop_end_ip;
      else
         use_ip = ip;

      /* Note that UNIFORM args have been turned into FIXED_GRF by
       * assign_curbe_setup(), and interpolation uses fixed hardware regs from
       * the start (see interp_reg()).
       */
      for (int i = 0; i < inst->sources; i++) {
         if (inst->src[i].file == FIXED_GRF) {
            int node_nr = inst->src[i].nr;
            if (node_nr >= payload_node_count)
               continue;

            for (unsigned j = 0; j < regs_read(inst, i); j++) {
               payload_last_use_ip[node_nr + j] = use_ip;
               assert(node_nr + j < unsigned(payload_node_count));
            }
         }
      }

      /* Special case instructions which have extra implied registers used. */
      switch (inst->opcode) {
      case CS_OPCODE_CS_TERMINATE:
         payload_last_use_ip[0] = use_ip;
         break;

      default:
         if (inst->eot) {
            /* We could omit this for the !inst->header_present case, except
             * that the simulator apparently incorrectly reads from g0/g1
             * instead of sideband.  It also really freaks out driver
             * developers to see g0 used in unusual places, so just always
             * reserve it.
             */
            payload_last_use_ip[0] = use_ip;
            payload_last_use_ip[1] = use_ip;
         }
         break;
      }

      ip++;
   }
}
class fs_reg_alloc {
public:
   fs_reg_alloc(fs_visitor *fs):
      fs(fs), devinfo(fs->devinfo), compiler(fs->compiler), g(NULL),
      have_spill_costs(false)
   {
      mem_ctx = ralloc_context(NULL);

      /* Most of this allocation was written for a reg_width of 1
       * (dispatch_width == 8).  In extending to SIMD16, the code was
       * left in place and it was converted to have the hardware
       * registers it's allocating be contiguous physical pairs of regs
       * for reg_width == 2.
       */
      int reg_width = fs->dispatch_width / 8;
      rsi = _mesa_logbase2(reg_width);
      payload_node_count = ALIGN(fs->first_non_payload_grf, reg_width);

      /* Get payload IP information */
      payload_last_use_ip = ralloc_array(mem_ctx, int, payload_node_count);

      spill_vgrf_ip = NULL;
      spill_vgrf_ip_alloc = 0;
      spill_node_count = 0;
   }

   ~fs_reg_alloc()
   {
      ralloc_free(mem_ctx);
   }

   bool assign_regs(bool allow_spilling, bool spill_all);

private:
   void setup_live_interference(unsigned node,
                                int node_start_ip, int node_end_ip);
   void setup_inst_interference(fs_inst *inst);

   void build_interference_graph(bool allow_spilling);
   void discard_interference_graph();

   void set_spill_costs();
   int choose_spill_reg();
   fs_reg alloc_spill_reg(unsigned size, int ip);
   void spill_reg(unsigned spill_reg);

   void *mem_ctx;
   fs_visitor *fs;
   const gen_device_info *devinfo;
   const brw_compiler *compiler;

   /* Which compiler->fs_reg_sets[] to use */
   int rsi;

   ra_graph *g;
   bool have_spill_costs;

   int payload_node_count;
   int *payload_last_use_ip;

   int node_count;
   int first_payload_node;
   int first_mrf_hack_node;
   int grf127_send_hack_node;
   int first_vgrf_node;
   int first_spill_node;

   int *spill_vgrf_ip;
   int spill_vgrf_ip_alloc;
   int spill_node_count;
};
/**
 * Sets the mrf_used array to indicate which MRFs are used by the shader IR
 *
 * This is used in assign_regs() to decide which of the GRFs that we use as
 * MRFs on gen7 get normally register allocated, and in register spilling to
 * see if we can actually use MRFs to do spills without overwriting normal MRF
 * contents.
 */
static void
get_used_mrfs(fs_visitor *v, bool *mrf_used)
{
   int reg_width = v->dispatch_width / 8;

   memset(mrf_used, 0, BRW_MAX_MRF(v->devinfo->gen) * sizeof(bool));

   foreach_block_and_inst(block, fs_inst, inst, v->cfg) {
      if (inst->dst.file == MRF) {
         int reg = inst->dst.nr & ~BRW_MRF_COMPR4;
         mrf_used[reg] = true;
         if (reg_width == 2) {
            if (inst->dst.nr & BRW_MRF_COMPR4) {
               mrf_used[reg + 4] = true;
            } else {
               mrf_used[reg + 1] = true;
            }
         }
      }

      if (inst->mlen > 0) {
         for (int i = 0; i < v->implied_mrf_writes(inst); i++) {
            mrf_used[inst->base_mrf + i] = true;
         }
      }
   }
}
/**
 * Maximum spill block size we expect to encounter in 32B units.
 *
 * This is somewhat arbitrary and doesn't necessarily limit the maximum
 * variable size that can be spilled -- A higher value will allow a
 * variable of a given size to be spilled more efficiently with a smaller
 * number of scratch messages, but will increase the likelihood of a
 * collision between the MRFs reserved for spilling and other MRFs used by
 * the program (and possibly increase GRF register pressure on platforms
 * without hardware MRFs), which could cause register allocation to fail.
 *
 * For the moment reserve just enough space so a register of 32 bit
 * component type and natural region width can be spilled without splitting
 * into multiple (force_writemask_all) scratch messages.
 */
static unsigned
spill_max_size(const backend_shader *s)
{
   /* FINISHME - On Gen7+ it should be possible to avoid this limit
    *            altogether by spilling directly from the temporary GRF
    *            allocated to hold the result of the instruction (and the
    *            scratch write header).
    */
   /* FINISHME - The shader's dispatch width probably belongs in
    *            backend_shader (or some nonexistent fs_shader class?)
    *            rather than in the visitor class.
    */
   return static_cast<const fs_visitor *>(s)->dispatch_width / 8;
}

/**
 * First MRF register available for spilling.
 */
static unsigned
spill_base_mrf(const backend_shader *s)
{
   return BRW_MAX_MRF(s->devinfo->gen) - spill_max_size(s) - 1;
}
void
fs_reg_alloc::setup_live_interference(unsigned node,
                                      int node_start_ip, int node_end_ip)
{
   /* Mark any virtual grf that is live between the start of the program and
    * the last use of a payload node interfering with that payload node.
    */
   for (int i = 0; i < payload_node_count; i++) {
      if (payload_last_use_ip[i] == -1)
         continue;

      /* Note that we use a <= comparison, unlike virtual_grf_interferes(),
       * in order to not have to worry about the uniform issue described in
       * calculate_live_intervals().
       */
      if (node_start_ip <= payload_last_use_ip[i])
         ra_add_node_interference(g, node, first_payload_node + i);
   }

   /* If we have the MRF hack enabled, mark this node as interfering with all
    * MRF registers.
    */
   if (first_mrf_hack_node >= 0) {
      for (int i = spill_base_mrf(fs); i < BRW_MAX_MRF(devinfo->gen); i++)
         ra_add_node_interference(g, node, first_mrf_hack_node + i);
   }

   /* Add interference with every vgrf whose live range intersects this
    * node's.  We only need to look at nodes below this one as the reflexivity
    * of interference will take care of the rest.
    */
   for (unsigned n2 = first_vgrf_node;
        n2 < (unsigned)first_spill_node && n2 < node; n2++) {
      unsigned vgrf = n2 - first_vgrf_node;
      if (!(node_end_ip <= fs->virtual_grf_start[vgrf] ||
            fs->virtual_grf_end[vgrf] <= node_start_ip))
         ra_add_node_interference(g, node, n2);
   }
}
void
fs_reg_alloc::setup_inst_interference(fs_inst *inst)
{
   /* Certain instructions can't safely use the same register for their
    * sources and destination.  Add interference.
    */
   if (inst->dst.file == VGRF && inst->has_source_and_destination_hazard()) {
      for (unsigned i = 0; i < inst->sources; i++) {
         if (inst->src[i].file == VGRF) {
            ra_add_node_interference(g, first_vgrf_node + inst->dst.nr,
                                        first_vgrf_node + inst->src[i].nr);
         }
      }
   }

   /* In 16-wide instructions we have an issue where a compressed
    * instruction is actually two instructions executed simultaneously.
    * It's actually ok to have the source and destination registers be
    * the same.  In this case, each instruction over-writes its own
    * source and there's no problem.  The real problem here is if the
    * source and destination registers are off by one.  Then you can end
    * up in a scenario where the first instruction over-writes the
    * source of the second instruction.  Since the compiler doesn't know
    * about this level of granularity, we simply make the source and
    * destination interfere.
    */
   if (inst->exec_size >= 16 && inst->dst.file == VGRF) {
      for (int i = 0; i < inst->sources; ++i) {
         if (inst->src[i].file == VGRF) {
            ra_add_node_interference(g, first_vgrf_node + inst->dst.nr,
                                        first_vgrf_node + inst->src[i].nr);
         }
      }
   }

   if (grf127_send_hack_node >= 0) {
      /* At Intel Broadwell PRM, vol 07, section "Instruction Set Reference",
       * subsection "EUISA Instructions", Send Message (page 990):
       *
       *    "r127 must not be used for return address when there is a src and
       *    dest overlap in send instruction."
       *
       * We avoid using grf127 as part of the destination of send messages by
       * adding a node interference to the grf127_send_hack_node.  This node
       * has a fixed assignment to grf127.
       *
       * We don't apply it to SIMD16 instructions because previous code avoids
       * any register overlap between sources and destination.
       */
      if (inst->exec_size < 16 && inst->is_send_from_grf() &&
          inst->dst.file == VGRF)
         ra_add_node_interference(g, first_vgrf_node + inst->dst.nr,
                                     grf127_send_hack_node);
      /* Spill instructions are generated as SEND messages from MRF, but
       * since Gen7+ supports sending from GRF the driver will map these MRF
       * registers to GRFs.  The implementation reuses the destination of the
       * send message as a source, so since we will have an overlap for sure,
       * we create an interference between the destination and grf127.
       */
      if ((inst->opcode == SHADER_OPCODE_GEN7_SCRATCH_READ ||
           inst->opcode == SHADER_OPCODE_GEN4_SCRATCH_READ) &&
          inst->dst.file == VGRF)
         ra_add_node_interference(g, first_vgrf_node + inst->dst.nr,
                                     grf127_send_hack_node);
   }
   /* From the Skylake PRM Vol. 2a docs for sends:
    *
    *    "It is required that the second block of GRFs does not overlap with
    *    the first block."
    *
    * Normally, this is taken care of by fixup_sends_duplicate_payload() but
    * in the case where one of the registers is an undefined value, the
    * register allocator may decide that they don't interfere even though
    * they're used as sources in the same instruction.  We also need to add
    * interference here.
    */
   if (devinfo->gen >= 9) {
      if (inst->opcode == SHADER_OPCODE_SEND && inst->ex_mlen > 0 &&
          inst->src[2].file == VGRF && inst->src[3].file == VGRF &&
          inst->src[2].nr != inst->src[3].nr)
         ra_add_node_interference(g, first_vgrf_node + inst->src[2].nr,
                                     first_vgrf_node + inst->src[3].nr);
   }
   /* When we do send-from-GRF for FB writes, we need to ensure that the last
    * write instruction sends from a high register.  This is because the
    * vertex fetcher wants to start filling the low payload registers while
    * the pixel data port is still working on writing out the memory.  If we
    * don't do this, we get rendering artifacts.
    *
    * We could just do "something high".  Instead, we just pick the highest
    * register that works.
    */
   if (inst->eot) {
      const int vgrf = inst->opcode == SHADER_OPCODE_SEND ?
                       inst->src[2].nr : inst->src[0].nr;
      int size = fs->alloc.sizes[vgrf];
      int reg = compiler->fs_reg_sets[rsi].class_to_ra_reg_range[size] - 1;

      /* If something happened to spill, we want to push the EOT send
       * register early enough in the register file that we don't
       * conflict with any used MRF hack registers.
       */
      if (first_mrf_hack_node >= 0)
         reg -= BRW_MAX_MRF(devinfo->gen) - spill_base_mrf(fs);

      ra_set_node_reg(g, first_vgrf_node + vgrf, reg);
   }
}
void
fs_reg_alloc::build_interference_graph(bool allow_spilling)
{
   const gen_device_info *devinfo = fs->devinfo;
   const brw_compiler *compiler = fs->compiler;

   /* Compute the RA node layout */
   node_count = 0;
   first_payload_node = node_count;
   node_count += payload_node_count;
   if (devinfo->gen >= 7 && allow_spilling) {
      first_mrf_hack_node = node_count;
      node_count += BRW_MAX_GRF - GEN7_MRF_HACK_START;
   } else {
      first_mrf_hack_node = -1;
   }
   if (devinfo->gen >= 8) {
      grf127_send_hack_node = node_count;
      node_count++;
   } else {
      grf127_send_hack_node = -1;
   }
   first_vgrf_node = node_count;
   node_count += fs->alloc.count;
   first_spill_node = node_count;

   fs->calculate_live_intervals();
   fs->calculate_payload_ranges(payload_node_count,
                                payload_last_use_ip);

   assert(g == NULL);
   g = ra_alloc_interference_graph(compiler->fs_reg_sets[rsi].regs, node_count);
   ralloc_steal(mem_ctx, g);

   /* Set up the payload nodes */
   for (int i = 0; i < payload_node_count; i++) {
      /* Mark each payload node as being allocated to its physical register.
       *
       * The alternative would be to have per-physical-register classes, which
       * would just be silly.
       */
      if (devinfo->gen <= 5 && fs->dispatch_width >= 16) {
         /* We have to divide by 2 here because we only have even numbered
          * registers.  Some of the payload registers will be odd, but
          * that's ok because their physical register numbers have already
          * been assigned.  The only thing this is used for is interference.
          */
         ra_set_node_reg(g, first_payload_node + i, i / 2);
      } else {
         ra_set_node_reg(g, first_payload_node + i, i);
      }
   }

   if (first_mrf_hack_node >= 0) {
      /* Mark each MRF reg node as being allocated to its physical
       * register.
       *
       * The alternative would be to have per-physical-register classes,
       * which would just be silly.
       */
      for (int i = 0; i < BRW_MAX_MRF(devinfo->gen); i++) {
         ra_set_node_reg(g, first_mrf_hack_node + i,
                         GEN7_MRF_HACK_START + i);
      }
   }

   if (grf127_send_hack_node >= 0)
      ra_set_node_reg(g, grf127_send_hack_node, 127);
   for (unsigned i = 0; i < fs->alloc.count; i++) {
      unsigned size = fs->alloc.sizes[i];
      int c;

      assert(size <= ARRAY_SIZE(compiler->fs_reg_sets[rsi].classes) &&
             "Register allocation relies on split_virtual_grfs()");
      c = compiler->fs_reg_sets[rsi].classes[size - 1];

      /* Special case: on pre-GEN6 hardware that supports PLN, the
       * second operand of a PLN instruction needs to be an
       * even-numbered register, so we have a special register class
       * wm_aligned_pairs_class to handle this case.  pre-GEN6 always
       * uses fs->delta_xy[BRW_BARYCENTRIC_PERSPECTIVE_PIXEL] as the
       * second operand of a PLN instruction (since it doesn't support
       * any other interpolation modes).  So all we need to do is find
       * that register and set it to the appropriate class.
       */
      if (compiler->fs_reg_sets[rsi].aligned_pairs_class >= 0 &&
          fs->delta_xy[BRW_BARYCENTRIC_PERSPECTIVE_PIXEL].file == VGRF &&
          fs->delta_xy[BRW_BARYCENTRIC_PERSPECTIVE_PIXEL].nr == i) {
         c = compiler->fs_reg_sets[rsi].aligned_pairs_class;
      }

      ra_set_node_class(g, first_vgrf_node + i, c);

      /* Add interference based on the live range of the register */
      setup_live_interference(first_vgrf_node + i,
                              fs->virtual_grf_start[i],
                              fs->virtual_grf_end[i]);
   }

   /* Add interference based on the instructions in which a register is used.
    */
   foreach_block_and_inst(block, fs_inst, inst, fs->cfg)
      setup_inst_interference(inst);
}

void
fs_reg_alloc::discard_interference_graph()
{
   ralloc_free(g);
   g = NULL;
   have_spill_costs = false;
}
static void
emit_unspill(const fs_builder &bld, fs_reg dst,
             uint32_t spill_offset, unsigned count)
{
   const gen_device_info *devinfo = bld.shader->devinfo;
   const unsigned reg_size = dst.component_size(bld.dispatch_width()) /
                             REG_SIZE;
   assert(count % reg_size == 0);

   for (unsigned i = 0; i < count / reg_size; i++) {
      /* The Gen7 descriptor-based offset is 12 bits of HWORD units.  Because
       * the Gen7-style scratch block read is hardwired to BTI 255, on Gen9+
       * it would cause the DC to do an IA-coherent read, which largely
       * outweighs the slight advantage from not having to provide the address
       * as part of the message header, so we're better off using plain old
       * oword block reads.
       */
      bool gen7_read = (devinfo->gen >= 7 && devinfo->gen < 9 &&
                        spill_offset < (1 << 12) * REG_SIZE);
      fs_inst *unspill_inst = bld.emit(gen7_read ?
                                       SHADER_OPCODE_GEN7_SCRATCH_READ :
                                       SHADER_OPCODE_GEN4_SCRATCH_READ,
                                       dst);
      unspill_inst->offset = spill_offset;

      if (!gen7_read) {
         unspill_inst->base_mrf = spill_base_mrf(bld.shader);
         unspill_inst->mlen = 1; /* header contains offset */
      }

      dst.offset += reg_size * REG_SIZE;
      spill_offset += reg_size * REG_SIZE;
   }
}
static void
emit_spill(const fs_builder &bld, fs_reg src,
           uint32_t spill_offset, unsigned count)
{
   const unsigned reg_size = src.component_size(bld.dispatch_width()) /
                             REG_SIZE;
   assert(count % reg_size == 0);

   for (unsigned i = 0; i < count / reg_size; i++) {
      fs_inst *spill_inst =
         bld.emit(SHADER_OPCODE_GEN4_SCRATCH_WRITE, bld.null_reg_f(), src);
      src.offset += reg_size * REG_SIZE;
      spill_inst->offset = spill_offset + i * reg_size * REG_SIZE;
      spill_inst->mlen = 1 + reg_size; /* header, value */
      spill_inst->base_mrf = spill_base_mrf(bld.shader);
   }
}
void
fs_reg_alloc::set_spill_costs()
{
   float block_scale = 1.0;
   float spill_costs[fs->alloc.count];
   bool no_spill[fs->alloc.count];

   for (unsigned i = 0; i < fs->alloc.count; i++) {
      spill_costs[i] = 0.0;
      no_spill[i] = false;
   }

   /* Calculate costs for spilling nodes.  Call it a cost of 1 per
    * spill/unspill we'll have to do, and guess that the insides of
    * loops run 10 times.
    */
   foreach_block_and_inst(block, fs_inst, inst, fs->cfg) {
      for (unsigned int i = 0; i < inst->sources; i++) {
         if (inst->src[i].file == VGRF)
            spill_costs[inst->src[i].nr] += regs_read(inst, i) * block_scale;
      }

      if (inst->dst.file == VGRF)
         spill_costs[inst->dst.nr] += regs_written(inst) * block_scale;

      switch (inst->opcode) {
      case BRW_OPCODE_DO:
         block_scale *= 10;
         break;

      case BRW_OPCODE_WHILE:
         block_scale /= 10;
         break;

      case BRW_OPCODE_IF:
      case BRW_OPCODE_IFF:
         block_scale *= 0.5;
         break;

      case BRW_OPCODE_ENDIF:
         block_scale /= 0.5;
         break;

      case SHADER_OPCODE_GEN4_SCRATCH_WRITE:
         if (inst->src[0].file == VGRF)
            no_spill[inst->src[0].nr] = true;
         break;

      case SHADER_OPCODE_GEN4_SCRATCH_READ:
      case SHADER_OPCODE_GEN7_SCRATCH_READ:
         if (inst->dst.file == VGRF)
            no_spill[inst->dst.nr] = true;
         break;

      default:
         break;
      }
   }

   for (unsigned i = 0; i < fs->alloc.count; i++) {
      /* Do the no_spill check first.  Registers that are used as spill
       * temporaries may have been allocated after we calculated liveness so
       * we shouldn't look their liveness up.  Fortunately, they're always
       * used in SCRATCH_READ/WRITE instructions so they'll always be flagged
       * no_spill.
       */
      if (no_spill[i])
         continue;

      int live_length = fs->virtual_grf_end[i] - fs->virtual_grf_start[i];
      if (live_length <= 0)
         continue;

      /* Divide the cost (in number of spills/fills) by the log of the length
       * of the live range of the register.  This will encourage spill logic
       * to spill long-living things before spilling short-lived things where
       * spilling is less likely to actually do us any good.  We use the log
       * of the length because it will fall off very quickly and not cause us
       * to spill medium length registers with more uses.
       */
      float adjusted_cost = spill_costs[i] / logf(live_length);
      ra_set_node_spill_cost(g, first_vgrf_node + i, adjusted_cost);
   }

   have_spill_costs = true;
}

int
fs_reg_alloc::choose_spill_reg()
{
   if (!have_spill_costs)
      set_spill_costs();

   int node = ra_get_best_spill_node(g);
   if (node < 0)
      return -1;

   assert(node >= first_vgrf_node);
   return node - first_vgrf_node;
}
fs_reg
fs_reg_alloc::alloc_spill_reg(unsigned size, int ip)
{
   int vgrf = fs->alloc.allocate(size);
   int n = ra_add_node(g, compiler->fs_reg_sets[rsi].classes[size - 1]);
   assert(n == first_vgrf_node + vgrf);
   assert(n == first_spill_node + spill_node_count);

   setup_live_interference(n, ip - 1, ip + 1);

   /* Add interference between this spill node and any other spill nodes for
    * the same instruction.
    */
   for (int s = 0; s < spill_node_count; s++) {
      if (spill_vgrf_ip[s] == ip)
         ra_add_node_interference(g, n, first_spill_node + s);
   }

   /* Add this spill node to the list for next time */
   if (spill_node_count >= spill_vgrf_ip_alloc) {
      if (spill_vgrf_ip_alloc == 0)
         spill_vgrf_ip_alloc = 16;
      else
         spill_vgrf_ip_alloc *= 2;
      spill_vgrf_ip = reralloc(mem_ctx, spill_vgrf_ip, int,
                               spill_vgrf_ip_alloc);
   }
   spill_vgrf_ip[spill_node_count++] = ip;

   return fs_reg(VGRF, vgrf);
}
void
fs_reg_alloc::spill_reg(unsigned spill_reg)
{
   int size = fs->alloc.sizes[spill_reg];
   unsigned int spill_offset = fs->last_scratch;
   assert(ALIGN(spill_offset, 16) == spill_offset); /* oword read/write req. */

   /* Spills may use MRFs 13-15 in the SIMD16 case.  Our texturing is done
    * using up to 11 MRFs starting from either m1 or m2, and fb writes can use
    * up to m13 (gen6+ simd16: 2 header + 8 color + 2 src0alpha + 2 omask) or
    * m15 (gen4-5 simd16: 2 header + 8 color + 1 aads + 2 src depth + 2 dst
    * depth), starting from m1.  In summary: We may not be able to spill in
    * SIMD16 mode, because we'd stomp the FB writes.
    */
   if (!fs->spilled_any_registers) {
      bool mrf_used[BRW_MAX_MRF(devinfo->gen)];
      get_used_mrfs(fs, mrf_used);

      for (int i = spill_base_mrf(fs); i < BRW_MAX_MRF(devinfo->gen); i++) {
         if (mrf_used[i]) {
            fs->fail("Register spilling not supported with m%d used", i);
            return;
         }
      }

      fs->spilled_any_registers = true;
   }

   fs->last_scratch += size * REG_SIZE;

   /* We're about to replace all uses of this register.  It no longer
    * conflicts with anything so we can get rid of its interference.
    */
   ra_set_node_spill_cost(g, first_vgrf_node + spill_reg, 0);
   ra_reset_node_interference(g, first_vgrf_node + spill_reg);

   /* Generate spill/unspill instructions for the objects being
    * spilled.  Right now, we spill or unspill the whole thing to a
    * virtual grf of the same size.  For most instructions, though, we
    * could just spill/unspill the GRF being accessed.
    */
   int ip = 0;
   foreach_block_and_inst (block, fs_inst, inst, fs->cfg) {
      const fs_builder ibld = fs_builder(fs, block, inst);
      exec_node *before = inst->prev;
      exec_node *after = inst->next;

      for (unsigned int i = 0; i < inst->sources; i++) {
         if (inst->src[i].file == VGRF &&
             inst->src[i].nr == spill_reg) {
            int count = regs_read(inst, i);
            int subset_spill_offset = spill_offset +
               ROUND_DOWN_TO(inst->src[i].offset, REG_SIZE);
            fs_reg unspill_dst = alloc_spill_reg(count, ip);

            inst->src[i].nr = unspill_dst.nr;
            inst->src[i].offset %= REG_SIZE;

            /* We read the largest power-of-two divisor of the register count
             * (because only POT scratch read blocks are allowed by the
             * hardware) up to the maximum supported block size.
             */
            const unsigned width =
               MIN2(32, 1u << (ffs(MAX2(1, count) * 8) - 1));
            /* Set exec_all() on unspill messages under the (rather
             * pessimistic) assumption that there is no one-to-one
             * correspondence between channels of the spilled variable in
             * scratch space and the scratch read message, which operates on
             * 32 bit channels.  It shouldn't hurt in any case because the
             * unspill destination is a block-local temporary.
             */
            emit_unspill(ibld.exec_all().group(width, 0),
                         unspill_dst, subset_spill_offset, count);
         }
      }

      if (inst->dst.file == VGRF &&
          inst->dst.nr == spill_reg) {
         int subset_spill_offset = spill_offset +
            ROUND_DOWN_TO(inst->dst.offset, REG_SIZE);
         fs_reg spill_src = alloc_spill_reg(regs_written(inst), ip);

         inst->dst.nr = spill_src.nr;
         inst->dst.offset %= REG_SIZE;

         /* If we're immediately spilling the register, we should not use
          * destination dependency hints.  Doing so will cause the GPU to
          * try to read and write the register at the same time and may
          * hang.
          */
         inst->no_dd_clear = false;
         inst->no_dd_check = false;

         /* Calculate the execution width of the scratch messages (which work
          * in terms of 32 bit components so we have a fixed number of eight
          * channels per spilled register).  We attempt to write one
          * exec_size-wide component of the variable at a time without
          * exceeding the maximum number of (fake) MRF registers reserved for
          * spills.
          */
         const unsigned width = 8 * MIN2(
            DIV_ROUND_UP(inst->dst.component_size(inst->exec_size), REG_SIZE),
            spill_max_size(fs));

         /* Spills should only write data initialized by the instruction for
          * whichever channels are enabled in the execution mask.  If that's
          * not possible we'll have to emit a matching unspill before the
          * instruction and set force_writemask_all on the spill.
          */
         const bool per_channel =
            inst->dst.is_contiguous() && type_sz(inst->dst.type) == 4 &&
            inst->exec_size == width;

         /* Builder used to emit the scratch messages. */
         const fs_builder ubld = ibld.exec_all(!per_channel).group(width, 0);

         /* If our write is going to affect just part of the
          * regs_written(inst), then we need to unspill the destination since
          * we write back out all of the regs_written().  If the original
          * instruction had force_writemask_all set and is not a partial
          * write, there should be no need for the unspill since the
          * instruction will be overwriting the whole destination in any case.
          */
         if (inst->is_partial_write() ||
             (!inst->force_writemask_all && !per_channel))
            emit_unspill(ubld, spill_src, subset_spill_offset,
                         regs_written(inst));

         emit_spill(ubld.at(block, inst->next), spill_src,
                    subset_spill_offset, regs_written(inst));
      }

      for (fs_inst *inst = (fs_inst *)before->next;
           inst != after; inst = (fs_inst *)inst->next)
         setup_inst_interference(inst);

      /* We don't advance the ip for scratch read/write instructions
       * because we consider them to have the same ip as the instruction
       * we're spilling around for the purposes of interference.
       */
      if (inst->opcode != SHADER_OPCODE_GEN4_SCRATCH_WRITE &&
          inst->opcode != SHADER_OPCODE_GEN4_SCRATCH_READ &&
          inst->opcode != SHADER_OPCODE_GEN7_SCRATCH_READ)
         ip++;
   }
}
bool
fs_reg_alloc::assign_regs(bool allow_spilling, bool spill_all)
{
   build_interference_graph(fs->spilled_any_registers || spill_all);

   bool spilled = false;
   while (1) {
      /* Debug of register spilling: Go spill everything. */
      if (unlikely(spill_all)) {
         int reg = choose_spill_reg();
         if (reg != -1) {
            spill_reg(reg);
            continue;
         }
      }

      if (ra_allocate(g))
         break;

      if (!allow_spilling)
         return false;

      /* Failed to allocate registers.  Spill a reg, and the caller will
       * loop back into here to try again.
       */
      int reg = choose_spill_reg();
      if (reg == -1)
         return false;

      /* If we're going to spill but we've never spilled before, we need to
       * re-build the interference graph with MRFs enabled to allow spilling.
       */
      if (!fs->spilled_any_registers) {
         discard_interference_graph();
         build_interference_graph(true);
      }

      spilled = true;

      spill_reg(reg);
   }

   if (spilled)
      fs->invalidate_live_intervals();

   /* Get the chosen virtual registers for each node, and map virtual
    * regs in the register classes back down to real hardware reg
    * numbers.
    */
   unsigned hw_reg_mapping[fs->alloc.count];
   fs->grf_used = fs->first_non_payload_grf;
   for (unsigned i = 0; i < fs->alloc.count; i++) {
      int reg = ra_get_node_reg(g, first_vgrf_node + i);

      hw_reg_mapping[i] = compiler->fs_reg_sets[rsi].ra_reg_to_grf[reg];
      fs->grf_used = MAX2(fs->grf_used,
                          hw_reg_mapping[i] + fs->alloc.sizes[i]);
   }

   foreach_block_and_inst(block, fs_inst, inst, fs->cfg) {
      assign_reg(hw_reg_mapping, &inst->dst);
      for (int i = 0; i < inst->sources; i++) {
         assign_reg(hw_reg_mapping, &inst->src[i]);
      }
   }

   fs->alloc.count = fs->grf_used;

   return true;
}

bool
fs_visitor::assign_regs(bool allow_spilling, bool spill_all)
{
   fs_reg_alloc alloc(this);
   bool success = alloc.assign_regs(allow_spilling, spill_all);
   if (!success && allow_spilling) {
      fail("no register to spill:\n");
      dump_instructions(NULL);
   }
   return success;
}