/*
 * Copyright © 2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 */
#include "brw_fs.h"
#include "brw_cfg.h"
#include "util/register_allocate.h"

using namespace brw;

static void
assign_reg(unsigned *reg_hw_locations, fs_reg *reg)
{
   if (reg->file == VGRF) {
      reg->nr = reg_hw_locations[reg->nr] + reg->offset / REG_SIZE;
      reg->offset %= REG_SIZE;
   }
}
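
/* Illustrative example for assign_reg() above (not from the original
 * source): with REG_SIZE == 32 bytes, a VGRF with reg_hw_locations[nr] == 20
 * and a byte offset of 40 is rewritten to hardware GRF 21 (20 + 40 / 32),
 * keeping a residual sub-register offset of 8 bytes (40 % 32).
 */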

void
fs_visitor::assign_regs_trivial()
{
   unsigned hw_reg_mapping[this->alloc.count + 1];
   unsigned i;
   int reg_width = dispatch_width / 8;

   /* Note that compressed instructions require alignment to 2 registers. */
   hw_reg_mapping[0] = ALIGN(this->first_non_payload_grf, reg_width);
   for (i = 1; i <= this->alloc.count; i++) {
      hw_reg_mapping[i] = (hw_reg_mapping[i - 1] +
                           this->alloc.sizes[i - 1]);
   }
   this->grf_used = hw_reg_mapping[this->alloc.count];

   foreach_block_and_inst(block, fs_inst, inst, cfg) {
      assign_reg(hw_reg_mapping, &inst->dst);
      for (i = 0; i < inst->sources; i++) {
         assign_reg(hw_reg_mapping, &inst->src[i]);
      }
   }

   if (this->grf_used >= max_grf) {
      fail("Ran out of regs on trivial allocator (%d/%d)\n",
           this->grf_used, max_grf);
   } else {
      this->alloc.count = this->grf_used;
   }
}
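
/* Illustrative example (not from the original source): with
 * first_non_payload_grf == 3 and reg_width == 2 (SIMD16), the trivial
 * allocator starts at hw_reg_mapping[0] == ALIGN(3, 2) == 4; two VGRFs of
 * sizes {2, 1} then map to GRFs 4 and 6, and grf_used ends up at 7.
 */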

/**
 * Size of a register from the aligned_bary_class register class.
 */
static unsigned
aligned_bary_size(unsigned dispatch_width)
{
   return (dispatch_width == 8 ? 2 : 4);
}

static void
brw_alloc_reg_set(struct brw_compiler *compiler, int dispatch_width)
{
   const struct gen_device_info *devinfo = compiler->devinfo;
   int base_reg_count = BRW_MAX_GRF;
   const int index = util_logbase2(dispatch_width / 8);

   if (dispatch_width > 8 && devinfo->gen >= 7) {
      /* For IVB+, we don't need the PLN hacks or the even-reg alignment in
       * SIMD16.  Therefore, we can use the exact same register sets for
       * SIMD16 as we do for SIMD8 and we don't need to recalculate them.
       */
      compiler->fs_reg_sets[index] = compiler->fs_reg_sets[0];
      return;
   }

   /* The registers used to make up almost all values handled in the compiler
    * are a scalar value occupying a single register (or 2 registers in the
    * case of SIMD16, which is handled by dividing base_reg_count by 2 and
    * multiplying allocated register numbers by 2).  Things that were
    * aggregates of scalar values at the GLSL level were split to scalar
    * values by split_virtual_grfs().
    *
    * However, texture SEND messages return a series of contiguous registers
    * to write into.  We currently always ask for 4 registers, but we may
    * convert that to use less some day.
    *
    * Additionally, on gen5 we need aligned pairs of registers for the PLN
    * instruction, and on gen4 we need 8 contiguous regs for workaround simd16
    * texturing.
    */
   const int class_count = MAX_VGRF_SIZE;
   int class_sizes[MAX_VGRF_SIZE];
   for (unsigned i = 0; i < MAX_VGRF_SIZE; i++)
      class_sizes[i] = i + 1;

   memset(compiler->fs_reg_sets[index].class_to_ra_reg_range, 0,
          sizeof(compiler->fs_reg_sets[index].class_to_ra_reg_range));
   int *class_to_ra_reg_range = compiler->fs_reg_sets[index].class_to_ra_reg_range;

   /* Compute the total number of registers across all classes. */
   int ra_reg_count = 0;
   for (int i = 0; i < class_count; i++) {
      if (devinfo->gen <= 5 && dispatch_width >= 16) {
         /* From the G45 PRM:
          *
          *    In order to reduce the hardware complexity, the following
          *    rules and restrictions apply to the compressed instruction:
          *    ...
          *    * Operand Alignment Rule: With the exceptions listed below, a
          *      source/destination operand in general should be aligned to
          *      even 256-bit physical register with a region size equal to
          *      two 256-bit physical registers.
          */
         ra_reg_count += (base_reg_count - (class_sizes[i] - 1)) / 2;
      } else {
         ra_reg_count += base_reg_count - (class_sizes[i] - 1);
      }
      /* Mark the last register.  We'll fill in the beginnings later. */
      class_to_ra_reg_range[class_sizes[i]] = ra_reg_count;
   }

   /* Fill out the rest of the range markers */
   for (int i = 1; i < 17; ++i) {
      if (class_to_ra_reg_range[i] == 0)
         class_to_ra_reg_range[i] = class_to_ra_reg_range[i - 1];
   }
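
   /* Illustrative example (not from the original source): with
    * base_reg_count == 128, the size-1 class contributes RA registers
    * [0, 128) and the size-2 class contributes [128, 255), so
    * class_to_ra_reg_range[1] == 128 and class_to_ra_reg_range[2] == 255.
    * A class's RA registers therefore live in the half-open range
    * [class_to_ra_reg_range[size - 1], class_to_ra_reg_range[size]).
    */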

   uint8_t *ra_reg_to_grf = ralloc_array(compiler, uint8_t, ra_reg_count);
   struct ra_regs *regs = ra_alloc_reg_set(compiler, ra_reg_count, false);
   if (devinfo->gen >= 6)
      ra_set_allocate_round_robin(regs);
   int *classes = ralloc_array(compiler, int, class_count);
   int aligned_bary_class = -1;

   /* Allocate space for q values.  We allocate class_count + 1 because we
    * want to leave room for the aligned barycentric class if we have it.
    */
   unsigned int **q_values = ralloc_array(compiler, unsigned int *,
                                          class_count + 1);
   for (int i = 0; i < class_count + 1; ++i)
      q_values[i] = ralloc_array(q_values, unsigned int, class_count + 1);

   /* Now, add the registers to their classes, and add the conflicts
    * between them and the base GRF registers (and also each other).
    */
   int reg = 0;
   int aligned_bary_base_reg = 0;
   int aligned_bary_reg_count = 0;
   for (int i = 0; i < class_count; i++) {
      int class_reg_count;
      if (devinfo->gen <= 5 && dispatch_width >= 16) {
         class_reg_count = (base_reg_count - (class_sizes[i] - 1)) / 2;

         /* See comment below.  The only difference here is that we are
          * dealing with pairs of registers instead of single registers.
          * Registers of odd sizes simply get rounded up. */
         for (int j = 0; j < class_count; j++)
            q_values[i][j] = (class_sizes[i] + 1) / 2 +
                             (class_sizes[j] + 1) / 2 - 1;
      } else {
         class_reg_count = base_reg_count - (class_sizes[i] - 1);

         /* From register_allocate.c:
          *
          *    q(B,C) (indexed by C, B is this register class) in
          *    Runeson/Nyström paper.  This is "how many registers of B could
          *    the worst choice register from C conflict with".
          *
          * If we just let the register allocation algorithm compute these
          * values, it is extremely expensive.  However, since all of our
          * registers are laid out, we can very easily compute them
          * ourselves.  View the register from C as fixed starting at GRF n
          * somewhere in the middle, and the register from B as sliding back
          * and forth.  Then the first register to conflict from B is the
          * one starting at n - class_size[B] + 1 and the last register to
          * conflict will start at n + class_size[B] - 1.  Therefore, the
          * number of conflicts from B is class_size[B] + class_size[C] - 1.
          *
          *   +-+-+-+-+-+-+     +-+-+-+-+-+-+
          * B | | | | | |n| --> | | | | | | |
          *   +-+-+-+-+-+-+     +-+-+-+-+-+-+
          */
         for (int j = 0; j < class_count; j++)
            q_values[i][j] = class_sizes[i] + class_sizes[j] - 1;
      }
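
      /* Illustrative example of the formula above (not from the original
       * source): for class_size[B] == 2 and class_size[C] == 3,
       * q(B,C) == 2 + 3 - 1 == 4: a fixed 3-GRF register starting at GRF n
       * overlaps exactly the 2-GRF registers starting at n - 1, n, n + 1
       * and n + 2.
       */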
      classes[i] = ra_alloc_reg_class(regs);

      /* Save this off for the aligned barycentric class at the end. */
      if (class_sizes[i] == int(aligned_bary_size(dispatch_width))) {
         aligned_bary_base_reg = reg;
         aligned_bary_reg_count = class_reg_count;
      }

      if (devinfo->gen <= 5 && dispatch_width >= 16) {
         for (int j = 0; j < class_reg_count; j++) {
            ra_class_add_reg(regs, classes[i], reg);

            ra_reg_to_grf[reg] = j * 2;

            for (int base_reg = j;
                 base_reg < j + (class_sizes[i] + 1) / 2;
                 base_reg++) {
               ra_add_reg_conflict(regs, base_reg, reg);
            }

            reg++;
         }
      } else {
         for (int j = 0; j < class_reg_count; j++) {
            ra_class_add_reg(regs, classes[i], reg);

            ra_reg_to_grf[reg] = j;

            for (int base_reg = j;
                 base_reg < j + class_sizes[i];
                 base_reg++) {
               ra_add_reg_conflict(regs, base_reg, reg);
            }

            reg++;
         }
      }
   }
   assert(reg == ra_reg_count);

   /* Applying transitivity to all of the base registers gives us the
    * appropriate register conflict relationships everywhere.
    */
   for (int reg = 0; reg < base_reg_count; reg++)
      ra_make_reg_conflicts_transitive(regs, reg);

   /* Add a special class for aligned barycentrics, which we'll put the
    * first source of LINTERP on so that we can do PLN on Gen <= 6.
    */
   if (devinfo->has_pln && (devinfo->gen == 6 ||
                            (dispatch_width == 8 && devinfo->gen <= 5))) {
      aligned_bary_class = ra_alloc_reg_class(regs);

      for (int i = 0; i < aligned_bary_reg_count; i++) {
         if ((ra_reg_to_grf[aligned_bary_base_reg + i] & 1) == 0) {
            ra_class_add_reg(regs, aligned_bary_class,
                             aligned_bary_base_reg + i);
         }
      }

      for (int i = 0; i < class_count; i++) {
         /* These are a little counter-intuitive because the barycentric
          * registers are required to be aligned while the registers they are
          * potentially interfering with are not.  In the case where the size
          * is even, the worst case is that the register is odd-aligned.  In
          * the odd-size case, it doesn't matter.
          */
         q_values[class_count][i] = class_sizes[i] / 2 +
                                    aligned_bary_size(dispatch_width) / 2;
         q_values[i][class_count] = class_sizes[i] +
                                    aligned_bary_size(dispatch_width) - 1;
      }
      q_values[class_count][class_count] = aligned_bary_size(dispatch_width) - 1;
   }
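
   /* Illustrative example (not from the original source): in SIMD16 on Gen6,
    * aligned_bary_size() == 4, so against a size-2 class the barycentric
    * class gets q_values[class_count][i] == 2 / 2 + 4 / 2 == 3 and
    * q_values[i][class_count] == 2 + 4 - 1 == 5.
    */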

   ra_set_finalize(regs, q_values);

   ralloc_free(q_values);

   compiler->fs_reg_sets[index].regs = regs;
   for (unsigned i = 0; i < ARRAY_SIZE(compiler->fs_reg_sets[index].classes); i++)
      compiler->fs_reg_sets[index].classes[i] = -1;
   for (int i = 0; i < class_count; i++)
      compiler->fs_reg_sets[index].classes[class_sizes[i] - 1] = classes[i];
   compiler->fs_reg_sets[index].ra_reg_to_grf = ra_reg_to_grf;
   compiler->fs_reg_sets[index].aligned_bary_class = aligned_bary_class;
}

void
brw_fs_alloc_reg_sets(struct brw_compiler *compiler)
{
   brw_alloc_reg_set(compiler, 8);
   brw_alloc_reg_set(compiler, 16);
   brw_alloc_reg_set(compiler, 32);
}

static int
count_to_loop_end(const bblock_t *block)
{
   if (block->end()->opcode == BRW_OPCODE_WHILE)
      return block->end_ip;

   int depth = 1;
   /* Skip the first block, since we don't want to count the DO the calling
    * function found.
    */
   for (block = block->next();
        depth > 0;
        block = block->next()) {
      if (block->start()->opcode == BRW_OPCODE_DO)
         depth++;
      if (block->end()->opcode == BRW_OPCODE_WHILE) {
         depth--;
         if (depth == 0)
            return block->end_ip;
      }
   }
   unreachable("not reached");
}

void fs_visitor::calculate_payload_ranges(int payload_node_count,
                                          int *payload_last_use_ip) const
{
   int loop_depth = 0;
   int loop_end_ip = 0;

   for (int i = 0; i < payload_node_count; i++)
      payload_last_use_ip[i] = -1;

   int ip = 0;
   foreach_block_and_inst(block, fs_inst, inst, cfg) {
      switch (inst->opcode) {
      case BRW_OPCODE_DO:
         loop_depth++;
         /* Since payload regs are def'd only at the start of the shader
          * execution, any uses of the payload within a loop mean the live
          * interval extends to the end of the outermost loop.  Find the ip of
          * the end now.
          */
         if (loop_depth == 1)
            loop_end_ip = count_to_loop_end(block);
         break;
      case BRW_OPCODE_WHILE:
         loop_depth--;
         break;
      default:
         break;
      }

      int use_ip;
      if (loop_depth > 0)
         use_ip = loop_end_ip;
      else
         use_ip = ip;

      /* Note that UNIFORM args have been turned into FIXED_GRF by
       * assign_curbe_setup(), and interpolation uses fixed hardware regs from
       * the start (see interp_reg()).
       */
      for (int i = 0; i < inst->sources; i++) {
         if (inst->src[i].file == FIXED_GRF) {
            int node_nr = inst->src[i].nr;
            if (node_nr >= payload_node_count)
               continue;

            for (unsigned j = 0; j < regs_read(inst, i); j++) {
               payload_last_use_ip[node_nr + j] = use_ip;
               assert(node_nr + j < unsigned(payload_node_count));
            }
         }
      }

      /* Special case instructions which have extra implied registers used. */
      switch (inst->opcode) {
      case CS_OPCODE_CS_TERMINATE:
         payload_last_use_ip[0] = use_ip;
         break;
      default:
         if (inst->eot) {
            /* We could omit this for the !inst->header_present case, except
             * that the simulator apparently incorrectly reads from g0/g1
             * instead of sideband.  It also really freaks out driver
             * developers to see g0 used in unusual places, so just always
             * reserve it.
             */
            payload_last_use_ip[0] = use_ip;
            payload_last_use_ip[1] = use_ip;
         }
         break;
      }

      ip++;
   }
}

class fs_reg_alloc {
public:
   fs_reg_alloc(fs_visitor *fs):
      fs(fs), devinfo(fs->devinfo), compiler(fs->compiler),
      live(fs->live_analysis.require()), g(NULL),
      have_spill_costs(false)
   {
      mem_ctx = ralloc_context(NULL);

      /* Most of this allocation was written for a reg_width of 1
       * (dispatch_width == 8).  In extending to SIMD16, the code was
       * left in place and it was converted to have the hardware
       * registers it's allocating be contiguous physical pairs of regs
       * for reg_width == 2.
       */
      int reg_width = fs->dispatch_width / 8;
      rsi = util_logbase2(reg_width);
      payload_node_count = ALIGN(fs->first_non_payload_grf, reg_width);

      /* Get payload IP information */
      payload_last_use_ip = ralloc_array(mem_ctx, int, payload_node_count);

      spill_vgrf_ip = NULL;
      spill_vgrf_ip_alloc = 0;
      spill_node_count = 0;
   }

   ~fs_reg_alloc()
   {
      ralloc_free(mem_ctx);
   }

   bool assign_regs(bool allow_spilling, bool spill_all);

private:
   void setup_live_interference(unsigned node,
                                int node_start_ip, int node_end_ip);
   void setup_inst_interference(const fs_inst *inst);

   void build_interference_graph(bool allow_spilling);
   void discard_interference_graph();

   void set_spill_costs();
   int choose_spill_reg();
   fs_reg alloc_spill_reg(unsigned size, int ip);
   void spill_reg(unsigned spill_reg);

   void *mem_ctx;

   fs_visitor *fs;
   const gen_device_info *devinfo;
   const brw_compiler *compiler;
   const fs_live_variables &live;

   /* Which compiler->fs_reg_sets[] to use */
   int rsi;

   ra_graph *g;
   bool have_spill_costs;

   int payload_node_count;
   int *payload_last_use_ip;

   int node_count;
   int first_payload_node;
   int first_mrf_hack_node;
   int grf127_send_hack_node;
   int first_vgrf_node;
   int first_spill_node;

   int *spill_vgrf_ip;
   int spill_vgrf_ip_alloc;
   int spill_node_count;
};

/**
 * Sets the mrf_used array to indicate which MRFs are used by the shader IR
 *
 * This is used in assign_regs() to decide which of the GRFs that we use as
 * MRFs on gen7 get normally register allocated, and in register spilling to
 * see if we can actually use MRFs to do spills without overwriting normal MRF
 * contents.
 */
static void
get_used_mrfs(const fs_visitor *v, bool *mrf_used)
{
   int reg_width = v->dispatch_width / 8;

   memset(mrf_used, 0, BRW_MAX_MRF(v->devinfo->gen) * sizeof(bool));

   foreach_block_and_inst(block, fs_inst, inst, v->cfg) {
      if (inst->dst.file == MRF) {
         int reg = inst->dst.nr & ~BRW_MRF_COMPR4;
         mrf_used[reg] = true;
         if (reg_width == 2) {
            if (inst->dst.nr & BRW_MRF_COMPR4) {
               mrf_used[reg + 4] = true;
            } else {
               mrf_used[reg + 1] = true;
            }
         }
      }

      if (inst->mlen > 0) {
         for (unsigned i = 0; i < inst->implied_mrf_writes(); i++) {
            mrf_used[inst->base_mrf + i] = true;
         }
      }
   }
}

/**
 * Maximum spill block size we expect to encounter in 32B units.
 *
 * This is somewhat arbitrary and doesn't necessarily limit the maximum
 * variable size that can be spilled -- A higher value will allow a
 * variable of a given size to be spilled more efficiently with a smaller
 * number of scratch messages, but will increase the likelihood of a
 * collision between the MRFs reserved for spilling and other MRFs used by
 * the program (and possibly increase GRF register pressure on platforms
 * without hardware MRFs), which could cause register allocation to fail.
 *
 * For the moment reserve just enough space so a register of 32 bit
 * component type and natural region width can be spilled without splitting
 * into multiple (force_writemask_all) scratch messages.
 */
static unsigned
spill_max_size(const backend_shader *s)
{
   /* FINISHME - On Gen7+ it should be possible to avoid this limit
    *            altogether by spilling directly from the temporary GRF
    *            allocated to hold the result of the instruction (and the
    *            scratch write header).
    */
   /* FINISHME - The shader's dispatch width probably belongs in
    *            backend_shader (or some nonexistent fs_shader class?)
    *            rather than in the visitor class.
    */
   return static_cast<const fs_visitor *>(s)->dispatch_width / 8;
}

/**
 * First MRF register available for spilling.
 */
static unsigned
spill_base_mrf(const backend_shader *s)
{
   return BRW_MAX_MRF(s->devinfo->gen) - spill_max_size(s) - 1;
}
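
/* Illustrative example (not from the original source): on Gen7
 * (BRW_MAX_MRF == 16) in SIMD16, spill_max_size() == 2, so
 * spill_base_mrf() == 16 - 2 - 1 == 13; spills then use m13 for the
 * message header and m14-m15 for the data.
 */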

void
fs_reg_alloc::setup_live_interference(unsigned node,
                                      int node_start_ip, int node_end_ip)
{
   /* Mark any virtual grf that is live between the start of the program and
    * the last use of a payload node interfering with that payload node.
    */
   for (int i = 0; i < payload_node_count; i++) {
      if (payload_last_use_ip[i] == -1)
         continue;

      /* Note that we use a <= comparison, unlike vgrfs_interfere(),
       * in order to not have to worry about the uniform issue described in
       * calculate_live_intervals().
       */
      if (node_start_ip <= payload_last_use_ip[i])
         ra_add_node_interference(g, node, first_payload_node + i);
   }

   /* If we have the MRF hack enabled, mark this node as interfering with all
    * MRF registers.
    */
   if (first_mrf_hack_node >= 0) {
      for (int i = spill_base_mrf(fs); i < BRW_MAX_MRF(devinfo->gen); i++)
         ra_add_node_interference(g, node, first_mrf_hack_node + i);
   }

   /* Add interference with every vgrf whose live range intersects this
    * node's.  We only need to look at nodes below this one as the reflexivity
    * of interference will take care of the rest.
    */
   for (unsigned n2 = first_vgrf_node;
        n2 < (unsigned)first_spill_node && n2 < node; n2++) {
      unsigned vgrf = n2 - first_vgrf_node;
      if (!(node_end_ip <= live.vgrf_start[vgrf] ||
            live.vgrf_end[vgrf] <= node_start_ip))
         ra_add_node_interference(g, node, n2);
   }
}

void
fs_reg_alloc::setup_inst_interference(const fs_inst *inst)
{
   /* Certain instructions can't safely use the same register for their
    * sources and destination.  Add interference.
    */
   if (inst->dst.file == VGRF && inst->has_source_and_destination_hazard()) {
      for (unsigned i = 0; i < inst->sources; i++) {
         if (inst->src[i].file == VGRF) {
            ra_add_node_interference(g, first_vgrf_node + inst->dst.nr,
                                        first_vgrf_node + inst->src[i].nr);
         }
      }
   }

   /* In 16-wide instructions we have an issue where a compressed
    * instruction is actually two instructions executed simultaneously.
    * It's actually ok to have the source and destination registers be
    * the same.  In this case, each instruction overwrites its own
    * source and there's no problem.  The real problem here is if the
    * source and destination registers are off by one.  Then you can end
    * up in a scenario where the first instruction overwrites the
    * source of the second instruction.  Since the compiler doesn't know
    * about this level of granularity, we simply make the source and
    * destination interfere.
    */
   if (inst->exec_size >= 16 && inst->dst.file == VGRF) {
      for (int i = 0; i < inst->sources; ++i) {
         if (inst->src[i].file == VGRF) {
            ra_add_node_interference(g, first_vgrf_node + inst->dst.nr,
                                        first_vgrf_node + inst->src[i].nr);
         }
      }
   }

   if (grf127_send_hack_node >= 0) {
      /* At Intel Broadwell PRM, vol 07, section "Instruction Set Reference",
       * subsection "EUISA Instructions", Send Message (page 990):
       *
       *    "r127 must not be used for return address when there is a src and
       *    dest overlap in send instruction."
       *
       * We avoid using grf127 as part of the destination of send messages by
       * adding a node interference to the grf127_send_hack_node.  This node
       * has a fixed assignment to grf127.
       *
       * We don't apply it to SIMD16 instructions because previous code avoids
       * any register overlap between sources and destination.
       */
      if (inst->exec_size < 16 && inst->is_send_from_grf() &&
          inst->dst.file == VGRF)
         ra_add_node_interference(g, first_vgrf_node + inst->dst.nr,
                                     grf127_send_hack_node);

      /* Spill instructions are generated as SEND messages from MRF, but
       * since Gen7+ supports sending from GRF the driver maps these MRF
       * registers onto GRFs.  The implementation reuses the destination of
       * the send message as a source, so because there will certainly be an
       * overlap, we create an interference between the destination and
       * grf127.
       */
      if ((inst->opcode == SHADER_OPCODE_GEN7_SCRATCH_READ ||
           inst->opcode == SHADER_OPCODE_GEN4_SCRATCH_READ) &&
          inst->dst.file == VGRF)
         ra_add_node_interference(g, first_vgrf_node + inst->dst.nr,
                                     grf127_send_hack_node);
   }

   /* From the Skylake PRM Vol. 2a docs for sends:
    *
    *    "It is required that the second block of GRFs does not overlap with
    *    the first block."
    *
    * Normally, this is taken care of by fixup_sends_duplicate_payload() but
    * in the case where one of the registers is an undefined value, the
    * register allocator may decide that they don't interfere even though
    * they're used as sources in the same instruction.  We also need to add
    * the interference here.
    */
   if (devinfo->gen >= 9) {
      if (inst->opcode == SHADER_OPCODE_SEND && inst->ex_mlen > 0 &&
          inst->src[2].file == VGRF && inst->src[3].file == VGRF &&
          inst->src[2].nr != inst->src[3].nr)
         ra_add_node_interference(g, first_vgrf_node + inst->src[2].nr,
                                     first_vgrf_node + inst->src[3].nr);
   }

   /* When we do send-from-GRF for FB writes, we need to ensure that the last
    * write instruction sends from a high register.  This is because the
    * vertex fetcher wants to start filling the low payload registers while
    * the pixel data port is still working on writing out the memory.  If we
    * don't do this, we get rendering artifacts.
    *
    * We could just do "something high".  Instead, we just pick the highest
    * register that works.
    */
   if (inst->eot) {
      const int vgrf = inst->opcode == SHADER_OPCODE_SEND ?
                       inst->src[2].nr : inst->src[0].nr;
      int size = fs->alloc.sizes[vgrf];
      int reg = compiler->fs_reg_sets[rsi].class_to_ra_reg_range[size] - 1;

      if (first_mrf_hack_node >= 0) {
         /* If something happened to spill, we want to push the EOT send
          * register early enough in the register file that we don't
          * conflict with any used MRF hack registers.
          */
         reg -= BRW_MAX_MRF(devinfo->gen) - spill_base_mrf(fs);
      } else if (grf127_send_hack_node >= 0) {
         /* Avoid r127 which might be unusable if the node was previously
          * written by a SIMD8 SEND message with source/destination overlap.
          */
         reg--;
      }

      ra_set_node_reg(g, first_vgrf_node + vgrf, reg);
   }
}

void
fs_reg_alloc::build_interference_graph(bool allow_spilling)
{
   /* Compute the RA node layout */
   node_count = 0;
   first_payload_node = node_count;
   node_count += payload_node_count;
   if (devinfo->gen >= 7 && allow_spilling) {
      first_mrf_hack_node = node_count;
      node_count += BRW_MAX_GRF - GEN7_MRF_HACK_START;
   } else {
      first_mrf_hack_node = -1;
   }
   if (devinfo->gen >= 8) {
      grf127_send_hack_node = node_count;
      node_count++;
   } else {
      grf127_send_hack_node = -1;
   }
   first_vgrf_node = node_count;
   node_count += fs->alloc.count;
   first_spill_node = node_count;
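
   /* Illustrative node layout (not from the original source): on Gen7 with
    * spilling allowed, payload_node_count == 4 and fs->alloc.count == 10
    * would give nodes [0, 4) for the payload, [4, 20) for the 16 MRF hack
    * registers (BRW_MAX_GRF - GEN7_MRF_HACK_START == 128 - 112), no grf127
    * node (Gen8+ only), [20, 30) for the VGRFs, and spill nodes from 30 up.
    */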

   fs->calculate_payload_ranges(payload_node_count,
                                payload_last_use_ip);

   assert(g == NULL);
   g = ra_alloc_interference_graph(compiler->fs_reg_sets[rsi].regs, node_count);
   ralloc_steal(mem_ctx, g);

   /* Set up the payload nodes */
   for (int i = 0; i < payload_node_count; i++) {
      /* Mark each payload node as being allocated to its physical register.
       *
       * The alternative would be to have per-physical-register classes, which
       * would just be silly.
       */
      if (devinfo->gen <= 5 && fs->dispatch_width >= 16) {
         /* We have to divide by 2 here because we only have even numbered
          * registers.  Some of the payload registers will be odd, but
          * that's ok because their physical register numbers have already
          * been assigned.  The only thing this is used for is interference.
          */
         ra_set_node_reg(g, first_payload_node + i, i / 2);
      } else {
         ra_set_node_reg(g, first_payload_node + i, i);
      }
   }

   if (first_mrf_hack_node >= 0) {
      /* Mark each MRF reg node as being allocated to its physical
       * register.
       *
       * The alternative would be to have per-physical-register classes,
       * which would just be silly.
       */
      for (int i = 0; i < BRW_MAX_MRF(devinfo->gen); i++) {
         ra_set_node_reg(g, first_mrf_hack_node + i,
                            GEN7_MRF_HACK_START + i);
      }
   }

   if (grf127_send_hack_node >= 0)
      ra_set_node_reg(g, grf127_send_hack_node, 127);

   /* Specify the classes of each virtual register. */
   for (unsigned i = 0; i < fs->alloc.count; i++) {
      unsigned size = fs->alloc.sizes[i];

      assert(size <= ARRAY_SIZE(compiler->fs_reg_sets[rsi].classes) &&
             "Register allocation relies on split_virtual_grfs()");

      ra_set_node_class(g, first_vgrf_node + i,
                        compiler->fs_reg_sets[rsi].classes[size - 1]);
   }

   /* Special case: on pre-Gen7 hardware that supports PLN, the second operand
    * of a PLN instruction needs to be an even-numbered register, so we have a
    * special register class aligned_bary_class to handle this case.
    */
   if (compiler->fs_reg_sets[rsi].aligned_bary_class >= 0) {
      foreach_block_and_inst(block, fs_inst, inst, fs->cfg) {
         if (inst->opcode == FS_OPCODE_LINTERP && inst->src[0].file == VGRF &&
             fs->alloc.sizes[inst->src[0].nr] ==
                aligned_bary_size(fs->dispatch_width)) {
            ra_set_node_class(g, first_vgrf_node + inst->src[0].nr,
                              compiler->fs_reg_sets[rsi].aligned_bary_class);
         }
      }
   }

   /* Add interference based on the live range of the register */
   for (unsigned i = 0; i < fs->alloc.count; i++) {
      setup_live_interference(first_vgrf_node + i,
                              live.vgrf_start[i],
                              live.vgrf_end[i]);
   }

   /* Add interference based on the instructions in which a register is used.
    */
   foreach_block_and_inst(block, fs_inst, inst, fs->cfg)
      setup_inst_interference(inst);
}

void
fs_reg_alloc::discard_interference_graph()
{
   ralloc_free(g);
   g = NULL;
   have_spill_costs = false;
}

static void
emit_unspill(const fs_builder &bld, fs_reg dst,
             uint32_t spill_offset, unsigned count)
{
   const gen_device_info *devinfo = bld.shader->devinfo;
   const unsigned reg_size = dst.component_size(bld.dispatch_width()) /
                             REG_SIZE;
   assert(count % reg_size == 0);

   for (unsigned i = 0; i < count / reg_size; i++) {
      /* The Gen7 descriptor-based offset is 12 bits of HWORD units.  Because
       * the Gen7-style scratch block read is hardwired to BTI 255, on Gen9+
       * it would cause the DC to do an IA-coherent read, which largely
       * outweighs the slight advantage from not having to provide the address
       * as part of the message header, so we're better off using plain old
       * oword block reads.
       */
      bool gen7_read = (devinfo->gen >= 7 && devinfo->gen < 9 &&
                        spill_offset < (1 << 12) * REG_SIZE);
      fs_inst *unspill_inst = bld.emit(gen7_read ?
                                       SHADER_OPCODE_GEN7_SCRATCH_READ :
                                       SHADER_OPCODE_GEN4_SCRATCH_READ,
                                       dst);
      unspill_inst->offset = spill_offset;

      if (!gen7_read) {
         unspill_inst->base_mrf = spill_base_mrf(bld.shader);
         unspill_inst->mlen = 1; /* header contains offset */
      }

      dst.offset += reg_size * REG_SIZE;
      spill_offset += reg_size * REG_SIZE;
   }
}

static void
emit_spill(const fs_builder &bld, fs_reg src,
           uint32_t spill_offset, unsigned count)
{
   const unsigned reg_size = src.component_size(bld.dispatch_width()) /
                             REG_SIZE;
   assert(count % reg_size == 0);

   for (unsigned i = 0; i < count / reg_size; i++) {
      fs_inst *spill_inst =
         bld.emit(SHADER_OPCODE_GEN4_SCRATCH_WRITE, bld.null_reg_f(), src);
      src.offset += reg_size * REG_SIZE;
      spill_inst->offset = spill_offset + i * reg_size * REG_SIZE;
      spill_inst->mlen = 1 + reg_size; /* header, value */
      spill_inst->base_mrf = spill_base_mrf(bld.shader);
   }
}

void
fs_reg_alloc::set_spill_costs()
{
   float block_scale = 1.0;
   float spill_costs[fs->alloc.count];
   bool no_spill[fs->alloc.count];

   for (unsigned i = 0; i < fs->alloc.count; i++) {
      spill_costs[i] = 0.0;
      no_spill[i] = false;
   }

   /* Calculate costs for spilling nodes.  Call it a cost of 1 per
    * spill/unspill we'll have to do, and guess that the insides of
    * loops run 10 times.
    */
   foreach_block_and_inst(block, fs_inst, inst, fs->cfg) {
      for (unsigned int i = 0; i < inst->sources; i++) {
         if (inst->src[i].file == VGRF)
            spill_costs[inst->src[i].nr] += regs_read(inst, i) * block_scale;
      }

      if (inst->dst.file == VGRF)
         spill_costs[inst->dst.nr] += regs_written(inst) * block_scale;

      switch (inst->opcode) {
      case BRW_OPCODE_DO:
         block_scale *= 10;
         break;

      case BRW_OPCODE_WHILE:
         block_scale /= 10;
         break;

      case BRW_OPCODE_IF:
      case BRW_OPCODE_IFF:
         block_scale *= 0.5;
         break;

      case BRW_OPCODE_ENDIF:
         block_scale /= 0.5;
         break;

      case SHADER_OPCODE_GEN4_SCRATCH_WRITE:
         if (inst->src[0].file == VGRF)
            no_spill[inst->src[0].nr] = true;
         break;

      case SHADER_OPCODE_GEN4_SCRATCH_READ:
      case SHADER_OPCODE_GEN7_SCRATCH_READ:
         if (inst->dst.file == VGRF)
            no_spill[inst->dst.nr] = true;
         break;

      default:
         break;
      }
   }

   for (unsigned i = 0; i < fs->alloc.count; i++) {
      /* Do the no_spill check first.  Registers that are used as spill
       * temporaries may have been allocated after we calculated liveness so
       * we shouldn't look their liveness up.  Fortunately, they're always
       * used in SCRATCH_READ/WRITE instructions so they'll always be flagged
       * no_spill.
       */
      if (no_spill[i])
         continue;

      int live_length = live.vgrf_end[i] - live.vgrf_start[i];
      if (live_length <= 0)
         continue;

      /* Divide the cost (in number of spills/fills) by the log of the length
       * of the live range of the register.  This will encourage spill logic
       * to spill long-living things before spilling short-lived things where
       * spilling is less likely to actually do us any good.  We use the log
       * of the length because it will fall off very quickly and not cause us
       * to spill medium length registers with more uses.
       */
      float adjusted_cost = spill_costs[i] / logf(live_length);
      ra_set_node_spill_cost(g, first_vgrf_node + i, adjusted_cost);
   }

   have_spill_costs = true;
}
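
/* Illustrative example of the log scaling above (not from the original
 * source): a VGRF touched 12 times at block_scale 1.0 with a 150-ip live
 * range gets an adjusted cost of 12 / logf(150) ~= 2.4, while the same 12
 * uses over a 10-ip range cost 12 / logf(10) ~= 5.2, so the longer-lived
 * register is the cheaper (preferred) spill candidate.
 */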

int
fs_reg_alloc::choose_spill_reg()
{
   if (!have_spill_costs)
      set_spill_costs();

   int node = ra_get_best_spill_node(g);
   if (node < 0)
      return -1;

   assert(node >= first_vgrf_node);
   return node - first_vgrf_node;
}

fs_reg
fs_reg_alloc::alloc_spill_reg(unsigned size, int ip)
{
   int vgrf = fs->alloc.allocate(size);
   int n = ra_add_node(g, compiler->fs_reg_sets[rsi].classes[size - 1]);
   assert(n == first_vgrf_node + vgrf);
   assert(n == first_spill_node + spill_node_count);

   setup_live_interference(n, ip - 1, ip + 1);

   /* Add interference between this spill node and any other spill nodes for
    * the same instruction.
    */
   for (int s = 0; s < spill_node_count; s++) {
      if (spill_vgrf_ip[s] == ip)
         ra_add_node_interference(g, n, first_spill_node + s);
   }

   /* Add this spill node to the list for next time */
   if (spill_node_count >= spill_vgrf_ip_alloc) {
      if (spill_vgrf_ip_alloc == 0)
         spill_vgrf_ip_alloc = 16;
      else
         spill_vgrf_ip_alloc *= 2;
      spill_vgrf_ip = reralloc(mem_ctx, spill_vgrf_ip, int,
                               spill_vgrf_ip_alloc);
   }
   spill_vgrf_ip[spill_node_count++] = ip;

   return fs_reg(VGRF, vgrf);
}

void
fs_reg_alloc::spill_reg(unsigned spill_reg)
{
   int size = fs->alloc.sizes[spill_reg];
   unsigned int spill_offset = fs->last_scratch;
   assert(ALIGN(spill_offset, 16) == spill_offset); /* oword read/write req. */

   /* Spills may use MRFs 13-15 in the SIMD16 case.  Our texturing is done
    * using up to 11 MRFs starting from either m1 or m2, and fb writes can use
    * up to m13 (gen6+ simd16: 2 header + 8 color + 2 src0alpha + 2 omask) or
    * m15 (gen4-5 simd16: 2 header + 8 color + 1 aads + 2 src depth + 2 dst
    * depth), starting from m1.  In summary: We may not be able to spill in
    * SIMD16 mode, because we'd stomp the FB writes.
    */
   if (!fs->spilled_any_registers) {
      bool mrf_used[BRW_MAX_MRF(devinfo->gen)];
      get_used_mrfs(fs, mrf_used);

      for (int i = spill_base_mrf(fs); i < BRW_MAX_MRF(devinfo->gen); i++) {
         if (mrf_used[i]) {
            fs->fail("Register spilling not supported with m%d used", i);
            return;
         }
      }

      fs->spilled_any_registers = true;
   }

   fs->last_scratch += size * REG_SIZE;

   /* We're about to replace all uses of this register.  It no longer
    * conflicts with anything so we can get rid of its interference.
    */
   ra_set_node_spill_cost(g, first_vgrf_node + spill_reg, 0);
   ra_reset_node_interference(g, first_vgrf_node + spill_reg);

   /* Generate spill/unspill instructions for the objects being
    * spilled.  Right now, we spill or unspill the whole thing to a
    * virtual grf of the same size.  For most instructions, though, we
    * could just spill/unspill the GRF being accessed.
    */
   int ip = 0;
   foreach_block_and_inst (block, fs_inst, inst, fs->cfg) {
      const fs_builder ibld = fs_builder(fs, block, inst);
      exec_node *before = inst->prev;
      exec_node *after = inst->next;

      for (unsigned int i = 0; i < inst->sources; i++) {
         if (inst->src[i].file == VGRF &&
             inst->src[i].nr == spill_reg) {
            int count = regs_read(inst, i);
            int subset_spill_offset = spill_offset +
               ROUND_DOWN_TO(inst->src[i].offset, REG_SIZE);
            fs_reg unspill_dst = alloc_spill_reg(count, ip);

            inst->src[i].nr = unspill_dst.nr;
            inst->src[i].offset %= REG_SIZE;

            /* We read the largest power-of-two divisor of the register count
             * (because only POT scratch read blocks are allowed by the
             * hardware) up to the maximum supported block size.
             */
            const unsigned width =
               MIN2(32, 1u << (ffs(MAX2(1, count) * 8) - 1));
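
            /* Illustrative example (not from the original source): for
             * count == 3 GRFs, ffs(3 * 8) - 1 == 3, so width ==
             * MIN2(32, 1u << 3) == 8 channels (the largest power-of-two
             * divisor); for count == 4, ffs(32) - 1 == 5 gives width == 32.
             */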

            /* Set exec_all() on unspill messages under the (rather
             * pessimistic) assumption that there is no one-to-one
             * correspondence between channels of the spilled variable in
             * scratch space and the scratch read message, which operates on
             * 32 bit channels.  It shouldn't hurt in any case because the
             * unspill destination is a block-local temporary.
             */
            emit_unspill(ibld.exec_all().group(width, 0),
                         unspill_dst, subset_spill_offset, count);
         }
      }

      if (inst->dst.file == VGRF &&
          inst->dst.nr == spill_reg) {
         int subset_spill_offset = spill_offset +
            ROUND_DOWN_TO(inst->dst.offset, REG_SIZE);
         fs_reg spill_src = alloc_spill_reg(regs_written(inst), ip);

         inst->dst.nr = spill_src.nr;
         inst->dst.offset %= REG_SIZE;

         /* If we're immediately spilling the register, we should not use
          * destination dependency hints.  Doing so will cause the GPU to
          * try to read and write the register at the same time and may
          * hang the GPU.
          */
         inst->no_dd_clear = false;
         inst->no_dd_check = false;

         /* Calculate the execution width of the scratch messages (which work
          * in terms of 32 bit components so we have a fixed number of eight
          * channels per spilled register).  We attempt to write one
          * exec_size-wide component of the variable at a time without
          * exceeding the maximum number of (fake) MRF registers reserved for
          * spills.
          */
         const unsigned width = 8 * MIN2(
            DIV_ROUND_UP(inst->dst.component_size(inst->exec_size), REG_SIZE),
            spill_max_size(fs));
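
         /* Illustrative example (not from the original source): a SIMD16
          * instruction with a 32-bit destination has component_size(16) ==
          * 64 bytes == 2 GRFs, and spill_max_size() == 2, so width ==
          * 8 * MIN2(2, 2) == 16 channels, matching exec_size so a
          * per-channel spill is possible.
          */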

         /* Spills should only write data initialized by the instruction for
          * whichever channels are enabled in the execution mask.  If that's
          * not possible we'll have to emit a matching unspill before the
          * instruction and set force_writemask_all on the spill.
          */
         const bool per_channel =
            inst->dst.is_contiguous() && type_sz(inst->dst.type) == 4 &&
            inst->exec_size == width;

         /* Builder used to emit the scratch messages. */
         const fs_builder ubld = ibld.exec_all(!per_channel).group(width, 0);

         /* If our write is going to affect just part of the
          * regs_written(inst), then we need to unspill the destination since
          * we write back out all of the regs_written().  If the original
          * instruction had force_writemask_all set and is not a partial
          * write, there should be no need for the unspill since the
          * instruction will be overwriting the whole destination in any case.
          */
         if (inst->is_partial_write() ||
             (!inst->force_writemask_all && !per_channel))
            emit_unspill(ubld, spill_src, subset_spill_offset,
                         regs_written(inst));

         emit_spill(ubld.at(block, inst->next), spill_src,
                    subset_spill_offset, regs_written(inst));
      }

      for (fs_inst *inst = (fs_inst *)before->next;
           inst != after; inst = (fs_inst *)inst->next)
         setup_inst_interference(inst);

      /* We don't advance the ip for scratch read/write instructions
       * because we consider them to have the same ip as the instruction
       * we're spilling around for the purposes of interference.
       */
      if (inst->opcode != SHADER_OPCODE_GEN4_SCRATCH_WRITE &&
          inst->opcode != SHADER_OPCODE_GEN4_SCRATCH_READ &&
          inst->opcode != SHADER_OPCODE_GEN7_SCRATCH_READ)
         ip++;
   }
}

bool
fs_reg_alloc::assign_regs(bool allow_spilling, bool spill_all)
{
   build_interference_graph(fs->spilled_any_registers || spill_all);

   bool spilled = false;
   while (1) {
      /* Debug of register spilling: Go spill everything. */
      if (unlikely(spill_all)) {
         int reg = choose_spill_reg();
         if (reg != -1) {
            spill_reg(reg);
            continue;
         }
      }

      if (ra_allocate(g))
         break;

      if (!allow_spilling)
         return false;

      /* Failed to allocate registers.  Spill a reg, and the caller will
       * loop back into here to try again.
       */
      int reg = choose_spill_reg();
      if (reg == -1)
         return false;

      /* If we're going to spill but we've never spilled before, we need to
       * re-build the interference graph with MRFs enabled to allow spilling.
       */
      if (!fs->spilled_any_registers) {
         discard_interference_graph();
         build_interference_graph(true);
      }

      spilled = true;

      spill_reg(reg);
   }

   if (spilled)
      fs->invalidate_analysis(DEPENDENCY_INSTRUCTIONS | DEPENDENCY_VARIABLES);

   /* Get the chosen virtual registers for each node, and map virtual
    * regs in the register classes back down to real hardware reg
    * numbers.
    */
   unsigned hw_reg_mapping[fs->alloc.count];
   fs->grf_used = fs->first_non_payload_grf;
   for (unsigned i = 0; i < fs->alloc.count; i++) {
      int reg = ra_get_node_reg(g, first_vgrf_node + i);

      hw_reg_mapping[i] = compiler->fs_reg_sets[rsi].ra_reg_to_grf[reg];
      fs->grf_used = MAX2(fs->grf_used,
                          hw_reg_mapping[i] + fs->alloc.sizes[i]);
   }

   foreach_block_and_inst(block, fs_inst, inst, fs->cfg) {
      assign_reg(hw_reg_mapping, &inst->dst);
      for (int i = 0; i < inst->sources; i++) {
         assign_reg(hw_reg_mapping, &inst->src[i]);
      }
   }

   fs->alloc.count = fs->grf_used;

   return true;
}

bool
fs_visitor::assign_regs(bool allow_spilling, bool spill_all)
{
   fs_reg_alloc alloc(this);
   bool success = alloc.assign_regs(allow_spilling, spill_all);
   if (!success && allow_spilling) {
      fail("no register to spill:\n");
      dump_instructions(NULL);
   }
   return success;
}