/*
 * Copyright © 2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include "brw_fs.h"
#include "brw_cfg.h"
#include "glsl/nir/glsl_types.h"
#include "glsl/ir_optimization.h"

static void
assign_reg(unsigned *reg_hw_locations, fs_reg *reg)
{
   if (reg->file == GRF) {
      reg->reg = reg_hw_locations[reg->reg] + reg->reg_offset;
      reg->reg_offset = 0;
   }
}

void
fs_visitor::assign_regs_trivial()
{
   unsigned hw_reg_mapping[this->alloc.count + 1];
   unsigned i;
   int reg_width = dispatch_width / 8;

   /* Note that compressed instructions require alignment to 2 registers. */
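   /* Illustrative example (numbers not from the original comment): in SIMD16,
    * reg_width == 2, so an odd first_non_payload_grf of 13 would be aligned
    * up to GRF 14 before the virtual GRFs are packed after it.
    */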
   hw_reg_mapping[0] = ALIGN(this->first_non_payload_grf, reg_width);
   for (i = 1; i <= this->alloc.count; i++) {
      hw_reg_mapping[i] = (hw_reg_mapping[i - 1] +
                           this->alloc.sizes[i - 1]);
   }
   this->grf_used = hw_reg_mapping[this->alloc.count];

   foreach_block_and_inst(block, fs_inst, inst, cfg) {
      assign_reg(hw_reg_mapping, &inst->dst);
      for (i = 0; i < inst->sources; i++) {
         assign_reg(hw_reg_mapping, &inst->src[i]);
      }
   }

   if (this->grf_used >= max_grf) {
      fail("Ran out of regs on trivial allocator (%d/%d)\n",
           this->grf_used, max_grf);
   } else {
      this->alloc.count = this->grf_used;
   }
}

static void
brw_alloc_reg_set(struct brw_compiler *compiler, int dispatch_width)
{
   const struct brw_device_info *devinfo = compiler->devinfo;
   int base_reg_count = BRW_MAX_GRF;
   int index = (dispatch_width / 8) - 1;

   if (dispatch_width > 8 && devinfo->gen >= 7) {
      /* For IVB+, we don't need the PLN hacks or the even-reg alignment in
       * SIMD16.  Therefore, we can use the exact same register sets for
       * SIMD16 as we do for SIMD8 and we don't need to recalculate them.
       */
      compiler->fs_reg_sets[index] = compiler->fs_reg_sets[0];
      return;
   }

   /* The registers used to make up almost all values handled in the compiler
    * are a scalar value occupying a single register (or 2 registers in the
    * case of SIMD16, which is handled by dividing base_reg_count by 2 and
    * multiplying allocated register numbers by 2).  Things that were
    * aggregates of scalar values at the GLSL level were split to scalar
    * values by split_virtual_grfs().
    *
    * However, texture SEND messages return a series of contiguous registers
    * to write into.  We currently always ask for 4 registers, but we may
    * convert that to use less some day.
    *
    * Additionally, on gen5 we need aligned pairs of registers for the PLN
    * instruction, and on gen4 we need 8 contiguous regs for workaround simd16
    * texturing.
    *
    * So we have a need for classes for 1, 2, 4, and 8 registers currently,
    * and we add in '3' to make indexing the array easier for the common case
    * (since we'll probably want it for texturing later).
    *
    * And, on gen7 and newer, we do texturing SEND messages from GRFs, which
    * means that we may need any size up to the sampler message size limit (11
    * regs).
    */
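   /* Concretely (hedged on the actual value of MAX_VGRF_SIZE): the loops
    * below produce class_sizes = {1, 2, ..., MAX_VGRF_SIZE} on gen7+, and
    * class_sizes = {1, 2, 3, 4, 8} on gen4/5.
    */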
   int class_count;
   int class_sizes[MAX_VGRF_SIZE];

   if (devinfo->gen >= 7) {
      for (class_count = 0; class_count < MAX_VGRF_SIZE; class_count++)
         class_sizes[class_count] = class_count + 1;
   } else {
      for (class_count = 0; class_count < 4; class_count++)
         class_sizes[class_count] = class_count + 1;
      class_sizes[class_count++] = 8;
   }

   memset(compiler->fs_reg_sets[index].class_to_ra_reg_range, 0,
          sizeof(compiler->fs_reg_sets[index].class_to_ra_reg_range));
   int *class_to_ra_reg_range = compiler->fs_reg_sets[index].class_to_ra_reg_range;

   /* Compute the total number of registers across all classes. */
   int ra_reg_count = 0;
   for (int i = 0; i < class_count; i++) {
      if (devinfo->gen <= 5 && dispatch_width == 16) {
         /* From the G45 PRM:
          *
          * In order to reduce the hardware complexity, the following
          * rules and restrictions apply to the compressed instruction:
          * ...
          * * Operand Alignment Rule: With the exceptions listed below, a
          *   source/destination operand in general should be aligned to
          *   even 256-bit physical register with a region size equal to
          *   two 256-bit physical registers.
          */
         ra_reg_count += (base_reg_count - (class_sizes[i] - 1)) / 2;
      } else {
         ra_reg_count += base_reg_count - (class_sizes[i] - 1);
      }
      /* Mark the last register. We'll fill in the beginnings later. */
      class_to_ra_reg_range[class_sizes[i]] = ra_reg_count;
   }

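   /* Worked example of the counts above (assuming base_reg_count == 128,
    * i.e. BRW_MAX_GRF): a class of size 2 has 128 - 1 = 127 possible start
    * registers, but only (128 - 1) / 2 = 63 when gen4/5 SIMD16 restricts
    * starts to even-aligned register pairs.
    */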
   /* Fill out the rest of the range markers */
   for (int i = 1; i < 17; ++i) {
      if (class_to_ra_reg_range[i] == 0)
         class_to_ra_reg_range[i] = class_to_ra_reg_range[i-1];
   }

   uint8_t *ra_reg_to_grf = ralloc_array(compiler, uint8_t, ra_reg_count);
   struct ra_regs *regs = ra_alloc_reg_set(compiler, ra_reg_count, false);
   if (devinfo->gen >= 6)
      ra_set_allocate_round_robin(regs);
   int *classes = ralloc_array(compiler, int, class_count);
   int aligned_pairs_class = -1;

   /* Allocate space for q values.  We allocate class_count + 1 because we
    * want to leave room for the aligned pairs class if we have it. */
   unsigned int **q_values = ralloc_array(compiler, unsigned int *,
                                          class_count + 1);
   for (int i = 0; i < class_count + 1; ++i)
      q_values[i] = ralloc_array(q_values, unsigned int, class_count + 1);

   /* Now, add the registers to their classes, and add the conflicts
    * between them and the base GRF registers (and also each other).
    */
   int reg = 0;
   int pairs_base_reg = 0;
   int pairs_reg_count = 0;
   for (int i = 0; i < class_count; i++) {
      int class_reg_count;
      if (devinfo->gen <= 5 && dispatch_width == 16) {
         class_reg_count = (base_reg_count - (class_sizes[i] - 1)) / 2;

         /* See comment below.  The only difference here is that we are
          * dealing with pairs of registers instead of single registers.
          * Registers of odd sizes simply get rounded up. */
         for (int j = 0; j < class_count; j++)
            q_values[i][j] = (class_sizes[i] + 1) / 2 +
                             (class_sizes[j] + 1) / 2 - 1;
      } else {
         class_reg_count = base_reg_count - (class_sizes[i] - 1);

         /* From register_allocate.c:
          *
          * q(B,C) (indexed by C, B is this register class) in
          * Runeson/Nyström paper.  This is "how many registers of B could
          * the worst choice register from C conflict with".
          *
          * If we just let the register allocation algorithm compute these
          * values, it is extremely expensive.  However, since all of our
          * registers are laid out, we can very easily compute them
          * ourselves.  View the register from C as fixed starting at GRF n
          * somewhere in the middle, and the register from B as sliding back
          * and forth.  Then the first register to conflict from B is the
          * one starting at n - class_size[B] + 1 and the last register to
          * conflict will start at n + class_size[C] - 1.  Therefore, the
          * number of conflicts from B is class_size[B] + class_size[C] - 1.
          *
          *   +-+-+-+-+-+-+     +-+-+-+-+-+-+
          * B | | | | | |n| --> | | | | | | |
          *   +-+-+-+-+-+-+     +-+-+-+-+-+-+
          *
          *   +-+-+-+-+-+
          * C |n| | | | |
          *   +-+-+-+-+-+
          */
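         /* Worked example of the formula above (illustrative sizes): with
          * class_size[B] == 2 and class_size[C] == 4, B registers starting
          * anywhere from n - 1 through n + 3 overlap the C register fixed
          * at n, so q(B,C) = 2 + 4 - 1 = 5.
          */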
         for (int j = 0; j < class_count; j++)
            q_values[i][j] = class_sizes[i] + class_sizes[j] - 1;
      }

      classes[i] = ra_alloc_reg_class(regs);

      /* Save this off for the aligned pair class at the end. */
      if (class_sizes[i] == 2) {
         pairs_base_reg = reg;
         pairs_reg_count = class_reg_count;
      }

      if (devinfo->gen <= 5 && dispatch_width == 16) {
         for (int j = 0; j < class_reg_count; j++) {
            ra_class_add_reg(regs, classes[i], reg);

            ra_reg_to_grf[reg] = j * 2;

            for (int base_reg = j;
                 base_reg < j + (class_sizes[i] + 1) / 2;
                 base_reg++) {
               ra_add_reg_conflict(regs, base_reg, reg);
            }

            reg++;
         }
      } else {
         for (int j = 0; j < class_reg_count; j++) {
            ra_class_add_reg(regs, classes[i], reg);

            ra_reg_to_grf[reg] = j;

            for (int base_reg = j;
                 base_reg < j + class_sizes[i];
                 base_reg++) {
               ra_add_reg_conflict(regs, base_reg, reg);
            }

            reg++;
         }
      }
   }

   assert(reg == ra_reg_count);

   /* Applying transitivity to all of the base registers gives us the
    * appropriate register conflict relationships everywhere.
    */
   for (int reg = 0; reg < base_reg_count; reg++)
      ra_make_reg_conflicts_transitive(regs, reg);

   /* Add a special class for aligned pairs, which we'll put delta_xy
    * in on Gen <= 6 so that we can do PLN.
    */
   if (devinfo->has_pln && dispatch_width == 8 && devinfo->gen <= 6) {
      aligned_pairs_class = ra_alloc_reg_class(regs);

      for (int i = 0; i < pairs_reg_count; i++) {
         if ((ra_reg_to_grf[pairs_base_reg + i] & 1) == 0) {
            ra_class_add_reg(regs, aligned_pairs_class, pairs_base_reg + i);
         }
      }

      for (int i = 0; i < class_count; i++) {
         /* These are a little counter-intuitive because the pair registers
          * are required to be aligned while the registers they are
          * potentially interfering with are not.  In the case where the
          * size is even, the worst-case is that the register is
          * odd-aligned.  In the odd-size case, it doesn't matter.
          */
         q_values[class_count][i] = class_sizes[i] / 2 + 1;
         q_values[i][class_count] = class_sizes[i] + 1;
      }
      q_values[class_count][class_count] = 1;
   }

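   /* Spot-check of the aligned-pair q values (illustrative size): a size-4
    * register in the worst (odd-aligned) case overlaps 4 / 2 + 1 = 3 aligned
    * pairs, while an aligned pair can conflict with 4 + 1 = 5 size-4
    * registers.
    */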
   ra_set_finalize(regs, q_values);

   ralloc_free(q_values);

   compiler->fs_reg_sets[index].regs = regs;
   for (unsigned i = 0; i < ARRAY_SIZE(compiler->fs_reg_sets[index].classes); i++)
      compiler->fs_reg_sets[index].classes[i] = -1;
   for (int i = 0; i < class_count; i++)
      compiler->fs_reg_sets[index].classes[class_sizes[i] - 1] = classes[i];
   compiler->fs_reg_sets[index].ra_reg_to_grf = ra_reg_to_grf;
   compiler->fs_reg_sets[index].aligned_pairs_class = aligned_pairs_class;
}

void
brw_fs_alloc_reg_sets(struct brw_compiler *compiler)
{
   brw_alloc_reg_set(compiler, 8);
   brw_alloc_reg_set(compiler, 16);
}

static int
count_to_loop_end(const bblock_t *block)
{
   if (block->end()->opcode == BRW_OPCODE_WHILE)
      return block->end_ip;

   int depth = 1;
   /* Skip the first block, since we don't want to count the DO the calling
    * instruction comes from.
    */
   for (block = block->next();
        depth > 0;
        block = block->next()) {
      if (block->start()->opcode == BRW_OPCODE_DO)
         depth++;
      if (block->end()->opcode == BRW_OPCODE_WHILE) {
         depth--;
         if (depth == 0)
            return block->end_ip;
      }
   }
   unreachable("not reached");
}

void fs_visitor::calculate_payload_ranges(int payload_node_count,
                                          int *payload_last_use_ip)
{
   int loop_depth = 0;
   int loop_end_ip = 0;

   for (int i = 0; i < payload_node_count; i++)
      payload_last_use_ip[i] = -1;

   int ip = 0;
   foreach_block_and_inst(block, fs_inst, inst, cfg) {
      switch (inst->opcode) {
      case BRW_OPCODE_DO:
         loop_depth++;

         /* Since payload regs are defined only at the start of the shader
          * execution, any uses of the payload within a loop mean the live
          * interval extends to the end of the outermost loop.  Find the ip of
          * the end now.
          */
         if (loop_depth == 1)
            loop_end_ip = count_to_loop_end(block);
         break;
      case BRW_OPCODE_WHILE:
         loop_depth--;
         break;
      default:
         break;
      }

      int use_ip;
      if (loop_depth > 0)
         use_ip = loop_end_ip;
      else
         use_ip = ip;

      /* Note that UNIFORM args have been turned into FIXED_HW_REG by
       * assign_curbe_setup(), and interpolation uses fixed hardware regs from
       * the start (see interp_reg()).
       */
      for (int i = 0; i < inst->sources; i++) {
         if (inst->src[i].file == HW_REG &&
             inst->src[i].fixed_hw_reg.file == BRW_GENERAL_REGISTER_FILE) {
            int node_nr = inst->src[i].fixed_hw_reg.nr;
            if (node_nr >= payload_node_count)
               continue;

            for (int j = 0; j < inst->regs_read(i); j++) {
               payload_last_use_ip[node_nr + j] = use_ip;
               assert(node_nr + j < payload_node_count);
            }
         }
      }

      /* Special case instructions which have extra implied registers used. */
      switch (inst->opcode) {
      case CS_OPCODE_CS_TERMINATE:
         payload_last_use_ip[0] = use_ip;
         break;

      default:
         if (inst->eot) {
            /* We could omit this for the !inst->header_present case, except
             * that the simulator apparently incorrectly reads from g0/g1
             * instead of sideband.  It also really freaks out driver
             * developers to see g0 used in unusual places, so just always
             * reserve it.
             */
            payload_last_use_ip[0] = use_ip;
            payload_last_use_ip[1] = use_ip;
         }
         break;
      }

      ip++;
   }
}

/**
 * Sets up interference between thread payload registers and the virtual GRFs
 * to be allocated for program temporaries.
 *
 * We want to be able to reallocate the payload for our virtual GRFs, notably
 * because the setup coefficients for a full set of 16 FS inputs takes up 8 of
 * our GRFs.
 *
 * The layout of the payload registers is:
 *
 * 0..payload.num_regs-1: fixed function setup (including bary coordinates).
 * payload.num_regs..payload.num_regs+curb_read_length-1: uniform data
 * payload.num_regs+curb_read_length..first_non_payload_grf-1: setup coefficients.
 *
 * And we have payload_node_count nodes covering these registers in order
 * (note that in SIMD16, a node is two registers).
 */
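/* Illustrative layout (example numbers, not from the comment above): with
 * payload.num_regs == 2 and curb_read_length == 2, g0-g1 hold fixed function
 * setup, g2-g3 hold uniform data, and the setup coefficients follow until
 * first_non_payload_grf.
 */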
void
fs_visitor::setup_payload_interference(struct ra_graph *g,
                                       int payload_node_count,
                                       int first_payload_node)
{
   int payload_last_use_ip[payload_node_count];
   calculate_payload_ranges(payload_node_count, payload_last_use_ip);

   for (int i = 0; i < payload_node_count; i++) {
      if (payload_last_use_ip[i] == -1)
         continue;

      /* Mark the payload node as interfering with any virtual grf that is
       * live between the start of the program and our last use of the payload
       * node.
       */
      for (unsigned j = 0; j < this->alloc.count; j++) {
         /* Note that we use a <= comparison, unlike virtual_grf_interferes(),
          * in order to not have to worry about the uniform issue described in
          * calculate_live_intervals().
          */
         if (this->virtual_grf_start[j] <= payload_last_use_ip[i]) {
            ra_add_node_interference(g, first_payload_node + i, j);
         }
      }
   }

   for (int i = 0; i < payload_node_count; i++) {
      /* Mark each payload node as being allocated to its physical register.
       *
       * The alternative would be to have per-physical-register classes, which
       * would just be silly.
       */
      if (devinfo->gen <= 5 && dispatch_width == 16) {
         /* We have to divide by 2 here because we only have even numbered
          * registers.  Some of the payload registers will be odd, but
          * that's ok because their physical register numbers have already
          * been assigned.  The only thing this is used for is interference.
          */
         ra_set_node_reg(g, first_payload_node + i, i / 2);
      } else {
         ra_set_node_reg(g, first_payload_node + i, i);
      }
   }
}

/**
 * Sets the mrf_used array to indicate which MRFs are used by the shader IR
 *
 * This is used in assign_regs() to decide which of the GRFs that we use as
 * MRFs on gen7 get normally register allocated, and in register spilling to
 * see if we can actually use MRFs to do spills without overwriting normal MRF
 * contents.
 */
static void
get_used_mrfs(fs_visitor *v, bool *mrf_used)
{
   int reg_width = v->dispatch_width / 8;

   memset(mrf_used, 0, BRW_MAX_MRF(v->devinfo->gen) * sizeof(bool));

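   /* Example of the marking below (illustrative): in SIMD16 (reg_width == 2)
    * a write to m2 marks m2 and m3 used; with the COMPR4 bit set it marks
    * m2 and m6 instead.
    */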
   foreach_block_and_inst(block, fs_inst, inst, v->cfg) {
      if (inst->dst.file == MRF) {
         int reg = inst->dst.reg & ~BRW_MRF_COMPR4;
         mrf_used[reg] = true;
         if (reg_width == 2) {
            if (inst->dst.reg & BRW_MRF_COMPR4) {
               mrf_used[reg + 4] = true;
            } else {
               mrf_used[reg + 1] = true;
            }
         }
      }

      if (inst->mlen > 0) {
         for (int i = 0; i < v->implied_mrf_writes(inst); i++) {
            mrf_used[inst->base_mrf + i] = true;
         }
      }
   }
}

/**
 * Sets interference between virtual GRFs and usage of the high GRFs for SEND
 * messages (treated as MRFs in code generation).
 */
static void
setup_mrf_hack_interference(fs_visitor *v, struct ra_graph *g,
                            int first_mrf_node, int *first_used_mrf)
{
   bool mrf_used[BRW_MAX_MRF(v->devinfo->gen)];
   get_used_mrfs(v, mrf_used);

   *first_used_mrf = BRW_MAX_MRF(v->devinfo->gen);
   for (int i = 0; i < BRW_MAX_MRF(v->devinfo->gen); i++) {
      /* Mark each MRF reg node as being allocated to its physical register.
       *
       * The alternative would be to have per-physical-register classes, which
       * would just be silly.
       */
      ra_set_node_reg(g, first_mrf_node + i, GEN7_MRF_HACK_START + i);
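
      /* Illustrative mapping (assuming GEN7_MRF_HACK_START == 112): MRF m0
       * is pinned to g112, m1 to g113, and so on up to the top of the GRF
       * file.
       */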
      /* Since we don't have any live/dead analysis on the MRFs, just mark all
       * that are used as conflicting with all virtual GRFs.
       */
      if (mrf_used[i]) {
         if (i < *first_used_mrf)
            *first_used_mrf = i;

         for (unsigned j = 0; j < v->alloc.count; j++) {
            ra_add_node_interference(g, first_mrf_node + i, j);
         }
      }
   }
}

bool
fs_visitor::assign_regs(bool allow_spilling)
{
   /* Most of this allocation was written for a reg_width of 1
    * (dispatch_width == 8).  In extending to SIMD16, the code was
    * left in place and it was converted to have the hardware
    * registers it's allocating be contiguous physical pairs of regs
    * for reg_width == 2.
    */
   int reg_width = dispatch_width / 8;
   unsigned hw_reg_mapping[this->alloc.count];
   int payload_node_count = ALIGN(this->first_non_payload_grf, reg_width);
   int rsi = reg_width - 1; /* Which compiler->fs_reg_sets[] to use */
   calculate_live_intervals();

   int node_count = this->alloc.count;
   int first_payload_node = node_count;
   node_count += payload_node_count;
   int first_mrf_hack_node = node_count;
   if (devinfo->gen >= 7)
      node_count += BRW_MAX_GRF - GEN7_MRF_HACK_START;
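
   /* Resulting node numbering (follows directly from the code above):
    * [0, alloc.count) are the virtual GRFs, the next payload_node_count
    * nodes are the payload registers, and on gen7+ the remaining nodes
    * starting at first_mrf_hack_node are the MRF-hack GRFs.
    */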
   struct ra_graph *g =
      ra_alloc_interference_graph(compiler->fs_reg_sets[rsi].regs, node_count);

   for (unsigned i = 0; i < this->alloc.count; i++) {
      unsigned size = this->alloc.sizes[i];
      int c;

      assert(size <= ARRAY_SIZE(compiler->fs_reg_sets[rsi].classes) &&
             "Register allocation relies on split_virtual_grfs()");
      c = compiler->fs_reg_sets[rsi].classes[size - 1];

      /* Special case: on pre-GEN6 hardware that supports PLN, the
       * second operand of a PLN instruction needs to be an
       * even-numbered register, so we have a special register class
       * wm_aligned_pairs_class to handle this case.  pre-GEN6 always
       * uses this->delta_xy[BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC] as the
       * second operand of a PLN instruction (since it doesn't support
       * any other interpolation modes).  So all we need to do is find
       * that register and set it to the appropriate class.
       */
      if (compiler->fs_reg_sets[rsi].aligned_pairs_class >= 0 &&
          this->delta_xy[BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC].file == GRF &&
          this->delta_xy[BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC].reg == i) {
         c = compiler->fs_reg_sets[rsi].aligned_pairs_class;
      }

      ra_set_node_class(g, i, c);

      for (unsigned j = 0; j < i; j++) {
         if (virtual_grf_interferes(i, j)) {
            ra_add_node_interference(g, i, j);
         }
      }
   }

   setup_payload_interference(g, payload_node_count, first_payload_node);
   if (devinfo->gen >= 7) {
      int first_used_mrf = BRW_MAX_MRF(devinfo->gen);
      setup_mrf_hack_interference(this, g, first_mrf_hack_node,
                                  &first_used_mrf);

      foreach_block_and_inst(block, fs_inst, inst, cfg) {
         /* When we do send-from-GRF for FB writes, we need to ensure that
          * the last write instruction sends from a high register.  This is
          * because the vertex fetcher wants to start filling the low
          * payload registers while the pixel data port is still working on
          * writing out the memory.  If we don't do this, we get rendering
          * artifacts.
          *
          * We could just do "something high".  Instead, we just pick the
          * highest register that works.
          */
         if (inst->eot) {
            int size = alloc.sizes[inst->src[0].reg];
            int reg = compiler->fs_reg_sets[rsi].class_to_ra_reg_range[size] - 1;

            /* If something happened to spill, we want to push the EOT send
             * register early enough in the register file that we don't
             * conflict with any used MRF hack registers.
             */
            reg -= BRW_MAX_MRF(devinfo->gen) - first_used_mrf;

            ra_set_node_reg(g, inst->src[0].reg, reg);
            break;
         }
      }
   }

   if (dispatch_width > 8) {
      /* In 16-wide dispatch we have an issue where a compressed
       * instruction is actually two instructions executed simultaneously.
       * It's actually ok to have the source and destination registers be
       * the same.  In this case, each instruction over-writes its own
       * source and there's no problem.  The real problem here is if the
       * source and destination registers are off by one.  Then you can end
       * up in a scenario where the first instruction over-writes the
       * source of the second instruction.  Since the compiler doesn't know
       * about this level of granularity, we simply make the source and
       * destination interfere.
       */
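      /* Concrete instance of the hazard (illustrative register numbers):
       * a compressed instruction with dst = g4..g5 and src = g5..g6 writes
       * g4-g5 in its first half, clobbering the g5 that the second half
       * still needs to read.
       */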
      foreach_block_and_inst(block, fs_inst, inst, cfg) {
         if (inst->dst.file != GRF)
            continue;

         for (int i = 0; i < inst->sources; ++i) {
            if (inst->src[i].file == GRF) {
               ra_add_node_interference(g, inst->dst.reg, inst->src[i].reg);
            }
         }
      }
   }

   /* Debug of register spilling: Go spill everything. */
   if (unlikely(INTEL_DEBUG & DEBUG_SPILL_FS)) {
      int reg = choose_spill_reg(g);

      if (reg != -1) {
         spill_reg(reg);
         ralloc_free(g);
         return false;
      }
   }

   if (!ra_allocate(g)) {
      /* Failed to allocate registers.  Spill a reg, and the caller will
       * loop back into here to try again.
       */
      int reg = choose_spill_reg(g);

      if (reg == -1) {
         fail("no register to spill:\n");
         dump_instructions(NULL);
      } else if (allow_spilling) {
         spill_reg(reg);
      }

      ralloc_free(g);

      return false;
   }

   /* Get the chosen virtual registers for each node, and map virtual
    * regs in the register classes back down to real hardware reg
    * numbers.
    */
   this->grf_used = payload_node_count;
   for (unsigned i = 0; i < this->alloc.count; i++) {
      int reg = ra_get_node_reg(g, i);

      hw_reg_mapping[i] = compiler->fs_reg_sets[rsi].ra_reg_to_grf[reg];
      this->grf_used = MAX2(this->grf_used,
                            hw_reg_mapping[i] + this->alloc.sizes[i]);
   }

   foreach_block_and_inst(block, fs_inst, inst, cfg) {
      assign_reg(hw_reg_mapping, &inst->dst);
      for (int i = 0; i < inst->sources; i++) {
         assign_reg(hw_reg_mapping, &inst->src[i]);
      }
   }

   this->alloc.count = this->grf_used;

   ralloc_free(g);

   return true;
}

void
fs_visitor::emit_unspill(bblock_t *block, fs_inst *inst, fs_reg dst,
                         uint32_t spill_offset, int count)
{
   int reg_size = 1;
   if (dispatch_width == 16 && count % 2 == 0)
      reg_size = 2;

   const fs_builder ibld = bld.annotate(inst->annotation, inst->ir)
                              .group(reg_size * 8, 0)
                              .at(block, inst);

   for (int i = 0; i < count / reg_size; i++) {
      /* The gen7 descriptor-based offset is 12 bits of HWORD units. */
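      /* Illustrative arithmetic (assuming REG_SIZE == 32 bytes): offsets up
       * to (1 << 12) * 32 = 128kB of scratch can use the gen7 path; larger
       * offsets fall back to the gen4 message below.
       */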
      bool gen7_read = devinfo->gen >= 7 && spill_offset < (1 << 12) * REG_SIZE;
      fs_inst *unspill_inst = ibld.emit(gen7_read ?
                                        SHADER_OPCODE_GEN7_SCRATCH_READ :
                                        SHADER_OPCODE_GEN4_SCRATCH_READ,
                                        dst);
      unspill_inst->offset = spill_offset;
      unspill_inst->regs_written = reg_size;

      if (!gen7_read) {
         unspill_inst->base_mrf = FIRST_SPILL_MRF(devinfo->gen) + 1;
         unspill_inst->mlen = 1; /* header contains offset */
      }

      dst.reg_offset += reg_size;
      spill_offset += reg_size * REG_SIZE;
   }
}

void
fs_visitor::emit_spill(bblock_t *block, fs_inst *inst, fs_reg src,
                       uint32_t spill_offset, int count)
{
   int reg_size = 1;
   int spill_base_mrf = FIRST_SPILL_MRF(devinfo->gen) + 1;
   if (dispatch_width == 16 && count % 2 == 0) {
      spill_base_mrf = FIRST_SPILL_MRF(devinfo->gen);
      reg_size = 2;
   }

   const fs_builder ibld = bld.annotate(inst->annotation, inst->ir)
                              .group(reg_size * 8, 0)
                              .at(block, inst->next);

   for (int i = 0; i < count / reg_size; i++) {
      fs_inst *spill_inst =
         ibld.emit(SHADER_OPCODE_GEN4_SCRATCH_WRITE, ibld.null_reg_f(), src);
      src.reg_offset += reg_size;
      spill_inst->offset = spill_offset + i * reg_size * REG_SIZE;
      spill_inst->mlen = 1 + reg_size; /* header, value */
      spill_inst->base_mrf = spill_base_mrf;
   }
}

int
fs_visitor::choose_spill_reg(struct ra_graph *g)
{
   float loop_scale = 1.0;
   float spill_costs[this->alloc.count];
   bool no_spill[this->alloc.count];

   for (unsigned i = 0; i < this->alloc.count; i++) {
      spill_costs[i] = 0.0;
      no_spill[i] = false;
   }

   /* Calculate costs for spilling nodes.  Call it a cost of 1 per
    * spill/unspill we'll have to do, and guess that the insides of
    * loops run 10 times.
    */
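   /* Illustrative cost (numbers follow from the rule above): a GRF read once
    * outside any loop and once inside a single loop costs 1 + 10; a use in a
    * doubly nested loop would count 100.
    */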
   foreach_block_and_inst(block, fs_inst, inst, cfg) {
      for (unsigned int i = 0; i < inst->sources; i++) {
         if (inst->src[i].file == GRF) {
            spill_costs[inst->src[i].reg] += loop_scale;

            /* Register spilling logic assumes full-width registers; smeared
             * registers have a width of 1 so if we try to spill them we'll
             * generate invalid assembly.  This shouldn't be a problem because
             * smeared registers are only used as short-term temporaries when
             * loading pull constants, so spilling them is unlikely to reduce
             * register pressure anyhow.
             */
            if (!inst->src[i].is_contiguous()) {
               no_spill[inst->src[i].reg] = true;
            }
         }
      }

      if (inst->dst.file == GRF) {
         spill_costs[inst->dst.reg] += inst->regs_written * loop_scale;

         if (!inst->dst.is_contiguous()) {
            no_spill[inst->dst.reg] = true;
         }
      }

      switch (inst->opcode) {

      case BRW_OPCODE_DO:
         loop_scale *= 10;
         break;

      case BRW_OPCODE_WHILE:
         loop_scale /= 10;
         break;

      case SHADER_OPCODE_GEN4_SCRATCH_WRITE:
         if (inst->src[0].file == GRF)
            no_spill[inst->src[0].reg] = true;
         break;

      case SHADER_OPCODE_GEN4_SCRATCH_READ:
      case SHADER_OPCODE_GEN7_SCRATCH_READ:
         if (inst->dst.file == GRF)
            no_spill[inst->dst.reg] = true;
         break;

      default:
         break;
      }
   }

   for (unsigned i = 0; i < this->alloc.count; i++) {
      if (!no_spill[i])
         ra_set_node_spill_cost(g, i, spill_costs[i]);
   }

   return ra_get_best_spill_node(g);
}

void
fs_visitor::spill_reg(int spill_reg)
{
   int size = alloc.sizes[spill_reg];
   unsigned int spill_offset = last_scratch;
   assert(ALIGN(spill_offset, 16) == spill_offset); /* oword read/write req. */
   int spill_base_mrf = dispatch_width > 8 ? FIRST_SPILL_MRF(devinfo->gen) :
                        FIRST_SPILL_MRF(devinfo->gen) + 1;

   /* Spills may use MRFs 13-15 in the SIMD16 case.  Our texturing is done
    * using up to 11 MRFs starting from either m1 or m2, and fb writes can use
    * up to m13 (gen6+ simd16: 2 header + 8 color + 2 src0alpha + 2 omask) or
    * m15 (gen4-5 simd16: 2 header + 8 color + 1 aads + 2 src depth + 2 dst
    * depth), starting from m1.  In summary: We may not be able to spill in
    * SIMD16 mode, because we'd stomp the FB writes.
    */
   if (!spilled_any_registers) {
      bool mrf_used[BRW_MAX_MRF(devinfo->gen)];
      get_used_mrfs(this, mrf_used);

      for (int i = spill_base_mrf; i < BRW_MAX_MRF(devinfo->gen); i++) {
         if (mrf_used[i]) {
            fail("Register spilling not supported with m%d used", i);
            return;
         }
      }

      spilled_any_registers = true;
   }

   last_scratch += size * REG_SIZE;

   /* Generate spill/unspill instructions for the objects being
    * spilled.  Right now, we spill or unspill the whole thing to a
    * virtual grf of the same size.  For most instructions, though, we
    * could just spill/unspill the GRF being accessed.
    */
   foreach_block_and_inst (block, fs_inst, inst, cfg) {
      for (unsigned int i = 0; i < inst->sources; i++) {
         if (inst->src[i].file == GRF &&
             inst->src[i].reg == spill_reg) {
            int regs_read = inst->regs_read(i);
            int subset_spill_offset = (spill_offset +
                                       REG_SIZE * inst->src[i].reg_offset);
            fs_reg unspill_dst(GRF, alloc.allocate(regs_read));

            inst->src[i].reg = unspill_dst.reg;
            inst->src[i].reg_offset = 0;

            emit_unspill(block, inst, unspill_dst, subset_spill_offset,
                         regs_read);
         }
      }

      if (inst->dst.file == GRF &&
          inst->dst.reg == spill_reg) {
         int subset_spill_offset = (spill_offset +
                                    REG_SIZE * inst->dst.reg_offset);
         fs_reg spill_src(GRF, alloc.allocate(inst->regs_written));

         inst->dst.reg = spill_src.reg;
         inst->dst.reg_offset = 0;

         /* If we're immediately spilling the register, we should not use
          * destination dependency hints.  Doing so will cause the GPU to
          * try to read and write the register at the same time and may
          * hang the GPU.
          */
         inst->no_dd_clear = false;
         inst->no_dd_check = false;

         /* If our write is going to affect just part of the
          * inst->regs_written(), then we need to unspill the destination
          * since we write back out all of the regs_written().
          */
         if (inst->is_partial_write())
            emit_unspill(block, inst, spill_src, subset_spill_offset,
                         inst->regs_written);

         emit_spill(block, inst, spill_src, subset_spill_offset,
                    inst->regs_written);
      }
   }

   invalidate_live_intervals();
}