/*
 * Copyright © 2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include "brw_fs.h"
#include "brw_cfg.h"
#include "glsl/glsl_types.h"
#include "glsl/ir_optimization.h"
static void
assign_reg(int *reg_hw_locations, fs_reg *reg)
{
   if (reg->file == GRF) {
      assert(reg->reg_offset >= 0);
      reg->reg = reg_hw_locations[reg->reg] + reg->reg_offset;
      reg->reg_offset = 0;
   }
}
void
fs_visitor::assign_regs_trivial()
{
   int hw_reg_mapping[this->virtual_grf_count + 1];
   int i;
   int reg_width = dispatch_width / 8;

   /* Note that compressed instructions require alignment to 2 registers. */
   hw_reg_mapping[0] = ALIGN(this->first_non_payload_grf, reg_width);
   for (i = 1; i <= this->virtual_grf_count; i++) {
      hw_reg_mapping[i] = (hw_reg_mapping[i - 1] +
                           this->virtual_grf_sizes[i - 1]);
   }
   this->grf_used = hw_reg_mapping[this->virtual_grf_count];

   foreach_block_and_inst(block, fs_inst, inst, cfg) {
      assign_reg(hw_reg_mapping, &inst->dst);
      for (i = 0; i < inst->sources; i++) {
         assign_reg(hw_reg_mapping, &inst->src[i]);
      }
   }

   if (this->grf_used >= max_grf) {
      fail("Ran out of regs on trivial allocator (%d/%d)\n",
           this->grf_used, max_grf);
   } else {
      this->virtual_grf_count = this->grf_used;
   }
}
static void
brw_alloc_reg_set(struct intel_screen *screen, int reg_width)
{
   const struct brw_device_info *devinfo = screen->devinfo;
   int base_reg_count = BRW_MAX_GRF;
   int index = reg_width - 1;

   /* The registers used to make up almost all values handled in the compiler
    * are a scalar value occupying a single register (or 2 registers in the
    * case of SIMD16, which is handled by dividing base_reg_count by 2 and
    * multiplying allocated register numbers by 2).  Things that were
    * aggregates of scalar values at the GLSL level were split to scalar
    * values by split_virtual_grfs().
    *
    * However, texture SEND messages return a series of contiguous registers
    * to write into.  We currently always ask for 4 registers, but we may
    * convert that to use fewer some day.
    *
    * Additionally, on gen5 we need aligned pairs of registers for the PLN
    * instruction, and on gen4 we need 8 contiguous regs for workaround simd16
    * texturing.
    *
    * So we have a need for classes for 1, 2, 4, and 8 registers currently,
    * and we add in '3' to make indexing the array easier for the common case
    * (since we'll probably want it for texturing later).
    *
    * And, on gen7 and newer, we do texturing SEND messages from GRFs, which
    * means that we may need any size up to the sampler message size limit (11
    * registers).
    */
   int class_count;
   int class_sizes[BRW_MAX_MRF];
   if (devinfo->gen >= 7) {
      for (class_count = 0; class_count < MAX_VGRF_SIZE; class_count++)
         class_sizes[class_count] = class_count + 1;
   } else {
      for (class_count = 0; class_count < 4; class_count++)
         class_sizes[class_count] = class_count + 1;
      class_sizes[class_count++] = 8;
   }
   memset(screen->wm_reg_sets[index].class_to_ra_reg_range, 0,
          sizeof(screen->wm_reg_sets[index].class_to_ra_reg_range));
   int *class_to_ra_reg_range = screen->wm_reg_sets[index].class_to_ra_reg_range;
   /* Compute the total number of registers across all classes. */
   int ra_reg_count = 0;
   for (int i = 0; i < class_count; i++) {
      if (devinfo->gen <= 5 && reg_width == 2) {
         /* From the G45 PRM:
          *
          * In order to reduce the hardware complexity, the following
          * rules and restrictions apply to the compressed instruction:
          * ...
          * * Operand Alignment Rule: With the exceptions listed below, a
          *   source/destination operand in general should be aligned to
          *   even 256-bit physical register with a region size equal to
          *   two 256-bit physical register
          */
         ra_reg_count += (base_reg_count - (class_sizes[i] - 1)) / 2;
      } else {
         ra_reg_count += base_reg_count - (class_sizes[i] - 1);
      }
      /* Mark the last register.  We'll fill in the beginnings later. */
      class_to_ra_reg_range[class_sizes[i]] = ra_reg_count;
   }
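   /* A worked example for the common (non-gen4/5-SIMD16) path: with
    * base_reg_count = 128 and class sizes 1 and 2, the loop above leaves
    * class_to_ra_reg_range[1] = 128 and class_to_ra_reg_range[2] =
    * 128 + 127 = 255; each entry is one past the last ra_reg of its class.
    */
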
   /* Fill out the rest of the range markers */
   for (int i = 1; i < 17; ++i) {
      if (class_to_ra_reg_range[i] == 0)
         class_to_ra_reg_range[i] = class_to_ra_reg_range[i-1];
   }
   uint8_t *ra_reg_to_grf = ralloc_array(screen, uint8_t, ra_reg_count);
   struct ra_regs *regs = ra_alloc_reg_set(screen, ra_reg_count);
   if (devinfo->gen >= 6)
      ra_set_allocate_round_robin(regs);
   int *classes = ralloc_array(screen, int, class_count);
   int aligned_pairs_class = -1;
   /* Now, add the registers to their classes, and add the conflicts
    * between them and the base GRF registers (and also each other).
    */
   int reg = 0;
   int pairs_base_reg = 0;
   int pairs_reg_count = 0;
   for (int i = 0; i < class_count; i++) {
      int class_reg_count;
      if (devinfo->gen <= 5 && reg_width == 2) {
         class_reg_count = (base_reg_count - (class_sizes[i] - 1)) / 2;
      } else {
         class_reg_count = base_reg_count - (class_sizes[i] - 1);
      }
      classes[i] = ra_alloc_reg_class(regs);

      /* Save this off for the aligned pair class at the end. */
      if (class_sizes[i] == 2) {
         pairs_base_reg = reg;
         pairs_reg_count = class_reg_count;
      }
      if (devinfo->gen <= 5 && reg_width == 2) {
         for (int j = 0; j < class_reg_count; j++) {
            ra_class_add_reg(regs, classes[i], reg);

            ra_reg_to_grf[reg] = j * 2;

            for (int base_reg = j * 2;
                 base_reg < j * 2 + class_sizes[i];
                 base_reg++) {
               ra_add_transitive_reg_conflict(regs, base_reg, reg);
            }

            reg++;
         }
      } else {
         for (int j = 0; j < class_reg_count; j++) {
            ra_class_add_reg(regs, classes[i], reg);

            ra_reg_to_grf[reg] = j;

            for (int base_reg = j;
                 base_reg < j + class_sizes[i];
                 base_reg++) {
               ra_add_transitive_reg_conflict(regs, base_reg, reg);
            }

            reg++;
         }
      }
   }
   assert(reg == ra_reg_count);

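   /* Each ra_reg allocated above stands for "class_sizes[i] contiguous GRFs
    * starting at ra_reg_to_grf[reg]"; the transitive conflicts make it
    * interfere with every single-GRF ra_reg it overlaps, and so with any
    * other ra_reg spanning those GRFs.
    */
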
   /* Add a special class for aligned pairs, which we'll put delta_x/y
    * in on gen5 so that we can do PLN.
    */
   if (devinfo->has_pln && devinfo->gen < 6) {
      aligned_pairs_class = ra_alloc_reg_class(regs);

      for (int i = 0; i < pairs_reg_count; i++) {
         if ((ra_reg_to_grf[pairs_base_reg + i] & 1) == 0) {
            ra_class_add_reg(regs, aligned_pairs_class, pairs_base_reg + i);
         }
      }
   }

   ra_set_finalize(regs, NULL);
   screen->wm_reg_sets[index].regs = regs;
   for (unsigned i = 0; i < ARRAY_SIZE(screen->wm_reg_sets[index].classes); i++)
      screen->wm_reg_sets[index].classes[i] = -1;
   for (int i = 0; i < class_count; i++)
      screen->wm_reg_sets[index].classes[class_sizes[i] - 1] = classes[i];
   screen->wm_reg_sets[index].ra_reg_to_grf = ra_reg_to_grf;
   screen->wm_reg_sets[index].aligned_pairs_class = aligned_pairs_class;
}
void
brw_fs_alloc_reg_sets(struct intel_screen *screen)
{
   brw_alloc_reg_set(screen, 1);
   brw_alloc_reg_set(screen, 2);
}

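/* The two calls above build the SIMD8 (reg_width 1, wm_reg_sets[0]) and
 * SIMD16 (reg_width 2, wm_reg_sets[1]) register sets once per screen, so
 * per-shader allocation below can simply index wm_reg_sets by reg_width - 1.
 */
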
static int
count_to_loop_end(const bblock_t *block)
{
   if (block->end()->opcode == BRW_OPCODE_WHILE)
      return block->end_ip;

   int depth = 1;
   /* Skip the first block, since we don't want to count the DO that the
    * calling instruction is in.
    */
   for (block = block->next();
        depth > 0;
        block = block->next()) {
      if (block->start()->opcode == BRW_OPCODE_DO)
         depth++;
      if (block->end()->opcode == BRW_OPCODE_WHILE) {
         depth--;
         if (depth == 0)
            return block->end_ip;
      }
   }
   unreachable("not reached");
}

/**
 * Sets up interference between thread payload registers and the virtual GRFs
 * to be allocated for program temporaries.
 *
 * We want to be able to reallocate the payload for our virtual GRFs, notably
 * because the setup coefficients for a full set of 16 FS inputs take up 8 of
 * our payload registers.
 *
 * The layout of the payload registers is:
 *
 * 0..payload.num_regs-1: fixed function setup (including bary coordinates).
 * payload.num_regs..payload.num_regs+curb_read_length-1: uniform data
 * payload.num_regs+curb_read_length..first_non_payload_grf-1: setup coefficients.
 *
 * And we have payload_node_count nodes covering these registers in order
 * (note that in SIMD16, a node is two registers).
 */
void
fs_visitor::setup_payload_interference(struct ra_graph *g,
                                       int payload_node_count,
                                       int first_payload_node)
{
   int loop_depth = 0;
   int loop_end_ip = 0;

   int payload_last_use_ip[payload_node_count];
   memset(payload_last_use_ip, 0, sizeof(payload_last_use_ip));
   int ip = 0;
   foreach_block_and_inst(block, fs_inst, inst, cfg) {
      switch (inst->opcode) {
      case BRW_OPCODE_DO:
         loop_depth++;

         /* Since payload regs are defined only at the start of the shader
          * execution, any uses of the payload within a loop mean the live
          * interval extends to the end of the outermost loop.  Find the ip of
          * the end now.
          */
         if (loop_depth == 1)
            loop_end_ip = count_to_loop_end(block);
         break;

      case BRW_OPCODE_WHILE:
         loop_depth--;
         break;

      default:
         break;
      }

      int use_ip;
      if (loop_depth > 0)
         use_ip = loop_end_ip;
      else
         use_ip = ip;
      /* Note that UNIFORM args have been turned into FIXED_HW_REG by
       * assign_curbe_setup(), and interpolation uses fixed hardware regs from
       * the start (see interp_reg()).
       */
      for (int i = 0; i < inst->sources; i++) {
         if (inst->src[i].file == HW_REG &&
             inst->src[i].fixed_hw_reg.file == BRW_GENERAL_REGISTER_FILE) {
            int node_nr = inst->src[i].fixed_hw_reg.nr;
            if (node_nr >= payload_node_count)
               continue;

            payload_last_use_ip[node_nr] = use_ip;
         }
      }
      /* Special case instructions which have extra implied registers used. */
      switch (inst->opcode) {
      case FS_OPCODE_FB_WRITE:
         /* We could omit this for the !inst->header_present case, except that
          * the simulator apparently incorrectly reads from g0/g1 instead of
          * sideband.  It also really freaks out driver developers to see g0
          * used in unusual places, so just always reserve it.
          */
         payload_last_use_ip[0] = use_ip;
         payload_last_use_ip[1] = use_ip;
         break;
      case FS_OPCODE_LINTERP:
         /* On gen6+ in SIMD16, there are 4 adjacent registers used by
          * PLN's sourcing of the deltas, while we list only the first one
          * in the arguments.  Pre-gen6, the deltas are computed in normal
          * VGRFs.
          */
         if (brw->gen >= 6) {
            int delta_x_arg = 0;
            if (inst->src[delta_x_arg].file == HW_REG &&
                inst->src[delta_x_arg].fixed_hw_reg.file ==
                BRW_GENERAL_REGISTER_FILE) {
               for (int i = 1; i < 4; ++i) {
                  int node = inst->src[delta_x_arg].fixed_hw_reg.nr + i;
                  assert(node < payload_node_count);
                  payload_last_use_ip[node] = use_ip;
               }
            }
         }
         break;

      default:
         break;
      }

      ip++;
   }
   for (int i = 0; i < payload_node_count; i++) {
      /* Mark the payload node as interfering with any virtual grf that is
       * live between the start of the program and our last use of the payload
       * node.
       */
      for (int j = 0; j < this->virtual_grf_count; j++) {
         /* Note that we use a <= comparison, unlike virtual_grf_interferes(),
          * in order to not have to worry about the uniform issue described in
          * calculate_live_intervals().
          */
         if (this->virtual_grf_start[j] <= payload_last_use_ip[i]) {
            ra_add_node_interference(g, first_payload_node + i, j);
         }
      }
   }
   for (int i = 0; i < payload_node_count; i++) {
      /* Mark each payload node as being allocated to its physical register.
       *
       * The alternative would be to have per-physical-register classes, which
       * would just be silly.
       */
      ra_set_node_reg(g, first_payload_node + i, i);
   }
}

/**
 * Sets the mrf_used array to indicate which MRFs are used by the shader IR
 *
 * This is used in assign_regs() to decide which of the GRFs that we use as
 * MRFs on gen7 get normally register allocated, and in register spilling to
 * see if we can actually use MRFs to do spills without overwriting normal MRF
 * contents.
 */
void
fs_visitor::get_used_mrfs(bool *mrf_used)
{
   int reg_width = dispatch_width / 8;

   memset(mrf_used, 0, BRW_MAX_MRF * sizeof(bool));
   foreach_block_and_inst(block, fs_inst, inst, cfg) {
      if (inst->dst.file == MRF) {
         int reg = inst->dst.reg & ~BRW_MRF_COMPR4;
         mrf_used[reg] = true;
         if (reg_width == 2) {
            if (inst->dst.reg & BRW_MRF_COMPR4) {
               mrf_used[reg + 4] = true;
            } else {
               mrf_used[reg + 1] = true;
            }
         }
      }

      if (inst->mlen > 0) {
         for (int i = 0; i < implied_mrf_writes(inst); i++) {
            mrf_used[inst->base_mrf + i] = true;
         }
      }
   }
}

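/* A SIMD16 (reg_width == 2) MRF write normally covers reg and reg + 1; with
 * the COMPR4 addressing mode the second half lands at reg + 4 instead, which
 * is why the loop above marks both patterns.
 */
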
/**
 * Sets interference between virtual GRFs and usage of the high GRFs for SEND
 * messages (treated as MRFs in code generation).
 */
void
fs_visitor::setup_mrf_hack_interference(struct ra_graph *g, int first_mrf_node)
{
   bool mrf_used[BRW_MAX_MRF];
   get_used_mrfs(mrf_used);

   for (int i = 0; i < BRW_MAX_MRF; i++) {
      /* Mark each MRF reg node as being allocated to its physical register.
       *
       * The alternative would be to have per-physical-register classes, which
       * would just be silly.
       */
      ra_set_node_reg(g, first_mrf_node + i, GEN7_MRF_HACK_START + i);

      /* Since we don't have any live/dead analysis on the MRFs, just mark all
       * that are used as conflicting with all virtual GRFs.
       */
      if (mrf_used[i]) {
         for (int j = 0; j < this->virtual_grf_count; j++) {
            ra_add_node_interference(g, first_mrf_node + i, j);
         }
      }
   }
}

bool
fs_visitor::assign_regs(bool allow_spilling)
{
   struct intel_screen *screen = brw->intelScreen;
   /* Most of this allocation was written for a reg_width of 1
    * (dispatch_width == 8).  In extending to SIMD16, the code was
    * left in place and it was converted to have the hardware
    * registers it's allocating be contiguous physical pairs of regs
    * for reg_width == 2.
    */
   int reg_width = dispatch_width / 8;
   int hw_reg_mapping[this->virtual_grf_count];
   int payload_node_count = ALIGN(this->first_non_payload_grf, reg_width);
   int rsi = reg_width - 1; /* Which screen->wm_reg_sets[] to use */
   calculate_live_intervals();
   int node_count = this->virtual_grf_count;
   int first_payload_node = node_count;
   node_count += payload_node_count;
   int first_mrf_hack_node = node_count;
   if (brw->gen >= 7)
      node_count += BRW_MAX_GRF - GEN7_MRF_HACK_START;
   struct ra_graph *g = ra_alloc_interference_graph(screen->wm_reg_sets[rsi].regs,
                                                    node_count);
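   /* Node numbering in the graph: [0, virtual_grf_count) are the virtual
    * GRFs, followed by payload_node_count payload nodes and, on gen7+, one
    * node for each high GRF used as an MRF.
    */
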
   for (int i = 0; i < this->virtual_grf_count; i++) {
      unsigned size = this->virtual_grf_sizes[i];
      int c;

      assert(size <= ARRAY_SIZE(screen->wm_reg_sets[rsi].classes) &&
             "Register allocation relies on split_virtual_grfs()");
      c = screen->wm_reg_sets[rsi].classes[size - 1];
      /* Special case: on pre-GEN6 hardware that supports PLN, the
       * second operand of a PLN instruction needs to be an
       * even-numbered register, so we have a special register class
       * wm_aligned_pairs_class to handle this case.  pre-GEN6 always
       * uses this->delta_x[BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC] as the
       * second operand of a PLN instruction (since it doesn't support
       * any other interpolation modes).  So all we need to do is find
       * that register and set it to the appropriate class.
       */
      if (screen->wm_reg_sets[rsi].aligned_pairs_class >= 0 &&
          this->delta_x[BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC].file == GRF &&
          this->delta_x[BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC].reg == i) {
         c = screen->wm_reg_sets[rsi].aligned_pairs_class;
      }

      ra_set_node_class(g, i, c);
      for (int j = 0; j < i; j++) {
         if (virtual_grf_interferes(i, j)) {
            ra_add_node_interference(g, i, j);
         }
      }
   }

   setup_payload_interference(g, payload_node_count, first_payload_node);
   if (brw->gen >= 7)
      setup_mrf_hack_interference(g, first_mrf_hack_node);
   foreach_block_and_inst(block, fs_inst, inst, cfg) {
      /* When we do send-from-GRF for FB writes, we need to ensure that
       * the last write instruction sends from a high register.  This is
       * because the vertex fetcher wants to start filling the low
       * payload registers while the pixel data port is still working on
       * writing out the memory.  If we don't do this, we get rendering
       * artifacts.
       *
       * We could just do "something high".  Instead, we just pick the
       * highest register that works.
       */
      if (inst->opcode == FS_OPCODE_FB_WRITE && inst->eot) {
         int size = virtual_grf_sizes[inst->src[0].reg];
         int reg = screen->wm_reg_sets[rsi].class_to_ra_reg_range[size] - 1;
         ra_set_node_reg(g, inst->src[0].reg, reg);
         break;
      }
   }
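   /* class_to_ra_reg_range[size] is one past the last ra_reg of that class,
    * so the "- 1" above picks the class's highest-numbered ra_reg, i.e. the
    * highest GRF range that can hold the FB write payload.
    */
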
   if (dispatch_width > 8) {
      /* In 16-wide dispatch we have an issue where a compressed
       * instruction is actually two instructions executed simultaneously.
       * It's actually ok to have the source and destination registers be
       * the same.  In this case, each instruction over-writes its own
       * source and there's no problem.  The real problem here is if the
       * source and destination registers are off by one.  Then you can end
       * up in a scenario where the first instruction over-writes the
       * source of the second instruction.  Since the compiler doesn't know
       * about this level of granularity, we simply make the source and
       * destination interfere.
       */
      foreach_block_and_inst(block, fs_inst, inst, cfg) {
         if (inst->dst.file != GRF)
            continue;

         for (int i = 0; i < inst->sources; ++i) {
            if (inst->src[i].file == GRF) {
               ra_add_node_interference(g, inst->dst.reg, inst->src[i].reg);
            }
         }
      }
   }
   /* Debug of register spilling: Go spill everything. */
   if (0) {
      int reg = choose_spill_reg(g);

      if (reg != -1) {
         spill_reg(reg);
         ralloc_free(g);
         return false;
      }
   }
   if (!ra_allocate(g)) {
      /* Failed to allocate registers.  Spill a reg, and the caller will
       * loop back into here to try again.
       */
      int reg = choose_spill_reg(g);

      if (reg == -1) {
         fail("no register to spill:\n");
         dump_instructions(NULL);
      } else if (allow_spilling) {
         spill_reg(reg);
      }

      ralloc_free(g);

      return false;
   }
   /* Get the chosen virtual registers for each node, and map virtual
    * regs in the register classes back down to real hardware reg
    * numbers.
    */
   this->grf_used = payload_node_count;
   for (int i = 0; i < this->virtual_grf_count; i++) {
      int reg = ra_get_node_reg(g, i);

      hw_reg_mapping[i] = screen->wm_reg_sets[rsi].ra_reg_to_grf[reg];
      this->grf_used = MAX2(this->grf_used,
                            hw_reg_mapping[i] + this->virtual_grf_sizes[i]);
   }
   foreach_block_and_inst(block, fs_inst, inst, cfg) {
      assign_reg(hw_reg_mapping, &inst->dst);
      for (int i = 0; i < inst->sources; i++) {
         assign_reg(hw_reg_mapping, &inst->src[i]);
      }
   }

   this->virtual_grf_count = this->grf_used;

   ralloc_free(g);

   return true;
}

void
fs_visitor::emit_unspill(bblock_t *block, fs_inst *inst, fs_reg dst,
                         uint32_t spill_offset, int count)
{
   int reg_size = 1;
   if (count % 2 == 0)
      reg_size = 2;

   for (int i = 0; i < count / reg_size; i++) {
      /* The gen7 descriptor-based offset is 12 bits of HWORD units. */
      bool gen7_read = brw->gen >= 7 && spill_offset < (1 << 12) * REG_SIZE;
      fs_inst *unspill_inst =
         new(mem_ctx) fs_inst(gen7_read ?
                              SHADER_OPCODE_GEN7_SCRATCH_READ :
                              SHADER_OPCODE_GEN4_SCRATCH_READ,
                              dst);
      unspill_inst->offset = spill_offset;
      unspill_inst->ir = inst->ir;
      unspill_inst->annotation = inst->annotation;
      unspill_inst->regs_written = reg_size;

      if (!gen7_read) {
         unspill_inst->base_mrf = 14;
         unspill_inst->mlen = 1; /* header contains offset */
      }
      inst->insert_before(block, unspill_inst);

      dst.reg_offset += reg_size;
      spill_offset += reg_size * 8 * sizeof(float);
   }
}

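/* Each scratch read above covers reg_size registers, so spill_offset
 * advances by reg_size * 8 * sizeof(float) = reg_size * 32 bytes: one full
 * GRF (8 floats) per register read.
 */
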
void
fs_visitor::emit_spill(bblock_t *block, fs_inst *inst, fs_reg src,
                       uint32_t spill_offset, int count)
{
   int reg_size = 1;
   int spill_base_mrf = 14;
   if (count % 2 == 0) {
      spill_base_mrf = 13;
      reg_size = 2;
   }
   for (int i = 0; i < count / reg_size; i++) {
      fs_inst *spill_inst =
         new(mem_ctx) fs_inst(SHADER_OPCODE_GEN4_SCRATCH_WRITE,
                              reg_null_f, src);
      src.reg_offset += reg_size;
      spill_inst->offset = spill_offset + i * reg_size;
      spill_inst->ir = inst->ir;
      spill_inst->annotation = inst->annotation;
      spill_inst->mlen = 1 + reg_size; /* header, value */
      spill_inst->base_mrf = spill_base_mrf;
      inst->insert_after(block, spill_inst);
   }
}

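/* Note the insertion order: unspills go before the instruction that reads
 * the value (insert_before), while spills go after the instruction that
 * produced it (insert_after), so the scratch accesses bracket the rewritten
 * instruction.
 */
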
int
fs_visitor::choose_spill_reg(struct ra_graph *g)
{
   float loop_scale = 1.0;
   float spill_costs[this->virtual_grf_count];
   bool no_spill[this->virtual_grf_count];

   for (int i = 0; i < this->virtual_grf_count; i++) {
      spill_costs[i] = 0.0;
      no_spill[i] = false;
   }
   /* Calculate costs for spilling nodes.  Call it a cost of 1 per
    * spill/unspill we'll have to do, and guess that the insides of
    * loops run 10 times.
    */
   foreach_block_and_inst(block, fs_inst, inst, cfg) {
      for (unsigned int i = 0; i < inst->sources; i++) {
         if (inst->src[i].file == GRF) {
            spill_costs[inst->src[i].reg] += loop_scale;
            /* Register spilling logic assumes full-width registers; smeared
             * registers have a width of 1 so if we try to spill them we'll
             * generate invalid assembly.  This shouldn't be a problem because
             * smeared registers are only used as short-term temporaries when
             * loading pull constants, so spilling them is unlikely to reduce
             * register pressure anyhow.
             */
            if (!inst->src[i].is_contiguous()) {
               no_spill[inst->src[i].reg] = true;
            }
         }
      }

      if (inst->dst.file == GRF) {
         spill_costs[inst->dst.reg] += inst->regs_written * loop_scale;

         if (!inst->dst.is_contiguous()) {
            no_spill[inst->dst.reg] = true;
         }
      }
      switch (inst->opcode) {

      case BRW_OPCODE_DO:
         loop_scale *= 10;
         break;

      case BRW_OPCODE_WHILE:
         loop_scale /= 10;
         break;

      case SHADER_OPCODE_GEN4_SCRATCH_WRITE:
         if (inst->src[0].file == GRF)
            no_spill[inst->src[0].reg] = true;
         break;

      case SHADER_OPCODE_GEN4_SCRATCH_READ:
      case SHADER_OPCODE_GEN7_SCRATCH_READ:
         if (inst->dst.file == GRF)
            no_spill[inst->dst.reg] = true;
         break;

      default:
         break;
      }
   }
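   /* Because loop_scale nests multiplicatively, an access two loops deep
    * costs 10 * 10 = 100 times one outside any loop.
    */
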
   for (int i = 0; i < this->virtual_grf_count; i++) {
      if (!no_spill[i])
         ra_set_node_spill_cost(g, i, spill_costs[i]);
   }

   return ra_get_best_spill_node(g);
}

void
fs_visitor::spill_reg(int spill_reg)
{
   int reg_size = dispatch_width * sizeof(float);
   int size = virtual_grf_sizes[spill_reg];
   unsigned int spill_offset = last_scratch;
   assert(ALIGN(spill_offset, 16) == spill_offset); /* oword read/write req. */
   int spill_base_mrf = dispatch_width > 8 ? 13 : 14;
   /* Spills may use MRFs 13-15 in the SIMD16 case.  Our texturing is done
    * using up to 11 MRFs starting from either m1 or m2, and fb writes can use
    * up to m13 (gen6+ simd16: 2 header + 8 color + 2 src0alpha + 2 omask) or
    * m15 (gen4-5 simd16: 2 header + 8 color + 1 aads + 2 src depth + 2 dst
    * depth), starting from m1.  In summary: We may not be able to spill in
    * SIMD16 mode, because we'd stomp the FB writes.
    */
   if (!spilled_any_registers) {
      bool mrf_used[BRW_MAX_MRF];
      get_used_mrfs(mrf_used);

      for (int i = spill_base_mrf; i < BRW_MAX_MRF; i++) {
         if (mrf_used[i]) {
            fail("Register spilling not supported with m%d used", i);
            return;
         }
      }

      spilled_any_registers = true;
   }
   last_scratch += size * reg_size;

   /* Generate spill/unspill instructions for the objects being
    * spilled.  Right now, we spill or unspill the whole thing to a
    * virtual grf of the same size.  For most instructions, though, we
    * could just spill/unspill the GRF being accessed.
    */
   foreach_block_and_inst (block, fs_inst, inst, cfg) {
      for (unsigned int i = 0; i < inst->sources; i++) {
         if (inst->src[i].file == GRF &&
             inst->src[i].reg == spill_reg) {
            int regs_read = inst->regs_read(this, i);
            int subset_spill_offset = (spill_offset +
                                       reg_size * inst->src[i].reg_offset);
            fs_reg unspill_dst(GRF, virtual_grf_alloc(regs_read));

            inst->src[i].reg = unspill_dst.reg;
            inst->src[i].reg_offset = 0;

            emit_unspill(block, inst, unspill_dst, subset_spill_offset,
                         regs_read);
         }
      }
->dst
.file
== GRF
&&
811 inst
->dst
.reg
== spill_reg
) {
812 int subset_spill_offset
= (spill_offset
+
813 reg_size
* inst
->dst
.reg_offset
);
814 fs_reg
spill_src(GRF
, virtual_grf_alloc(inst
->regs_written
));
816 inst
->dst
.reg
= spill_src
.reg
;
817 inst
->dst
.reg_offset
= 0;
819 /* If our write is going to affect just part of the
820 * inst->regs_written(), then we need to unspill the destination
821 * since we write back out all of the regs_written().
823 if (inst
->is_partial_write())
824 emit_unspill(block
, inst
, spill_src
, subset_spill_offset
,
827 emit_spill(block
, inst
, spill_src
, subset_spill_offset
,
832 invalidate_live_intervals();