/*
 * Copyright © 2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 */
#include "brw_fs.h"
#include "glsl/glsl_types.h"
#include "glsl/ir_optimization.h"
static void
assign_reg(int *reg_hw_locations, fs_reg *reg, int reg_width)
{
   if (reg->file == GRF) {
      assert(reg->reg_offset >= 0);
      reg->reg = reg_hw_locations[reg->reg] + reg->reg_offset * reg_width;
      reg->reg_offset = 0;
   }
}
void
fs_visitor::assign_regs_trivial()
{
   int hw_reg_mapping[this->virtual_grf_count + 1];
   int i;
   int reg_width = dispatch_width / 8;
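
   /* Worked example (illustrative numbers, not from the original): with
    * reg_width == 1, first_non_payload_grf == 4, and two VGRFs of sizes
    * {2, 1}, the cumulative sum below yields hw_reg_mapping = {4, 6, 7},
    * so grf_used ends up as 7.
    */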
   /* Note that compressed instructions require alignment to 2 registers. */
   hw_reg_mapping[0] = ALIGN(this->first_non_payload_grf, reg_width);
   for (i = 1; i <= this->virtual_grf_count; i++) {
      hw_reg_mapping[i] = (hw_reg_mapping[i - 1] +
                           this->virtual_grf_sizes[i - 1] * reg_width);
   }
   this->grf_used = hw_reg_mapping[this->virtual_grf_count];

   foreach_list(node, &this->instructions) {
      fs_inst *inst = (fs_inst *)node;

      assign_reg(hw_reg_mapping, &inst->dst, reg_width);
      assign_reg(hw_reg_mapping, &inst->src[0], reg_width);
      assign_reg(hw_reg_mapping, &inst->src[1], reg_width);
      assign_reg(hw_reg_mapping, &inst->src[2], reg_width);
   }

   if (this->grf_used >= max_grf) {
      fail("Ran out of regs on trivial allocator (%d/%d)\n",
           this->grf_used, max_grf);
   }
}
static void
brw_alloc_reg_set(struct brw_context *brw, int reg_width)
{
   int base_reg_count = BRW_MAX_GRF / reg_width;
   int index = reg_width - 1;
   /* The registers used to make up almost all values handled in the compiler
    * are a scalar value occupying a single register (or 2 registers in the
    * case of 16-wide, which is handled by dividing base_reg_count by 2 and
    * multiplying allocated register numbers by 2).  Things that were
    * aggregates of scalar values at the GLSL level were split to scalar
    * values by split_virtual_grfs().
    *
    * However, texture SEND messages return a series of contiguous registers
    * to write into.  We currently always ask for 4 registers, but we may
    * convert that to use fewer some day.
    *
    * Additionally, on gen5 we need aligned pairs of registers for the PLN
    * instruction, and on gen4 we need 8 contiguous regs for workaround simd16
    * texturing.
    *
    * So we have a need for classes for 1, 2, 4, and 8 registers currently,
    * and we add in '3' to make indexing the array easier for the common case
    * (since we'll probably want it for texturing later).
    *
    * And, on gen7 and newer, we do texturing SEND messages from GRFs, which
    * means that we may need any size up to the sampler message size limit (11
    * regs).
    */
   int class_count;
   int class_sizes[BRW_MAX_MRF];
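
   /* Illustrative arithmetic (assumes BRW_MAX_GRF == 128): for reg_width == 1
    * on gen7+, class_sizes[] becomes {1, ..., 11} and the ra_reg_count
    * computed below is sum(128 - (size - 1)) = 11 * 128 - 55 = 1353 ra
    * registers; for reg_width == 2 it is 11 * 64 - 55 = 649.
    */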
   if (brw->gen >= 7) {
      for (class_count = 0; class_count < 11; class_count++)
         class_sizes[class_count] = class_count + 1;
   } else {
      for (class_count = 0; class_count < 4; class_count++)
         class_sizes[class_count] = class_count + 1;
      class_sizes[class_count++] = 8;
   }
   /* Compute the total number of registers across all classes. */
   int ra_reg_count = 0;
   for (int i = 0; i < class_count; i++) {
      ra_reg_count += base_reg_count - (class_sizes[i] - 1);
   }
   uint8_t *ra_reg_to_grf = ralloc_array(brw, uint8_t, ra_reg_count);
   struct ra_regs *regs = ra_alloc_reg_set(brw, ra_reg_count);

   ra_set_allocate_round_robin(regs);
   int *classes = ralloc_array(brw, int, class_count);
   int aligned_pairs_class = -1;
   /* Now, add the registers to their classes, and add the conflicts
    * between them and the base GRF registers (and also each other).
    */
   int reg = 0;
   int pairs_base_reg = 0;
   int pairs_reg_count = 0;
   for (int i = 0; i < class_count; i++) {
      int class_reg_count = base_reg_count - (class_sizes[i] - 1);
      classes[i] = ra_alloc_reg_class(regs);

      /* Save this off for the aligned pair class at the end. */
      if (class_sizes[i] == 2) {
         pairs_base_reg = reg;
         pairs_reg_count = class_reg_count;
      }

      for (int j = 0; j < class_reg_count; j++) {
         ra_class_add_reg(regs, classes[i], reg);

         ra_reg_to_grf[reg] = j;

         for (int base_reg = j;
              base_reg < j + class_sizes[i];
              base_reg++) {
            ra_add_transitive_reg_conflict(regs, base_reg, reg);
         }

         reg++;
      }
   }
   assert(reg == ra_reg_count);
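
   /* A note on the scheme above: the ra register built for a span of
    * class_sizes[i] GRFs starting at GRF j conflicts with each of the
    * size-1 ra registers j..j+class_sizes[i]-1, and the transitive
    * conflict propagates those, so any two ra registers whose GRF spans
    * overlap end up conflicting.  This relies on the size-1 class being
    * built first.
    */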
   /* Add a special class for aligned pairs, which we'll put delta_x/y
    * in on gen5 so that we can do PLN.
    */
   if (brw->has_pln && reg_width == 1 && brw->gen < 6) {
      aligned_pairs_class = ra_alloc_reg_class(regs);

      for (int i = 0; i < pairs_reg_count; i++) {
         if ((ra_reg_to_grf[pairs_base_reg + i] & 1) == 0) {
            ra_class_add_reg(regs, aligned_pairs_class, pairs_base_reg + i);
         }
      }
   }

   ra_set_finalize(regs, NULL);
   brw->wm.reg_sets[index].regs = regs;
   for (unsigned i = 0; i < ARRAY_SIZE(brw->wm.reg_sets[index].classes); i++)
      brw->wm.reg_sets[index].classes[i] = -1;
   for (int i = 0; i < class_count; i++)
      brw->wm.reg_sets[index].classes[class_sizes[i] - 1] = classes[i];
   brw->wm.reg_sets[index].ra_reg_to_grf = ra_reg_to_grf;
   brw->wm.reg_sets[index].aligned_pairs_class = aligned_pairs_class;
}
void
brw_fs_alloc_reg_sets(struct brw_context *brw)
{
   brw_alloc_reg_set(brw, 1);
   brw_alloc_reg_set(brw, 2);
}
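
/* The two calls above build brw->wm.reg_sets[0] for SIMD8 (reg_width == 1)
 * and brw->wm.reg_sets[1] for SIMD16 (reg_width == 2); assign_regs() picks
 * between them via rsi = reg_width - 1 below.
 */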
static int
count_to_loop_end(fs_inst *do_inst)
{
   int depth = 1;
   int ip = 1;
   for (fs_inst *inst = (fs_inst *)do_inst->next;
        depth > 0;
        inst = (fs_inst *)inst->next) {
      switch (inst->opcode) {
      case BRW_OPCODE_DO:
         depth++;
         break;
      case BRW_OPCODE_WHILE:
         depth--;
         break;
      default:
         break;
      }
      ip++;
   }
   return ip;
}
/**
 * Sets up interference between thread payload registers and the virtual GRFs
 * to be allocated for program temporaries.
 *
 * We want to be able to reallocate the payload for our virtual GRFs, notably
 * because the setup coefficients for a full set of 16 FS inputs takes up 8 of
 * our GRFs.
 *
 * The layout of the payload registers is:
 *
 * 0..nr_payload_regs-1: fixed function setup (including bary coordinates).
 * nr_payload_regs..nr_payload_regs+curb_read_length-1: uniform data
 * nr_payload_regs+curb_read_length..first_non_payload_grf-1: setup coefficients.
 *
 * And we have payload_node_count nodes covering these registers in order
 * (note that in 16-wide, a node is two registers).
 */
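/* Illustrative example (numbers are hypothetical): with 2 regs of fixed
 * function setup, curb_read_length == 2 of uniforms, and 8 regs of setup
 * coefficients, first_non_payload_grf == 12, giving payload_node_count == 12
 * at reg_width == 1 and 6 at reg_width == 2.
 */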
void
fs_visitor::setup_payload_interference(struct ra_graph *g,
                                       int payload_node_count,
                                       int first_payload_node)
{
   int reg_width = dispatch_width / 8;
   int loop_depth = 0;
   int loop_end_ip = 0;
   int ip = 0;

   int payload_last_use_ip[payload_node_count];
   memset(payload_last_use_ip, 0, sizeof(payload_last_use_ip));
   foreach_list(node, &this->instructions) {
      fs_inst *inst = (fs_inst *)node;

      switch (inst->opcode) {
      case BRW_OPCODE_DO:
         loop_depth++;

         /* Since payload regs are def'd only at the start of the shader
          * execution, any uses of the payload within a loop mean the live
          * interval extends to the end of the outermost loop.  Find the ip of
          * the end now.
          */
         if (loop_depth == 1)
            loop_end_ip = ip + count_to_loop_end(inst);
         break;
      case BRW_OPCODE_WHILE:
         loop_depth--;
         break;
      default:
         break;
      }

      int use_ip;
      if (loop_depth > 0)
         use_ip = loop_end_ip;
      else
         use_ip = ip;
      /* Note that UNIFORM args have been turned into FIXED_HW_REG by
       * assign_curbe_setup(), and interpolation uses fixed hardware regs from
       * the start (see interp_reg()).
       */
      for (int i = 0; i < 3; i++) {
         if (inst->src[i].file == HW_REG &&
             inst->src[i].fixed_hw_reg.file == BRW_GENERAL_REGISTER_FILE) {
            int node_nr = inst->src[i].fixed_hw_reg.nr / reg_width;
            if (node_nr >= payload_node_count)
               continue;

            payload_last_use_ip[node_nr] = use_ip;
         }
      }
      /* Special case instructions which have extra implied registers used. */
      switch (inst->opcode) {
      case FS_OPCODE_FB_WRITE:
         /* We could omit this for the !inst->header_present case, except that
          * the simulator apparently incorrectly reads from g0/g1 instead of
          * sideband.  It also really freaks out driver developers to see g0
          * used in unusual places, so just always reserve it.
          */
         payload_last_use_ip[0 / reg_width] = use_ip;
         payload_last_use_ip[1 / reg_width] = use_ip;
         break;

      case FS_OPCODE_LINTERP:
         /* On gen6+ in 16-wide, there are 4 adjacent registers (so 2 nodes)
          * used by PLN's sourcing of the deltas, while we list only the first
          * two in the arguments (1 node).  Pre-gen6, the deltas are computed
          * in normal VGRFs.
          */
         if (brw->gen >= 6) {
            int delta_x_arg = 0;
            if (inst->src[delta_x_arg].file == HW_REG &&
                inst->src[delta_x_arg].fixed_hw_reg.file ==
                BRW_GENERAL_REGISTER_FILE) {
               int sechalf_node = (inst->src[delta_x_arg].fixed_hw_reg.nr /
                                   reg_width) + 1;
               assert(sechalf_node < payload_node_count);
               payload_last_use_ip[sechalf_node] = use_ip;
            }
         }
         break;

      default:
         break;
      }

      ip++;
   }
   for (int i = 0; i < payload_node_count; i++) {
      /* Mark the payload node as interfering with any virtual grf that is
       * live between the start of the program and our last use of the payload
       * node.
       */
      for (int j = 0; j < this->virtual_grf_count; j++) {
         /* Note that we use a <= comparison, unlike virtual_grf_interferes(),
          * in order to not have to worry about the uniform issue described in
          * calculate_live_intervals().
          */
         if (this->virtual_grf_start[j] <= payload_last_use_ip[i]) {
            ra_add_node_interference(g, first_payload_node + i, j);
         }
      }
   }
   for (int i = 0; i < payload_node_count; i++) {
      /* Mark each payload node as being allocated to its physical register.
       *
       * The alternative would be to have per-physical-register classes, which
       * would just be silly.
       */
      ra_set_node_reg(g, first_payload_node + i, i);
   }
}
/**
 * Sets the mrf_used array to indicate which MRFs are used by the shader IR.
 *
 * This is used in assign_regs() to decide which of the GRFs that we use as
 * MRFs on gen7 get normally register allocated, and in register spilling to
 * see if we can actually use MRFs to do spills without overwriting normal MRF
 * contents.
 */
void
fs_visitor::get_used_mrfs(bool *mrf_used)
{
   int reg_width = dispatch_width / 8;

   memset(mrf_used, 0, BRW_MAX_MRF * sizeof(bool));

   foreach_list(node, &this->instructions) {
      fs_inst *inst = (fs_inst *)node;

      if (inst->dst.file == MRF) {
         int reg = inst->dst.reg & ~BRW_MRF_COMPR4;
         mrf_used[reg] = true;
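
         /* Note (assumption about COMPR4 semantics): a COMPR4 write touches
          * MRFs reg and reg + 4 rather than the contiguous pair reg and
          * reg + 1, which is why the two SIMD16 cases below mark different
          * second registers.
          */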
         if (reg_width == 2) {
            if (inst->dst.reg & BRW_MRF_COMPR4) {
               mrf_used[reg + 4] = true;
            } else {
               mrf_used[reg + 1] = true;
            }
         }
      }

      if (inst->mlen > 0) {
         for (int i = 0; i < implied_mrf_writes(inst); i++) {
            mrf_used[inst->base_mrf + i] = true;
         }
      }
   }
}
/**
 * Sets interference between virtual GRFs and usage of the high GRFs for SEND
 * messages (treated as MRFs in code generation).
 */
void
fs_visitor::setup_mrf_hack_interference(struct ra_graph *g, int first_mrf_node)
{
   int reg_width = dispatch_width / 8;

   bool mrf_used[BRW_MAX_MRF];
   get_used_mrfs(mrf_used);
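
   /* Background (assumption): gen7 has no separate MRF file, so SEND
    * payloads are built in the high GRFs starting at GEN7_MRF_HACK_START;
    * pinning those nodes below keeps normal allocation out of them.
    */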
   for (int i = 0; i < BRW_MAX_MRF; i++) {
      /* Mark each MRF reg node as being allocated to its physical register.
       *
       * The alternative would be to have per-physical-register classes, which
       * would just be silly.
       */
      ra_set_node_reg(g, first_mrf_node + i,
                      (GEN7_MRF_HACK_START + i) / reg_width);

      /* Since we don't have any live/dead analysis on the MRFs, just mark all
       * that are used as conflicting with all virtual GRFs.
       */
      if (mrf_used[i]) {
         for (int j = 0; j < this->virtual_grf_count; j++) {
            ra_add_node_interference(g, first_mrf_node + i, j);
         }
      }
   }
}
bool
fs_visitor::assign_regs()
{
   /* Most of this allocation was written for a reg_width of 1
    * (dispatch_width == 8).  In extending to 16-wide, the code was
    * left in place and it was converted to have the hardware
    * registers it's allocating be contiguous physical pairs of regs
    * for reg_width == 2.
    */
   int reg_width = dispatch_width / 8;
   int hw_reg_mapping[this->virtual_grf_count];
   int payload_node_count = (ALIGN(this->first_non_payload_grf, reg_width) /
                             reg_width);
   int rsi = reg_width - 1; /* Which brw->wm.reg_sets[] to use */
   calculate_live_intervals();
   int node_count = this->virtual_grf_count;
   int first_payload_node = node_count;
   node_count += payload_node_count;
   int first_mrf_hack_node = node_count;
   if (brw->gen >= 7)
      node_count += BRW_MAX_GRF - GEN7_MRF_HACK_START;
   struct ra_graph *g = ra_alloc_interference_graph(brw->wm.reg_sets[rsi].regs,
                                                    node_count);
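
   /* Node numbering in the graph, per the code above: [0, virtual_grf_count)
    * are the VGRFs, the next payload_node_count nodes are the payload
    * registers, and on gen7+ the remaining nodes stand for the high GRFs
    * used as MRFs.
    */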
   for (int i = 0; i < this->virtual_grf_count; i++) {
      unsigned size = this->virtual_grf_sizes[i];
      int c;

      assert(size <= ARRAY_SIZE(brw->wm.reg_sets[rsi].classes) &&
             "Register allocation relies on split_virtual_grfs()");
      c = brw->wm.reg_sets[rsi].classes[size - 1];

      /* Special case: on pre-GEN6 hardware that supports PLN, the
       * second operand of a PLN instruction needs to be an
       * even-numbered register, so we have a special register class
       * wm_aligned_pairs_class to handle this case.  pre-GEN6 always
       * uses this->delta_x[BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC] as the
       * second operand of a PLN instruction (since it doesn't support
       * any other interpolation modes).  So all we need to do is find
       * that register and set it to the appropriate class.
       */
      if (brw->wm.reg_sets[rsi].aligned_pairs_class >= 0 &&
          this->delta_x[BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC].reg == i) {
         c = brw->wm.reg_sets[rsi].aligned_pairs_class;
      }

      ra_set_node_class(g, i, c);

      for (int j = 0; j < i; j++) {
         if (virtual_grf_interferes(i, j)) {
            ra_add_node_interference(g, i, j);
         }
      }
   }
   setup_payload_interference(g, payload_node_count, first_payload_node);
   if (brw->gen >= 7)
      setup_mrf_hack_interference(g, first_mrf_hack_node);
   /* Debug of register spilling: Go spill everything. */
   if (0) {
      int reg = choose_spill_reg(g);

      if (reg != -1) {
         spill_reg(reg);
         ralloc_free(g);
         return false;
      }
   }
   if (!ra_allocate_no_spills(g)) {
      /* Failed to allocate registers.  Spill a reg, and the caller will
       * loop back into here to try again.
       */
      int reg = choose_spill_reg(g);

      if (reg == -1) {
         fail("no register to spill:\n");
         dump_instructions();
      } else if (dispatch_width == 16) {
         fail("Failure to register allocate.  Reduce number of live scalar "
              "values to avoid this.");
      } else {
         spill_reg(reg);
      }

      ralloc_free(g);
      return false;
   }
   /* Get the chosen virtual registers for each node, and map virtual
    * regs in the register classes back down to real hardware reg
    * numbers.
    */
   this->grf_used = payload_node_count * reg_width;
   for (int i = 0; i < this->virtual_grf_count; i++) {
      int reg = ra_get_node_reg(g, i);

      hw_reg_mapping[i] = brw->wm.reg_sets[rsi].ra_reg_to_grf[reg] * reg_width;
      this->grf_used = MAX2(this->grf_used,
                            hw_reg_mapping[i] + this->virtual_grf_sizes[i] *
                            reg_width);
   }
   foreach_list(node, &this->instructions) {
      fs_inst *inst = (fs_inst *)node;

      assign_reg(hw_reg_mapping, &inst->dst, reg_width);
      assign_reg(hw_reg_mapping, &inst->src[0], reg_width);
      assign_reg(hw_reg_mapping, &inst->src[1], reg_width);
      assign_reg(hw_reg_mapping, &inst->src[2], reg_width);
   }

   ralloc_free(g);

   return true;
}
void
fs_visitor::emit_unspill(fs_inst *inst, fs_reg dst, uint32_t spill_offset,
                         int count)
{
   for (int i = 0; i < count; i++) {
      fs_inst *unspill_inst = new(mem_ctx) fs_inst(FS_OPCODE_UNSPILL, dst);
      unspill_inst->offset = spill_offset;
      unspill_inst->ir = inst->ir;
      unspill_inst->annotation = inst->annotation;

      /* Choose a MRF that won't conflict with an MRF that's live across the
       * spill.  Nothing else will make it up to MRF 14/15.
       */
      unspill_inst->base_mrf = 14;
      unspill_inst->mlen = 1; /* header contains offset */
      inst->insert_before(unspill_inst);

      dst.reg_offset++;
      spill_offset += REG_SIZE;
   }
}
int
fs_visitor::choose_spill_reg(struct ra_graph *g)
{
   float loop_scale = 1.0;
   float spill_costs[this->virtual_grf_count];
   bool no_spill[this->virtual_grf_count];

   for (int i = 0; i < this->virtual_grf_count; i++) {
      spill_costs[i] = 0.0;
      no_spill[i] = false;
   }
   /* Calculate costs for spilling nodes.  Call it a cost of 1 per
    * spill/unspill we'll have to do, and guess that the insides of
    * loops run 10 times.
    */
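   /* Worked example (illustrative): a GRF source read inside two nested
    * loops adds 10 * 10 = 100 to that VGRF's spill cost, since loop_scale is
    * multiplied by 10 at each DO and divided by 10 at each WHILE below.
    */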
   foreach_list(node, &this->instructions) {
      fs_inst *inst = (fs_inst *)node;

      for (unsigned int i = 0; i < 3; i++) {
         if (inst->src[i].file == GRF) {
            spill_costs[inst->src[i].reg] += loop_scale;

            /* Register spilling logic assumes full-width registers; smeared
             * registers have a width of 1 so if we try to spill them we'll
             * generate invalid assembly.  This shouldn't be a problem because
             * smeared registers are only used as short-term temporaries when
             * loading pull constants, so spilling them is unlikely to reduce
             * register pressure anyhow.
             */
            if (inst->src[i].smear >= 0) {
               no_spill[inst->src[i].reg] = true;
            }
         }
      }

      if (inst->dst.file == GRF) {
         spill_costs[inst->dst.reg] += inst->regs_written * loop_scale;

         if (inst->dst.smear >= 0) {
            no_spill[inst->dst.reg] = true;
         }
      }
      switch (inst->opcode) {

      case BRW_OPCODE_DO:
         loop_scale *= 10;
         break;

      case BRW_OPCODE_WHILE:
         loop_scale /= 10;
         break;

      case FS_OPCODE_SPILL:
         if (inst->src[0].file == GRF)
            no_spill[inst->src[0].reg] = true;
         break;

      case FS_OPCODE_UNSPILL:
         if (inst->dst.file == GRF)
            no_spill[inst->dst.reg] = true;
         break;

      default:
         break;
      }
   }
   for (int i = 0; i < this->virtual_grf_count; i++) {
      if (!no_spill[i])
         ra_set_node_spill_cost(g, i, spill_costs[i]);
   }

   return ra_get_best_spill_node(g);
}
void
fs_visitor::spill_reg(int spill_reg)
{
   int size = virtual_grf_sizes[spill_reg];
   unsigned int spill_offset = c->last_scratch;
   assert(ALIGN(spill_offset, 16) == spill_offset); /* oword read/write req. */
   c->last_scratch += size * REG_SIZE;
   /* Generate spill/unspill instructions for the objects being
    * spilled.  Right now, we spill or unspill the whole thing to a
    * virtual grf of the same size.  For most instructions, though, we
    * could just spill/unspill the GRF being accessed.
    */
   foreach_list(node, &this->instructions) {
      fs_inst *inst = (fs_inst *)node;

      for (unsigned int i = 0; i < 3; i++) {
         if (inst->src[i].file == GRF &&
             inst->src[i].reg == spill_reg) {
            int regs_read = inst->regs_read(this, i);
            int subset_spill_offset = (spill_offset +
                                       REG_SIZE * inst->src[i].reg_offset);

            inst->src[i].reg = virtual_grf_alloc(regs_read);
            inst->src[i].reg_offset = 0;

            emit_unspill(inst, inst->src[i], subset_spill_offset, regs_read);
         }
      }
      if (inst->dst.file == GRF &&
          inst->dst.reg == spill_reg) {
         int subset_spill_offset = (spill_offset +
                                    REG_SIZE * inst->dst.reg_offset);
         inst->dst.reg = virtual_grf_alloc(inst->regs_written);
         inst->dst.reg_offset = 0;

         /* If our write is going to affect just part of the
          * inst->regs_written, then we need to unspill the destination
          * since we write back out all of the regs_written.
          */
         if (inst->predicate || inst->force_uncompressed || inst->force_sechalf) {
            emit_unspill(inst, inst->dst, subset_spill_offset,
                         inst->regs_written);
         }
         fs_reg spill_src = inst->dst;
         spill_src.reg_offset = 0;
         spill_src.abs = false;
         spill_src.negate = false;
         spill_src.smear = -1;

         for (int chan = 0; chan < inst->regs_written; chan++) {
            fs_inst *spill_inst = new(mem_ctx) fs_inst(FS_OPCODE_SPILL,
                                                       reg_null_f, spill_src);
            spill_src.reg_offset++;
            spill_inst->offset = subset_spill_offset + chan * REG_SIZE;
            spill_inst->ir = inst->ir;
            spill_inst->annotation = inst->annotation;
            spill_inst->base_mrf = 14;
            spill_inst->mlen = 2; /* header, value */
            inst->insert_after(spill_inst);
         }
      }
   }

   invalidate_live_intervals();
}