/*
 * Copyright © 2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 */
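
/* Register allocation for the fragment shader backend: either a trivial
 * flat layout of the virtual GRFs (assign_regs_trivial), or graph-coloring
 * allocation with spilling to scratch space when coloring fails
 * (assign_regs / spill_reg).
 */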

#include "brw_fs.h"
#include "glsl/glsl_types.h"
#include "glsl/ir_optimization.h"
#include "glsl/ir_print_visitor.h"
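
/* Rewrite one register reference in place: fold the virtual GRF's assigned
 * hardware location and the reference's reg_offset into a flat GRF number.
 */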
static void
assign_reg(int *reg_hw_locations, fs_reg *reg, int reg_width)
{
   if (reg->file == GRF) {
      assert(reg->reg_offset >= 0);
      reg->reg = reg_hw_locations[reg->reg] + reg->reg_offset * reg_width;
      reg->reg_offset = 0;
   }
}
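
/* Assign hardware registers with no packing: each virtual GRF simply gets
 * the next free slot after the payload.  Fails if the flat layout exceeds
 * max_grf.
 */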
void
fs_visitor::assign_regs_trivial()
{
   int hw_reg_mapping[this->virtual_grf_next + 1];
   int i;
   int reg_width = c->dispatch_width / 8;

   /* Note that compressed instructions require alignment to 2 registers. */
   hw_reg_mapping[0] = ALIGN(this->first_non_payload_grf, reg_width);
   for (i = 1; i <= this->virtual_grf_next; i++) {
      hw_reg_mapping[i] = (hw_reg_mapping[i - 1] +
                           this->virtual_grf_sizes[i - 1] * reg_width);
   }
   this->grf_used = hw_reg_mapping[this->virtual_grf_next];

   foreach_list(node, &this->instructions) {
      fs_inst *inst = (fs_inst *)node;

      assign_reg(hw_reg_mapping, &inst->dst, reg_width);
      assign_reg(hw_reg_mapping, &inst->src[0], reg_width);
      assign_reg(hw_reg_mapping, &inst->src[1], reg_width);
   }

   if (this->grf_used >= max_grf) {
      fail("Ran out of regs on trivial allocator (%d/%d)\n",
           this->grf_used, max_grf);
   }
}
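
/* Build the ra register set describing the hardware GRFs and the register
 * classes.
 *
 * Class i holds contiguous runs of class_sizes[i] GRFs; a run starting at
 * GRF g is legal as long as it still fits below base_reg_count, so the
 * class contains base_reg_count - (class_sizes[i] - 1) ra registers.  For
 * example, with base_reg_count == 96 and class_sizes == {1, 2}, the set
 * holds 96 + 95 == 191 ra registers in total.
 */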
static void
brw_alloc_reg_set_for_classes(struct brw_context *brw,
                              int *class_sizes,
                              int class_count,
                              int reg_width,
                              int base_reg_count)
{
   struct intel_context *intel = &brw->intel;

   /* Compute the total number of registers across all classes. */
   int ra_reg_count = 0;
   for (int i = 0; i < class_count; i++) {
      ra_reg_count += base_reg_count - (class_sizes[i] - 1);
   }

   ralloc_free(brw->wm.ra_reg_to_grf);
   brw->wm.ra_reg_to_grf = ralloc_array(brw, uint8_t, ra_reg_count);
   ralloc_free(brw->wm.regs);
   brw->wm.regs = ra_alloc_reg_set(brw, ra_reg_count);
   ralloc_free(brw->wm.classes);
   brw->wm.classes = ralloc_array(brw, int, class_count + 1);

   brw->wm.aligned_pairs_class = -1;

   /* Now, add the registers to their classes, and add the conflicts
    * between them and the base GRF registers (and also each other).
    */
   int reg = 0;
   int pairs_base_reg = 0;
   int pairs_reg_count = 0;
   for (int i = 0; i < class_count; i++) {
      int class_reg_count = base_reg_count - (class_sizes[i] - 1);
      brw->wm.classes[i] = ra_alloc_reg_class(brw->wm.regs);

      /* Save this off for the aligned pair class at the end. */
      if (class_sizes[i] == 2) {
         pairs_base_reg = reg;
         pairs_reg_count = class_reg_count;
      }

      for (int j = 0; j < class_reg_count; j++) {
         ra_class_add_reg(brw->wm.regs, brw->wm.classes[i], reg);

         brw->wm.ra_reg_to_grf[reg] = j;

         for (int base_reg = j;
              base_reg < j + class_sizes[i];
              base_reg++) {
            ra_add_transitive_reg_conflict(brw->wm.regs, base_reg, reg);
         }

         reg++;
      }
   }
   assert(reg == ra_reg_count);

   /* Add a special class for aligned pairs, which we'll put delta_x/y
    * in on gen5 so that we can do PLN.
    */
   if (brw->has_pln && reg_width == 1 && intel->gen < 6) {
      brw->wm.aligned_pairs_class = ra_alloc_reg_class(brw->wm.regs);

      for (int i = 0; i < pairs_reg_count; i++) {
         if ((brw->wm.ra_reg_to_grf[pairs_base_reg + i] & 1) == 0) {
            ra_class_add_reg(brw->wm.regs, brw->wm.aligned_pairs_class,
                             pairs_base_reg + i);
         }
      }
   }

   ra_set_finalize(brw->wm.regs);
}
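
/* Graph-coloring allocation.  Returns false when coloring failed and a
 * register was spilled, in which case the caller is expected to loop back
 * in and try again (per the comment at the spill site below).
 */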
bool
fs_visitor::assign_regs()
{
   /* Most of this allocation was written for a reg_width of 1
    * (dispatch_width == 8).  In extending to 16-wide, the code was
    * left in place and it was converted to have the hardware
    * registers it's allocating be contiguous physical pairs of regs
    * for reg_width == 2.
    */
   int reg_width = c->dispatch_width / 8;
   int hw_reg_mapping[this->virtual_grf_next];
   int first_assigned_grf = ALIGN(this->first_non_payload_grf, reg_width);
   int base_reg_count = (max_grf - first_assigned_grf) / reg_width;
   int class_sizes[base_reg_count];
   int class_count = 0;

   calculate_live_intervals();

   /* Set up the register classes.
    *
    * The base registers store a scalar value.  For texture samples,
    * we get virtual GRFs composed of 4 contiguous hw registers.  For
    * structures and arrays, we store them as contiguous larger things
    * than that, though we should be able to do better most of the
    * time.
    */
   class_sizes[class_count++] = 1;
   if (brw->has_pln && intel->gen < 6) {
      /* Always set up the (unaligned) pairs for gen5, so we can find
       * them for making the aligned pair class.
       */
      class_sizes[class_count++] = 2;
   }
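
   /* Collect one register class per distinct virtual GRF size used by the
    * program.
    */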
   for (int r = 0; r < this->virtual_grf_next; r++) {
      int i;

      for (i = 0; i < class_count; i++) {
         if (class_sizes[i] == this->virtual_grf_sizes[r])
            break;
      }
      if (i == class_count) {
         if (this->virtual_grf_sizes[r] >= base_reg_count) {
            fail("Object too large to register allocate.\n");
         }

         class_sizes[class_count++] = this->virtual_grf_sizes[r];
      }
   }

   brw_alloc_reg_set_for_classes(brw, class_sizes, class_count,
                                 reg_width, base_reg_count);

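   /* One graph node per virtual GRF; an edge between two nodes means their
    * live ranges overlap, so they cannot share a hardware register.
    */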
   struct ra_graph *g = ra_alloc_interference_graph(brw->wm.regs,
                                                    this->virtual_grf_next);

   for (int i = 0; i < this->virtual_grf_next; i++) {
      for (int c = 0; c < class_count; c++) {
         if (class_sizes[c] == this->virtual_grf_sizes[i]) {
            /* Special case: on pre-GEN6 hardware that supports PLN, the
             * second operand of a PLN instruction needs to be an
             * even-numbered register, so we have a special register class
             * wm_aligned_pairs_class to handle this case.  pre-GEN6 always
             * uses this->delta_x[BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC] as the
             * second operand of a PLN instruction (since it doesn't support
             * any other interpolation modes).  So all we need to do is find
             * that register and set it to the appropriate class.
             */
            if (brw->wm.aligned_pairs_class >= 0 &&
                this->delta_x[BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC].reg == i) {
               ra_set_node_class(g, i, brw->wm.aligned_pairs_class);
            } else {
               ra_set_node_class(g, i, brw->wm.classes[c]);
            }
            break;
         }
      }

      for (int j = 0; j < i; j++) {
         if (virtual_grf_interferes(i, j)) {
            ra_add_node_interference(g, i, j);
         }
      }
   }

   if (!ra_allocate_no_spills(g)) {
      /* Failed to allocate registers.  Spill a reg, and the caller will
       * loop back into here to try again.
       */
      int reg = choose_spill_reg(g);

      if (reg == -1) {
         fail("no register to spill\n");
      } else if (intel->gen >= 7) {
         fail("no spilling support on gen7 yet\n");
      } else if (c->dispatch_width == 16) {
         fail("no spilling support on 16-wide yet\n");
      } else {
         spill_reg(reg);
      }

      ralloc_free(g);

      return false;
   }

   /* Get the chosen virtual registers for each node, and map virtual
    * regs in the register classes back down to real hardware reg
    * numbers.
    */
   this->grf_used = first_assigned_grf;
   for (int i = 0; i < this->virtual_grf_next; i++) {
      int reg = ra_get_node_reg(g, i);

      hw_reg_mapping[i] = (first_assigned_grf +
                           brw->wm.ra_reg_to_grf[reg] * reg_width);
      this->grf_used = MAX2(this->grf_used,
                            hw_reg_mapping[i] + this->virtual_grf_sizes[i] *
                            reg_width);
   }

   foreach_list(node, &this->instructions) {
      fs_inst *inst = (fs_inst *)node;

      assign_reg(hw_reg_mapping, &inst->dst, reg_width);
      assign_reg(hw_reg_mapping, &inst->src[0], reg_width);
      assign_reg(hw_reg_mapping, &inst->src[1], reg_width);
   }

   ralloc_free(g);

   return true;
}
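
/* Emit a scratch-space read of each physical register of a spilled virtual
 * GRF, inserted immediately before the instruction that consumes the value.
 */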
void
fs_visitor::emit_unspill(fs_inst *inst, fs_reg dst, uint32_t spill_offset)
{
   int size = virtual_grf_sizes[dst.reg];
   dst.reg_offset = 0;

   for (int chan = 0; chan < size; chan++) {
      fs_inst *unspill_inst = new(mem_ctx) fs_inst(FS_OPCODE_UNSPILL,
                                                   dst);
      dst.reg_offset++;
      unspill_inst->offset = spill_offset + chan * REG_SIZE;
      unspill_inst->ir = inst->ir;
      unspill_inst->annotation = inst->annotation;

      /* Choose a MRF that won't conflict with an MRF that's live across the
       * spill.  Nothing else will make it up to MRF 14/15.
       */
      unspill_inst->base_mrf = 14;
      unspill_inst->mlen = 1; /* header contains offset */
      inst->insert_before(unspill_inst);
   }
}
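
/* Estimate a spill cost for every virtual GRF (accesses weighted by size
 * and by an estimated 10x per loop nesting level), mark registers involved
 * in existing spill/unspill code as unspillable, and let the allocator pick
 * the cheapest candidate.
 */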
int
fs_visitor::choose_spill_reg(struct ra_graph *g)
{
   float loop_scale = 1.0;
   float spill_costs[this->virtual_grf_next];
   bool no_spill[this->virtual_grf_next];

   for (int i = 0; i < this->virtual_grf_next; i++) {
      spill_costs[i] = 0.0;
      no_spill[i] = false;
   }

   /* Calculate costs for spilling nodes.  Call it a cost of 1 per
    * spill/unspill we'll have to do, and guess that the insides of
    * loops run 10 times.
    */
   foreach_list(node, &this->instructions) {
      fs_inst *inst = (fs_inst *)node;

      for (unsigned int i = 0; i < 3; i++) {
         if (inst->src[i].file == GRF) {
            int size = virtual_grf_sizes[inst->src[i].reg];
            spill_costs[inst->src[i].reg] += size * loop_scale;
         }
      }

      if (inst->dst.file == GRF) {
         int size = virtual_grf_sizes[inst->dst.reg];
         spill_costs[inst->dst.reg] += size * loop_scale;
      }

      switch (inst->opcode) {

      case BRW_OPCODE_DO:
         loop_scale *= 10;
         break;

      case BRW_OPCODE_WHILE:
         loop_scale /= 10;
         break;

      case FS_OPCODE_SPILL:
         if (inst->src[0].file == GRF)
            no_spill[inst->src[0].reg] = true;
         break;

      case FS_OPCODE_UNSPILL:
         if (inst->dst.file == GRF)
            no_spill[inst->dst.reg] = true;
         break;

      default:
         break;
      }
   }

   for (int i = 0; i < this->virtual_grf_next; i++) {
      if (!no_spill[i])
         ra_set_node_spill_cost(g, i, spill_costs[i]);
   }

   return ra_get_best_spill_node(g);
}
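
/* Rewrite every use of spill_reg to go through scratch space: reads get a
 * fresh virtual GRF unspilled right before the use, and writes are followed
 * by per-register scratch writes.
 */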
void
fs_visitor::spill_reg(int spill_reg)
{
   int size = virtual_grf_sizes[spill_reg];
   unsigned int spill_offset = c->last_scratch;
   assert(ALIGN(spill_offset, 16) == spill_offset); /* oword read/write req. */
   c->last_scratch += size * REG_SIZE;

   /* Generate spill/unspill instructions for the objects being
    * spilled.  Right now, we spill or unspill the whole thing to a
    * virtual grf of the same size.  For most instructions, though, we
    * could just spill/unspill the GRF being accessed.
    */
   foreach_list(node, &this->instructions) {
      fs_inst *inst = (fs_inst *)node;

      for (unsigned int i = 0; i < 3; i++) {
         if (inst->src[i].file == GRF &&
             inst->src[i].reg == spill_reg) {
            inst->src[i].reg = virtual_grf_alloc(size);
            emit_unspill(inst, inst->src[i], spill_offset);
         }
      }

      if (inst->dst.file == GRF &&
          inst->dst.reg == spill_reg) {
         inst->dst.reg = virtual_grf_alloc(size);

         /* Since we spill/unspill the whole thing even if we access
          * just a component, we may need to unspill before the
          * instruction we're spilling for.
          */
         if (size != 1 || inst->predicated) {
            emit_unspill(inst, inst->dst, spill_offset);
         }

         fs_reg spill_src = inst->dst;
         spill_src.reg_offset = 0;
         spill_src.abs = false;
         spill_src.negate = false;
         spill_src.smear = -1;

         for (int chan = 0; chan < size; chan++) {
            fs_inst *spill_inst = new(mem_ctx) fs_inst(FS_OPCODE_SPILL,
                                                       reg_null_f, spill_src);
            spill_src.reg_offset++;
            spill_inst->offset = spill_offset + chan * REG_SIZE;
            spill_inst->ir = inst->ir;
            spill_inst->annotation = inst->annotation;
            spill_inst->base_mrf = 14;
            spill_inst->mlen = 2; /* header, value */
            inst->insert_after(spill_inst);
         }
      }
   }

   this->live_intervals_valid = false;
}