intel/compiler/fs: Switch liveness analysis to IR analysis framework
[mesa.git] src/intel/compiler/brw_fs_reg_allocate.cpp
/*
 * Copyright © 2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include "brw_eu.h"
#include "brw_fs.h"
#include "brw_cfg.h"
#include "util/register_allocate.h"

using namespace brw;

static void
assign_reg(unsigned *reg_hw_locations, fs_reg *reg)
{
   if (reg->file == VGRF) {
      reg->nr = reg_hw_locations[reg->nr] + reg->offset / REG_SIZE;
      reg->offset %= REG_SIZE;
   }
}

void
fs_visitor::assign_regs_trivial()
{
   unsigned hw_reg_mapping[this->alloc.count + 1];
   unsigned i;
   int reg_width = dispatch_width / 8;

   /* Note that compressed instructions require alignment to 2 registers. */
   hw_reg_mapping[0] = ALIGN(this->first_non_payload_grf, reg_width);
   for (i = 1; i <= this->alloc.count; i++) {
      hw_reg_mapping[i] = (hw_reg_mapping[i - 1] +
                           this->alloc.sizes[i - 1]);
   }
   this->grf_used = hw_reg_mapping[this->alloc.count];

   foreach_block_and_inst(block, fs_inst, inst, cfg) {
      assign_reg(hw_reg_mapping, &inst->dst);
      for (i = 0; i < inst->sources; i++) {
         assign_reg(hw_reg_mapping, &inst->src[i]);
      }
   }

   if (this->grf_used >= max_grf) {
      fail("Ran out of regs on trivial allocator (%d/%d)\n",
           this->grf_used, max_grf);
   } else {
      this->alloc.count = this->grf_used;
   }
}

/**
 * Size of a register from the aligned_bary_class register class.
 */
static unsigned
aligned_bary_size(unsigned dispatch_width)
{
   return (dispatch_width == 8 ? 2 : 4);
}

static void
brw_alloc_reg_set(struct brw_compiler *compiler, int dispatch_width)
{
   const struct gen_device_info *devinfo = compiler->devinfo;
   int base_reg_count = BRW_MAX_GRF;
   const int index = _mesa_logbase2(dispatch_width / 8);

   if (dispatch_width > 8 && devinfo->gen >= 7) {
      /* For IVB+, we don't need the PLN hacks or the even-reg alignment in
       * SIMD16.  Therefore, we can use the exact same register sets for
       * SIMD16 as we do for SIMD8 and we don't need to recalculate them.
       */
      compiler->fs_reg_sets[index] = compiler->fs_reg_sets[0];
      return;
   }

   /* The registers used to make up almost all values handled in the compiler
    * are a scalar value occupying a single register (or 2 registers in the
    * case of SIMD16, which is handled by dividing base_reg_count by 2 and
    * multiplying allocated register numbers by 2).  Things that were
    * aggregates of scalar values at the GLSL level were split to scalar
    * values by split_virtual_grfs().
    *
    * However, texture SEND messages return a series of contiguous registers
    * to write into.  We currently always ask for 4 registers, but we may
    * convert that to use fewer some day.
    *
    * Additionally, on gen5 we need aligned pairs of registers for the PLN
    * instruction, and on gen4 we need 8 contiguous regs for the SIMD16
    * texturing workaround.
    */
   const int class_count = MAX_VGRF_SIZE;
   int class_sizes[MAX_VGRF_SIZE];
   for (unsigned i = 0; i < MAX_VGRF_SIZE; i++)
      class_sizes[i] = i + 1;

   memset(compiler->fs_reg_sets[index].class_to_ra_reg_range, 0,
          sizeof(compiler->fs_reg_sets[index].class_to_ra_reg_range));
   int *class_to_ra_reg_range = compiler->fs_reg_sets[index].class_to_ra_reg_range;

   /* Compute the total number of registers across all classes. */
   int ra_reg_count = 0;
   for (int i = 0; i < class_count; i++) {
      if (devinfo->gen <= 5 && dispatch_width >= 16) {
         /* From the G45 PRM:
          *
          * In order to reduce the hardware complexity, the following
          * rules and restrictions apply to the compressed instruction:
          * ...
          * * Operand Alignment Rule: With the exceptions listed below, a
          *   source/destination operand in general should be aligned to
          *   even 256-bit physical register with a region size equal to
          *   two 256-bit physical register
          */
         ra_reg_count += (base_reg_count - (class_sizes[i] - 1)) / 2;
      } else {
         ra_reg_count += base_reg_count - (class_sizes[i] - 1);
      }
      /* Mark the last register.  We'll fill in the beginnings later. */
      class_to_ra_reg_range[class_sizes[i]] = ra_reg_count;
   }
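
   /* Illustrative arithmetic (an added note, not from the original source):
    * assuming base_reg_count == BRW_MAX_GRF == 128, a class of size n has
    * 128 - (n - 1) possible starting GRFs, so the size-1 class contributes
    * 128 registers, the size-2 class 127, and so on (halved on gen4/5
    * SIMD16, where only even-aligned pairs are allowed).  ra_reg_count is
    * the running total and class_to_ra_reg_range[n] records where each
    * class's registers end in that numbering.
    */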

   /* Fill out the rest of the range markers */
   for (int i = 1; i < 17; ++i) {
      if (class_to_ra_reg_range[i] == 0)
         class_to_ra_reg_range[i] = class_to_ra_reg_range[i-1];
   }

   uint8_t *ra_reg_to_grf = ralloc_array(compiler, uint8_t, ra_reg_count);
   struct ra_regs *regs = ra_alloc_reg_set(compiler, ra_reg_count, false);
   if (devinfo->gen >= 6)
      ra_set_allocate_round_robin(regs);
   int *classes = ralloc_array(compiler, int, class_count);
   int aligned_bary_class = -1;

   /* Allocate space for q values.  We allocate class_count + 1 because we
    * want to leave room for the aligned barycentric class if we have it.
    */
   unsigned int **q_values = ralloc_array(compiler, unsigned int *,
                                          class_count + 1);
   for (int i = 0; i < class_count + 1; ++i)
      q_values[i] = ralloc_array(q_values, unsigned int, class_count + 1);

   /* Now, add the registers to their classes, and add the conflicts
    * between them and the base GRF registers (and also each other).
    */
   int reg = 0;
   int aligned_bary_base_reg = 0;
   int aligned_bary_reg_count = 0;
   for (int i = 0; i < class_count; i++) {
      int class_reg_count;
      if (devinfo->gen <= 5 && dispatch_width >= 16) {
         class_reg_count = (base_reg_count - (class_sizes[i] - 1)) / 2;

         /* See comment below.  The only difference here is that we are
          * dealing with pairs of registers instead of single registers.
          * Registers of odd sizes simply get rounded up.
          */
         for (int j = 0; j < class_count; j++)
            q_values[i][j] = (class_sizes[i] + 1) / 2 +
                             (class_sizes[j] + 1) / 2 - 1;
      } else {
         class_reg_count = base_reg_count - (class_sizes[i] - 1);

         /* From register_allocate.c:
          *
          * q(B,C) (indexed by C, B is this register class) in
          * Runeson/Nyström paper.  This is "how many registers of B could
          * the worst choice register from C conflict with".
          *
          * If we just let the register allocation algorithm compute these
          * values, it is extremely expensive.  However, since all of our
          * registers are laid out, we can very easily compute them
          * ourselves.  View the register from C as fixed starting at GRF n
          * somewhere in the middle, and the register from B as sliding back
          * and forth.  Then the first register to conflict from B is the
          * one starting at n - class_size[B] + 1 and the last register to
          * conflict will start at n + class_size[B] - 1.  Therefore, the
          * number of conflicts from B is class_size[B] + class_size[C] - 1.
          *
          *   +-+-+-+-+-+-+     +-+-+-+-+-+-+
          * B | | | | | |n| --> | | | | | | |
          *   +-+-+-+-+-+-+     +-+-+-+-+-+-+
          *             +-+-+-+-+-+
          * C           |n| | | | |
          *             +-+-+-+-+-+
          */
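
         /* Worked example (an added note, not from the original source):
          * with class_size[B] == 2 and class_size[C] == 3, a C register
          * fixed at GRF n conflicts with B registers starting anywhere in
          * [n - 1, n + 2], i.e. 2 + 3 - 1 == 4 registers of B, which is
          * exactly the q value stored below.
          */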
         for (int j = 0; j < class_count; j++)
            q_values[i][j] = class_sizes[i] + class_sizes[j] - 1;
      }
      classes[i] = ra_alloc_reg_class(regs);

      /* Save this off for the aligned barycentric class at the end. */
      if (class_sizes[i] == int(aligned_bary_size(dispatch_width))) {
         aligned_bary_base_reg = reg;
         aligned_bary_reg_count = class_reg_count;
      }

      if (devinfo->gen <= 5 && dispatch_width >= 16) {
         for (int j = 0; j < class_reg_count; j++) {
            ra_class_add_reg(regs, classes[i], reg);

            ra_reg_to_grf[reg] = j * 2;

            for (int base_reg = j;
                 base_reg < j + (class_sizes[i] + 1) / 2;
                 base_reg++) {
               ra_add_reg_conflict(regs, base_reg, reg);
            }

            reg++;
         }
      } else {
         for (int j = 0; j < class_reg_count; j++) {
            ra_class_add_reg(regs, classes[i], reg);

            ra_reg_to_grf[reg] = j;

            for (int base_reg = j;
                 base_reg < j + class_sizes[i];
                 base_reg++) {
               ra_add_reg_conflict(regs, base_reg, reg);
            }

            reg++;
         }
      }
   }
   assert(reg == ra_reg_count);

   /* Applying transitivity to all of the base registers gives us the
    * appropriate register conflict relationships everywhere.
    */
   for (int reg = 0; reg < base_reg_count; reg++)
      ra_make_reg_conflicts_transitive(regs, reg);

   /* Add a special class for aligned barycentrics, which we'll put the
    * first source of LINTERP on so that we can do PLN on Gen <= 6.
    */
   if (devinfo->has_pln && (devinfo->gen == 6 ||
                            (dispatch_width == 8 && devinfo->gen <= 5))) {
      aligned_bary_class = ra_alloc_reg_class(regs);

      for (int i = 0; i < aligned_bary_reg_count; i++) {
         if ((ra_reg_to_grf[aligned_bary_base_reg + i] & 1) == 0) {
            ra_class_add_reg(regs, aligned_bary_class,
                             aligned_bary_base_reg + i);
         }
      }

      for (int i = 0; i < class_count; i++) {
         /* These are a little counter-intuitive because the barycentric
          * registers are required to be aligned while the registers they are
          * potentially interfering with are not.  In the case where the size
          * is even, the worst-case is that the register is odd-aligned.  In
          * the odd-size case, it doesn't matter.
          */
         q_values[class_count][i] = class_sizes[i] / 2 +
                                    aligned_bary_size(dispatch_width) / 2;
         q_values[i][class_count] = class_sizes[i] +
                                    aligned_bary_size(dispatch_width) - 1;
      }
      q_values[class_count][class_count] = aligned_bary_size(dispatch_width) - 1;
   }
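
   /* Illustrative check of the values above (an added note, not from the
    * original source): in SIMD8, aligned_bary_size() == 2, so the three
    * expressions evaluate to class_sizes[i] / 2 + 1, class_sizes[i] + 1 and
    * 1 respectively -- two even-aligned pairs can only ever collide in one
    * relative position, while an unaligned register of size s can collide
    * with s + 1 starting positions of an aligned pair.
    */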

   ra_set_finalize(regs, q_values);

   ralloc_free(q_values);

   compiler->fs_reg_sets[index].regs = regs;
   for (unsigned i = 0; i < ARRAY_SIZE(compiler->fs_reg_sets[index].classes); i++)
      compiler->fs_reg_sets[index].classes[i] = -1;
   for (int i = 0; i < class_count; i++)
      compiler->fs_reg_sets[index].classes[class_sizes[i] - 1] = classes[i];
   compiler->fs_reg_sets[index].ra_reg_to_grf = ra_reg_to_grf;
   compiler->fs_reg_sets[index].aligned_bary_class = aligned_bary_class;
}

void
brw_fs_alloc_reg_sets(struct brw_compiler *compiler)
{
   brw_alloc_reg_set(compiler, 8);
   brw_alloc_reg_set(compiler, 16);
   brw_alloc_reg_set(compiler, 32);
}

static int
count_to_loop_end(const bblock_t *block)
{
   if (block->end()->opcode == BRW_OPCODE_WHILE)
      return block->end_ip;

   int depth = 1;
   /* Skip the first block, since we don't want to count the DO that the
    * calling function found.
    */
   for (block = block->next();
        depth > 0;
        block = block->next()) {
      if (block->start()->opcode == BRW_OPCODE_DO)
         depth++;
      if (block->end()->opcode == BRW_OPCODE_WHILE) {
         depth--;
         if (depth == 0)
            return block->end_ip;
      }
   }
   unreachable("not reached");
}

void fs_visitor::calculate_payload_ranges(int payload_node_count,
                                          int *payload_last_use_ip)
{
   int loop_depth = 0;
   int loop_end_ip = 0;

   for (int i = 0; i < payload_node_count; i++)
      payload_last_use_ip[i] = -1;

   int ip = 0;
   foreach_block_and_inst(block, fs_inst, inst, cfg) {
      switch (inst->opcode) {
      case BRW_OPCODE_DO:
         loop_depth++;

         /* Since payload regs are defined only at the start of the shader
          * execution, any uses of the payload within a loop mean the live
          * interval extends to the end of the outermost loop.  Find the ip of
          * the end now.
          */
         if (loop_depth == 1)
            loop_end_ip = count_to_loop_end(block);
         break;
      case BRW_OPCODE_WHILE:
         loop_depth--;
         break;
      default:
         break;
      }

      int use_ip;
      if (loop_depth > 0)
         use_ip = loop_end_ip;
      else
         use_ip = ip;

      /* Note that UNIFORM args have been turned into FIXED_GRF by
       * assign_curbe_setup(), and interpolation uses fixed hardware regs from
       * the start (see interp_reg()).
       */
      for (int i = 0; i < inst->sources; i++) {
         if (inst->src[i].file == FIXED_GRF) {
            int node_nr = inst->src[i].nr;
            if (node_nr >= payload_node_count)
               continue;

            for (unsigned j = 0; j < regs_read(inst, i); j++) {
               payload_last_use_ip[node_nr + j] = use_ip;
               assert(node_nr + j < unsigned(payload_node_count));
            }
         }
      }

      /* Special case instructions which have extra implied registers used. */
      switch (inst->opcode) {
      case CS_OPCODE_CS_TERMINATE:
         payload_last_use_ip[0] = use_ip;
         break;

      default:
         if (inst->eot) {
            /* We could omit this for the !inst->header_present case, except
             * that the simulator apparently incorrectly reads from g0/g1
             * instead of sideband.  It also really freaks out driver
             * developers to see g0 used in unusual places, so just always
             * reserve it.
             */
            payload_last_use_ip[0] = use_ip;
            payload_last_use_ip[1] = use_ip;
         }
         break;
      }

      ip++;
   }
}

class fs_reg_alloc {
public:
   fs_reg_alloc(fs_visitor *fs):
      fs(fs), devinfo(fs->devinfo), compiler(fs->compiler),
      live(fs->live_analysis.require()), g(NULL),
      have_spill_costs(false)
   {
      mem_ctx = ralloc_context(NULL);

      /* Most of this allocation was written for a reg_width of 1
       * (dispatch_width == 8).  In extending to SIMD16, the code was
       * left in place and it was converted to have the hardware
       * registers it's allocating be contiguous physical pairs of regs
       * for reg_width == 2.
       */
      int reg_width = fs->dispatch_width / 8;
      rsi = _mesa_logbase2(reg_width);
      payload_node_count = ALIGN(fs->first_non_payload_grf, reg_width);

      /* Get payload IP information */
      payload_last_use_ip = ralloc_array(mem_ctx, int, payload_node_count);

      spill_vgrf_ip = NULL;
      spill_vgrf_ip_alloc = 0;
      spill_node_count = 0;
   }

   ~fs_reg_alloc()
   {
      ralloc_free(mem_ctx);
   }

   bool assign_regs(bool allow_spilling, bool spill_all);

private:
   void setup_live_interference(unsigned node,
                                int node_start_ip, int node_end_ip);
   void setup_inst_interference(fs_inst *inst);

   void build_interference_graph(bool allow_spilling);
   void discard_interference_graph();

   void set_spill_costs();
   int choose_spill_reg();
   fs_reg alloc_spill_reg(unsigned size, int ip);
   void spill_reg(unsigned spill_reg);

   void *mem_ctx;
   fs_visitor *fs;
   const gen_device_info *devinfo;
   const brw_compiler *compiler;
   const fs_live_variables &live;

   /* Which compiler->fs_reg_sets[] to use */
   int rsi;

   ra_graph *g;
   bool have_spill_costs;

   int payload_node_count;
   int *payload_last_use_ip;

   int node_count;
   int first_payload_node;
   int first_mrf_hack_node;
   int grf127_send_hack_node;
   int first_vgrf_node;
   int first_spill_node;

   int *spill_vgrf_ip;
   int spill_vgrf_ip_alloc;
   int spill_node_count;
};

/**
 * Sets the mrf_used array to indicate which MRFs are used by the shader IR
 *
 * This is used in assign_regs() to decide which of the GRFs that we use as
 * MRFs on gen7 are normally register allocated, and in register spilling to
 * see if we can actually use MRFs to do spills without overwriting normal MRF
 * contents.
 */
static void
get_used_mrfs(fs_visitor *v, bool *mrf_used)
{
   int reg_width = v->dispatch_width / 8;

   memset(mrf_used, 0, BRW_MAX_MRF(v->devinfo->gen) * sizeof(bool));

   foreach_block_and_inst(block, fs_inst, inst, v->cfg) {
      if (inst->dst.file == MRF) {
         int reg = inst->dst.nr & ~BRW_MRF_COMPR4;
         mrf_used[reg] = true;
         if (reg_width == 2) {
            if (inst->dst.nr & BRW_MRF_COMPR4) {
               mrf_used[reg + 4] = true;
            } else {
               mrf_used[reg + 1] = true;
            }
         }
      }

      if (inst->mlen > 0) {
         for (unsigned i = 0; i < inst->implied_mrf_writes(); i++) {
            mrf_used[inst->base_mrf + i] = true;
         }
      }
   }
}

namespace {
   /**
    * Maximum spill block size we expect to encounter in 32B units.
    *
    * This is somewhat arbitrary and doesn't necessarily limit the maximum
    * variable size that can be spilled -- A higher value will allow a
    * variable of a given size to be spilled more efficiently with a smaller
    * number of scratch messages, but will increase the likelihood of a
    * collision between the MRFs reserved for spilling and other MRFs used by
    * the program (and possibly increase GRF register pressure on platforms
    * without hardware MRFs), which could cause register allocation to fail.
    *
    * For the moment reserve just enough space so a register of 32 bit
    * component type and natural region width can be spilled without splitting
    * into multiple (force_writemask_all) scratch messages.
    */
   unsigned
   spill_max_size(const backend_shader *s)
   {
      /* FINISHME - On Gen7+ it should be possible to avoid this limit
       *            altogether by spilling directly from the temporary GRF
       *            allocated to hold the result of the instruction (and the
       *            scratch write header).
       */
      /* FINISHME - The shader's dispatch width probably belongs in
       *            backend_shader (or some nonexistent fs_shader class?)
       *            rather than in the visitor class.
       */
      return static_cast<const fs_visitor *>(s)->dispatch_width / 8;
   }

   /**
    * First MRF register available for spilling.
    */
   unsigned
   spill_base_mrf(const backend_shader *s)
   {
      return BRW_MAX_MRF(s->devinfo->gen) - spill_max_size(s) - 1;
   }
}
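
/* Illustrative arithmetic (an added note, not from the original source):
 * assuming BRW_MAX_MRF == 16 on Gen7, a SIMD16 shader has
 * spill_max_size == 2, so spill_base_mrf == 16 - 2 - 1 == 13 and spill
 * messages occupy MRFs 13-15 (header plus two data registers), matching
 * the MRF 13-15 range called out in the comment at the top of spill_reg().
 */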

void
fs_reg_alloc::setup_live_interference(unsigned node,
                                      int node_start_ip, int node_end_ip)
{
   /* Mark any virtual grf that is live between the start of the program and
    * the last use of a payload node interfering with that payload node.
    */
   for (int i = 0; i < payload_node_count; i++) {
      if (payload_last_use_ip[i] == -1)
         continue;

      /* Note that we use a <= comparison, unlike vgrfs_interfere(),
       * in order to not have to worry about the uniform issue described in
       * calculate_live_intervals().
       */
      if (node_start_ip <= payload_last_use_ip[i])
         ra_add_node_interference(g, node, first_payload_node + i);
   }

   /* If we have the MRF hack enabled, mark this node as interfering with all
    * MRF registers.
    */
   if (first_mrf_hack_node >= 0) {
      for (int i = spill_base_mrf(fs); i < BRW_MAX_MRF(devinfo->gen); i++)
         ra_add_node_interference(g, node, first_mrf_hack_node + i);
   }

   /* Add interference with every vgrf whose live range intersects this
    * node's.  We only need to look at nodes below this one as the reflexivity
    * of interference will take care of the rest.
    */
   for (unsigned n2 = first_vgrf_node;
        n2 < (unsigned)first_spill_node && n2 < node; n2++) {
      unsigned vgrf = n2 - first_vgrf_node;
      if (!(node_end_ip <= live.vgrf_start[vgrf] ||
            live.vgrf_end[vgrf] <= node_start_ip))
         ra_add_node_interference(g, node, n2);
   }
}

void
fs_reg_alloc::setup_inst_interference(fs_inst *inst)
{
   /* Certain instructions can't safely use the same register for their
    * sources and destination.  Add interference.
    */
   if (inst->dst.file == VGRF && inst->has_source_and_destination_hazard()) {
      for (unsigned i = 0; i < inst->sources; i++) {
         if (inst->src[i].file == VGRF) {
            ra_add_node_interference(g, first_vgrf_node + inst->dst.nr,
                                     first_vgrf_node + inst->src[i].nr);
         }
      }
   }

   /* In 16-wide instructions we have an issue where a compressed
    * instruction is actually two instructions executed simultaneously.
    * It's actually ok to have the source and destination registers be
    * the same.  In this case, each instruction over-writes its own
    * source and there's no problem.  The real problem here is if the
    * source and destination registers are off by one.  Then you can end
    * up in a scenario where the first instruction over-writes the
    * source of the second instruction.  Since the compiler doesn't know
    * about this level of granularity, we simply make the source and
    * destination interfere.
    */
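   /* Concrete case (an added note, not from the original source): a SIMD16
    * instruction with a source starting at g3 and destination starting at g4
    * is executed as two SIMD8 halves; the first half writes g4 while the
    * second half still needs to read g4 as its source, so the first half
    * clobbers the second half's input.
    */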
   if (inst->exec_size >= 16 && inst->dst.file == VGRF) {
      for (int i = 0; i < inst->sources; ++i) {
         if (inst->src[i].file == VGRF) {
            ra_add_node_interference(g, first_vgrf_node + inst->dst.nr,
                                     first_vgrf_node + inst->src[i].nr);
         }
      }
   }

   if (grf127_send_hack_node >= 0) {
      /* At Intel Broadwell PRM, vol 07, section "Instruction Set Reference",
       * subsection "EUISA Instructions", Send Message (page 990):
       *
       * "r127 must not be used for return address when there is a src and
       * dest overlap in send instruction."
       *
       * We avoid using grf127 as part of the destination of send messages by
       * adding a node interference to the grf127_send_hack_node.  This node
       * has a fixed assignment to grf127.
       *
       * We don't apply it to SIMD16 instructions because previous code avoids
       * any register overlap between sources and destination.
       */
      if (inst->exec_size < 16 && inst->is_send_from_grf() &&
          inst->dst.file == VGRF)
         ra_add_node_interference(g, first_vgrf_node + inst->dst.nr,
                                  grf127_send_hack_node);

      /* Spill instructions are generated as SEND messages from MRF, but
       * since Gen7+ supports sending from GRF the driver will assign these
       * MRF registers to GRFs.  The implementation reuses the dest of the
       * send message as a source, so since there will be an overlap for
       * sure, we create an interference between the destination and grf127.
       */
      if ((inst->opcode == SHADER_OPCODE_GEN7_SCRATCH_READ ||
           inst->opcode == SHADER_OPCODE_GEN4_SCRATCH_READ) &&
          inst->dst.file == VGRF)
         ra_add_node_interference(g, first_vgrf_node + inst->dst.nr,
                                  grf127_send_hack_node);
   }

   /* From the Skylake PRM Vol. 2a docs for sends:
    *
    *    "It is required that the second block of GRFs does not overlap with
    *    the first block."
    *
    * Normally, this is taken care of by fixup_sends_duplicate_payload() but
    * in the case where one of the registers is an undefined value, the
    * register allocator may decide that they don't interfere even though
    * they're used as sources in the same instruction.  We also need to add
    * interference here.
    */
   if (devinfo->gen >= 9) {
      if (inst->opcode == SHADER_OPCODE_SEND && inst->ex_mlen > 0 &&
          inst->src[2].file == VGRF && inst->src[3].file == VGRF &&
          inst->src[2].nr != inst->src[3].nr)
         ra_add_node_interference(g, first_vgrf_node + inst->src[2].nr,
                                  first_vgrf_node + inst->src[3].nr);
   }

   /* When we do send-from-GRF for FB writes, we need to ensure that the last
    * write instruction sends from a high register.  This is because the
    * vertex fetcher wants to start filling the low payload registers while
    * the pixel data port is still working on writing out the memory.  If we
    * don't do this, we get rendering artifacts.
    *
    * We could just do "something high".  Instead, we just pick the highest
    * register that works.
    */
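   /* Illustrative mapping (an added note, not from the original source):
    * class_to_ra_reg_range[size] - 1 is the last RA register of the size
    * class, and since registers were added to each class in ascending GRF
    * order it maps to the highest GRF at which a register of that size
    * still fits.
    */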
   if (inst->eot) {
      const int vgrf = inst->opcode == SHADER_OPCODE_SEND ?
                       inst->src[2].nr : inst->src[0].nr;
      int size = fs->alloc.sizes[vgrf];
      int reg = compiler->fs_reg_sets[rsi].class_to_ra_reg_range[size] - 1;

      if (first_mrf_hack_node >= 0) {
         /* If something happened to spill, we want to push the EOT send
          * register early enough in the register file that we don't
          * conflict with any used MRF hack registers.
          */
         reg -= BRW_MAX_MRF(devinfo->gen) - spill_base_mrf(fs);
      } else if (grf127_send_hack_node >= 0) {
         /* Avoid r127 which might be unusable if the node was previously
          * written by a SIMD8 SEND message with source/destination overlap.
          */
         reg--;
      }

      ra_set_node_reg(g, first_vgrf_node + vgrf, reg);
   }
}

void
fs_reg_alloc::build_interference_graph(bool allow_spilling)
{
   const gen_device_info *devinfo = fs->devinfo;
   const brw_compiler *compiler = fs->compiler;

   /* Compute the RA node layout */
   node_count = 0;
   first_payload_node = node_count;
   node_count += payload_node_count;
   if (devinfo->gen >= 7 && allow_spilling) {
      first_mrf_hack_node = node_count;
      node_count += BRW_MAX_GRF - GEN7_MRF_HACK_START;
   } else {
      first_mrf_hack_node = -1;
   }
   if (devinfo->gen >= 8) {
      grf127_send_hack_node = node_count;
      node_count++;
   } else {
      grf127_send_hack_node = -1;
   }
   first_vgrf_node = node_count;
   node_count += fs->alloc.count;
   first_spill_node = node_count;

   fs->calculate_payload_ranges(payload_node_count,
                                payload_last_use_ip);

   assert(g == NULL);
   g = ra_alloc_interference_graph(compiler->fs_reg_sets[rsi].regs, node_count);
   ralloc_steal(mem_ctx, g);

   /* Set up the payload nodes */
   for (int i = 0; i < payload_node_count; i++) {
      /* Mark each payload node as being allocated to its physical register.
       *
       * The alternative would be to have per-physical-register classes, which
       * would just be silly.
       */
      if (devinfo->gen <= 5 && fs->dispatch_width >= 16) {
         /* We have to divide by 2 here because we only have even numbered
          * registers.  Some of the payload registers will be odd, but
          * that's ok because their physical register numbers have already
          * been assigned.  The only thing this is used for is interference.
          */
         ra_set_node_reg(g, first_payload_node + i, i / 2);
      } else {
         ra_set_node_reg(g, first_payload_node + i, i);
      }
   }

   if (first_mrf_hack_node >= 0) {
      /* Mark each MRF reg node as being allocated to its physical
       * register.
       *
       * The alternative would be to have per-physical-register classes,
       * which would just be silly.
       */
      for (int i = 0; i < BRW_MAX_MRF(devinfo->gen); i++) {
         ra_set_node_reg(g, first_mrf_hack_node + i,
                         GEN7_MRF_HACK_START + i);
      }
   }

   if (grf127_send_hack_node >= 0)
      ra_set_node_reg(g, grf127_send_hack_node, 127);

   /* Specify the classes of each virtual register. */
   for (unsigned i = 0; i < fs->alloc.count; i++) {
      unsigned size = fs->alloc.sizes[i];

      assert(size <= ARRAY_SIZE(compiler->fs_reg_sets[rsi].classes) &&
             "Register allocation relies on split_virtual_grfs()");

      ra_set_node_class(g, first_vgrf_node + i,
                        compiler->fs_reg_sets[rsi].classes[size - 1]);
   }

   /* Special case: on pre-Gen7 hardware that supports PLN, the second operand
    * of a PLN instruction needs to be an even-numbered register, so we have a
    * special register class aligned_bary_class to handle this case.
    */
   if (compiler->fs_reg_sets[rsi].aligned_bary_class >= 0) {
      foreach_block_and_inst(block, fs_inst, inst, fs->cfg) {
         if (inst->opcode == FS_OPCODE_LINTERP && inst->src[0].file == VGRF &&
             fs->alloc.sizes[inst->src[0].nr] ==
                aligned_bary_size(fs->dispatch_width)) {
            ra_set_node_class(g, first_vgrf_node + inst->src[0].nr,
                              compiler->fs_reg_sets[rsi].aligned_bary_class);
         }
      }
   }

   /* Add interference based on the live range of the register */
   for (unsigned i = 0; i < fs->alloc.count; i++) {
      setup_live_interference(first_vgrf_node + i,
                              live.vgrf_start[i],
                              live.vgrf_end[i]);
   }

   /* Add interference based on the instructions in which a register is used.
    */
   foreach_block_and_inst(block, fs_inst, inst, fs->cfg)
      setup_inst_interference(inst);
}

void
fs_reg_alloc::discard_interference_graph()
{
   ralloc_free(g);
   g = NULL;
   have_spill_costs = false;
}

static void
emit_unspill(const fs_builder &bld, fs_reg dst,
             uint32_t spill_offset, unsigned count)
{
   const gen_device_info *devinfo = bld.shader->devinfo;
   const unsigned reg_size = dst.component_size(bld.dispatch_width()) /
                             REG_SIZE;
   assert(count % reg_size == 0);

   for (unsigned i = 0; i < count / reg_size; i++) {
      /* The Gen7 descriptor-based offset is 12 bits of HWORD units.  Because
       * the Gen7-style scratch block read is hardwired to BTI 255, on Gen9+
       * it would cause the DC to do an IA-coherent read, which largely
       * outweighs the slight advantage from not having to provide the address
       * as part of the message header, so we're better off using plain old
       * oword block reads.
       */
      bool gen7_read = (devinfo->gen >= 7 && devinfo->gen < 9 &&
                        spill_offset < (1 << 12) * REG_SIZE);
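
      /* Illustrative arithmetic (an added note, not from the original
       * source): with REG_SIZE == 32 bytes, the 12-bit HWORD offset covers
       * (1 << 12) * 32 == 128KB of scratch; past that we fall back to the
       * Gen4-style message that carries the offset in its header.
       */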
      fs_inst *unspill_inst = bld.emit(gen7_read ?
                                       SHADER_OPCODE_GEN7_SCRATCH_READ :
                                       SHADER_OPCODE_GEN4_SCRATCH_READ,
                                       dst);
      unspill_inst->offset = spill_offset;

      if (!gen7_read) {
         unspill_inst->base_mrf = spill_base_mrf(bld.shader);
         unspill_inst->mlen = 1; /* header contains offset */
      }

      dst.offset += reg_size * REG_SIZE;
      spill_offset += reg_size * REG_SIZE;
   }
}

static void
emit_spill(const fs_builder &bld, fs_reg src,
           uint32_t spill_offset, unsigned count)
{
   const unsigned reg_size = src.component_size(bld.dispatch_width()) /
                             REG_SIZE;
   assert(count % reg_size == 0);

   for (unsigned i = 0; i < count / reg_size; i++) {
      fs_inst *spill_inst =
         bld.emit(SHADER_OPCODE_GEN4_SCRATCH_WRITE, bld.null_reg_f(), src);
      src.offset += reg_size * REG_SIZE;
      spill_inst->offset = spill_offset + i * reg_size * REG_SIZE;
      spill_inst->mlen = 1 + reg_size; /* header, value */
      spill_inst->base_mrf = spill_base_mrf(bld.shader);
   }
}

void
fs_reg_alloc::set_spill_costs()
{
   float block_scale = 1.0;
   float spill_costs[fs->alloc.count];
   bool no_spill[fs->alloc.count];

   for (unsigned i = 0; i < fs->alloc.count; i++) {
      spill_costs[i] = 0.0;
      no_spill[i] = false;
   }

   /* Calculate costs for spilling nodes.  Call it a cost of 1 per
    * spill/unspill we'll have to do, and guess that the insides of
    * loops run 10 times.
    */
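   /* Illustrative effect of block_scale (an added note, not from the
    * original source): a VGRF use in straight-line code accrues
    * regs_read * 1.0, the same use inside a loop accrues regs_read * 10.0,
    * and inside a doubly nested loop regs_read * 100.0; IF halves the scale
    * on the guess that each branch runs about half the time, and ENDIF
    * restores it.
    */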
   foreach_block_and_inst(block, fs_inst, inst, fs->cfg) {
      for (unsigned int i = 0; i < inst->sources; i++) {
         if (inst->src[i].file == VGRF)
            spill_costs[inst->src[i].nr] += regs_read(inst, i) * block_scale;
      }

      if (inst->dst.file == VGRF)
         spill_costs[inst->dst.nr] += regs_written(inst) * block_scale;

      switch (inst->opcode) {
      case BRW_OPCODE_DO:
         block_scale *= 10;
         break;

      case BRW_OPCODE_WHILE:
         block_scale /= 10;
         break;

      case BRW_OPCODE_IF:
      case BRW_OPCODE_IFF:
         block_scale *= 0.5;
         break;

      case BRW_OPCODE_ENDIF:
         block_scale /= 0.5;
         break;

      case SHADER_OPCODE_GEN4_SCRATCH_WRITE:
         if (inst->src[0].file == VGRF)
            no_spill[inst->src[0].nr] = true;
         break;

      case SHADER_OPCODE_GEN4_SCRATCH_READ:
      case SHADER_OPCODE_GEN7_SCRATCH_READ:
         if (inst->dst.file == VGRF)
            no_spill[inst->dst.nr] = true;
         break;

      default:
         break;
      }
   }

   for (unsigned i = 0; i < fs->alloc.count; i++) {
      /* Do the no_spill check first.  Registers that are used as spill
       * temporaries may have been allocated after we calculated liveness so
       * we shouldn't look their liveness up.  Fortunately, they're always
       * used in SCRATCH_READ/WRITE instructions so they'll always be flagged
       * no_spill.
       */
      if (no_spill[i])
         continue;

      int live_length = live.vgrf_end[i] - live.vgrf_start[i];
      if (live_length <= 0)
         continue;

      /* Divide the cost (in number of spills/fills) by the log of the length
       * of the live range of the register.  This will encourage spill logic
       * to spill long-living things before spilling short-lived things where
       * spilling is less likely to actually do us any good.  We use the log
       * of the length because it will fall off very quickly and not cause us
       * to spill medium length registers with more uses.
       */
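      /* Worked numbers (an added note, not from the original source): a
       * cost of 10.0 over a live range of 100 instructions yields
       * 10 / logf(100) ~= 2.2, while the same cost over 8 instructions
       * yields 10 / logf(8) ~= 4.8, so the long-lived register looks
       * cheaper and gets picked first by ra_get_best_spill_node().
       */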
      float adjusted_cost = spill_costs[i] / logf(live_length);
      ra_set_node_spill_cost(g, first_vgrf_node + i, adjusted_cost);
   }

   have_spill_costs = true;
}

int
fs_reg_alloc::choose_spill_reg()
{
   if (!have_spill_costs)
      set_spill_costs();

   int node = ra_get_best_spill_node(g);
   if (node < 0)
      return -1;

   assert(node >= first_vgrf_node);
   return node - first_vgrf_node;
}

fs_reg
fs_reg_alloc::alloc_spill_reg(unsigned size, int ip)
{
   int vgrf = fs->alloc.allocate(size);
   int n = ra_add_node(g, compiler->fs_reg_sets[rsi].classes[size - 1]);
   assert(n == first_vgrf_node + vgrf);
   assert(n == first_spill_node + spill_node_count);

   setup_live_interference(n, ip - 1, ip + 1);

   /* Add interference between this spill node and any other spill nodes for
    * the same instruction.
    */
   for (int s = 0; s < spill_node_count; s++) {
      if (spill_vgrf_ip[s] == ip)
         ra_add_node_interference(g, n, first_spill_node + s);
   }

   /* Add this spill node to the list for next time */
   if (spill_node_count >= spill_vgrf_ip_alloc) {
      if (spill_vgrf_ip_alloc == 0)
         spill_vgrf_ip_alloc = 16;
      else
         spill_vgrf_ip_alloc *= 2;
      spill_vgrf_ip = reralloc(mem_ctx, spill_vgrf_ip, int,
                               spill_vgrf_ip_alloc);
   }
   spill_vgrf_ip[spill_node_count++] = ip;

   return fs_reg(VGRF, vgrf);
}

void
fs_reg_alloc::spill_reg(unsigned spill_reg)
{
   int size = fs->alloc.sizes[spill_reg];
   unsigned int spill_offset = fs->last_scratch;
   assert(ALIGN(spill_offset, 16) == spill_offset); /* oword read/write req. */

   /* Spills may use MRFs 13-15 in the SIMD16 case.  Our texturing is done
    * using up to 11 MRFs starting from either m1 or m2, and fb writes can use
    * up to m13 (gen6+ simd16: 2 header + 8 color + 2 src0alpha + 2 omask) or
    * m15 (gen4-5 simd16: 2 header + 8 color + 1 aads + 2 src depth + 2 dst
    * depth), starting from m1.  In summary: We may not be able to spill in
    * SIMD16 mode, because we'd stomp the FB writes.
    */
   if (!fs->spilled_any_registers) {
      bool mrf_used[BRW_MAX_MRF(devinfo->gen)];
      get_used_mrfs(fs, mrf_used);

      for (int i = spill_base_mrf(fs); i < BRW_MAX_MRF(devinfo->gen); i++) {
         if (mrf_used[i]) {
            fs->fail("Register spilling not supported with m%d used", i);
            return;
         }
      }

      fs->spilled_any_registers = true;
   }

   fs->last_scratch += size * REG_SIZE;

   /* We're about to replace all uses of this register.  It no longer
    * conflicts with anything so we can get rid of its interference.
    */
   ra_set_node_spill_cost(g, first_vgrf_node + spill_reg, 0);
   ra_reset_node_interference(g, first_vgrf_node + spill_reg);

   /* Generate spill/unspill instructions for the objects being
    * spilled.  Right now, we spill or unspill the whole thing to a
    * virtual grf of the same size.  For most instructions, though, we
    * could just spill/unspill the GRF being accessed.
    */
   int ip = 0;
   foreach_block_and_inst (block, fs_inst, inst, fs->cfg) {
      const fs_builder ibld = fs_builder(fs, block, inst);
      exec_node *before = inst->prev;
      exec_node *after = inst->next;

      for (unsigned int i = 0; i < inst->sources; i++) {
         if (inst->src[i].file == VGRF &&
             inst->src[i].nr == spill_reg) {
            int count = regs_read(inst, i);
            int subset_spill_offset = spill_offset +
               ROUND_DOWN_TO(inst->src[i].offset, REG_SIZE);
            fs_reg unspill_dst = alloc_spill_reg(count, ip);

            inst->src[i].nr = unspill_dst.nr;
            inst->src[i].offset %= REG_SIZE;

            /* We read the largest power-of-two divisor of the register count
             * (because only POT scratch read blocks are allowed by the
             * hardware) up to the maximum supported block size.
             */
            const unsigned width =
               MIN2(32, 1u << (ffs(MAX2(1, count) * 8) - 1));
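
            /* Illustrative evaluation (an added note, not from the original
             * source): count == 3 gives ffs(24) - 1 == 3, so width == 8 (a
             * single-register SIMD8 read); count == 4 gives ffs(32) - 1 == 5,
             * clamped to 32.  In general width is 8 times the largest power
             * of two dividing count, capped at 32 channels.
             */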

            /* Set exec_all() on unspill messages under the (rather
             * pessimistic) assumption that there is no one-to-one
             * correspondence between channels of the spilled variable in
             * scratch space and the scratch read message, which operates on
             * 32 bit channels.  It shouldn't hurt in any case because the
             * unspill destination is a block-local temporary.
             */
            emit_unspill(ibld.exec_all().group(width, 0),
                         unspill_dst, subset_spill_offset, count);
         }
      }

      if (inst->dst.file == VGRF &&
          inst->dst.nr == spill_reg) {
         int subset_spill_offset = spill_offset +
            ROUND_DOWN_TO(inst->dst.offset, REG_SIZE);
         fs_reg spill_src = alloc_spill_reg(regs_written(inst), ip);

         inst->dst.nr = spill_src.nr;
         inst->dst.offset %= REG_SIZE;

         /* If we're immediately spilling the register, we should not use
          * destination dependency hints.  Doing so will cause the GPU to
          * try to read and write the register at the same time and may
          * hang the GPU.
          */
         inst->no_dd_clear = false;
         inst->no_dd_check = false;

         /* Calculate the execution width of the scratch messages (which work
          * in terms of 32 bit components so we have a fixed number of eight
          * channels per spilled register).  We attempt to write one
          * exec_size-wide component of the variable at a time without
          * exceeding the maximum number of (fake) MRF registers reserved for
          * spills.
          */
         const unsigned width = 8 * MIN2(
            DIV_ROUND_UP(inst->dst.component_size(inst->exec_size), REG_SIZE),
            spill_max_size(fs));

         /* Spills should only write data initialized by the instruction for
          * whichever channels are enabled in the execution mask.  If that's
          * not possible we'll have to emit a matching unspill before the
          * instruction and set force_writemask_all on the spill.
          */
         const bool per_channel =
            inst->dst.is_contiguous() && type_sz(inst->dst.type) == 4 &&
            inst->exec_size == width;

         /* Builder used to emit the scratch messages. */
         const fs_builder ubld = ibld.exec_all(!per_channel).group(width, 0);

         /* If our write is going to affect just part of the
          * regs_written(inst), then we need to unspill the destination since
          * we write back out all of the regs_written().  If the original
          * instruction had force_writemask_all set and is not a partial
          * write, there should be no need for the unspill since the
          * instruction will be overwriting the whole destination in any case.
          */
         if (inst->is_partial_write() ||
             (!inst->force_writemask_all && !per_channel))
            emit_unspill(ubld, spill_src, subset_spill_offset,
                         regs_written(inst));

         emit_spill(ubld.at(block, inst->next), spill_src,
                    subset_spill_offset, regs_written(inst));
      }

      for (fs_inst *inst = (fs_inst *)before->next;
           inst != after; inst = (fs_inst *)inst->next)
         setup_inst_interference(inst);

      /* We don't advance the ip for scratch read/write instructions
       * because we consider them to have the same ip as the instruction
       * we're spilling around for the purposes of interference.
       */
      if (inst->opcode != SHADER_OPCODE_GEN4_SCRATCH_WRITE &&
          inst->opcode != SHADER_OPCODE_GEN4_SCRATCH_READ &&
          inst->opcode != SHADER_OPCODE_GEN7_SCRATCH_READ)
         ip++;
   }
}

bool
fs_reg_alloc::assign_regs(bool allow_spilling, bool spill_all)
{
   build_interference_graph(fs->spilled_any_registers || spill_all);

   bool spilled = false;
   while (1) {
      /* Debug of register spilling: Go spill everything. */
      if (unlikely(spill_all)) {
         int reg = choose_spill_reg();
         if (reg != -1) {
            spill_reg(reg);
            continue;
         }
      }

      if (ra_allocate(g))
         break;

      if (!allow_spilling)
         return false;

      /* Failed to allocate registers.  Spill a reg, and the caller will
       * loop back into here to try again.
       */
      int reg = choose_spill_reg();
      if (reg == -1)
         return false;

      /* If we're going to spill but we've never spilled before, we need to
       * re-build the interference graph with MRFs enabled to allow spilling.
       */
      if (!fs->spilled_any_registers) {
         discard_interference_graph();
         build_interference_graph(true);
      }

      spilled = true;

      spill_reg(reg);
   }

   if (spilled)
      fs->invalidate_analysis(DEPENDENCY_INSTRUCTIONS | DEPENDENCY_VARIABLES);

   /* Get the chosen virtual registers for each node, and map virtual
    * regs in the register classes back down to real hardware reg
    * numbers.
    */
   unsigned hw_reg_mapping[fs->alloc.count];
   fs->grf_used = fs->first_non_payload_grf;
   for (unsigned i = 0; i < fs->alloc.count; i++) {
      int reg = ra_get_node_reg(g, first_vgrf_node + i);

      hw_reg_mapping[i] = compiler->fs_reg_sets[rsi].ra_reg_to_grf[reg];
      fs->grf_used = MAX2(fs->grf_used,
                          hw_reg_mapping[i] + fs->alloc.sizes[i]);
   }

   foreach_block_and_inst(block, fs_inst, inst, fs->cfg) {
      assign_reg(hw_reg_mapping, &inst->dst);
      for (int i = 0; i < inst->sources; i++) {
         assign_reg(hw_reg_mapping, &inst->src[i]);
      }
   }

   fs->alloc.count = fs->grf_used;

   return true;
}

bool
fs_visitor::assign_regs(bool allow_spilling, bool spill_all)
{
   fs_reg_alloc alloc(this);
   bool success = alloc.assign_regs(allow_spilling, spill_all);
   if (!success && allow_spilling) {
      fail("no register to spill:\n");
      dump_instructions(NULL);
   }
   return success;
}