intel/fs/ra: Split building the interference graph into a helper
[mesa.git] / src / intel / compiler / brw_fs_reg_allocate.cpp
1 /*
2 * Copyright © 2010 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 *
26 */
27
28 #include "brw_eu.h"
29 #include "brw_fs.h"
30 #include "brw_cfg.h"
31 #include "util/register_allocate.h"
32
33 using namespace brw;
34
35 static void
36 assign_reg(unsigned *reg_hw_locations, fs_reg *reg)
37 {
38 if (reg->file == VGRF) {
39 reg->nr = reg_hw_locations[reg->nr] + reg->offset / REG_SIZE;
40 reg->offset %= REG_SIZE;
41 }
42 }
43
44 void
45 fs_visitor::assign_regs_trivial()
46 {
47 unsigned hw_reg_mapping[this->alloc.count + 1];
48 unsigned i;
49 int reg_width = dispatch_width / 8;
50
51 /* Note that compressed instructions require alignment to 2 registers. */
52 hw_reg_mapping[0] = ALIGN(this->first_non_payload_grf, reg_width);
53 for (i = 1; i <= this->alloc.count; i++) {
54 hw_reg_mapping[i] = (hw_reg_mapping[i - 1] +
55 this->alloc.sizes[i - 1]);
56 }
57 this->grf_used = hw_reg_mapping[this->alloc.count];
58
59 foreach_block_and_inst(block, fs_inst, inst, cfg) {
60 assign_reg(hw_reg_mapping, &inst->dst);
61 for (i = 0; i < inst->sources; i++) {
62 assign_reg(hw_reg_mapping, &inst->src[i]);
63 }
64 }
65
66 if (this->grf_used >= max_grf) {
67 fail("Ran out of regs on trivial allocator (%d/%d)\n",
68 this->grf_used, max_grf);
69 } else {
70 this->alloc.count = this->grf_used;
71 }
72
73 }
74
75 static void
76 brw_alloc_reg_set(struct brw_compiler *compiler, int dispatch_width)
77 {
78 const struct gen_device_info *devinfo = compiler->devinfo;
79 int base_reg_count = BRW_MAX_GRF;
80 const int index = _mesa_logbase2(dispatch_width / 8);
81
82 if (dispatch_width > 8 && devinfo->gen >= 7) {
83 /* For IVB+, we don't need the PLN hacks or the even-reg alignment in
84 * SIMD16. Therefore, we can use the exact same register sets for
85 * SIMD16 as we do for SIMD8 and we don't need to recalculate them.
86 */
87 compiler->fs_reg_sets[index] = compiler->fs_reg_sets[0];
88 return;
89 }
90
91 /* The registers used to make up almost all values handled in the compiler
92 * are a scalar value occupying a single register (or 2 registers in the
93 * case of SIMD16, which is handled by dividing base_reg_count by 2 and
94 * multiplying allocated register numbers by 2). Things that were
95 * aggregates of scalar values at the GLSL level were split to scalar
96 * values by split_virtual_grfs().
97 *
98 * However, texture SEND messages return a series of contiguous registers
99 * to write into. We currently always ask for 4 registers, but we may
100 * convert that to use fewer registers some day.
101 *
102 * Additionally, on gen5 we need aligned pairs of registers for the PLN
103 * instruction, and on gen4 we need 8 contiguous regs for the SIMD16
104 * texturing workaround.
105 */
106 const int class_count = MAX_VGRF_SIZE;
107 int class_sizes[MAX_VGRF_SIZE];
108 for (unsigned i = 0; i < MAX_VGRF_SIZE; i++)
109 class_sizes[i] = i + 1;
110
111 memset(compiler->fs_reg_sets[index].class_to_ra_reg_range, 0,
112 sizeof(compiler->fs_reg_sets[index].class_to_ra_reg_range));
113 int *class_to_ra_reg_range = compiler->fs_reg_sets[index].class_to_ra_reg_range;
114
115 /* Compute the total number of registers across all classes. */
116 int ra_reg_count = 0;
117 for (int i = 0; i < class_count; i++) {
118 if (devinfo->gen <= 5 && dispatch_width >= 16) {
119 /* From the G45 PRM:
120 *
121 * In order to reduce the hardware complexity, the following
122 * rules and restrictions apply to the compressed instruction:
123 * ...
124 * * Operand Alignment Rule: With the exceptions listed below, a
125 * source/destination operand in general should be aligned to
126 * even 256-bit physical register with a region size equal to
127 * two 256-bit physical register
128 */
129 ra_reg_count += (base_reg_count - (class_sizes[i] - 1)) / 2;
130 } else {
131 ra_reg_count += base_reg_count - (class_sizes[i] - 1);
132 }
133 /* Mark the last register. We'll fill in the beginnings later. */
134 class_to_ra_reg_range[class_sizes[i]] = ra_reg_count;
135 }
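/* Illustrative numbers (added commentary; assumes BRW_MAX_GRF == 128 and
 * no SIMD16 pairing): the size-1 class contributes 128 ra registers, the
 * size-2 class 127 (a two-register value can start anywhere except the
 * last GRF), the size-3 class 126, and so on, giving
 * class_to_ra_reg_range[1] == 128, [2] == 255, [3] == 381, ...
 */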
136
137 /* Fill out the rest of the range markers */
138 for (int i = 1; i < 17; ++i) {
139 if (class_to_ra_reg_range[i] == 0)
140 class_to_ra_reg_range[i] = class_to_ra_reg_range[i-1];
141 }
142
143 uint8_t *ra_reg_to_grf = ralloc_array(compiler, uint8_t, ra_reg_count);
144 struct ra_regs *regs = ra_alloc_reg_set(compiler, ra_reg_count, false);
145 if (devinfo->gen >= 6)
146 ra_set_allocate_round_robin(regs);
147 int *classes = ralloc_array(compiler, int, class_count);
148 int aligned_pairs_class = -1;
149
150 /* Allocate space for q values. We allocate class_count + 1 because we
151 * want to leave room for the aligned pairs class if we have it. */
152 unsigned int **q_values = ralloc_array(compiler, unsigned int *,
153 class_count + 1);
154 for (int i = 0; i < class_count + 1; ++i)
155 q_values[i] = ralloc_array(q_values, unsigned int, class_count + 1);
156
157 /* Now, add the registers to their classes, and add the conflicts
158 * between them and the base GRF registers (and also each other).
159 */
160 int reg = 0;
161 int pairs_base_reg = 0;
162 int pairs_reg_count = 0;
163 for (int i = 0; i < class_count; i++) {
164 int class_reg_count;
165 if (devinfo->gen <= 5 && dispatch_width >= 16) {
166 class_reg_count = (base_reg_count - (class_sizes[i] - 1)) / 2;
167
168 /* See comment below. The only difference here is that we are
169 * dealing with pairs of registers instead of single registers.
170 * Registers of odd sizes simply get rounded up. */
171 for (int j = 0; j < class_count; j++)
172 q_values[i][j] = (class_sizes[i] + 1) / 2 +
173 (class_sizes[j] + 1) / 2 - 1;
174 } else {
175 class_reg_count = base_reg_count - (class_sizes[i] - 1);
176
177 /* From register_allocate.c:
178 *
179 * q(B,C) (indexed by C, B is this register class) in
180 * Runeson/Nyström paper. This is "how many registers of B could
181 * the worst choice register from C conflict with".
182 *
183 * If we just let the register allocation algorithm compute these
184 * values, it is extremely expensive. However, since all of our
185 * registers are laid out, we can very easily compute them
186 * ourselves. View the register from C as fixed starting at GRF n
187 * somewhere in the middle, and the register from B as sliding back
188 * and forth. Then the first register to conflict from B is the
189 * one starting at n - class_size[B] + 1 and the last register to
190 * conflict will start at n + class_size[C] - 1. Therefore, the
191 * number of conflicts from B is class_size[B] + class_size[C] - 1.
192 *
193 * +-+-+-+-+-+-+ +-+-+-+-+-+-+
194 * B | | | | | |n| --> | | | | | | |
195 * +-+-+-+-+-+-+ +-+-+-+-+-+-+
196 * +-+-+-+-+-+
197 * C |n| | | | |
198 * +-+-+-+-+-+
199 */
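/* A small worked example of the expression below (added commentary):
 * with class_sizes[i] == 3 (B) and class_sizes[j] == 2 (C), a register
 * from B can start at n - 2, n - 1, n, or n + 1 and still overlap the
 * size-2 register from C fixed at n, so q = 3 + 2 - 1 = 4.
 */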
200 for (int j = 0; j < class_count; j++)
201 q_values[i][j] = class_sizes[i] + class_sizes[j] - 1;
202 }
203 classes[i] = ra_alloc_reg_class(regs);
204
205 /* Save this off for the aligned pair class at the end. */
206 if (class_sizes[i] == 2) {
207 pairs_base_reg = reg;
208 pairs_reg_count = class_reg_count;
209 }
210
211 if (devinfo->gen <= 5 && dispatch_width >= 16) {
212 for (int j = 0; j < class_reg_count; j++) {
213 ra_class_add_reg(regs, classes[i], reg);
214
215 ra_reg_to_grf[reg] = j * 2;
216
217 for (int base_reg = j;
218 base_reg < j + (class_sizes[i] + 1) / 2;
219 base_reg++) {
220 ra_add_reg_conflict(regs, base_reg, reg);
221 }
222
223 reg++;
224 }
225 } else {
226 for (int j = 0; j < class_reg_count; j++) {
227 ra_class_add_reg(regs, classes[i], reg);
228
229 ra_reg_to_grf[reg] = j;
230
231 for (int base_reg = j;
232 base_reg < j + class_sizes[i];
233 base_reg++) {
234 ra_add_reg_conflict(regs, base_reg, reg);
235 }
236
237 reg++;
238 }
239 }
240 }
241 assert(reg == ra_reg_count);
242
243 /* Applying transitivity to all of the base registers gives us the
244 * appropriate register conflict relationships everywhere.
245 */
246 for (int reg = 0; reg < base_reg_count; reg++)
247 ra_make_reg_conflicts_transitive(regs, reg);
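/* Sketch of what transitivity buys us (added commentary): an ra register
 * of the size-2 class based at GRF 10 conflicts with base registers 10
 * and 11, and making those base registers' conflicts transitive also
 * makes it conflict with every other class register that touches GRF 10
 * or 11.
 */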
248
249 /* Add a special class for aligned pairs, which we'll put delta_xy
250 * in on Gen <= 6 so that we can do PLN.
251 */
252 if (devinfo->has_pln && dispatch_width == 8 && devinfo->gen <= 6) {
253 aligned_pairs_class = ra_alloc_reg_class(regs);
254
255 for (int i = 0; i < pairs_reg_count; i++) {
256 if ((ra_reg_to_grf[pairs_base_reg + i] & 1) == 0) {
257 ra_class_add_reg(regs, aligned_pairs_class, pairs_base_reg + i);
258 }
259 }
260
261 for (int i = 0; i < class_count; i++) {
262 /* These are a little counter-intuitive because the pair registers
263 * are required to be aligned while the registers they are
264 * potentially interfering with are not. In the case where the
265 * size is even, the worst-case is that the register is
266 * odd-aligned. In the odd-size case, it doesn't matter.
267 */
268 q_values[class_count][i] = class_sizes[i] / 2 + 1;
269 q_values[i][class_count] = class_sizes[i] + 1;
270 }
271 q_values[class_count][class_count] = 1;
272 }
273
274 ra_set_finalize(regs, q_values);
275
276 ralloc_free(q_values);
277
278 compiler->fs_reg_sets[index].regs = regs;
279 for (unsigned i = 0; i < ARRAY_SIZE(compiler->fs_reg_sets[index].classes); i++)
280 compiler->fs_reg_sets[index].classes[i] = -1;
281 for (int i = 0; i < class_count; i++)
282 compiler->fs_reg_sets[index].classes[class_sizes[i] - 1] = classes[i];
283 compiler->fs_reg_sets[index].ra_reg_to_grf = ra_reg_to_grf;
284 compiler->fs_reg_sets[index].aligned_pairs_class = aligned_pairs_class;
285 }
286
287 void
288 brw_fs_alloc_reg_sets(struct brw_compiler *compiler)
289 {
290 brw_alloc_reg_set(compiler, 8);
291 brw_alloc_reg_set(compiler, 16);
292 brw_alloc_reg_set(compiler, 32);
293 }
294
295 static int
296 count_to_loop_end(const bblock_t *block)
297 {
298 if (block->end()->opcode == BRW_OPCODE_WHILE)
299 return block->end_ip;
300
301 int depth = 1;
302 /* Skip the first block, since we don't want to count the DO that the
303 * calling function found.
304 */
305 for (block = block->next();
306 depth > 0;
307 block = block->next()) {
308 if (block->start()->opcode == BRW_OPCODE_DO)
309 depth++;
310 if (block->end()->opcode == BRW_OPCODE_WHILE) {
311 depth--;
312 if (depth == 0)
313 return block->end_ip;
314 }
315 }
316 unreachable("not reached");
317 }
318
319 void fs_visitor::calculate_payload_ranges(int payload_node_count,
320 int *payload_last_use_ip)
321 {
322 int loop_depth = 0;
323 int loop_end_ip = 0;
324
325 for (int i = 0; i < payload_node_count; i++)
326 payload_last_use_ip[i] = -1;
327
328 int ip = 0;
329 foreach_block_and_inst(block, fs_inst, inst, cfg) {
330 switch (inst->opcode) {
331 case BRW_OPCODE_DO:
332 loop_depth++;
333
334 /* Since payload regs are defined only at the start of the shader
335 * execution, any uses of the payload within a loop mean the live
336 * interval extends to the end of the outermost loop. Find the ip of
337 * the end now.
338 */
339 if (loop_depth == 1)
340 loop_end_ip = count_to_loop_end(block);
341 break;
342 case BRW_OPCODE_WHILE:
343 loop_depth--;
344 break;
345 default:
346 break;
347 }
348
349 int use_ip;
350 if (loop_depth > 0)
351 use_ip = loop_end_ip;
352 else
353 use_ip = ip;
354
355 /* Note that UNIFORM args have been turned into FIXED_GRF by
356 * assign_curbe_setup(), and interpolation uses fixed hardware regs from
357 * the start (see interp_reg()).
358 */
359 for (int i = 0; i < inst->sources; i++) {
360 if (inst->src[i].file == FIXED_GRF) {
361 int node_nr = inst->src[i].nr;
362 if (node_nr >= payload_node_count)
363 continue;
364
365 for (unsigned j = 0; j < regs_read(inst, i); j++) {
366 payload_last_use_ip[node_nr + j] = use_ip;
367 assert(node_nr + j < unsigned(payload_node_count));
368 }
369 }
370 }
371
372 /* Special case instructions which have extra implied registers used. */
373 switch (inst->opcode) {
374 case CS_OPCODE_CS_TERMINATE:
375 payload_last_use_ip[0] = use_ip;
376 break;
377
378 default:
379 if (inst->eot) {
380 /* We could omit this for the !inst->header_present case, except
381 * that the simulator apparently incorrectly reads from g0/g1
382 * instead of sideband. It also really freaks out driver
383 * developers to see g0 used in unusual places, so just always
384 * reserve it.
385 */
386 payload_last_use_ip[0] = use_ip;
387 payload_last_use_ip[1] = use_ip;
388 }
389 break;
390 }
391
392 ip++;
393 }
394 }
395
396
397 /**
398 * Sets up interference between thread payload registers and the virtual GRFs
399 * to be allocated for program temporaries.
400 *
401 * We want to be able to reallocate the payload for our virtual GRFs, notably
402 * because the setup coefficients for a full set of 16 FS inputs takes up 8 of
403 * our 128 registers.
404 *
405 * The layout of the payload registers is:
406 *
407 * 0..payload.num_regs-1: fixed function setup (including barycentric coordinates).
408 * payload.num_regs..payload.num_regs+curb_read_length-1: uniform data
409 * payload.num_regs+curb_read_length..first_non_payload_grf-1: setup coefficients.
410 *
411 * And we have payload_node_count nodes covering these registers in order
412 * (note that in SIMD16, a node is two registers).
413 */
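/* A concrete instance of the layout above (added commentary; the numbers
 * are assumed for illustration): a SIMD8 fragment shader with 2 fixed
 * function registers, 2 curbe registers and 2 setup-coefficient registers
 * has payload.num_regs == 2, first_non_payload_grf == 6 and therefore
 * payload_node_count == 6, one node per payload GRF.
 */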
414 void
415 fs_visitor::setup_payload_interference(struct ra_graph *g,
416 int payload_node_count,
417 int first_payload_node)
418 {
419 int payload_last_use_ip[payload_node_count];
420 calculate_payload_ranges(payload_node_count, payload_last_use_ip);
421
422 for (int i = 0; i < payload_node_count; i++) {
423 if (payload_last_use_ip[i] == -1)
424 continue;
425
426 /* Mark the payload node as interfering with any virtual grf that is
427 * live between the start of the program and our last use of the payload
428 * node.
429 */
430 for (unsigned j = 0; j < this->alloc.count; j++) {
431 /* Note that we use a <= comparison, unlike virtual_grf_interferes(),
432 * in order to not have to worry about the uniform issue described in
433 * calculate_live_intervals().
434 */
435 if (this->virtual_grf_start[j] <= payload_last_use_ip[i]) {
436 ra_add_node_interference(g, first_payload_node + i, j);
437 }
438 }
439 }
440
441 for (int i = 0; i < payload_node_count; i++) {
442 /* Mark each payload node as being allocated to its physical register.
443 *
444 * The alternative would be to have per-physical-register classes, which
445 * would just be silly.
446 */
447 if (devinfo->gen <= 5 && dispatch_width >= 16) {
448 /* We have to divide by 2 here because we only have even numbered
449 * registers. Some of the payload registers will be odd, but
450 * that's ok because their physical register numbers have already
451 * been assigned. The only thing this is used for is interference.
452 */
453 ra_set_node_reg(g, first_payload_node + i, i / 2);
454 } else {
455 ra_set_node_reg(g, first_payload_node + i, i);
456 }
457 }
458 }
459
460 /**
461 * Sets the mrf_used array to indicate which MRFs are used by the shader IR
462 *
463 * This is used in assign_regs() to decide which of the GRFs that we use as
464 * MRFs on gen7 get normally register allocated, and in register spilling to
465 * see if we can actually use MRFs to do spills without overwriting normal MRF
466 * contents.
467 */
468 static void
469 get_used_mrfs(fs_visitor *v, bool *mrf_used)
470 {
471 int reg_width = v->dispatch_width / 8;
472
473 memset(mrf_used, 0, BRW_MAX_MRF(v->devinfo->gen) * sizeof(bool));
474
475 foreach_block_and_inst(block, fs_inst, inst, v->cfg) {
476 if (inst->dst.file == MRF) {
477 int reg = inst->dst.nr & ~BRW_MRF_COMPR4;
478 mrf_used[reg] = true;
479 if (reg_width == 2) {
480 if (inst->dst.nr & BRW_MRF_COMPR4) {
481 mrf_used[reg + 4] = true;
482 } else {
483 mrf_used[reg + 1] = true;
484 }
485 }
486 }
487
488 if (inst->mlen > 0) {
489 for (int i = 0; i < v->implied_mrf_writes(inst); i++) {
490 mrf_used[inst->base_mrf + i] = true;
491 }
492 }
493 }
494 }
495
496 /**
497 * Sets interference between virtual GRFs and usage of the high GRFs for SEND
498 * messages (treated as MRFs in code generation).
499 */
500 static void
501 setup_mrf_hack_interference(fs_visitor *v, struct ra_graph *g,
502 int first_mrf_node, int *first_used_mrf)
503 {
504 bool mrf_used[BRW_MAX_MRF(v->devinfo->gen)];
505 get_used_mrfs(v, mrf_used);
506
507 *first_used_mrf = BRW_MAX_MRF(v->devinfo->gen);
508 for (int i = 0; i < BRW_MAX_MRF(v->devinfo->gen); i++) {
509 /* Mark each MRF reg node as being allocated to its physical register.
510 *
511 * The alternative would be to have per-physical-register classes, which
512 * would just be silly.
513 */
514 ra_set_node_reg(g, first_mrf_node + i, GEN7_MRF_HACK_START + i);
515
516 /* Since we don't have any live/dead analysis on the MRFs, just mark all
517 * that are used as conflicting with all virtual GRFs.
518 */
519 if (mrf_used[i]) {
520 if (i < *first_used_mrf)
521 *first_used_mrf = i;
522
523 for (unsigned j = 0; j < v->alloc.count; j++) {
524 ra_add_node_interference(g, first_mrf_node + i, j);
525 }
526 }
527 }
528 }
529
530 static ra_graph *
531 build_interference_graph(fs_visitor *fs)
532 {
533 const gen_device_info *devinfo = fs->devinfo;
534 const brw_compiler *compiler = fs->compiler;
535
536 /* Most of this allocation was written for a reg_width of 1
537 * (dispatch_width == 8). In extending to SIMD16, the code was
538 * left in place and it was converted to have the hardware
539 * registers it's allocating be contiguous physical pairs of regs
540 * for reg_width == 2.
541 */
542 int reg_width = fs->dispatch_width / 8;
543 int payload_node_count = ALIGN(fs->first_non_payload_grf, reg_width);
544 int rsi = _mesa_logbase2(reg_width); /* Which compiler->fs_reg_sets[] to use */
545 fs->calculate_live_intervals();
546
547 int node_count = fs->alloc.count;
548 int first_payload_node = node_count;
549 node_count += payload_node_count;
550 int first_mrf_hack_node = node_count;
551 if (devinfo->gen >= 7)
552 node_count += BRW_MAX_GRF - GEN7_MRF_HACK_START;
553 int grf127_send_hack_node = node_count;
554 if (devinfo->gen >= 8)
555 node_count ++;
556 struct ra_graph *g =
557 ra_alloc_interference_graph(compiler->fs_reg_sets[rsi].regs, node_count);
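/* Node numbering sketch (added commentary): nodes [0, alloc.count) are the
 * virtual GRFs, the next payload_node_count nodes are the payload
 * registers, then (on Gen7+) the MRF-hack nodes, and finally (on Gen8+)
 * the single grf127_send_hack_node.
 */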
558
559 for (unsigned i = 0; i < fs->alloc.count; i++) {
560 unsigned size = fs->alloc.sizes[i];
561 int c;
562
563 assert(size <= ARRAY_SIZE(compiler->fs_reg_sets[rsi].classes) &&
564 "Register allocation relies on split_virtual_grfs()");
565 c = compiler->fs_reg_sets[rsi].classes[size - 1];
566
567 /* Special case: on pre-GEN6 hardware that supports PLN, the
568 * second operand of a PLN instruction needs to be an
569 * even-numbered register, so we have a special register class
570 * wm_aligned_pairs_class to handle this case. pre-GEN6 always
571 * uses fs->delta_xy[BRW_BARYCENTRIC_PERSPECTIVE_PIXEL] as the
572 * second operand of a PLN instruction (since it doesn't support
573 * any other interpolation modes). So all we need to do is find
574 * that register and set it to the appropriate class.
575 */
576 if (compiler->fs_reg_sets[rsi].aligned_pairs_class >= 0 &&
577 fs->delta_xy[BRW_BARYCENTRIC_PERSPECTIVE_PIXEL].file == VGRF &&
578 fs->delta_xy[BRW_BARYCENTRIC_PERSPECTIVE_PIXEL].nr == i) {
579 c = compiler->fs_reg_sets[rsi].aligned_pairs_class;
580 }
581
582 ra_set_node_class(g, i, c);
583
584 for (unsigned j = 0; j < i; j++) {
585 if (fs->virtual_grf_interferes(i, j)) {
586 ra_add_node_interference(g, i, j);
587 }
588 }
589 }
590
591 /* Certain instructions can't safely use the same register for their
592 * sources and destination. Add interference.
593 */
594 foreach_block_and_inst(block, fs_inst, inst, fs->cfg) {
595 if (inst->dst.file == VGRF && inst->has_source_and_destination_hazard()) {
596 for (unsigned i = 0; i < inst->sources; i++) {
597 if (inst->src[i].file == VGRF) {
598 ra_add_node_interference(g, inst->dst.nr, inst->src[i].nr);
599 }
600 }
601 }
602 }
603
604 fs->setup_payload_interference(g, payload_node_count, first_payload_node);
605 if (devinfo->gen >= 7) {
606 int first_used_mrf = BRW_MAX_MRF(devinfo->gen);
607 setup_mrf_hack_interference(fs, g, first_mrf_hack_node,
608 &first_used_mrf);
609
610 foreach_block_and_inst(block, fs_inst, inst, fs->cfg) {
611 /* When we do send-from-GRF for FB writes, we need to ensure that
612 * the last write instruction sends from a high register. This is
613 * because the vertex fetcher wants to start filling the low
614 * payload registers while the pixel data port is still working on
615 * writing out the memory. If we don't do this, we get rendering
616 * artifacts.
617 *
618 * We could just do "something high". Instead, we just pick the
619 * highest register that works.
620 */
621 if (inst->eot) {
622 const int vgrf = inst->opcode == SHADER_OPCODE_SEND ?
623 inst->src[2].nr : inst->src[0].nr;
624 int size = fs->alloc.sizes[vgrf];
625 int reg = compiler->fs_reg_sets[rsi].class_to_ra_reg_range[size] - 1;
626
627 /* If something happened to spill, we want to push the EOT send
628 * register early enough in the register file that we don't
629 * conflict with any used MRF hack registers.
630 */
631 reg -= BRW_MAX_MRF(devinfo->gen) - first_used_mrf;
632
633 ra_set_node_reg(g, vgrf, reg);
634 break;
635 }
636 }
637 }
638
639 /* In 16-wide instructions we have an issue where a compressed
640 * instruction is actually two instructions executed simultaneously.
641 * It's actually ok to have the source and destination registers be
642 * the same. In this case, each instruction over-writes its own
643 * source and there's no problem. The real problem here is if the
644 * source and destination registers are off by one. Then you can end
645 * up in a scenario where the first instruction over-writes the
646 * source of the second instruction. Since the compiler doesn't know
647 * about this level of granularity, we simply make the source and
648 * destination interfere.
649 */
650 foreach_block_and_inst(block, fs_inst, inst, fs->cfg) {
651 if (inst->exec_size < 16 || inst->dst.file != VGRF)
652 continue;
653
654 for (int i = 0; i < inst->sources; ++i) {
655 if (inst->src[i].file == VGRF) {
656 ra_add_node_interference(g, inst->dst.nr, inst->src[i].nr);
657 }
658 }
659 }
660
661 if (devinfo->gen >= 8) {
662 /* From the Intel Broadwell PRM, vol 07, section "Instruction Set Reference",
663 * subsection "EUISA Instructions", Send Message (page 990):
664 *
665 * "r127 must not be used for return address when there is a src and
666 * dest overlap in send instruction."
667 *
668 * We avoid using grf127 as part of the destination of send messages by
669 * adding a node interference to the grf127_send_hack_node. This node
670 * has a fixed assignment to grf127.
671 *
672 * We don't apply it to SIMD16 instructions because previous code avoids
673 * any register overlap between sources and destination.
674 */
675 ra_set_node_reg(g, grf127_send_hack_node, 127);
676 foreach_block_and_inst(block, fs_inst, inst, fs->cfg) {
677 if (inst->exec_size < 16 && inst->is_send_from_grf() &&
678 inst->dst.file == VGRF)
679 ra_add_node_interference(g, inst->dst.nr, grf127_send_hack_node);
680 }
681
682 if (fs->spilled_any_registers) {
683 foreach_block_and_inst(block, fs_inst, inst, fs->cfg) {
684 /* Spilling instructions are generated as SEND messages from MRF,
685 * but since Gen7+ supports sending from GRF the driver will map
686 * these MRF registers to GRFs. The implementation reuses the dest
687 * of the send message as a source, so we are guaranteed to have an
688 * overlap and we create an interference between the destination
689 * and grf127.
690 */
691 if ((inst->opcode == SHADER_OPCODE_GEN7_SCRATCH_READ ||
692 inst->opcode == SHADER_OPCODE_GEN4_SCRATCH_READ) &&
693 inst->dst.file == VGRF)
694 ra_add_node_interference(g, inst->dst.nr, grf127_send_hack_node);
695 }
696 }
697 }
698
699 /* From the Skylake PRM Vol. 2a docs for sends:
700 *
701 * "It is required that the second block of GRFs does not overlap with
702 * the first block."
703 *
704 * Normally, this is taken care of by fixup_sends_duplicate_payload() but
705 * in the case where one of the registers is an undefined value, the
706 * register allocator may decide that they don't interfere even though
707 * they're used as sources in the same instruction. We also need to add
708 * interference here.
709 */
710 if (devinfo->gen >= 9) {
711 foreach_block_and_inst(block, fs_inst, inst, fs->cfg) {
712 if (inst->opcode == SHADER_OPCODE_SEND && inst->ex_mlen > 0 &&
713 inst->src[2].file == VGRF &&
714 inst->src[3].file == VGRF &&
715 inst->src[2].nr != inst->src[3].nr)
716 ra_add_node_interference(g, inst->src[2].nr,
717 inst->src[3].nr);
718 }
719 }
720
721 return g;
722 }
723
724 bool
725 fs_visitor::assign_regs(bool allow_spilling, bool spill_all)
726 {
727 /* Most of this allocation was written for a reg_width of 1
728 * (dispatch_width == 8). In extending to SIMD16, the code was
729 * left in place and it was converted to have the hardware
730 * registers it's allocating be contiguous physical pairs of regs
731 * for reg_width == 2.
732 */
733 int reg_width = dispatch_width / 8;
734 int rsi = _mesa_logbase2(reg_width); /* Which compiler->fs_reg_sets[] to use */
735 ra_graph *g = build_interference_graph(this);
736
737 /* Debug of register spilling: Go spill everything. */
738 if (unlikely(spill_all)) {
739 int reg = choose_spill_reg(g);
740
741 if (reg != -1) {
742 spill_reg(reg);
743 ralloc_free(g);
744 return false;
745 }
746 }
747
748 if (!ra_allocate(g)) {
749 /* Failed to allocate registers. Spill a reg, and the caller will
750 * loop back into here to try again.
751 */
752 int reg = choose_spill_reg(g);
753
754 if (reg == -1) {
755 fail("no register to spill:\n");
756 dump_instructions(NULL);
757 } else if (allow_spilling) {
758 spill_reg(reg);
759 }
760
761 ralloc_free(g);
762
763 return false;
764 }
765
766 /* Get the chosen virtual registers for each node, and map virtual
767 * regs in the register classes back down to real hardware reg
768 * numbers.
769 */
770 unsigned hw_reg_mapping[alloc.count];
771 this->grf_used = this->first_non_payload_grf;
772 for (unsigned i = 0; i < this->alloc.count; i++) {
773 int reg = ra_get_node_reg(g, i);
774
775 hw_reg_mapping[i] = compiler->fs_reg_sets[rsi].ra_reg_to_grf[reg];
776 this->grf_used = MAX2(this->grf_used,
777 hw_reg_mapping[i] + this->alloc.sizes[i]);
778 }
779
780 foreach_block_and_inst(block, fs_inst, inst, cfg) {
781 assign_reg(hw_reg_mapping, &inst->dst);
782 for (int i = 0; i < inst->sources; i++) {
783 assign_reg(hw_reg_mapping, &inst->src[i]);
784 }
785 }
786
787 this->alloc.count = this->grf_used;
788
789 ralloc_free(g);
790
791 return true;
792 }
793
794 namespace {
795 /**
796 * Maximum spill block size we expect to encounter in 32B units.
797 *
798 * This is somewhat arbitrary and doesn't necessarily limit the maximum
799 * variable size that can be spilled -- A higher value will allow a
800 * variable of a given size to be spilled more efficiently with a smaller
801 * number of scratch messages, but will increase the likelihood of a
802 * collision between the MRFs reserved for spilling and other MRFs used by
803 * the program (and possibly increase GRF register pressure on platforms
804 * without hardware MRFs), which could cause register allocation to fail.
805 *
806 * For the moment reserve just enough space so a register of 32 bit
807 * component type and natural region width can be spilled without splitting
808 * into multiple (force_writemask_all) scratch messages.
809 */
810 unsigned
811 spill_max_size(const backend_shader *s)
812 {
813 /* FINISHME - On Gen7+ it should be possible to avoid this limit
814 * altogether by spilling directly from the temporary GRF
815 * allocated to hold the result of the instruction (and the
816 * scratch write header).
817 */
818 /* FINISHME - The shader's dispatch width probably belongs in
819 * backend_shader (or some nonexistent fs_shader class?)
820 * rather than in the visitor class.
821 */
822 return static_cast<const fs_visitor *>(s)->dispatch_width / 8;
823 }
824
825 /**
826 * First MRF register available for spilling.
827 */
828 unsigned
829 spill_base_mrf(const backend_shader *s)
830 {
831 return BRW_MAX_MRF(s->devinfo->gen) - spill_max_size(s) - 1;
832 }
833 }
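/* Rough numbers for the helpers above (added commentary; assumes
 * BRW_MAX_MRF(gen) == 16 on the platform in question): a SIMD16 shader
 * gets spill_max_size == 2, so spill_base_mrf == 16 - 2 - 1 == 13 and
 * spills may use MRFs 13..15, matching the comment in
 * fs_visitor::spill_reg() below.
 */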
834
835 static void
836 emit_unspill(const fs_builder &bld, fs_reg dst,
837 uint32_t spill_offset, unsigned count)
838 {
839 const gen_device_info *devinfo = bld.shader->devinfo;
840 const unsigned reg_size = dst.component_size(bld.dispatch_width()) /
841 REG_SIZE;
842 assert(count % reg_size == 0);
843
844 for (unsigned i = 0; i < count / reg_size; i++) {
845 /* The Gen7 descriptor-based offset is 12 bits of HWORD units. Because
846 * the Gen7-style scratch block read is hardwired to BTI 255, on Gen9+
847 * it would cause the DC to do an IA-coherent read, which largely
848 * outweighs the slight advantage from not having to provide the address
849 * as part of the message header, so we're better off using plain old
850 * oword block reads.
851 */
852 bool gen7_read = (devinfo->gen >= 7 && devinfo->gen < 9 &&
853 spill_offset < (1 << 12) * REG_SIZE);
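/* Added note: the 12-bit HWORD offset limit above covers
 * 4096 * REG_SIZE == 4096 * 32 bytes == 128KB of scratch; spills beyond
 * that fall back to the Gen4-style read even on Gen7/8.
 */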
854 fs_inst *unspill_inst = bld.emit(gen7_read ?
855 SHADER_OPCODE_GEN7_SCRATCH_READ :
856 SHADER_OPCODE_GEN4_SCRATCH_READ,
857 dst);
858 unspill_inst->offset = spill_offset;
859
860 if (!gen7_read) {
861 unspill_inst->base_mrf = spill_base_mrf(bld.shader);
862 unspill_inst->mlen = 1; /* header contains offset */
863 }
864
865 dst.offset += reg_size * REG_SIZE;
866 spill_offset += reg_size * REG_SIZE;
867 }
868 }
869
870 static void
871 emit_spill(const fs_builder &bld, fs_reg src,
872 uint32_t spill_offset, unsigned count)
873 {
874 const unsigned reg_size = src.component_size(bld.dispatch_width()) /
875 REG_SIZE;
876 assert(count % reg_size == 0);
877
878 for (unsigned i = 0; i < count / reg_size; i++) {
879 fs_inst *spill_inst =
880 bld.emit(SHADER_OPCODE_GEN4_SCRATCH_WRITE, bld.null_reg_f(), src);
881 src.offset += reg_size * REG_SIZE;
882 spill_inst->offset = spill_offset + i * reg_size * REG_SIZE;
883 spill_inst->mlen = 1 + reg_size; /* header, value */
884 spill_inst->base_mrf = spill_base_mrf(bld.shader);
885 }
886 }
887
888 int
889 fs_visitor::choose_spill_reg(struct ra_graph *g)
890 {
891 float block_scale = 1.0;
892 float spill_costs[this->alloc.count];
893 bool no_spill[this->alloc.count];
894
895 for (unsigned i = 0; i < this->alloc.count; i++) {
896 spill_costs[i] = 0.0;
897 no_spill[i] = false;
898 }
899
900 /* Calculate costs for spilling nodes. Call it a cost of 1 per
901 * spill/unspill we'll have to do, and guess that the insides of
902 * loops run 10 times.
903 */
904 foreach_block_and_inst(block, fs_inst, inst, cfg) {
905 for (unsigned int i = 0; i < inst->sources; i++) {
906 if (inst->src[i].file == VGRF)
907 spill_costs[inst->src[i].nr] += regs_read(inst, i) * block_scale;
908 }
909
910 if (inst->dst.file == VGRF)
911 spill_costs[inst->dst.nr] += regs_written(inst) * block_scale;
912
913 switch (inst->opcode) {
914
915 case BRW_OPCODE_DO:
916 block_scale *= 10;
917 break;
918
919 case BRW_OPCODE_WHILE:
920 block_scale /= 10;
921 break;
922
923 case BRW_OPCODE_IF:
924 case BRW_OPCODE_IFF:
925 block_scale *= 0.5;
926 break;
927
928 case BRW_OPCODE_ENDIF:
929 block_scale /= 0.5;
930 break;
931
932 case SHADER_OPCODE_GEN4_SCRATCH_WRITE:
933 if (inst->src[0].file == VGRF)
934 no_spill[inst->src[0].nr] = true;
935 break;
936
937 case SHADER_OPCODE_GEN4_SCRATCH_READ:
938 case SHADER_OPCODE_GEN7_SCRATCH_READ:
939 if (inst->dst.file == VGRF)
940 no_spill[inst->dst.nr] = true;
941 break;
942
943 default:
944 break;
945 }
946 }
947
948 for (unsigned i = 0; i < this->alloc.count; i++) {
949 int live_length = virtual_grf_end[i] - virtual_grf_start[i];
950 if (live_length <= 0)
951 continue;
952
953 /* Divide the cost (in number of spills/fills) by the log of the length
954 * of the live range of the register. This will encourage spill logic
955 * to spill long-living things before spilling short-lived things where
956 * spilling is less likely to actually do us any good. We use the log
957 * of the length because it will fall off very quickly and not cause us
958 * to spill medium length registers with more uses.
959 */
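/* Illustrative numbers (added commentary): 10 spills/fills over a live
 * range of 10 ips score 10 / ln(10) ~= 4.3, while the same 10 accesses
 * spread over 100 ips score 10 / ln(100) ~= 2.2, so the long-lived
 * register looks cheaper to spill.
 */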
960 float adjusted_cost = spill_costs[i] / logf(live_length);
961 if (!no_spill[i])
962 ra_set_node_spill_cost(g, i, adjusted_cost);
963 }
964
965 return ra_get_best_spill_node(g);
966 }
967
968 void
969 fs_visitor::spill_reg(unsigned spill_reg)
970 {
971 int size = alloc.sizes[spill_reg];
972 unsigned int spill_offset = last_scratch;
973 assert(ALIGN(spill_offset, 16) == spill_offset); /* oword read/write req. */
974
975 /* Spills may use MRFs 13-15 in the SIMD16 case. Our texturing is done
976 * using up to 11 MRFs starting from either m1 or m2, and fb writes can use
977 * up to m13 (gen6+ simd16: 2 header + 8 color + 2 src0alpha + 2 omask) or
978 * m15 (gen4-5 simd16: 2 header + 8 color + 1 aads + 2 src depth + 2 dst
979 * depth), starting from m1. In summary: We may not be able to spill in
980 * SIMD16 mode, because we'd stomp the FB writes.
981 */
982 if (!spilled_any_registers) {
983 bool mrf_used[BRW_MAX_MRF(devinfo->gen)];
984 get_used_mrfs(this, mrf_used);
985
986 for (int i = spill_base_mrf(this); i < BRW_MAX_MRF(devinfo->gen); i++) {
987 if (mrf_used[i]) {
988 fail("Register spilling not supported with m%d used", i);
989 return;
990 }
991 }
992
993 spilled_any_registers = true;
994 }
995
996 last_scratch += size * REG_SIZE;
997
998 /* Generate spill/unspill instructions for the objects being
999 * spilled. Right now, we spill or unspill the whole thing to a
1000 * virtual grf of the same size. For most instructions, though, we
1001 * could just spill/unspill the GRF being accessed.
1002 */
1003 foreach_block_and_inst (block, fs_inst, inst, cfg) {
1004 const fs_builder ibld = fs_builder(this, block, inst);
1005
1006 for (unsigned int i = 0; i < inst->sources; i++) {
1007 if (inst->src[i].file == VGRF &&
1008 inst->src[i].nr == spill_reg) {
1009 int count = regs_read(inst, i);
1010 int subset_spill_offset = spill_offset +
1011 ROUND_DOWN_TO(inst->src[i].offset, REG_SIZE);
1012 fs_reg unspill_dst(VGRF, alloc.allocate(count));
1013
1014 inst->src[i].nr = unspill_dst.nr;
1015 inst->src[i].offset %= REG_SIZE;
1016
1017 /* We read the largest power-of-two divisor of the register count
1018 * (because only POT scratch read blocks are allowed by the
1019 * hardware) up to the maximum supported block size.
1020 */
1021 const unsigned width =
1022 MIN2(32, 1u << (ffs(MAX2(1, count) * 8) - 1));
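/* Worked example (added commentary): count == 3 gives ffs(24) == 4, so
 * width == MIN2(32, 1 << 3) == 8 channels; count == 4 gives ffs(32) == 6
 * and the full 32.
 */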
1023
1024 /* Set exec_all() on unspill messages under the (rather
1025 * pessimistic) assumption that there is no one-to-one
1026 * correspondence between channels of the spilled variable in
1027 * scratch space and the scratch read message, which operates on
1028 * 32 bit channels. It shouldn't hurt in any case because the
1029 * unspill destination is a block-local temporary.
1030 */
1031 emit_unspill(ibld.exec_all().group(width, 0),
1032 unspill_dst, subset_spill_offset, count);
1033 }
1034 }
1035
1036 if (inst->dst.file == VGRF &&
1037 inst->dst.nr == spill_reg) {
1038 int subset_spill_offset = spill_offset +
1039 ROUND_DOWN_TO(inst->dst.offset, REG_SIZE);
1040 fs_reg spill_src(VGRF, alloc.allocate(regs_written(inst)));
1041
1042 inst->dst.nr = spill_src.nr;
1043 inst->dst.offset %= REG_SIZE;
1044
1045 /* If we're immediately spilling the register, we should not use
1046 * destination dependency hints. Doing so will cause the GPU to
1047 * try to read and write the register at the same time and may
1048 * hang the GPU.
1049 */
1050 inst->no_dd_clear = false;
1051 inst->no_dd_check = false;
1052
1053 /* Calculate the execution width of the scratch messages (which work
1054 * in terms of 32 bit components so we have a fixed number of eight
1055 * channels per spilled register). We attempt to write one
1056 * exec_size-wide component of the variable at a time without
1057 * exceeding the maximum number of (fake) MRF registers reserved for
1058 * spills.
1059 */
1060 const unsigned width = 8 * MIN2(
1061 DIV_ROUND_UP(inst->dst.component_size(inst->exec_size), REG_SIZE),
1062 spill_max_size(this));
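/* Worked example (added commentary): a 32-bit destination at exec_size 16
 * has component_size == 64 bytes == 2 registers, so with
 * spill_max_size == 2 this writes 16 channels at a time.
 */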
1063
1064 /* Spills should only write data initialized by the instruction for
1065 * whichever channels are enabled in the execution mask. If that's
1066 * not possible we'll have to emit a matching unspill before the
1067 * instruction and set force_writemask_all on the spill.
1068 */
1069 const bool per_channel =
1070 inst->dst.is_contiguous() && type_sz(inst->dst.type) == 4 &&
1071 inst->exec_size == width;
1072
1073 /* Builder used to emit the scratch messages. */
1074 const fs_builder ubld = ibld.exec_all(!per_channel).group(width, 0);
1075
1076 /* If our write is going to affect just part of the
1077 * regs_written(inst), then we need to unspill the destination since
1078 * we write back out all of the regs_written(). If the original
1079 * instruction had force_writemask_all set and is not a partial
1080 * write, there should be no need for the unspill since the
1081 * instruction will be overwriting the whole destination in any case.
1082 */
1083 if (inst->is_partial_write() ||
1084 (!inst->force_writemask_all && !per_channel))
1085 emit_unspill(ubld, spill_src, subset_spill_offset,
1086 regs_written(inst));
1087
1088 emit_spill(ubld.at(block, inst->next), spill_src,
1089 subset_spill_offset, regs_written(inst));
1090 }
1091 }
1092
1093 invalidate_live_intervals();
1094 }