v3d: Add support for CS workgroup/invocation id intrinsics.
[mesa.git] / src / broadcom / compiler / vir_register_allocate.c
/*
 * Copyright © 2014 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "util/ralloc.h"
#include "util/register_allocate.h"
#include "common/v3d_device_info.h"
#include "v3d_compiler.h"

#define QPU_R(i) { .magic = false, .index = i }

#define ACC_INDEX     0
#define ACC_COUNT     5
#define PHYS_INDEX    (ACC_INDEX + ACC_COUNT)
#define PHYS_COUNT    64
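
/* The RA works on a flat numbering of nodes/registers: nodes
 * [ACC_INDEX, ACC_INDEX + ACC_COUNT) are the accumulators r0-r4, and
 * [PHYS_INDEX, PHYS_INDEX + PHYS_COUNT) are the physical regfile rf0-rf63.
 * r5 is left out of the allocatable set, since signals like ldvary
 * implicitly write it (see the ldvary note in v3d_choose_spill_node()).
 */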

/* Returns true if inst is the last ldtmu of its TMU operation: scans
 * forward, and if another ldtmu shows up before the next TMU setup, more
 * loads from this operation are still outstanding.
 */
static bool
is_last_ldtmu(struct qinst *inst, struct qblock *block)
{
        list_for_each_entry_from(struct qinst, scan_inst, inst,
                                 &block->instructions, link) {
                /* The scan starts at inst itself, which we already know is
                 * an ldtmu -- skip it.
                 */
                if (scan_inst == inst)
                        continue;

                if (scan_inst->qpu.sig.ldtmu)
                        return false;
                if (v3d_qpu_writes_tmu(&scan_inst->qpu))
                        return true;
        }

        return true;
}

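/* Computes a spill cost for each temp (roughly, how many fills/stores would
 * be emitted if it were spilled), clears c->spillable for temps that can't
 * legally be spilled at some point in the program, and asks the RA which of
 * the remaining candidates would be cheapest to spill.
 */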
static int
v3d_choose_spill_node(struct v3d_compile *c, struct ra_graph *g,
                      uint32_t *temp_to_node)
{
        float block_scale = 1.0;
        float spill_costs[c->num_temps];
        bool in_tmu_operation = false;
        bool started_last_seg = false;

        for (unsigned i = 0; i < c->num_temps; i++)
                spill_costs[i] = 0.0;

        /* XXX: Scale the cost up when inside of a loop. */
        vir_for_each_block(block, c) {
                vir_for_each_inst(inst, block) {
                        /* We can't insert a new TMU operation while currently
                         * in a TMU operation, and we can't insert new thread
                         * switches after starting output writes.
                         */
                        bool no_spilling =
                                (in_tmu_operation ||
                                 (c->threads > 1 && started_last_seg));

                        for (int i = 0; i < vir_get_nsrc(inst); i++) {
                                if (inst->src[i].file != QFILE_TEMP)
                                        continue;

                                int temp = inst->src[i].index;
                                if (no_spilling) {
                                        BITSET_CLEAR(c->spillable, temp);
                                } else {
                                        spill_costs[temp] += block_scale;
                                }
                        }

                        if (inst->dst.file == QFILE_TEMP) {
                                int temp = inst->dst.index;

                                if (no_spilling) {
                                        BITSET_CLEAR(c->spillable, temp);
                                } else {
                                        spill_costs[temp] += block_scale;
                                }
                        }

                        /* Refuse to spill a ldvary's dst, because that means
                         * that ldvary's r5 would end up being used across a
                         * thrsw.
                         */
                        if (inst->qpu.sig.ldvary) {
                                assert(inst->dst.file == QFILE_TEMP);
                                BITSET_CLEAR(c->spillable, inst->dst.index);
                        }

                        if (inst->is_last_thrsw)
                                started_last_seg = true;

                        if (v3d_qpu_writes_vpm(&inst->qpu) ||
                            v3d_qpu_uses_tlb(&inst->qpu))
                                started_last_seg = true;

                        /* Track when we're in between a TMU setup and the
                         * final LDTMU or TMUWT from that TMU setup.  We can't
                         * spill/fill any temps during that time, because that
                         * involves inserting a new TMU setup/LDTMU sequence.
                         */
                        if (inst->qpu.sig.ldtmu &&
                            is_last_ldtmu(inst, block))
                                in_tmu_operation = false;

                        if (inst->qpu.type == V3D_QPU_INSTR_TYPE_ALU &&
                            inst->qpu.alu.add.op == V3D_QPU_A_TMUWT)
                                in_tmu_operation = false;

                        if (v3d_qpu_writes_tmu(&inst->qpu))
                                in_tmu_operation = true;
                }
        }

        for (unsigned i = 0; i < c->num_temps; i++) {
                int node = temp_to_node[i];

                if (BITSET_TEST(c->spillable, i))
                        ra_set_node_spill_cost(g, node, spill_costs[i]);
        }

        return ra_get_best_spill_node(g);
}

/* The spill offset for this thread takes a bit of setup, so do it once at
 * program start.
 */
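/* In other words, a sketch of the address computation built below:
 *
 *    spill_base = TIDX * spill_size_per_thread + (EIDX << 2) + spill_offset
 *
 * so each QPU thread gets its own region of the scratch BO, and within a
 * region each SIMD channel gets its own dword lane (matching the
 * 16 channels * 4 bytes reserved per temp in v3d_spill_reg()).
 */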
static void
v3d_setup_spill_base(struct v3d_compile *c)
{
        c->cursor = vir_before_block(vir_entry_block(c));

        int start_num_temps = c->num_temps;

        /* Each thread wants to be in a separate region of the scratch space
         * so that the QPUs aren't fighting over cache lines.  We have the
         * driver keep a single global spill BO rather than
         * per-spilling-program BOs, so we need a uniform from the driver for
         * what the per-thread scale is.
         */
        struct qreg thread_offset =
                vir_UMUL(c,
                         vir_TIDX(c),
                         vir_uniform(c, QUNIFORM_SPILL_SIZE_PER_THREAD, 0));

        /* Each channel in a reg is 4 bytes, so scale them up by that. */
        struct qreg element_offset = vir_SHL(c, vir_EIDX(c),
                                             vir_uniform_ui(c, 2));

        c->spill_base = vir_ADD(c,
                                vir_ADD(c, thread_offset, element_offset),
                                vir_uniform(c, QUNIFORM_SPILL_OFFSET, 0));

        /* Make sure that we don't spill the spilling setup instructions. */
        for (int i = start_num_temps; i < c->num_temps; i++)
                BITSET_CLEAR(c->spillable, i);
}

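/* Emits the TMUA write that starts a scratch access at spill_base +
 * spill_offset.  The caller is responsible for the thrsw and the LDTMU
 * (fill) or TMUD/TMUWT (spill) that complete the operation.
 */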
static void
v3d_emit_spill_tmua(struct v3d_compile *c, uint32_t spill_offset)
{
        vir_ADD_dest(c, vir_reg(QFILE_MAGIC,
                                V3D_QPU_WADDR_TMUA),
                     c->spill_base,
                     vir_uniform_ui(c, spill_offset));
}

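/* Rewrites every def of spill_temp into a TMUD write plus a store to
 * scratch, and every use into a scratch read plus an LDTMU fill of a fresh
 * temp.  Each spilled temp gets a 16-channel * 4-byte row of the scratch
 * space.
 */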
static void
v3d_spill_reg(struct v3d_compile *c, int spill_temp)
{
        uint32_t spill_offset = c->spill_size;
        c->spill_size += 16 * sizeof(uint32_t);

        if (spill_offset == 0)
                v3d_setup_spill_base(c);

        struct qinst *last_thrsw = c->last_thrsw;
        assert(!last_thrsw || last_thrsw->is_last_thrsw);

        int start_num_temps = c->num_temps;

        vir_for_each_inst_inorder(inst, c) {
                for (int i = 0; i < vir_get_nsrc(inst); i++) {
                        if (inst->src[i].file != QFILE_TEMP ||
                            inst->src[i].index != spill_temp) {
                                continue;
                        }

                        c->cursor = vir_before_inst(inst);

                        v3d_emit_spill_tmua(c, spill_offset);
                        vir_emit_thrsw(c);
                        inst->src[i] = vir_LDTMU(c);
                        c->fills++;
                }

                if (inst->dst.file == QFILE_TEMP &&
                    inst->dst.index == spill_temp) {
                        c->cursor = vir_after_inst(inst);

                        inst->dst.index = c->num_temps++;
                        vir_MOV_dest(c, vir_reg(QFILE_MAGIC, V3D_QPU_WADDR_TMUD),
                                     inst->dst);
                        v3d_emit_spill_tmua(c, spill_offset);
                        vir_emit_thrsw(c);
                        vir_TMUWT(c);
                        c->spills++;
                }

                /* If we didn't have a last-thrsw inserted by nir_to_vir and
                 * we've been inserting thrsws, then insert a new last_thrsw
                 * right before we start the vpm/tlb sequence for the last
                 * thread segment.
                 */
                if (!last_thrsw && c->last_thrsw &&
                    (v3d_qpu_writes_vpm(&inst->qpu) ||
                     v3d_qpu_uses_tlb(&inst->qpu))) {
                        c->cursor = vir_before_inst(inst);
                        vir_emit_thrsw(c);

                        last_thrsw = c->last_thrsw;
                        last_thrsw->is_last_thrsw = true;
                }
        }

        /* Make sure c->last_thrsw is the actual last thrsw, not just one we
         * inserted in our most recent unspill.
         */
        if (last_thrsw)
                c->last_thrsw = last_thrsw;

        /* Don't allow spilling of our spilling instructions.  There's no way
         * they can help get things colored.
         */
        for (int i = start_num_temps; i < c->num_temps; i++)
                BITSET_CLEAR(c->spillable, i);
}

struct v3d_ra_select_callback_data {
        uint32_t next_acc;
        uint32_t next_phys;
};

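/* Called by the RA for each node it colors.  regs is the set of registers
 * still legal for this node, and we must return one of them (hence the
 * unreachable() if none is set).
 */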
static unsigned int
v3d_ra_select_callback(struct ra_graph *g, BITSET_WORD *regs, void *data)
{
        struct v3d_ra_select_callback_data *v3d_ra = data;

        /* Choose an accumulator if possible (I think it's lower power than
         * phys regs), but round-robin through them to give post-RA
         * instruction selection more options.
         */
        for (int i = 0; i < ACC_COUNT; i++) {
                int acc_off = (v3d_ra->next_acc + i) % ACC_COUNT;
                int acc = ACC_INDEX + acc_off;

                if (BITSET_TEST(regs, acc)) {
                        v3d_ra->next_acc = acc_off + 1;
                        return acc;
                }
        }

        for (int i = 0; i < PHYS_COUNT; i++) {
                int phys_off = (v3d_ra->next_phys + i) % PHYS_COUNT;
                int phys = PHYS_INDEX + phys_off;

                if (BITSET_TEST(regs, phys)) {
                        v3d_ra->next_phys = phys_off + 1;
                        return phys;
                }
        }

        unreachable("RA must pass us at least one possible reg.");
}

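/* Builds the compiler's RA register set once at compiler initialization.
 * For each threading level we make two classes: one that can use either the
 * phys regfile or the accumulators, and a phys-only one for values that
 * have to live in the regfile (SFU/LDVPM results, values live across a
 * thrsw).  Higher threading levels see a smaller slice of the regfile
 * (PHYS_COUNT >> threads).
 */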
bool
vir_init_reg_sets(struct v3d_compiler *compiler)
{
        /* Allocate up to 3 regfile classes, for the ways the physical
         * register file can be divided up for fragment shader threading.
         */
        int max_thread_index = (compiler->devinfo->ver >= 40 ? 2 : 3);

        compiler->regs = ra_alloc_reg_set(compiler, PHYS_INDEX + PHYS_COUNT,
                                          true);
        if (!compiler->regs)
                return false;

        for (int threads = 0; threads < max_thread_index; threads++) {
                compiler->reg_class_phys_or_acc[threads] =
                        ra_alloc_reg_class(compiler->regs);
                compiler->reg_class_phys[threads] =
                        ra_alloc_reg_class(compiler->regs);

                for (int i = PHYS_INDEX;
                     i < PHYS_INDEX + (PHYS_COUNT >> threads); i++) {
                        ra_class_add_reg(compiler->regs,
                                         compiler->reg_class_phys_or_acc[threads], i);
                        ra_class_add_reg(compiler->regs,
                                         compiler->reg_class_phys[threads], i);
                }

                for (int i = ACC_INDEX + 0; i < ACC_INDEX + ACC_COUNT; i++) {
                        ra_class_add_reg(compiler->regs,
                                         compiler->reg_class_phys_or_acc[threads], i);
                }
        }

        ra_set_finalize(compiler->regs, NULL);

        return true;
}

struct node_to_temp_map {
        uint32_t temp;
        uint32_t priority;
};

static int
node_to_temp_priority(const void *in_a, const void *in_b)
{
        const struct node_to_temp_map *a = in_a;
        const struct node_to_temp_map *b = in_b;

        return a->priority - b->priority;
}

#define CLASS_BIT_PHYS   (1 << 0)
#define CLASS_BIT_R0_R2  (1 << 1)
#define CLASS_BIT_R3     (1 << 2)
#define CLASS_BIT_R4     (1 << 3)

/**
 * Returns a mapping from QFILE_TEMP indices to struct qpu_regs.
 *
 * The return value should be freed by the caller.
 */
struct qpu_reg *
v3d_register_allocate(struct v3d_compile *c, bool *spilled)
{
        struct node_to_temp_map map[c->num_temps];
        uint32_t temp_to_node[c->num_temps];
        uint8_t class_bits[c->num_temps];
        struct qpu_reg *temp_registers = calloc(c->num_temps,
                                                sizeof(*temp_registers));
        int acc_nodes[ACC_COUNT];
        struct v3d_ra_select_callback_data callback_data = {
                .next_acc = 0,
                /* Start at RF3, to try to keep the TLB writes from using
                 * RF0-2.
                 */
                .next_phys = 3,
        };

        *spilled = false;

        vir_calculate_live_intervals(c);

        /* Convert 1, 2, 4 threads to 0, 1, 2 index.
         *
         * V3D 4.x has double the physical register space, so 64 physical regs
         * are available at both 1x and 2x threading, and 4x has 32.
         */
        int thread_index = ffs(c->threads) - 1;
        if (c->devinfo->ver >= 40) {
                if (thread_index >= 1)
                        thread_index--;
        }

        struct ra_graph *g = ra_alloc_interference_graph(c->compiler->regs,
                                                         c->num_temps +
                                                         ARRAY_SIZE(acc_nodes));
        ra_set_select_reg_callback(g, v3d_ra_select_callback, &callback_data);

        /* Make some fixed nodes for the accumulators, which we will need to
         * interfere with when ops have implied r3/r4 writes or for the thread
         * switches.  We could represent these as classes for the nodes to
         * live in, but the classes take up a lot of memory to set up, so we
         * don't want to make too many.
         */
        for (int i = 0; i < ARRAY_SIZE(acc_nodes); i++) {
                acc_nodes[i] = c->num_temps + i;
                ra_set_node_reg(g, acc_nodes[i], ACC_INDEX + i);
        }

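        /* Build the temp<->node mapping.  Temps are sorted by live-range
         * length (the "priority"), so longer-lived temps end up on
         * higher-numbered nodes; map[] goes node -> temp and temp_to_node[]
         * goes temp -> node.
         */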
        for (uint32_t i = 0; i < c->num_temps; i++) {
                map[i].temp = i;
                map[i].priority = c->temp_end[i] - c->temp_start[i];
        }
        qsort(map, c->num_temps, sizeof(map[0]), node_to_temp_priority);
        for (uint32_t i = 0; i < c->num_temps; i++) {
                temp_to_node[map[i].temp] = i;
        }

        /* Figure out our register classes and preallocated registers.  We
         * start with any temp being able to be in any file, then instructions
         * incrementally remove bits that the temp definitely can't be in.
         */
        memset(class_bits,
               CLASS_BIT_PHYS | CLASS_BIT_R0_R2 | CLASS_BIT_R3 | CLASS_BIT_R4,
               sizeof(class_bits));

        int ip = 0;
        vir_for_each_inst_inorder(inst, c) {
                /* If the instruction writes r3/r4 (and optionally moves its
                 * result to a temp), nothing else can be stored in r3/r4
                 * across it.
                 */
                if (vir_writes_r3(c->devinfo, inst)) {
                        for (int i = 0; i < c->num_temps; i++) {
                                if (c->temp_start[i] < ip &&
                                    c->temp_end[i] > ip) {
                                        ra_add_node_interference(g,
                                                                 temp_to_node[i],
                                                                 acc_nodes[3]);
                                }
                        }
                }
                if (vir_writes_r4(c->devinfo, inst)) {
                        for (int i = 0; i < c->num_temps; i++) {
                                if (c->temp_start[i] < ip &&
                                    c->temp_end[i] > ip) {
                                        ra_add_node_interference(g,
                                                                 temp_to_node[i],
                                                                 acc_nodes[4]);
                                }
                        }
                }

                if (inst->qpu.type == V3D_QPU_INSTR_TYPE_ALU) {
                        switch (inst->qpu.alu.add.op) {
                        case V3D_QPU_A_LDVPMV_IN:
                        case V3D_QPU_A_LDVPMV_OUT:
                        case V3D_QPU_A_LDVPMD_IN:
                        case V3D_QPU_A_LDVPMD_OUT:
                        case V3D_QPU_A_LDVPMP:
                        case V3D_QPU_A_LDVPMG_IN:
                        case V3D_QPU_A_LDVPMG_OUT:
                                /* LDVPMs only store to temps (the MA flag
                                 * decides whether the LDVPM is in or out)
                                 */
                                assert(inst->dst.file == QFILE_TEMP);
                                class_bits[inst->dst.index] &= CLASS_BIT_PHYS;
                                break;

                        case V3D_QPU_A_RECIP:
                        case V3D_QPU_A_RSQRT:
                        case V3D_QPU_A_EXP:
                        case V3D_QPU_A_LOG:
                        case V3D_QPU_A_SIN:
                        case V3D_QPU_A_RSQRT2:
                                /* The SFU instructions write directly to the
                                 * phys regfile.
                                 */
                                assert(inst->dst.file == QFILE_TEMP);
                                class_bits[inst->dst.index] &= CLASS_BIT_PHYS;
                                break;

                        default:
                                break;
                        }
                }

                if (inst->src[0].file == QFILE_REG) {
                        switch (inst->src[0].index) {
                        case 0:
                        case 1:
                        case 2:
                        case 3:
                                /* Payload setup instructions: Force allocate
                                 * the dst to the given register (so the MOV
                                 * will disappear).
                                 */
                                assert(inst->qpu.alu.mul.op == V3D_QPU_M_MOV);
                                assert(inst->dst.file == QFILE_TEMP);
                                ra_set_node_reg(g,
                                                temp_to_node[inst->dst.index],
                                                PHYS_INDEX +
                                                inst->src[0].index);
                                break;
                        }
                }

                if (inst->qpu.sig.thrsw) {
                        /* All accumulators are invalidated across a thread
                         * switch.
                         */
                        for (int i = 0; i < c->num_temps; i++) {
                                if (c->temp_start[i] < ip && c->temp_end[i] > ip)
                                        class_bits[i] &= CLASS_BIT_PHYS;
                        }
                }

                ip++;
        }

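        /* Apply the restrictions we accumulated: a temp that lost any
         * accumulator bit has to go in the phys-only class, otherwise it can
         * use either file.
         */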
        for (uint32_t i = 0; i < c->num_temps; i++) {
                if (class_bits[i] == CLASS_BIT_PHYS) {
                        ra_set_node_class(g, temp_to_node[i],
                                          c->compiler->reg_class_phys[thread_index]);
                } else {
                        assert(class_bits[i] == (CLASS_BIT_PHYS |
                                                 CLASS_BIT_R0_R2 |
                                                 CLASS_BIT_R3 |
                                                 CLASS_BIT_R4));
                        ra_set_node_class(g, temp_to_node[i],
                                          c->compiler->reg_class_phys_or_acc[thread_index]);
                }
        }

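        /* Any two temps whose live ranges overlap can't share a register. */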
        for (uint32_t i = 0; i < c->num_temps; i++) {
                for (uint32_t j = i + 1; j < c->num_temps; j++) {
                        if (!(c->temp_start[i] >= c->temp_end[j] ||
                              c->temp_start[j] >= c->temp_end[i])) {
                                ra_add_node_interference(g,
                                                         temp_to_node[i],
                                                         temp_to_node[j]);
                        }
                }
        }

        /* Debug code to force a bit of register spilling, for running across
         * conformance tests to make sure that spilling works.
         */
        int force_register_spills = 0;
        if (c->spill_size < 16 * sizeof(uint32_t) * force_register_spills) {
                int node = v3d_choose_spill_node(c, g, temp_to_node);
                if (node != -1) {
                        v3d_spill_reg(c, map[node].temp);
                        ralloc_free(g);
                        free(temp_registers);
                        *spilled = true;
                        return NULL;
                }
        }

        bool ok = ra_allocate(g);
        if (!ok) {
                /* Try to spill, but only once we can't reduce threading any
                 * further (the caller retries at a lower thread count first).
                 */
                if (thread_index == 0) {
                        int node = v3d_choose_spill_node(c, g, temp_to_node);

                        if (node != -1) {
                                v3d_spill_reg(c, map[node].temp);
                                ralloc_free(g);
                                free(temp_registers);

                                /* Ask the outer loop to call back in. */
                                *spilled = true;
                                return NULL;
                        }
                }

                free(temp_registers);
                return NULL;
        }

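        /* Translate the RA's register numbers back into struct qpu_reg: low
         * numbers are the accumulators (magic register r0-r4 accesses), the
         * rest index the phys regfile.
         */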
        for (uint32_t i = 0; i < c->num_temps; i++) {
                int ra_reg = ra_get_node_reg(g, temp_to_node[i]);
                if (ra_reg < PHYS_INDEX) {
                        temp_registers[i].magic = true;
                        temp_registers[i].index = (V3D_QPU_WADDR_R0 +
                                                   ra_reg - ACC_INDEX);
                } else {
                        temp_registers[i].magic = false;
                        temp_registers[i].index = ra_reg - PHYS_INDEX;
                }

                /* If the value's never used, just write to the NOP register
                 * for clarity in debug output.
                 */
                if (c->temp_start[i] == c->temp_end[i]) {
                        temp_registers[i].magic = true;
                        temp_registers[i].index = V3D_QPU_WADDR_NOP;
                }
        }

        ralloc_free(g);

        if (V3D_DEBUG & V3D_DEBUG_SHADERDB) {
                fprintf(stderr, "SHADER-DB: %s prog %d/%d: %d spills\n",
                        vir_get_stage_name(c),
                        c->program_id, c->variant_id,
                        c->spills);

                fprintf(stderr, "SHADER-DB: %s prog %d/%d: %d fills\n",
                        vir_get_stage_name(c),
                        c->program_id, c->variant_id,
                        c->fills);
        }

        return temp_registers;
}