pan/bi: Ingest vecN directly (again)
mesa.git: src/panfrost/bifrost/bifrost_compile.c

/*
 * Copyright (C) 2020 Collabora Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors (Collabora):
 *      Alyssa Rosenzweig <alyssa.rosenzweig@collabora.com>
 */

#include "main/mtypes.h"
#include "compiler/glsl/glsl_to_nir.h"
#include "compiler/nir_types.h"
#include "main/imports.h"
#include "compiler/nir/nir_builder.h"

#include "disassemble.h"
#include "bifrost_compile.h"
#include "bifrost_nir.h"
#include "compiler.h"
#include "bi_quirks.h"
#include "bi_print.h"

static bi_block *emit_cf_list(bi_context *ctx, struct exec_list *list);
static bi_instruction *bi_emit_branch(bi_context *ctx);
static void bi_schedule_barrier(bi_context *ctx);

static void
emit_jump(bi_context *ctx, nir_jump_instr *instr)
{
        bi_instruction *branch = bi_emit_branch(ctx);

        switch (instr->type) {
        case nir_jump_break:
                branch->branch.target = ctx->break_block;
                break;
        case nir_jump_continue:
                branch->branch.target = ctx->continue_block;
                break;
        default:
                unreachable("Unhandled jump type");
        }

        pan_block_add_successor(&ctx->current_block->base, &branch->branch.target->base);
}

/* Gets a bytemask for a complete vecN write */
static unsigned
bi_mask_for_channels_32(unsigned i)
{
        return (1 << (4 * i)) - 1;
}
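
/* For example, a full vec3 of 32-bit channels spans 12 bytes, so
 * bi_mask_for_channels_32(3) == (1 << 12) - 1 == 0xFFF. */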

static bi_instruction
bi_load(enum bi_class T, nir_intrinsic_instr *instr)
{
        bi_instruction load = {
                .type = T,
                .writemask = bi_mask_for_channels_32(instr->num_components),
                .src = { BIR_INDEX_CONSTANT },
                .constant = { .u64 = nir_intrinsic_base(instr) },
        };

        const nir_intrinsic_info *info = &nir_intrinsic_infos[instr->intrinsic];

        if (info->has_dest)
                load.dest = bir_dest_index(&instr->dest);

        if (info->has_dest && info->index_map[NIR_INTRINSIC_TYPE] > 0)
                load.dest_type = nir_intrinsic_type(instr);

        nir_src *offset = nir_get_io_offset_src(instr);

        if (nir_src_is_const(*offset))
                load.constant.u64 += nir_src_as_uint(*offset);
        else
                load.src[0] = bir_src_index(offset);

        return load;
}
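
/* Note the offset handling above: a direct (constant) offset folds into the
 * 64-bit inline constant alongside the base, whereas an indirect offset
 * leaves the base alone and routes the NIR offset source through src[0] in
 * place of BIR_INDEX_CONSTANT. */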

static void
bi_emit_ld_vary(bi_context *ctx, nir_intrinsic_instr *instr)
{
        bi_instruction ins = bi_load(BI_LOAD_VAR, instr);
        ins.load_vary.interp_mode = BIFROST_INTERP_DEFAULT; /* TODO */
        ins.load_vary.reuse = false; /* TODO */
        ins.load_vary.flat = instr->intrinsic != nir_intrinsic_load_interpolated_input;
        ins.dest_type = nir_type_float | nir_dest_bit_size(instr->dest);

        if (nir_src_is_const(*nir_get_io_offset_src(instr))) {
                /* Zero it out for direct */
                ins.src[1] = BIR_INDEX_ZERO;
        } else {
                /* R61 contains sample mask stuff, TODO RA XXX */
                ins.src[1] = BIR_INDEX_REGISTER | 61;
        }

        bi_emit(ctx, ins);
}

static void
bi_emit_frag_out(bi_context *ctx, nir_intrinsic_instr *instr)
{
        if (!ctx->emitted_atest) {
                bi_instruction ins = {
                        .type = BI_ATEST,
                        .src = {
                                BIR_INDEX_REGISTER | 60 /* TODO: RA */,
                                bir_src_index(&instr->src[0])
                        },
                        .src_types = {
                                nir_type_uint32,
                                nir_type_float32
                        },
                        .swizzle = {
                                { 0 },
                                { 3, 0 } /* swizzle out the alpha */
                        },
                        .dest = BIR_INDEX_REGISTER | 60 /* TODO: RA */,
                        .dest_type = nir_type_uint32,
                        .writemask = 0xF
                };

                bi_emit(ctx, ins);
                bi_schedule_barrier(ctx);
                ctx->emitted_atest = true;
        }

        bi_instruction blend = {
                .type = BI_BLEND,
                .blend_location = nir_intrinsic_base(instr),
                .src = {
                        BIR_INDEX_REGISTER | 60 /* Can this be arbitrary? */,
                        bir_src_index(&instr->src[0])
                },
                .src_types = {
                        nir_type_uint32,
                        nir_type_float32,
                },
                .swizzle = {
                        { 0 },
                        { 0, 1, 2, 3 }
                },
                .dest = BIR_INDEX_REGISTER | 48 /* Looks like magic */,
                .dest_type = nir_type_uint32,
                .writemask = 0xF
        };

        bi_emit(ctx, blend);
        bi_schedule_barrier(ctx);
}
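
/* The fixed register indices above are provisional, pending real register
 * allocation for these data paths: R60 appears to carry the ATEST coverage
 * state and R48 the BLEND result, judging by the inline TODOs. */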

static bi_instruction
bi_load_with_r61(enum bi_class T, nir_intrinsic_instr *instr)
{
        bi_instruction ld = bi_load(T, instr);
        ld.src[1] = BIR_INDEX_REGISTER | 61; /* TODO: RA */
        ld.src[2] = BIR_INDEX_REGISTER | 62;
        ld.src[3] = 0;
        ld.src_types[1] = nir_type_uint32;
        ld.src_types[2] = nir_type_uint32;
        ld.src_types[3] = nir_intrinsic_type(instr);
        return ld;
}

static void
bi_emit_st_vary(bi_context *ctx, nir_intrinsic_instr *instr)
{
        bi_instruction address = bi_load_with_r61(BI_LOAD_VAR_ADDRESS, instr);
        address.dest = bi_make_temp(ctx);
        address.dest_type = nir_type_uint32;
        address.writemask = (1 << 12) - 1;

        bi_instruction st = {
                .type = BI_STORE_VAR,
                .src = {
                        bir_src_index(&instr->src[0]),
                        address.dest, address.dest, address.dest,
                },
                .src_types = {
                        nir_type_uint32,
                        nir_type_uint32, nir_type_uint32, nir_type_uint32,
                },
                .swizzle = {
                        { 0, 1, 2, 3 },
                        { 0 }, { 1 }, { 2 }
                },
                .store_channels = 4, /* TODO: WRITEMASK */
        };

        bi_emit(ctx, address);
        bi_emit(ctx, st);
}

static void
bi_emit_ld_uniform(bi_context *ctx, nir_intrinsic_instr *instr)
{
        bi_instruction ld = bi_load(BI_LOAD_UNIFORM, instr);
        ld.src[1] = BIR_INDEX_ZERO; /* TODO: UBO index */

        /* TODO: Indirect access, since we need to multiply by the element
         * size. I believe we can get this lowering automatically via
         * nir_lower_io (as mul instructions) with the proper options, but this
         * is TODO */
        assert(ld.src[0] & BIR_INDEX_CONSTANT);
        ld.constant.u64 += ctx->sysvals.sysval_count;
        ld.constant.u64 *= 16;

        bi_emit(ctx, ld);
}
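
/* Sketch of the uniform file layout implied here and in bi_emit_sysval
 * below: sysvals occupy the first sysval_count vec4 (16-byte) slots and
 * user uniforms follow, so user uniform N sits at byte (sysval_count + N) * 16. */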

static void
bi_emit_sysval(bi_context *ctx, nir_instr *instr,
               unsigned nr_components, unsigned offset)
{
        nir_dest nir_dest;

        /* Figure out which uniform this is */
        int sysval = panfrost_sysval_for_instr(instr, &nir_dest);
        void *val = _mesa_hash_table_u64_search(ctx->sysvals.sysval_to_id, sysval);

        /* Sysvals are prefix uniforms */
        unsigned uniform = ((uintptr_t) val) - 1;

        /* Emit the read itself -- this is never indirect */

        bi_instruction load = {
                .type = BI_LOAD_UNIFORM,
                .writemask = (1 << (nr_components * 4)) - 1,
                .src = { BIR_INDEX_CONSTANT, BIR_INDEX_ZERO },
                .constant = { (uniform * 16) + offset },
                .dest = bir_dest_index(&nir_dest),
                .dest_type = nir_type_uint32, /* TODO */
        };

        bi_emit(ctx, load);
}
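
/* The "- 1" above presumably undoes a "+ 1" bias applied when sysval IDs
 * were assigned, since _mesa_hash_table_u64_search returns NULL (zero) for
 * a missing key and slot 0 must stay distinguishable. */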

static void
emit_intrinsic(bi_context *ctx, nir_intrinsic_instr *instr)
{
        switch (instr->intrinsic) {
        case nir_intrinsic_load_barycentric_pixel:
                /* stub */
                break;
        case nir_intrinsic_load_interpolated_input:
        case nir_intrinsic_load_input:
                if (ctx->stage == MESA_SHADER_FRAGMENT)
                        bi_emit_ld_vary(ctx, instr);
                else if (ctx->stage == MESA_SHADER_VERTEX)
                        bi_emit(ctx, bi_load_with_r61(BI_LOAD_ATTR, instr));
                else {
                        unreachable("Unsupported shader stage");
                }
                break;

        case nir_intrinsic_store_output:
                if (ctx->stage == MESA_SHADER_FRAGMENT)
                        bi_emit_frag_out(ctx, instr);
                else if (ctx->stage == MESA_SHADER_VERTEX)
                        bi_emit_st_vary(ctx, instr);
                else
                        unreachable("Unsupported shader stage");
                break;

        case nir_intrinsic_load_uniform:
                bi_emit_ld_uniform(ctx, instr);
                break;

        case nir_intrinsic_load_ssbo_address:
                bi_emit_sysval(ctx, &instr->instr, 1, 0);
                break;

        case nir_intrinsic_get_buffer_size:
                bi_emit_sysval(ctx, &instr->instr, 1, 8);
                break;

        case nir_intrinsic_load_viewport_scale:
        case nir_intrinsic_load_viewport_offset:
        case nir_intrinsic_load_num_work_groups:
        case nir_intrinsic_load_sampler_lod_parameters_pan:
                bi_emit_sysval(ctx, &instr->instr, 3, 0);
                break;

        default:
                /* todo */
                break;
        }
}

static void
emit_load_const(bi_context *ctx, nir_load_const_instr *instr)
{
        /* Make sure we've been lowered */
        assert(instr->def.num_components == 1);

        bi_instruction move = {
                .type = BI_MOV,
                .dest = bir_ssa_index(&instr->def),
                .dest_type = instr->def.bit_size | nir_type_uint,
                .writemask = (1 << (instr->def.bit_size / 8)) - 1,
                .src = {
                        BIR_INDEX_CONSTANT
                },
                .constant = {
                        .u64 = nir_const_value_as_uint(instr->value[0], instr->def.bit_size)
                }
        };

        bi_emit(ctx, move);
}

#define BI_CASE_CMP(op) \
        case op##8: \
        case op##16: \
        case op##32: \

static enum bi_class
bi_class_for_nir_alu(nir_op op)
{
        switch (op) {
        case nir_op_iadd:
        case nir_op_fadd:
        case nir_op_fsub:
                return BI_ADD;
        case nir_op_isub:
                return BI_ISUB;

        BI_CASE_CMP(nir_op_flt)
        BI_CASE_CMP(nir_op_fge)
        BI_CASE_CMP(nir_op_feq)
        BI_CASE_CMP(nir_op_fne)
        BI_CASE_CMP(nir_op_ilt)
        BI_CASE_CMP(nir_op_ige)
        BI_CASE_CMP(nir_op_ieq)
        BI_CASE_CMP(nir_op_ine)
                return BI_CMP;

        case nir_op_b8csel:
        case nir_op_b16csel:
        case nir_op_b32csel:
                return BI_CSEL;

        case nir_op_i2i8:
        case nir_op_i2i16:
        case nir_op_i2i32:
        case nir_op_i2i64:
        case nir_op_u2u8:
        case nir_op_u2u16:
        case nir_op_u2u32:
        case nir_op_u2u64:
        case nir_op_f2i16:
        case nir_op_f2i32:
        case nir_op_f2i64:
        case nir_op_f2u16:
        case nir_op_f2u32:
        case nir_op_f2u64:
        case nir_op_i2f16:
        case nir_op_i2f32:
        case nir_op_i2f64:
        case nir_op_u2f16:
        case nir_op_u2f32:
        case nir_op_u2f64:
                return BI_CONVERT;

        case nir_op_vec2:
        case nir_op_vec3:
        case nir_op_vec4:
                return BI_COMBINE;

        case nir_op_vec8:
        case nir_op_vec16:
                unreachable("should've been lowered");

        case nir_op_ffma:
        case nir_op_fmul:
                return BI_FMA;

        case nir_op_imin:
        case nir_op_imax:
        case nir_op_umin:
        case nir_op_umax:
        case nir_op_fmin:
        case nir_op_fmax:
                return BI_MINMAX;

        case nir_op_fsat:
        case nir_op_fneg:
        case nir_op_fabs:
                return BI_FMOV;
        case nir_op_mov:
                return BI_MOV;

        case nir_op_frcp:
        case nir_op_frsq:
        case nir_op_fsin:
        case nir_op_fcos:
                return BI_SPECIAL;

        default:
                unreachable("Unknown ALU op");
        }
}
/* Gets a bi_cond for a given NIR comparison opcode. In soft mode, it returns
 * the sentinel BI_COND_ALWAYS if the opcode is not a recognized comparison
 * (useful for optimizations). In hard mode, an unrecognized opcode bails
 * (used for primary code generation). */

static enum bi_cond
bi_cond_for_nir(nir_op op, bool soft)
{
        switch (op) {
        BI_CASE_CMP(nir_op_flt)
        BI_CASE_CMP(nir_op_ilt)
                return BI_COND_LT;

        BI_CASE_CMP(nir_op_fge)
        BI_CASE_CMP(nir_op_ige)
                return BI_COND_GE;

        BI_CASE_CMP(nir_op_feq)
        BI_CASE_CMP(nir_op_ieq)
                return BI_COND_EQ;

        BI_CASE_CMP(nir_op_fne)
        BI_CASE_CMP(nir_op_ine)
                return BI_COND_NE;
        default:
                if (soft)
                        return BI_COND_ALWAYS;
                else
                        unreachable("Invalid compare");
        }
}

static void
bi_copy_src(bi_instruction *alu, nir_alu_instr *instr, unsigned i, unsigned to,
            unsigned *constants_left, unsigned *constant_shift)
{
        unsigned bits = nir_src_bit_size(instr->src[i].src);
        unsigned dest_bits = nir_dest_bit_size(instr->dest.dest);

        alu->src_types[to] = nir_op_infos[instr->op].input_types[i]
                | bits;

        /* Try to inline a constant */
        if (nir_src_is_const(instr->src[i].src) && *constants_left && (dest_bits == bits)) {
                alu->constant.u64 |=
                        (nir_src_as_uint(instr->src[i].src)) << *constant_shift;

                alu->src[to] = BIR_INDEX_CONSTANT | (*constant_shift);
                --(*constants_left);
                (*constant_shift) += dest_bits;
                return;
        }

        alu->src[to] = bir_src_index(&instr->src[i].src);

        /* We assert scalarization above */
        alu->swizzle[to][0] = instr->src[i].swizzle[0];
}
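
/* A worked example of the inlining path: for a 32-bit op with constant
 * sources 1.0f and 2.0f, the first call packs 0x3f800000 at bit 0 and the
 * second packs 0x40000000 at bit 32, giving constant.u64 =
 * 0x400000003f800000 with the sources encoded as BIR_INDEX_CONSTANT | 0
 * and BIR_INDEX_CONSTANT | 32 respectively. */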

static void
bi_fuse_csel_cond(bi_instruction *csel, nir_alu_src cond,
                  unsigned *constants_left, unsigned *constant_shift)
{
        /* Bail for vector weirdness */
        if (cond.swizzle[0] != 0)
                return;

        if (!cond.src.is_ssa)
                return;

        nir_ssa_def *def = cond.src.ssa;
        nir_instr *parent = def->parent_instr;

        if (parent->type != nir_instr_type_alu)
                return;

        nir_alu_instr *alu = nir_instr_as_alu(parent);

        /* Try to match a condition */
        enum bi_cond bcond = bi_cond_for_nir(alu->op, true);

        if (bcond == BI_COND_ALWAYS)
                return;

        /* We found one, let's fuse it in */
        csel->csel_cond = bcond;
        bi_copy_src(csel, alu, 0, 0, constants_left, constant_shift);
        bi_copy_src(csel, alu, 1, 3, constants_left, constant_shift);
}
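
/* For instance, given NIR along the lines of
 *
 *    ssa_2 = flt32 ssa_0, ssa_1
 *    ssa_3 = b32csel ssa_2, ssa_x, ssa_y
 *
 * the comparison folds into the select: ssa_0 and ssa_1 land in sources 0
 * and 3 with csel_cond = BI_COND_LT, the select presumably evaluating
 * (src0 LT src3) ? src1 : src2 as a single instruction. */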

static void
emit_alu(bi_context *ctx, nir_alu_instr *instr)
{
        /* Assume it's something we can handle normally */
        bi_instruction alu = {
                .type = bi_class_for_nir_alu(instr->op),
                .dest = bir_dest_index(&instr->dest.dest),
                .dest_type = nir_op_infos[instr->op].output_type
                        | nir_dest_bit_size(instr->dest.dest),
        };

        /* TODO: Implement lowering of special functions for older Bifrost */
        assert((alu.type != BI_SPECIAL) || !(ctx->quirks & BIFROST_NO_FAST_OP));

        if (instr->dest.dest.is_ssa) {
                /* Construct a writemask */
                unsigned bits_per_comp = instr->dest.dest.ssa.bit_size;
                unsigned comps = instr->dest.dest.ssa.num_components;

                if (alu.type != BI_COMBINE)
                        assert(comps == 1);

                unsigned bits = bits_per_comp * comps;
                unsigned bytes = bits / 8;
                alu.writemask = (1 << bytes) - 1;
        } else {
                unsigned comp_mask = instr->dest.write_mask;

                alu.writemask = pan_to_bytemask(nir_dest_bit_size(instr->dest.dest),
                                                comp_mask);
        }
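
        /* e.g. a scalar 32-bit result gives writemask 0xF (4 bytes), while
         * a BI_COMBINE producing a 32-bit vec4 gives 0xFFFF (16 bytes) */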

        /* We inline constants as we go. This tracks how many constants have
         * been inlined, since we're limited to 64-bits of constants per
         * instruction */

        unsigned dest_bits = nir_dest_bit_size(instr->dest.dest);
        unsigned constants_left = (64 / dest_bits);
        unsigned constant_shift = 0;

        /* Copy sources */

        unsigned num_inputs = nir_op_infos[instr->op].num_inputs;
        assert(num_inputs <= ARRAY_SIZE(alu.src));

        for (unsigned i = 0; i < num_inputs; ++i)
                bi_copy_src(&alu, instr, i, i, &constants_left, &constant_shift);

        /* Op-specific fixup */
        switch (instr->op) {
        case nir_op_fmul:
                alu.src[2] = BIR_INDEX_ZERO; /* FMA */
                break;
        case nir_op_fsat:
                alu.outmod = BIFROST_SAT; /* FMOV */
                break;
        case nir_op_fneg:
                alu.src_neg[0] = true; /* FMOV */
                break;
        case nir_op_fabs:
                alu.src_abs[0] = true; /* FMOV */
                break;
        case nir_op_fsub:
                alu.src_neg[1] = true; /* FADD */
                break;
        case nir_op_fmax:
        case nir_op_imax:
        case nir_op_umax:
                alu.op.minmax = BI_MINMAX_MAX; /* MINMAX */
                break;
        case nir_op_frcp:
                alu.op.special = BI_SPECIAL_FRCP;
                break;
        case nir_op_frsq:
                alu.op.special = BI_SPECIAL_FRSQ;
                break;
        case nir_op_fsin:
                alu.op.special = BI_SPECIAL_FSIN;
                break;
        case nir_op_fcos:
                alu.op.special = BI_SPECIAL_FCOS;
                break;
        BI_CASE_CMP(nir_op_flt)
        BI_CASE_CMP(nir_op_ilt)
        BI_CASE_CMP(nir_op_fge)
        BI_CASE_CMP(nir_op_ige)
        BI_CASE_CMP(nir_op_feq)
        BI_CASE_CMP(nir_op_ieq)
        BI_CASE_CMP(nir_op_fne)
        BI_CASE_CMP(nir_op_ine)
                alu.op.compare = bi_cond_for_nir(instr->op, false);
                break;
        default:
                break;
        }

        if (alu.type == BI_CSEL) {
                /* Default to csel3 */
                alu.csel_cond = BI_COND_NE;
                alu.src[3] = BIR_INDEX_ZERO;
                alu.src_types[3] = alu.src_types[0];

                bi_fuse_csel_cond(&alu, instr->src[0],
                                  &constants_left, &constant_shift);
        }

        bi_emit(ctx, alu);
}

static void
emit_instr(bi_context *ctx, struct nir_instr *instr)
{
        switch (instr->type) {
        case nir_instr_type_load_const:
                emit_load_const(ctx, nir_instr_as_load_const(instr));
                break;

        case nir_instr_type_intrinsic:
                emit_intrinsic(ctx, nir_instr_as_intrinsic(instr));
                break;

        case nir_instr_type_alu:
                emit_alu(ctx, nir_instr_as_alu(instr));
                break;

#if 0
        case nir_instr_type_tex:
                emit_tex(ctx, nir_instr_as_tex(instr));
                break;
#endif

        case nir_instr_type_jump:
                emit_jump(ctx, nir_instr_as_jump(instr));
                break;

        case nir_instr_type_ssa_undef:
                /* Spurious */
                break;

        default:
                //unreachable("Unhandled instruction type");
                break;
        }
}

static bi_block *
create_empty_block(bi_context *ctx)
{
        bi_block *blk = rzalloc(ctx, bi_block);

        blk->base.predecessors = _mesa_set_create(blk,
                                                  _mesa_hash_pointer,
                                                  _mesa_key_pointer_equal);

        blk->base.name = ctx->block_name_count++;

        return blk;
}

static void
bi_schedule_barrier(bi_context *ctx)
{
        bi_block *temp = ctx->after_block;
        ctx->after_block = create_empty_block(ctx);
        list_addtail(&ctx->after_block->base.link, &ctx->blocks);
        list_inithead(&ctx->after_block->base.instructions);
        pan_block_add_successor(&ctx->current_block->base, &ctx->after_block->base);
        ctx->current_block = ctx->after_block;
        ctx->after_block = temp;
}

static bi_block *
emit_block(bi_context *ctx, nir_block *block)
{
        if (ctx->after_block) {
                ctx->current_block = ctx->after_block;
                ctx->after_block = NULL;
        } else {
                ctx->current_block = create_empty_block(ctx);
        }

        list_addtail(&ctx->current_block->base.link, &ctx->blocks);
        list_inithead(&ctx->current_block->base.instructions);

        nir_foreach_instr(instr, block) {
                emit_instr(ctx, instr);
                ++ctx->instruction_count;
        }

        return ctx->current_block;
}

/* Emits an unconditional branch to the end of the current block, returning a
 * pointer so the user can fill in details */

static bi_instruction *
bi_emit_branch(bi_context *ctx)
{
        bi_instruction branch = {
                .type = BI_BRANCH,
                .branch = {
                        .cond = BI_COND_ALWAYS
                }
        };

        return bi_emit(ctx, branch);
}

/* Sets a condition for a branch by examining the NIR condition. If we're
 * familiar with the condition, we unwrap it to fold it into the branch
 * instruction. Otherwise, we consume the condition directly. We generally
 * use 1-bit booleans, which lets us use small types for the conditions.
 */

static void
bi_set_branch_cond(bi_instruction *branch, nir_src *cond, bool invert)
{
        /* TODO: Try to unwrap instead of always bailing */
        branch->src[0] = bir_src_index(cond);
        branch->src[1] = BIR_INDEX_ZERO;
        branch->src_types[0] = branch->src_types[1] = nir_type_uint16;
        branch->branch.cond = invert ? BI_COND_EQ : BI_COND_NE;
}

static void
emit_if(bi_context *ctx, nir_if *nif)
{
        bi_block *before_block = ctx->current_block;

        /* Speculatively emit the branch, but we can't fill it in until later */
        bi_instruction *then_branch = bi_emit_branch(ctx);
        bi_set_branch_cond(then_branch, &nif->condition, true);

        /* Emit the two subblocks. */
        bi_block *then_block = emit_cf_list(ctx, &nif->then_list);
        bi_block *end_then_block = ctx->current_block;

        /* Emit a jump from the end of the then block to the end of the else */
        bi_instruction *then_exit = bi_emit_branch(ctx);

        /* Emit second block, and check if it's empty */

        int count_in = ctx->instruction_count;
        bi_block *else_block = emit_cf_list(ctx, &nif->else_list);
        bi_block *end_else_block = ctx->current_block;
        ctx->after_block = create_empty_block(ctx);

        /* Now that we have the subblocks emitted, fix up the branches */

        assert(then_block);
        assert(else_block);

        if (ctx->instruction_count == count_in) {
                /* The else block is empty, so don't emit an exit jump */
                bi_remove_instruction(then_exit);
                then_branch->branch.target = ctx->after_block;
        } else {
                then_branch->branch.target = else_block;
                then_exit->branch.target = ctx->after_block;
                pan_block_add_successor(&end_then_block->base, &then_exit->branch.target->base);
        }

        /* Wire up the successors */

        pan_block_add_successor(&before_block->base, &then_branch->branch.target->base); /* then_branch */

        pan_block_add_successor(&before_block->base, &then_block->base); /* fallthrough */
        pan_block_add_successor(&end_else_block->base, &ctx->after_block->base); /* fallthrough */
}
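
/* Shape of the CFG emitted above, as a sketch. Since the branch condition
 * is inverted, then_branch is taken when the NIR condition is false:
 *
 *    before_block --(then_branch)--> else_block
 *    before_block --(fallthrough)--> then_block
 *    then_block ----(then_exit)----> after_block
 *    else_block ---(fallthrough)---> after_block
 *
 * If the else list turns out empty, then_exit is removed and then_branch
 * targets after_block directly instead. */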

static void
emit_loop(bi_context *ctx, nir_loop *nloop)
{
        /* Remember where we are */
        bi_block *start_block = ctx->current_block;

        bi_block *saved_break = ctx->break_block;
        bi_block *saved_continue = ctx->continue_block;

        ctx->continue_block = create_empty_block(ctx);
        ctx->break_block = create_empty_block(ctx);
        ctx->after_block = ctx->continue_block;

        /* Emit the body itself */
        emit_cf_list(ctx, &nloop->body);

        /* Branch back to the top of the loop */
        bi_instruction *br_back = bi_emit_branch(ctx);
        br_back->branch.target = ctx->continue_block;
        pan_block_add_successor(&start_block->base, &ctx->continue_block->base);
        pan_block_add_successor(&ctx->current_block->base, &ctx->continue_block->base);

        ctx->after_block = ctx->break_block;

        /* Pop off */
        ctx->break_block = saved_break;
        ctx->continue_block = saved_continue;
        ++ctx->loop_count;
}
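
/* In the loop above, continue_block doubles as the loop header: it is
 * installed as after_block before the body is emitted, nir_jump_continue
 * branches back to it, nir_jump_break targets break_block, and br_back
 * closes the back edge unconditionally. */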

static bi_block *
emit_cf_list(bi_context *ctx, struct exec_list *list)
{
        bi_block *start_block = NULL;

        foreach_list_typed(nir_cf_node, node, node, list) {
                switch (node->type) {
                case nir_cf_node_block: {
                        bi_block *block = emit_block(ctx, nir_cf_node_as_block(node));

                        if (!start_block)
                                start_block = block;

                        break;
                }

                case nir_cf_node_if:
                        emit_if(ctx, nir_cf_node_as_if(node));
                        break;

                case nir_cf_node_loop:
                        emit_loop(ctx, nir_cf_node_as_loop(node));
                        break;

                default:
                        unreachable("Unknown control flow");
                }
        }

        return start_block;
}

static int
glsl_type_size(const struct glsl_type *type, bool bindless)
{
        return glsl_count_attribute_slots(type, false);
}

static void
bi_optimize_nir(nir_shader *nir)
{
        bool progress;
        unsigned lower_flrp = 16 | 32 | 64;

        NIR_PASS(progress, nir, nir_lower_regs_to_ssa);
        NIR_PASS(progress, nir, nir_lower_idiv, nir_lower_idiv_fast);

        nir_lower_tex_options lower_tex_options = {
                .lower_txs_lod = true,
                .lower_txp = ~0,
                .lower_tex_without_implicit_lod = true,
                .lower_txd = true,
        };

        NIR_PASS(progress, nir, nir_lower_tex, &lower_tex_options);
        NIR_PASS(progress, nir, nir_lower_alu_to_scalar, NULL, NULL);
        NIR_PASS(progress, nir, nir_lower_load_const_to_scalar);

        do {
                progress = false;

                NIR_PASS(progress, nir, nir_lower_var_copies);
                NIR_PASS(progress, nir, nir_lower_vars_to_ssa);

                NIR_PASS(progress, nir, nir_copy_prop);
                NIR_PASS(progress, nir, nir_opt_remove_phis);
                NIR_PASS(progress, nir, nir_opt_dce);
                NIR_PASS(progress, nir, nir_opt_dead_cf);
                NIR_PASS(progress, nir, nir_opt_cse);
                NIR_PASS(progress, nir, nir_opt_peephole_select, 64, false, true);
                NIR_PASS(progress, nir, nir_opt_algebraic);
                NIR_PASS(progress, nir, nir_opt_constant_folding);

                if (lower_flrp != 0) {
                        bool lower_flrp_progress = false;
                        NIR_PASS(lower_flrp_progress,
                                 nir,
                                 nir_lower_flrp,
                                 lower_flrp,
                                 false /* always_precise */,
                                 nir->options->lower_ffma);
                        if (lower_flrp_progress) {
                                NIR_PASS(progress, nir,
                                         nir_opt_constant_folding);
                                progress = true;
                        }

                        /* Nothing should rematerialize any flrps, so we only
                         * need to do this lowering once.
                         */
                        lower_flrp = 0;
                }

                NIR_PASS(progress, nir, nir_opt_undef);
                NIR_PASS(progress, nir, nir_opt_loop_unroll,
                         nir_var_shader_in |
                         nir_var_shader_out |
                         nir_var_function_temp);
        } while (progress);

        NIR_PASS(progress, nir, nir_opt_algebraic_late);
        NIR_PASS(progress, nir, nir_lower_bool_to_int32);
        NIR_PASS(progress, nir, bifrost_nir_lower_algebraic_late);
        NIR_PASS(progress, nir, nir_lower_alu_to_scalar, NULL, NULL);
        NIR_PASS(progress, nir, nir_lower_load_const_to_scalar);

        /* Take us out of SSA */
        NIR_PASS(progress, nir, nir_lower_locals_to_regs);
        NIR_PASS(progress, nir, nir_move_vec_src_uses_to_dest);
        NIR_PASS(progress, nir, nir_convert_from_ssa, true);
}

void
bifrost_compile_shader_nir(nir_shader *nir, panfrost_program *program, unsigned product_id)
{
        bi_context *ctx = rzalloc(NULL, bi_context);
        ctx->nir = nir;
        ctx->stage = nir->info.stage;
        ctx->quirks = bifrost_get_quirks(product_id);
        list_inithead(&ctx->blocks);

        /* Lower gl_Position pre-optimisation, but after lowering vars to ssa
         * (so we don't accidentally duplicate the epilogue since mesa/st has
         * messed with our I/O quite a bit already) */

        NIR_PASS_V(nir, nir_lower_vars_to_ssa);

        if (ctx->stage == MESA_SHADER_VERTEX) {
                NIR_PASS_V(nir, nir_lower_viewport_transform);
                NIR_PASS_V(nir, nir_lower_point_size, 1.0, 1024.0);
        }

        NIR_PASS_V(nir, nir_split_var_copies);
        NIR_PASS_V(nir, nir_lower_global_vars_to_local);
        NIR_PASS_V(nir, nir_lower_var_copies);
        NIR_PASS_V(nir, nir_lower_vars_to_ssa);
        NIR_PASS_V(nir, nir_lower_io, nir_var_all, glsl_type_size, 0);
        NIR_PASS_V(nir, nir_lower_ssbo);

        bi_optimize_nir(nir);
        nir_print_shader(nir, stdout);

        panfrost_nir_assign_sysvals(&ctx->sysvals, nir);
        program->sysval_count = ctx->sysvals.sysval_count;
        memcpy(program->sysvals, ctx->sysvals.sysvals, sizeof(ctx->sysvals.sysvals[0]) * ctx->sysvals.sysval_count);

        nir_foreach_function(func, nir) {
                if (!func->impl)
                        continue;

                ctx->impl = func->impl;
                emit_cf_list(ctx, &func->impl->body);
                break; /* TODO: Multi-function shaders */
        }

        bi_foreach_block(ctx, _block) {
                bi_block *block = (bi_block *) _block;
                bi_lower_combine(ctx, block);
        }

        bool progress = false;

        do {
                progress = false;

                bi_foreach_block(ctx, _block) {
                        bi_block *block = (bi_block *) _block;
                        progress |= bi_opt_dead_code_eliminate(ctx, block);
                }
        } while (progress);

        bi_print_shader(ctx, stdout);
        bi_schedule(ctx);
        bi_register_allocate(ctx);
        bi_print_shader(ctx, stdout);
        bi_pack(ctx, &program->compiled);
        disassemble_bifrost(stdout, program->compiled.data, program->compiled.size, true);

        ralloc_free(ctx);
}