bifrost: Add support for nir_op_inot
src/panfrost/bifrost/bifrost_compile.c
/*
 * Copyright (C) 2020 Collabora Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors (Collabora):
 *      Alyssa Rosenzweig <alyssa.rosenzweig@collabora.com>
 */

#include "main/mtypes.h"
#include "compiler/glsl/glsl_to_nir.h"
#include "compiler/nir_types.h"
#include "compiler/nir/nir_builder.h"
#include "util/u_debug.h"

#include "disassemble.h"
#include "bifrost_compile.h"
#include "bifrost_nir.h"
#include "compiler.h"
#include "bi_quirks.h"
#include "bi_print.h"

static const struct debug_named_value debug_options[] = {
        {"msgs", BIFROST_DBG_MSGS, "Print debug messages"},
        {"shaders", BIFROST_DBG_SHADERS, "Dump shaders in NIR and MIR"},
        DEBUG_NAMED_VALUE_END
};

DEBUG_GET_ONCE_FLAGS_OPTION(bifrost_debug, "BIFROST_MESA_DEBUG", debug_options, 0)
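
/* Usage sketch, assuming a Mesa build with this driver: setting
 *
 *    BIFROST_MESA_DEBUG=msgs,shaders
 *
 * in the environment enables both the DBG() messages and the NIR/MIR/
 * disassembly dumps guarded by BIFROST_DBG_SHADERS below. */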

int bifrost_debug = 0;

#define DBG(fmt, ...) \
        do { if (bifrost_debug & BIFROST_DBG_MSGS) \
                fprintf(stderr, "%s:%d: " fmt, \
                        __FUNCTION__, __LINE__, ##__VA_ARGS__); } while (0)

static bi_block *emit_cf_list(bi_context *ctx, struct exec_list *list);
static bi_instruction *bi_emit_branch(bi_context *ctx);

static void
emit_jump(bi_context *ctx, nir_jump_instr *instr)
{
        bi_instruction *branch = bi_emit_branch(ctx);

        switch (instr->type) {
        case nir_jump_break:
                branch->branch_target = ctx->break_block;
                break;
        case nir_jump_continue:
                branch->branch_target = ctx->continue_block;
                break;
        default:
                unreachable("Unhandled jump type");
        }

        pan_block_add_successor(&ctx->current_block->base, &branch->branch_target->base);
}

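/* Builds the skeleton of a load (attribute, varying, or uniform). The base
 * goes in a 64-bit inline constant; a constant offset is folded into that
 * constant, while an indirect offset takes over the first source slot. */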
static bi_instruction
bi_load(enum bi_class T, nir_intrinsic_instr *instr)
{
        bi_instruction load = {
                .type = T,
                .vector_channels = instr->num_components,
                .src = { BIR_INDEX_CONSTANT },
                .src_types = { nir_type_uint32 },
                .constant = { .u64 = nir_intrinsic_base(instr) },
        };

        const nir_intrinsic_info *info = &nir_intrinsic_infos[instr->intrinsic];

        if (info->has_dest)
                load.dest = pan_dest_index(&instr->dest);

        if (info->has_dest && info->index_map[NIR_INTRINSIC_TYPE] > 0)
                load.dest_type = nir_intrinsic_type(instr);

        nir_src *offset = nir_get_io_offset_src(instr);

        if (nir_src_is_const(*offset))
                load.constant.u64 += nir_src_as_uint(*offset);
        else
                load.src[0] = pan_src_index(offset);

        return load;
}

static void
bi_emit_ld_vary(bi_context *ctx, nir_intrinsic_instr *instr)
{
        bi_instruction ins = bi_load(BI_LOAD_VAR, instr);
        ins.load_vary.interp_mode = BIFROST_INTERP_DEFAULT; /* TODO */
        ins.load_vary.reuse = false; /* TODO */
        ins.load_vary.flat = instr->intrinsic != nir_intrinsic_load_interpolated_input;
        ins.dest_type = nir_type_float | nir_dest_bit_size(instr->dest);

        if (nir_src_is_const(*nir_get_io_offset_src(instr))) {
                /* Zero it out for direct */
                ins.src[1] = BIR_INDEX_ZERO;
        } else {
                /* R61 contains sample mask stuff, TODO RA XXX */
                ins.src[1] = BIR_INDEX_REGISTER | 61;
        }

        bi_emit(ctx, ins);
}

static void
bi_emit_frag_out(bi_context *ctx, nir_intrinsic_instr *instr)
{
        if (!ctx->emitted_atest) {
                bi_instruction ins = {
                        .type = BI_ATEST,
                        .src = {
                                BIR_INDEX_REGISTER | 60 /* TODO: RA */,
                                pan_src_index(&instr->src[0])
                        },
                        .src_types = {
                                nir_type_uint32,
                                nir_intrinsic_type(instr)
                        },
                        .swizzle = {
                                { 0 },
                                { 3, 0 } /* swizzle out the alpha */
                        },
                        .dest = BIR_INDEX_REGISTER | 60 /* TODO: RA */,
                        .dest_type = nir_type_uint32,
                };

                bi_emit(ctx, ins);
                ctx->emitted_atest = true;
        }

        bi_instruction blend = {
                .type = BI_BLEND,
                .blend_location = nir_intrinsic_base(instr),
                .src = {
                        pan_src_index(&instr->src[0]),
                        BIR_INDEX_REGISTER | 60 /* Can this be arbitrary? */,
                },
                .src_types = {
                        nir_intrinsic_type(instr),
                        nir_type_uint32
                },
                .swizzle = {
                        { 0, 1, 2, 3 },
                        { 0 }
                },
                .dest = BIR_INDEX_REGISTER | 48 /* Looks like magic */,
                .dest_type = nir_type_uint32,
                .vector_channels = 4
        };

        assert(blend.blend_location < BIFROST_MAX_RENDER_TARGET_COUNT);
        assert(ctx->blend_types);
        assert(blend.src_types[0]);
        ctx->blend_types[blend.blend_location] = blend.src_types[0];

        bi_emit(ctx, blend);
}

static bi_instruction
bi_load_with_r61(enum bi_class T, nir_intrinsic_instr *instr)
{
        bi_instruction ld = bi_load(T, instr);
        ld.src[1] = BIR_INDEX_REGISTER | 61; /* TODO: RA */
        ld.src[2] = BIR_INDEX_REGISTER | 62;
        ld.src[3] = 0;
        ld.src_types[1] = nir_type_uint32;
        ld.src_types[2] = nir_type_uint32;
        ld.src_types[3] = nir_intrinsic_type(instr);
        return ld;
}

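/* Varying stores first compute the destination address with LD_VAR_ADDRESS
 * into a 3-channel temporary; the store then consumes one address channel
 * per src[1..3] slot, with the value to be stored in src[0]. */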
static void
bi_emit_st_vary(bi_context *ctx, nir_intrinsic_instr *instr)
{
        bi_instruction address = bi_load_with_r61(BI_LOAD_VAR_ADDRESS, instr);
        address.dest = bi_make_temp(ctx);
        address.dest_type = nir_type_uint32;
        address.vector_channels = 3;

        unsigned nr = nir_intrinsic_src_components(instr, 0);
        assert(nir_intrinsic_write_mask(instr) == ((1 << nr) - 1));

        bi_instruction st = {
                .type = BI_STORE_VAR,
                .src = {
                        pan_src_index(&instr->src[0]),
                        address.dest, address.dest, address.dest,
                },
                .src_types = {
                        nir_type_uint32,
                        nir_type_uint32, nir_type_uint32, nir_type_uint32,
                },
                .swizzle = {
                        { 0 },
                        { 0 }, { 1 }, { 2 }
                },
                .vector_channels = nr,
        };

        for (unsigned i = 0; i < nr; ++i)
                st.swizzle[0][i] = i;

        bi_emit(ctx, address);
        bi_emit(ctx, st);
}

static void
bi_emit_ld_uniform(bi_context *ctx, nir_intrinsic_instr *instr)
{
        bi_instruction ld = bi_load(BI_LOAD_UNIFORM, instr);
        ld.src[1] = BIR_INDEX_ZERO; /* TODO: UBO index */

        /* TODO: Indirect access, since we need to multiply by the element
         * size. I believe we can get this lowering automatically via
         * nir_lower_io (as mul instructions) with the proper options, but this
         * is TODO */
        assert(ld.src[0] & BIR_INDEX_CONSTANT);
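
        /* User uniforms are laid out after the sysvals, and each uniform
         * slot is 16 bytes (a vec4), so skip past the sysvals and scale the
         * slot index to a byte offset */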
        ld.constant.u64 += ctx->sysvals.sysval_count;
        ld.constant.u64 *= 16;

        bi_emit(ctx, ld);
}

static void
bi_emit_sysval(bi_context *ctx, nir_instr *instr,
               unsigned nr_components, unsigned offset)
{
        nir_dest nir_dest;

        /* Figure out which uniform this is */
        int sysval = panfrost_sysval_for_instr(instr, &nir_dest);
        void *val = _mesa_hash_table_u64_search(ctx->sysvals.sysval_to_id, sysval);

        /* Sysvals are prefix uniforms */
        unsigned uniform = ((uintptr_t) val) - 1;

        /* Emit the read itself -- this is never indirect */

        bi_instruction load = {
                .type = BI_LOAD_UNIFORM,
                .vector_channels = nr_components,
                .src = { BIR_INDEX_CONSTANT, BIR_INDEX_ZERO },
                .src_types = { nir_type_uint32, nir_type_uint32 },
                .constant = { (uniform * 16) + offset },
                .dest = pan_dest_index(&nir_dest),
                .dest_type = nir_type_uint32, /* TODO */
        };

        bi_emit(ctx, load);
}

/* gl_FragCoord.xy = u16_to_f32(R59.xy) + 0.5
 * gl_FragCoord.z = ld_vary(fragz)
 * gl_FragCoord.w = ld_vary(fragw)
 */

static void
bi_emit_ld_frag_coord(bi_context *ctx, nir_intrinsic_instr *instr)
{
        /* Future-proofing for mediump fragcoord at some point... */
        nir_alu_type T = nir_type_float32;

        /* First, sketch a combine */
        bi_instruction combine = {
                .type = BI_COMBINE,
                .dest_type = nir_type_uint32,
                .dest = pan_dest_index(&instr->dest),
                .src_types = { T, T, T, T },
        };

        /* Second, handle xy */
        for (unsigned i = 0; i < 2; ++i) {
                bi_instruction conv = {
                        .type = BI_CONVERT,
                        .dest_type = T,
                        .dest = bi_make_temp(ctx),
                        .src = {
                                /* TODO: RA XXX */
                                BIR_INDEX_REGISTER | 59
                        },
                        .src_types = { nir_type_uint16 },
                        .swizzle = { { i } }
                };

                bi_instruction add = {
                        .type = BI_ADD,
                        .dest_type = T,
                        .dest = bi_make_temp(ctx),
                        .src = { conv.dest, BIR_INDEX_CONSTANT },
                        .src_types = { T, T },
                };

                float half = 0.5;
                memcpy(&add.constant.u32, &half, sizeof(float));

                bi_emit(ctx, conv);
                bi_emit(ctx, add);

                combine.src[i] = add.dest;
        }

        /* Third, zw */
        for (unsigned i = 0; i < 2; ++i) {
                bi_instruction load = {
                        .type = BI_LOAD_VAR,
                        .load_vary = {
                                .interp_mode = BIFROST_INTERP_DEFAULT,
                                .reuse = false,
                                .flat = true
                        },
                        .vector_channels = 1,
                        .dest_type = nir_type_float32,
                        .dest = bi_make_temp(ctx),
                        .src = { BIR_INDEX_CONSTANT, BIR_INDEX_ZERO },
                        .src_types = { nir_type_uint32, nir_type_uint32 },
                        .constant = {
                                .u32 = (i == 0) ? BIFROST_FRAGZ : BIFROST_FRAGW
                        }
                };

                bi_emit(ctx, load);

                combine.src[i + 2] = load.dest;
        }

        /* Finally, emit the combine */
        bi_emit(ctx, combine);
}

static void
bi_emit_discard(bi_context *ctx, nir_intrinsic_instr *instr)
{
        /* Goofy lowering */
        bi_instruction discard = {
                .type = BI_DISCARD,
                .cond = BI_COND_EQ,
                .src_types = { nir_type_uint32, nir_type_uint32 },
                .src = { BIR_INDEX_ZERO, BIR_INDEX_ZERO },
        };

        bi_emit(ctx, discard);
}

static void
bi_fuse_cond(bi_instruction *csel, nir_alu_src cond,
             unsigned *constants_left, unsigned *constant_shift,
             unsigned comps, bool float_only);

static void
bi_emit_discard_if(bi_context *ctx, nir_intrinsic_instr *instr)
{
        nir_src cond = instr->src[0];
        nir_alu_type T = nir_type_uint | nir_src_bit_size(cond);

        bi_instruction discard = {
                .type = BI_DISCARD,
                .cond = BI_COND_NE,
                .src_types = { T, T },
                .src = {
                        pan_src_index(&cond),
                        BIR_INDEX_ZERO
                },
        };

        /* Try to fuse in the condition */
        unsigned constants_left = 1, constant_shift = 0;

        /* Scalar so no swizzle */
        nir_alu_src wrap = {
                .src = instr->src[0]
        };

        /* May or may not succeed but we're optimistic */
        bi_fuse_cond(&discard, wrap, &constants_left, &constant_shift, 1, true);

        bi_emit(ctx, discard);
}

static void
emit_intrinsic(bi_context *ctx, nir_intrinsic_instr *instr)
{
        switch (instr->intrinsic) {
        case nir_intrinsic_load_barycentric_pixel:
                /* stub */
                break;
        case nir_intrinsic_load_interpolated_input:
        case nir_intrinsic_load_input:
                if (ctx->stage == MESA_SHADER_FRAGMENT)
                        bi_emit_ld_vary(ctx, instr);
                else if (ctx->stage == MESA_SHADER_VERTEX)
                        bi_emit(ctx, bi_load_with_r61(BI_LOAD_ATTR, instr));
                else {
                        unreachable("Unsupported shader stage");
                }
                break;

        case nir_intrinsic_store_output:
                if (ctx->stage == MESA_SHADER_FRAGMENT)
                        bi_emit_frag_out(ctx, instr);
                else if (ctx->stage == MESA_SHADER_VERTEX)
                        bi_emit_st_vary(ctx, instr);
                else
                        unreachable("Unsupported shader stage");
                break;

        case nir_intrinsic_load_uniform:
                bi_emit_ld_uniform(ctx, instr);
                break;

        case nir_intrinsic_load_frag_coord:
                bi_emit_ld_frag_coord(ctx, instr);
                break;

        case nir_intrinsic_discard:
                bi_emit_discard(ctx, instr);
                break;

        case nir_intrinsic_discard_if:
                bi_emit_discard_if(ctx, instr);
                break;

        case nir_intrinsic_load_ssbo_address:
                bi_emit_sysval(ctx, &instr->instr, 1, 0);
                break;

        case nir_intrinsic_get_buffer_size:
                bi_emit_sysval(ctx, &instr->instr, 1, 8);
                break;

        case nir_intrinsic_load_viewport_scale:
        case nir_intrinsic_load_viewport_offset:
        case nir_intrinsic_load_num_work_groups:
        case nir_intrinsic_load_sampler_lod_parameters_pan:
                bi_emit_sysval(ctx, &instr->instr, 3, 0);
                break;

        default:
                unreachable("Unknown intrinsic");
                break;
        }
}

static void
emit_load_const(bi_context *ctx, nir_load_const_instr *instr)
{
        /* Make sure we've been lowered */
        assert(instr->def.num_components <= (32 / instr->def.bit_size));

        /* Accumulate all the channels of the constant, as if we did an
         * implicit SEL over them */
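        /* e.g. a vec2 of 16-bit constants (0x1234, 0xABCD) accumulates to
         * acc = 0xABCD1234, emitted as a single 32-bit inline constant */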
        uint32_t acc = 0;

        for (unsigned i = 0; i < instr->def.num_components; ++i) {
                unsigned v = nir_const_value_as_uint(instr->value[i], instr->def.bit_size);
                acc |= (v << (i * instr->def.bit_size));
        }

        bi_instruction move = {
                .type = BI_MOV,
                .dest = pan_ssa_index(&instr->def),
                .dest_type = nir_type_uint32,
                .src = {
                        BIR_INDEX_CONSTANT
                },
                .src_types = {
                        nir_type_uint32,
                },
                .constant = {
                        .u32 = acc
                }
        };

        bi_emit(ctx, move);
}

#define BI_CASE_CMP(op) \
        case op##8: \
        case op##16: \
        case op##32: \

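/* e.g. BI_CASE_CMP(nir_op_flt) expands to case labels for nir_op_flt8,
 * nir_op_flt16, and nir_op_flt32 in one go */
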
static enum bi_class
bi_class_for_nir_alu(nir_op op)
{
        switch (op) {
        case nir_op_fadd:
        case nir_op_fsub:
                return BI_ADD;

        case nir_op_iadd:
        case nir_op_isub:
                return BI_IMATH;

        case nir_op_iand:
        case nir_op_ior:
        case nir_op_ixor:
        case nir_op_inot:
                return BI_BITWISE;

        BI_CASE_CMP(nir_op_flt)
        BI_CASE_CMP(nir_op_fge)
        BI_CASE_CMP(nir_op_feq)
        BI_CASE_CMP(nir_op_fne)
        BI_CASE_CMP(nir_op_ilt)
        BI_CASE_CMP(nir_op_ige)
        BI_CASE_CMP(nir_op_ieq)
        BI_CASE_CMP(nir_op_ine)
                return BI_CMP;

        case nir_op_b8csel:
        case nir_op_b16csel:
        case nir_op_b32csel:
                return BI_CSEL;

        case nir_op_i2i8:
        case nir_op_i2i16:
        case nir_op_i2i32:
        case nir_op_i2i64:
        case nir_op_u2u8:
        case nir_op_u2u16:
        case nir_op_u2u32:
        case nir_op_u2u64:
        case nir_op_f2i16:
        case nir_op_f2i32:
        case nir_op_f2i64:
        case nir_op_f2u16:
        case nir_op_f2u32:
        case nir_op_f2u64:
        case nir_op_i2f16:
        case nir_op_i2f32:
        case nir_op_i2f64:
        case nir_op_u2f16:
        case nir_op_u2f32:
        case nir_op_u2f64:
        case nir_op_f2f16:
        case nir_op_f2f32:
        case nir_op_f2f64:
        case nir_op_f2fmp:
                return BI_CONVERT;

        case nir_op_vec2:
        case nir_op_vec3:
        case nir_op_vec4:
                return BI_COMBINE;

        case nir_op_vec8:
        case nir_op_vec16:
                unreachable("should've been lowered");

        case nir_op_ffma:
        case nir_op_fmul:
                return BI_FMA;

        case nir_op_imin:
        case nir_op_imax:
        case nir_op_umin:
        case nir_op_umax:
        case nir_op_fmin:
        case nir_op_fmax:
                return BI_MINMAX;

        case nir_op_fsat:
        case nir_op_fneg:
        case nir_op_fabs:
                return BI_FMOV;
        case nir_op_mov:
                return BI_MOV;

        case nir_op_fround_even:
        case nir_op_fceil:
        case nir_op_ffloor:
        case nir_op_ftrunc:
                return BI_ROUND;

        case nir_op_frcp:
        case nir_op_frsq:
                return BI_SPECIAL;

        default:
                unreachable("Unknown ALU op");
        }
}

/* Gets a bi_cond for a given NIR comparison opcode. In soft mode, it will
 * return BI_COND_ALWAYS as a sentinel if it fails to do so (when used for
 * optimizations). Otherwise it will bail (when used for primary code
 * generation). */

static enum bi_cond
bi_cond_for_nir(nir_op op, bool soft)
{
        switch (op) {
        BI_CASE_CMP(nir_op_flt)
        BI_CASE_CMP(nir_op_ilt)
                return BI_COND_LT;

        BI_CASE_CMP(nir_op_fge)
        BI_CASE_CMP(nir_op_ige)
                return BI_COND_GE;

        BI_CASE_CMP(nir_op_feq)
        BI_CASE_CMP(nir_op_ieq)
                return BI_COND_EQ;

        BI_CASE_CMP(nir_op_fne)
        BI_CASE_CMP(nir_op_ine)
                return BI_COND_NE;
        default:
                if (soft)
                        return BI_COND_ALWAYS;
                else
                        unreachable("Invalid compare");
        }
}

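/* Copies a NIR ALU source into the given source slot, inlining small
 * constants where possible. Each instruction carries one 64-bit constant
 * word; constant_shift advances in (at least) 32-bit steps so each inlined
 * value lands in the lo or hi half, and identical constants are reused. */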
static void
bi_copy_src(bi_instruction *alu, nir_alu_instr *instr, unsigned i, unsigned to,
            unsigned *constants_left, unsigned *constant_shift, unsigned comps)
{
        unsigned bits = nir_src_bit_size(instr->src[i].src);
        unsigned dest_bits = nir_dest_bit_size(instr->dest.dest);

        alu->src_types[to] = nir_op_infos[instr->op].input_types[i] | bits;

        /* Try to inline a constant */
        if (nir_src_is_const(instr->src[i].src) && *constants_left && (dest_bits == bits)) {
                uint64_t mask = (1ull << dest_bits) - 1;
                uint64_t cons = nir_src_as_uint(instr->src[i].src);

                /* Try to reuse a constant */
                for (unsigned i = 0; i < (*constant_shift); i += dest_bits) {
                        if (((alu->constant.u64 >> i) & mask) == cons) {
                                alu->src[to] = BIR_INDEX_CONSTANT | i;
                                return;
                        }
                }

                alu->constant.u64 |= cons << *constant_shift;
                alu->src[to] = BIR_INDEX_CONSTANT | (*constant_shift);
                --(*constants_left);
                (*constant_shift) += MAX2(dest_bits, 32); /* lo/hi */
                return;
        }

        alu->src[to] = pan_src_index(&instr->src[i].src);

        /* Copy swizzle for all vectored components, replicating last component
         * to fill undersized */

        unsigned vec = alu->type == BI_COMBINE ? 1 :
                MAX2(1, 32 / dest_bits);

        for (unsigned j = 0; j < vec; ++j)
                alu->swizzle[to][j] = instr->src[i].swizzle[MIN2(j, comps - 1)];
}

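/* Tries to fuse the comparison feeding a boolean condition (e.g. the flt
 * behind a csel or discard_if) directly into the consuming instruction, so
 * that something like discard_if(a < b) becomes a single fused
 * compare-and-discard instead of a comparison plus a test against zero. */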
static void
bi_fuse_cond(bi_instruction *csel, nir_alu_src cond,
             unsigned *constants_left, unsigned *constant_shift,
             unsigned comps, bool float_only)
{
        /* Bail for vector weirdness */
        if (cond.swizzle[0] != 0)
                return;

        if (!cond.src.is_ssa)
                return;

        nir_ssa_def *def = cond.src.ssa;
        nir_instr *parent = def->parent_instr;

        if (parent->type != nir_instr_type_alu)
                return;

        nir_alu_instr *alu = nir_instr_as_alu(parent);

        /* Try to match a condition */
        enum bi_cond bcond = bi_cond_for_nir(alu->op, true);

        if (bcond == BI_COND_ALWAYS)
                return;

        /* Some instructions can't compare ints */
        if (float_only) {
                nir_alu_type T = nir_op_infos[alu->op].input_types[0];
                T = nir_alu_type_get_base_type(T);

                if (T != nir_type_float)
                        return;
        }

        /* We found one, let's fuse it in */
        csel->cond = bcond;
        bi_copy_src(csel, alu, 0, 0, constants_left, constant_shift, comps);
        bi_copy_src(csel, alu, 1, 1, constants_left, constant_shift, comps);
}

static void
emit_alu(bi_context *ctx, nir_alu_instr *instr)
{
        /* Try some special functions */
        switch (instr->op) {
        case nir_op_fexp2:
                bi_emit_fexp2(ctx, instr);
                return;
        case nir_op_flog2:
                bi_emit_flog2(ctx, instr);
                return;
        default:
                break;
        }

        /* Otherwise, assume it's something we can handle normally */
        bi_instruction alu = {
                .type = bi_class_for_nir_alu(instr->op),
                .dest = pan_dest_index(&instr->dest.dest),
                .dest_type = nir_op_infos[instr->op].output_type
                        | nir_dest_bit_size(instr->dest.dest),
        };

        /* TODO: Implement lowering of special functions for older Bifrost */
        assert((alu.type != BI_SPECIAL) || !(ctx->quirks & BIFROST_NO_FAST_OP));

        unsigned comps = nir_dest_num_components(instr->dest.dest);

        if (alu.type != BI_COMBINE)
                assert(comps <= MAX2(1, 32 / comps));

        if (!instr->dest.dest.is_ssa) {
                for (unsigned i = 0; i < comps; ++i)
                        assert(instr->dest.write_mask & (1 << i));
        }

        /* We inline constants as we go. This tracks how many constants have
         * been inlined, since we're limited to 64-bits of constants per
         * instruction */

        unsigned dest_bits = nir_dest_bit_size(instr->dest.dest);
        unsigned constants_left = (64 / dest_bits);
        unsigned constant_shift = 0;

        if (alu.type == BI_COMBINE)
                constants_left = 0;

        /* Copy sources */

        unsigned num_inputs = nir_op_infos[instr->op].num_inputs;
        assert(num_inputs <= ARRAY_SIZE(alu.src));

        for (unsigned i = 0; i < num_inputs; ++i) {
                unsigned f = 0;

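                /* BI_CSEL reserves src[0]/src[1] for the (possibly fused)
                 * comparison, so the NIR select values shift up to
                 * src[2]/src[3] */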
                if (i && alu.type == BI_CSEL)
                        f++;

                bi_copy_src(&alu, instr, i, i + f, &constants_left, &constant_shift, comps);
        }

        /* Op-specific fixup */
        switch (instr->op) {
        case nir_op_fmul:
                alu.src[2] = BIR_INDEX_ZERO; /* FMA */
                alu.src_types[2] = alu.src_types[1];
                break;
        case nir_op_fsat:
                alu.outmod = BIFROST_SAT; /* FMOV */
                break;
        case nir_op_fneg:
                alu.src_neg[0] = true; /* FMOV */
                break;
        case nir_op_fabs:
                alu.src_abs[0] = true; /* FMOV */
                break;
        case nir_op_fsub:
                alu.src_neg[1] = true; /* FADD */
                break;
        case nir_op_iadd:
                alu.op.imath = BI_IMATH_ADD;
                break;
        case nir_op_isub:
                alu.op.imath = BI_IMATH_SUB;
                break;
        case nir_op_inot:
                /* No dedicated bitwise NOT, but sources can be inverted, so
                 * lower ~a to (~a | 0) */
                alu.op.bitwise = BI_BITWISE_OR;
                alu.bitwise.src_invert[0] = true;
                alu.src[1] = BIR_INDEX_ZERO;
                break;
        case nir_op_fmax:
        case nir_op_imax:
        case nir_op_umax:
                alu.op.minmax = BI_MINMAX_MAX; /* MINMAX */
                break;
        case nir_op_frcp:
                alu.op.special = BI_SPECIAL_FRCP;
                break;
        case nir_op_frsq:
                alu.op.special = BI_SPECIAL_FRSQ;
                break;
        BI_CASE_CMP(nir_op_flt)
        BI_CASE_CMP(nir_op_ilt)
        BI_CASE_CMP(nir_op_fge)
        BI_CASE_CMP(nir_op_ige)
        BI_CASE_CMP(nir_op_feq)
        BI_CASE_CMP(nir_op_ieq)
        BI_CASE_CMP(nir_op_fne)
        BI_CASE_CMP(nir_op_ine)
                alu.cond = bi_cond_for_nir(instr->op, false);
                break;
        case nir_op_fround_even:
                alu.roundmode = BIFROST_RTE;
                break;
        case nir_op_fceil:
                alu.roundmode = BIFROST_RTP;
                break;
        case nir_op_ffloor:
                alu.roundmode = BIFROST_RTN;
                break;
        case nir_op_ftrunc:
                alu.roundmode = BIFROST_RTZ;
                break;
        case nir_op_iand:
                alu.op.bitwise = BI_BITWISE_AND;
                break;
        case nir_op_ior:
                alu.op.bitwise = BI_BITWISE_OR;
                break;
        case nir_op_ixor:
                alu.op.bitwise = BI_BITWISE_XOR;
                break;
        case nir_op_f2i32:
                alu.roundmode = BIFROST_RTZ;
                break;

        case nir_op_f2f16:
        case nir_op_i2i16:
        case nir_op_u2u16: {
                if (nir_src_bit_size(instr->src[0].src) != 32)
                        break;

                /* Should have been const folded */
                assert(!nir_src_is_const(instr->src[0].src));

                alu.src_types[1] = alu.src_types[0];
                alu.src[1] = alu.src[0];

                unsigned last = nir_dest_num_components(instr->dest.dest) - 1;
                assert(last <= 1);

                alu.swizzle[1][0] = instr->src[0].swizzle[last];
                break;
        }

        default:
                break;
        }

        if (alu.type == BI_CSEL) {
                /* Default to csel3 */
                alu.cond = BI_COND_NE;
                alu.src[1] = BIR_INDEX_ZERO;
                alu.src_types[1] = alu.src_types[0];

                /* TODO: Reenable cond fusing when we can split up registers
                 * when scheduling */
#if 0
                bi_fuse_cond(&alu, instr->src[0],
                             &constants_left, &constant_shift, comps, false);
#endif
        } else if (alu.type == BI_BITWISE) {
                /* Implicit shift argument... at some point we should fold */
                alu.src[2] = BIR_INDEX_ZERO;
                alu.src_types[2] = alu.src_types[1];
        }

        bi_emit(ctx, alu);
}

/* TEX_COMPACT instructions assume normal 2D f32 operation but are more
 * space-efficient and with simpler RA/scheduling requirements */

static void
emit_tex_compact(bi_context *ctx, nir_tex_instr *instr)
{
        bi_instruction tex = {
                .type = BI_TEX,
                .op = { .texture = BI_TEX_COMPACT },
                .texture = {
                        .texture_index = instr->texture_index,
                        .sampler_index = instr->sampler_index,
                },
                .dest = pan_dest_index(&instr->dest),
                .dest_type = instr->dest_type,
                .src_types = { nir_type_float32, nir_type_float32 },
                .vector_channels = 4
        };

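        /* Only the 2D coordinate is consumed: the same coordinate vector
         * feeds both sources, with the swizzles below selecting x for src[0]
         * and y for src[1] */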
        for (unsigned i = 0; i < instr->num_srcs; ++i) {
                int index = pan_src_index(&instr->src[i].src);

                /* The LOD was checked ahead-of-time (bi_is_normal_tex), so
                 * it's safe to skip here */
                if (instr->src[i].src_type == nir_tex_src_lod)
                        continue;

                assert(instr->src[i].src_type == nir_tex_src_coord);

                tex.src[0] = index;
                tex.src[1] = index;
                tex.swizzle[0][0] = 0;
                tex.swizzle[1][0] = 1;
        }

        bi_emit(ctx, tex);
}

static void
emit_tex_full(bi_context *ctx, nir_tex_instr *instr)
{
        unreachable("stub");
}

/* Normal texture ops are tex for fragment shaders, and txl with a constant
 * LOD of 0 for vertex shaders. Anything else needs a full texture op. */

static bool
bi_is_normal_tex(gl_shader_stage stage, nir_tex_instr *instr)
{
        if (stage == MESA_SHADER_FRAGMENT)
                return instr->op == nir_texop_tex;

        if (instr->op != nir_texop_txl)
                return false;

        for (unsigned i = 0; i < instr->num_srcs; ++i) {
                if (instr->src[i].src_type != nir_tex_src_lod)
                        continue;

                nir_src src = instr->src[i].src;

                if (!nir_src_is_const(src))
                        return false;

                if (nir_src_as_uint(src) != 0)
                        return false;
        }

        return true;
}

static void
emit_tex(bi_context *ctx, nir_tex_instr *instr)
{
        nir_alu_type base = nir_alu_type_get_base_type(instr->dest_type);
        unsigned sz = nir_dest_bit_size(instr->dest);
        instr->dest_type = base | sz;

        bool is_normal = bi_is_normal_tex(ctx->stage, instr);
        bool is_2d = instr->sampler_dim == GLSL_SAMPLER_DIM_2D ||
                instr->sampler_dim == GLSL_SAMPLER_DIM_EXTERNAL;
        bool is_f = base == nir_type_float && (sz == 16 || sz == 32);

        bool is_compact = is_normal && is_2d && is_f && !instr->is_shadow;

        if (is_compact)
                emit_tex_compact(ctx, instr);
        else
                emit_tex_full(ctx, instr);
}

static void
emit_instr(bi_context *ctx, struct nir_instr *instr)
{
        switch (instr->type) {
        case nir_instr_type_load_const:
                emit_load_const(ctx, nir_instr_as_load_const(instr));
                break;

        case nir_instr_type_intrinsic:
                emit_intrinsic(ctx, nir_instr_as_intrinsic(instr));
                break;

        case nir_instr_type_alu:
                emit_alu(ctx, nir_instr_as_alu(instr));
                break;

        case nir_instr_type_tex:
                emit_tex(ctx, nir_instr_as_tex(instr));
                break;

        case nir_instr_type_jump:
                emit_jump(ctx, nir_instr_as_jump(instr));
                break;

        case nir_instr_type_ssa_undef:
                /* Spurious */
                break;

        default:
                unreachable("Unhandled instruction type");
                break;
        }
}

static bi_block *
create_empty_block(bi_context *ctx)
{
        bi_block *blk = rzalloc(ctx, bi_block);

        blk->base.predecessors = _mesa_set_create(blk,
                        _mesa_hash_pointer,
                        _mesa_key_pointer_equal);

        return blk;
}

static bi_block *
emit_block(bi_context *ctx, nir_block *block)
{
        if (ctx->after_block) {
                ctx->current_block = ctx->after_block;
                ctx->after_block = NULL;
        } else {
                ctx->current_block = create_empty_block(ctx);
        }

        list_addtail(&ctx->current_block->base.link, &ctx->blocks);
        list_inithead(&ctx->current_block->base.instructions);

        nir_foreach_instr(instr, block) {
                emit_instr(ctx, instr);
                ++ctx->instruction_count;
        }

        return ctx->current_block;
}

/* Emits an unconditional branch to the end of the current block, returning a
 * pointer so the user can fill in details */

static bi_instruction *
bi_emit_branch(bi_context *ctx)
{
        bi_instruction branch = {
                .type = BI_BRANCH,
                .cond = BI_COND_ALWAYS
        };

        return bi_emit(ctx, branch);
}

/* Sets a condition for a branch by examining the NIR condition. If we're
 * familiar with the condition, we unwrap it to fold it into the branch
 * instruction. Otherwise, we consume the condition directly. We
 * generally use 1-bit booleans which allows us to use small types for
 * the conditions.
 */

static void
bi_set_branch_cond(bi_instruction *branch, nir_src *cond, bool invert)
{
        /* TODO: Try to unwrap instead of always bailing */
        branch->src[0] = pan_src_index(cond);
        branch->src[1] = BIR_INDEX_ZERO;
        branch->src_types[0] = branch->src_types[1] = nir_type_uint |
                nir_src_bit_size(*cond);
        branch->cond = invert ? BI_COND_EQ : BI_COND_NE;
}

static void
emit_if(bi_context *ctx, nir_if *nif)
{
        bi_block *before_block = ctx->current_block;

        /* Speculatively emit the branch, but we can't fill it in until later */
        bi_instruction *then_branch = bi_emit_branch(ctx);
        bi_set_branch_cond(then_branch, &nif->condition, true);

        /* Emit the two subblocks. */
        bi_block *then_block = emit_cf_list(ctx, &nif->then_list);
        bi_block *end_then_block = ctx->current_block;

        /* Emit a jump from the end of the then block to the end of the else */
        bi_instruction *then_exit = bi_emit_branch(ctx);

        /* Emit second block, and check if it's empty */

        int count_in = ctx->instruction_count;
        bi_block *else_block = emit_cf_list(ctx, &nif->else_list);
        bi_block *end_else_block = ctx->current_block;
        ctx->after_block = create_empty_block(ctx);

        /* Now that we have the subblocks emitted, fix up the branches */

        assert(then_block);
        assert(else_block);

        if (ctx->instruction_count == count_in) {
                /* The else block is empty, so don't emit an exit jump */
                bi_remove_instruction(then_exit);
                then_branch->branch_target = ctx->after_block;
                pan_block_add_successor(&end_then_block->base, &ctx->after_block->base); /* fallthrough */
        } else {
                then_branch->branch_target = else_block;
                then_exit->branch_target = ctx->after_block;
                pan_block_add_successor(&end_then_block->base, &then_exit->branch_target->base);
                pan_block_add_successor(&end_else_block->base, &ctx->after_block->base); /* fallthrough */
        }

        pan_block_add_successor(&before_block->base, &then_branch->branch_target->base); /* then_branch */
        pan_block_add_successor(&before_block->base, &then_block->base); /* fallthrough */
}

static void
emit_loop(bi_context *ctx, nir_loop *nloop)
{
        /* Remember where we are */
        bi_block *start_block = ctx->current_block;

        bi_block *saved_break = ctx->break_block;
        bi_block *saved_continue = ctx->continue_block;

        ctx->continue_block = create_empty_block(ctx);
        ctx->break_block = create_empty_block(ctx);
        ctx->after_block = ctx->continue_block;

        /* Emit the body itself */
        emit_cf_list(ctx, &nloop->body);

        /* Branch back to the loop header */
        bi_instruction *br_back = bi_emit_branch(ctx);
        br_back->branch_target = ctx->continue_block;
        pan_block_add_successor(&start_block->base, &ctx->continue_block->base);
        pan_block_add_successor(&ctx->current_block->base, &ctx->continue_block->base);

        ctx->after_block = ctx->break_block;

        /* Pop off */
        ctx->break_block = saved_break;
        ctx->continue_block = saved_continue;
        ++ctx->loop_count;
}

static bi_block *
emit_cf_list(bi_context *ctx, struct exec_list *list)
{
        bi_block *start_block = NULL;

        foreach_list_typed(nir_cf_node, node, node, list) {
                switch (node->type) {
                case nir_cf_node_block: {
                        bi_block *block = emit_block(ctx, nir_cf_node_as_block(node));

                        if (!start_block)
                                start_block = block;

                        break;
                }

                case nir_cf_node_if:
                        emit_if(ctx, nir_cf_node_as_if(node));
                        break;

                case nir_cf_node_loop:
                        emit_loop(ctx, nir_cf_node_as_loop(node));
                        break;

                default:
                        unreachable("Unknown control flow");
                }
        }

        return start_block;
}

static int
glsl_type_size(const struct glsl_type *type, bool bindless)
{
        return glsl_count_attribute_slots(type, false);
}

static void
bi_optimize_nir(nir_shader *nir)
{
        bool progress;
        unsigned lower_flrp = 16 | 32 | 64;

        NIR_PASS(progress, nir, nir_lower_regs_to_ssa);
        NIR_PASS(progress, nir, nir_lower_idiv, nir_lower_idiv_fast);

        nir_lower_tex_options lower_tex_options = {
                .lower_txs_lod = true,
                .lower_txp = ~0,
                .lower_tex_without_implicit_lod = true,
                .lower_txd = true,
        };

        NIR_PASS(progress, nir, nir_lower_tex, &lower_tex_options);
        NIR_PASS(progress, nir, nir_lower_alu_to_scalar, NULL, NULL);
        NIR_PASS(progress, nir, nir_lower_load_const_to_scalar);

        do {
                progress = false;

                NIR_PASS(progress, nir, nir_lower_var_copies);
                NIR_PASS(progress, nir, nir_lower_vars_to_ssa);

                NIR_PASS(progress, nir, nir_copy_prop);
                NIR_PASS(progress, nir, nir_opt_remove_phis);
                NIR_PASS(progress, nir, nir_opt_dce);
                NIR_PASS(progress, nir, nir_opt_dead_cf);
                NIR_PASS(progress, nir, nir_opt_cse);
                NIR_PASS(progress, nir, nir_opt_peephole_select, 64, false, true);
                NIR_PASS(progress, nir, nir_opt_algebraic);
                NIR_PASS(progress, nir, nir_opt_constant_folding);

                if (lower_flrp != 0) {
                        bool lower_flrp_progress = false;
                        NIR_PASS(lower_flrp_progress,
                                 nir,
                                 nir_lower_flrp,
                                 lower_flrp,
                                 false /* always_precise */,
                                 nir->options->lower_ffma);
                        if (lower_flrp_progress) {
                                NIR_PASS(progress, nir,
                                         nir_opt_constant_folding);
                                progress = true;
                        }

                        /* Nothing should rematerialize any flrps, so we only
                         * need to do this lowering once.
                         */
                        lower_flrp = 0;
                }

                NIR_PASS(progress, nir, nir_opt_undef);
                NIR_PASS(progress, nir, nir_opt_loop_unroll,
                         nir_var_shader_in |
                         nir_var_shader_out |
                         nir_var_function_temp);
        } while (progress);

        NIR_PASS(progress, nir, nir_opt_algebraic_late);
        NIR_PASS(progress, nir, nir_lower_bool_to_int32);
        NIR_PASS(progress, nir, bifrost_nir_lower_algebraic_late);
        NIR_PASS(progress, nir, nir_lower_alu_to_scalar, NULL, NULL);
        NIR_PASS(progress, nir, nir_lower_load_const_to_scalar);

        /* Take us out of SSA */
        NIR_PASS(progress, nir, nir_lower_locals_to_regs);
        NIR_PASS(progress, nir, nir_move_vec_src_uses_to_dest);
        NIR_PASS(progress, nir, nir_convert_from_ssa, true);
}

void
bifrost_compile_shader_nir(nir_shader *nir, panfrost_program *program, unsigned product_id)
{
        bifrost_debug = debug_get_option_bifrost_debug();

        bi_context *ctx = rzalloc(NULL, bi_context);
        ctx->nir = nir;
        ctx->stage = nir->info.stage;
        ctx->quirks = bifrost_get_quirks(product_id);
        list_inithead(&ctx->blocks);

        /* Lower gl_Position pre-optimisation, but after lowering vars to ssa
         * (so we don't accidentally duplicate the epilogue since mesa/st has
         * messed with our I/O quite a bit already) */

        NIR_PASS_V(nir, nir_lower_vars_to_ssa);

        if (ctx->stage == MESA_SHADER_VERTEX) {
                NIR_PASS_V(nir, nir_lower_viewport_transform);
                NIR_PASS_V(nir, nir_lower_point_size, 1.0, 1024.0);
        }

        NIR_PASS_V(nir, nir_split_var_copies);
        NIR_PASS_V(nir, nir_lower_global_vars_to_local);
        NIR_PASS_V(nir, nir_lower_var_copies);
        NIR_PASS_V(nir, nir_lower_vars_to_ssa);
        NIR_PASS_V(nir, nir_lower_io, nir_var_shader_in | nir_var_shader_out,
                   glsl_type_size, 0);
        NIR_PASS_V(nir, nir_lower_ssbo);
        NIR_PASS_V(nir, nir_lower_mediump_outputs);

        bi_optimize_nir(nir);

        if (bifrost_debug & BIFROST_DBG_SHADERS) {
                nir_print_shader(nir, stdout);
        }

        panfrost_nir_assign_sysvals(&ctx->sysvals, nir);
        program->sysval_count = ctx->sysvals.sysval_count;
        memcpy(program->sysvals, ctx->sysvals.sysvals, sizeof(ctx->sysvals.sysvals[0]) * ctx->sysvals.sysval_count);
        ctx->blend_types = program->blend_types;

        nir_foreach_function(func, nir) {
                if (!func->impl)
                        continue;

                ctx->impl = func->impl;
                emit_cf_list(ctx, &func->impl->body);
                break; /* TODO: Multi-function shaders */
        }

        unsigned block_source_count = 0;

        bi_foreach_block(ctx, _block) {
                bi_block *block = (bi_block *) _block;

                /* Name blocks now that we're done emitting so the order is
                 * consistent */
                block->base.name = block_source_count++;

                bi_lower_combine(ctx, block);
        }

        bool progress = false;

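        /* Iterate DCE to a fixed point, since removing one dead instruction
         * can make the instructions feeding it dead in turn */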
        do {
                progress = false;

                bi_foreach_block(ctx, _block) {
                        bi_block *block = (bi_block *) _block;
                        progress |= bi_opt_dead_code_eliminate(ctx, block);
                }
        } while (progress);

        if (bifrost_debug & BIFROST_DBG_SHADERS)
                bi_print_shader(ctx, stdout);
        bi_schedule(ctx);
        bi_register_allocate(ctx);
        if (bifrost_debug & BIFROST_DBG_SHADERS)
                bi_print_shader(ctx, stdout);
        bi_pack(ctx, &program->compiled);

        if (bifrost_debug & BIFROST_DBG_SHADERS)
                disassemble_bifrost(stdout, program->compiled.data, program->compiled.size, true);

        ralloc_free(ctx);
}