pan/bi: Allow printing branches without targets
[mesa.git] src/panfrost/bifrost/bi_pack.c
1 /*
2 * Copyright (C) 2020 Collabora, Ltd.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 */
23
24 #include "compiler.h"
25 #include "bi_print.h"
26
27 #define RETURN_PACKED(str) { \
28 uint64_t temp = 0; \
29 memcpy(&temp, &str, sizeof(str)); \
30 return temp; \
31 }
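/* A minimal illustration of the macro (the struct here is hypothetical, not
 * part of this file): given a packed bitfield such as
 *
 *     struct example { unsigned src0 : 3, src1 : 3, op : 10; };
 *     struct example e = { .src0 = 1, .src1 = 2, .op = 0x3F };
 *     RETURN_PACKED(e);
 *
 * the memcpy through a zero-initialized uint64_t returns the raw encoding,
 * leaves any bits beyond sizeof(e) clear, and avoids the type-punning issues
 * a pointer cast between the struct and an integer would risk. */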
32
33 /* This file contains the final passes of the compiler. Running after
34 * scheduling and RA, the IR is now finalized, so we need to emit it to actual
35 * bits on the wire (as well as fix up branches) */
36
37 static uint64_t
38 bi_pack_header(bi_clause *clause, bi_clause *next, bool is_fragment)
39 {
40 struct bifrost_header header = {
41 .back_to_back = clause->back_to_back,
42 .no_end_of_shader = (next != NULL),
43 .elide_writes = is_fragment,
44 .branch_cond = clause->branch_conditional,
45 .datareg_writebarrier = clause->data_register_write_barrier,
46 .datareg = clause->data_register,
47 .scoreboard_deps = next ? next->dependencies : 0,
48 .scoreboard_index = clause->scoreboard_id,
49 .clause_type = clause->clause_type,
50 .next_clause_type = next ? next->clause_type : 0,
51 .suppress_inf = true,
52 .suppress_nan = true,
53 };
54
55 header.branch_cond |= header.back_to_back;
56
57 uint64_t u = 0;
58 memcpy(&u, &header, sizeof(header));
59 return u;
60 }
61
62 /* The uniform/constant slot allows loading a contiguous 64-bit immediate or
63 * pushed uniform per bundle. Figure out which one we need in the bundle (the
64 * scheduler needs to ensure we only have one type per bundle), validate
65 * everything, and rewrite away the register/uniform indices to use 3-bit
66 * sources directly. */
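/* Worked example of the lookup below (values are illustrative only): if the
 * clause holds constants[0] = 0x89ABCDEF01234567, then an instruction whose
 * 32-bit immediate is 0x01234567 computes want = 0x0123456, which matches
 * candidates[0] = (constants[0] >> 4) masked to 28 bits, so it is routed to
 * CONST_LO with the low nibble 0x7 encoded bundle-inline via
 * bi_constant_field(). An immediate of 0x89ABCDEF instead matches
 * candidates[1] = constants[0] >> 36 and is routed to CONST_HI. */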
67
68 static unsigned
69 bi_lookup_constant(bi_clause *clause, uint64_t cons, bool *hi, bool b64)
70 {
71 uint64_t want = (cons >> 4);
72
73 for (unsigned i = 0; i < clause->constant_count; ++i) {
74 /* Only check the top 60 bits, since that's what's actually embedded
75 * in the clause; the bottom 4 bits are bundle-inline */
76
77 uint64_t candidates[2] = {
78 clause->constants[i] >> 4,
79 clause->constants[i] >> 36
80 };
81
82 /* For <64-bit mode, we treat lo/hi separately */
83
84 if (!b64)
85 candidates[0] &= (0xFFFFFFFF >> 4);
86
87 if (candidates[0] == want)
88 return i;
89
90 if (candidates[1] == want && !b64) {
91 *hi = true;
92 return i;
93 }
94 }
95
96 unreachable("Invalid constant accessed");
97 }
98
99 static unsigned
100 bi_constant_field(unsigned idx)
101 {
102 assert(idx <= 5);
103
104 const unsigned values[] = {
105 4, 5, 6, 7, 2, 3
106 };
107
108 return values[idx] << 4;
109 }
110
111 static bool
112 bi_assign_uniform_constant_single(
113 bi_registers *regs,
114 bi_clause *clause,
115 bi_instruction *ins, bool assigned, bool fast_zero)
116 {
117 if (!ins)
118 return assigned;
119
120 if (ins->type == BI_BLEND) {
121 assert(!assigned);
122 regs->uniform_constant = 0x8;
123 return true;
124 }
125
126 bi_foreach_src(ins, s) {
127 if (s == 0 && (ins->type == BI_LOAD_VAR_ADDRESS || ins->type == BI_LOAD_ATTR)) continue;
128
129 if (ins->src[s] & BIR_INDEX_CONSTANT) {
130 /* Let direct addresses through */
131 if (ins->type == BI_LOAD_VAR)
132 continue;
133
134 bool hi = false;
135 bool b64 = nir_alu_type_get_type_size(ins->src_types[s]) > 32;
136 uint64_t cons = bi_get_immediate(ins, s);
137 unsigned idx = bi_lookup_constant(clause, cons, &hi, b64);
138 unsigned lo = clause->constants[idx] & 0xF;
139 unsigned f = bi_constant_field(idx) | lo;
140
141 if (assigned && regs->uniform_constant != f)
142 unreachable("Mismatched uniform/const field: imm");
143
144 regs->uniform_constant = f;
145 ins->src[s] = BIR_INDEX_PASS | (hi ? BIFROST_SRC_CONST_HI : BIFROST_SRC_CONST_LO);
146 assigned = true;
147 } else if (ins->src[s] & BIR_INDEX_ZERO && (ins->type == BI_LOAD_UNIFORM || ins->type == BI_LOAD_VAR)) {
148 /* XXX: HACK UNTIL WE HAVE HI MATCHING DUE TO OVERFLOW XXX */
149 ins->src[s] = BIR_INDEX_PASS | BIFROST_SRC_CONST_HI;
150 } else if (ins->src[s] & BIR_INDEX_ZERO && !fast_zero) {
151 /* FMAs have a fast zero port, ADD needs to use the
152 * uniform/const port's special 0 mode handled here */
153 unsigned f = 0;
154
155 if (assigned && regs->uniform_constant != f)
156 unreachable("Mismatched uniform/const field: 0");
157
158 regs->uniform_constant = f;
159 ins->src[s] = BIR_INDEX_PASS | BIFROST_SRC_CONST_LO;
160 assigned = true;
161 } else if (ins->src[s] & BIR_INDEX_ZERO && fast_zero) {
162 ins->src[s] = BIR_INDEX_PASS | BIFROST_SRC_STAGE;
163 } else if (ins->src[s] & BIR_INDEX_UNIFORM) {
164 unreachable("Push uniforms not implemented yet");
165 }
166 }
167
168 return assigned;
169 }
170
171 static void
172 bi_assign_uniform_constant(
173 bi_clause *clause,
174 bi_registers *regs,
175 bi_bundle bundle)
176 {
177 bool assigned =
178 bi_assign_uniform_constant_single(regs, clause, bundle.fma, false, true);
179
180 bi_assign_uniform_constant_single(regs, clause, bundle.add, assigned, false);
181 }
182
183 /* Assigns a port for reading, before anything is written */
184
185 static void
186 bi_assign_port_read(bi_registers *regs, unsigned src)
187 {
188 /* We only assign for registers */
189 if (!(src & BIR_INDEX_REGISTER))
190 return;
191
192 unsigned reg = src & ~BIR_INDEX_REGISTER;
193
194 /* Check if we already assigned the port */
195 for (unsigned i = 0; i <= 1; ++i) {
196 if (regs->port[i] == reg && regs->enabled[i])
197 return;
198 }
199
200 if (regs->port[3] == reg && regs->read_port3)
201 return;
202
203 /* Assign it now */
204
205 for (unsigned i = 0; i <= 1; ++i) {
206 if (!regs->enabled[i]) {
207 regs->port[i] = reg;
208 regs->enabled[i] = true;
209 return;
210 }
211 }
212
213 if (!regs->read_port3) {
214 regs->port[3] = reg;
215 regs->read_port3 = true;
216 return;
217 }
218
219 bi_print_ports(regs, stderr);
220 unreachable("Failed to find a free port for src");
221 }
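/* For instance (register numbers chosen for illustration): a bundle reading
 * r4, r9 and r23 in that order ends up with port[0] = 4, port[1] = 9 and,
 * both read ports being taken, port[3] = 23 with read_port3 set. A fourth
 * distinct register read in the same bundle would hit the unreachable. */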
222
223 static bi_registers
224 bi_assign_ports(bi_bundle *now, bi_bundle *prev)
225 {
226 /* We assign ports for the main register mechanism. Special ops
227 * use the data registers, which have their own mechanism entirely
228 * and thus get skipped over here. */
229
230 unsigned read_dreg = now->add &&
231 bi_class_props[now->add->type] & BI_DATA_REG_SRC;
232
233 unsigned write_dreg = prev->add &&
234 bi_class_props[prev->add->type] & BI_DATA_REG_DEST;
235
236 /* First, assign reads */
237
238 if (now->fma)
239 bi_foreach_src(now->fma, src)
240 bi_assign_port_read(&now->regs, now->fma->src[src]);
241
242 if (now->add) {
243 bi_foreach_src(now->add, src) {
244 if (!(src == 0 && read_dreg))
245 bi_assign_port_read(&now->regs, now->add->src[src]);
246 }
247 }
248
249 /* Next, assign writes */
250
251 if (prev->add && prev->add->dest & BIR_INDEX_REGISTER && !write_dreg) {
252 now->regs.port[2] = prev->add->dest & ~BIR_INDEX_REGISTER;
253 now->regs.write_add = true;
254 }
255
256 if (prev->fma && prev->fma->dest & BIR_INDEX_REGISTER) {
257 unsigned r = prev->fma->dest & ~BIR_INDEX_REGISTER;
258
259 if (now->regs.write_add) {
260 /* Scheduler constraint: cannot read 3 and write 2 */
261 assert(!now->regs.read_port3);
262 now->regs.port[3] = r;
263 } else {
264 now->regs.port[2] = r;
265 }
266
267 now->regs.write_fma = true;
268 }
269
270 return now->regs;
271 }
272
273 /* Determines the register control field, ignoring the first? flag */
274
275 static enum bifrost_reg_control
276 bi_pack_register_ctrl_lo(bi_registers r)
277 {
278 if (r.write_fma) {
279 if (r.write_add) {
280 assert(!r.read_port3);
281 return BIFROST_WRITE_ADD_P2_FMA_P3;
282 } else {
283 if (r.read_port3)
284 return BIFROST_WRITE_FMA_P2_READ_P3;
285 else
286 return BIFROST_WRITE_FMA_P2;
287 }
288 } else if (r.write_add) {
289 if (r.read_port3)
290 return BIFROST_WRITE_ADD_P2_READ_P3;
291 else
292 return BIFROST_WRITE_ADD_P2;
293 } else if (r.read_port3)
294 return BIFROST_READ_P3;
295 else
296 return BIFROST_REG_NONE;
297 }
298
299 /* Ditto but account for the first? flag this time */
300
301 static enum bifrost_reg_control
302 bi_pack_register_ctrl(bi_registers r)
303 {
304 enum bifrost_reg_control ctrl = bi_pack_register_ctrl_lo(r);
305
306 if (r.first_instruction) {
307 if (ctrl == BIFROST_REG_NONE)
308 ctrl = BIFROST_FIRST_NONE;
309 else if (ctrl == BIFROST_WRITE_FMA_P2_READ_P3)
310 ctrl = BIFROST_FIRST_WRITE_FMA_P2_READ_P3;
311 else
312 ctrl |= BIFROST_FIRST_NONE;
313 }
314
315 return ctrl;
316 }
317
318 static uint64_t
319 bi_pack_registers(bi_registers regs)
320 {
321 enum bifrost_reg_control ctrl = bi_pack_register_ctrl(regs);
322 struct bifrost_regs s = { 0 };
323 uint64_t packed = 0;
324
325 if (regs.enabled[1]) {
326 /* Gotta save that bit!~ Required by the 63-x trick */
327 assert(regs.port[1] > regs.port[0]);
328 assert(regs.enabled[0]);
329
330 /* Do the 63-x trick, see docs/disasm */
331 if (regs.port[0] > 31) {
332 regs.port[0] = 63 - regs.port[0];
333 regs.port[1] = 63 - regs.port[1];
334 }
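/* Illustration (register numbers are made up): with port[0] = 40 and
 * port[1] = 50, the transform stores 23 and 13. Since the reg0 field only
 * holds 0..31, the inverted ordering (reg1 < reg0) is what lets a decoder
 * tell the two cases apart and undo the 63-x mapping to recover the high
 * registers -- hence the ordering assert above (see docs/disasm). */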
335
336 assert(regs.port[0] <= 31);
337 assert(regs.port[1] <= 63);
338
339 s.ctrl = ctrl;
340 s.reg1 = regs.port[1];
341 s.reg0 = regs.port[0];
342 } else {
343 /* Port 1 disabled, so set to zero and use port 1 for ctrl */
344 s.ctrl = 0;
345 s.reg1 = ctrl << 2;
346
347 if (regs.enabled[0]) {
348 /* Bit 0: upper bit of port 0 */
349 s.reg1 |= (regs.port[0] >> 5);
350
351 /* Rest of port 0 in usual spot */
352 s.reg0 = (regs.port[0] & 0b11111);
353 } else {
354 /* Bit 1 set if port 0 also disabled */
355 s.reg1 |= (1 << 1);
356 }
357 }
358
359 /* When port 3 isn't used, we have to set it to port 2, and vice versa,
360 * or INSTR_INVALID_ENC is raised. The reason is unknown. */
361
362 bool has_port2 = regs.write_fma || regs.write_add;
363 bool has_port3 = regs.read_port3 || (regs.write_fma && regs.write_add);
364
365 if (!has_port3)
366 regs.port[3] = regs.port[2];
367
368 if (!has_port2)
369 regs.port[2] = regs.port[3];
370
371 s.reg3 = regs.port[3];
372 s.reg2 = regs.port[2];
373 s.uniform_const = regs.uniform_constant;
374
375 memcpy(&packed, &s, sizeof(s));
376 return packed;
377 }
378
379 static void
380 bi_set_data_register(bi_clause *clause, unsigned idx)
381 {
382 assert(idx & BIR_INDEX_REGISTER);
383 unsigned reg = idx & ~BIR_INDEX_REGISTER;
384 assert(reg <= 63);
385 clause->data_register = reg;
386 }
387
388 static void
389 bi_read_data_register(bi_clause *clause, bi_instruction *ins)
390 {
391 bi_set_data_register(clause, ins->src[0]);
392 }
393
394 static void
395 bi_write_data_register(bi_clause *clause, bi_instruction *ins)
396 {
397 bi_set_data_register(clause, ins->dest);
398 }
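/* These helpers route operands of the special ops (loads, stores, texturing,
 * blend) through the per-clause data register, which is named in the clause
 * header (bi_pack_header's .datareg) rather than in the bundle's register
 * block. */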
399
400 static enum bifrost_packed_src
401 bi_get_src_reg_port(bi_registers *regs, unsigned src)
402 {
403 unsigned reg = src & ~BIR_INDEX_REGISTER;
404
405 if (regs->port[0] == reg && regs->enabled[0])
406 return BIFROST_SRC_PORT0;
407 else if (regs->port[1] == reg && regs->enabled[1])
408 return BIFROST_SRC_PORT1;
409 else if (regs->port[3] == reg && regs->read_port3)
410 return BIFROST_SRC_PORT3;
411 else
412 unreachable("Tried to access register with no port");
413 }
414
415 static enum bifrost_packed_src
416 bi_get_src(bi_instruction *ins, bi_registers *regs, unsigned s)
417 {
418 unsigned src = ins->src[s];
419
420 if (src & BIR_INDEX_REGISTER)
421 return bi_get_src_reg_port(regs, src);
422 else if (src & BIR_INDEX_PASS)
423 return src & ~BIR_INDEX_PASS;
424 else {
425 bi_print_instruction(ins, stderr);
426 unreachable("Unknown src in above instruction");
427 }
428 }
429
430 /* Constructs a packed 2-bit swizzle for a 16-bit vec2 source. Source must be
431 * 16-bit and written components must correspond to valid swizzles (component x
432 * or y). */
433
434 static unsigned
435 bi_swiz16(bi_instruction *ins, unsigned src)
436 {
437 assert(nir_alu_type_get_type_size(ins->src_types[src]) == 16);
438 unsigned swizzle = 0;
439
440 for (unsigned c = 0; c < 2; ++c) {
441 if (!bi_writes_component(ins, c)) continue;
442
443 unsigned k = ins->swizzle[src][c];
444 assert(k <= 1);
445 swizzle |= (k << c);
446 }
447
448 return swizzle;
449 }
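/* Example (illustrative): a 16-bit source swizzled as (y, x), i.e.
 * swizzle[src] = {1, 0}, packs to 0b01: component 0 contributes bit 0 = 1
 * (element .y) and component 1 contributes bit 1 = 0 (element .x). */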
450
451 static unsigned
452 bi_pack_fma_fma(bi_instruction *ins, bi_registers *regs)
453 {
454 /* (-a)(-b) = ab, so we only need one negate bit */
455 bool negate_mul = ins->src_neg[0] ^ ins->src_neg[1];
456
457 if (ins->op.mscale) {
458 assert(!(ins->src_abs[0] && ins->src_abs[1]));
459 assert(!ins->src_abs[2] || !ins->src_neg[3] || !ins->src_abs[3]);
460
461 /* We can have exactly one abs, and can flip the multiplication
462 * to make it fit if we have to */
463 bool flip_ab = ins->src_abs[1];
464
465 struct bifrost_fma_mscale pack = {
466 .src0 = bi_get_src(ins, regs, flip_ab ? 1 : 0),
467 .src1 = bi_get_src(ins, regs, flip_ab ? 0 : 1),
468 .src2 = bi_get_src(ins, regs, 2),
469 .src3 = bi_get_src(ins, regs, 3),
470 .mscale_mode = 0,
471 .mode = ins->outmod,
472 .src0_abs = ins->src_abs[0] || ins->src_abs[1],
473 .src1_neg = negate_mul,
474 .src2_neg = ins->src_neg[2],
475 .op = BIFROST_FMA_OP_MSCALE,
476 };
477
478 RETURN_PACKED(pack);
479 } else if (ins->dest_type == nir_type_float32) {
480 struct bifrost_fma_fma pack = {
481 .src0 = bi_get_src(ins, regs, 0),
482 .src1 = bi_get_src(ins, regs, 1),
483 .src2 = bi_get_src(ins, regs, 2),
484 .src0_abs = ins->src_abs[0],
485 .src1_abs = ins->src_abs[1],
486 .src2_abs = ins->src_abs[2],
487 .src0_neg = negate_mul,
488 .src2_neg = ins->src_neg[2],
489 .outmod = ins->outmod,
490 .roundmode = ins->roundmode,
491 .op = BIFROST_FMA_OP_FMA
492 };
493
494 RETURN_PACKED(pack);
495 } else if (ins->dest_type == nir_type_float16) {
496 struct bifrost_fma_fma16 pack = {
497 .src0 = bi_get_src(ins, regs, 0),
498 .src1 = bi_get_src(ins, regs, 1),
499 .src2 = bi_get_src(ins, regs, 2),
500 .swizzle_0 = bi_swiz16(ins, 0),
501 .swizzle_1 = bi_swiz16(ins, 1),
502 .swizzle_2 = bi_swiz16(ins, 2),
503 .src0_neg = negate_mul,
504 .src2_neg = ins->src_neg[2],
505 .outmod = ins->outmod,
506 .roundmode = ins->roundmode,
507 .op = BIFROST_FMA_OP_FMA16
508 };
509
510 RETURN_PACKED(pack);
511 } else {
512 unreachable("Invalid fma dest type");
513 }
514 }
515
516 static unsigned
517 bi_pack_fma_addmin_f32(bi_instruction *ins, bi_registers *regs)
518 {
519 unsigned op =
520 (ins->type == BI_ADD) ? BIFROST_FMA_OP_FADD32 :
521 (ins->op.minmax == BI_MINMAX_MIN) ? BIFROST_FMA_OP_FMIN32 :
522 BIFROST_FMA_OP_FMAX32;
523
524 struct bifrost_fma_add pack = {
525 .src0 = bi_get_src(ins, regs, 0),
526 .src1 = bi_get_src(ins, regs, 1),
527 .src0_abs = ins->src_abs[0],
528 .src1_abs = ins->src_abs[1],
529 .src0_neg = ins->src_neg[0],
530 .src1_neg = ins->src_neg[1],
531 .unk = 0x0,
532 .outmod = ins->outmod,
533 .roundmode = (ins->type == BI_ADD) ? ins->roundmode : ins->minmax,
534 .op = op
535 };
536
537 RETURN_PACKED(pack);
538 }
539
540 static bool
541 bi_pack_fp16_abs(bi_instruction *ins, bi_registers *regs, bool *flip)
542 {
543 /* Absolute values are packed in a quirky way. Let k = src1 < src0. Let
544 * l be an auxiliary bit we encode. Then the hardware determines:
545 *
546 * abs0 = l || k
547 * abs1 = l && k
548 *
549 * Since add/min/max are commutative, this saves a bit by using the
550 * order of the operands as a bit (k). To pack this, first note:
551 *
552 * (l && k) implies (l || k).
553 *
554 * That is, if the second argument is abs'd, then the first argument
555 * also has abs. So there are three cases:
556 *
557 * Case 0: Neither src has absolute value. Then we have l = k = 0.
558 *
559 * Case 1: Exactly one src has absolute value. Assign that source to
560 * src0 and the other source to src1. Compute k = src1 < src0 based on
561 * that assignment. Then l = ~k.
562 *
563 * Case 2: Both sources have absolute value. Then we have l = k = 1.
564 * Note to force k = 1 requires that (src1 < src0) OR (src0 < src1).
565 * That is, this encoding is only valid if src1 and src0 are distinct.
566 * This is a scheduling restriction (XXX); if an op of this type
567 * requires both identical sources to have abs value, then we must
568 * schedule to ADD (which does not use this ordering trick).
569 */
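/* Worked instance of case 1 (port numbers are made up): only src0 carries
 * abs and the packed sources are src_0 = 4, src_1 = 7. No flip is needed,
 * k = (7 < 4) = 0, and we return l = (7 >= 4) = 1, so the hardware derives
 * abs0 = l || k = 1 and abs1 = l && k = 0, exactly the requested modifiers. */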
570
571 unsigned abs_0 = ins->src_abs[0], abs_1 = ins->src_abs[1];
572 unsigned src_0 = bi_get_src(ins, regs, 0);
573 unsigned src_1 = bi_get_src(ins, regs, 1);
574
575 assert(!(abs_0 && abs_1 && src_0 == src_1));
576
577 if (!abs_0 && !abs_1) {
578 /* Force k = 0 <===> NOT(src1 < src0) */
579 *flip = (src_1 < src_0);
580 return false;
581 } else if (abs_0 && !abs_1) {
582 return src_1 >= src_0;
583 } else if (abs_1 && !abs_0) {
584 *flip = true;
585 return src_0 >= src_1;
586 } else {
587 *flip = !(src_1 < src_0);
588 return true;
589 }
590 }
591
592 static unsigned
593 bi_pack_fmadd_min_f16(bi_instruction *ins, bi_registers *regs, bool FMA)
594 {
595 unsigned op =
596 (!FMA) ? ((ins->op.minmax == BI_MINMAX_MIN) ?
597 BIFROST_ADD_OP_FMIN16 : BIFROST_ADD_OP_FMAX16) :
598 (ins->type == BI_ADD) ? BIFROST_FMA_OP_FADD16 :
599 (ins->op.minmax == BI_MINMAX_MIN) ? BIFROST_FMA_OP_FMIN16 :
600 BIFROST_FMA_OP_FMAX16;
601
602 bool flip = false;
603 bool l = bi_pack_fp16_abs(ins, regs, &flip);
604 unsigned src_0 = bi_get_src(ins, regs, 0);
605 unsigned src_1 = bi_get_src(ins, regs, 1);
606
607 if (FMA) {
608 struct bifrost_fma_add_minmax16 pack = {
609 .src0 = flip ? src_1 : src_0,
610 .src1 = flip ? src_0 : src_1,
611 .src0_neg = ins->src_neg[flip ? 1 : 0],
612 .src1_neg = ins->src_neg[flip ? 0 : 1],
613 .src0_swizzle = bi_swiz16(ins, flip ? 1 : 0),
614 .src1_swizzle = bi_swiz16(ins, flip ? 0 : 1),
615 .abs1 = l,
616 .outmod = ins->outmod,
617 .mode = (ins->type == BI_ADD) ? ins->roundmode : ins->minmax,
618 .op = op
619 };
620
621 RETURN_PACKED(pack);
622 } else {
623 /* Can't have modes for fp16 */
624 assert(ins->outmod == 0);
625
626 struct bifrost_add_fmin16 pack = {
627 .src0 = flip ? src_1 : src_0,
628 .src1 = flip ? src_0 : src_1,
629 .src0_neg = ins->src_neg[flip ? 1 : 0],
630 .src1_neg = ins->src_neg[flip ? 0 : 1],
631 .abs1 = l,
632 .src0_swizzle = bi_swiz16(ins, flip ? 1 : 0),
633 .src1_swizzle = bi_swiz16(ins, flip ? 0 : 1),
634 .mode = ins->minmax,
635 .op = op
636 };
637
638 RETURN_PACKED(pack);
639 }
640 }
641
642 static unsigned
643 bi_pack_fma_addmin(bi_instruction *ins, bi_registers *regs)
644 {
645 if (ins->dest_type == nir_type_float32)
646 return bi_pack_fma_addmin_f32(ins, regs);
647 else if (ins->dest_type == nir_type_float16)
648 return bi_pack_fmadd_min_f16(ins, regs, true);
649 else
650 unreachable("Unknown FMA/ADD type");
651 }
652
653 static unsigned
654 bi_pack_fma_1src(bi_instruction *ins, bi_registers *regs, unsigned op)
655 {
656 struct bifrost_fma_inst pack = {
657 .src0 = bi_get_src(ins, regs, 0),
658 .op = op
659 };
660
661 RETURN_PACKED(pack);
662 }
663
664 static unsigned
665 bi_pack_fma_2src(bi_instruction *ins, bi_registers *regs, unsigned op)
666 {
667 struct bifrost_fma_2src pack = {
668 .src0 = bi_get_src(ins, regs, 0),
669 .src1 = bi_get_src(ins, regs, 1),
670 .op = op
671 };
672
673 RETURN_PACKED(pack);
674 }
675
676 static unsigned
677 bi_pack_add_1src(bi_instruction *ins, bi_registers *regs, unsigned op)
678 {
679 struct bifrost_add_inst pack = {
680 .src0 = bi_get_src(ins, regs, 0),
681 .op = op
682 };
683
684 RETURN_PACKED(pack);
685 }
686
687 static enum bifrost_csel_cond
688 bi_cond_to_csel(enum bi_cond cond, bool *flip, bool *invert, nir_alu_type T)
689 {
690 nir_alu_type B = nir_alu_type_get_base_type(T);
691 unsigned idx = (B == nir_type_float) ? 0 :
692 ((B == nir_type_int) ? 1 : 2);
693
694 switch (cond){
695 case BI_COND_LT:
696 *flip = true; /* fallthrough */
697 case BI_COND_GT: {
698 const enum bifrost_csel_cond ops[] = {
699 BIFROST_FGT_F,
700 BIFROST_IGT_I,
701 BIFROST_UGT_I
702 };
703
704 return ops[idx];
705 }
706 case BI_COND_LE:
707 *flip = true; /* fallthrough */
708 case BI_COND_GE: {
709 const enum bifrost_csel_cond ops[] = {
710 BIFROST_FGE_F,
711 BIFROST_IGE_I,
712 BIFROST_UGE_I
713 };
714
715 return ops[idx];
716 }
717 case BI_COND_NE:
718 *invert = true; /* fallthrough */
719 case BI_COND_EQ: {
720 const enum bifrost_csel_cond ops[] = {
721 BIFROST_FEQ_F,
722 BIFROST_IEQ_F,
723 BIFROST_IEQ_F /* sign is irrelevant */
724 };
725
726 return ops[idx];
727 }
728 default:
729 unreachable("Invalid op for csel");
730 }
731 }
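/* The two out-parameters are resolved by the caller: 'flip' swaps the
 * compared operands (a < b is emitted as b > a), while 'invert' swaps the
 * selected results (a != b ? x : y is emitted as a == b ? y : x). */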
732
733 static unsigned
734 bi_pack_fma_csel(bi_instruction *ins, bi_registers *regs)
735 {
736 /* TODO: Use csel3 as well */
737 bool flip = false, invert = false;
738
739 enum bifrost_csel_cond cond =
740 bi_cond_to_csel(ins->cond, &flip, &invert, ins->src_types[0]);
741
742 unsigned size = nir_alu_type_get_type_size(ins->dest_type);
743
744 unsigned cmp_0 = (flip ? 1 : 0);
745 unsigned cmp_1 = (flip ? 0 : 1);
746 unsigned res_0 = (invert ? 3 : 2);
747 unsigned res_1 = (invert ? 2 : 3);
748
749 struct bifrost_csel4 pack = {
750 .src0 = bi_get_src(ins, regs, cmp_0),
751 .src1 = bi_get_src(ins, regs, cmp_1),
752 .src2 = bi_get_src(ins, regs, res_0),
753 .src3 = bi_get_src(ins, regs, res_1),
754 .cond = cond,
755 .op = (size == 16) ? BIFROST_FMA_OP_CSEL4_V16 :
756 BIFROST_FMA_OP_CSEL4
757 };
758
759 RETURN_PACKED(pack);
760 }
761
762 static unsigned
763 bi_pack_fma_frexp(bi_instruction *ins, bi_registers *regs)
764 {
765 unsigned op = BIFROST_FMA_OP_FREXPE_LOG;
766 return bi_pack_fma_1src(ins, regs, op);
767 }
768
769 static unsigned
770 bi_pack_fma_reduce(bi_instruction *ins, bi_registers *regs)
771 {
772 if (ins->op.reduce == BI_REDUCE_ADD_FREXPM) {
773 return bi_pack_fma_2src(ins, regs, BIFROST_FMA_OP_ADD_FREXPM);
774 } else {
775 unreachable("Invalid reduce op");
776 }
777 }
778
779 /* We have a single convert opcode in the IR but a number of opcodes that could
780 * come out. In particular we have native opcodes for:
781 *
782 * [ui]16 --> [fui]32 -- int16_to_32
783 * f16 --> f32 -- float16_to_32
784 * f32 --> f16 -- float32_to_16
785 * f32 --> [ui]32 -- float32_to_int
786 * [ui]32 --> f32 -- int_to_float32
787 * [fui]16 --> [fui]16 -- f2i_i2f16
788 */
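/* For example (tracing the logic below), a straight f32 -> s32 conversion
 * selects mode = BIFROST_CONV_F32_TO_I32, forces the swizzle field to 0b10,
 * and sets the extra 0x100 bit, before being OR'd with BIFROST_FMA_CONVERT
 * or BIFROST_ADD_CONVERT depending on which unit packs it. */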
789
790 static unsigned
791 bi_pack_convert(bi_instruction *ins, bi_registers *regs, bool FMA)
792 {
793 nir_alu_type from_base = nir_alu_type_get_base_type(ins->src_types[0]);
794 unsigned from_size = nir_alu_type_get_type_size(ins->src_types[0]);
795 bool from_unsigned = from_base == nir_type_uint;
796
797 nir_alu_type to_base = nir_alu_type_get_base_type(ins->dest_type);
798 unsigned to_size = nir_alu_type_get_type_size(ins->dest_type);
799 bool to_unsigned = to_base == nir_type_uint;
800 bool to_float = to_base == nir_type_float;
801
802 /* Sanity check */
803 assert((from_base != to_base) || (from_size != to_size));
804 assert((MAX2(from_size, to_size) / MIN2(from_size, to_size)) <= 2);
805
806 /* f32 to f16 is special */
807 if (from_size == 32 && to_size == 16 && from_base == nir_type_float && to_base == from_base) {
808 /* TODO: second vectorized source? */
809 struct bifrost_fma_2src pfma = {
810 .src0 = bi_get_src(ins, regs, 0),
811 .src1 = BIFROST_SRC_STAGE, /* 0 */
812 .op = BIFROST_FMA_FLOAT32_TO_16
813 };
814
815 struct bifrost_add_2src padd = {
816 .src0 = bi_get_src(ins, regs, 0),
817 .src1 = BIFROST_SRC_STAGE, /* 0 */
818 .op = BIFROST_ADD_FLOAT32_TO_16
819 };
820
821 if (FMA) {
822 RETURN_PACKED(pfma);
823 } else {
824 RETURN_PACKED(padd);
825 }
826 }
827
828 /* Otherwise, figure out the mode */
829 unsigned op = 0;
830
831 if (from_size == 16 && to_size == 32) {
832 unsigned component = ins->swizzle[0][0];
833 assert(component <= 1);
834
835 if (from_base == nir_type_float)
836 op = BIFROST_CONVERT_5(component);
837 else
838 op = BIFROST_CONVERT_4(from_unsigned, component, to_float);
839 } else {
840 unsigned mode = 0;
841 unsigned swizzle = (from_size == 16) ? bi_swiz16(ins, 0) : 0;
842 bool is_unsigned = from_unsigned;
843
844 if (from_base == nir_type_float) {
845 assert(to_base != nir_type_float);
846 is_unsigned = to_unsigned;
847
848 if (from_size == 32 && to_size == 32)
849 mode = BIFROST_CONV_F32_TO_I32;
850 else if (from_size == 16 && to_size == 16)
851 mode = BIFROST_CONV_F16_TO_I16;
852 else
853 unreachable("Invalid float conversion");
854 } else {
855 assert(to_base == nir_type_float);
856 assert(from_size == to_size);
857
858 if (to_size == 32)
859 mode = BIFROST_CONV_I32_TO_F32;
860 else if (to_size == 16)
861 mode = BIFROST_CONV_I16_TO_F16;
862 else
863 unreachable("Invalid int conversion");
864 }
865
866 /* Fixup swizzle for 32-bit only modes */
867
868 if (mode == BIFROST_CONV_I32_TO_F32)
869 swizzle = 0b11;
870 else if (mode == BIFROST_CONV_F32_TO_I32)
871 swizzle = 0b10;
872
873 op = BIFROST_CONVERT(is_unsigned, ins->roundmode, swizzle, mode);
874
875 /* Unclear what the top bit is for... maybe 16-bit related */
876 bool mode2 = mode == BIFROST_CONV_F16_TO_I16;
877 bool mode6 = mode == BIFROST_CONV_I16_TO_F16;
878
879 if (!(mode2 || mode6))
880 op |= 0x100;
881 }
882
883 if (FMA)
884 return bi_pack_fma_1src(ins, regs, BIFROST_FMA_CONVERT | op);
885 else
886 return bi_pack_add_1src(ins, regs, BIFROST_ADD_CONVERT | op);
887 }
888
889 static unsigned
890 bi_pack_fma_select(bi_instruction *ins, bi_registers *regs)
891 {
892 unsigned size = nir_alu_type_get_type_size(ins->src_types[0]);
893
894 if (size == 16) {
895 unsigned swiz = (ins->swizzle[0][0] | (ins->swizzle[1][0] << 1));
896 unsigned op = BIFROST_FMA_SEL_16(swiz);
897 return bi_pack_fma_2src(ins, regs, op);
898 } else if (size == 8) {
899 unsigned swiz = 0;
900
901 for (unsigned c = 0; c < 4; ++c) {
902 if (ins->swizzle[c][0]) {
903 /* Ensure lowering restriction is met */
904 assert(ins->swizzle[c][0] == 2);
905 swiz |= (1 << c);
906 }
907 }
908
909 struct bifrost_fma_sel8 pack = {
910 .src0 = bi_get_src(ins, regs, 0),
911 .src1 = bi_get_src(ins, regs, 1),
912 .src2 = bi_get_src(ins, regs, 2),
913 .src3 = bi_get_src(ins, regs, 3),
914 .swizzle = swiz,
915 .op = BIFROST_FMA_OP_SEL8
916 };
917
918 RETURN_PACKED(pack);
919 } else {
920 unreachable("Unimplemented");
921 }
922 }
923
924 static enum bifrost_fcmp_cond
925 bi_fcmp_cond(enum bi_cond cond)
926 {
927 switch (cond) {
928 case BI_COND_LT: return BIFROST_OLT;
929 case BI_COND_LE: return BIFROST_OLE;
930 case BI_COND_GE: return BIFROST_OGE;
931 case BI_COND_GT: return BIFROST_OGT;
932 case BI_COND_EQ: return BIFROST_OEQ;
933 case BI_COND_NE: return BIFROST_UNE;
934 default: unreachable("Unknown bi_cond");
935 }
936 }
937
938 /* a <?> b <==> b <flip(?)> a (TODO: NaN behaviour?) */
939
940 static enum bifrost_fcmp_cond
941 bi_flip_fcmp(enum bifrost_fcmp_cond cond)
942 {
943 switch (cond) {
944 case BIFROST_OGT:
945 return BIFROST_OLT;
946 case BIFROST_OGE:
947 return BIFROST_OLE;
948 case BIFROST_OLT:
949 return BIFROST_OGT;
950 case BIFROST_OLE:
951 return BIFROST_OGE;
952 case BIFROST_OEQ:
953 case BIFROST_UNE:
954 return cond;
955 default:
956 unreachable("Unknown fcmp cond");
957 }
958 }
959
960 static unsigned
961 bi_pack_fma_cmp(bi_instruction *ins, bi_registers *regs)
962 {
963 nir_alu_type Tl = ins->src_types[0];
964 nir_alu_type Tr = ins->src_types[1];
965
966 if (Tl == nir_type_float32 || Tr == nir_type_float32) {
967 /* TODO: Mixed 32/16 cmp */
968 assert(Tl == Tr);
969
970 enum bifrost_fcmp_cond cond = bi_fcmp_cond(ins->cond);
971
972 /* Only src1 has neg, so we arrange:
973 * a < b --- native
974 * a < -b --- native
975 * -a < -b <===> a > b
976 * -a < b <===> a > -b
977 * TODO: Is this NaN-precise?
978 */
979
980 bool flip = ins->src_neg[0];
981 bool neg = ins->src_neg[0] ^ ins->src_neg[1];
982
983 if (flip)
984 cond = bi_flip_fcmp(cond);
985
986 struct bifrost_fma_fcmp pack = {
987 .src0 = bi_get_src(ins, regs, 0),
988 .src1 = bi_get_src(ins, regs, 1),
989 .src0_abs = ins->src_abs[0],
990 .src1_abs = ins->src_abs[1],
991 .src1_neg = neg,
992 .src_expand = 0,
993 .unk1 = 0,
994 .cond = cond,
995 .op = BIFROST_FMA_OP_FCMP_GL
996 };
997
998 RETURN_PACKED(pack);
999 } else if (Tl == nir_type_float16 && Tr == nir_type_float16) {
1000 bool flip = false;
1001 bool l = bi_pack_fp16_abs(ins, regs, &flip);
1002 enum bifrost_fcmp_cond cond = bi_fcmp_cond(ins->cond);
1003
1004 if (flip)
1005 cond = bi_flip_fcmp(cond);
1006
1007 struct bifrost_fma_fcmp16 pack = {
1008 .src0 = bi_get_src(ins, regs, flip ? 1 : 0),
1009 .src1 = bi_get_src(ins, regs, flip ? 0 : 1),
1010 .src0_swizzle = bi_swiz16(ins, flip ? 1 : 0),
1011 .src1_swizzle = bi_swiz16(ins, flip ? 0 : 1),
1012 .abs1 = l,
1013 .unk = 0,
1014 .cond = cond,
1015 .op = BIFROST_FMA_OP_FCMP_GL_16,
1016 };
1017
1018 RETURN_PACKED(pack);
1019 } else {
1020 unreachable("Unknown cmp type");
1021 }
1022 }
1023
1024 static unsigned
1025 bi_fma_bitwise_op(enum bi_bitwise_op op, bool rshift)
1026 {
1027 switch (op) {
1028 case BI_BITWISE_OR:
1029 /* Via De Morgan's */
1030 return rshift ?
1031 BIFROST_FMA_OP_RSHIFT_NAND :
1032 BIFROST_FMA_OP_LSHIFT_NAND;
1033 case BI_BITWISE_AND:
1034 return rshift ?
1035 BIFROST_FMA_OP_RSHIFT_AND :
1036 BIFROST_FMA_OP_LSHIFT_AND;
1037 case BI_BITWISE_XOR:
1038 /* Shift direction handled out of band */
1039 return BIFROST_FMA_OP_RSHIFT_XOR;
1040 default:
1041 unreachable("Unknown op");
1042 }
1043 }
1044
1045 static unsigned
1046 bi_pack_fma_bitwise(bi_instruction *ins, bi_registers *regs)
1047 {
1048 unsigned size = nir_alu_type_get_type_size(ins->dest_type);
1049 assert(size <= 32);
1050
1051 bool invert_0 = ins->bitwise.src_invert[0];
1052 bool invert_1 = ins->bitwise.src_invert[1];
1053
1054 if (ins->op.bitwise == BI_BITWISE_OR) {
1055 /* Becomes NAND, so via De Morgan's:
1056 * f(A) | f(B) = ~(~f(A) & ~f(B))
1057 * = NAND(~f(A), ~f(B))
1058 */
1059
1060 invert_0 = !invert_0;
1061 invert_1 = !invert_1;
1062 } else if (ins->op.bitwise == BI_BITWISE_XOR) {
1063 /* ~A ^ ~B = ~(A ^ ~B) = ~(~(A ^ B)) = A ^ B
1064 * ~A ^ B = ~(A ^ B) = A ^ ~B
1065 */
1066
1067 invert_0 ^= invert_1;
1068 invert_1 = false;
1069
1070 /* invert_1 ends up specifying shift direction */
1071 invert_1 = !ins->bitwise.rshift;
1072 }
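/* Concretely: a plain 32-bit OR with no source inverts becomes the NAND
 * opcode with both packed inverts set, since A | B = ~(~A & ~B). For XOR,
 * any source inversions cancel or fold into the first operand
 * (~A ^ ~B = A ^ B, ~A ^ B = ~(A ^ B)), and the second invert bit is
 * repurposed to select the shift direction. */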
1073
1074 struct bifrost_shift_fma pack = {
1075 .src0 = bi_get_src(ins, regs, 0),
1076 .src1 = bi_get_src(ins, regs, 1),
1077 .src2 = bi_get_src(ins, regs, 2),
1078 .half = (size == 32) ? 0 : (size == 16) ? 0x7 : (size == 8) ? 0x4 : 0,
1079 .unk = 1, /* XXX */
1080 .invert_1 = invert_0,
1081 .invert_2 = invert_1,
1082 .op = bi_fma_bitwise_op(ins->op.bitwise, ins->bitwise.rshift)
1083 };
1084
1085 RETURN_PACKED(pack);
1086 }
1087
1088 static unsigned
1089 bi_pack_fma_round(bi_instruction *ins, bi_registers *regs)
1090 {
1091 bool fp16 = ins->dest_type == nir_type_float16;
1092 assert(fp16 || ins->dest_type == nir_type_float32);
1093
1094 unsigned op = fp16
1095 ? BIFROST_FMA_ROUND_16(ins->roundmode, bi_swiz16(ins, 0))
1096 : BIFROST_FMA_ROUND_32(ins->roundmode);
1097
1098 return bi_pack_fma_1src(ins, regs, op);
1099 }
1100
1101 static unsigned
1102 bi_pack_fma_imath(bi_instruction *ins, bi_registers *regs)
1103 {
1104 /* Scheduler: only ADD can have 8/16-bit imath */
1105 assert(ins->dest_type == nir_type_int32 || ins->dest_type == nir_type_uint32);
1106
1107 unsigned op = ins->op.imath == BI_IMATH_ADD
1108 ? BIFROST_FMA_IADD_32
1109 : BIFROST_FMA_ISUB_32;
1110
1111 return bi_pack_fma_2src(ins, regs, op);
1112 }
1113
1114 static unsigned
1115 bi_pack_fma(bi_clause *clause, bi_bundle bundle, bi_registers *regs)
1116 {
1117 if (!bundle.fma)
1118 return BIFROST_FMA_NOP;
1119
1120 switch (bundle.fma->type) {
1121 case BI_ADD:
1122 return bi_pack_fma_addmin(bundle.fma, regs);
1123 case BI_CMP:
1124 return bi_pack_fma_cmp(bundle.fma, regs);
1125 case BI_BITWISE:
1126 return bi_pack_fma_bitwise(bundle.fma, regs);
1127 case BI_CONVERT:
1128 return bi_pack_convert(bundle.fma, regs, true);
1129 case BI_CSEL:
1130 return bi_pack_fma_csel(bundle.fma, regs);
1131 case BI_FMA:
1132 return bi_pack_fma_fma(bundle.fma, regs);
1133 case BI_FREXP:
1134 return bi_pack_fma_frexp(bundle.fma, regs);
1135 case BI_IMATH:
1136 return bi_pack_fma_imath(bundle.fma, regs);
1137 case BI_MINMAX:
1138 return bi_pack_fma_addmin(bundle.fma, regs);
1139 case BI_MOV:
1140 return bi_pack_fma_1src(bundle.fma, regs, BIFROST_FMA_OP_MOV);
1141 case BI_SHIFT:
1142 unreachable("Packing todo");
1143 case BI_SELECT:
1144 return bi_pack_fma_select(bundle.fma, regs);
1145 case BI_ROUND:
1146 return bi_pack_fma_round(bundle.fma, regs);
1147 case BI_REDUCE_FMA:
1148 return bi_pack_fma_reduce(bundle.fma, regs);
1149 default:
1150 unreachable("Cannot encode class as FMA");
1151 }
1152 }
1153
1154 static unsigned
1155 bi_pack_add_ld_vary(bi_clause *clause, bi_instruction *ins, bi_registers *regs)
1156 {
1157 unsigned size = nir_alu_type_get_type_size(ins->dest_type);
1158 assert(size == 32 || size == 16);
1159
1160 unsigned op = (size == 32) ?
1161 BIFROST_ADD_OP_LD_VAR_32 :
1162 BIFROST_ADD_OP_LD_VAR_16;
1163
1164 unsigned packed_addr = 0;
1165
1166 if (ins->src[0] & BIR_INDEX_CONSTANT) {
1167 /* Direct accesses use the address field directly */
1168 packed_addr = bi_get_immediate(ins, 0);
1169 } else {
1170 /* Indirect gets an extra source */
1171 packed_addr = bi_get_src(ins, regs, 0) | 0b11000;
1172 }
1173
1174 /* The destination is thrown in the data register */
1175 assert(ins->dest & BIR_INDEX_REGISTER);
1176 clause->data_register = ins->dest & ~BIR_INDEX_REGISTER;
1177
1178 unsigned channels = ins->vector_channels;
1179 assert(channels >= 1 && channels <= 4);
1180
1181 struct bifrost_ld_var pack = {
1182 .src0 = bi_get_src(ins, regs, 1),
1183 .addr = packed_addr,
1184 .channels = MALI_POSITIVE(channels),
1185 .interp_mode = ins->load_vary.interp_mode,
1186 .reuse = ins->load_vary.reuse,
1187 .flat = ins->load_vary.flat,
1188 .op = op
1189 };
1190
1191 RETURN_PACKED(pack);
1192 }
1193
1194 static unsigned
1195 bi_pack_add_2src(bi_instruction *ins, bi_registers *regs, unsigned op)
1196 {
1197 struct bifrost_add_2src pack = {
1198 .src0 = bi_get_src(ins, regs, 0),
1199 .src1 = bi_get_src(ins, regs, 1),
1200 .op = op
1201 };
1202
1203 RETURN_PACKED(pack);
1204 }
1205
1206 static unsigned
1207 bi_pack_add_addmin_f32(bi_instruction *ins, bi_registers *regs)
1208 {
1209 unsigned op =
1210 (ins->type == BI_ADD) ? BIFROST_ADD_OP_FADD32 :
1211 (ins->op.minmax == BI_MINMAX_MIN) ? BIFROST_ADD_OP_FMIN32 :
1212 BIFROST_ADD_OP_FMAX32;
1213
1214 struct bifrost_add_faddmin pack = {
1215 .src0 = bi_get_src(ins, regs, 0),
1216 .src1 = bi_get_src(ins, regs, 1),
1217 .src0_abs = ins->src_abs[0],
1218 .src1_abs = ins->src_abs[1],
1219 .src0_neg = ins->src_neg[0],
1220 .src1_neg = ins->src_neg[1],
1221 .outmod = ins->outmod,
1222 .mode = (ins->type == BI_ADD) ? ins->roundmode : ins->minmax,
1223 .op = op
1224 };
1225
1226 RETURN_PACKED(pack);
1227 }
1228
1229 static unsigned
1230 bi_pack_add_add_f16(bi_instruction *ins, bi_registers *regs)
1231 {
1232 /* ADD.v2f16 can't have outmod */
1233 assert(ins->outmod == BIFROST_NONE);
1234
1235 struct bifrost_add_faddmin pack = {
1236 .src0 = bi_get_src(ins, regs, 0),
1237 .src1 = bi_get_src(ins, regs, 1),
1238 .src0_abs = ins->src_abs[0],
1239 .src1_abs = ins->src_abs[1],
1240 .src0_neg = ins->src_neg[0],
1241 .src1_neg = ins->src_neg[1],
1242 .select = bi_swiz16(ins, 0), /* swizzle_0 */
1243 .outmod = bi_swiz16(ins, 1), /* swizzle_1 */
1244 .mode = ins->roundmode,
1245 .op = BIFROST_ADD_OP_FADD16
1246 };
1247
1248 RETURN_PACKED(pack);
1249 }
1250
1251 static unsigned
1252 bi_pack_add_addmin(bi_instruction *ins, bi_registers *regs)
1253 {
1254 if (ins->dest_type == nir_type_float32)
1255 return bi_pack_add_addmin_f32(ins, regs);
1256 else if (ins->dest_type == nir_type_float16) {
1257 if (ins->type == BI_ADD)
1258 return bi_pack_add_add_f16(ins, regs);
1259 else
1260 return bi_pack_fmadd_min_f16(ins, regs, false);
1261 } else
1262 unreachable("Unknown FMA/ADD type");
1263 }
1264
1265 static unsigned
1266 bi_pack_add_ld_ubo(bi_clause *clause, bi_instruction *ins, bi_registers *regs)
1267 {
1268 assert(ins->vector_channels >= 1 && ins->vector_channels <= 4);
1269
1270 const unsigned ops[4] = {
1271 BIFROST_ADD_OP_LD_UBO_1,
1272 BIFROST_ADD_OP_LD_UBO_2,
1273 BIFROST_ADD_OP_LD_UBO_3,
1274 BIFROST_ADD_OP_LD_UBO_4
1275 };
1276
1277 bi_write_data_register(clause, ins);
1278 return bi_pack_add_2src(ins, regs, ops[ins->vector_channels - 1]);
1279 }
1280
1281 static enum bifrost_ldst_type
1282 bi_pack_ldst_type(nir_alu_type T)
1283 {
1284 switch (T) {
1285 case nir_type_float16: return BIFROST_LDST_F16;
1286 case nir_type_float32: return BIFROST_LDST_F32;
1287 case nir_type_int32: return BIFROST_LDST_I32;
1288 case nir_type_uint32: return BIFROST_LDST_U32;
1289 default: unreachable("Invalid type loaded");
1290 }
1291 }
1292
1293 static unsigned
1294 bi_pack_add_ld_var_addr(bi_clause *clause, bi_instruction *ins, bi_registers *regs)
1295 {
1296 struct bifrost_ld_var_addr pack = {
1297 .src0 = bi_get_src(ins, regs, 1),
1298 .src1 = bi_get_src(ins, regs, 2),
1299 .location = bi_get_immediate(ins, 0),
1300 .type = bi_pack_ldst_type(ins->src_types[3]),
1301 .op = BIFROST_ADD_OP_LD_VAR_ADDR
1302 };
1303
1304 bi_write_data_register(clause, ins);
1305 RETURN_PACKED(pack);
1306 }
1307
1308 static unsigned
1309 bi_pack_add_ld_attr(bi_clause *clause, bi_instruction *ins, bi_registers *regs)
1310 {
1311 assert(ins->vector_channels >= 0 && ins->vector_channels <= 4);
1312
1313 struct bifrost_ld_attr pack = {
1314 .src0 = bi_get_src(ins, regs, 1),
1315 .src1 = bi_get_src(ins, regs, 2),
1316 .location = bi_get_immediate(ins, 0),
1317 .channels = MALI_POSITIVE(ins->vector_channels),
1318 .type = bi_pack_ldst_type(ins->dest_type),
1319 .op = BIFROST_ADD_OP_LD_ATTR
1320 };
1321
1322 bi_write_data_register(clause, ins);
1323 RETURN_PACKED(pack);
1324 }
1325
1326 static unsigned
1327 bi_pack_add_st_vary(bi_clause *clause, bi_instruction *ins, bi_registers *regs)
1328 {
1329 assert(ins->vector_channels >= 1 && ins->vector_channels <= 4);
1330
1331 struct bifrost_st_vary pack = {
1332 .src0 = bi_get_src(ins, regs, 1),
1333 .src1 = bi_get_src(ins, regs, 2),
1334 .src2 = bi_get_src(ins, regs, 3),
1335 .channels = MALI_POSITIVE(ins->vector_channels),
1336 .op = BIFROST_ADD_OP_ST_VAR
1337 };
1338
1339 bi_read_data_register(clause, ins);
1340 RETURN_PACKED(pack);
1341 }
1342
1343 static unsigned
1344 bi_pack_add_atest(bi_clause *clause, bi_instruction *ins, bi_registers *regs)
1345 {
1346 bool fp16 = (ins->src_types[1] == nir_type_float16);
1347
1348 struct bifrost_add_atest pack = {
1349 .src0 = bi_get_src(ins, regs, 0),
1350 .src1 = bi_get_src(ins, regs, 1),
1351 .half = fp16,
1352 .component = fp16 ? ins->swizzle[1][0] : 1, /* Set for fp32 */
1353 .op = BIFROST_ADD_OP_ATEST,
1354 };
1355
1356 /* Despite *also* writing with the usual mechanism... quirky and
1357 * perhaps unnecessary, but let's match the blob */
1358 clause->data_register = ins->dest & ~BIR_INDEX_REGISTER;
1359
1360 RETURN_PACKED(pack);
1361 }
1362
1363 static unsigned
1364 bi_pack_add_blend(bi_clause *clause, bi_instruction *ins, bi_registers *regs)
1365 {
1366 struct bifrost_add_inst pack = {
1367 .src0 = bi_get_src(ins, regs, 1),
1368 .op = BIFROST_ADD_OP_BLEND
1369 };
1370
1371 /* TODO: Pack location in uniform_const */
1372 assert(ins->blend_location == 0);
1373
1374 bi_read_data_register(clause, ins);
1375 RETURN_PACKED(pack);
1376 }
1377
1378 static unsigned
1379 bi_pack_add_special(bi_instruction *ins, bi_registers *regs)
1380 {
1381 unsigned op = 0;
1382 bool fp16 = ins->dest_type == nir_type_float16;
1383 bool Y = ins->swizzle[0][0];
1384
1385 if (ins->op.special == BI_SPECIAL_FRCP) {
1386 op = fp16 ?
1387 (Y ? BIFROST_ADD_OP_FRCP_FAST_F16_Y :
1388 BIFROST_ADD_OP_FRCP_FAST_F16_X) :
1389 BIFROST_ADD_OP_FRCP_FAST_F32;
1390 } else if (ins->op.special == BI_SPECIAL_FRSQ) {
1391 op = fp16 ?
1392 (Y ? BIFROST_ADD_OP_FRSQ_FAST_F16_Y :
1393 BIFROST_ADD_OP_FRSQ_FAST_F16_X) :
1394 BIFROST_ADD_OP_FRSQ_FAST_F32;
1395
1396 } else if (ins->op.special == BI_SPECIAL_EXP2_LOW) {
1397 assert(!fp16);
1398 op = BIFROST_ADD_OP_FEXP2_FAST;
1399 } else {
1400 unreachable("Unknown special op");
1401 }
1402
1403 return bi_pack_add_1src(ins, regs, op);
1404 }
1405
1406 static unsigned
1407 bi_pack_add_table(bi_instruction *ins, bi_registers *regs)
1408 {
1409 unsigned op = 0;
1410 assert(ins->dest_type == nir_type_float32);
1411
1412 op = BIFROST_ADD_OP_LOG2_HELP;
1413 return bi_pack_add_1src(ins, regs, op);
1414 }
1415 static unsigned
1416 bi_pack_add_tex_compact(bi_clause *clause, bi_instruction *ins, bi_registers *regs, gl_shader_stage stage)
1417 {
1418 bool f16 = ins->dest_type == nir_type_float16;
1419 bool vtx = stage != MESA_SHADER_FRAGMENT;
1420
1421 struct bifrost_tex_compact pack = {
1422 .src0 = bi_get_src(ins, regs, 0),
1423 .src1 = bi_get_src(ins, regs, 1),
1424 .op = f16 ? BIFROST_ADD_OP_TEX_COMPACT_F16(vtx) :
1425 BIFROST_ADD_OP_TEX_COMPACT_F32(vtx),
1426 .compute_lod = !vtx,
1427 .tex_index = ins->texture.texture_index,
1428 .sampler_index = ins->texture.sampler_index
1429 };
1430
1431 bi_write_data_register(clause, ins);
1432 RETURN_PACKED(pack);
1433 }
1434
1435 static unsigned
1436 bi_pack_add_select(bi_instruction *ins, bi_registers *regs)
1437 {
1438 unsigned size = nir_alu_type_get_type_size(ins->src_types[0]);
1439 assert(size == 16);
1440
1441 unsigned swiz = (ins->swizzle[0][0] | (ins->swizzle[1][0] << 1));
1442 unsigned op = BIFROST_ADD_SEL_16(swiz);
1443 return bi_pack_add_2src(ins, regs, op);
1444 }
1445
1446 static enum bifrost_discard_cond
1447 bi_cond_to_discard(enum bi_cond cond, bool *flip)
1448 {
1449 switch (cond){
1450 case BI_COND_GT:
1451 *flip = true;
1452 /* fallthrough */
1453 case BI_COND_LT:
1454 return BIFROST_DISCARD_FLT;
1455 case BI_COND_GE:
1456 *flip = true;
1457 /* fallthrough */
1458 case BI_COND_LE:
1459 return BIFROST_DISCARD_FLE;
1460 case BI_COND_NE:
1461 return BIFROST_DISCARD_FNE;
1462 case BI_COND_EQ:
1463 return BIFROST_DISCARD_FEQ;
1464 default:
1465 unreachable("Invalid op for discard");
1466 }
1467 }
1468
1469 static unsigned
1470 bi_pack_add_discard(bi_instruction *ins, bi_registers *regs)
1471 {
1472 bool fp16 = ins->src_types[0] == nir_type_float16;
1473 assert(fp16 || ins->src_types[0] == nir_type_float32);
1474
1475 bool flip = false;
1476 enum bifrost_discard_cond cond = bi_cond_to_discard(ins->cond, &flip);
1477
1478 struct bifrost_add_discard pack = {
1479 .src0 = bi_get_src(ins, regs, flip ? 1 : 0),
1480 .src1 = bi_get_src(ins, regs, flip ? 0 : 1),
1481 .cond = cond,
1482 .src0_select = fp16 ? ins->swizzle[0][0] : 0,
1483 .src1_select = fp16 ? ins->swizzle[1][0] : 0,
1484 .fp32 = fp16 ? 0 : 1,
1485 .op = BIFROST_ADD_OP_DISCARD
1486 };
1487
1488 RETURN_PACKED(pack);
1489 }
1490
1491 static enum bifrost_icmp_cond
1492 bi_cond_to_icmp(enum bi_cond cond, bool *flip, bool is_unsigned, bool is_16)
1493 {
1494 switch (cond){
1495 case BI_COND_LT:
1496 *flip = true;
1497 /* fallthrough */
1498 case BI_COND_GT:
1499 return is_unsigned ? (is_16 ? BIFROST_ICMP_IGE : BIFROST_ICMP_UGT)
1500 : BIFROST_ICMP_IGT;
1501 case BI_COND_LE:
1502 *flip = true;
1503 /* fallthrough */
1504 case BI_COND_GE:
1505 return is_unsigned ? BIFROST_ICMP_UGE :
1506 (is_16 ? BIFROST_ICMP_UGT : BIFROST_ICMP_IGE);
1507 case BI_COND_NE:
1508 return BIFROST_ICMP_NEQ;
1509 case BI_COND_EQ:
1510 return BIFROST_ICMP_EQ;
1511 default:
1512 unreachable("Invalid op for icmp");
1513 }
1514 }
1515
1516 static unsigned
1517 bi_pack_add_icmp32(bi_instruction *ins, bi_registers *regs, bool flip,
1518 enum bifrost_icmp_cond cond)
1519 {
1520 struct bifrost_add_icmp pack = {
1521 .src0 = bi_get_src(ins, regs, flip ? 1 : 0),
1522 .src1 = bi_get_src(ins, regs, flip ? 0 : 1),
1523 .cond = cond,
1524 .sz = 1,
1525 .d3d = false,
1526 .op = BIFROST_ADD_OP_ICMP_32
1527 };
1528
1529 RETURN_PACKED(pack);
1530 }
1531
1532 static unsigned
1533 bi_pack_add_icmp16(bi_instruction *ins, bi_registers *regs, bool flip,
1534 enum bifrost_icmp_cond cond)
1535 {
1536 struct bifrost_add_icmp16 pack = {
1537 .src0 = bi_get_src(ins, regs, flip ? 1 : 0),
1538 .src1 = bi_get_src(ins, regs, flip ? 0 : 1),
1539 .src0_swizzle = bi_swiz16(ins, flip ? 1 : 0),
1540 .src1_swizzle = bi_swiz16(ins, flip ? 0 : 1),
1541 .cond = cond,
1542 .d3d = false,
1543 .op = BIFROST_ADD_OP_ICMP_16
1544 };
1545
1546 RETURN_PACKED(pack);
1547 }
1548
1549 static unsigned
1550 bi_pack_add_cmp(bi_instruction *ins, bi_registers *regs)
1551 {
1552 nir_alu_type Tl = ins->src_types[0];
1553 nir_alu_type Tr = ins->src_types[1];
1554 nir_alu_type Bl = nir_alu_type_get_base_type(Tl);
1555
1556 if (Bl == nir_type_uint || Bl == nir_type_int) {
1557 assert(Tl == Tr);
1558 unsigned sz = nir_alu_type_get_type_size(Tl);
1559
1560 bool flip = false;
1561
1562 enum bifrost_icmp_cond cond = bi_cond_to_icmp(
1563 sz == 16 ? /*bi_invert_cond*/(ins->cond) : ins->cond,
1564 &flip, Bl == nir_type_uint, sz == 16);
1565
1566 if (sz == 32)
1567 return bi_pack_add_icmp32(ins, regs, flip, cond);
1568 else if (sz == 16)
1569 return bi_pack_add_icmp16(ins, regs, flip, cond);
1570 else
1571 unreachable("TODO");
1572 } else {
1573 unreachable("TODO");
1574 }
1575 }
1576
1577 static unsigned
1578 bi_pack_add_imath(bi_instruction *ins, bi_registers *regs)
1579 {
1580 /* TODO: 32+16 add */
1581 assert(ins->src_types[0] == ins->src_types[1]);
1582 unsigned sz = nir_alu_type_get_type_size(ins->src_types[0]);
1583 enum bi_imath_op p = ins->op.imath;
1584
1585 unsigned op = 0;
1586
1587 if (sz == 8) {
1588 op = (p == BI_IMATH_ADD) ? BIFROST_ADD_IADD_8 :
1589 BIFROST_ADD_ISUB_8;
1590 } else if (sz == 16) {
1591 op = (p == BI_IMATH_ADD) ? BIFROST_ADD_IADD_16 :
1592 BIFROST_ADD_ISUB_16;
1593 } else if (sz == 32) {
1594 op = (p == BI_IMATH_ADD) ? BIFROST_ADD_IADD_32 :
1595 BIFROST_ADD_ISUB_32;
1596 } else {
1597 unreachable("64-bit todo");
1598 }
1599
1600 return bi_pack_add_2src(ins, regs, op);
1601 }
1602
1603 static unsigned
1604 bi_pack_add(bi_clause *clause, bi_bundle bundle, bi_registers *regs, gl_shader_stage stage)
1605 {
1606 if (!bundle.add)
1607 return BIFROST_ADD_NOP;
1608
1609 switch (bundle.add->type) {
1610 case BI_ADD:
1611 return bi_pack_add_addmin(bundle.add, regs);
1612 case BI_ATEST:
1613 return bi_pack_add_atest(clause, bundle.add, regs);
1614 case BI_BRANCH:
1615 unreachable("Packing todo");
1616 case BI_CMP:
1617 return bi_pack_add_cmp(bundle.add, regs);
1618 case BI_BLEND:
1619 return bi_pack_add_blend(clause, bundle.add, regs);
1620 case BI_BITWISE:
1621 unreachable("Packing todo");
1622 case BI_CONVERT:
1623 return bi_pack_convert(bundle.add, regs, false);
1624 case BI_DISCARD:
1625 return bi_pack_add_discard(bundle.add, regs);
1626 case BI_FREXP:
1627 unreachable("Packing todo");
1628 case BI_IMATH:
1629 return bi_pack_add_imath(bundle.add, regs);
1630 case BI_LOAD:
1631 unreachable("Packing todo");
1632 case BI_LOAD_ATTR:
1633 return bi_pack_add_ld_attr(clause, bundle.add, regs);
1634 case BI_LOAD_UNIFORM:
1635 return bi_pack_add_ld_ubo(clause, bundle.add, regs);
1636 case BI_LOAD_VAR:
1637 return bi_pack_add_ld_vary(clause, bundle.add, regs);
1638 case BI_LOAD_VAR_ADDRESS:
1639 return bi_pack_add_ld_var_addr(clause, bundle.add, regs);
1640 case BI_MINMAX:
1641 return bi_pack_add_addmin(bundle.add, regs);
1642 case BI_MOV:
1643 case BI_SHIFT:
1644 case BI_STORE:
1645 unreachable("Packing todo");
1646 case BI_STORE_VAR:
1647 return bi_pack_add_st_vary(clause, bundle.add, regs);
1648 case BI_SPECIAL:
1649 return bi_pack_add_special(bundle.add, regs);
1650 case BI_TABLE:
1651 return bi_pack_add_table(bundle.add, regs);
1652 case BI_SELECT:
1653 return bi_pack_add_select(bundle.add, regs);
1654 case BI_TEX:
1655 if (bundle.add->op.texture == BI_TEX_COMPACT)
1656 return bi_pack_add_tex_compact(clause, bundle.add, regs, stage);
1657 else
1658 unreachable("Unknown tex type");
1659 case BI_ROUND:
1660 unreachable("Packing todo");
1661 default:
1662 unreachable("Cannot encode class as ADD");
1663 }
1664 }
1665
1666 struct bi_packed_bundle {
1667 uint64_t lo;
1668 uint64_t hi;
1669 };
1670
1671 /* We must ensure port 1 > port 0 for the 63-x trick to function, so we fix
1672 * this up at pack time. (Scheduling doesn't care.) */
1673
1674 static void
1675 bi_flip_ports(bi_registers *regs)
1676 {
1677 if (regs->enabled[0] && regs->enabled[1] && regs->port[1] < regs->port[0]) {
1678 unsigned temp = regs->port[0];
1679 regs->port[0] = regs->port[1];
1680 regs->port[1] = temp;
1681 }
1682
1683 }
1684
1685 static struct bi_packed_bundle
1686 bi_pack_bundle(bi_clause *clause, bi_bundle bundle, bi_bundle prev, bool first_bundle, gl_shader_stage stage)
1687 {
1688 bi_assign_ports(&bundle, &prev);
1689 bi_assign_uniform_constant(clause, &bundle.regs, bundle);
1690 bundle.regs.first_instruction = first_bundle;
1691
1692 bi_flip_ports(&bundle.regs);
1693
1694 uint64_t reg = bi_pack_registers(bundle.regs);
1695 uint64_t fma = bi_pack_fma(clause, bundle, &bundle.regs);
1696 uint64_t add = bi_pack_add(clause, bundle, &bundle.regs, stage);
1697
1698 struct bi_packed_bundle packed = {
1699 .lo = reg | (fma << 35) | ((add & 0b111111) << 58),
1700 .hi = add >> 6
1701 };
1702
1703 return packed;
1704 }
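/* Judging by the shifts above, each packed bundle spans 78 bits: the 35-bit
 * register block, 23 bits of FMA and 20 bits of ADD, with the ADD field
 * straddling the lo/hi boundary (low 6 bits at the top of 'lo', the
 * remaining 14 in 'hi'). bi_pack_clause() then slices 'hi' further when
 * building the clause quadwords. */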
1705
1706 /* Packs the next two constants as a dedicated constant quadword at the end of
1707 * the clause, returning the number packed. */
1708
1709 static unsigned
1710 bi_pack_constants(bi_context *ctx, bi_clause *clause,
1711 unsigned index,
1712 struct util_dynarray *emission)
1713 {
1714 /* After these two, are we done? Determines tag */
1715 bool done = clause->constant_count <= (index + 2);
1716 bool only = clause->constant_count <= (index + 1);
1717
1718 /* TODO: Pos */
1719 assert(index == 0 && clause->bundle_count == 1);
1720 assert(only);
1721
1722 uint64_t hi = clause->constants[index + 0] >> 60ull;
1723
1724 struct bifrost_fmt_constant quad = {
1725 .pos = 0, /* TODO */
1726 .tag = done ? BIFROST_FMTC_FINAL : BIFROST_FMTC_CONSTANTS,
1727 .imm_1 = clause->constants[index + 0] >> 4,
1728 .imm_2 = ((hi < 8) ? (hi << 60ull) : 0) >> 4,
1729 };
1730
1731 /* XXX: On G71, Connor observed that the difference of the top 4 bits
1732 * of the second constant with the first must be less than 8, otherwise
1733 * we have to swap them. On G52, I'm able to reproduce a similar issue
1734 * but with a different workaround (modeled above with a single
1735 * constant; unclear how to work around it for multiple constants.) Further
1736 * investigation needed. Possibly an errata. XXX */
1737
1738 util_dynarray_append(emission, struct bifrost_fmt_constant, quad);
1739
1740 return 2;
1741 }
1742
1743 static void
1744 bi_pack_clause(bi_context *ctx, bi_clause *clause, bi_clause *next,
1745 struct util_dynarray *emission, gl_shader_stage stage)
1746 {
1747 struct bi_packed_bundle ins_1 = bi_pack_bundle(clause, clause->bundles[0], clause->bundles[0], true, stage);
1748 assert(clause->bundle_count == 1);
1749
1750 /* Used to decide if we elide writes */
1751 bool is_fragment = ctx->stage == MESA_SHADER_FRAGMENT;
1752
1753 /* State for packing constants throughout */
1754 unsigned constant_index = 0;
1755
1756 struct bifrost_fmt1 quad_1 = {
1757 .tag = clause->constant_count ? BIFROST_FMT1_CONSTANTS : BIFROST_FMT1_FINAL,
1758 .header = bi_pack_header(clause, next, is_fragment),
1759 .ins_1 = ins_1.lo,
1760 .ins_2 = ins_1.hi & ((1 << 11) - 1),
1761 .ins_0 = (ins_1.hi >> 11) & 0b111,
1762 };
1763
1764 util_dynarray_append(emission, struct bifrost_fmt1, quad_1);
1765
1766 /* Pack the remaining constants */
1767
1768 while (constant_index < clause->constant_count) {
1769 constant_index += bi_pack_constants(ctx, clause,
1770 constant_index, emission);
1771 }
1772 }
1773
1774 static bi_clause *
1775 bi_next_clause(bi_context *ctx, pan_block *block, bi_clause *clause)
1776 {
1777 /* Try the next clause in this block */
1778 if (clause->link.next != &((bi_block *) block)->clauses)
1779 return list_first_entry(&(clause->link), bi_clause, link);
1780
1781 /* Try the next block, or the one after that if it's empty, etc. */
1782 pan_block *next_block = pan_next_block(block);
1783
1784 bi_foreach_block_from(ctx, next_block, block) {
1785 bi_block *blk = (bi_block *) block;
1786
1787 if (!list_is_empty(&blk->clauses))
1788 return list_first_entry(&(blk->clauses), bi_clause, link);
1789 }
1790
1791 return NULL;
1792 }
1793
1794 void
1795 bi_pack(bi_context *ctx, struct util_dynarray *emission)
1796 {
1797 util_dynarray_init(emission, NULL);
1798
1799 bi_foreach_block(ctx, _block) {
1800 bi_block *block = (bi_block *) _block;
1801
1802 bi_foreach_clause_in_block(block, clause) {
1803 bi_clause *next = bi_next_clause(ctx, _block, clause);
1804 bi_pack_clause(ctx, clause, next, emission, ctx->stage);
1805 }
1806 }
1807 }