5719e66a58dbcb1bf34564ca2c77c65e1e18e6d5
[mesa.git] / src / panfrost / bifrost / bi_pack.c
1 /*
2 * Copyright (C) 2020 Collabora, Ltd.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 */
23
24 #include "compiler.h"
25 #include "bi_print.h"
26
/* Copies a packed bitfield struct into a zero-extended 64-bit word and
 * returns it from the enclosing function. The memcpy avoids type-punning /
 * strict-aliasing issues; it is only valid for structs of at most 8 bytes
 * (larger ones would overflow `temp`). Expands to a `return`, so it must be
 * the final statement of the caller. */
#define RETURN_PACKED(str) { \
        uint64_t temp = 0; \
        memcpy(&temp, &str, sizeof(str)); \
        return temp; \
}
32
33 /* This file contains the final passes of the compiler. Running after
34 * scheduling and RA, the IR is now finalized, so we need to emit it to actual
35 * bits on the wire (as well as fixup branches) */
36
37 static uint64_t
38 bi_pack_header(bi_clause *clause, bi_clause *next, bool is_fragment)
39 {
40 struct bifrost_header header = {
41 .back_to_back = clause->back_to_back,
42 .no_end_of_shader = (next != NULL),
43 .elide_writes = is_fragment,
44 .branch_cond = clause->branch_conditional,
45 .datareg_writebarrier = clause->data_register_write_barrier,
46 .datareg = clause->data_register,
47 .scoreboard_deps = next ? next->dependencies : 0,
48 .scoreboard_index = clause->scoreboard_id,
49 .clause_type = clause->clause_type,
50 .next_clause_type = next ? next->clause_type : 0,
51 .suppress_inf = true,
52 .suppress_nan = true,
53 };
54
55 header.branch_cond |= header.back_to_back;
56
57 uint64_t u = 0;
58 memcpy(&u, &header, sizeof(header));
59 return u;
60 }
61
/* Represents the assignment of ports for a given bundle. Ports 0/1 are read
 * ports, port 2 is the primary write port, and port 3 is either an extra
 * read port or a secondary write port depending on the flags below. */

struct bi_registers {
        /* Register to assign to each port */
        unsigned port[4];

        /* Read ports can be disabled */
        bool enabled[2];

        /* Should we write FMA? what about ADD? If only a single port is
         * enabled it is in port 2, else ADD/FMA is 2/3 respectively */
        bool write_fma, write_add;

        /* Should we read with port 3? */
        bool read_port3;

        /* Packed uniform/constant (the per-bundle uniform/const slot) */
        uint8_t uniform_constant;

        /* Whether writes are actually for the last instruction */
        bool first_instruction;
};
84
85 static inline void
86 bi_print_ports(struct bi_registers *regs)
87 {
88 for (unsigned i = 0; i < 2; ++i) {
89 if (regs->enabled[i])
90 printf("port %u: %u\n", i, regs->port[i]);
91 }
92
93 if (regs->write_fma || regs->write_add) {
94 printf("port 2 (%s): %u\n",
95 regs->write_add ? "ADD" : "FMA",
96 regs->port[2]);
97 }
98
99 if ((regs->write_fma && regs->write_add) || regs->read_port3) {
100 printf("port 3 (%s): %u\n",
101 regs->read_port3 ? "read" : "FMA",
102 regs->port[3]);
103 }
104 }
105
106 /* The uniform/constant slot allows loading a contiguous 64-bit immediate or
107 * pushed uniform per bundle. Figure out which one we need in the bundle (the
108 * scheduler needs to ensure we only have one type per bundle), validate
109 * everything, and rewrite away the register/uniform indices to use 3-bit
110 * sources directly. */
111
/* Finds the index of the clause constant containing `cons`. Matching is on
 * the top 60 bits only, because the low 4 bits are encoded inline in the
 * bundle. For narrow (<64-bit) constants the high half of each 64-bit slot
 * is also a candidate; *hi is set when the match was against the high half.
 * Aborts if nothing matches — that indicates a scheduler bug, since the
 * scheduler is responsible for populating the constant table. */
static unsigned
bi_lookup_constant(bi_clause *clause, uint64_t cons, bool *hi, bool b64)
{
        uint64_t want = (cons >> 4);

        for (unsigned i = 0; i < clause->constant_count; ++i) {
                /* Only check top 60-bits since that's what's actually embedded
                 * in the clause, the bottom 4-bits are bundle-inline */

                uint64_t candidates[2] = {
                        clause->constants[i] >> 4,
                        clause->constants[i] >> 36
                };

                /* For <64-bit mode, we treat lo/hi separately */

                if (!b64)
                        candidates[0] &= (0xFFFFFFFF >> 4);

                if (candidates[0] == want)
                        return i;

                /* High-half matches only make sense for narrow constants */
                if (candidates[1] == want && !b64) {
                        *hi = true;
                        return i;
                }
        }

        unreachable("Invalid constant accessed");
}
142
/* Maps a constant-table index (0..5) to its encoded field value, already
 * shifted above the low inline nibble of the uniform/constant selector. */
static unsigned
bi_constant_field(unsigned idx)
{
        assert(idx <= 5);

        switch (idx) {
        case 0: return 4 << 4;
        case 1: return 5 << 4;
        case 2: return 6 << 4;
        case 3: return 7 << 4;
        case 4: return 2 << 4;
        default: return 3 << 4;
        }
}
154
155 static bool
156 bi_assign_uniform_constant_single(
157 struct bi_registers *regs,
158 bi_clause *clause,
159 bi_instruction *ins, bool assigned, bool fast_zero)
160 {
161 if (!ins)
162 return assigned;
163
164 if (ins->type == BI_BLEND) {
165 assert(!assigned);
166 regs->uniform_constant = 0x8;
167 return true;
168 }
169
170 bi_foreach_src(ins, s) {
171 if (s == 0 && (ins->type == BI_LOAD_VAR_ADDRESS || ins->type == BI_LOAD_ATTR)) continue;
172
173 if (ins->src[s] & BIR_INDEX_CONSTANT) {
174 /* Let direct addresses through */
175 if (ins->type == BI_LOAD_VAR)
176 continue;
177
178 bool hi = false;
179 bool b64 = nir_alu_type_get_type_size(ins->src_types[s]) > 32;
180 uint64_t cons = bi_get_immediate(ins, s);
181 unsigned idx = bi_lookup_constant(clause, cons, &hi, b64);
182 unsigned lo = clause->constants[idx] & 0xF;
183 unsigned f = bi_constant_field(idx) | lo;
184
185 if (assigned && regs->uniform_constant != f)
186 unreachable("Mismatched uniform/const field: imm");
187
188 regs->uniform_constant = f;
189 ins->src[s] = BIR_INDEX_PASS | (hi ? BIFROST_SRC_CONST_HI : BIFROST_SRC_CONST_LO);
190 assigned = true;
191 } else if (ins->src[s] & BIR_INDEX_ZERO && (ins->type == BI_LOAD_UNIFORM || ins->type == BI_LOAD_VAR)) {
192 /* XXX: HACK UNTIL WE HAVE HI MATCHING DUE TO OVERFLOW XXX */
193 ins->src[s] = BIR_INDEX_PASS | BIFROST_SRC_CONST_HI;
194 } else if (ins->src[s] & BIR_INDEX_ZERO && !fast_zero) {
195 /* FMAs have a fast zero port, ADD needs to use the
196 * uniform/const port's special 0 mode handled here */
197 unsigned f = 0;
198
199 if (assigned && regs->uniform_constant != f)
200 unreachable("Mismatched uniform/const field: 0");
201
202 regs->uniform_constant = f;
203 ins->src[s] = BIR_INDEX_PASS | BIFROST_SRC_CONST_LO;
204 assigned = true;
205 } else if (ins->src[s] & BIR_INDEX_ZERO && fast_zero) {
206 ins->src[s] = BIR_INDEX_PASS | BIFROST_SRC_STAGE;
207 } else if (s & BIR_INDEX_UNIFORM) {
208 unreachable("Push uniforms not implemented yet");
209 }
210 }
211
212 return assigned;
213 }
214
215 static void
216 bi_assign_uniform_constant(
217 bi_clause *clause,
218 struct bi_registers *regs,
219 bi_bundle bundle)
220 {
221 bool assigned =
222 bi_assign_uniform_constant_single(regs, clause, bundle.fma, false, true);
223
224 bi_assign_uniform_constant_single(regs, clause, bundle.add, assigned, false);
225 }
226
227 /* Assigns a port for reading, before anything is written */
228
229 static void
230 bi_assign_port_read(struct bi_registers *regs, unsigned src)
231 {
232 /* We only assign for registers */
233 if (!(src & BIR_INDEX_REGISTER))
234 return;
235
236 unsigned reg = src & ~BIR_INDEX_REGISTER;
237
238 /* Check if we already assigned the port */
239 for (unsigned i = 0; i <= 1; ++i) {
240 if (regs->port[i] == reg && regs->enabled[i])
241 return;
242 }
243
244 if (regs->port[3] == reg && regs->read_port3)
245 return;
246
247 /* Assign it now */
248
249 for (unsigned i = 0; i <= 1; ++i) {
250 if (!regs->enabled[i]) {
251 regs->port[i] = reg;
252 regs->enabled[i] = true;
253 return;
254 }
255 }
256
257 if (!regs->read_port3) {
258 regs->port[3] = reg;
259 regs->read_port3 = true;
260 return;
261 }
262
263 bi_print_ports(regs);
264 unreachable("Failed to find a free port for src");
265 }
266
/* Computes the full port assignment for the bundle `now`. Writes land one
 * bundle later, so the write ports carry the destinations of `prev` while
 * `now`'s sources are being read. Data-register operands bypass the port
 * machinery entirely. */
static struct bi_registers
bi_assign_ports(bi_bundle now, bi_bundle prev)
{
        struct bi_registers regs = { 0 };

        /* We assign ports for the main register mechanism. Special ops
         * use the data registers, which has its own mechanism entirely
         * and thus gets skipped over here. */

        unsigned read_dreg = now.add &&
                bi_class_props[now.add->type] & BI_DATA_REG_SRC;

        unsigned write_dreg = prev.add &&
                bi_class_props[prev.add->type] & BI_DATA_REG_DEST;

        /* First, assign reads */

        if (now.fma)
                bi_foreach_src(now.fma, src)
                        bi_assign_port_read(&regs, now.fma->src[src]);

        if (now.add) {
                bi_foreach_src(now.add, src) {
                        /* src0 of a data-register op is read via the data
                         * register, not a port */
                        if (!(src == 0 && read_dreg))
                                bi_assign_port_read(&regs, now.add->src[src]);
                }
        }

        /* Next, assign writes */

        if (prev.add && prev.add->dest & BIR_INDEX_REGISTER && !write_dreg) {
                regs.port[2] = prev.add->dest & ~BIR_INDEX_REGISTER;
                regs.write_add = true;
        }

        if (prev.fma && prev.fma->dest & BIR_INDEX_REGISTER) {
                unsigned r = prev.fma->dest & ~BIR_INDEX_REGISTER;

                if (regs.write_add) {
                        /* Scheduler constraint: cannot read 3 and write 2 */
                        assert(!regs.read_port3);
                        regs.port[3] = r;
                } else {
                        regs.port[2] = r;
                }

                regs.write_fma = true;
        }

        /* Finally, ensure port 1 > port 0 for the 63-x trick to function */

        if (regs.enabled[0] && regs.enabled[1] && regs.port[1] < regs.port[0]) {
                unsigned temp = regs.port[0];
                regs.port[0] = regs.port[1];
                regs.port[1] = temp;
        }

        return regs;
}
326
327 /* Determines the register control field, ignoring the first? flag */
328
329 static enum bifrost_reg_control
330 bi_pack_register_ctrl_lo(struct bi_registers r)
331 {
332 if (r.write_fma) {
333 if (r.write_add) {
334 assert(!r.read_port3);
335 return BIFROST_WRITE_ADD_P2_FMA_P3;
336 } else {
337 if (r.read_port3)
338 return BIFROST_WRITE_FMA_P2_READ_P3;
339 else
340 return BIFROST_WRITE_FMA_P2;
341 }
342 } else if (r.write_add) {
343 if (r.read_port3)
344 return BIFROST_WRITE_ADD_P2_READ_P3;
345 else
346 return BIFROST_WRITE_ADD_P2;
347 } else if (r.read_port3)
348 return BIFROST_READ_P3;
349 else
350 return BIFROST_REG_NONE;
351 }
352
353 /* Ditto but account for the first? flag this time */
354
355 static enum bifrost_reg_control
356 bi_pack_register_ctrl(struct bi_registers r)
357 {
358 enum bifrost_reg_control ctrl = bi_pack_register_ctrl_lo(r);
359
360 if (r.first_instruction) {
361 if (ctrl == BIFROST_REG_NONE)
362 ctrl = BIFROST_FIRST_NONE;
363 else if (ctrl == BIFROST_WRITE_FMA_P2_READ_P3)
364 ctrl = BIFROST_FIRST_WRITE_FMA_P2_READ_P3;
365 else
366 ctrl |= BIFROST_FIRST_NONE;
367 }
368
369 return ctrl;
370 }
371
/* Packs the register block (read/write ports, control field and the
 * uniform/constant selector) into its wire format. The "63-x" trick (see
 * docs/disasm) reclaims a bit by storing complemented port numbers when
 * port 0 exceeds 31, which requires port[1] > port[0]. */
static uint64_t
bi_pack_registers(struct bi_registers regs)
{
        enum bifrost_reg_control ctrl = bi_pack_register_ctrl(regs);
        struct bifrost_regs s = { 0 };
        uint64_t packed = 0;

        if (regs.enabled[1]) {
                /* Gotta save that bit!~ Required by the 63-x trick */
                assert(regs.port[1] > regs.port[0]);
                assert(regs.enabled[0]);

                /* Do the 63-x trick, see docs/disasm */
                if (regs.port[0] > 31) {
                        regs.port[0] = 63 - regs.port[0];
                        regs.port[1] = 63 - regs.port[1];
                }

                assert(regs.port[0] <= 31);
                assert(regs.port[1] <= 63);

                s.ctrl = ctrl;
                s.reg1 = regs.port[1];
                s.reg0 = regs.port[0];
        } else {
                /* Port 1 disabled, so set to zero and use port 1 for ctrl */
                s.ctrl = 0;
                s.reg1 = ctrl << 2;

                if (regs.enabled[0]) {
                        /* Bit 0 upper bit of port 0 */
                        s.reg1 |= (regs.port[0] >> 5);

                        /* Rest of port 0 in usual spot */
                        s.reg0 = (regs.port[0] & 0b11111);
                } else {
                        /* Bit 1 set if port 0 also disabled */
                        s.reg1 |= (1 << 1);
                }
        }

        /* When port 3 isn't used, we have to set it to port 2, and vice versa,
         * or INSTR_INVALID_ENC is raised. The reason is unknown. */

        bool has_port2 = regs.write_fma || regs.write_add;
        bool has_port3 = regs.read_port3 || (regs.write_fma && regs.write_add);

        if (!has_port3)
                regs.port[3] = regs.port[2];

        if (!has_port2)
                regs.port[2] = regs.port[3];

        s.reg3 = regs.port[3];
        s.reg2 = regs.port[2];
        s.uniform_const = regs.uniform_constant;

        memcpy(&packed, &s, sizeof(s));
        return packed;
}
432
/* Records the hardware register backing `idx` as the clause's data
 * register. `idx` must be a register-typed IR index (0..63 after
 * stripping the tag bit). */
static void
bi_set_data_register(bi_clause *clause, unsigned idx)
{
        assert(idx & BIR_INDEX_REGISTER);
        unsigned reg = idx & ~BIR_INDEX_REGISTER;
        assert(reg <= 63);
        clause->data_register = reg;
}
441
/* Data-register-reading ops take their staged value from src[0] */
static void
bi_read_data_register(bi_clause *clause, bi_instruction *ins)
{
        bi_set_data_register(clause, ins->src[0]);
}
447
/* Data-register-writing ops deposit their result into the destination */
static void
bi_write_data_register(bi_clause *clause, bi_instruction *ins)
{
        bi_set_data_register(clause, ins->dest);
}
453
454 static enum bifrost_packed_src
455 bi_get_src_reg_port(struct bi_registers *regs, unsigned src)
456 {
457 unsigned reg = src & ~BIR_INDEX_REGISTER;
458
459 if (regs->port[0] == reg && regs->enabled[0])
460 return BIFROST_SRC_PORT0;
461 else if (regs->port[1] == reg && regs->enabled[1])
462 return BIFROST_SRC_PORT1;
463 else if (regs->port[3] == reg && regs->read_port3)
464 return BIFROST_SRC_PORT3;
465 else
466 unreachable("Tried to access register with no port");
467 }
468
469 static enum bifrost_packed_src
470 bi_get_src(bi_instruction *ins, struct bi_registers *regs, unsigned s)
471 {
472 unsigned src = ins->src[s];
473
474 if (src & BIR_INDEX_REGISTER)
475 return bi_get_src_reg_port(regs, src);
476 else if (src & BIR_INDEX_PASS)
477 return src & ~BIR_INDEX_PASS;
478 else {
479 bi_print_instruction(ins, stderr);
480 unreachable("Unknown src in above instruction");
481 }
482 }
483
484 /* Constructs a packed 2-bit swizzle for a 16-bit vec2 source. Source must be
485 * 16-bit and written components must correspond to valid swizzles (component x
486 * or y). */
487
488 static unsigned
489 bi_swiz16(bi_instruction *ins, unsigned src)
490 {
491 assert(nir_alu_type_get_type_size(ins->src_types[src]) == 16);
492 unsigned swizzle = 0;
493
494 for (unsigned c = 0; c < 2; ++c) {
495 if (!bi_writes_component(ins, src)) continue;
496
497 unsigned k = ins->swizzle[src][c];
498 assert(k <= 1);
499 swizzle |= (k << c);
500 }
501
502 return swizzle;
503 }
504
/* Packs an FMA-slot fused multiply-add. Three encodings exist: MSCALE
 * (multiply-add with a scale operand), fp32 FMA, and fp16 (vec2) FMA. */
static unsigned
bi_pack_fma_fma(bi_instruction *ins, struct bi_registers *regs)
{
        /* (-a)(-b) = ab, so we only need one negate bit */
        bool negate_mul = ins->src_neg[0] ^ ins->src_neg[1];

        if (ins->op.mscale) {
                assert(!(ins->src_abs[0] && ins->src_abs[1]));
                assert(!ins->src_abs[2] || !ins->src_neg[3] || !ins->src_abs[3]);

                /* We can have exactly one abs, and can flip the multiplication
                 * to make it fit if we have to */
                bool flip_ab = ins->src_abs[1];

                struct bifrost_fma_mscale pack = {
                        .src0 = bi_get_src(ins, regs, flip_ab ? 1 : 0),
                        .src1 = bi_get_src(ins, regs, flip_ab ? 0 : 1),
                        .src2 = bi_get_src(ins, regs, 2),
                        .src3 = bi_get_src(ins, regs, 3),
                        .mscale_mode = 0,
                        .mode = ins->outmod,
                        .src0_abs = ins->src_abs[0] || ins->src_abs[1],
                        .src1_neg = negate_mul,
                        .src2_neg = ins->src_neg[2],
                        .op = BIFROST_FMA_OP_MSCALE,
                };

                RETURN_PACKED(pack);
        } else if (ins->dest_type == nir_type_float32) {
                struct bifrost_fma_fma pack = {
                        .src0 = bi_get_src(ins, regs, 0),
                        .src1 = bi_get_src(ins, regs, 1),
                        .src2 = bi_get_src(ins, regs, 2),
                        .src0_abs = ins->src_abs[0],
                        .src1_abs = ins->src_abs[1],
                        .src2_abs = ins->src_abs[2],
                        /* Folded multiply negate goes on src0 */
                        .src0_neg = negate_mul,
                        .src2_neg = ins->src_neg[2],
                        .outmod = ins->outmod,
                        .roundmode = ins->roundmode,
                        .op = BIFROST_FMA_OP_FMA
                };

                RETURN_PACKED(pack);
        } else if (ins->dest_type == nir_type_float16) {
                struct bifrost_fma_fma16 pack = {
                        .src0 = bi_get_src(ins, regs, 0),
                        .src1 = bi_get_src(ins, regs, 1),
                        .src2 = bi_get_src(ins, regs, 2),
                        .swizzle_0 = bi_swiz16(ins, 0),
                        .swizzle_1 = bi_swiz16(ins, 1),
                        .swizzle_2 = bi_swiz16(ins, 2),
                        .src0_neg = negate_mul,
                        .src2_neg = ins->src_neg[2],
                        .outmod = ins->outmod,
                        .roundmode = ins->roundmode,
                        .op = BIFROST_FMA_OP_FMA16
                };

                RETURN_PACKED(pack);
        } else {
                unreachable("Invalid fma dest type");
        }
}
569
/* Packs fp32 FADD/FMIN/FMAX on the FMA slot. For min/max, the roundmode
 * field is repurposed to carry the min/max selection mode. */
static unsigned
bi_pack_fma_addmin_f32(bi_instruction *ins, struct bi_registers *regs)
{
        unsigned op =
                (ins->type == BI_ADD) ? BIFROST_FMA_OP_FADD32 :
                (ins->op.minmax == BI_MINMAX_MIN) ? BIFROST_FMA_OP_FMIN32 :
                BIFROST_FMA_OP_FMAX32;

        struct bifrost_fma_add pack = {
                .src0 = bi_get_src(ins, regs, 0),
                .src1 = bi_get_src(ins, regs, 1),
                .src0_abs = ins->src_abs[0],
                .src1_abs = ins->src_abs[1],
                .src0_neg = ins->src_neg[0],
                .src1_neg = ins->src_neg[1],
                .unk = 0x0,
                .outmod = ins->outmod,
                /* min/max reuse the roundmode bits for their mode */
                .roundmode = (ins->type == BI_ADD) ? ins->roundmode : ins->minmax,
                .op = op
        };

        RETURN_PACKED(pack);
}
593
/* Computes the auxiliary bit `l` (returned) and the operand-swap flag
 * (*flip) that together encode fp16 absolute values; see the derivation
 * below for the three cases. */
static bool
bi_pack_fp16_abs(bi_instruction *ins, struct bi_registers *regs, bool *flip)
{
        /* Absolute values are packed in a quirky way. Let k = src1 < src0. Let
         * l be an auxiliary bit we encode. Then the hardware determines:
         *
         *      abs0 = l || k
         *      abs1 = l && k
         *
         * Since add/min/max are commutative, this saves a bit by using the
         * order of the operands as a bit (k). To pack this, first note:
         *
         *      (l && k) implies (l || k).
         *
         * That is, if the second argument is abs'd, then the first argument
         * also has abs. So there are three cases:
         *
         * Case 0: Neither src has absolute value. Then we have l = k = 0.
         *
         * Case 1: Exactly one src has absolute value. Assign that source to
         * src0 and the other source to src1. Compute k = src1 < src0 based on
         * that assignment. Then l = ~k.
         *
         * Case 2: Both sources have absolute value. Then we have l = k = 1.
         * Note to force k = 1 requires that (src1 < src0) OR (src0 < src1).
         * That is, this encoding is only valid if src1 and src0 are distinct.
         * This is a scheduling restriction (XXX); if an op of this type
         * requires both identical sources to have abs value, then we must
         * schedule to ADD (which does not use this ordering trick).
         */

        unsigned abs_0 = ins->src_abs[0], abs_1 = ins->src_abs[1];
        unsigned src_0 = bi_get_src(ins, regs, 0);
        unsigned src_1 = bi_get_src(ins, regs, 1);

        assert(!(abs_0 && abs_1 && src_0 == src_1));

        if (!abs_0 && !abs_1) {
                /* Case 0: force k = 0 <===> NOT(src1 < src0) */
                *flip = (src_1 < src_0);
                return false;
        } else if (abs_0 && !abs_1) {
                /* Case 1: abs already on src0 */
                return src_1 >= src_0;
        } else if (abs_1 && !abs_0) {
                /* Case 1: move the abs onto src0 by swapping */
                *flip = true;
                return src_0 >= src_1;
        } else {
                /* Case 2: order the (distinct) sources so that k = 1 */
                *flip = !(src_1 < src_0);
                return true;
        }
}
645
/* Packs fp16 add/min/max for either slot (FMA=true selects the FMA-slot
 * encoding). Operands may be swapped (flip) to satisfy the fp16 abs
 * encoding; negates and swizzles travel with their operands. */
static unsigned
bi_pack_fmadd_min_f16(bi_instruction *ins, struct bi_registers *regs, bool FMA)
{
        unsigned op =
                (!FMA) ? ((ins->op.minmax == BI_MINMAX_MIN) ?
                        BIFROST_ADD_OP_FMIN16 : BIFROST_ADD_OP_FMAX16) :
                (ins->type == BI_ADD) ? BIFROST_FMA_OP_FADD16 :
                (ins->op.minmax == BI_MINMAX_MIN) ? BIFROST_FMA_OP_FMIN16 :
                BIFROST_FMA_OP_FMAX16;

        bool flip = false;
        bool l = bi_pack_fp16_abs(ins, regs, &flip);
        unsigned src_0 = bi_get_src(ins, regs, 0);
        unsigned src_1 = bi_get_src(ins, regs, 1);

        if (FMA) {
                struct bifrost_fma_add_minmax16 pack = {
                        .src0 = flip ? src_1 : src_0,
                        .src1 = flip ? src_0 : src_1,
                        .src0_neg = ins->src_neg[flip ? 1 : 0],
                        .src1_neg = ins->src_neg[flip ? 0 : 1],
                        .src0_swizzle = bi_swiz16(ins, flip ? 1 : 0),
                        .src1_swizzle = bi_swiz16(ins, flip ? 0 : 1),
                        .abs1 = l,
                        .outmod = ins->outmod,
                        /* min/max reuse the mode field for their mode */
                        .mode = (ins->type == BI_ADD) ? ins->roundmode : ins->minmax,
                        .op = op
                };

                RETURN_PACKED(pack);
        } else {
                /* Can't have modes for fp16 */
                assert(ins->outmod == 0);

                struct bifrost_add_fmin16 pack = {
                        .src0 = flip ? src_1 : src_0,
                        .src1 = flip ? src_0 : src_1,
                        .src0_neg = ins->src_neg[flip ? 1 : 0],
                        .src1_neg = ins->src_neg[flip ? 0 : 1],
                        .abs1 = l,
                        .src0_swizzle = bi_swiz16(ins, flip ? 1 : 0),
                        .src1_swizzle = bi_swiz16(ins, flip ? 0 : 1),
                        .mode = ins->minmax,
                        .op = op
                };

                RETURN_PACKED(pack);
        }
}
695
696 static unsigned
697 bi_pack_fma_addmin(bi_instruction *ins, struct bi_registers *regs)
698 {
699 if (ins->dest_type == nir_type_float32)
700 return bi_pack_fma_addmin_f32(ins, regs);
701 else if(ins->dest_type == nir_type_float16)
702 return bi_pack_fmadd_min_f16(ins, regs, true);
703 else
704 unreachable("Unknown FMA/ADD type");
705 }
706
/* Packs a generic one-source FMA-slot instruction with opcode `op` */
static unsigned
bi_pack_fma_1src(bi_instruction *ins, struct bi_registers *regs, unsigned op)
{
        struct bifrost_fma_inst pack = {
                .src0 = bi_get_src(ins, regs, 0),
                .op = op
        };

        RETURN_PACKED(pack);
}
717
/* Packs a generic two-source FMA-slot instruction with opcode `op` */
static unsigned
bi_pack_fma_2src(bi_instruction *ins, struct bi_registers *regs, unsigned op)
{
        struct bifrost_fma_2src pack = {
                .src0 = bi_get_src(ins, regs, 0),
                .src1 = bi_get_src(ins, regs, 1),
                .op = op
        };

        RETURN_PACKED(pack);
}
729
/* Packs a generic one-source ADD-slot instruction with opcode `op` */
static unsigned
bi_pack_add_1src(bi_instruction *ins, struct bi_registers *regs, unsigned op)
{
        struct bifrost_add_inst pack = {
                .src0 = bi_get_src(ins, regs, 0),
                .op = op
        };

        RETURN_PACKED(pack);
}
740
/* Translates an IR condition into a hardware csel condition, picking the
 * float/int/uint variant from T. Hardware only provides GT/GE/EQ, so LT/LE
 * are encoded by swapping the comparison operands (*flip) and NE by
 * swapping the result operands (*invert). */
static enum bifrost_csel_cond
bi_cond_to_csel(enum bi_cond cond, bool *flip, bool *invert, nir_alu_type T)
{
        nir_alu_type B = nir_alu_type_get_base_type(T);
        unsigned idx = (B == nir_type_float) ? 0 :
                ((B == nir_type_int) ? 1 : 2);

        switch (cond){
        case BI_COND_LT:
                *flip = true;
                /* fallthrough */
        case BI_COND_GT: {
                const enum bifrost_csel_cond ops[] = {
                        BIFROST_FGT_F,
                        BIFROST_IGT_I,
                        BIFROST_UGT_I
                };

                return ops[idx];
        }
        case BI_COND_LE:
                *flip = true;
                /* fallthrough */
        case BI_COND_GE: {
                const enum bifrost_csel_cond ops[] = {
                        BIFROST_FGE_F,
                        BIFROST_IGE_I,
                        BIFROST_UGE_I
                };

                return ops[idx];
        }
        case BI_COND_NE:
                *invert = true;
                /* fallthrough */
        case BI_COND_EQ: {
                const enum bifrost_csel_cond ops[] = {
                        BIFROST_FEQ_F,
                        BIFROST_IEQ_F,
                        BIFROST_IEQ_F /* sign is irrelevant */
                };

                return ops[idx];
        }
        default:
                unreachable("Invalid op for csel");
        }
}
786
/* Packs a 4-source conditional select on the FMA slot. The comparison
 * operands are swapped for LT/LE (flip) and the result operands swapped
 * for NE (invert), per bi_cond_to_csel. */
static unsigned
bi_pack_fma_csel(bi_instruction *ins, struct bi_registers *regs)
{
        /* TODO: Use csel3 as well */
        bool flip = false, invert = false;

        enum bifrost_csel_cond cond =
                bi_cond_to_csel(ins->cond, &flip, &invert, ins->src_types[0]);

        unsigned size = nir_alu_type_get_type_size(ins->dest_type);

        /* srcs 0/1 are compared; srcs 2/3 are selected between */
        unsigned cmp_0 = (flip ? 1 : 0);
        unsigned cmp_1 = (flip ? 0 : 1);
        unsigned res_0 = (invert ? 3 : 2);
        unsigned res_1 = (invert ? 2 : 3);

        struct bifrost_csel4 pack = {
                .src0 = bi_get_src(ins, regs, cmp_0),
                .src1 = bi_get_src(ins, regs, cmp_1),
                .src2 = bi_get_src(ins, regs, res_0),
                .src3 = bi_get_src(ins, regs, res_1),
                .cond = cond,
                .op = (size == 16) ? BIFROST_FMA_OP_CSEL4_V16 :
                        BIFROST_FMA_OP_CSEL4
        };

        RETURN_PACKED(pack);
}
815
816 static unsigned
817 bi_pack_fma_frexp(bi_instruction *ins, struct bi_registers *regs)
818 {
819 unsigned op = BIFROST_FMA_OP_FREXPE_LOG;
820 return bi_pack_fma_1src(ins, regs, op);
821 }
822
823 static unsigned
824 bi_pack_fma_reduce(bi_instruction *ins, struct bi_registers *regs)
825 {
826 if (ins->op.reduce == BI_REDUCE_ADD_FREXPM) {
827 return bi_pack_fma_2src(ins, regs, BIFROST_FMA_OP_ADD_FREXPM);
828 } else {
829 unreachable("Invalid reduce op");
830 }
831 }
832
/* We have a single convert opcode in the IR but a number of opcodes that could
 * come out. In particular we have native opcodes for:
 *
 * [ui]16 --> [fui]32           -- int16_to_32
 * f16     --> f32              -- float16_to_32
 * f32     --> f16              -- float32_to_16
 * f32     --> [ui]32           -- float32_to_int
 * [ui]32  --> f32              -- int_to_float32
 * [fui]16 --> [fui]16          -- f2i_i2f16
 */

/* Selects and packs the appropriate native conversion for the IR convert,
 * for either the FMA or ADD slot. */
static unsigned
bi_pack_convert(bi_instruction *ins, struct bi_registers *regs, bool FMA)
{
        nir_alu_type from_base = nir_alu_type_get_base_type(ins->src_types[0]);
        unsigned from_size = nir_alu_type_get_type_size(ins->src_types[0]);
        bool from_unsigned = from_base == nir_type_uint;

        nir_alu_type to_base = nir_alu_type_get_base_type(ins->dest_type);
        unsigned to_size = nir_alu_type_get_type_size(ins->dest_type);
        bool to_unsigned = to_base == nir_type_uint;
        bool to_float = to_base == nir_type_float;

        /* Sanity check: must actually convert, and at most one size step */
        assert((from_base != to_base) || (from_size != to_size));
        assert((MAX2(from_size, to_size) / MIN2(from_size, to_size)) <= 2);

        /* f32 to f16 is special */
        if (from_size == 32 && to_size == 16 && from_base == nir_type_float && to_base == from_base) {
                /* TODO: second vectorized source? */
                struct bifrost_fma_2src pfma = {
                        .src0 = bi_get_src(ins, regs, 0),
                        .src1 = BIFROST_SRC_STAGE, /* 0 */
                        .op = BIFROST_FMA_FLOAT32_TO_16
                };

                struct bifrost_add_2src padd = {
                        .src0 = bi_get_src(ins, regs, 0),
                        .src1 = BIFROST_SRC_STAGE, /* 0 */
                        .op = BIFROST_ADD_FLOAT32_TO_16
                };

                if (FMA) {
                        RETURN_PACKED(pfma);
                } else {
                        RETURN_PACKED(padd);
                }
        }

        /* Otherwise, figure out the mode */
        unsigned op = 0;

        if (from_size == 16 && to_size == 32) {
                /* Widening reads a single 16-bit component */
                unsigned component = ins->swizzle[0][0];
                assert(component <= 1);

                if (from_base == nir_type_float)
                        op = BIFROST_CONVERT_5(component);
                else
                        op = BIFROST_CONVERT_4(from_unsigned, component, to_float);
        } else {
                unsigned mode = 0;
                unsigned swizzle = (from_size == 16) ? bi_swiz16(ins, 0) : 0;
                bool is_unsigned = from_unsigned;

                if (from_base == nir_type_float) {
                        assert(to_base != nir_type_float);
                        /* float->int: signedness comes from the destination */
                        is_unsigned = to_unsigned;

                        if (from_size == 32 && to_size == 32)
                                mode = BIFROST_CONV_F32_TO_I32;
                        else if (from_size == 16 && to_size == 16)
                                mode = BIFROST_CONV_F16_TO_I16;
                        else
                                unreachable("Invalid float conversion");
                } else {
                        assert(to_base == nir_type_float);
                        assert(from_size == to_size);

                        if (to_size == 32)
                                mode = BIFROST_CONV_I32_TO_F32;
                        else if (to_size == 16)
                                mode = BIFROST_CONV_I16_TO_F16;
                        else
                                unreachable("Invalid int conversion");
                }

                /* Fixup swizzle for 32-bit only modes */

                if (mode == BIFROST_CONV_I32_TO_F32)
                        swizzle = 0b11;
                else if (mode == BIFROST_CONV_F32_TO_I32)
                        swizzle = 0b10;

                op = BIFROST_CONVERT(is_unsigned, ins->roundmode, swizzle, mode);

                /* Unclear what the top bit is for... maybe 16-bit related */
                bool mode2 = mode == BIFROST_CONV_F16_TO_I16;
                bool mode6 = mode == BIFROST_CONV_I16_TO_F16;

                if (!(mode2 || mode6))
                        op |= 0x100;
        }

        if (FMA)
                return bi_pack_fma_1src(ins, regs, BIFROST_FMA_CONVERT | op);
        else
                return bi_pack_add_1src(ins, regs, BIFROST_ADD_CONVERT | op);
}
942
/* Packs a lanewise SELect/combine on FMA for 16-bit or 8-bit sources. */
static unsigned
bi_pack_fma_select(bi_instruction *ins, struct bi_registers *regs)
{
        unsigned size = nir_alu_type_get_type_size(ins->src_types[0]);

        if (size == 16) {
                /* Swizzle bits select the component of each 16-bit source */
                unsigned swiz = (ins->swizzle[0][0] | (ins->swizzle[1][0] << 1));
                unsigned op = BIFROST_FMA_SEL_16(swiz);
                return bi_pack_fma_2src(ins, regs, op);
        } else if (size == 8) {
                unsigned swiz = 0;

                /* NOTE(review): swizzle is indexed by lane (c) as the
                 * *source* index here -- presumably lowering lays the
                 * sources out one per output byte; confirm against the
                 * lowering pass. */
                for (unsigned c = 0; c < 4; ++c) {
                        if (ins->swizzle[c][0]) {
                                /* Ensure lowering restriction is met */
                                assert(ins->swizzle[c][0] == 2);
                                swiz |= (1 << c);
                        }
                }

                struct bifrost_fma_sel8 pack = {
                        .src0 = bi_get_src(ins, regs, 0),
                        .src1 = bi_get_src(ins, regs, 1),
                        .src2 = bi_get_src(ins, regs, 2),
                        .src3 = bi_get_src(ins, regs, 3),
                        .swizzle = swiz,
                        .op = BIFROST_FMA_OP_SEL8
                };

                RETURN_PACKED(pack);
        } else {
                unreachable("Unimplemented");
        }
}
977
/* Maps IR float comparisons to hardware conditions: ordered variants for
 * everything except not-equal, which uses the unordered UNE encoding. */
static enum bifrost_fcmp_cond
bi_fcmp_cond(enum bi_cond cond)
{
        switch (cond) {
        case BI_COND_LT: return BIFROST_OLT;
        case BI_COND_LE: return BIFROST_OLE;
        case BI_COND_GE: return BIFROST_OGE;
        case BI_COND_GT: return BIFROST_OGT;
        case BI_COND_EQ: return BIFROST_OEQ;
        case BI_COND_NE: return BIFROST_UNE;
        default: unreachable("Unknown bi_cond");
        }
}
991
992 /* a <?> b <==> b <flip(?)> a (TODO: NaN behaviour?) */
993
994 static enum bifrost_fcmp_cond
995 bi_flip_fcmp(enum bifrost_fcmp_cond cond)
996 {
997 switch (cond) {
998 case BIFROST_OGT:
999 return BIFROST_OLT;
1000 case BIFROST_OGE:
1001 return BIFROST_OLE;
1002 case BIFROST_OLT:
1003 return BIFROST_OGT;
1004 case BIFROST_OLE:
1005 return BIFROST_OGE;
1006 case BIFROST_OEQ:
1007 case BIFROST_UNE:
1008 return cond;
1009 default:
1010 unreachable("Unknown fcmp cond");
1011 }
1012 }
1013
/* Packs an FMA-slot float comparison: fp32 with the neg/flip rewrite
 * described inline, or fp16 using the quirky abs encoding. */
static unsigned
bi_pack_fma_cmp(bi_instruction *ins, struct bi_registers *regs)
{
        nir_alu_type Tl = ins->src_types[0];
        nir_alu_type Tr = ins->src_types[1];

        if (Tl == nir_type_float32 || Tr == nir_type_float32) {
                /* TODO: Mixed 32/16 cmp */
                assert(Tl == Tr);

                enum bifrost_fcmp_cond cond = bi_fcmp_cond(ins->cond);

                /* Only src1 has neg, so we arrange:
                 *      a < b   --- native
                 *      a < -b  --- native
                 *      -a < -b <===> a > b
                 *      -a < b  <===> a > -b
                 * TODO: Is this NaN-precise?
                 */

                bool flip = ins->src_neg[0];
                bool neg = ins->src_neg[0] ^ ins->src_neg[1];

                if (flip)
                        cond = bi_flip_fcmp(cond);

                struct bifrost_fma_fcmp pack = {
                        .src0 = bi_get_src(ins, regs, 0),
                        .src1 = bi_get_src(ins, regs, 1),
                        .src0_abs = ins->src_abs[0],
                        .src1_abs = ins->src_abs[1],
                        .src1_neg = neg,
                        .src_expand = 0,
                        .unk1 = 0,
                        .cond = cond,
                        .op = BIFROST_FMA_OP_FCMP_GL
                };

                RETURN_PACKED(pack);
        } else if (Tl == nir_type_float16 && Tr == nir_type_float16) {
                bool flip = false;
                bool l = bi_pack_fp16_abs(ins, regs, &flip);
                enum bifrost_fcmp_cond cond = bi_fcmp_cond(ins->cond);

                /* Comparison is not commutative: flip the condition too */
                if (flip)
                        cond = bi_flip_fcmp(cond);

                struct bifrost_fma_fcmp16 pack = {
                        .src0 = bi_get_src(ins, regs, flip ? 1 : 0),
                        .src1 = bi_get_src(ins, regs, flip ? 0 : 1),
                        .src0_swizzle = bi_swiz16(ins, flip ? 1 : 0),
                        .src1_swizzle = bi_swiz16(ins, flip ? 0 : 1),
                        .abs1 = l,
                        .unk = 0,
                        .cond = cond,
                        .op = BIFROST_FMA_OP_FCMP_GL_16,
                };

                RETURN_PACKED(pack);
        } else {
                unreachable("Unknown cmp type");
        }
}
1077
/* Selects the hardware shift+bitwise opcode for an IR bitwise op. OR is
 * synthesized via De Morgan's law as NAND on inverted operands; XOR has
 * only one encoding, with shift direction handled out of band. */
static unsigned
bi_fma_bitwise_op(enum bi_bitwise_op op, bool rshift)
{
        switch (op) {
        case BI_BITWISE_OR:
                /* Via De Morgan's */
                return rshift ?
                        BIFROST_FMA_OP_RSHIFT_NAND :
                        BIFROST_FMA_OP_LSHIFT_NAND;
        case BI_BITWISE_AND:
                return rshift ?
                        BIFROST_FMA_OP_RSHIFT_AND :
                        BIFROST_FMA_OP_LSHIFT_AND;
        case BI_BITWISE_XOR:
                /* Shift direction handled out of band */
                return BIFROST_FMA_OP_RSHIFT_XOR;
        default:
                unreachable("Unknown op");
        }
}
1098
/* Packs a shifted bitwise op on FMA, rewriting the per-source invert flags
 * to match the hardware's NAND/XOR encodings. */
static unsigned
bi_pack_fma_bitwise(bi_instruction *ins, struct bi_registers *regs)
{
        unsigned size = nir_alu_type_get_type_size(ins->dest_type);
        assert(size <= 32);

        bool invert_0 = ins->bitwise.src_invert[0];
        bool invert_1 = ins->bitwise.src_invert[1];

        if (ins->op.bitwise == BI_BITWISE_OR) {
                /* Becomes NAND, so via De Morgan's:
                 *      f(A) | f(B) = ~(~f(A) & ~f(B))
                 *                  = NAND(~f(A), ~f(B))
                 */

                invert_0 = !invert_0;
                invert_1 = !invert_1;
        } else if (ins->op.bitwise == BI_BITWISE_XOR) {
                /* ~A ^ ~B = ~(A ^ ~B) = ~(~(A ^ B)) = A ^ B
                 *  ~A ^ B = ~(A ^ B) = A ^ ~B
                 */

                invert_0 ^= invert_1;
                invert_1 = false;

                /* invert_1 ends up specifying shift direction (note the
                 * `false` store above is immediately overwritten) */
                invert_1 = !ins->bitwise.rshift;
        }

        struct bifrost_shift_fma pack = {
                .src0 = bi_get_src(ins, regs, 0),
                .src1 = bi_get_src(ins, regs, 1),
                .src2 = bi_get_src(ins, regs, 2),
                /* Lane-width selector: 0 = 32-bit, 0x7 = 16-bit, 0x4 = 8-bit */
                .half = (size == 32) ? 0 : (size == 16) ? 0x7 : (size == 8) ? 0x4 : 0,
                .unk = 1, /* XXX */
                .invert_1 = invert_0,
                .invert_2 = invert_1,
                .op = bi_fma_bitwise_op(ins->op.bitwise, ins->bitwise.rshift)
        };

        RETURN_PACKED(pack);
}
1141
1142 static unsigned
1143 bi_pack_fma_round(bi_instruction *ins, struct bi_registers *regs)
1144 {
1145 bool fp16 = ins->dest_type == nir_type_float16;
1146 assert(fp16 || ins->dest_type == nir_type_float32);
1147
1148 unsigned op = fp16
1149 ? BIFROST_FMA_ROUND_16(ins->roundmode, bi_swiz16(ins, 0))
1150 : BIFROST_FMA_ROUND_32(ins->roundmode);
1151
1152 return bi_pack_fma_1src(ins, regs, op);
1153 }
1154
1155 static unsigned
1156 bi_pack_fma_imath(bi_instruction *ins, struct bi_registers *regs)
1157 {
1158 /* Scheduler: only ADD can have 8/16-bit imath */
1159 assert(ins->dest_type == nir_type_int32 || ins->dest_type == nir_type_uint32);
1160
1161 unsigned op = ins->op.imath == BI_IMATH_ADD
1162 ? BIFROST_FMA_IADD_32
1163 : BIFROST_FMA_ISUB_32;
1164
1165 return bi_pack_fma_2src(ins, regs, op);
1166 }
1167
1168 static unsigned
1169 bi_pack_fma(bi_clause *clause, bi_bundle bundle, struct bi_registers *regs)
1170 {
1171 if (!bundle.fma)
1172 return BIFROST_FMA_NOP;
1173
1174 switch (bundle.fma->type) {
1175 case BI_ADD:
1176 return bi_pack_fma_addmin(bundle.fma, regs);
1177 case BI_CMP:
1178 return bi_pack_fma_cmp(bundle.fma, regs);
1179 case BI_BITWISE:
1180 return bi_pack_fma_bitwise(bundle.fma, regs);
1181 case BI_CONVERT:
1182 return bi_pack_convert(bundle.fma, regs, true);
1183 case BI_CSEL:
1184 return bi_pack_fma_csel(bundle.fma, regs);
1185 case BI_FMA:
1186 return bi_pack_fma_fma(bundle.fma, regs);
1187 case BI_FREXP:
1188 return bi_pack_fma_frexp(bundle.fma, regs);
1189 case BI_IMATH:
1190 return bi_pack_fma_imath(bundle.fma, regs);
1191 case BI_MINMAX:
1192 return bi_pack_fma_addmin(bundle.fma, regs);
1193 case BI_MOV:
1194 return bi_pack_fma_1src(bundle.fma, regs, BIFROST_FMA_OP_MOV);
1195 case BI_SHIFT:
1196 unreachable("Packing todo");
1197 case BI_SELECT:
1198 return bi_pack_fma_select(bundle.fma, regs);
1199 case BI_ROUND:
1200 return bi_pack_fma_round(bundle.fma, regs);
1201 case BI_REDUCE_FMA:
1202 return bi_pack_fma_reduce(bundle.fma, regs);
1203 default:
1204 unreachable("Cannot encode class as FMA");
1205 }
1206 }
1207
/* Packs LD_VAR (varying load) on the ADD unit. The varying address is either
 * an immediate (direct access) or a register source OR'd with a flag pattern
 * (indirect); the loaded value is returned through the clause's data
 * register rather than a regular write port. */
static unsigned
bi_pack_add_ld_vary(bi_clause *clause, bi_instruction *ins, struct bi_registers *regs)
{
        unsigned size = nir_alu_type_get_type_size(ins->dest_type);
        assert(size == 32 || size == 16);

        unsigned op = (size == 32) ?
                BIFROST_ADD_OP_LD_VAR_32 :
                BIFROST_ADD_OP_LD_VAR_16;

        unsigned packed_addr = 0;

        if (ins->src[0] & BIR_INDEX_CONSTANT) {
                /* Direct uses address field directly */
                packed_addr = bi_get_immediate(ins, 0);
        } else {
                /* Indirect gets an extra source */
                packed_addr = bi_get_src(ins, regs, 0) | 0b11000;
        }

        /* The destination is thrown in the data register */
        assert(ins->dest & BIR_INDEX_REGISTER);
        clause->data_register = ins->dest & ~BIR_INDEX_REGISTER;

        unsigned channels = ins->vector_channels;
        assert(channels >= 1 && channels <= 4);

        struct bifrost_ld_var pack = {
                .src0 = bi_get_src(ins, regs, 1),
                .addr = packed_addr,
                .channels = MALI_POSITIVE(channels),
                .interp_mode = ins->load_vary.interp_mode,
                .reuse = ins->load_vary.reuse,
                .flat = ins->load_vary.flat,
                .op = op
        };

        RETURN_PACKED(pack);
}
1247
1248 static unsigned
1249 bi_pack_add_2src(bi_instruction *ins, struct bi_registers *regs, unsigned op)
1250 {
1251 struct bifrost_add_2src pack = {
1252 .src0 = bi_get_src(ins, regs, 0),
1253 .src1 = bi_get_src(ins, regs, 1),
1254 .op = op
1255 };
1256
1257 RETURN_PACKED(pack);
1258 }
1259
/* Packs fp32 FADD/FMIN/FMAX on the ADD unit. Note the shared `mode` field
 * is overloaded: it holds the round mode for FADD but the min/max tie-break
 * mode for FMIN/FMAX. */
static unsigned
bi_pack_add_addmin_f32(bi_instruction *ins, struct bi_registers *regs)
{
        unsigned op =
                (ins->type == BI_ADD) ? BIFROST_ADD_OP_FADD32 :
                (ins->op.minmax == BI_MINMAX_MIN) ? BIFROST_ADD_OP_FMIN32 :
                BIFROST_ADD_OP_FMAX32;

        struct bifrost_add_faddmin pack = {
                .src0 = bi_get_src(ins, regs, 0),
                .src1 = bi_get_src(ins, regs, 1),
                .src0_abs = ins->src_abs[0],
                .src1_abs = ins->src_abs[1],
                .src0_neg = ins->src_neg[0],
                .src1_neg = ins->src_neg[1],
                .outmod = ins->outmod,
                /* Round mode for ADD, min/max mode otherwise */
                .mode = (ins->type == BI_ADD) ? ins->roundmode : ins->minmax,
                .op = op
        };

        RETURN_PACKED(pack);
}
1282
/* Packs fp16 FADD on the ADD unit. The faddmin template is reused, but for
 * fp16 its `select`/`outmod` fields are repurposed as the two source
 * swizzles — hence no output modifier is representable and we assert it
 * away. */
static unsigned
bi_pack_add_add_f16(bi_instruction *ins, struct bi_registers *regs)
{
        /* ADD.v2f16 can't have outmod */
        assert(ins->outmod == BIFROST_NONE);

        struct bifrost_add_faddmin pack = {
                .src0 = bi_get_src(ins, regs, 0),
                .src1 = bi_get_src(ins, regs, 1),
                .src0_abs = ins->src_abs[0],
                .src1_abs = ins->src_abs[1],
                .src0_neg = ins->src_neg[0],
                .src1_neg = ins->src_neg[1],
                .select = bi_swiz16(ins, 0), /* swizzle_0 */
                .outmod = bi_swiz16(ins, 1), /* swizzle_1 */
                .mode = ins->roundmode,
                .op = BIFROST_ADD_OP_FADD16
        };

        RETURN_PACKED(pack);
}
1304
1305 static unsigned
1306 bi_pack_add_addmin(bi_instruction *ins, struct bi_registers *regs)
1307 {
1308 if (ins->dest_type == nir_type_float32)
1309 return bi_pack_add_addmin_f32(ins, regs);
1310 else if (ins->dest_type == nir_type_float16) {
1311 if (ins->type == BI_ADD)
1312 return bi_pack_add_add_f16(ins, regs);
1313 else
1314 return bi_pack_fmadd_min_f16(ins, regs, false);
1315 } else
1316 unreachable("Unknown FMA/ADD type");
1317 }
1318
1319 static unsigned
1320 bi_pack_add_ld_ubo(bi_clause *clause, bi_instruction *ins, struct bi_registers *regs)
1321 {
1322 assert(ins->vector_channels >= 1 && ins->vector_channels <= 4);
1323
1324 const unsigned ops[4] = {
1325 BIFROST_ADD_OP_LD_UBO_1,
1326 BIFROST_ADD_OP_LD_UBO_2,
1327 BIFROST_ADD_OP_LD_UBO_3,
1328 BIFROST_ADD_OP_LD_UBO_4
1329 };
1330
1331 bi_write_data_register(clause, ins);
1332 return bi_pack_add_2src(ins, regs, ops[ins->vector_channels - 1]);
1333 }
1334
1335 static enum bifrost_ldst_type
1336 bi_pack_ldst_type(nir_alu_type T)
1337 {
1338 switch (T) {
1339 case nir_type_float16: return BIFROST_LDST_F16;
1340 case nir_type_float32: return BIFROST_LDST_F32;
1341 case nir_type_int32: return BIFROST_LDST_I32;
1342 case nir_type_uint32: return BIFROST_LDST_U32;
1343 default: unreachable("Invalid type loaded");
1344 }
1345 }
1346
/* Packs LD_VAR_ADDR. The varying location is an immediate in source 0, the
 * register operands come from sources 1/2, and the load type is taken from
 * source 3's type. The result is returned through the data register. */
static unsigned
bi_pack_add_ld_var_addr(bi_clause *clause, bi_instruction *ins, struct bi_registers *regs)
{
        struct bifrost_ld_var_addr pack = {
                .src0 = bi_get_src(ins, regs, 1),
                .src1 = bi_get_src(ins, regs, 2),
                .location = bi_get_immediate(ins, 0),
                .type = bi_pack_ldst_type(ins->src_types[3]),
                .op = BIFROST_ADD_OP_LD_VAR_ADDR
        };

        bi_write_data_register(clause, ins);
        RETURN_PACKED(pack);
}
1361
1362 static unsigned
1363 bi_pack_add_ld_attr(bi_clause *clause, bi_instruction *ins, struct bi_registers *regs)
1364 {
1365 assert(ins->vector_channels >= 0 && ins->vector_channels <= 4);
1366
1367 struct bifrost_ld_attr pack = {
1368 .src0 = bi_get_src(ins, regs, 1),
1369 .src1 = bi_get_src(ins, regs, 2),
1370 .location = bi_get_immediate(ins, 0),
1371 .channels = MALI_POSITIVE(ins->vector_channels),
1372 .type = bi_pack_ldst_type(ins->dest_type),
1373 .op = BIFROST_ADD_OP_LD_ATTR
1374 };
1375
1376 bi_write_data_register(clause, ins);
1377 RETURN_PACKED(pack);
1378 }
1379
/* Packs ST_VAR (varying store). Sources 1-3 are the register operands; the
 * value being stored is read through the clause's data register. */
static unsigned
bi_pack_add_st_vary(bi_clause *clause, bi_instruction *ins, struct bi_registers *regs)
{
        assert(ins->vector_channels >= 1 && ins->vector_channels <= 4);

        struct bifrost_st_vary pack = {
                .src0 = bi_get_src(ins, regs, 1),
                .src1 = bi_get_src(ins, regs, 2),
                .src2 = bi_get_src(ins, regs, 3),
                .channels = MALI_POSITIVE(ins->vector_channels),
                .op = BIFROST_ADD_OP_ST_VAR
        };

        bi_read_data_register(clause, ins);
        RETURN_PACKED(pack);
}
1396
/* Packs ATEST. The `half` bit is set when source 1 is fp16; in that case
 * `component` selects which half to test, while the fp32 form always sets
 * component = 1. */
static unsigned
bi_pack_add_atest(bi_clause *clause, bi_instruction *ins, struct bi_registers *regs)
{
        bool fp16 = (ins->src_types[1] == nir_type_float16);

        struct bifrost_add_atest pack = {
                .src0 = bi_get_src(ins, regs, 0),
                .src1 = bi_get_src(ins, regs, 1),
                .half = fp16,
                .component = fp16 ? ins->swizzle[1][0] : 1, /* Set for fp32 */
                .op = BIFROST_ADD_OP_ATEST,
        };

        /* Despite *also* writing with the usual mechanism... quirky and
         * perhaps unnecessary, but let's match the blob */
        clause->data_register = ins->dest & ~BIR_INDEX_REGISTER;

        RETURN_PACKED(pack);
}
1416
/* Packs BLEND. The colour operand is read through the data register; only
 * blend location 0 (the first render target) is handled so far. */
static unsigned
bi_pack_add_blend(bi_clause *clause, bi_instruction *ins, struct bi_registers *regs)
{
        struct bifrost_add_inst pack = {
                .src0 = bi_get_src(ins, regs, 1),
                .op = BIFROST_ADD_OP_BLEND
        };

        /* TODO: Pack location in uniform_const */
        assert(ins->blend_location == 0);

        bi_read_data_register(clause, ins);
        RETURN_PACKED(pack);
}
1431
1432 static unsigned
1433 bi_pack_add_special(bi_instruction *ins, struct bi_registers *regs)
1434 {
1435 unsigned op = 0;
1436 bool fp16 = ins->dest_type == nir_type_float16;
1437 bool Y = ins->swizzle[0][0];
1438
1439 if (ins->op.special == BI_SPECIAL_FRCP) {
1440 op = fp16 ?
1441 (Y ? BIFROST_ADD_OP_FRCP_FAST_F16_Y :
1442 BIFROST_ADD_OP_FRCP_FAST_F16_X) :
1443 BIFROST_ADD_OP_FRCP_FAST_F32;
1444 } else if (ins->op.special == BI_SPECIAL_FRSQ) {
1445 op = fp16 ?
1446 (Y ? BIFROST_ADD_OP_FRSQ_FAST_F16_Y :
1447 BIFROST_ADD_OP_FRSQ_FAST_F16_X) :
1448 BIFROST_ADD_OP_FRSQ_FAST_F32;
1449
1450 } else if (ins->op.special == BI_SPECIAL_EXP2_LOW) {
1451 assert(!fp16);
1452 op = BIFROST_ADD_OP_FEXP2_FAST;
1453 } else {
1454 unreachable("Unknown special op");
1455 }
1456
1457 return bi_pack_add_1src(ins, regs, op);
1458 }
1459
1460 static unsigned
1461 bi_pack_add_table(bi_instruction *ins, struct bi_registers *regs)
1462 {
1463 unsigned op = 0;
1464 assert(ins->dest_type == nir_type_float32);
1465
1466 op = BIFROST_ADD_OP_LOG2_HELP;
1467 return bi_pack_add_1src(ins, regs, op);
1468 }
/* Packs a compact texture instruction. The opcode varies with the f16/f32
 * destination and with vertex vs. fragment stage; compute_lod is enabled
 * only for fragment shaders. The result lands in the data register. */
static unsigned
bi_pack_add_tex_compact(bi_clause *clause, bi_instruction *ins, struct bi_registers *regs, gl_shader_stage stage)
{
        bool f16 = ins->dest_type == nir_type_float16;
        bool vtx = stage != MESA_SHADER_FRAGMENT;

        struct bifrost_tex_compact pack = {
                .src0 = bi_get_src(ins, regs, 0),
                .src1 = bi_get_src(ins, regs, 1),
                .op = f16 ? BIFROST_ADD_OP_TEX_COMPACT_F16(vtx) :
                        BIFROST_ADD_OP_TEX_COMPACT_F32(vtx),
                .compute_lod = !vtx,
                .tex_index = ins->texture.texture_index,
                .sampler_index = ins->texture.sampler_index
        };

        bi_write_data_register(clause, ins);
        RETURN_PACKED(pack);
}
1488
1489 static unsigned
1490 bi_pack_add_select(bi_instruction *ins, struct bi_registers *regs)
1491 {
1492 unsigned size = nir_alu_type_get_type_size(ins->src_types[0]);
1493 assert(size == 16);
1494
1495 unsigned swiz = (ins->swizzle[0][0] | (ins->swizzle[1][0] << 1));
1496 unsigned op = BIFROST_ADD_SEL_16(swiz);
1497 return bi_pack_add_2src(ins, regs, op);
1498 }
1499
1500 static enum bifrost_discard_cond
1501 bi_cond_to_discard(enum bi_cond cond, bool *flip)
1502 {
1503 switch (cond){
1504 case BI_COND_GT:
1505 *flip = true;
1506 /* fallthrough */
1507 case BI_COND_LT:
1508 return BIFROST_DISCARD_FLT;
1509 case BI_COND_GE:
1510 *flip = true;
1511 /* fallthrough */
1512 case BI_COND_LE:
1513 return BIFROST_DISCARD_FLE;
1514 case BI_COND_NE:
1515 return BIFROST_DISCARD_FNE;
1516 case BI_COND_EQ:
1517 return BIFROST_DISCARD_FEQ;
1518 default:
1519 unreachable("Invalid op for discard");
1520 }
1521 }
1522
/* Packs DISCARD_IF. GT/GE conditions are realized by swapping the sources
 * (bi_cond_to_discard sets `flip`); for fp16 sources, the per-source
 * selects pick which half to compare. */
static unsigned
bi_pack_add_discard(bi_instruction *ins, struct bi_registers *regs)
{
        bool fp16 = ins->src_types[0] == nir_type_float16;
        assert(fp16 || ins->src_types[0] == nir_type_float32);

        bool flip = false;
        enum bifrost_discard_cond cond = bi_cond_to_discard(ins->cond, &flip);

        struct bifrost_add_discard pack = {
                .src0 = bi_get_src(ins, regs, flip ? 1 : 0),
                .src1 = bi_get_src(ins, regs, flip ? 0 : 1),
                .cond = cond,
                .src0_select = fp16 ? ins->swizzle[0][0] : 0,
                .src1_select = fp16 ? ins->swizzle[1][0] : 0,
                .fp32 = fp16 ? 0 : 1,
                .op = BIFROST_ADD_OP_DISCARD
        };

        RETURN_PACKED(pack);
}
1544
/* Maps an IR condition to a hardware integer-compare condition. LT/LE are
 * normalized onto GT/GE by setting *flip so the caller swaps the operands.
 *
 * NOTE(review): for 16-bit compares the unsigned encodings appear
 * deliberately crossed relative to 32-bit (unsigned-16 GT uses the IGE
 * encoding, unsigned-16 GE uses UGT). This looks like a workaround for an
 * icmp16 encoding quirk — confirm against hardware before "fixing". */
static enum bifrost_icmp_cond
bi_cond_to_icmp(enum bi_cond cond, bool *flip, bool is_unsigned, bool is_16)
{
        switch (cond){
        case BI_COND_LT:
                *flip = true;
                /* fallthrough */
        case BI_COND_GT:
                return is_unsigned ? (is_16 ? BIFROST_ICMP_IGE : BIFROST_ICMP_UGT)
                        : BIFROST_ICMP_IGT;
        case BI_COND_LE:
                *flip = true;
                /* fallthrough */
        case BI_COND_GE:
                return is_unsigned ? BIFROST_ICMP_UGE :
                        (is_16 ? BIFROST_ICMP_UGT : BIFROST_ICMP_IGE);
        case BI_COND_NE:
                return BIFROST_ICMP_NEQ;
        case BI_COND_EQ:
                return BIFROST_ICMP_EQ;
        default:
                unreachable("Invalid op for icmp");
        }
}
1569
1570 static unsigned
1571 bi_pack_add_icmp32(bi_instruction *ins, struct bi_registers *regs, bool flip,
1572 enum bifrost_icmp_cond cond)
1573 {
1574 struct bifrost_add_icmp pack = {
1575 .src0 = bi_get_src(ins, regs, flip ? 1 : 0),
1576 .src1 = bi_get_src(ins, regs, flip ? 0 : 1),
1577 .cond = cond,
1578 .sz = 1,
1579 .d3d = false,
1580 .op = BIFROST_ADD_OP_ICMP_32
1581 };
1582
1583 RETURN_PACKED(pack);
1584 }
1585
1586 static unsigned
1587 bi_pack_add_icmp16(bi_instruction *ins, struct bi_registers *regs, bool flip,
1588 enum bifrost_icmp_cond cond)
1589 {
1590 struct bifrost_add_icmp16 pack = {
1591 .src0 = bi_get_src(ins, regs, flip ? 1 : 0),
1592 .src1 = bi_get_src(ins, regs, flip ? 0 : 1),
1593 .src0_swizzle = bi_swiz16(ins, flip ? 1 : 0),
1594 .src1_swizzle = bi_swiz16(ins, flip ? 0 : 1),
1595 .cond = cond,
1596 .d3d = false,
1597 .op = BIFROST_ADD_OP_ICMP_16
1598 };
1599
1600 RETURN_PACKED(pack);
1601 }
1602
/* Packs a comparison on the ADD unit. Only 16/32-bit integer compares are
 * implemented so far; float compares on ADD remain TODO. */
static unsigned
bi_pack_add_cmp(bi_instruction *ins, struct bi_registers *regs)
{
        nir_alu_type Tl = ins->src_types[0];
        nir_alu_type Tr = ins->src_types[1];
        nir_alu_type Bl = nir_alu_type_get_base_type(Tl);

        if (Bl == nir_type_uint || Bl == nir_type_int) {
                assert(Tl == Tr);
                unsigned sz = nir_alu_type_get_type_size(Tl);

                bool flip = false;

                /* NOTE(review): the commented-out bi_invert_cond on the
                 * 16-bit path looks like a leftover experiment around the
                 * icmp16 encoding quirks (see bi_cond_to_icmp) — confirm */
                enum bifrost_icmp_cond cond = bi_cond_to_icmp(
                        sz == 16 ? /*bi_invert_cond*/(ins->cond) : ins->cond,
                        &flip, Bl == nir_type_uint, sz == 16);

                if (sz == 32)
                        return bi_pack_add_icmp32(ins, regs, flip, cond);
                else if (sz == 16)
                        return bi_pack_add_icmp16(ins, regs, flip, cond);
                else
                        unreachable("TODO");
        } else {
                unreachable("TODO");
        }
}
1630
1631 static unsigned
1632 bi_pack_add_imath(bi_instruction *ins, struct bi_registers *regs)
1633 {
1634 /* TODO: 32+16 add */
1635 assert(ins->src_types[0] == ins->src_types[1]);
1636 unsigned sz = nir_alu_type_get_type_size(ins->src_types[0]);
1637 enum bi_imath_op p = ins->op.imath;
1638
1639 unsigned op = 0;
1640
1641 if (sz == 8) {
1642 op = (p == BI_IMATH_ADD) ? BIFROST_ADD_IADD_8 :
1643 BIFROST_ADD_ISUB_8;
1644 } else if (sz == 16) {
1645 op = (p == BI_IMATH_ADD) ? BIFROST_ADD_IADD_16 :
1646 BIFROST_ADD_ISUB_16;
1647 } else if (sz == 32) {
1648 op = (p == BI_IMATH_ADD) ? BIFROST_ADD_IADD_32 :
1649 BIFROST_ADD_ISUB_32;
1650 } else {
1651 unreachable("64-bit todo");
1652 }
1653
1654 return bi_pack_add_2src(ins, regs, op);
1655 }
1656
1657 static unsigned
1658 bi_pack_add(bi_clause *clause, bi_bundle bundle, struct bi_registers *regs, gl_shader_stage stage)
1659 {
1660 if (!bundle.add)
1661 return BIFROST_ADD_NOP;
1662
1663 switch (bundle.add->type) {
1664 case BI_ADD:
1665 return bi_pack_add_addmin(bundle.add, regs);
1666 case BI_ATEST:
1667 return bi_pack_add_atest(clause, bundle.add, regs);
1668 case BI_BRANCH:
1669 unreachable("Packing todo");
1670 case BI_CMP:
1671 return bi_pack_add_cmp(bundle.add, regs);
1672 case BI_BLEND:
1673 return bi_pack_add_blend(clause, bundle.add, regs);
1674 case BI_BITWISE:
1675 unreachable("Packing todo");
1676 case BI_CONVERT:
1677 return bi_pack_convert(bundle.add, regs, false);
1678 case BI_DISCARD:
1679 return bi_pack_add_discard(bundle.add, regs);
1680 case BI_FREXP:
1681 unreachable("Packing todo");
1682 case BI_IMATH:
1683 return bi_pack_add_imath(bundle.add, regs);
1684 case BI_LOAD:
1685 unreachable("Packing todo");
1686 case BI_LOAD_ATTR:
1687 return bi_pack_add_ld_attr(clause, bundle.add, regs);
1688 case BI_LOAD_UNIFORM:
1689 return bi_pack_add_ld_ubo(clause, bundle.add, regs);
1690 case BI_LOAD_VAR:
1691 return bi_pack_add_ld_vary(clause, bundle.add, regs);
1692 case BI_LOAD_VAR_ADDRESS:
1693 return bi_pack_add_ld_var_addr(clause, bundle.add, regs);
1694 case BI_MINMAX:
1695 return bi_pack_add_addmin(bundle.add, regs);
1696 case BI_MOV:
1697 case BI_SHIFT:
1698 case BI_STORE:
1699 unreachable("Packing todo");
1700 case BI_STORE_VAR:
1701 return bi_pack_add_st_vary(clause, bundle.add, regs);
1702 case BI_SPECIAL:
1703 return bi_pack_add_special(bundle.add, regs);
1704 case BI_TABLE:
1705 return bi_pack_add_table(bundle.add, regs);
1706 case BI_SELECT:
1707 return bi_pack_add_select(bundle.add, regs);
1708 case BI_TEX:
1709 if (bundle.add->op.texture == BI_TEX_COMPACT)
1710 return bi_pack_add_tex_compact(clause, bundle.add, regs, stage);
1711 else
1712 unreachable("Unknown tex type");
1713 case BI_ROUND:
1714 unreachable("Packing todo");
1715 default:
1716 unreachable("Cannot encode class as ADD");
1717 }
1718 }
1719
/* A fully packed bundle, split across two 64-bit words; bi_pack_clause
 * slices lo/hi into the clause's quadword fields. */
struct bi_packed_bundle {
        uint64_t lo;
        uint64_t hi;
};
1724
/* Packs one bundle: assigns ports and the uniform/constant slot, packs the
 * register block and both execution units, and splices them together (FMA
 * at bit 35 of lo, low 6 bits of ADD at bit 58, remainder of ADD in hi). */
static struct bi_packed_bundle
bi_pack_bundle(bi_clause *clause, bi_bundle bundle, bi_bundle prev, bool first_bundle, gl_shader_stage stage)
{
        struct bi_registers regs = bi_assign_ports(bundle, prev);
        bi_assign_uniform_constant(clause, &regs, bundle);
        regs.first_instruction = first_bundle;

        uint64_t reg = bi_pack_registers(regs);
        uint64_t fma = bi_pack_fma(clause, bundle, &regs);
        uint64_t add = bi_pack_add(clause, bundle, &regs, stage);

        /* lo = registers | FMA << 35 | (ADD & 0x3f) << 58; ADD overflows
         * into hi */
        struct bi_packed_bundle packed = {
                .lo = reg | (fma << 35) | ((add & 0b111111) << 58),
                .hi = add >> 6
        };

        return packed;
}
1743
1744 /* Packs the next two constants as a dedicated constant quadword at the end of
1745 * the clause, returning the number packed. */
1746
static unsigned
bi_pack_constants(bi_context *ctx, bi_clause *clause,
                unsigned index,
                struct util_dynarray *emission)
{
        /* After these two, are we done? Determines tag */
        bool done = clause->constant_count <= (index + 2);
        bool only = clause->constant_count <= (index + 1);

        /* TODO: Pos */
        assert(index == 0 && clause->bundle_count == 1);
        assert(only);

        /* Top 4 bits of the first constant, used by the workaround below */
        uint64_t hi = clause->constants[index + 0] >> 60ull;

        struct bifrost_fmt_constant quad = {
                .pos = 0, /* TODO */
                .tag = done ? BIFROST_FMTC_FINAL : BIFROST_FMTC_CONSTANTS,
                .imm_1 = clause->constants[index + 0] >> 4,
                .imm_2 = ((hi < 8) ? (hi << 60ull) : 0) >> 4,
        };

        /* XXX: On G71, Connor observed that the difference of the top 4 bits
         * of the second constant with the first must be less than 8, otherwise
         * we have to swap them. On G52, I'm able to reproduce a similar issue
         * but with a different workaround (modeled above with a single
         * constant, unclear how to workaround for multiple constants.) Further
         * investigation needed. Possibly an errata. XXX */

        util_dynarray_append(emission, struct bifrost_fmt_constant, quad);

        /* A constant quadword covers two constant slots */
        return 2;
}
1780
/* Packs a clause: emits the format-1 quadword holding the header and the
 * (currently single) bundle, then appends constant quadwords. */
static void
bi_pack_clause(bi_context *ctx, bi_clause *clause, bi_clause *next,
                struct util_dynarray *emission, gl_shader_stage stage)
{
        /* Only single-bundle clauses are supported so far */
        struct bi_packed_bundle ins_1 = bi_pack_bundle(clause, clause->bundles[0], clause->bundles[0], true, stage);
        assert(clause->bundle_count == 1);

        /* Used to decide if we elide writes */
        bool is_fragment = ctx->stage == MESA_SHADER_FRAGMENT;

        /* State for packing constants throughout */
        unsigned constant_index = 0;

        struct bifrost_fmt1 quad_1 = {
                .tag = clause->constant_count ? BIFROST_FMT1_CONSTANTS : BIFROST_FMT1_FINAL,
                .header = bi_pack_header(clause, next, is_fragment),
                .ins_1 = ins_1.lo,
                .ins_2 = ins_1.hi & ((1 << 11) - 1), /* low 11 bits of hi */
                .ins_0 = (ins_1.hi >> 11) & 0b111,   /* next 3 bits of hi */
        };

        util_dynarray_append(emission, struct bifrost_fmt1, quad_1);

        /* Pack the remaining constants */

        while (constant_index < clause->constant_count) {
                constant_index += bi_pack_constants(ctx, clause,
                                constant_index, emission);
        }
}
1811
/* Finds the clause executing after `clause`: the next clause in its block
 * if there is one, otherwise the first clause of the next non-empty block.
 * Returns NULL at the end of the shader. Note the foreach below reuses the
 * `block` parameter as its iteration variable. */
static bi_clause *
bi_next_clause(bi_context *ctx, pan_block *block, bi_clause *clause)
{
        /* Try the next clause in this block */
        if (clause->link.next != &((bi_block *) block)->clauses)
                return list_first_entry(&(clause->link), bi_clause, link);

        /* Try the next block, or the one after that if it's empty, etc .*/
        pan_block *next_block = pan_next_block(block);

        bi_foreach_block_from(ctx, next_block, block) {
                bi_block *blk = (bi_block *) block;

                if (!list_is_empty(&blk->clauses))
                        return list_first_entry(&(blk->clauses), bi_clause, link);
        }

        return NULL;
}
1831
1832 void
1833 bi_pack(bi_context *ctx, struct util_dynarray *emission)
1834 {
1835 util_dynarray_init(emission, NULL);
1836
1837 bi_foreach_block(ctx, _block) {
1838 bi_block *block = (bi_block *) _block;
1839
1840 bi_foreach_clause_in_block(block, clause) {
1841 bi_clause *next = bi_next_clause(ctx, _block, clause);
1842 bi_pack_clause(ctx, clause, next, emission, ctx->stage);
1843 }
1844 }
1845 }