pan/bi: Add FILE* argument to bi_print_registers
[mesa.git] / src / panfrost / bifrost / bi_pack.c
1 /*
2 * Copyright (C) 2020 Collabora, Ltd.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 */
23
24 #include "compiler.h"
25 #include "bi_print.h"
26
27 #define RETURN_PACKED(str) { \
28 uint64_t temp = 0; \
29 memcpy(&temp, &str, sizeof(str)); \
30 return temp; \
31 }
32
33 /* This file contains the final passes of the compiler. Running after
34 * scheduling and RA, the IR is now finalized, so we need to emit it to actual
35  * bits on the wire (as well as fix up branches) */
36
37 static uint64_t
38 bi_pack_header(bi_clause *clause, bi_clause *next, bool is_fragment)
39 {
40 struct bifrost_header header = {
41 .back_to_back = clause->back_to_back,
42 .no_end_of_shader = (next != NULL),
43 .elide_writes = is_fragment,
44 .branch_cond = clause->branch_conditional,
45 .datareg_writebarrier = clause->data_register_write_barrier,
46 .datareg = clause->data_register,
47 .scoreboard_deps = next ? next->dependencies : 0,
48 .scoreboard_index = clause->scoreboard_id,
49 .clause_type = clause->clause_type,
50 .next_clause_type = next ? next->clause_type : 0,
51 .suppress_inf = true,
52 .suppress_nan = true,
53 };
54
55 header.branch_cond |= header.back_to_back;
56
57 uint64_t u = 0;
58 memcpy(&u, &header, sizeof(header));
59 return u;
60 }
61
62 /* The uniform/constant slot allows loading a contiguous 64-bit immediate or
63 * pushed uniform per bundle. Figure out which one we need in the bundle (the
64 * scheduler needs to ensure we only have one type per bundle), validate
65 * everything, and rewrite away the register/uniform indices to use 3-bit
66 * sources directly. */
67
68 static unsigned
69 bi_lookup_constant(bi_clause *clause, uint64_t cons, bool *hi, bool b64)
70 {
71 uint64_t want = (cons >> 4);
72
73 for (unsigned i = 0; i < clause->constant_count; ++i) {
74                 /* Only check the top 60 bits, since that's what's actually embedded
75                  * in the clause; the bottom 4 bits are bundle-inline */
76
77 uint64_t candidates[2] = {
78 clause->constants[i] >> 4,
79 clause->constants[i] >> 36
80 };
81
82 /* For <64-bit mode, we treat lo/hi separately */
83
84 if (!b64)
85 candidates[0] &= (0xFFFFFFFF >> 4);
86
87 if (candidates[0] == want)
88 return i;
89
90 if (candidates[1] == want && !b64) {
91 *hi = true;
92 return i;
93 }
94 }
95
96 unreachable("Invalid constant accessed");
97 }
98
99 static unsigned
100 bi_constant_field(unsigned idx)
101 {
102 assert(idx <= 5);
103
104 const unsigned values[] = {
105 4, 5, 6, 7, 2, 3
106 };
107
108 return values[idx] << 4;
109 }
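
/* Illustrative example (not from the original source): how the packed
 * uniform/const selector is built from a constant slot. For, say, clause
 * constant index 1 whose low nibble is 0x9, the caller below computes
 *
 *      bi_constant_field(1) | (clause->constants[1] & 0xF)
 *              = (5 << 4) | 0x9
 *              = 0x59
 *
 * Only this selector and the low nibble travel in the bundle's register
 * block; the top 60 bits of the constant are emitted in the clause's
 * constant quadword (see bi_pack_constants at the end of this file). */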
110
111 static bool
112 bi_assign_uniform_constant_single(
113 bi_registers *regs,
114 bi_clause *clause,
115 bi_instruction *ins, bool assigned, bool fast_zero)
116 {
117 if (!ins)
118 return assigned;
119
120 if (ins->type == BI_BLEND) {
121 assert(!assigned);
122 regs->uniform_constant = 0x8;
123 return true;
124 }
125
126 bi_foreach_src(ins, s) {
127 if (s == 0 && (ins->type == BI_LOAD_VAR_ADDRESS || ins->type == BI_LOAD_ATTR)) continue;
128
129 if (ins->src[s] & BIR_INDEX_CONSTANT) {
130 /* Let direct addresses through */
131 if (ins->type == BI_LOAD_VAR)
132 continue;
133
134 bool hi = false;
135 bool b64 = nir_alu_type_get_type_size(ins->src_types[s]) > 32;
136 uint64_t cons = bi_get_immediate(ins, s);
137 unsigned idx = bi_lookup_constant(clause, cons, &hi, b64);
138 unsigned lo = clause->constants[idx] & 0xF;
139 unsigned f = bi_constant_field(idx) | lo;
140
141 if (assigned && regs->uniform_constant != f)
142 unreachable("Mismatched uniform/const field: imm");
143
144 regs->uniform_constant = f;
145 ins->src[s] = BIR_INDEX_PASS | (hi ? BIFROST_SRC_CONST_HI : BIFROST_SRC_CONST_LO);
146 assigned = true;
147 } else if (ins->src[s] & BIR_INDEX_ZERO && (ins->type == BI_LOAD_UNIFORM || ins->type == BI_LOAD_VAR)) {
148 /* XXX: HACK UNTIL WE HAVE HI MATCHING DUE TO OVERFLOW XXX */
149 ins->src[s] = BIR_INDEX_PASS | BIFROST_SRC_CONST_HI;
150 } else if (ins->src[s] & BIR_INDEX_ZERO && !fast_zero) {
151 /* FMAs have a fast zero port, ADD needs to use the
152 * uniform/const port's special 0 mode handled here */
153 unsigned f = 0;
154
155 if (assigned && regs->uniform_constant != f)
156 unreachable("Mismatched uniform/const field: 0");
157
158 regs->uniform_constant = f;
159 ins->src[s] = BIR_INDEX_PASS | BIFROST_SRC_CONST_LO;
160 assigned = true;
161 } else if (ins->src[s] & BIR_INDEX_ZERO && fast_zero) {
162 ins->src[s] = BIR_INDEX_PASS | BIFROST_SRC_STAGE;
163                 } else if (ins->src[s] & BIR_INDEX_UNIFORM) {
164 unreachable("Push uniforms not implemented yet");
165 }
166 }
167
168 return assigned;
169 }
170
171 static void
172 bi_assign_uniform_constant(
173 bi_clause *clause,
174 bi_registers *regs,
175 bi_bundle bundle)
176 {
177 bool assigned =
178 bi_assign_uniform_constant_single(regs, clause, bundle.fma, false, true);
179
180 bi_assign_uniform_constant_single(regs, clause, bundle.add, assigned, false);
181 }
182
183 /* Assigns a port for reading, before anything is written */
184
185 static void
186 bi_assign_port_read(bi_registers *regs, unsigned src)
187 {
188 /* We only assign for registers */
189 if (!(src & BIR_INDEX_REGISTER))
190 return;
191
192 unsigned reg = src & ~BIR_INDEX_REGISTER;
193
194 /* Check if we already assigned the port */
195 for (unsigned i = 0; i <= 1; ++i) {
196 if (regs->port[i] == reg && regs->enabled[i])
197 return;
198 }
199
200 if (regs->port[3] == reg && regs->read_port3)
201 return;
202
203 /* Assign it now */
204
205 for (unsigned i = 0; i <= 1; ++i) {
206 if (!regs->enabled[i]) {
207 regs->port[i] = reg;
208 regs->enabled[i] = true;
209 return;
210 }
211 }
212
213 if (!regs->read_port3) {
214 regs->port[3] = reg;
215 regs->read_port3 = true;
216 return;
217 }
218
219 bi_print_ports(regs, stderr);
220 unreachable("Failed to find a free port for src");
221 }
222
223 static bi_registers
224 bi_assign_ports(bi_bundle *now, bi_bundle *prev)
225 {
226         /* We assign ports for the main register mechanism. Special ops
227          * use the data registers, which have their own mechanism entirely
228          * and thus get skipped over here. */
229
230 unsigned read_dreg = now->add &&
231 bi_class_props[now->add->type] & BI_DATA_REG_SRC;
232
233 unsigned write_dreg = prev->add &&
234 bi_class_props[prev->add->type] & BI_DATA_REG_DEST;
235
236 /* First, assign reads */
237
238 if (now->fma)
239 bi_foreach_src(now->fma, src)
240 bi_assign_port_read(&now->regs, now->fma->src[src]);
241
242 if (now->add) {
243 bi_foreach_src(now->add, src) {
244 if (!(src == 0 && read_dreg))
245 bi_assign_port_read(&now->regs, now->add->src[src]);
246 }
247 }
248
249 /* Next, assign writes */
250
251 if (prev->add && prev->add->dest & BIR_INDEX_REGISTER && !write_dreg) {
252 now->regs.port[2] = prev->add->dest & ~BIR_INDEX_REGISTER;
253 now->regs.write_add = true;
254 }
255
256 if (prev->fma && prev->fma->dest & BIR_INDEX_REGISTER) {
257 unsigned r = prev->fma->dest & ~BIR_INDEX_REGISTER;
258
259 if (now->regs.write_add) {
260 /* Scheduler constraint: cannot read 3 and write 2 */
261 assert(!now->regs.read_port3);
262 now->regs.port[3] = r;
263 } else {
264 now->regs.port[2] = r;
265 }
266
267 now->regs.write_fma = true;
268 }
269
270 /* Finally, ensure port 1 > port 0 for the 63-x trick to function */
271
272 if (now->regs.enabled[0] && now->regs.enabled[1] && now->regs.port[1] < now->regs.port[0]) {
273 unsigned temp = now->regs.port[0];
274 now->regs.port[0] = now->regs.port[1];
275 now->regs.port[1] = temp;
276 }
277
278 return now->regs;
279 }
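
/* Worked example (illustrative): suppose FMA reads R4 and R5, ADD reads R5
 * plus a passthrough, the previous ADD wrote R7 and the previous FMA wrote
 * R2. The reads settle into port[0] = 4 and port[1] = 5 (R5 is shared), the
 * previous ADD destination takes port[2] (write_add), and the previous FMA
 * destination falls back to port[3] = 2 (write_fma), which is only legal
 * because port 3 was not also claimed for a read. Ports 0/1 already satisfy
 * port[1] > port[0], so no swap is needed. */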
280
281 /* Determines the register control field, ignoring the first? flag */
282
283 static enum bifrost_reg_control
284 bi_pack_register_ctrl_lo(bi_registers r)
285 {
286 if (r.write_fma) {
287 if (r.write_add) {
288 assert(!r.read_port3);
289 return BIFROST_WRITE_ADD_P2_FMA_P3;
290 } else {
291 if (r.read_port3)
292 return BIFROST_WRITE_FMA_P2_READ_P3;
293 else
294 return BIFROST_WRITE_FMA_P2;
295 }
296 } else if (r.write_add) {
297 if (r.read_port3)
298 return BIFROST_WRITE_ADD_P2_READ_P3;
299 else
300 return BIFROST_WRITE_ADD_P2;
301 } else if (r.read_port3)
302 return BIFROST_READ_P3;
303 else
304 return BIFROST_REG_NONE;
305 }
306
307 /* Ditto but account for the first? flag this time */
308
309 static enum bifrost_reg_control
310 bi_pack_register_ctrl(bi_registers r)
311 {
312 enum bifrost_reg_control ctrl = bi_pack_register_ctrl_lo(r);
313
314 if (r.first_instruction) {
315 if (ctrl == BIFROST_REG_NONE)
316 ctrl = BIFROST_FIRST_NONE;
317 else if (ctrl == BIFROST_WRITE_FMA_P2_READ_P3)
318 ctrl = BIFROST_FIRST_WRITE_FMA_P2_READ_P3;
319 else
320 ctrl |= BIFROST_FIRST_NONE;
321 }
322
323 return ctrl;
324 }
325
326 static uint64_t
327 bi_pack_registers(bi_registers regs)
328 {
329 enum bifrost_reg_control ctrl = bi_pack_register_ctrl(regs);
330 struct bifrost_regs s = { 0 };
331 uint64_t packed = 0;
332
333 if (regs.enabled[1]) {
334 /* Gotta save that bit!~ Required by the 63-x trick */
335 assert(regs.port[1] > regs.port[0]);
336 assert(regs.enabled[0]);
337
338 /* Do the 63-x trick, see docs/disasm */
339 if (regs.port[0] > 31) {
340 regs.port[0] = 63 - regs.port[0];
341 regs.port[1] = 63 - regs.port[1];
342 }
343
344 assert(regs.port[0] <= 31);
345 assert(regs.port[1] <= 63);
346
347 s.ctrl = ctrl;
348 s.reg1 = regs.port[1];
349 s.reg0 = regs.port[0];
350 } else {
351 /* Port 1 disabled, so set to zero and use port 1 for ctrl */
352 s.ctrl = 0;
353 s.reg1 = ctrl << 2;
354
355 if (regs.enabled[0]) {
356 /* Bit 0 upper bit of port 0 */
357 s.reg1 |= (regs.port[0] >> 5);
358
359 /* Rest of port 0 in usual spot */
360 s.reg0 = (regs.port[0] & 0b11111);
361 } else {
362 /* Bit 1 set if port 0 also disabled */
363 s.reg1 |= (1 << 1);
364 }
365 }
366
367 /* When port 3 isn't used, we have to set it to port 2, and vice versa,
368 * or INSTR_INVALID_ENC is raised. The reason is unknown. */
369
370 bool has_port2 = regs.write_fma || regs.write_add;
371 bool has_port3 = regs.read_port3 || (regs.write_fma && regs.write_add);
372
373 if (!has_port3)
374 regs.port[3] = regs.port[2];
375
376 if (!has_port2)
377 regs.port[2] = regs.port[3];
378
379 s.reg3 = regs.port[3];
380 s.reg2 = regs.port[2];
381 s.uniform_const = regs.uniform_constant;
382
383 memcpy(&packed, &s, sizeof(s));
384 return packed;
385 }
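
/* Worked example of the 63-x trick (see docs/disasm): reg0 is only a 5-bit
 * field, so a pair like port[0] = 40, port[1] = 61 cannot be stored
 * directly. Both ports are complemented instead, giving reg0 = 63 - 40 = 23
 * and reg1 = 63 - 61 = 2. Since the scheduler guarantees port[1] > port[0],
 * the stored fields come out in descending order exactly when the complement
 * was applied, which is presumably how the decoder distinguishes the two
 * cases. */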
386
387 static void
388 bi_set_data_register(bi_clause *clause, unsigned idx)
389 {
390 assert(idx & BIR_INDEX_REGISTER);
391 unsigned reg = idx & ~BIR_INDEX_REGISTER;
392 assert(reg <= 63);
393 clause->data_register = reg;
394 }
395
396 static void
397 bi_read_data_register(bi_clause *clause, bi_instruction *ins)
398 {
399 bi_set_data_register(clause, ins->src[0]);
400 }
401
402 static void
403 bi_write_data_register(bi_clause *clause, bi_instruction *ins)
404 {
405 bi_set_data_register(clause, ins->dest);
406 }
407
408 static enum bifrost_packed_src
409 bi_get_src_reg_port(bi_registers *regs, unsigned src)
410 {
411 unsigned reg = src & ~BIR_INDEX_REGISTER;
412
413 if (regs->port[0] == reg && regs->enabled[0])
414 return BIFROST_SRC_PORT0;
415 else if (regs->port[1] == reg && regs->enabled[1])
416 return BIFROST_SRC_PORT1;
417 else if (regs->port[3] == reg && regs->read_port3)
418 return BIFROST_SRC_PORT3;
419 else
420 unreachable("Tried to access register with no port");
421 }
422
423 static enum bifrost_packed_src
424 bi_get_src(bi_instruction *ins, bi_registers *regs, unsigned s)
425 {
426 unsigned src = ins->src[s];
427
428 if (src & BIR_INDEX_REGISTER)
429 return bi_get_src_reg_port(regs, src);
430 else if (src & BIR_INDEX_PASS)
431 return src & ~BIR_INDEX_PASS;
432 else {
433 bi_print_instruction(ins, stderr);
434 unreachable("Unknown src in above instruction");
435 }
436 }
437
438 /* Constructs a packed 2-bit swizzle for a 16-bit vec2 source. Source must be
439 * 16-bit and written components must correspond to valid swizzles (component x
440 * or y). */
441
442 static unsigned
443 bi_swiz16(bi_instruction *ins, unsigned src)
444 {
445 assert(nir_alu_type_get_type_size(ins->src_types[src]) == 16);
446 unsigned swizzle = 0;
447
448 for (unsigned c = 0; c < 2; ++c) {
449                 if (!bi_writes_component(ins, c)) continue;
450
451 unsigned k = ins->swizzle[src][c];
452 assert(k <= 1);
453 swizzle |= (k << c);
454 }
455
456 return swizzle;
457 }
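
/* Example (assuming both components are written): a .xx broadcast packs to
 * 0b00, .yx to 0b01, the identity .xy to 0b10 and .yy to 0b11, since bit c
 * of the field holds the source component selected for output component c. */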
458
459 static unsigned
460 bi_pack_fma_fma(bi_instruction *ins, bi_registers *regs)
461 {
462 /* (-a)(-b) = ab, so we only need one negate bit */
463 bool negate_mul = ins->src_neg[0] ^ ins->src_neg[1];
464
465 if (ins->op.mscale) {
466 assert(!(ins->src_abs[0] && ins->src_abs[1]));
467                 assert(!ins->src_abs[2] && !ins->src_neg[3] && !ins->src_abs[3]);
468
469 /* We can have exactly one abs, and can flip the multiplication
470 * to make it fit if we have to */
471 bool flip_ab = ins->src_abs[1];
472
473 struct bifrost_fma_mscale pack = {
474 .src0 = bi_get_src(ins, regs, flip_ab ? 1 : 0),
475 .src1 = bi_get_src(ins, regs, flip_ab ? 0 : 1),
476 .src2 = bi_get_src(ins, regs, 2),
477 .src3 = bi_get_src(ins, regs, 3),
478 .mscale_mode = 0,
479 .mode = ins->outmod,
480 .src0_abs = ins->src_abs[0] || ins->src_abs[1],
481 .src1_neg = negate_mul,
482 .src2_neg = ins->src_neg[2],
483 .op = BIFROST_FMA_OP_MSCALE,
484 };
485
486 RETURN_PACKED(pack);
487 } else if (ins->dest_type == nir_type_float32) {
488 struct bifrost_fma_fma pack = {
489 .src0 = bi_get_src(ins, regs, 0),
490 .src1 = bi_get_src(ins, regs, 1),
491 .src2 = bi_get_src(ins, regs, 2),
492 .src0_abs = ins->src_abs[0],
493 .src1_abs = ins->src_abs[1],
494 .src2_abs = ins->src_abs[2],
495 .src0_neg = negate_mul,
496 .src2_neg = ins->src_neg[2],
497 .outmod = ins->outmod,
498 .roundmode = ins->roundmode,
499 .op = BIFROST_FMA_OP_FMA
500 };
501
502 RETURN_PACKED(pack);
503 } else if (ins->dest_type == nir_type_float16) {
504 struct bifrost_fma_fma16 pack = {
505 .src0 = bi_get_src(ins, regs, 0),
506 .src1 = bi_get_src(ins, regs, 1),
507 .src2 = bi_get_src(ins, regs, 2),
508 .swizzle_0 = bi_swiz16(ins, 0),
509 .swizzle_1 = bi_swiz16(ins, 1),
510 .swizzle_2 = bi_swiz16(ins, 2),
511 .src0_neg = negate_mul,
512 .src2_neg = ins->src_neg[2],
513 .outmod = ins->outmod,
514 .roundmode = ins->roundmode,
515 .op = BIFROST_FMA_OP_FMA16
516 };
517
518 RETURN_PACKED(pack);
519 } else {
520 unreachable("Invalid fma dest type");
521 }
522 }
523
524 static unsigned
525 bi_pack_fma_addmin_f32(bi_instruction *ins, bi_registers *regs)
526 {
527 unsigned op =
528 (ins->type == BI_ADD) ? BIFROST_FMA_OP_FADD32 :
529 (ins->op.minmax == BI_MINMAX_MIN) ? BIFROST_FMA_OP_FMIN32 :
530 BIFROST_FMA_OP_FMAX32;
531
532 struct bifrost_fma_add pack = {
533 .src0 = bi_get_src(ins, regs, 0),
534 .src1 = bi_get_src(ins, regs, 1),
535 .src0_abs = ins->src_abs[0],
536 .src1_abs = ins->src_abs[1],
537 .src0_neg = ins->src_neg[0],
538 .src1_neg = ins->src_neg[1],
539 .unk = 0x0,
540 .outmod = ins->outmod,
541 .roundmode = (ins->type == BI_ADD) ? ins->roundmode : ins->minmax,
542 .op = op
543 };
544
545 RETURN_PACKED(pack);
546 }
547
548 static bool
549 bi_pack_fp16_abs(bi_instruction *ins, bi_registers *regs, bool *flip)
550 {
551 /* Absolute values are packed in a quirky way. Let k = src1 < src0. Let
552 * l be an auxiliary bit we encode. Then the hardware determines:
553 *
554 * abs0 = l || k
555 * abs1 = l && k
556 *
557 * Since add/min/max are commutative, this saves a bit by using the
558 * order of the operands as a bit (k). To pack this, first note:
559 *
560 * (l && k) implies (l || k).
561 *
562 * That is, if the second argument is abs'd, then the first argument
563 * also has abs. So there are three cases:
564 *
565 * Case 0: Neither src has absolute value. Then we have l = k = 0.
566 *
567 * Case 1: Exactly one src has absolute value. Assign that source to
568 * src0 and the other source to src1. Compute k = src1 < src0 based on
569 * that assignment. Then l = ~k.
570 *
571 * Case 2: Both sources have absolute value. Then we have l = k = 1.
572 * Note to force k = 1 requires that (src1 < src0) OR (src0 < src1).
573 * That is, this encoding is only valid if src1 and src0 are distinct.
574 * This is a scheduling restriction (XXX); if an op of this type
575 * requires both identical sources to have abs value, then we must
576 * schedule to ADD (which does not use this ordering trick).
577 */
578
579 unsigned abs_0 = ins->src_abs[0], abs_1 = ins->src_abs[1];
580 unsigned src_0 = bi_get_src(ins, regs, 0);
581 unsigned src_1 = bi_get_src(ins, regs, 1);
582
583 assert(!(abs_0 && abs_1 && src_0 == src_1));
584
585 if (!abs_0 && !abs_1) {
586 /* Force k = 0 <===> NOT(src1 < src0) */
587 *flip = (src_1 < src_0);
588 return false;
589 } else if (abs_0 && !abs_1) {
590 return src_1 >= src_0;
591 } else if (abs_1 && !abs_0) {
592 *flip = true;
593 return src_0 >= src_1;
594 } else {
595 *flip = !(src_1 < src_0);
596 return true;
597 }
598 }
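
/* Worked example: abs on the second IR source only, with ports src_0 = 2 and
 * src_1 = 5. The operands get flipped so the abs'd value sits in src0; in the
 * flipped order k = (src1 < src0) = (2 < 5) = 1, and the function returns
 * l = 0, so the hardware derives abs0 = l || k = 1 and abs1 = l && k = 0,
 * exactly the requested modifiers. */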
599
600 static unsigned
601 bi_pack_fmadd_min_f16(bi_instruction *ins, bi_registers *regs, bool FMA)
602 {
603 unsigned op =
604 (!FMA) ? ((ins->op.minmax == BI_MINMAX_MIN) ?
605 BIFROST_ADD_OP_FMIN16 : BIFROST_ADD_OP_FMAX16) :
606 (ins->type == BI_ADD) ? BIFROST_FMA_OP_FADD16 :
607 (ins->op.minmax == BI_MINMAX_MIN) ? BIFROST_FMA_OP_FMIN16 :
608 BIFROST_FMA_OP_FMAX16;
609
610 bool flip = false;
611 bool l = bi_pack_fp16_abs(ins, regs, &flip);
612 unsigned src_0 = bi_get_src(ins, regs, 0);
613 unsigned src_1 = bi_get_src(ins, regs, 1);
614
615 if (FMA) {
616 struct bifrost_fma_add_minmax16 pack = {
617 .src0 = flip ? src_1 : src_0,
618 .src1 = flip ? src_0 : src_1,
619 .src0_neg = ins->src_neg[flip ? 1 : 0],
620 .src1_neg = ins->src_neg[flip ? 0 : 1],
621 .src0_swizzle = bi_swiz16(ins, flip ? 1 : 0),
622 .src1_swizzle = bi_swiz16(ins, flip ? 0 : 1),
623 .abs1 = l,
624 .outmod = ins->outmod,
625 .mode = (ins->type == BI_ADD) ? ins->roundmode : ins->minmax,
626 .op = op
627 };
628
629 RETURN_PACKED(pack);
630 } else {
631 /* Can't have modes for fp16 */
632 assert(ins->outmod == 0);
633
634 struct bifrost_add_fmin16 pack = {
635 .src0 = flip ? src_1 : src_0,
636 .src1 = flip ? src_0 : src_1,
637 .src0_neg = ins->src_neg[flip ? 1 : 0],
638 .src1_neg = ins->src_neg[flip ? 0 : 1],
639 .abs1 = l,
640 .src0_swizzle = bi_swiz16(ins, flip ? 1 : 0),
641 .src1_swizzle = bi_swiz16(ins, flip ? 0 : 1),
642 .mode = ins->minmax,
643 .op = op
644 };
645
646 RETURN_PACKED(pack);
647 }
648 }
649
650 static unsigned
651 bi_pack_fma_addmin(bi_instruction *ins, bi_registers *regs)
652 {
653 if (ins->dest_type == nir_type_float32)
654 return bi_pack_fma_addmin_f32(ins, regs);
655         else if (ins->dest_type == nir_type_float16)
656 return bi_pack_fmadd_min_f16(ins, regs, true);
657 else
658 unreachable("Unknown FMA/ADD type");
659 }
660
661 static unsigned
662 bi_pack_fma_1src(bi_instruction *ins, bi_registers *regs, unsigned op)
663 {
664 struct bifrost_fma_inst pack = {
665 .src0 = bi_get_src(ins, regs, 0),
666 .op = op
667 };
668
669 RETURN_PACKED(pack);
670 }
671
672 static unsigned
673 bi_pack_fma_2src(bi_instruction *ins, bi_registers *regs, unsigned op)
674 {
675 struct bifrost_fma_2src pack = {
676 .src0 = bi_get_src(ins, regs, 0),
677 .src1 = bi_get_src(ins, regs, 1),
678 .op = op
679 };
680
681 RETURN_PACKED(pack);
682 }
683
684 static unsigned
685 bi_pack_add_1src(bi_instruction *ins, bi_registers *regs, unsigned op)
686 {
687 struct bifrost_add_inst pack = {
688 .src0 = bi_get_src(ins, regs, 0),
689 .op = op
690 };
691
692 RETURN_PACKED(pack);
693 }
694
695 static enum bifrost_csel_cond
696 bi_cond_to_csel(enum bi_cond cond, bool *flip, bool *invert, nir_alu_type T)
697 {
698 nir_alu_type B = nir_alu_type_get_base_type(T);
699 unsigned idx = (B == nir_type_float) ? 0 :
700 ((B == nir_type_int) ? 1 : 2);
701
702 switch (cond){
703 case BI_COND_LT:
704 *flip = true;
705 case BI_COND_GT: {
706 const enum bifrost_csel_cond ops[] = {
707 BIFROST_FGT_F,
708 BIFROST_IGT_I,
709 BIFROST_UGT_I
710 };
711
712 return ops[idx];
713 }
714 case BI_COND_LE:
715 *flip = true;
716 case BI_COND_GE: {
717 const enum bifrost_csel_cond ops[] = {
718 BIFROST_FGE_F,
719 BIFROST_IGE_I,
720 BIFROST_UGE_I
721 };
722
723 return ops[idx];
724 }
725 case BI_COND_NE:
726 *invert = true;
727 case BI_COND_EQ: {
728 const enum bifrost_csel_cond ops[] = {
729 BIFROST_FEQ_F,
730 BIFROST_IEQ_F,
731 BIFROST_IEQ_F /* sign is irrelevant */
732 };
733
734 return ops[idx];
735 }
736 default:
737 unreachable("Invalid op for csel");
738 }
739 }
740
741 static unsigned
742 bi_pack_fma_csel(bi_instruction *ins, bi_registers *regs)
743 {
744 /* TODO: Use csel3 as well */
745 bool flip = false, invert = false;
746
747 enum bifrost_csel_cond cond =
748 bi_cond_to_csel(ins->cond, &flip, &invert, ins->src_types[0]);
749
750 unsigned size = nir_alu_type_get_type_size(ins->dest_type);
751
752 unsigned cmp_0 = (flip ? 1 : 0);
753 unsigned cmp_1 = (flip ? 0 : 1);
754 unsigned res_0 = (invert ? 3 : 2);
755 unsigned res_1 = (invert ? 2 : 3);
756
757 struct bifrost_csel4 pack = {
758 .src0 = bi_get_src(ins, regs, cmp_0),
759 .src1 = bi_get_src(ins, regs, cmp_1),
760 .src2 = bi_get_src(ins, regs, res_0),
761 .src3 = bi_get_src(ins, regs, res_1),
762 .cond = cond,
763 .op = (size == 16) ? BIFROST_FMA_OP_CSEL4_V16 :
764 BIFROST_FMA_OP_CSEL4
765 };
766
767 RETURN_PACKED(pack);
768 }
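
/* Worked example: an IR csel "a < b ? x : y" has no native less-than, so the
 * condition is flipped and it packs as "b > a ? x : y" (comparison operands
 * swapped, results untouched); "a != b ? x : y" instead packs as the
 * equality test "a == b ? y : x" (results swapped). */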
769
770 static unsigned
771 bi_pack_fma_frexp(bi_instruction *ins, bi_registers *regs)
772 {
773 unsigned op = BIFROST_FMA_OP_FREXPE_LOG;
774 return bi_pack_fma_1src(ins, regs, op);
775 }
776
777 static unsigned
778 bi_pack_fma_reduce(bi_instruction *ins, bi_registers *regs)
779 {
780 if (ins->op.reduce == BI_REDUCE_ADD_FREXPM) {
781 return bi_pack_fma_2src(ins, regs, BIFROST_FMA_OP_ADD_FREXPM);
782 } else {
783 unreachable("Invalid reduce op");
784 }
785 }
786
787 /* We have a single convert opcode in the IR but a number of opcodes that could
788 * come out. In particular we have native opcodes for:
789 *
790 * [ui]16 --> [fui]32 -- int16_to_32
791 * f16 --> f32 -- float16_to_32
792 * f32 --> f16 -- float32_to_16
793 * f32 --> [ui]32 -- float32_to_int
794 * [ui]32 --> f32 -- int_to_float32
795 * [fui]16 --> [fui]16 -- f2i_i2f16
796 */
797
798 static unsigned
799 bi_pack_convert(bi_instruction *ins, bi_registers *regs, bool FMA)
800 {
801 nir_alu_type from_base = nir_alu_type_get_base_type(ins->src_types[0]);
802 unsigned from_size = nir_alu_type_get_type_size(ins->src_types[0]);
803 bool from_unsigned = from_base == nir_type_uint;
804
805 nir_alu_type to_base = nir_alu_type_get_base_type(ins->dest_type);
806 unsigned to_size = nir_alu_type_get_type_size(ins->dest_type);
807 bool to_unsigned = to_base == nir_type_uint;
808 bool to_float = to_base == nir_type_float;
809
810 /* Sanity check */
811 assert((from_base != to_base) || (from_size != to_size));
812 assert((MAX2(from_size, to_size) / MIN2(from_size, to_size)) <= 2);
813
814 /* f32 to f16 is special */
815 if (from_size == 32 && to_size == 16 && from_base == nir_type_float && to_base == from_base) {
816 /* TODO: second vectorized source? */
817 struct bifrost_fma_2src pfma = {
818 .src0 = bi_get_src(ins, regs, 0),
819 .src1 = BIFROST_SRC_STAGE, /* 0 */
820 .op = BIFROST_FMA_FLOAT32_TO_16
821 };
822
823 struct bifrost_add_2src padd = {
824 .src0 = bi_get_src(ins, regs, 0),
825 .src1 = BIFROST_SRC_STAGE, /* 0 */
826 .op = BIFROST_ADD_FLOAT32_TO_16
827 };
828
829 if (FMA) {
830 RETURN_PACKED(pfma);
831 } else {
832 RETURN_PACKED(padd);
833 }
834 }
835
836 /* Otherwise, figure out the mode */
837 unsigned op = 0;
838
839 if (from_size == 16 && to_size == 32) {
840 unsigned component = ins->swizzle[0][0];
841 assert(component <= 1);
842
843 if (from_base == nir_type_float)
844 op = BIFROST_CONVERT_5(component);
845 else
846 op = BIFROST_CONVERT_4(from_unsigned, component, to_float);
847 } else {
848 unsigned mode = 0;
849 unsigned swizzle = (from_size == 16) ? bi_swiz16(ins, 0) : 0;
850 bool is_unsigned = from_unsigned;
851
852 if (from_base == nir_type_float) {
853 assert(to_base != nir_type_float);
854 is_unsigned = to_unsigned;
855
856 if (from_size == 32 && to_size == 32)
857 mode = BIFROST_CONV_F32_TO_I32;
858 else if (from_size == 16 && to_size == 16)
859 mode = BIFROST_CONV_F16_TO_I16;
860 else
861 unreachable("Invalid float conversion");
862 } else {
863 assert(to_base == nir_type_float);
864 assert(from_size == to_size);
865
866 if (to_size == 32)
867 mode = BIFROST_CONV_I32_TO_F32;
868 else if (to_size == 16)
869 mode = BIFROST_CONV_I16_TO_F16;
870 else
871 unreachable("Invalid int conversion");
872 }
873
874 /* Fixup swizzle for 32-bit only modes */
875
876 if (mode == BIFROST_CONV_I32_TO_F32)
877 swizzle = 0b11;
878 else if (mode == BIFROST_CONV_F32_TO_I32)
879 swizzle = 0b10;
880
881 op = BIFROST_CONVERT(is_unsigned, ins->roundmode, swizzle, mode);
882
883 /* Unclear what the top bit is for... maybe 16-bit related */
884 bool mode2 = mode == BIFROST_CONV_F16_TO_I16;
885 bool mode6 = mode == BIFROST_CONV_I16_TO_F16;
886
887 if (!(mode2 || mode6))
888 op |= 0x100;
889 }
890
891 if (FMA)
892 return bi_pack_fma_1src(ins, regs, BIFROST_FMA_CONVERT | op);
893 else
894 return bi_pack_add_1src(ins, regs, BIFROST_ADD_CONVERT | op);
895 }
896
897 static unsigned
898 bi_pack_fma_select(bi_instruction *ins, bi_registers *regs)
899 {
900 unsigned size = nir_alu_type_get_type_size(ins->src_types[0]);
901
902 if (size == 16) {
903 unsigned swiz = (ins->swizzle[0][0] | (ins->swizzle[1][0] << 1));
904 unsigned op = BIFROST_FMA_SEL_16(swiz);
905 return bi_pack_fma_2src(ins, regs, op);
906 } else if (size == 8) {
907 unsigned swiz = 0;
908
909 for (unsigned c = 0; c < 4; ++c) {
910 if (ins->swizzle[c][0]) {
911 /* Ensure lowering restriction is met */
912 assert(ins->swizzle[c][0] == 2);
913 swiz |= (1 << c);
914 }
915 }
916
917 struct bifrost_fma_sel8 pack = {
918 .src0 = bi_get_src(ins, regs, 0),
919 .src1 = bi_get_src(ins, regs, 1),
920 .src2 = bi_get_src(ins, regs, 2),
921 .src3 = bi_get_src(ins, regs, 3),
922 .swizzle = swiz,
923 .op = BIFROST_FMA_OP_SEL8
924 };
925
926 RETURN_PACKED(pack);
927 } else {
928 unreachable("Unimplemented");
929 }
930 }
931
932 static enum bifrost_fcmp_cond
933 bi_fcmp_cond(enum bi_cond cond)
934 {
935 switch (cond) {
936 case BI_COND_LT: return BIFROST_OLT;
937 case BI_COND_LE: return BIFROST_OLE;
938 case BI_COND_GE: return BIFROST_OGE;
939 case BI_COND_GT: return BIFROST_OGT;
940 case BI_COND_EQ: return BIFROST_OEQ;
941 case BI_COND_NE: return BIFROST_UNE;
942 default: unreachable("Unknown bi_cond");
943 }
944 }
945
946 /* a <?> b <==> b <flip(?)> a (TODO: NaN behaviour?) */
947
948 static enum bifrost_fcmp_cond
949 bi_flip_fcmp(enum bifrost_fcmp_cond cond)
950 {
951 switch (cond) {
952 case BIFROST_OGT:
953 return BIFROST_OLT;
954 case BIFROST_OGE:
955 return BIFROST_OLE;
956 case BIFROST_OLT:
957 return BIFROST_OGT;
958 case BIFROST_OLE:
959 return BIFROST_OGE;
960 case BIFROST_OEQ:
961 case BIFROST_UNE:
962 return cond;
963 default:
964 unreachable("Unknown fcmp cond");
965 }
966 }
967
968 static unsigned
969 bi_pack_fma_cmp(bi_instruction *ins, bi_registers *regs)
970 {
971 nir_alu_type Tl = ins->src_types[0];
972 nir_alu_type Tr = ins->src_types[1];
973
974 if (Tl == nir_type_float32 || Tr == nir_type_float32) {
975 /* TODO: Mixed 32/16 cmp */
976 assert(Tl == Tr);
977
978 enum bifrost_fcmp_cond cond = bi_fcmp_cond(ins->cond);
979
980 /* Only src1 has neg, so we arrange:
981 * a < b --- native
982 * a < -b --- native
983 * -a < -b <===> a > b
984 * -a < b <===> a > -b
985 * TODO: Is this NaN-precise?
986 */
987
988 bool flip = ins->src_neg[0];
989 bool neg = ins->src_neg[0] ^ ins->src_neg[1];
990
991 if (flip)
992 cond = bi_flip_fcmp(cond);
993
994 struct bifrost_fma_fcmp pack = {
995 .src0 = bi_get_src(ins, regs, 0),
996 .src1 = bi_get_src(ins, regs, 1),
997 .src0_abs = ins->src_abs[0],
998 .src1_abs = ins->src_abs[1],
999 .src1_neg = neg,
1000 .src_expand = 0,
1001 .unk1 = 0,
1002 .cond = cond,
1003 .op = BIFROST_FMA_OP_FCMP_GL
1004 };
1005
1006 RETURN_PACKED(pack);
1007 } else if (Tl == nir_type_float16 && Tr == nir_type_float16) {
1008 bool flip = false;
1009 bool l = bi_pack_fp16_abs(ins, regs, &flip);
1010 enum bifrost_fcmp_cond cond = bi_fcmp_cond(ins->cond);
1011
1012 if (flip)
1013 cond = bi_flip_fcmp(cond);
1014
1015 struct bifrost_fma_fcmp16 pack = {
1016 .src0 = bi_get_src(ins, regs, flip ? 1 : 0),
1017 .src1 = bi_get_src(ins, regs, flip ? 0 : 1),
1018 .src0_swizzle = bi_swiz16(ins, flip ? 1 : 0),
1019 .src1_swizzle = bi_swiz16(ins, flip ? 0 : 1),
1020 .abs1 = l,
1021 .unk = 0,
1022 .cond = cond,
1023 .op = BIFROST_FMA_OP_FCMP_GL_16,
1024 };
1025
1026 RETURN_PACKED(pack);
1027 } else {
1028 unreachable("Unknown cmp type");
1029 }
1030 }
1031
1032 static unsigned
1033 bi_fma_bitwise_op(enum bi_bitwise_op op, bool rshift)
1034 {
1035 switch (op) {
1036 case BI_BITWISE_OR:
1037 /* Via De Morgan's */
1038 return rshift ?
1039 BIFROST_FMA_OP_RSHIFT_NAND :
1040 BIFROST_FMA_OP_LSHIFT_NAND;
1041 case BI_BITWISE_AND:
1042 return rshift ?
1043 BIFROST_FMA_OP_RSHIFT_AND :
1044 BIFROST_FMA_OP_LSHIFT_AND;
1045 case BI_BITWISE_XOR:
1046 /* Shift direction handled out of band */
1047 return BIFROST_FMA_OP_RSHIFT_XOR;
1048 default:
1049 unreachable("Unknown op");
1050 }
1051 }
1052
1053 static unsigned
1054 bi_pack_fma_bitwise(bi_instruction *ins, bi_registers *regs)
1055 {
1056 unsigned size = nir_alu_type_get_type_size(ins->dest_type);
1057 assert(size <= 32);
1058
1059 bool invert_0 = ins->bitwise.src_invert[0];
1060 bool invert_1 = ins->bitwise.src_invert[1];
1061
1062 if (ins->op.bitwise == BI_BITWISE_OR) {
1063 /* Becomes NAND, so via De Morgan's:
1064 * f(A) | f(B) = ~(~f(A) & ~f(B))
1065 * = NAND(~f(A), ~f(B))
1066 */
1067
1068 invert_0 = !invert_0;
1069 invert_1 = !invert_1;
1070 } else if (ins->op.bitwise == BI_BITWISE_XOR) {
1071 /* ~A ^ ~B = ~(A ^ ~B) = ~(~(A ^ B)) = A ^ B
1072 * ~A ^ B = ~(A ^ B) = A ^ ~B
1073 */
1074
1075 invert_0 ^= invert_1;
1076 invert_1 = false;
1077
1078 /* invert_1 ends up specifying shift direction */
1079 invert_1 = !ins->bitwise.rshift;
1080 }
1081
1082 struct bifrost_shift_fma pack = {
1083 .src0 = bi_get_src(ins, regs, 0),
1084 .src1 = bi_get_src(ins, regs, 1),
1085 .src2 = bi_get_src(ins, regs, 2),
1086 .half = (size == 32) ? 0 : (size == 16) ? 0x7 : (size == 8) ? 0x4 : 0,
1087 .unk = 1, /* XXX */
1088 .invert_1 = invert_0,
1089 .invert_2 = invert_1,
1090 .op = bi_fma_bitwise_op(ins->op.bitwise, ins->bitwise.rshift)
1091 };
1092
1093 RETURN_PACKED(pack);
1094 }
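
/* Worked example: "a | ~b" is emitted as the NAND opcode with both invert
 * bits toggled, i.e. ~(~a & b), which equals a | ~b by De Morgan's. For XOR,
 * "~a ^ ~b" folds to "a ^ b" and "~a ^ b" to "a ^ ~b", so only invert_0
 * survives and the invert_1 field is repurposed as the shift-direction bit. */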
1095
1096 static unsigned
1097 bi_pack_fma_round(bi_instruction *ins, bi_registers *regs)
1098 {
1099 bool fp16 = ins->dest_type == nir_type_float16;
1100 assert(fp16 || ins->dest_type == nir_type_float32);
1101
1102 unsigned op = fp16
1103 ? BIFROST_FMA_ROUND_16(ins->roundmode, bi_swiz16(ins, 0))
1104 : BIFROST_FMA_ROUND_32(ins->roundmode);
1105
1106 return bi_pack_fma_1src(ins, regs, op);
1107 }
1108
1109 static unsigned
1110 bi_pack_fma_imath(bi_instruction *ins, bi_registers *regs)
1111 {
1112 /* Scheduler: only ADD can have 8/16-bit imath */
1113 assert(ins->dest_type == nir_type_int32 || ins->dest_type == nir_type_uint32);
1114
1115 unsigned op = ins->op.imath == BI_IMATH_ADD
1116 ? BIFROST_FMA_IADD_32
1117 : BIFROST_FMA_ISUB_32;
1118
1119 return bi_pack_fma_2src(ins, regs, op);
1120 }
1121
1122 static unsigned
1123 bi_pack_fma(bi_clause *clause, bi_bundle bundle, bi_registers *regs)
1124 {
1125 if (!bundle.fma)
1126 return BIFROST_FMA_NOP;
1127
1128 switch (bundle.fma->type) {
1129 case BI_ADD:
1130 return bi_pack_fma_addmin(bundle.fma, regs);
1131 case BI_CMP:
1132 return bi_pack_fma_cmp(bundle.fma, regs);
1133 case BI_BITWISE:
1134 return bi_pack_fma_bitwise(bundle.fma, regs);
1135 case BI_CONVERT:
1136 return bi_pack_convert(bundle.fma, regs, true);
1137 case BI_CSEL:
1138 return bi_pack_fma_csel(bundle.fma, regs);
1139 case BI_FMA:
1140 return bi_pack_fma_fma(bundle.fma, regs);
1141 case BI_FREXP:
1142 return bi_pack_fma_frexp(bundle.fma, regs);
1143 case BI_IMATH:
1144 return bi_pack_fma_imath(bundle.fma, regs);
1145 case BI_MINMAX:
1146 return bi_pack_fma_addmin(bundle.fma, regs);
1147 case BI_MOV:
1148 return bi_pack_fma_1src(bundle.fma, regs, BIFROST_FMA_OP_MOV);
1149 case BI_SHIFT:
1150 unreachable("Packing todo");
1151 case BI_SELECT:
1152 return bi_pack_fma_select(bundle.fma, regs);
1153 case BI_ROUND:
1154 return bi_pack_fma_round(bundle.fma, regs);
1155 case BI_REDUCE_FMA:
1156 return bi_pack_fma_reduce(bundle.fma, regs);
1157 default:
1158 unreachable("Cannot encode class as FMA");
1159 }
1160 }
1161
1162 static unsigned
1163 bi_pack_add_ld_vary(bi_clause *clause, bi_instruction *ins, bi_registers *regs)
1164 {
1165 unsigned size = nir_alu_type_get_type_size(ins->dest_type);
1166 assert(size == 32 || size == 16);
1167
1168 unsigned op = (size == 32) ?
1169 BIFROST_ADD_OP_LD_VAR_32 :
1170 BIFROST_ADD_OP_LD_VAR_16;
1171
1172 unsigned packed_addr = 0;
1173
1174 if (ins->src[0] & BIR_INDEX_CONSTANT) {
1175 /* Direct uses address field directly */
1176 packed_addr = bi_get_immediate(ins, 0);
1177 } else {
1178 /* Indirect gets an extra source */
1179 packed_addr = bi_get_src(ins, regs, 0) | 0b11000;
1180 }
1181
1182 /* The destination is thrown in the data register */
1183 assert(ins->dest & BIR_INDEX_REGISTER);
1184 clause->data_register = ins->dest & ~BIR_INDEX_REGISTER;
1185
1186 unsigned channels = ins->vector_channels;
1187 assert(channels >= 1 && channels <= 4);
1188
1189 struct bifrost_ld_var pack = {
1190 .src0 = bi_get_src(ins, regs, 1),
1191 .addr = packed_addr,
1192 .channels = MALI_POSITIVE(channels),
1193 .interp_mode = ins->load_vary.interp_mode,
1194 .reuse = ins->load_vary.reuse,
1195 .flat = ins->load_vary.flat,
1196 .op = op
1197 };
1198
1199 RETURN_PACKED(pack);
1200 }
1201
1202 static unsigned
1203 bi_pack_add_2src(bi_instruction *ins, bi_registers *regs, unsigned op)
1204 {
1205 struct bifrost_add_2src pack = {
1206 .src0 = bi_get_src(ins, regs, 0),
1207 .src1 = bi_get_src(ins, regs, 1),
1208 .op = op
1209 };
1210
1211 RETURN_PACKED(pack);
1212 }
1213
1214 static unsigned
1215 bi_pack_add_addmin_f32(bi_instruction *ins, bi_registers *regs)
1216 {
1217 unsigned op =
1218 (ins->type == BI_ADD) ? BIFROST_ADD_OP_FADD32 :
1219 (ins->op.minmax == BI_MINMAX_MIN) ? BIFROST_ADD_OP_FMIN32 :
1220 BIFROST_ADD_OP_FMAX32;
1221
1222 struct bifrost_add_faddmin pack = {
1223 .src0 = bi_get_src(ins, regs, 0),
1224 .src1 = bi_get_src(ins, regs, 1),
1225 .src0_abs = ins->src_abs[0],
1226 .src1_abs = ins->src_abs[1],
1227 .src0_neg = ins->src_neg[0],
1228 .src1_neg = ins->src_neg[1],
1229 .outmod = ins->outmod,
1230 .mode = (ins->type == BI_ADD) ? ins->roundmode : ins->minmax,
1231 .op = op
1232 };
1233
1234 RETURN_PACKED(pack);
1235 }
1236
1237 static unsigned
1238 bi_pack_add_add_f16(bi_instruction *ins, bi_registers *regs)
1239 {
1240 /* ADD.v2f16 can't have outmod */
1241 assert(ins->outmod == BIFROST_NONE);
1242
1243 struct bifrost_add_faddmin pack = {
1244 .src0 = bi_get_src(ins, regs, 0),
1245 .src1 = bi_get_src(ins, regs, 1),
1246 .src0_abs = ins->src_abs[0],
1247 .src1_abs = ins->src_abs[1],
1248 .src0_neg = ins->src_neg[0],
1249 .src1_neg = ins->src_neg[1],
1250 .select = bi_swiz16(ins, 0), /* swizzle_0 */
1251 .outmod = bi_swiz16(ins, 1), /* swizzle_1 */
1252 .mode = ins->roundmode,
1253 .op = BIFROST_ADD_OP_FADD16
1254 };
1255
1256 RETURN_PACKED(pack);
1257 }
1258
1259 static unsigned
1260 bi_pack_add_addmin(bi_instruction *ins, bi_registers *regs)
1261 {
1262 if (ins->dest_type == nir_type_float32)
1263 return bi_pack_add_addmin_f32(ins, regs);
1264 else if (ins->dest_type == nir_type_float16) {
1265 if (ins->type == BI_ADD)
1266 return bi_pack_add_add_f16(ins, regs);
1267 else
1268 return bi_pack_fmadd_min_f16(ins, regs, false);
1269 } else
1270 unreachable("Unknown FMA/ADD type");
1271 }
1272
1273 static unsigned
1274 bi_pack_add_ld_ubo(bi_clause *clause, bi_instruction *ins, bi_registers *regs)
1275 {
1276 assert(ins->vector_channels >= 1 && ins->vector_channels <= 4);
1277
1278 const unsigned ops[4] = {
1279 BIFROST_ADD_OP_LD_UBO_1,
1280 BIFROST_ADD_OP_LD_UBO_2,
1281 BIFROST_ADD_OP_LD_UBO_3,
1282 BIFROST_ADD_OP_LD_UBO_4
1283 };
1284
1285 bi_write_data_register(clause, ins);
1286 return bi_pack_add_2src(ins, regs, ops[ins->vector_channels - 1]);
1287 }
1288
1289 static enum bifrost_ldst_type
1290 bi_pack_ldst_type(nir_alu_type T)
1291 {
1292 switch (T) {
1293 case nir_type_float16: return BIFROST_LDST_F16;
1294 case nir_type_float32: return BIFROST_LDST_F32;
1295 case nir_type_int32: return BIFROST_LDST_I32;
1296 case nir_type_uint32: return BIFROST_LDST_U32;
1297 default: unreachable("Invalid type loaded");
1298 }
1299 }
1300
1301 static unsigned
1302 bi_pack_add_ld_var_addr(bi_clause *clause, bi_instruction *ins, bi_registers *regs)
1303 {
1304 struct bifrost_ld_var_addr pack = {
1305 .src0 = bi_get_src(ins, regs, 1),
1306 .src1 = bi_get_src(ins, regs, 2),
1307 .location = bi_get_immediate(ins, 0),
1308 .type = bi_pack_ldst_type(ins->src_types[3]),
1309 .op = BIFROST_ADD_OP_LD_VAR_ADDR
1310 };
1311
1312 bi_write_data_register(clause, ins);
1313 RETURN_PACKED(pack);
1314 }
1315
1316 static unsigned
1317 bi_pack_add_ld_attr(bi_clause *clause, bi_instruction *ins, bi_registers *regs)
1318 {
1319         assert(ins->vector_channels >= 1 && ins->vector_channels <= 4);
1320
1321 struct bifrost_ld_attr pack = {
1322 .src0 = bi_get_src(ins, regs, 1),
1323 .src1 = bi_get_src(ins, regs, 2),
1324 .location = bi_get_immediate(ins, 0),
1325 .channels = MALI_POSITIVE(ins->vector_channels),
1326 .type = bi_pack_ldst_type(ins->dest_type),
1327 .op = BIFROST_ADD_OP_LD_ATTR
1328 };
1329
1330 bi_write_data_register(clause, ins);
1331 RETURN_PACKED(pack);
1332 }
1333
1334 static unsigned
1335 bi_pack_add_st_vary(bi_clause *clause, bi_instruction *ins, bi_registers *regs)
1336 {
1337 assert(ins->vector_channels >= 1 && ins->vector_channels <= 4);
1338
1339 struct bifrost_st_vary pack = {
1340 .src0 = bi_get_src(ins, regs, 1),
1341 .src1 = bi_get_src(ins, regs, 2),
1342 .src2 = bi_get_src(ins, regs, 3),
1343 .channels = MALI_POSITIVE(ins->vector_channels),
1344 .op = BIFROST_ADD_OP_ST_VAR
1345 };
1346
1347 bi_read_data_register(clause, ins);
1348 RETURN_PACKED(pack);
1349 }
1350
1351 static unsigned
1352 bi_pack_add_atest(bi_clause *clause, bi_instruction *ins, bi_registers *regs)
1353 {
1354 bool fp16 = (ins->src_types[1] == nir_type_float16);
1355
1356 struct bifrost_add_atest pack = {
1357 .src0 = bi_get_src(ins, regs, 0),
1358 .src1 = bi_get_src(ins, regs, 1),
1359 .half = fp16,
1360 .component = fp16 ? ins->swizzle[1][0] : 1, /* Set for fp32 */
1361 .op = BIFROST_ADD_OP_ATEST,
1362 };
1363
1364 /* Despite *also* writing with the usual mechanism... quirky and
1365 * perhaps unnecessary, but let's match the blob */
1366 clause->data_register = ins->dest & ~BIR_INDEX_REGISTER;
1367
1368 RETURN_PACKED(pack);
1369 }
1370
1371 static unsigned
1372 bi_pack_add_blend(bi_clause *clause, bi_instruction *ins, bi_registers *regs)
1373 {
1374 struct bifrost_add_inst pack = {
1375 .src0 = bi_get_src(ins, regs, 1),
1376 .op = BIFROST_ADD_OP_BLEND
1377 };
1378
1379 /* TODO: Pack location in uniform_const */
1380 assert(ins->blend_location == 0);
1381
1382 bi_read_data_register(clause, ins);
1383 RETURN_PACKED(pack);
1384 }
1385
1386 static unsigned
1387 bi_pack_add_special(bi_instruction *ins, bi_registers *regs)
1388 {
1389 unsigned op = 0;
1390 bool fp16 = ins->dest_type == nir_type_float16;
1391 bool Y = ins->swizzle[0][0];
1392
1393 if (ins->op.special == BI_SPECIAL_FRCP) {
1394 op = fp16 ?
1395 (Y ? BIFROST_ADD_OP_FRCP_FAST_F16_Y :
1396 BIFROST_ADD_OP_FRCP_FAST_F16_X) :
1397 BIFROST_ADD_OP_FRCP_FAST_F32;
1398 } else if (ins->op.special == BI_SPECIAL_FRSQ) {
1399 op = fp16 ?
1400 (Y ? BIFROST_ADD_OP_FRSQ_FAST_F16_Y :
1401 BIFROST_ADD_OP_FRSQ_FAST_F16_X) :
1402 BIFROST_ADD_OP_FRSQ_FAST_F32;
1403
1404 } else if (ins->op.special == BI_SPECIAL_EXP2_LOW) {
1405 assert(!fp16);
1406 op = BIFROST_ADD_OP_FEXP2_FAST;
1407 } else {
1408 unreachable("Unknown special op");
1409 }
1410
1411 return bi_pack_add_1src(ins, regs, op);
1412 }
1413
1414 static unsigned
1415 bi_pack_add_table(bi_instruction *ins, bi_registers *regs)
1416 {
1417 unsigned op = 0;
1418 assert(ins->dest_type == nir_type_float32);
1419
1420 op = BIFROST_ADD_OP_LOG2_HELP;
1421 return bi_pack_add_1src(ins, regs, op);
1422 }
1423 static unsigned
1424 bi_pack_add_tex_compact(bi_clause *clause, bi_instruction *ins, bi_registers *regs, gl_shader_stage stage)
1425 {
1426 bool f16 = ins->dest_type == nir_type_float16;
1427 bool vtx = stage != MESA_SHADER_FRAGMENT;
1428
1429 struct bifrost_tex_compact pack = {
1430 .src0 = bi_get_src(ins, regs, 0),
1431 .src1 = bi_get_src(ins, regs, 1),
1432 .op = f16 ? BIFROST_ADD_OP_TEX_COMPACT_F16(vtx) :
1433 BIFROST_ADD_OP_TEX_COMPACT_F32(vtx),
1434 .compute_lod = !vtx,
1435 .tex_index = ins->texture.texture_index,
1436 .sampler_index = ins->texture.sampler_index
1437 };
1438
1439 bi_write_data_register(clause, ins);
1440 RETURN_PACKED(pack);
1441 }
1442
1443 static unsigned
1444 bi_pack_add_select(bi_instruction *ins, bi_registers *regs)
1445 {
1446 unsigned size = nir_alu_type_get_type_size(ins->src_types[0]);
1447 assert(size == 16);
1448
1449 unsigned swiz = (ins->swizzle[0][0] | (ins->swizzle[1][0] << 1));
1450 unsigned op = BIFROST_ADD_SEL_16(swiz);
1451 return bi_pack_add_2src(ins, regs, op);
1452 }
1453
1454 static enum bifrost_discard_cond
1455 bi_cond_to_discard(enum bi_cond cond, bool *flip)
1456 {
1457 switch (cond){
1458 case BI_COND_GT:
1459 *flip = true;
1460 /* fallthrough */
1461 case BI_COND_LT:
1462 return BIFROST_DISCARD_FLT;
1463 case BI_COND_GE:
1464 *flip = true;
1465 /* fallthrough */
1466 case BI_COND_LE:
1467 return BIFROST_DISCARD_FLE;
1468 case BI_COND_NE:
1469 return BIFROST_DISCARD_FNE;
1470 case BI_COND_EQ:
1471 return BIFROST_DISCARD_FEQ;
1472 default:
1473 unreachable("Invalid op for discard");
1474 }
1475 }
1476
1477 static unsigned
1478 bi_pack_add_discard(bi_instruction *ins, bi_registers *regs)
1479 {
1480 bool fp16 = ins->src_types[0] == nir_type_float16;
1481 assert(fp16 || ins->src_types[0] == nir_type_float32);
1482
1483 bool flip = false;
1484 enum bifrost_discard_cond cond = bi_cond_to_discard(ins->cond, &flip);
1485
1486 struct bifrost_add_discard pack = {
1487 .src0 = bi_get_src(ins, regs, flip ? 1 : 0),
1488 .src1 = bi_get_src(ins, regs, flip ? 0 : 1),
1489 .cond = cond,
1490 .src0_select = fp16 ? ins->swizzle[0][0] : 0,
1491 .src1_select = fp16 ? ins->swizzle[1][0] : 0,
1492 .fp32 = fp16 ? 0 : 1,
1493 .op = BIFROST_ADD_OP_DISCARD
1494 };
1495
1496 RETURN_PACKED(pack);
1497 }
1498
1499 static enum bifrost_icmp_cond
1500 bi_cond_to_icmp(enum bi_cond cond, bool *flip, bool is_unsigned, bool is_16)
1501 {
1502 switch (cond){
1503 case BI_COND_LT:
1504 *flip = true;
1505 /* fallthrough */
1506 case BI_COND_GT:
1507 return is_unsigned ? (is_16 ? BIFROST_ICMP_IGE : BIFROST_ICMP_UGT)
1508 : BIFROST_ICMP_IGT;
1509 case BI_COND_LE:
1510 *flip = true;
1511 /* fallthrough */
1512 case BI_COND_GE:
1513 return is_unsigned ? BIFROST_ICMP_UGE :
1514 (is_16 ? BIFROST_ICMP_UGT : BIFROST_ICMP_IGE);
1515 case BI_COND_NE:
1516 return BIFROST_ICMP_NEQ;
1517 case BI_COND_EQ:
1518 return BIFROST_ICMP_EQ;
1519 default:
1520 unreachable("Invalid op for icmp");
1521 }
1522 }
1523
1524 static unsigned
1525 bi_pack_add_icmp32(bi_instruction *ins, bi_registers *regs, bool flip,
1526 enum bifrost_icmp_cond cond)
1527 {
1528 struct bifrost_add_icmp pack = {
1529 .src0 = bi_get_src(ins, regs, flip ? 1 : 0),
1530 .src1 = bi_get_src(ins, regs, flip ? 0 : 1),
1531 .cond = cond,
1532 .sz = 1,
1533 .d3d = false,
1534 .op = BIFROST_ADD_OP_ICMP_32
1535 };
1536
1537 RETURN_PACKED(pack);
1538 }
1539
1540 static unsigned
1541 bi_pack_add_icmp16(bi_instruction *ins, bi_registers *regs, bool flip,
1542 enum bifrost_icmp_cond cond)
1543 {
1544 struct bifrost_add_icmp16 pack = {
1545 .src0 = bi_get_src(ins, regs, flip ? 1 : 0),
1546 .src1 = bi_get_src(ins, regs, flip ? 0 : 1),
1547 .src0_swizzle = bi_swiz16(ins, flip ? 1 : 0),
1548 .src1_swizzle = bi_swiz16(ins, flip ? 0 : 1),
1549 .cond = cond,
1550 .d3d = false,
1551 .op = BIFROST_ADD_OP_ICMP_16
1552 };
1553
1554 RETURN_PACKED(pack);
1555 }
1556
1557 static unsigned
1558 bi_pack_add_cmp(bi_instruction *ins, bi_registers *regs)
1559 {
1560 nir_alu_type Tl = ins->src_types[0];
1561 nir_alu_type Tr = ins->src_types[1];
1562 nir_alu_type Bl = nir_alu_type_get_base_type(Tl);
1563
1564 if (Bl == nir_type_uint || Bl == nir_type_int) {
1565 assert(Tl == Tr);
1566 unsigned sz = nir_alu_type_get_type_size(Tl);
1567
1568 bool flip = false;
1569
1570 enum bifrost_icmp_cond cond = bi_cond_to_icmp(
1571 sz == 16 ? /*bi_invert_cond*/(ins->cond) : ins->cond,
1572 &flip, Bl == nir_type_uint, sz == 16);
1573
1574 if (sz == 32)
1575 return bi_pack_add_icmp32(ins, regs, flip, cond);
1576 else if (sz == 16)
1577 return bi_pack_add_icmp16(ins, regs, flip, cond);
1578 else
1579 unreachable("TODO");
1580 } else {
1581 unreachable("TODO");
1582 }
1583 }
1584
1585 static unsigned
1586 bi_pack_add_imath(bi_instruction *ins, bi_registers *regs)
1587 {
1588 /* TODO: 32+16 add */
1589 assert(ins->src_types[0] == ins->src_types[1]);
1590 unsigned sz = nir_alu_type_get_type_size(ins->src_types[0]);
1591 enum bi_imath_op p = ins->op.imath;
1592
1593 unsigned op = 0;
1594
1595 if (sz == 8) {
1596 op = (p == BI_IMATH_ADD) ? BIFROST_ADD_IADD_8 :
1597 BIFROST_ADD_ISUB_8;
1598 } else if (sz == 16) {
1599 op = (p == BI_IMATH_ADD) ? BIFROST_ADD_IADD_16 :
1600 BIFROST_ADD_ISUB_16;
1601 } else if (sz == 32) {
1602 op = (p == BI_IMATH_ADD) ? BIFROST_ADD_IADD_32 :
1603 BIFROST_ADD_ISUB_32;
1604 } else {
1605 unreachable("64-bit todo");
1606 }
1607
1608 return bi_pack_add_2src(ins, regs, op);
1609 }
1610
1611 static unsigned
1612 bi_pack_add(bi_clause *clause, bi_bundle bundle, bi_registers *regs, gl_shader_stage stage)
1613 {
1614 if (!bundle.add)
1615 return BIFROST_ADD_NOP;
1616
1617 switch (bundle.add->type) {
1618 case BI_ADD:
1619 return bi_pack_add_addmin(bundle.add, regs);
1620 case BI_ATEST:
1621 return bi_pack_add_atest(clause, bundle.add, regs);
1622 case BI_BRANCH:
1623 unreachable("Packing todo");
1624 case BI_CMP:
1625 return bi_pack_add_cmp(bundle.add, regs);
1626 case BI_BLEND:
1627 return bi_pack_add_blend(clause, bundle.add, regs);
1628 case BI_BITWISE:
1629 unreachable("Packing todo");
1630 case BI_CONVERT:
1631 return bi_pack_convert(bundle.add, regs, false);
1632 case BI_DISCARD:
1633 return bi_pack_add_discard(bundle.add, regs);
1634 case BI_FREXP:
1635 unreachable("Packing todo");
1636 case BI_IMATH:
1637 return bi_pack_add_imath(bundle.add, regs);
1638 case BI_LOAD:
1639 unreachable("Packing todo");
1640 case BI_LOAD_ATTR:
1641 return bi_pack_add_ld_attr(clause, bundle.add, regs);
1642 case BI_LOAD_UNIFORM:
1643 return bi_pack_add_ld_ubo(clause, bundle.add, regs);
1644 case BI_LOAD_VAR:
1645 return bi_pack_add_ld_vary(clause, bundle.add, regs);
1646 case BI_LOAD_VAR_ADDRESS:
1647 return bi_pack_add_ld_var_addr(clause, bundle.add, regs);
1648 case BI_MINMAX:
1649 return bi_pack_add_addmin(bundle.add, regs);
1650 case BI_MOV:
1651 case BI_SHIFT:
1652 case BI_STORE:
1653 unreachable("Packing todo");
1654 case BI_STORE_VAR:
1655 return bi_pack_add_st_vary(clause, bundle.add, regs);
1656 case BI_SPECIAL:
1657 return bi_pack_add_special(bundle.add, regs);
1658 case BI_TABLE:
1659 return bi_pack_add_table(bundle.add, regs);
1660 case BI_SELECT:
1661 return bi_pack_add_select(bundle.add, regs);
1662 case BI_TEX:
1663 if (bundle.add->op.texture == BI_TEX_COMPACT)
1664 return bi_pack_add_tex_compact(clause, bundle.add, regs, stage);
1665 else
1666 unreachable("Unknown tex type");
1667 case BI_ROUND:
1668 unreachable("Packing todo");
1669 default:
1670 unreachable("Cannot encode class as ADD");
1671 }
1672 }
1673
1674 struct bi_packed_bundle {
1675 uint64_t lo;
1676 uint64_t hi;
1677 };
1678
1679 static struct bi_packed_bundle
1680 bi_pack_bundle(bi_clause *clause, bi_bundle bundle, bi_bundle prev, bool first_bundle, gl_shader_stage stage)
1681 {
1682 bi_assign_ports(&bundle, &prev);
1683 bi_assign_uniform_constant(clause, &bundle.regs, bundle);
1684 bundle.regs.first_instruction = first_bundle;
1685
1686 uint64_t reg = bi_pack_registers(bundle.regs);
1687 uint64_t fma = bi_pack_fma(clause, bundle, &bundle.regs);
1688 uint64_t add = bi_pack_add(clause, bundle, &bundle.regs, stage);
1689
1690 struct bi_packed_bundle packed = {
1691 .lo = reg | (fma << 35) | ((add & 0b111111) << 58),
1692 .hi = add >> 6
1693 };
1694
1695 return packed;
1696 }
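
/* Layout sketch, as implied by the shifts above and by bi_pack_clause below:
 * .lo carries the 35-bit register block in bits 0..34, the 23-bit FMA opcode
 * in bits 35..57 and the low six bits of the 20-bit ADD opcode in bits
 * 58..63; the remaining ADD bits land in .hi and are later split into the
 * ins_2/ins_0 fields of the clause quadword. */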
1697
1698 /* Packs the next two constants as a dedicated constant quadword at the end of
1699 * the clause, returning the number packed. */
1700
1701 static unsigned
1702 bi_pack_constants(bi_context *ctx, bi_clause *clause,
1703 unsigned index,
1704 struct util_dynarray *emission)
1705 {
1706 /* After these two, are we done? Determines tag */
1707 bool done = clause->constant_count <= (index + 2);
1708 bool only = clause->constant_count <= (index + 1);
1709
1710 /* TODO: Pos */
1711 assert(index == 0 && clause->bundle_count == 1);
1712 assert(only);
1713
1714 uint64_t hi = clause->constants[index + 0] >> 60ull;
1715
1716 struct bifrost_fmt_constant quad = {
1717 .pos = 0, /* TODO */
1718 .tag = done ? BIFROST_FMTC_FINAL : BIFROST_FMTC_CONSTANTS,
1719 .imm_1 = clause->constants[index + 0] >> 4,
1720 .imm_2 = ((hi < 8) ? (hi << 60ull) : 0) >> 4,
1721 };
1722
1723 /* XXX: On G71, Connor observed that the difference of the top 4 bits
1724 * of the second constant with the first must be less than 8, otherwise
1725 * we have to swap them. On G52, I'm able to reproduce a similar issue
1726 * but with a different workaround (modeled above with a single
1727  * constant; unclear how to work around it for multiple constants.) Further
1728 * investigation needed. Possibly an errata. XXX */
1729
1730 util_dynarray_append(emission, struct bifrost_fmt_constant, quad);
1731
1732 return 2;
1733 }
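
/* Worked example: a clause constant 0xABCD12345678904F is emitted here with
 * imm_1 = 0x0ABCD12345678904 (its top 60 bits), while the low nibble 0xF is
 * re-encoded into each referencing bundle's uniform/const selector (see
 * bi_assign_uniform_constant_single above). */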
1734
1735 static void
1736 bi_pack_clause(bi_context *ctx, bi_clause *clause, bi_clause *next,
1737 struct util_dynarray *emission, gl_shader_stage stage)
1738 {
1739 struct bi_packed_bundle ins_1 = bi_pack_bundle(clause, clause->bundles[0], clause->bundles[0], true, stage);
1740 assert(clause->bundle_count == 1);
1741
1742 /* Used to decide if we elide writes */
1743 bool is_fragment = ctx->stage == MESA_SHADER_FRAGMENT;
1744
1745 /* State for packing constants throughout */
1746 unsigned constant_index = 0;
1747
1748 struct bifrost_fmt1 quad_1 = {
1749 .tag = clause->constant_count ? BIFROST_FMT1_CONSTANTS : BIFROST_FMT1_FINAL,
1750 .header = bi_pack_header(clause, next, is_fragment),
1751 .ins_1 = ins_1.lo,
1752 .ins_2 = ins_1.hi & ((1 << 11) - 1),
1753 .ins_0 = (ins_1.hi >> 11) & 0b111,
1754 };
1755
1756 util_dynarray_append(emission, struct bifrost_fmt1, quad_1);
1757
1758 /* Pack the remaining constants */
1759
1760 while (constant_index < clause->constant_count) {
1761 constant_index += bi_pack_constants(ctx, clause,
1762 constant_index, emission);
1763 }
1764 }
1765
1766 static bi_clause *
1767 bi_next_clause(bi_context *ctx, pan_block *block, bi_clause *clause)
1768 {
1769 /* Try the next clause in this block */
1770 if (clause->link.next != &((bi_block *) block)->clauses)
1771 return list_first_entry(&(clause->link), bi_clause, link);
1772
1773         /* Try the next block, or the one after that if it's empty, etc. */
1774 pan_block *next_block = pan_next_block(block);
1775
1776 bi_foreach_block_from(ctx, next_block, block) {
1777 bi_block *blk = (bi_block *) block;
1778
1779 if (!list_is_empty(&blk->clauses))
1780 return list_first_entry(&(blk->clauses), bi_clause, link);
1781 }
1782
1783 return NULL;
1784 }
1785
1786 void
1787 bi_pack(bi_context *ctx, struct util_dynarray *emission)
1788 {
1789 util_dynarray_init(emission, NULL);
1790
1791 bi_foreach_block(ctx, _block) {
1792 bi_block *block = (bi_block *) _block;
1793
1794 bi_foreach_clause_in_block(block, clause) {
1795 bi_clause *next = bi_next_clause(ctx, _block, clause);
1796 bi_pack_clause(ctx, clause, next, emission, ctx->stage);
1797 }
1798 }
1799 }