pan/bi: Move bi_registers to common IR structures
[mesa.git] / src / panfrost / bifrost / bi_pack.c
1 /*
2 * Copyright (C) 2020 Collabora, Ltd.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 */
23
24 #include "compiler.h"
25 #include "bi_print.h"
26
/* Copies the bits of a packed encoding struct into a 64-bit word and returns
 * it. Must be used inside a function returning uint64_t. The static assert
 * guards against a struct wider than 64 bits, which would otherwise overflow
 * the temporary via memcpy. */
#define RETURN_PACKED(str) { \
        uint64_t temp = 0; \
        _Static_assert(sizeof(str) <= sizeof(uint64_t), \
                        "packed struct must fit in 64 bits"); \
        memcpy(&temp, &str, sizeof(str)); \
        return temp; \
}
32
33 /* This file contains the final passes of the compiler. Running after
34 * scheduling and RA, the IR is now finalized, so we need to emit it to actual
35 * bits on the wire (as well as fixup branches) */
36
/* Packs the 64-bit clause header. `next` is the clause that follows (NULL for
 * the last clause of the shader); several header fields describe the next
 * clause rather than this one. */
static uint64_t
bi_pack_header(bi_clause *clause, bi_clause *next, bool is_fragment)
{
        struct bifrost_header header = {
                .back_to_back = clause->back_to_back,
                .no_end_of_shader = (next != NULL),
                /* NOTE(review): set for all fragment shaders -- presumably
                 * related to discard handling; confirm exact semantics */
                .elide_writes = is_fragment,
                .branch_cond = clause->branch_conditional,
                .datareg_writebarrier = clause->data_register_write_barrier,
                .datareg = clause->data_register,
                /* Dependencies are on the *next* clause's wait set */
                .scoreboard_deps = next ? next->dependencies : 0,
                .scoreboard_index = clause->scoreboard_id,
                .clause_type = clause->clause_type,
                .next_clause_type = next ? next->clause_type : 0,
                .suppress_inf = true,
                .suppress_nan = true,
        };

        /* Back-to-back clauses imply the branch-conditional bit */
        header.branch_cond |= header.back_to_back;

        uint64_t u = 0;
        memcpy(&u, &header, sizeof(header));
        return u;
}
61
62 /* The uniform/constant slot allows loading a contiguous 64-bit immediate or
63 * pushed uniform per bundle. Figure out which one we need in the bundle (the
64 * scheduler needs to ensure we only have one type per bundle), validate
65 * everything, and rewrite away the register/uniform indices to use 3-bit
66 * sources directly. */
67
/* Finds the index of the clause-embedded 64-bit constant containing `cons`.
 * Only the top 60 bits of each constant live in the clause (the low 4 bits
 * travel inline in the bundle), so matching is done on the >>4 parts. For
 * sub-64-bit accesses the value may match either half of an embedded
 * constant; *hi reports that the high half matched. Aborts if the scheduler
 * failed to embed the constant. */
static unsigned
bi_lookup_constant(bi_clause *clause, uint64_t cons, bool *hi, bool b64)
{
        uint64_t want = (cons >> 4);

        for (unsigned i = 0; i < clause->constant_count; ++i) {
                /* Only check top 60-bits since that's what's actually embedded
                 * in the clause, the bottom 4-bits are bundle-inline */

                uint64_t candidates[2] = {
                        clause->constants[i] >> 4,
                        clause->constants[i] >> 36
                };

                /* For <64-bit mode, we treat lo/hi separately */

                if (!b64)
                        candidates[0] &= (0xFFFFFFFF >> 4);

                if (candidates[0] == want)
                        return i;

                if (candidates[1] == want && !b64) {
                        *hi = true;
                        return i;
                }
        }

        unreachable("Invalid constant accessed");
}
98
/* Maps a clause constant index (0..5) to its uniform/constant field encoding,
 * pre-shifted into position: indices 0-3 encode as 4-7, indices 4-5 as 2-3. */
static unsigned
bi_constant_field(unsigned idx)
{
        assert(idx <= 5);

        unsigned encoded = (idx < 4) ? (idx + 4) : (idx - 2);
        return encoded << 4;
}
110
111 static bool
112 bi_assign_uniform_constant_single(
113 struct bi_registers *regs,
114 bi_clause *clause,
115 bi_instruction *ins, bool assigned, bool fast_zero)
116 {
117 if (!ins)
118 return assigned;
119
120 if (ins->type == BI_BLEND) {
121 assert(!assigned);
122 regs->uniform_constant = 0x8;
123 return true;
124 }
125
126 bi_foreach_src(ins, s) {
127 if (s == 0 && (ins->type == BI_LOAD_VAR_ADDRESS || ins->type == BI_LOAD_ATTR)) continue;
128
129 if (ins->src[s] & BIR_INDEX_CONSTANT) {
130 /* Let direct addresses through */
131 if (ins->type == BI_LOAD_VAR)
132 continue;
133
134 bool hi = false;
135 bool b64 = nir_alu_type_get_type_size(ins->src_types[s]) > 32;
136 uint64_t cons = bi_get_immediate(ins, s);
137 unsigned idx = bi_lookup_constant(clause, cons, &hi, b64);
138 unsigned lo = clause->constants[idx] & 0xF;
139 unsigned f = bi_constant_field(idx) | lo;
140
141 if (assigned && regs->uniform_constant != f)
142 unreachable("Mismatched uniform/const field: imm");
143
144 regs->uniform_constant = f;
145 ins->src[s] = BIR_INDEX_PASS | (hi ? BIFROST_SRC_CONST_HI : BIFROST_SRC_CONST_LO);
146 assigned = true;
147 } else if (ins->src[s] & BIR_INDEX_ZERO && (ins->type == BI_LOAD_UNIFORM || ins->type == BI_LOAD_VAR)) {
148 /* XXX: HACK UNTIL WE HAVE HI MATCHING DUE TO OVERFLOW XXX */
149 ins->src[s] = BIR_INDEX_PASS | BIFROST_SRC_CONST_HI;
150 } else if (ins->src[s] & BIR_INDEX_ZERO && !fast_zero) {
151 /* FMAs have a fast zero port, ADD needs to use the
152 * uniform/const port's special 0 mode handled here */
153 unsigned f = 0;
154
155 if (assigned && regs->uniform_constant != f)
156 unreachable("Mismatched uniform/const field: 0");
157
158 regs->uniform_constant = f;
159 ins->src[s] = BIR_INDEX_PASS | BIFROST_SRC_CONST_LO;
160 assigned = true;
161 } else if (ins->src[s] & BIR_INDEX_ZERO && fast_zero) {
162 ins->src[s] = BIR_INDEX_PASS | BIFROST_SRC_STAGE;
163 } else if (s & BIR_INDEX_UNIFORM) {
164 unreachable("Push uniforms not implemented yet");
165 }
166 }
167
168 return assigned;
169 }
170
/* Assigns the bundle's shared uniform/constant slot: FMA first (it has a fast
 * zero port), then ADD (which does not), threading through whether the slot
 * was already claimed so conflicts are caught. */
static void
bi_assign_uniform_constant(
                bi_clause *clause,
                struct bi_registers *regs,
                bi_bundle bundle)
{
        bool assigned =
                bi_assign_uniform_constant_single(regs, clause, bundle.fma, false, true);

        bi_assign_uniform_constant_single(regs, clause, bundle.add, assigned, false);
}
182
183 /* Assigns a port for reading, before anything is written */
184
185 static void
186 bi_assign_port_read(struct bi_registers *regs, unsigned src)
187 {
188 /* We only assign for registers */
189 if (!(src & BIR_INDEX_REGISTER))
190 return;
191
192 unsigned reg = src & ~BIR_INDEX_REGISTER;
193
194 /* Check if we already assigned the port */
195 for (unsigned i = 0; i <= 1; ++i) {
196 if (regs->port[i] == reg && regs->enabled[i])
197 return;
198 }
199
200 if (regs->port[3] == reg && regs->read_port3)
201 return;
202
203 /* Assign it now */
204
205 for (unsigned i = 0; i <= 1; ++i) {
206 if (!regs->enabled[i]) {
207 regs->port[i] = reg;
208 regs->enabled[i] = true;
209 return;
210 }
211 }
212
213 if (!regs->read_port3) {
214 regs->port[3] = reg;
215 regs->read_port3 = true;
216 return;
217 }
218
219 bi_print_ports(regs);
220 unreachable("Failed to find a free port for src");
221 }
222
/* Computes the port assignment for one bundle. Reads belong to the current
 * bundle `now`; writes land one stage later and are therefore taken from the
 * previous bundle `prev`. Operands using the data-register mechanism bypass
 * the ports entirely. */
static struct bi_registers
bi_assign_ports(bi_bundle now, bi_bundle prev)
{
        struct bi_registers regs = { 0 };

        /* We assign ports for the main register mechanism. Special ops
         * use the data registers, which has its own mechanism entirely
         * and thus gets skipped over here. */

        unsigned read_dreg = now.add &&
                bi_class_props[now.add->type] & BI_DATA_REG_SRC;

        unsigned write_dreg = prev.add &&
                bi_class_props[prev.add->type] & BI_DATA_REG_DEST;

        /* First, assign reads */

        if (now.fma)
                bi_foreach_src(now.fma, src)
                        bi_assign_port_read(&regs, now.fma->src[src]);

        if (now.add) {
                bi_foreach_src(now.add, src) {
                        /* src0 of a data-register op is staged through the
                         * data register, not a read port */
                        if (!(src == 0 && read_dreg))
                                bi_assign_port_read(&regs, now.add->src[src]);
                }
        }

        /* Next, assign writes */

        if (prev.add && prev.add->dest & BIR_INDEX_REGISTER && !write_dreg) {
                regs.port[2] = prev.add->dest & ~BIR_INDEX_REGISTER;
                regs.write_add = true;
        }

        if (prev.fma && prev.fma->dest & BIR_INDEX_REGISTER) {
                unsigned r = prev.fma->dest & ~BIR_INDEX_REGISTER;

                if (regs.write_add) {
                        /* Scheduler constraint: cannot read 3 and write 2 */
                        assert(!regs.read_port3);
                        regs.port[3] = r;
                } else {
                        regs.port[2] = r;
                }

                regs.write_fma = true;
        }

        /* Finally, ensure port 1 > port 0 for the 63-x trick to function */

        if (regs.enabled[0] && regs.enabled[1] && regs.port[1] < regs.port[0]) {
                unsigned temp = regs.port[0];
                regs.port[0] = regs.port[1];
                regs.port[1] = temp;
        }

        return regs;
}
282
283 /* Determines the register control field, ignoring the first? flag */
284
285 static enum bifrost_reg_control
286 bi_pack_register_ctrl_lo(struct bi_registers r)
287 {
288 if (r.write_fma) {
289 if (r.write_add) {
290 assert(!r.read_port3);
291 return BIFROST_WRITE_ADD_P2_FMA_P3;
292 } else {
293 if (r.read_port3)
294 return BIFROST_WRITE_FMA_P2_READ_P3;
295 else
296 return BIFROST_WRITE_FMA_P2;
297 }
298 } else if (r.write_add) {
299 if (r.read_port3)
300 return BIFROST_WRITE_ADD_P2_READ_P3;
301 else
302 return BIFROST_WRITE_ADD_P2;
303 } else if (r.read_port3)
304 return BIFROST_READ_P3;
305 else
306 return BIFROST_REG_NONE;
307 }
308
309 /* Ditto but account for the first? flag this time */
310
static enum bifrost_reg_control
bi_pack_register_ctrl(struct bi_registers r)
{
        enum bifrost_reg_control ctrl = bi_pack_register_ctrl_lo(r);

        /* The first instruction of a clause uses dedicated "first" control
         * encodings */
        if (r.first_instruction) {
                if (ctrl == BIFROST_REG_NONE)
                        ctrl = BIFROST_FIRST_NONE;
                else if (ctrl == BIFROST_WRITE_FMA_P2_READ_P3)
                        ctrl = BIFROST_FIRST_WRITE_FMA_P2_READ_P3;
                else
                        /* NOTE(review): assumes BIFROST_FIRST_NONE acts as an
                         * OR-able flag bit for the remaining controls --
                         * confirm against the encoding tables */
                        ctrl |= BIFROST_FIRST_NONE;
        }

        return ctrl;
}
327
/* Packs the register block for one bundle from the assigned ports. The port 1
 * field is overloaded: when port 1 is disabled it carries the control bits
 * (plus the top bit of port 0 and a port-0-disable flag) instead of a
 * register number. */
static uint64_t
bi_pack_registers(struct bi_registers regs)
{
        enum bifrost_reg_control ctrl = bi_pack_register_ctrl(regs);
        struct bifrost_regs s = { 0 };
        uint64_t packed = 0;

        if (regs.enabled[1]) {
                /* Gotta save that bit!~ Required by the 63-x trick */
                assert(regs.port[1] > regs.port[0]);
                assert(regs.enabled[0]);

                /* Do the 63-x trick, see docs/disasm */
                if (regs.port[0] > 31) {
                        regs.port[0] = 63 - regs.port[0];
                        regs.port[1] = 63 - regs.port[1];
                }

                assert(regs.port[0] <= 31);
                assert(regs.port[1] <= 63);

                s.ctrl = ctrl;
                s.reg1 = regs.port[1];
                s.reg0 = regs.port[0];
        } else {
                /* Port 1 disabled, so set to zero and use port 1 for ctrl */
                s.ctrl = 0;
                s.reg1 = ctrl << 2;

                if (regs.enabled[0]) {
                        /* Bit 0 upper bit of port 0 */
                        s.reg1 |= (regs.port[0] >> 5);

                        /* Rest of port 0 in usual spot */
                        s.reg0 = (regs.port[0] & 0b11111);
                } else {
                        /* Bit 1 set if port 0 also disabled */
                        s.reg1 |= (1 << 1);
                }
        }

        /* When port 3 isn't used, we have to set it to port 2, and vice versa,
         * or INSTR_INVALID_ENC is raised. The reason is unknown. */

        bool has_port2 = regs.write_fma || regs.write_add;
        bool has_port3 = regs.read_port3 || (regs.write_fma && regs.write_add);

        if (!has_port3)
                regs.port[3] = regs.port[2];

        if (!has_port2)
                regs.port[2] = regs.port[3];

        s.reg3 = regs.port[3];
        s.reg2 = regs.port[2];
        s.uniform_const = regs.uniform_constant;

        memcpy(&packed, &s, sizeof(s));
        return packed;
}
388
/* Records a data register (r0-r63) in the clause header. `idx` must be a
 * register-file index (BIR_INDEX_REGISTER flag set). */
static void
bi_set_data_register(bi_clause *clause, unsigned idx)
{
        assert(idx & BIR_INDEX_REGISTER);
        unsigned reg = idx & ~BIR_INDEX_REGISTER;
        assert(reg <= 63);
        clause->data_register = reg;
}
397
/* Data-register reads stage through the instruction's first source */
static void
bi_read_data_register(bi_clause *clause, bi_instruction *ins)
{
        bi_set_data_register(clause, ins->src[0]);
}
403
/* Data-register writes stage through the instruction's destination */
static void
bi_write_data_register(bi_clause *clause, bi_instruction *ins)
{
        bi_set_data_register(clause, ins->dest);
}
409
410 static enum bifrost_packed_src
411 bi_get_src_reg_port(struct bi_registers *regs, unsigned src)
412 {
413 unsigned reg = src & ~BIR_INDEX_REGISTER;
414
415 if (regs->port[0] == reg && regs->enabled[0])
416 return BIFROST_SRC_PORT0;
417 else if (regs->port[1] == reg && regs->enabled[1])
418 return BIFROST_SRC_PORT1;
419 else if (regs->port[3] == reg && regs->read_port3)
420 return BIFROST_SRC_PORT3;
421 else
422 unreachable("Tried to access register with no port");
423 }
424
425 static enum bifrost_packed_src
426 bi_get_src(bi_instruction *ins, struct bi_registers *regs, unsigned s)
427 {
428 unsigned src = ins->src[s];
429
430 if (src & BIR_INDEX_REGISTER)
431 return bi_get_src_reg_port(regs, src);
432 else if (src & BIR_INDEX_PASS)
433 return src & ~BIR_INDEX_PASS;
434 else {
435 bi_print_instruction(ins, stderr);
436 unreachable("Unknown src in above instruction");
437 }
438 }
439
440 /* Constructs a packed 2-bit swizzle for a 16-bit vec2 source. Source must be
441 * 16-bit and written components must correspond to valid swizzles (component x
442 * or y). */
443
444 static unsigned
445 bi_swiz16(bi_instruction *ins, unsigned src)
446 {
447 assert(nir_alu_type_get_type_size(ins->src_types[src]) == 16);
448 unsigned swizzle = 0;
449
450 for (unsigned c = 0; c < 2; ++c) {
451 if (!bi_writes_component(ins, src)) continue;
452
453 unsigned k = ins->swizzle[src][c];
454 assert(k <= 1);
455 swizzle |= (k << c);
456 }
457
458 return swizzle;
459 }
460
/* Packs an FMA-slot fused multiply-add, choosing between the MSCALE, 32-bit,
 * and 16-bit encodings based on the instruction. */
static unsigned
bi_pack_fma_fma(bi_instruction *ins, struct bi_registers *regs)
{
        /* (-a)(-b) = ab, so we only need one negate bit */
        bool negate_mul = ins->src_neg[0] ^ ins->src_neg[1];

        if (ins->op.mscale) {
                assert(!(ins->src_abs[0] && ins->src_abs[1]));
                assert(!ins->src_abs[2] || !ins->src_neg[3] || !ins->src_abs[3]);

                /* We can have exactly one abs, and can flip the multiplication
                 * to make it fit if we have to */
                bool flip_ab = ins->src_abs[1];

                struct bifrost_fma_mscale pack = {
                        .src0 = bi_get_src(ins, regs, flip_ab ? 1 : 0),
                        .src1 = bi_get_src(ins, regs, flip_ab ? 0 : 1),
                        .src2 = bi_get_src(ins, regs, 2),
                        .src3 = bi_get_src(ins, regs, 3),
                        .mscale_mode = 0,
                        .mode = ins->outmod,
                        .src0_abs = ins->src_abs[0] || ins->src_abs[1],
                        .src1_neg = negate_mul,
                        .src2_neg = ins->src_neg[2],
                        .op = BIFROST_FMA_OP_MSCALE,
                };

                RETURN_PACKED(pack);
        } else if (ins->dest_type == nir_type_float32) {
                struct bifrost_fma_fma pack = {
                        .src0 = bi_get_src(ins, regs, 0),
                        .src1 = bi_get_src(ins, regs, 1),
                        .src2 = bi_get_src(ins, regs, 2),
                        .src0_abs = ins->src_abs[0],
                        .src1_abs = ins->src_abs[1],
                        .src2_abs = ins->src_abs[2],
                        .src0_neg = negate_mul,
                        .src2_neg = ins->src_neg[2],
                        .outmod = ins->outmod,
                        .roundmode = ins->roundmode,
                        .op = BIFROST_FMA_OP_FMA
                };

                RETURN_PACKED(pack);
        } else if (ins->dest_type == nir_type_float16) {
                struct bifrost_fma_fma16 pack = {
                        .src0 = bi_get_src(ins, regs, 0),
                        .src1 = bi_get_src(ins, regs, 1),
                        .src2 = bi_get_src(ins, regs, 2),
                        .swizzle_0 = bi_swiz16(ins, 0),
                        .swizzle_1 = bi_swiz16(ins, 1),
                        .swizzle_2 = bi_swiz16(ins, 2),
                        .src0_neg = negate_mul,
                        .src2_neg = ins->src_neg[2],
                        .outmod = ins->outmod,
                        .roundmode = ins->roundmode,
                        .op = BIFROST_FMA_OP_FMA16
                };

                RETURN_PACKED(pack);
        } else {
                unreachable("Invalid fma dest type");
        }
}
525
/* Packs an FMA-slot 32-bit FADD/FMIN/FMAX. The roundmode field doubles as the
 * min/max mode for MINMAX instructions. */
static unsigned
bi_pack_fma_addmin_f32(bi_instruction *ins, struct bi_registers *regs)
{
        unsigned op =
                (ins->type == BI_ADD) ? BIFROST_FMA_OP_FADD32 :
                (ins->op.minmax == BI_MINMAX_MIN) ? BIFROST_FMA_OP_FMIN32 :
                BIFROST_FMA_OP_FMAX32;

        struct bifrost_fma_add pack = {
                .src0 = bi_get_src(ins, regs, 0),
                .src1 = bi_get_src(ins, regs, 1),
                .src0_abs = ins->src_abs[0],
                .src1_abs = ins->src_abs[1],
                .src0_neg = ins->src_neg[0],
                .src1_neg = ins->src_neg[1],
                .unk = 0x0,
                .outmod = ins->outmod,
                .roundmode = (ins->type == BI_ADD) ? ins->roundmode : ins->minmax,
                .op = op
        };

        RETURN_PACKED(pack);
}
549
/* Computes the auxiliary abs bit (returned) and operand flip (*flip) for fp16
 * two-source ops, per the encoding scheme described below. */
static bool
bi_pack_fp16_abs(bi_instruction *ins, struct bi_registers *regs, bool *flip)
{
        /* Absolute values are packed in a quirky way. Let k = src1 < src0. Let
         * l be an auxiliary bit we encode. Then the hardware determines:
         *
         *      abs0 = l || k
         *      abs1 = l && k
         *
         * Since add/min/max are commutative, this saves a bit by using the
         * order of the operands as a bit (k). To pack this, first note:
         *
         *      (l && k) implies (l || k).
         *
         * That is, if the second argument is abs'd, then the first argument
         * also has abs. So there are three cases:
         *
         * Case 0: Neither src has absolute value. Then we have l = k = 0.
         *
         * Case 1: Exactly one src has absolute value. Assign that source to
         * src0 and the other source to src1. Compute k = src1 < src0 based on
         * that assignment. Then l = ~k.
         *
         * Case 2: Both sources have absolute value. Then we have l = k = 1.
         * Note to force k = 1 requires that (src1 < src0) OR (src0 < src1).
         * That is, this encoding is only valid if src1 and src0 are distinct.
         * This is a scheduling restriction (XXX); if an op of this type
         * requires both identical sources to have abs value, then we must
         * schedule to ADD (which does not use this ordering trick).
         */

        unsigned abs_0 = ins->src_abs[0], abs_1 = ins->src_abs[1];
        unsigned src_0 = bi_get_src(ins, regs, 0);
        unsigned src_1 = bi_get_src(ins, regs, 1);

        assert(!(abs_0 && abs_1 && src_0 == src_1));

        if (!abs_0 && !abs_1) {
                /* Force k = 0 <===> NOT(src1 < src0) */
                *flip = (src_1 < src_0);
                return false;
        } else if (abs_0 && !abs_1) {
                /* Case 1: abs operand already in src0; l = ~k */
                return src_1 >= src_0;
        } else if (abs_1 && !abs_0) {
                /* Case 1 with the operands swapped */
                *flip = true;
                return src_0 >= src_1;
        } else {
                /* Case 2: order the operands so that k = 1 */
                *flip = !(src_1 < src_0);
                return true;
        }
}
601
/* Packs an fp16 FADD/FMIN/FMAX for either slot (FMA selects the FMA-slot
 * opcodes). The fp16 abs trick (see bi_pack_fp16_abs) may force the operand
 * order to flip, so sources, negates and swizzles all route through `flip`. */
static unsigned
bi_pack_fmadd_min_f16(bi_instruction *ins, struct bi_registers *regs, bool FMA)
{
        unsigned op =
                (!FMA) ? ((ins->op.minmax == BI_MINMAX_MIN) ?
                        BIFROST_ADD_OP_FMIN16 : BIFROST_ADD_OP_FMAX16) :
                (ins->type == BI_ADD) ? BIFROST_FMA_OP_FADD16 :
                (ins->op.minmax == BI_MINMAX_MIN) ? BIFROST_FMA_OP_FMIN16 :
                BIFROST_FMA_OP_FMAX16;

        bool flip = false;
        bool l = bi_pack_fp16_abs(ins, regs, &flip);
        unsigned src_0 = bi_get_src(ins, regs, 0);
        unsigned src_1 = bi_get_src(ins, regs, 1);

        if (FMA) {
                struct bifrost_fma_add_minmax16 pack = {
                        .src0 = flip ? src_1 : src_0,
                        .src1 = flip ? src_0 : src_1,
                        .src0_neg = ins->src_neg[flip ? 1 : 0],
                        .src1_neg = ins->src_neg[flip ? 0 : 1],
                        .src0_swizzle = bi_swiz16(ins, flip ? 1 : 0),
                        .src1_swizzle = bi_swiz16(ins, flip ? 0 : 1),
                        .abs1 = l,
                        .outmod = ins->outmod,
                        .mode = (ins->type == BI_ADD) ? ins->roundmode : ins->minmax,
                        .op = op
                };

                RETURN_PACKED(pack);
        } else {
                /* Can't have modes for fp16 */
                assert(ins->outmod == 0);

                struct bifrost_add_fmin16 pack = {
                        .src0 = flip ? src_1 : src_0,
                        .src1 = flip ? src_0 : src_1,
                        .src0_neg = ins->src_neg[flip ? 1 : 0],
                        .src1_neg = ins->src_neg[flip ? 0 : 1],
                        .abs1 = l,
                        .src0_swizzle = bi_swiz16(ins, flip ? 1 : 0),
                        .src1_swizzle = bi_swiz16(ins, flip ? 0 : 1),
                        .mode = ins->minmax,
                        .op = op
                };

                RETURN_PACKED(pack);
        }
}
651
652 static unsigned
653 bi_pack_fma_addmin(bi_instruction *ins, struct bi_registers *regs)
654 {
655 if (ins->dest_type == nir_type_float32)
656 return bi_pack_fma_addmin_f32(ins, regs);
657 else if(ins->dest_type == nir_type_float16)
658 return bi_pack_fmadd_min_f16(ins, regs, true);
659 else
660 unreachable("Unknown FMA/ADD type");
661 }
662
/* Packs a generic single-source FMA-slot instruction with the given opcode */
static unsigned
bi_pack_fma_1src(bi_instruction *ins, struct bi_registers *regs, unsigned op)
{
        struct bifrost_fma_inst pack = {
                .src0 = bi_get_src(ins, regs, 0),
                .op = op
        };

        RETURN_PACKED(pack);
}
673
/* Packs a generic two-source FMA-slot instruction with the given opcode */
static unsigned
bi_pack_fma_2src(bi_instruction *ins, struct bi_registers *regs, unsigned op)
{
        struct bifrost_fma_2src pack = {
                .src0 = bi_get_src(ins, regs, 0),
                .src1 = bi_get_src(ins, regs, 1),
                .op = op
        };

        RETURN_PACKED(pack);
}
685
/* Packs a generic single-source ADD-slot instruction with the given opcode */
static unsigned
bi_pack_add_1src(bi_instruction *ins, struct bi_registers *regs, unsigned op)
{
        struct bifrost_add_inst pack = {
                .src0 = bi_get_src(ins, regs, 0),
                .op = op
        };

        RETURN_PACKED(pack);
}
696
/* Translates an IR condition to the hardware csel condition. The hardware
 * only implements GT/GE/EQ: LT/LE are realized by flipping the compared
 * operands (*flip) and NE by swapping the selected results (*invert). The
 * concrete condition is chosen per base type (float/int/uint) of T. */
static enum bifrost_csel_cond
bi_cond_to_csel(enum bi_cond cond, bool *flip, bool *invert, nir_alu_type T)
{
        nir_alu_type B = nir_alu_type_get_base_type(T);
        unsigned idx = (B == nir_type_float) ? 0 :
                ((B == nir_type_int) ? 1 : 2);

        switch (cond){
        case BI_COND_LT:
                *flip = true;
                /* fallthrough */
        case BI_COND_GT: {
                const enum bifrost_csel_cond ops[] = {
                        BIFROST_FGT_F,
                        BIFROST_IGT_I,
                        BIFROST_UGT_I
                };

                return ops[idx];
        }
        case BI_COND_LE:
                *flip = true;
                /* fallthrough */
        case BI_COND_GE: {
                const enum bifrost_csel_cond ops[] = {
                        BIFROST_FGE_F,
                        BIFROST_IGE_I,
                        BIFROST_UGE_I
                };

                return ops[idx];
        }
        case BI_COND_NE:
                *invert = true;
                /* fallthrough */
        case BI_COND_EQ: {
                const enum bifrost_csel_cond ops[] = {
                        BIFROST_FEQ_F,
                        BIFROST_IEQ_F,
                        BIFROST_IEQ_F /* sign is irrelevant */
                };

                return ops[idx];
        }
        default:
                unreachable("Invalid op for csel");
        }
}
742
/* Packs an FMA-slot 4-source csel: sources 0/1 are compared, 2/3 are the
 * selected results. LT/LE/NE are lowered by flipping the compare operands or
 * swapping the result operands (see bi_cond_to_csel). */
static unsigned
bi_pack_fma_csel(bi_instruction *ins, struct bi_registers *regs)
{
        /* TODO: Use csel3 as well */
        bool flip = false, invert = false;

        enum bifrost_csel_cond cond =
                bi_cond_to_csel(ins->cond, &flip, &invert, ins->src_types[0]);

        unsigned size = nir_alu_type_get_type_size(ins->dest_type);

        unsigned cmp_0 = (flip ? 1 : 0);
        unsigned cmp_1 = (flip ? 0 : 1);
        unsigned res_0 = (invert ? 3 : 2);
        unsigned res_1 = (invert ? 2 : 3);

        struct bifrost_csel4 pack = {
                .src0 = bi_get_src(ins, regs, cmp_0),
                .src1 = bi_get_src(ins, regs, cmp_1),
                .src2 = bi_get_src(ins, regs, res_0),
                .src3 = bi_get_src(ins, regs, res_1),
                .cond = cond,
                .op = (size == 16) ? BIFROST_FMA_OP_CSEL4_V16 :
                        BIFROST_FMA_OP_CSEL4
        };

        RETURN_PACKED(pack);
}
771
772 static unsigned
773 bi_pack_fma_frexp(bi_instruction *ins, struct bi_registers *regs)
774 {
775 unsigned op = BIFROST_FMA_OP_FREXPE_LOG;
776 return bi_pack_fma_1src(ins, regs, op);
777 }
778
779 static unsigned
780 bi_pack_fma_reduce(bi_instruction *ins, struct bi_registers *regs)
781 {
782 if (ins->op.reduce == BI_REDUCE_ADD_FREXPM) {
783 return bi_pack_fma_2src(ins, regs, BIFROST_FMA_OP_ADD_FREXPM);
784 } else {
785 unreachable("Invalid reduce op");
786 }
787 }
788
789 /* We have a single convert opcode in the IR but a number of opcodes that could
790 * come out. In particular we have native opcodes for:
791 *
792 * [ui]16 --> [fui]32 -- int16_to_32
793 * f16 --> f32 -- float16_to_32
794 * f32 --> f16 -- float32_to_16
795 * f32 --> [ui]32 -- float32_to_int
796 * [ui]32 --> f32 -- int_to_float32
797 * [fui]16 --> [fui]16 -- f2i_i2f16
798 */
799
/* Selects and packs one of the native conversion opcodes (see the table
 * above) for the IR's single convert instruction, in either slot. */
static unsigned
bi_pack_convert(bi_instruction *ins, struct bi_registers *regs, bool FMA)
{
        nir_alu_type from_base = nir_alu_type_get_base_type(ins->src_types[0]);
        unsigned from_size = nir_alu_type_get_type_size(ins->src_types[0]);
        bool from_unsigned = from_base == nir_type_uint;

        nir_alu_type to_base = nir_alu_type_get_base_type(ins->dest_type);
        unsigned to_size = nir_alu_type_get_type_size(ins->dest_type);
        bool to_unsigned = to_base == nir_type_uint;
        bool to_float = to_base == nir_type_float;

        /* Sanity check: must actually convert, by at most one size step */
        assert((from_base != to_base) || (from_size != to_size));
        assert((MAX2(from_size, to_size) / MIN2(from_size, to_size)) <= 2);

        /* f32 to f16 is special */
        if (from_size == 32 && to_size == 16 && from_base == nir_type_float && to_base == from_base) {
                /* TODO: second vectorized source? */
                struct bifrost_fma_2src pfma = {
                        .src0 = bi_get_src(ins, regs, 0),
                        .src1 = BIFROST_SRC_STAGE, /* 0 */
                        .op = BIFROST_FMA_FLOAT32_TO_16
                };

                struct bifrost_add_2src padd = {
                        .src0 = bi_get_src(ins, regs, 0),
                        .src1 = BIFROST_SRC_STAGE, /* 0 */
                        .op = BIFROST_ADD_FLOAT32_TO_16
                };

                if (FMA) {
                        RETURN_PACKED(pfma);
                } else {
                        RETURN_PACKED(padd);
                }
        }

        /* Otherwise, figure out the mode */
        unsigned op = 0;

        if (from_size == 16 && to_size == 32) {
                /* Widening conversions take a single component selector */
                unsigned component = ins->swizzle[0][0];
                assert(component <= 1);

                if (from_base == nir_type_float)
                        op = BIFROST_CONVERT_5(component);
                else
                        op = BIFROST_CONVERT_4(from_unsigned, component, to_float);
        } else {
                unsigned mode = 0;
                unsigned swizzle = (from_size == 16) ? bi_swiz16(ins, 0) : 0;
                bool is_unsigned = from_unsigned;

                if (from_base == nir_type_float) {
                        assert(to_base != nir_type_float);
                        /* Float-to-int signedness comes from the destination */
                        is_unsigned = to_unsigned;

                        if (from_size == 32 && to_size == 32)
                                mode = BIFROST_CONV_F32_TO_I32;
                        else if (from_size == 16 && to_size == 16)
                                mode = BIFROST_CONV_F16_TO_I16;
                        else
                                unreachable("Invalid float conversion");
                } else {
                        assert(to_base == nir_type_float);
                        assert(from_size == to_size);

                        if (to_size == 32)
                                mode = BIFROST_CONV_I32_TO_F32;
                        else if (to_size == 16)
                                mode = BIFROST_CONV_I16_TO_F16;
                        else
                                unreachable("Invalid int conversion");
                }

                /* Fixup swizzle for 32-bit only modes */

                if (mode == BIFROST_CONV_I32_TO_F32)
                        swizzle = 0b11;
                else if (mode == BIFROST_CONV_F32_TO_I32)
                        swizzle = 0b10;

                op = BIFROST_CONVERT(is_unsigned, ins->roundmode, swizzle, mode);

                /* Unclear what the top bit is for... maybe 16-bit related */
                bool mode2 = mode == BIFROST_CONV_F16_TO_I16;
                bool mode6 = mode == BIFROST_CONV_I16_TO_F16;

                if (!(mode2 || mode6))
                        op |= 0x100;
        }

        if (FMA)
                return bi_pack_fma_1src(ins, regs, BIFROST_FMA_CONVERT | op);
        else
                return bi_pack_add_1src(ins, regs, BIFROST_ADD_CONVERT | op);
}
898
/* Packs an FMA-slot SEL. For 16-bit the two source swizzles pack into a 2-bit
 * field; for 8-bit each of the four sources contributes one swizzle bit
 * (lowering guarantees the selected component is 2 when set). */
static unsigned
bi_pack_fma_select(bi_instruction *ins, struct bi_registers *regs)
{
        unsigned size = nir_alu_type_get_type_size(ins->src_types[0]);

        if (size == 16) {
                unsigned swiz = (ins->swizzle[0][0] | (ins->swizzle[1][0] << 1));
                unsigned op = BIFROST_FMA_SEL_16(swiz);
                return bi_pack_fma_2src(ins, regs, op);
        } else if (size == 8) {
                unsigned swiz = 0;

                for (unsigned c = 0; c < 4; ++c) {
                        if (ins->swizzle[c][0]) {
                                /* Ensure lowering restriction is met */
                                assert(ins->swizzle[c][0] == 2);
                                swiz |= (1 << c);
                        }
                }

                struct bifrost_fma_sel8 pack = {
                        .src0 = bi_get_src(ins, regs, 0),
                        .src1 = bi_get_src(ins, regs, 1),
                        .src2 = bi_get_src(ins, regs, 2),
                        .src3 = bi_get_src(ins, regs, 3),
                        .swizzle = swiz,
                        .op = BIFROST_FMA_OP_SEL8
                };

                RETURN_PACKED(pack);
        } else {
                unreachable("Unimplemented");
        }
}
933
/* Translates IR conditions to hardware float-compare conditions: ordered
 * comparisons, except NE which maps to unordered-not-equal */
static enum bifrost_fcmp_cond
bi_fcmp_cond(enum bi_cond cond)
{
        switch (cond) {
        case BI_COND_LT: return BIFROST_OLT;
        case BI_COND_LE: return BIFROST_OLE;
        case BI_COND_GE: return BIFROST_OGE;
        case BI_COND_GT: return BIFROST_OGT;
        case BI_COND_EQ: return BIFROST_OEQ;
        case BI_COND_NE: return BIFROST_UNE;
        default: unreachable("Unknown bi_cond");
        }
}
947
948 /* a <?> b <==> b <flip(?)> a (TODO: NaN behaviour?) */
949
950 static enum bifrost_fcmp_cond
951 bi_flip_fcmp(enum bifrost_fcmp_cond cond)
952 {
953 switch (cond) {
954 case BIFROST_OGT:
955 return BIFROST_OLT;
956 case BIFROST_OGE:
957 return BIFROST_OLE;
958 case BIFROST_OLT:
959 return BIFROST_OGT;
960 case BIFROST_OLE:
961 return BIFROST_OGE;
962 case BIFROST_OEQ:
963 case BIFROST_UNE:
964 return cond;
965 default:
966 unreachable("Unknown fcmp cond");
967 }
968 }
969
/* Packs an FMA-slot float compare, in f32 or f16 form. Source modifiers not
 * directly encodable are folded into operand flips (see comments below and
 * bi_pack_fp16_abs). */
static unsigned
bi_pack_fma_cmp(bi_instruction *ins, struct bi_registers *regs)
{
        nir_alu_type Tl = ins->src_types[0];
        nir_alu_type Tr = ins->src_types[1];

        if (Tl == nir_type_float32 || Tr == nir_type_float32) {
                /* TODO: Mixed 32/16 cmp */
                assert(Tl == Tr);

                enum bifrost_fcmp_cond cond = bi_fcmp_cond(ins->cond);

                /* Only src1 has neg, so we arrange:
                 *      a < b   --- native
                 *      a < -b  --- native
                 *      -a < -b <===> a > b
                 *      -a < b  <===> a > -b
                 * TODO: Is this NaN-precise?
                 */

                bool flip = ins->src_neg[0];
                bool neg =  ins->src_neg[0] ^ ins->src_neg[1];

                if (flip)
                        cond = bi_flip_fcmp(cond);

                struct bifrost_fma_fcmp pack = {
                        .src0 = bi_get_src(ins, regs, 0),
                        .src1 = bi_get_src(ins, regs, 1),
                        .src0_abs = ins->src_abs[0],
                        .src1_abs = ins->src_abs[1],
                        .src1_neg = neg,
                        .src_expand = 0,
                        .unk1 = 0,
                        .cond = cond,
                        .op = BIFROST_FMA_OP_FCMP_GL
                };

                RETURN_PACKED(pack);
        } else if (Tl == nir_type_float16 && Tr == nir_type_float16) {
                bool flip = false;
                bool l = bi_pack_fp16_abs(ins, regs, &flip);
                enum bifrost_fcmp_cond cond = bi_fcmp_cond(ins->cond);

                if (flip)
                        cond = bi_flip_fcmp(cond);

                struct bifrost_fma_fcmp16 pack = {
                        .src0 = bi_get_src(ins, regs, flip ? 1 : 0),
                        .src1 = bi_get_src(ins, regs, flip ? 0 : 1),
                        .src0_swizzle = bi_swiz16(ins, flip ? 1 : 0),
                        .src1_swizzle = bi_swiz16(ins, flip ? 0 : 1),
                        .abs1 = l,
                        .unk = 0,
                        .cond = cond,
                        .op = BIFROST_FMA_OP_FCMP_GL_16,
                };

                RETURN_PACKED(pack);
        } else {
                unreachable("Unknown cmp type");
        }
}
1033
1034 static unsigned
1035 bi_fma_bitwise_op(enum bi_bitwise_op op, bool rshift)
1036 {
1037 switch (op) {
1038 case BI_BITWISE_OR:
1039 /* Via De Morgan's */
1040 return rshift ?
1041 BIFROST_FMA_OP_RSHIFT_NAND :
1042 BIFROST_FMA_OP_LSHIFT_NAND;
1043 case BI_BITWISE_AND:
1044 return rshift ?
1045 BIFROST_FMA_OP_RSHIFT_AND :
1046 BIFROST_FMA_OP_LSHIFT_AND;
1047 case BI_BITWISE_XOR:
1048 /* Shift direction handled out of band */
1049 return BIFROST_FMA_OP_RSHIFT_XOR;
1050 default:
1051 unreachable("Unknown op");
1052 }
1053 }
1054
/* Packs an FMA-slot shifted bitwise op. OR and XOR are lowered onto the
 * NAND/XOR hardware opcodes by massaging the per-source invert bits (see the
 * identities inline). */
static unsigned
bi_pack_fma_bitwise(bi_instruction *ins, struct bi_registers *regs)
{
        unsigned size = nir_alu_type_get_type_size(ins->dest_type);
        assert(size <= 32);

        bool invert_0 = ins->bitwise.src_invert[0];
        bool invert_1 = ins->bitwise.src_invert[1];

        if (ins->op.bitwise == BI_BITWISE_OR) {
                /* Becomes NAND, so via De Morgan's:
                 *      f(A) | f(B) = ~(~f(A) & ~f(B))
                 *                  = NAND(~f(A), ~f(B))
                 */

                invert_0 = !invert_0;
                invert_1 = !invert_1;
        } else if (ins->op.bitwise == BI_BITWISE_XOR) {
                /* ~A ^ ~B = ~(A ^ ~B) = ~(~(A ^ B)) = A ^ B
                 * ~A ^  B = ~(A ^ B)  = A ^ ~B
                 */

                invert_0 ^= invert_1;
                invert_1 = false; /* dead store: repurposed just below */

                /* invert_1 ends up specifying shift direction */
                invert_1 = !ins->bitwise.rshift;
        }

        struct bifrost_shift_fma pack = {
                .src0 = bi_get_src(ins, regs, 0),
                .src1 = bi_get_src(ins, regs, 1),
                .src2 = bi_get_src(ins, regs, 2),
                .half = (size == 32) ? 0 : (size == 16) ? 0x7 : (size == 8) ? 0x4 : 0,
                .unk = 1, /* XXX */
                .invert_1 = invert_0,
                .invert_2 = invert_1,
                .op = bi_fma_bitwise_op(ins->op.bitwise, ins->bitwise.rshift)
        };

        RETURN_PACKED(pack);
}
1097
1098 static unsigned
1099 bi_pack_fma_round(bi_instruction *ins, struct bi_registers *regs)
1100 {
1101 bool fp16 = ins->dest_type == nir_type_float16;
1102 assert(fp16 || ins->dest_type == nir_type_float32);
1103
1104 unsigned op = fp16
1105 ? BIFROST_FMA_ROUND_16(ins->roundmode, bi_swiz16(ins, 0))
1106 : BIFROST_FMA_ROUND_32(ins->roundmode);
1107
1108 return bi_pack_fma_1src(ins, regs, op);
1109 }
1110
1111 static unsigned
1112 bi_pack_fma_imath(bi_instruction *ins, struct bi_registers *regs)
1113 {
1114 /* Scheduler: only ADD can have 8/16-bit imath */
1115 assert(ins->dest_type == nir_type_int32 || ins->dest_type == nir_type_uint32);
1116
1117 unsigned op = ins->op.imath == BI_IMATH_ADD
1118 ? BIFROST_FMA_IADD_32
1119 : BIFROST_FMA_ISUB_32;
1120
1121 return bi_pack_fma_2src(ins, regs, op);
1122 }
1123
1124 static unsigned
1125 bi_pack_fma(bi_clause *clause, bi_bundle bundle, struct bi_registers *regs)
1126 {
1127 if (!bundle.fma)
1128 return BIFROST_FMA_NOP;
1129
1130 switch (bundle.fma->type) {
1131 case BI_ADD:
1132 return bi_pack_fma_addmin(bundle.fma, regs);
1133 case BI_CMP:
1134 return bi_pack_fma_cmp(bundle.fma, regs);
1135 case BI_BITWISE:
1136 return bi_pack_fma_bitwise(bundle.fma, regs);
1137 case BI_CONVERT:
1138 return bi_pack_convert(bundle.fma, regs, true);
1139 case BI_CSEL:
1140 return bi_pack_fma_csel(bundle.fma, regs);
1141 case BI_FMA:
1142 return bi_pack_fma_fma(bundle.fma, regs);
1143 case BI_FREXP:
1144 return bi_pack_fma_frexp(bundle.fma, regs);
1145 case BI_IMATH:
1146 return bi_pack_fma_imath(bundle.fma, regs);
1147 case BI_MINMAX:
1148 return bi_pack_fma_addmin(bundle.fma, regs);
1149 case BI_MOV:
1150 return bi_pack_fma_1src(bundle.fma, regs, BIFROST_FMA_OP_MOV);
1151 case BI_SHIFT:
1152 unreachable("Packing todo");
1153 case BI_SELECT:
1154 return bi_pack_fma_select(bundle.fma, regs);
1155 case BI_ROUND:
1156 return bi_pack_fma_round(bundle.fma, regs);
1157 case BI_REDUCE_FMA:
1158 return bi_pack_fma_reduce(bundle.fma, regs);
1159 default:
1160 unreachable("Cannot encode class as FMA");
1161 }
1162 }
1163
1164 static unsigned
1165 bi_pack_add_ld_vary(bi_clause *clause, bi_instruction *ins, struct bi_registers *regs)
1166 {
1167 unsigned size = nir_alu_type_get_type_size(ins->dest_type);
1168 assert(size == 32 || size == 16);
1169
1170 unsigned op = (size == 32) ?
1171 BIFROST_ADD_OP_LD_VAR_32 :
1172 BIFROST_ADD_OP_LD_VAR_16;
1173
1174 unsigned packed_addr = 0;
1175
1176 if (ins->src[0] & BIR_INDEX_CONSTANT) {
1177 /* Direct uses address field directly */
1178 packed_addr = bi_get_immediate(ins, 0);
1179 } else {
1180 /* Indirect gets an extra source */
1181 packed_addr = bi_get_src(ins, regs, 0) | 0b11000;
1182 }
1183
1184 /* The destination is thrown in the data register */
1185 assert(ins->dest & BIR_INDEX_REGISTER);
1186 clause->data_register = ins->dest & ~BIR_INDEX_REGISTER;
1187
1188 unsigned channels = ins->vector_channels;
1189 assert(channels >= 1 && channels <= 4);
1190
1191 struct bifrost_ld_var pack = {
1192 .src0 = bi_get_src(ins, regs, 1),
1193 .addr = packed_addr,
1194 .channels = MALI_POSITIVE(channels),
1195 .interp_mode = ins->load_vary.interp_mode,
1196 .reuse = ins->load_vary.reuse,
1197 .flat = ins->load_vary.flat,
1198 .op = op
1199 };
1200
1201 RETURN_PACKED(pack);
1202 }
1203
1204 static unsigned
1205 bi_pack_add_2src(bi_instruction *ins, struct bi_registers *regs, unsigned op)
1206 {
1207 struct bifrost_add_2src pack = {
1208 .src0 = bi_get_src(ins, regs, 0),
1209 .src1 = bi_get_src(ins, regs, 1),
1210 .op = op
1211 };
1212
1213 RETURN_PACKED(pack);
1214 }
1215
1216 static unsigned
1217 bi_pack_add_addmin_f32(bi_instruction *ins, struct bi_registers *regs)
1218 {
1219 unsigned op =
1220 (ins->type == BI_ADD) ? BIFROST_ADD_OP_FADD32 :
1221 (ins->op.minmax == BI_MINMAX_MIN) ? BIFROST_ADD_OP_FMIN32 :
1222 BIFROST_ADD_OP_FMAX32;
1223
1224 struct bifrost_add_faddmin pack = {
1225 .src0 = bi_get_src(ins, regs, 0),
1226 .src1 = bi_get_src(ins, regs, 1),
1227 .src0_abs = ins->src_abs[0],
1228 .src1_abs = ins->src_abs[1],
1229 .src0_neg = ins->src_neg[0],
1230 .src1_neg = ins->src_neg[1],
1231 .outmod = ins->outmod,
1232 .mode = (ins->type == BI_ADD) ? ins->roundmode : ins->minmax,
1233 .op = op
1234 };
1235
1236 RETURN_PACKED(pack);
1237 }
1238
1239 static unsigned
1240 bi_pack_add_add_f16(bi_instruction *ins, struct bi_registers *regs)
1241 {
1242 /* ADD.v2f16 can't have outmod */
1243 assert(ins->outmod == BIFROST_NONE);
1244
1245 struct bifrost_add_faddmin pack = {
1246 .src0 = bi_get_src(ins, regs, 0),
1247 .src1 = bi_get_src(ins, regs, 1),
1248 .src0_abs = ins->src_abs[0],
1249 .src1_abs = ins->src_abs[1],
1250 .src0_neg = ins->src_neg[0],
1251 .src1_neg = ins->src_neg[1],
1252 .select = bi_swiz16(ins, 0), /* swizzle_0 */
1253 .outmod = bi_swiz16(ins, 1), /* swizzle_1 */
1254 .mode = ins->roundmode,
1255 .op = BIFROST_ADD_OP_FADD16
1256 };
1257
1258 RETURN_PACKED(pack);
1259 }
1260
1261 static unsigned
1262 bi_pack_add_addmin(bi_instruction *ins, struct bi_registers *regs)
1263 {
1264 if (ins->dest_type == nir_type_float32)
1265 return bi_pack_add_addmin_f32(ins, regs);
1266 else if (ins->dest_type == nir_type_float16) {
1267 if (ins->type == BI_ADD)
1268 return bi_pack_add_add_f16(ins, regs);
1269 else
1270 return bi_pack_fmadd_min_f16(ins, regs, false);
1271 } else
1272 unreachable("Unknown FMA/ADD type");
1273 }
1274
1275 static unsigned
1276 bi_pack_add_ld_ubo(bi_clause *clause, bi_instruction *ins, struct bi_registers *regs)
1277 {
1278 assert(ins->vector_channels >= 1 && ins->vector_channels <= 4);
1279
1280 const unsigned ops[4] = {
1281 BIFROST_ADD_OP_LD_UBO_1,
1282 BIFROST_ADD_OP_LD_UBO_2,
1283 BIFROST_ADD_OP_LD_UBO_3,
1284 BIFROST_ADD_OP_LD_UBO_4
1285 };
1286
1287 bi_write_data_register(clause, ins);
1288 return bi_pack_add_2src(ins, regs, ops[ins->vector_channels - 1]);
1289 }
1290
1291 static enum bifrost_ldst_type
1292 bi_pack_ldst_type(nir_alu_type T)
1293 {
1294 switch (T) {
1295 case nir_type_float16: return BIFROST_LDST_F16;
1296 case nir_type_float32: return BIFROST_LDST_F32;
1297 case nir_type_int32: return BIFROST_LDST_I32;
1298 case nir_type_uint32: return BIFROST_LDST_U32;
1299 default: unreachable("Invalid type loaded");
1300 }
1301 }
1302
1303 static unsigned
1304 bi_pack_add_ld_var_addr(bi_clause *clause, bi_instruction *ins, struct bi_registers *regs)
1305 {
1306 struct bifrost_ld_var_addr pack = {
1307 .src0 = bi_get_src(ins, regs, 1),
1308 .src1 = bi_get_src(ins, regs, 2),
1309 .location = bi_get_immediate(ins, 0),
1310 .type = bi_pack_ldst_type(ins->src_types[3]),
1311 .op = BIFROST_ADD_OP_LD_VAR_ADDR
1312 };
1313
1314 bi_write_data_register(clause, ins);
1315 RETURN_PACKED(pack);
1316 }
1317
1318 static unsigned
1319 bi_pack_add_ld_attr(bi_clause *clause, bi_instruction *ins, struct bi_registers *regs)
1320 {
1321 assert(ins->vector_channels >= 0 && ins->vector_channels <= 4);
1322
1323 struct bifrost_ld_attr pack = {
1324 .src0 = bi_get_src(ins, regs, 1),
1325 .src1 = bi_get_src(ins, regs, 2),
1326 .location = bi_get_immediate(ins, 0),
1327 .channels = MALI_POSITIVE(ins->vector_channels),
1328 .type = bi_pack_ldst_type(ins->dest_type),
1329 .op = BIFROST_ADD_OP_LD_ATTR
1330 };
1331
1332 bi_write_data_register(clause, ins);
1333 RETURN_PACKED(pack);
1334 }
1335
1336 static unsigned
1337 bi_pack_add_st_vary(bi_clause *clause, bi_instruction *ins, struct bi_registers *regs)
1338 {
1339 assert(ins->vector_channels >= 1 && ins->vector_channels <= 4);
1340
1341 struct bifrost_st_vary pack = {
1342 .src0 = bi_get_src(ins, regs, 1),
1343 .src1 = bi_get_src(ins, regs, 2),
1344 .src2 = bi_get_src(ins, regs, 3),
1345 .channels = MALI_POSITIVE(ins->vector_channels),
1346 .op = BIFROST_ADD_OP_ST_VAR
1347 };
1348
1349 bi_read_data_register(clause, ins);
1350 RETURN_PACKED(pack);
1351 }
1352
1353 static unsigned
1354 bi_pack_add_atest(bi_clause *clause, bi_instruction *ins, struct bi_registers *regs)
1355 {
1356 bool fp16 = (ins->src_types[1] == nir_type_float16);
1357
1358 struct bifrost_add_atest pack = {
1359 .src0 = bi_get_src(ins, regs, 0),
1360 .src1 = bi_get_src(ins, regs, 1),
1361 .half = fp16,
1362 .component = fp16 ? ins->swizzle[1][0] : 1, /* Set for fp32 */
1363 .op = BIFROST_ADD_OP_ATEST,
1364 };
1365
1366 /* Despite *also* writing with the usual mechanism... quirky and
1367 * perhaps unnecessary, but let's match the blob */
1368 clause->data_register = ins->dest & ~BIR_INDEX_REGISTER;
1369
1370 RETURN_PACKED(pack);
1371 }
1372
1373 static unsigned
1374 bi_pack_add_blend(bi_clause *clause, bi_instruction *ins, struct bi_registers *regs)
1375 {
1376 struct bifrost_add_inst pack = {
1377 .src0 = bi_get_src(ins, regs, 1),
1378 .op = BIFROST_ADD_OP_BLEND
1379 };
1380
1381 /* TODO: Pack location in uniform_const */
1382 assert(ins->blend_location == 0);
1383
1384 bi_read_data_register(clause, ins);
1385 RETURN_PACKED(pack);
1386 }
1387
1388 static unsigned
1389 bi_pack_add_special(bi_instruction *ins, struct bi_registers *regs)
1390 {
1391 unsigned op = 0;
1392 bool fp16 = ins->dest_type == nir_type_float16;
1393 bool Y = ins->swizzle[0][0];
1394
1395 if (ins->op.special == BI_SPECIAL_FRCP) {
1396 op = fp16 ?
1397 (Y ? BIFROST_ADD_OP_FRCP_FAST_F16_Y :
1398 BIFROST_ADD_OP_FRCP_FAST_F16_X) :
1399 BIFROST_ADD_OP_FRCP_FAST_F32;
1400 } else if (ins->op.special == BI_SPECIAL_FRSQ) {
1401 op = fp16 ?
1402 (Y ? BIFROST_ADD_OP_FRSQ_FAST_F16_Y :
1403 BIFROST_ADD_OP_FRSQ_FAST_F16_X) :
1404 BIFROST_ADD_OP_FRSQ_FAST_F32;
1405
1406 } else if (ins->op.special == BI_SPECIAL_EXP2_LOW) {
1407 assert(!fp16);
1408 op = BIFROST_ADD_OP_FEXP2_FAST;
1409 } else {
1410 unreachable("Unknown special op");
1411 }
1412
1413 return bi_pack_add_1src(ins, regs, op);
1414 }
1415
1416 static unsigned
1417 bi_pack_add_table(bi_instruction *ins, struct bi_registers *regs)
1418 {
1419 unsigned op = 0;
1420 assert(ins->dest_type == nir_type_float32);
1421
1422 op = BIFROST_ADD_OP_LOG2_HELP;
1423 return bi_pack_add_1src(ins, regs, op);
1424 }
1425 static unsigned
1426 bi_pack_add_tex_compact(bi_clause *clause, bi_instruction *ins, struct bi_registers *regs, gl_shader_stage stage)
1427 {
1428 bool f16 = ins->dest_type == nir_type_float16;
1429 bool vtx = stage != MESA_SHADER_FRAGMENT;
1430
1431 struct bifrost_tex_compact pack = {
1432 .src0 = bi_get_src(ins, regs, 0),
1433 .src1 = bi_get_src(ins, regs, 1),
1434 .op = f16 ? BIFROST_ADD_OP_TEX_COMPACT_F16(vtx) :
1435 BIFROST_ADD_OP_TEX_COMPACT_F32(vtx),
1436 .compute_lod = !vtx,
1437 .tex_index = ins->texture.texture_index,
1438 .sampler_index = ins->texture.sampler_index
1439 };
1440
1441 bi_write_data_register(clause, ins);
1442 RETURN_PACKED(pack);
1443 }
1444
1445 static unsigned
1446 bi_pack_add_select(bi_instruction *ins, struct bi_registers *regs)
1447 {
1448 unsigned size = nir_alu_type_get_type_size(ins->src_types[0]);
1449 assert(size == 16);
1450
1451 unsigned swiz = (ins->swizzle[0][0] | (ins->swizzle[1][0] << 1));
1452 unsigned op = BIFROST_ADD_SEL_16(swiz);
1453 return bi_pack_add_2src(ins, regs, op);
1454 }
1455
1456 static enum bifrost_discard_cond
1457 bi_cond_to_discard(enum bi_cond cond, bool *flip)
1458 {
1459 switch (cond){
1460 case BI_COND_GT:
1461 *flip = true;
1462 /* fallthrough */
1463 case BI_COND_LT:
1464 return BIFROST_DISCARD_FLT;
1465 case BI_COND_GE:
1466 *flip = true;
1467 /* fallthrough */
1468 case BI_COND_LE:
1469 return BIFROST_DISCARD_FLE;
1470 case BI_COND_NE:
1471 return BIFROST_DISCARD_FNE;
1472 case BI_COND_EQ:
1473 return BIFROST_DISCARD_FEQ;
1474 default:
1475 unreachable("Invalid op for discard");
1476 }
1477 }
1478
1479 static unsigned
1480 bi_pack_add_discard(bi_instruction *ins, struct bi_registers *regs)
1481 {
1482 bool fp16 = ins->src_types[0] == nir_type_float16;
1483 assert(fp16 || ins->src_types[0] == nir_type_float32);
1484
1485 bool flip = false;
1486 enum bifrost_discard_cond cond = bi_cond_to_discard(ins->cond, &flip);
1487
1488 struct bifrost_add_discard pack = {
1489 .src0 = bi_get_src(ins, regs, flip ? 1 : 0),
1490 .src1 = bi_get_src(ins, regs, flip ? 0 : 1),
1491 .cond = cond,
1492 .src0_select = fp16 ? ins->swizzle[0][0] : 0,
1493 .src1_select = fp16 ? ins->swizzle[1][0] : 0,
1494 .fp32 = fp16 ? 0 : 1,
1495 .op = BIFROST_ADD_OP_DISCARD
1496 };
1497
1498 RETURN_PACKED(pack);
1499 }
1500
/* Maps an IR comparison to a hardware integer-compare condition. Only
 * >, >=, ==, != exist in hardware, so < and <= are realized by swapping
 * the operands; *flip tells the caller to do so.
 *
 * NOTE(review): for 16-bit unsigned compares the GT path returns
 * BIFROST_ICMP_IGE and the GE path returns BIFROST_ICMP_UGT -- the
 * opposite pairing of the 32-bit path. This appears deliberate (compare
 * the commented-out bi_invert_cond at the call site in bi_pack_add_cmp),
 * presumably a hardware quirk workaround -- confirm against the ISA
 * documentation before changing. */
static enum bifrost_icmp_cond
bi_cond_to_icmp(enum bi_cond cond, bool *flip, bool is_unsigned, bool is_16)
{
        switch (cond){
        case BI_COND_LT:
                *flip = true;
                /* fallthrough */
        case BI_COND_GT:
                return is_unsigned ? (is_16 ? BIFROST_ICMP_IGE : BIFROST_ICMP_UGT)
                        : BIFROST_ICMP_IGT;
        case BI_COND_LE:
                *flip = true;
                /* fallthrough */
        case BI_COND_GE:
                return is_unsigned ? BIFROST_ICMP_UGE :
                        (is_16 ? BIFROST_ICMP_UGT : BIFROST_ICMP_IGE);
        case BI_COND_NE:
                return BIFROST_ICMP_NEQ;
        case BI_COND_EQ:
                return BIFROST_ICMP_EQ;
        default:
                unreachable("Invalid op for icmp");
        }
}
1525
1526 static unsigned
1527 bi_pack_add_icmp32(bi_instruction *ins, struct bi_registers *regs, bool flip,
1528 enum bifrost_icmp_cond cond)
1529 {
1530 struct bifrost_add_icmp pack = {
1531 .src0 = bi_get_src(ins, regs, flip ? 1 : 0),
1532 .src1 = bi_get_src(ins, regs, flip ? 0 : 1),
1533 .cond = cond,
1534 .sz = 1,
1535 .d3d = false,
1536 .op = BIFROST_ADD_OP_ICMP_32
1537 };
1538
1539 RETURN_PACKED(pack);
1540 }
1541
1542 static unsigned
1543 bi_pack_add_icmp16(bi_instruction *ins, struct bi_registers *regs, bool flip,
1544 enum bifrost_icmp_cond cond)
1545 {
1546 struct bifrost_add_icmp16 pack = {
1547 .src0 = bi_get_src(ins, regs, flip ? 1 : 0),
1548 .src1 = bi_get_src(ins, regs, flip ? 0 : 1),
1549 .src0_swizzle = bi_swiz16(ins, flip ? 1 : 0),
1550 .src1_swizzle = bi_swiz16(ins, flip ? 0 : 1),
1551 .cond = cond,
1552 .d3d = false,
1553 .op = BIFROST_ADD_OP_ICMP_16
1554 };
1555
1556 RETURN_PACKED(pack);
1557 }
1558
/* Packs a comparison on the ADD unit. Only integer compares are handled
 * so far; float compares are TODO. */
static unsigned
bi_pack_add_cmp(bi_instruction *ins, struct bi_registers *regs)
{
        nir_alu_type Tl = ins->src_types[0];
        nir_alu_type Tr = ins->src_types[1];
        nir_alu_type Bl = nir_alu_type_get_base_type(Tl);

        if (Bl == nir_type_uint || Bl == nir_type_int) {
                /* Mixed-type compares are not handled */
                assert(Tl == Tr);
                unsigned sz = nir_alu_type_get_type_size(Tl);

                bool flip = false;

                /* NOTE(review): the commented-out bi_invert_cond for the
                 * 16-bit case suggests an unresolved hardware quirk here --
                 * see also the crossed 16-bit encodings in bi_cond_to_icmp */
                enum bifrost_icmp_cond cond = bi_cond_to_icmp(
                                sz == 16 ? /*bi_invert_cond*/(ins->cond) : ins->cond,
                                &flip, Bl == nir_type_uint, sz == 16);

                if (sz == 32)
                        return bi_pack_add_icmp32(ins, regs, flip, cond);
                else if (sz == 16)
                        return bi_pack_add_icmp16(ins, regs, flip, cond);
                else
                        unreachable("TODO");
        } else {
                unreachable("TODO");
        }
}
1586
1587 static unsigned
1588 bi_pack_add_imath(bi_instruction *ins, struct bi_registers *regs)
1589 {
1590 /* TODO: 32+16 add */
1591 assert(ins->src_types[0] == ins->src_types[1]);
1592 unsigned sz = nir_alu_type_get_type_size(ins->src_types[0]);
1593 enum bi_imath_op p = ins->op.imath;
1594
1595 unsigned op = 0;
1596
1597 if (sz == 8) {
1598 op = (p == BI_IMATH_ADD) ? BIFROST_ADD_IADD_8 :
1599 BIFROST_ADD_ISUB_8;
1600 } else if (sz == 16) {
1601 op = (p == BI_IMATH_ADD) ? BIFROST_ADD_IADD_16 :
1602 BIFROST_ADD_ISUB_16;
1603 } else if (sz == 32) {
1604 op = (p == BI_IMATH_ADD) ? BIFROST_ADD_IADD_32 :
1605 BIFROST_ADD_ISUB_32;
1606 } else {
1607 unreachable("64-bit todo");
1608 }
1609
1610 return bi_pack_add_2src(ins, regs, op);
1611 }
1612
1613 static unsigned
1614 bi_pack_add(bi_clause *clause, bi_bundle bundle, struct bi_registers *regs, gl_shader_stage stage)
1615 {
1616 if (!bundle.add)
1617 return BIFROST_ADD_NOP;
1618
1619 switch (bundle.add->type) {
1620 case BI_ADD:
1621 return bi_pack_add_addmin(bundle.add, regs);
1622 case BI_ATEST:
1623 return bi_pack_add_atest(clause, bundle.add, regs);
1624 case BI_BRANCH:
1625 unreachable("Packing todo");
1626 case BI_CMP:
1627 return bi_pack_add_cmp(bundle.add, regs);
1628 case BI_BLEND:
1629 return bi_pack_add_blend(clause, bundle.add, regs);
1630 case BI_BITWISE:
1631 unreachable("Packing todo");
1632 case BI_CONVERT:
1633 return bi_pack_convert(bundle.add, regs, false);
1634 case BI_DISCARD:
1635 return bi_pack_add_discard(bundle.add, regs);
1636 case BI_FREXP:
1637 unreachable("Packing todo");
1638 case BI_IMATH:
1639 return bi_pack_add_imath(bundle.add, regs);
1640 case BI_LOAD:
1641 unreachable("Packing todo");
1642 case BI_LOAD_ATTR:
1643 return bi_pack_add_ld_attr(clause, bundle.add, regs);
1644 case BI_LOAD_UNIFORM:
1645 return bi_pack_add_ld_ubo(clause, bundle.add, regs);
1646 case BI_LOAD_VAR:
1647 return bi_pack_add_ld_vary(clause, bundle.add, regs);
1648 case BI_LOAD_VAR_ADDRESS:
1649 return bi_pack_add_ld_var_addr(clause, bundle.add, regs);
1650 case BI_MINMAX:
1651 return bi_pack_add_addmin(bundle.add, regs);
1652 case BI_MOV:
1653 case BI_SHIFT:
1654 case BI_STORE:
1655 unreachable("Packing todo");
1656 case BI_STORE_VAR:
1657 return bi_pack_add_st_vary(clause, bundle.add, regs);
1658 case BI_SPECIAL:
1659 return bi_pack_add_special(bundle.add, regs);
1660 case BI_TABLE:
1661 return bi_pack_add_table(bundle.add, regs);
1662 case BI_SELECT:
1663 return bi_pack_add_select(bundle.add, regs);
1664 case BI_TEX:
1665 if (bundle.add->op.texture == BI_TEX_COMPACT)
1666 return bi_pack_add_tex_compact(clause, bundle.add, regs, stage);
1667 else
1668 unreachable("Unknown tex type");
1669 case BI_ROUND:
1670 unreachable("Packing todo");
1671 default:
1672 unreachable("Cannot encode class as ADD");
1673 }
1674 }
1675
/* A packed bundle split across two words: lo carries the low 64 bits
 * (register word, FMA, and the bottom of ADD), hi carries the remaining
 * ADD bits, to be spliced into the clause format by the caller. */
struct bi_packed_bundle {
        uint64_t lo;
        uint64_t hi;
};
1680
/* Packs a single bundle: assigns ports/uniform-constant state, packs the
 * register control word and both execution slots, then interleaves them
 * into the lo/hi split consumed by bi_pack_clause. */
static struct bi_packed_bundle
bi_pack_bundle(bi_clause *clause, bi_bundle bundle, bi_bundle prev, bool first_bundle, gl_shader_stage stage)
{
        /* Port assignment depends on the previous bundle's register usage */
        struct bi_registers regs = bi_assign_ports(bundle, prev);
        bi_assign_uniform_constant(clause, &regs, bundle);
        regs.first_instruction = first_bundle;

        uint64_t reg = bi_pack_registers(regs);
        uint64_t fma = bi_pack_fma(clause, bundle, &regs);
        uint64_t add = bi_pack_add(clause, bundle, &regs, stage);

        /* Register word in the low bits, FMA starting at bit 35, and the
         * ADD word split: its low 6 bits top off lo, the rest go in hi */
        struct bi_packed_bundle packed = {
                .lo = reg | (fma << 35) | ((add & 0b111111) << 58),
                .hi = add >> 6
        };

        return packed;
}
1699
1700 /* Packs the next two constants as a dedicated constant quadword at the end of
1701 * the clause, returning the number packed. */
1702
static unsigned
bi_pack_constants(bi_context *ctx, bi_clause *clause,
                unsigned index,
                struct util_dynarray *emission)
{
        /* ctx is currently unused; kept for symmetry with the other
         * emission helpers */

        /* After these two, are we done? Determines tag */
        bool done = clause->constant_count <= (index + 2);
        bool only = clause->constant_count <= (index + 1);

        /* TODO: Pos */
        assert(index == 0 && clause->bundle_count == 1);
        assert(only);

        /* Top nibble of the constant, consulted by the workaround below */
        uint64_t hi = clause->constants[index + 0] >> 60ull;

        struct bifrost_fmt_constant quad = {
                .pos = 0, /* TODO */
                .tag = done ? BIFROST_FMTC_FINAL : BIFROST_FMTC_CONSTANTS,
                .imm_1 = clause->constants[index + 0] >> 4,
                .imm_2 = ((hi < 8) ? (hi << 60ull) : 0) >> 4,
        };

        /* XXX: On G71, Connor observed that the difference of the top 4 bits
         * of the second constant with the first must be less than 8, otherwise
         * we have to swap them. On G52, I'm able to reproduce a similar issue
         * but with a different workaround (modeled above with a single
         * constant, unclear how to workaround for multiple constants.) Further
         * investigation needed. Possibly an errata. XXX */

        util_dynarray_append(emission, struct bifrost_fmt_constant, quad);

        /* A constant quadword always accounts for two constant slots */
        return 2;
}
1736
/* Packs a whole clause (header + bundle + trailing constant quadwords)
 * into the emission stream. Only single-bundle clauses are handled so
 * far; the bundle acts as its own predecessor for port assignment. */
static void
bi_pack_clause(bi_context *ctx, bi_clause *clause, bi_clause *next,
                struct util_dynarray *emission, gl_shader_stage stage)
{
        struct bi_packed_bundle ins_1 = bi_pack_bundle(clause, clause->bundles[0], clause->bundles[0], true, stage);
        assert(clause->bundle_count == 1);

        /* Used to decide if we elide writes */
        bool is_fragment = ctx->stage == MESA_SHADER_FRAGMENT;

        /* State for packing constants throughout */
        unsigned constant_index = 0;

        /* Splice the packed bundle into the fmt1 quadword: low 64 bits in
         * ins_1, the next 11 bits in ins_2, the following 3 in ins_0 */
        struct bifrost_fmt1 quad_1 = {
                .tag = clause->constant_count ? BIFROST_FMT1_CONSTANTS : BIFROST_FMT1_FINAL,
                .header = bi_pack_header(clause, next, is_fragment),
                .ins_1 = ins_1.lo,
                .ins_2 = ins_1.hi & ((1 << 11) - 1),
                .ins_0 = (ins_1.hi >> 11) & 0b111,
        };

        util_dynarray_append(emission, struct bifrost_fmt1, quad_1);

        /* Pack the remaining constants */

        while (constant_index < clause->constant_count) {
                constant_index += bi_pack_constants(ctx, clause,
                                constant_index, emission);
        }
}
1767
/* Returns the clause that executes after the given one: the next clause
 * in this block if any, otherwise the first clause of the next non-empty
 * block. Returns NULL at the end of the shader (used by the header
 * packer to set end-of-shader). */
static bi_clause *
bi_next_clause(bi_context *ctx, pan_block *block, bi_clause *clause)
{
        /* Try the next clause in this block */
        if (clause->link.next != &((bi_block *) block)->clauses)
                return list_first_entry(&(clause->link), bi_clause, link);

        /* Try the next block, or the one after that if it's empty, etc .*/
        pan_block *next_block = pan_next_block(block);

        /* NOTE(review): the foreach macro appears to reuse `block` as its
         * iteration variable, shadowing the parameter for the rest of the
         * function -- confirm against the macro definition in the headers */
        bi_foreach_block_from(ctx, next_block, block) {
                bi_block *blk = (bi_block *) block;

                if (!list_is_empty(&blk->clauses))
                        return list_first_entry(&(blk->clauses), bi_clause, link);
        }

        return NULL;
}
1787
1788 void
1789 bi_pack(bi_context *ctx, struct util_dynarray *emission)
1790 {
1791 util_dynarray_init(emission, NULL);
1792
1793 bi_foreach_block(ctx, _block) {
1794 bi_block *block = (bi_block *) _block;
1795
1796 bi_foreach_clause_in_block(block, clause) {
1797 bi_clause *next = bi_next_clause(ctx, _block, clause);
1798 bi_pack_clause(ctx, clause, next, emission, ctx->stage);
1799 }
1800 }
1801 }