pan/bi: Assign constant port for branch offsets
[mesa.git] / src / panfrost / bifrost / bi_pack.c
1 /*
2 * Copyright (C) 2020 Collabora, Ltd.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 */
23
24 #include "compiler.h"
25 #include "bi_print.h"
26
/* Copies a packed bitfield struct into a uint64_t and returns it.
 * memcpy is the portable way to type-pun without strict-aliasing UB;
 * any bytes past sizeof(str) remain zero. */
#define RETURN_PACKED(str) { \
        uint64_t temp = 0; \
        memcpy(&temp, &str, sizeof(str)); \
        return temp; \
}
32
33 /* This file contains the final passes of the compiler. Running after
34 * scheduling and RA, the IR is now finalized, so we need to emit it to actual
35 * bits on the wire (as well as fixup branches) */
36
37 static uint64_t
38 bi_pack_header(bi_clause *clause, bi_clause *next, bool is_fragment)
39 {
40 struct bifrost_header header = {
41 .back_to_back = clause->back_to_back,
42 .no_end_of_shader = (next != NULL),
43 .elide_writes = is_fragment,
44 .branch_cond = clause->branch_conditional,
45 .datareg_writebarrier = clause->data_register_write_barrier,
46 .datareg = clause->data_register,
47 .scoreboard_deps = next ? next->dependencies : 0,
48 .scoreboard_index = clause->scoreboard_id,
49 .clause_type = clause->clause_type,
50 .next_clause_type = next ? next->clause_type : 0,
51 .suppress_inf = true,
52 .suppress_nan = true,
53 };
54
55 header.branch_cond |= header.back_to_back;
56
57 uint64_t u = 0;
58 memcpy(&u, &header, sizeof(header));
59 return u;
60 }
61
62 /* The uniform/constant slot allows loading a contiguous 64-bit immediate or
63 * pushed uniform per bundle. Figure out which one we need in the bundle (the
64 * scheduler needs to ensure we only have one type per bundle), validate
65 * everything, and rewrite away the register/uniform indices to use 3-bit
66 * sources directly. */
67
68 static unsigned
69 bi_lookup_constant(bi_clause *clause, uint64_t cons, bool *hi, bool b64)
70 {
71 uint64_t want = (cons >> 4);
72
73 for (unsigned i = 0; i < clause->constant_count; ++i) {
74 /* Only check top 60-bits since that's what's actually embedded
75 * in the clause, the bottom 4-bits are bundle-inline */
76
77 uint64_t candidates[2] = {
78 clause->constants[i] >> 4,
79 clause->constants[i] >> 36
80 };
81
82 /* For <64-bit mode, we treat lo/hi separately */
83
84 if (!b64)
85 candidates[0] &= (0xFFFFFFFF >> 4);
86
87 if (candidates[0] == want)
88 return i;
89
90 if (candidates[1] == want && !b64) {
91 *hi = true;
92 return i;
93 }
94 }
95
96 unreachable("Invalid constant accessed");
97 }
98
/* Translates a constant slot index (0..5) into the hardware's
 * uniform/constant field selector, pre-shifted into bit position 4. */

static unsigned
bi_constant_field(unsigned idx)
{
        static const unsigned selectors[6] = {
                4, 5, 6, 7, 2, 3
        };

        assert(idx <= 5);
        return selectors[idx] << 4;
}
110
111 static bool
112 bi_assign_uniform_constant_single(
113 bi_registers *regs,
114 bi_clause *clause,
115 bi_instruction *ins, bool assigned, bool fast_zero)
116 {
117 if (!ins)
118 return assigned;
119
120 if (ins->type == BI_BLEND) {
121 assert(!assigned);
122 regs->uniform_constant = 0x8;
123 return true;
124 }
125
126 if (ins->type == BI_BRANCH && clause->branch_constant) {
127 /* By convention branch constant is last */
128 unsigned idx = clause->constant_count - 1;
129
130 /* We can only jump to clauses which are qword aligned so the
131 * bottom 4-bits of the offset are necessarily 0 */
132 unsigned lo = 0;
133
134 /* Build the constant */
135 unsigned C = bi_constant_field(idx) | lo;
136
137 if (assigned && regs->uniform_constant != C)
138 unreachable("Mismatched uniform/const field: branch");
139
140 regs->uniform_constant = C;
141 return true;
142 }
143
144 bi_foreach_src(ins, s) {
145 if (s == 0 && (ins->type == BI_LOAD_VAR_ADDRESS || ins->type == BI_LOAD_ATTR)) continue;
146 if (s == 1 && (ins->type == BI_BRANCH)) continue;
147
148 if (ins->src[s] & BIR_INDEX_CONSTANT) {
149 /* Let direct addresses through */
150 if (ins->type == BI_LOAD_VAR)
151 continue;
152
153 bool hi = false;
154 bool b64 = nir_alu_type_get_type_size(ins->src_types[s]) > 32;
155 uint64_t cons = bi_get_immediate(ins, s);
156 unsigned idx = bi_lookup_constant(clause, cons, &hi, b64);
157 unsigned lo = clause->constants[idx] & 0xF;
158 unsigned f = bi_constant_field(idx) | lo;
159
160 if (assigned && regs->uniform_constant != f)
161 unreachable("Mismatched uniform/const field: imm");
162
163 regs->uniform_constant = f;
164 ins->src[s] = BIR_INDEX_PASS | (hi ? BIFROST_SRC_CONST_HI : BIFROST_SRC_CONST_LO);
165 assigned = true;
166 } else if (ins->src[s] & BIR_INDEX_ZERO && (ins->type == BI_LOAD_UNIFORM || ins->type == BI_LOAD_VAR)) {
167 /* XXX: HACK UNTIL WE HAVE HI MATCHING DUE TO OVERFLOW XXX */
168 ins->src[s] = BIR_INDEX_PASS | BIFROST_SRC_CONST_HI;
169 } else if (ins->src[s] & BIR_INDEX_ZERO && !fast_zero) {
170 /* FMAs have a fast zero port, ADD needs to use the
171 * uniform/const port's special 0 mode handled here */
172 unsigned f = 0;
173
174 if (assigned && regs->uniform_constant != f)
175 unreachable("Mismatched uniform/const field: 0");
176
177 regs->uniform_constant = f;
178 ins->src[s] = BIR_INDEX_PASS | BIFROST_SRC_CONST_LO;
179 assigned = true;
180 } else if (ins->src[s] & BIR_INDEX_ZERO && fast_zero) {
181 ins->src[s] = BIR_INDEX_PASS | BIFROST_SRC_STAGE;
182 } else if (s & BIR_INDEX_UNIFORM) {
183 unreachable("Push uniforms not implemented yet");
184 }
185 }
186
187 return assigned;
188 }
189
190 static void
191 bi_assign_uniform_constant(
192 bi_clause *clause,
193 bi_registers *regs,
194 bi_bundle bundle)
195 {
196 bool assigned =
197 bi_assign_uniform_constant_single(regs, clause, bundle.fma, false, true);
198
199 bi_assign_uniform_constant_single(regs, clause, bundle.add, assigned, false);
200 }
201
202 /* Assigns a port for reading, before anything is written */
203
204 static void
205 bi_assign_port_read(bi_registers *regs, unsigned src)
206 {
207 /* We only assign for registers */
208 if (!(src & BIR_INDEX_REGISTER))
209 return;
210
211 unsigned reg = src & ~BIR_INDEX_REGISTER;
212
213 /* Check if we already assigned the port */
214 for (unsigned i = 0; i <= 1; ++i) {
215 if (regs->port[i] == reg && regs->enabled[i])
216 return;
217 }
218
219 if (regs->port[3] == reg && regs->read_port3)
220 return;
221
222 /* Assign it now */
223
224 for (unsigned i = 0; i <= 1; ++i) {
225 if (!regs->enabled[i]) {
226 regs->port[i] = reg;
227 regs->enabled[i] = true;
228 return;
229 }
230 }
231
232 if (!regs->read_port3) {
233 regs->port[3] = reg;
234 regs->read_port3 = true;
235 return;
236 }
237
238 bi_print_ports(regs, stderr);
239 unreachable("Failed to find a free port for src");
240 }
241
/* Assigns read ports for the current bundle and write ports for the
 * previous bundle's results (which write back while `now` executes).
 * Returns the completed assignment. */

static bi_registers
bi_assign_ports(bi_bundle *now, bi_bundle *prev)
{
        /* We assign ports for the main register mechanism. Special ops
         * use the data registers, which has its own mechanism entirely
         * and thus gets skipped over here. */

        unsigned read_dreg = now->add &&
                bi_class_props[now->add->type] & BI_DATA_REG_SRC;

        unsigned write_dreg = prev->add &&
                bi_class_props[prev->add->type] & BI_DATA_REG_DEST;

        /* First, assign reads */

        if (now->fma)
                bi_foreach_src(now->fma, src)
                        bi_assign_port_read(&now->regs, now->fma->src[src]);

        if (now->add) {
                bi_foreach_src(now->add, src) {
                        /* src 0 of a data-register op is fed by the data
                         * register, not a read port */
                        if (!(src == 0 && read_dreg))
                                bi_assign_port_read(&now->regs, now->add->src[src]);
                }
        }

        /* Next, assign writes */

        if (prev->add && prev->add->dest & BIR_INDEX_REGISTER && !write_dreg) {
                now->regs.port[2] = prev->add->dest & ~BIR_INDEX_REGISTER;
                now->regs.write_add = true;
        }

        if (prev->fma && prev->fma->dest & BIR_INDEX_REGISTER) {
                unsigned r = prev->fma->dest & ~BIR_INDEX_REGISTER;

                if (now->regs.write_add) {
                        /* Scheduler constraint: cannot read 3 and write 2 */
                        assert(!now->regs.read_port3);
                        now->regs.port[3] = r;
                } else {
                        now->regs.port[2] = r;
                }

                now->regs.write_fma = true;
        }

        return now->regs;
}
291
292 /* Determines the register control field, ignoring the first? flag */
293
294 static enum bifrost_reg_control
295 bi_pack_register_ctrl_lo(bi_registers r)
296 {
297 if (r.write_fma) {
298 if (r.write_add) {
299 assert(!r.read_port3);
300 return BIFROST_WRITE_ADD_P2_FMA_P3;
301 } else {
302 if (r.read_port3)
303 return BIFROST_WRITE_FMA_P2_READ_P3;
304 else
305 return BIFROST_WRITE_FMA_P2;
306 }
307 } else if (r.write_add) {
308 if (r.read_port3)
309 return BIFROST_WRITE_ADD_P2_READ_P3;
310 else
311 return BIFROST_WRITE_ADD_P2;
312 } else if (r.read_port3)
313 return BIFROST_READ_P3;
314 else
315 return BIFROST_REG_NONE;
316 }
317
318 /* Ditto but account for the first? flag this time */
319
320 static enum bifrost_reg_control
321 bi_pack_register_ctrl(bi_registers r)
322 {
323 enum bifrost_reg_control ctrl = bi_pack_register_ctrl_lo(r);
324
325 if (r.first_instruction) {
326 if (ctrl == BIFROST_REG_NONE)
327 ctrl = BIFROST_FIRST_NONE;
328 else if (ctrl == BIFROST_WRITE_FMA_P2_READ_P3)
329 ctrl = BIFROST_FIRST_WRITE_FMA_P2_READ_P3;
330 else
331 ctrl |= BIFROST_FIRST_NONE;
332 }
333
334 return ctrl;
335 }
336
/* Packs the register-control word for one instruction: ports 0/1 (reads),
 * port 2 (write), port 3 (extra read or second write), and the
 * uniform/constant field. */

static uint64_t
bi_pack_registers(bi_registers regs)
{
        enum bifrost_reg_control ctrl = bi_pack_register_ctrl(regs);
        struct bifrost_regs s = { 0 };
        uint64_t packed = 0;

        if (regs.enabled[1]) {
                /* Gotta save that bit!~ Required by the 63-x trick */
                assert(regs.port[1] > regs.port[0]);
                assert(regs.enabled[0]);

                /* Do the 63-x trick, see docs/disasm */
                if (regs.port[0] > 31) {
                        regs.port[0] = 63 - regs.port[0];
                        regs.port[1] = 63 - regs.port[1];
                }

                assert(regs.port[0] <= 31);
                assert(regs.port[1] <= 63);

                s.ctrl = ctrl;
                s.reg1 = regs.port[1];
                s.reg0 = regs.port[0];
        } else {
                /* Port 1 disabled, so set to zero and use port 1 for ctrl */
                s.ctrl = 0;
                s.reg1 = ctrl << 2;

                if (regs.enabled[0]) {
                        /* Bit 0 upper bit of port 0 */
                        s.reg1 |= (regs.port[0] >> 5);

                        /* Rest of port 0 in usual spot */
                        s.reg0 = (regs.port[0] & 0b11111);
                } else {
                        /* Bit 1 set if port 0 also disabled */
                        s.reg1 |= (1 << 1);
                }
        }

        /* When port 3 isn't used, we have to set it to port 2, and vice versa,
         * or INSTR_INVALID_ENC is raised. The reason is unknown. */

        bool has_port2 = regs.write_fma || regs.write_add;
        bool has_port3 = regs.read_port3 || (regs.write_fma && regs.write_add);

        if (!has_port3)
                regs.port[3] = regs.port[2];

        if (!has_port2)
                regs.port[2] = regs.port[3];

        s.reg3 = regs.port[3];
        s.reg2 = regs.port[2];
        s.uniform_const = regs.uniform_constant;

        memcpy(&packed, &s, sizeof(s));
        return packed;
}
397
/* Records the clause's data register from an IR index; the index must be a
 * register index naming r0..r63. */
static void
bi_set_data_register(bi_clause *clause, unsigned idx)
{
        assert(idx & BIR_INDEX_REGISTER);
        unsigned reg = idx & ~BIR_INDEX_REGISTER;
        assert(reg <= 63);
        clause->data_register = reg;
}
406
/* For ops that consume the data register, it is by convention src[0] */
static void
bi_read_data_register(bi_clause *clause, bi_instruction *ins)
{
        bi_set_data_register(clause, ins->src[0]);
}
412
/* For ops that produce into the data register, it is the destination */
static void
bi_write_data_register(bi_clause *clause, bi_instruction *ins)
{
        bi_set_data_register(clause, ins->dest);
}
418
419 static enum bifrost_packed_src
420 bi_get_src_reg_port(bi_registers *regs, unsigned src)
421 {
422 unsigned reg = src & ~BIR_INDEX_REGISTER;
423
424 if (regs->port[0] == reg && regs->enabled[0])
425 return BIFROST_SRC_PORT0;
426 else if (regs->port[1] == reg && regs->enabled[1])
427 return BIFROST_SRC_PORT1;
428 else if (regs->port[3] == reg && regs->read_port3)
429 return BIFROST_SRC_PORT3;
430 else
431 unreachable("Tried to access register with no port");
432 }
433
434 static enum bifrost_packed_src
435 bi_get_src(bi_instruction *ins, bi_registers *regs, unsigned s)
436 {
437 unsigned src = ins->src[s];
438
439 if (src & BIR_INDEX_REGISTER)
440 return bi_get_src_reg_port(regs, src);
441 else if (src & BIR_INDEX_PASS)
442 return src & ~BIR_INDEX_PASS;
443 else {
444 bi_print_instruction(ins, stderr);
445 unreachable("Unknown src in above instruction");
446 }
447 }
448
449 /* Constructs a packed 2-bit swizzle for a 16-bit vec2 source. Source must be
450 * 16-bit and written components must correspond to valid swizzles (component x
451 * or y). */
452
453 static unsigned
454 bi_swiz16(bi_instruction *ins, unsigned src)
455 {
456 assert(nir_alu_type_get_type_size(ins->src_types[src]) == 16);
457 unsigned swizzle = 0;
458
459 for (unsigned c = 0; c < 2; ++c) {
460 if (!bi_writes_component(ins, src)) continue;
461
462 unsigned k = ins->swizzle[src][c];
463 assert(k <= 1);
464 swizzle |= (k << c);
465 }
466
467 return swizzle;
468 }
469
/* Packs an FMA-unit fused multiply-add: MSCALE when op.mscale is set,
 * otherwise 32-bit or vec2 16-bit FMA by destination type. */

static unsigned
bi_pack_fma_fma(bi_instruction *ins, bi_registers *regs)
{
        /* (-a)(-b) = ab, so we only need one negate bit */
        bool negate_mul = ins->src_neg[0] ^ ins->src_neg[1];

        if (ins->op.mscale) {
                assert(!(ins->src_abs[0] && ins->src_abs[1]));
                assert(!ins->src_abs[2] || !ins->src_neg[3] || !ins->src_abs[3]);

                /* We can have exactly one abs, and can flip the multiplication
                 * to make it fit if we have to */
                bool flip_ab = ins->src_abs[1];

                struct bifrost_fma_mscale pack = {
                        .src0 = bi_get_src(ins, regs, flip_ab ? 1 : 0),
                        .src1 = bi_get_src(ins, regs, flip_ab ? 0 : 1),
                        .src2 = bi_get_src(ins, regs, 2),
                        .src3 = bi_get_src(ins, regs, 3),
                        .mscale_mode = 0,
                        .mode = ins->outmod,
                        .src0_abs = ins->src_abs[0] || ins->src_abs[1],
                        .src1_neg = negate_mul,
                        .src2_neg = ins->src_neg[2],
                        .op = BIFROST_FMA_OP_MSCALE,
                };

                RETURN_PACKED(pack);
        } else if (ins->dest_type == nir_type_float32) {
                struct bifrost_fma_fma pack = {
                        .src0 = bi_get_src(ins, regs, 0),
                        .src1 = bi_get_src(ins, regs, 1),
                        .src2 = bi_get_src(ins, regs, 2),
                        .src0_abs = ins->src_abs[0],
                        .src1_abs = ins->src_abs[1],
                        .src2_abs = ins->src_abs[2],
                        .src0_neg = negate_mul,
                        .src2_neg = ins->src_neg[2],
                        .outmod = ins->outmod,
                        .roundmode = ins->roundmode,
                        .op = BIFROST_FMA_OP_FMA
                };

                RETURN_PACKED(pack);
        } else if (ins->dest_type == nir_type_float16) {
                struct bifrost_fma_fma16 pack = {
                        .src0 = bi_get_src(ins, regs, 0),
                        .src1 = bi_get_src(ins, regs, 1),
                        .src2 = bi_get_src(ins, regs, 2),
                        .swizzle_0 = bi_swiz16(ins, 0),
                        .swizzle_1 = bi_swiz16(ins, 1),
                        .swizzle_2 = bi_swiz16(ins, 2),
                        .src0_neg = negate_mul,
                        .src2_neg = ins->src_neg[2],
                        .outmod = ins->outmod,
                        .roundmode = ins->roundmode,
                        .op = BIFROST_FMA_OP_FMA16
                };

                RETURN_PACKED(pack);
        } else {
                unreachable("Invalid fma dest type");
        }
}
534
/* Packs 32-bit FADD/FMIN/FMAX on the FMA unit. Note min/max reuse the
 * roundmode field for the minmax mode. */

static unsigned
bi_pack_fma_addmin_f32(bi_instruction *ins, bi_registers *regs)
{
        unsigned op =
                (ins->type == BI_ADD) ? BIFROST_FMA_OP_FADD32 :
                (ins->op.minmax == BI_MINMAX_MIN) ? BIFROST_FMA_OP_FMIN32 :
                BIFROST_FMA_OP_FMAX32;

        struct bifrost_fma_add pack = {
                .src0 = bi_get_src(ins, regs, 0),
                .src1 = bi_get_src(ins, regs, 1),
                .src0_abs = ins->src_abs[0],
                .src1_abs = ins->src_abs[1],
                .src0_neg = ins->src_neg[0],
                .src1_neg = ins->src_neg[1],
                .unk = 0x0,
                .outmod = ins->outmod,
                /* min/max repurpose this field for the minmax mode */
                .roundmode = (ins->type == BI_ADD) ? ins->roundmode : ins->minmax,
                .op = op
        };

        RETURN_PACKED(pack);
}
558
/* Computes the auxiliary abs bit (return value) and operand swap (*flip)
 * for fp16 commutative ops — see the scheme below. */
static bool
bi_pack_fp16_abs(bi_instruction *ins, bi_registers *regs, bool *flip)
{
        /* Absolute values are packed in a quirky way. Let k = src1 < src0. Let
         * l be an auxiliary bit we encode. Then the hardware determines:
         *
         *      abs0 = l || k
         *      abs1 = l && k
         *
         * Since add/min/max are commutative, this saves a bit by using the
         * order of the operands as a bit (k). To pack this, first note:
         *
         *      (l && k) implies (l || k).
         *
         * That is, if the second argument is abs'd, then the first argument
         * also has abs. So there are three cases:
         *
         * Case 0: Neither src has absolute value. Then we have l = k = 0.
         *
         * Case 1: Exactly one src has absolute value. Assign that source to
         * src0 and the other source to src1. Compute k = src1 < src0 based on
         * that assignment. Then l = ~k.
         *
         * Case 2: Both sources have absolute value. Then we have l = k = 1.
         * Note to force k = 1 requires that (src1 < src0) OR (src0 < src1).
         * That is, this encoding is only valid if src1 and src0 are distinct.
         * This is a scheduling restriction (XXX); if an op of this type
         * requires both identical sources to have abs value, then we must
         * schedule to ADD (which does not use this ordering trick).
         */

        unsigned abs_0 = ins->src_abs[0], abs_1 = ins->src_abs[1];
        unsigned src_0 = bi_get_src(ins, regs, 0);
        unsigned src_1 = bi_get_src(ins, regs, 1);

        assert(!(abs_0 && abs_1 && src_0 == src_1));

        if (!abs_0 && !abs_1) {
                /* Force k = 0 <===> NOT(src1 < src0) */
                *flip = (src_1 < src_0);
                return false;
        } else if (abs_0 && !abs_1) {
                /* Case 1 with abs on src0: keep order, l = ~k */
                return src_1 >= src_0;
        } else if (abs_1 && !abs_0) {
                /* Case 1 with abs on src1: swap so the abs operand is first */
                *flip = true;
                return src_0 >= src_1;
        } else {
                /* Case 2: both abs, force k = 1 by ordering the operands */
                *flip = !(src_1 < src_0);
                return true;
        }
}
610
/* Packs fp16 add/min/max for either unit (FMA selects between FADD16 and
 * FMIN16/FMAX16; ADD only has FMIN16/FMAX16 here). Operand order and the
 * abs1 bit come from the fp16 abs encoding trick above. */

static unsigned
bi_pack_fmadd_min_f16(bi_instruction *ins, bi_registers *regs, bool FMA)
{
        unsigned op =
                (!FMA) ? ((ins->op.minmax == BI_MINMAX_MIN) ?
                        BIFROST_ADD_OP_FMIN16 : BIFROST_ADD_OP_FMAX16) :
                (ins->type == BI_ADD) ? BIFROST_FMA_OP_FADD16 :
                (ins->op.minmax == BI_MINMAX_MIN) ? BIFROST_FMA_OP_FMIN16 :
                BIFROST_FMA_OP_FMAX16;

        bool flip = false;
        bool l = bi_pack_fp16_abs(ins, regs, &flip);
        unsigned src_0 = bi_get_src(ins, regs, 0);
        unsigned src_1 = bi_get_src(ins, regs, 1);

        if (FMA) {
                struct bifrost_fma_add_minmax16 pack = {
                        .src0 = flip ? src_1 : src_0,
                        .src1 = flip ? src_0 : src_1,
                        .src0_neg = ins->src_neg[flip ? 1 : 0],
                        .src1_neg = ins->src_neg[flip ? 0 : 1],
                        .src0_swizzle = bi_swiz16(ins, flip ? 1 : 0),
                        .src1_swizzle = bi_swiz16(ins, flip ? 0 : 1),
                        .abs1 = l,
                        .outmod = ins->outmod,
                        .mode = (ins->type == BI_ADD) ? ins->roundmode : ins->minmax,
                        .op = op
                };

                RETURN_PACKED(pack);
        } else {
                /* Can't have modes for fp16 */
                assert(ins->outmod == 0);

                struct bifrost_add_fmin16 pack = {
                        .src0 = flip ? src_1 : src_0,
                        .src1 = flip ? src_0 : src_1,
                        .src0_neg = ins->src_neg[flip ? 1 : 0],
                        .src1_neg = ins->src_neg[flip ? 0 : 1],
                        .abs1 = l,
                        .src0_swizzle = bi_swiz16(ins, flip ? 1 : 0),
                        .src1_swizzle = bi_swiz16(ins, flip ? 0 : 1),
                        .mode = ins->minmax,
                        .op = op
                };

                RETURN_PACKED(pack);
        }
}
660
661 static unsigned
662 bi_pack_fma_addmin(bi_instruction *ins, bi_registers *regs)
663 {
664 if (ins->dest_type == nir_type_float32)
665 return bi_pack_fma_addmin_f32(ins, regs);
666 else if(ins->dest_type == nir_type_float16)
667 return bi_pack_fmadd_min_f16(ins, regs, true);
668 else
669 unreachable("Unknown FMA/ADD type");
670 }
671
/* Generic single-source FMA instruction with an explicit opcode `op` */
static unsigned
bi_pack_fma_1src(bi_instruction *ins, bi_registers *regs, unsigned op)
{
        struct bifrost_fma_inst pack = {
                .src0 = bi_get_src(ins, regs, 0),
                .op = op
        };

        RETURN_PACKED(pack);
}
682
/* Generic two-source FMA instruction with an explicit opcode `op` */
static unsigned
bi_pack_fma_2src(bi_instruction *ins, bi_registers *regs, unsigned op)
{
        struct bifrost_fma_2src pack = {
                .src0 = bi_get_src(ins, regs, 0),
                .src1 = bi_get_src(ins, regs, 1),
                .op = op
        };

        RETURN_PACKED(pack);
}
694
/* Generic single-source ADD instruction with an explicit opcode `op` */
static unsigned
bi_pack_add_1src(bi_instruction *ins, bi_registers *regs, unsigned op)
{
        struct bifrost_add_inst pack = {
                .src0 = bi_get_src(ins, regs, 0),
                .op = op
        };

        RETURN_PACKED(pack);
}
705
/* Maps an IR condition to a hardware CSEL condition. The hardware only has
 * GT/GE/EQ, so LT/LE are encoded by swapping operands (*flip) and NE by
 * inverting the result operands (*invert). `T` selects the float/int/uint
 * variant of each comparison. */
static enum bifrost_csel_cond
bi_cond_to_csel(enum bi_cond cond, bool *flip, bool *invert, nir_alu_type T)
{
        nir_alu_type B = nir_alu_type_get_base_type(T);
        unsigned idx = (B == nir_type_float) ? 0 :
                ((B == nir_type_int) ? 1 : 2);

        switch (cond){
        case BI_COND_LT:
                *flip = true;
                /* fallthrough */
        case BI_COND_GT: {
                const enum bifrost_csel_cond ops[] = {
                        BIFROST_FGT_F,
                        BIFROST_IGT_I,
                        BIFROST_UGT_I
                };

                return ops[idx];
        }
        case BI_COND_LE:
                *flip = true;
                /* fallthrough */
        case BI_COND_GE: {
                const enum bifrost_csel_cond ops[] = {
                        BIFROST_FGE_F,
                        BIFROST_IGE_I,
                        BIFROST_UGE_I
                };

                return ops[idx];
        }
        case BI_COND_NE:
                *invert = true;
                /* fallthrough */
        case BI_COND_EQ: {
                const enum bifrost_csel_cond ops[] = {
                        BIFROST_FEQ_F,
                        BIFROST_IEQ_F,
                        BIFROST_IEQ_F /* sign is irrelevant */
                };

                return ops[idx];
        }
        default:
                unreachable("Invalid op for csel");
        }
}
751
/* Packs a conditional select: (src0 <cond> src1) ? src2 : src3, with the
 * operand swaps computed by bi_cond_to_csel applied to the compare pair
 * (flip) and the result pair (invert). */
static unsigned
bi_pack_fma_csel(bi_instruction *ins, bi_registers *regs)
{
        /* TODO: Use csel3 as well */
        bool flip = false, invert = false;

        enum bifrost_csel_cond cond =
                bi_cond_to_csel(ins->cond, &flip, &invert, ins->src_types[0]);

        unsigned size = nir_alu_type_get_type_size(ins->dest_type);

        unsigned cmp_0 = (flip ? 1 : 0);
        unsigned cmp_1 = (flip ? 0 : 1);
        unsigned res_0 = (invert ? 3 : 2);
        unsigned res_1 = (invert ? 2 : 3);

        struct bifrost_csel4 pack = {
                .src0 = bi_get_src(ins, regs, cmp_0),
                .src1 = bi_get_src(ins, regs, cmp_1),
                .src2 = bi_get_src(ins, regs, res_0),
                .src3 = bi_get_src(ins, regs, res_1),
                .cond = cond,
                .op = (size == 16) ? BIFROST_FMA_OP_CSEL4_V16 :
                        BIFROST_FMA_OP_CSEL4
        };

        RETURN_PACKED(pack);
}
780
781 static unsigned
782 bi_pack_fma_frexp(bi_instruction *ins, bi_registers *regs)
783 {
784 unsigned op = BIFROST_FMA_OP_FREXPE_LOG;
785 return bi_pack_fma_1src(ins, regs, op);
786 }
787
788 static unsigned
789 bi_pack_fma_reduce(bi_instruction *ins, bi_registers *regs)
790 {
791 if (ins->op.reduce == BI_REDUCE_ADD_FREXPM) {
792 return bi_pack_fma_2src(ins, regs, BIFROST_FMA_OP_ADD_FREXPM);
793 } else {
794 unreachable("Invalid reduce op");
795 }
796 }
797
798 /* We have a single convert opcode in the IR but a number of opcodes that could
799 * come out. In particular we have native opcodes for:
800 *
801 * [ui]16 --> [fui]32 -- int16_to_32
802 * f16 --> f32 -- float16_to_32
803 * f32 --> f16 -- float32_to_16
804 * f32 --> [ui]32 -- float32_to_int
805 * [ui]32 --> f32 -- int_to_float32
806 * [fui]16 --> [fui]16 -- f2i_i2f16
807 */
808
/* Packs a type conversion for either unit (FMA selected by the flag),
 * choosing among the native conversion opcodes listed above. */
static unsigned
bi_pack_convert(bi_instruction *ins, bi_registers *regs, bool FMA)
{
        nir_alu_type from_base = nir_alu_type_get_base_type(ins->src_types[0]);
        unsigned from_size = nir_alu_type_get_type_size(ins->src_types[0]);
        bool from_unsigned = from_base == nir_type_uint;

        nir_alu_type to_base = nir_alu_type_get_base_type(ins->dest_type);
        unsigned to_size = nir_alu_type_get_type_size(ins->dest_type);
        bool to_unsigned = to_base == nir_type_uint;
        bool to_float = to_base == nir_type_float;

        /* Sanity check */
        assert((from_base != to_base) || (from_size != to_size));
        assert((MAX2(from_size, to_size) / MIN2(from_size, to_size)) <= 2);

        /* f32 to f16 is special */
        if (from_size == 32 && to_size == 16 && from_base == nir_type_float && to_base == from_base) {
                /* TODO: second vectorized source? */
                struct bifrost_fma_2src pfma = {
                        .src0 = bi_get_src(ins, regs, 0),
                        .src1 = BIFROST_SRC_STAGE, /* 0 */
                        .op = BIFROST_FMA_FLOAT32_TO_16
                };

                struct bifrost_add_2src padd = {
                        .src0 = bi_get_src(ins, regs, 0),
                        .src1 = BIFROST_SRC_STAGE, /* 0 */
                        .op = BIFROST_ADD_FLOAT32_TO_16
                };

                if (FMA) {
                        RETURN_PACKED(pfma);
                } else {
                        RETURN_PACKED(padd);
                }
        }

        /* Otherwise, figure out the mode */
        unsigned op = 0;

        if (from_size == 16 && to_size == 32) {
                /* Widening conversions take a single component selector */
                unsigned component = ins->swizzle[0][0];
                assert(component <= 1);

                if (from_base == nir_type_float)
                        op = BIFROST_CONVERT_5(component);
                else
                        op = BIFROST_CONVERT_4(from_unsigned, component, to_float);
        } else {
                unsigned mode = 0;
                unsigned swizzle = (from_size == 16) ? bi_swiz16(ins, 0) : 0;
                bool is_unsigned = from_unsigned;

                if (from_base == nir_type_float) {
                        assert(to_base != nir_type_float);
                        /* float -> int: the signedness bit describes the
                         * integer side, i.e. the destination */
                        is_unsigned = to_unsigned;

                        if (from_size == 32 && to_size == 32)
                                mode = BIFROST_CONV_F32_TO_I32;
                        else if (from_size == 16 && to_size == 16)
                                mode = BIFROST_CONV_F16_TO_I16;
                        else
                                unreachable("Invalid float conversion");
                } else {
                        assert(to_base == nir_type_float);
                        assert(from_size == to_size);

                        if (to_size == 32)
                                mode = BIFROST_CONV_I32_TO_F32;
                        else if (to_size == 16)
                                mode = BIFROST_CONV_I16_TO_F16;
                        else
                                unreachable("Invalid int conversion");
                }

                /* Fixup swizzle for 32-bit only modes */

                if (mode == BIFROST_CONV_I32_TO_F32)
                        swizzle = 0b11;
                else if (mode == BIFROST_CONV_F32_TO_I32)
                        swizzle = 0b10;

                op = BIFROST_CONVERT(is_unsigned, ins->roundmode, swizzle, mode);

                /* Unclear what the top bit is for... maybe 16-bit related */
                bool mode2 = mode == BIFROST_CONV_F16_TO_I16;
                bool mode6 = mode == BIFROST_CONV_I16_TO_F16;

                if (!(mode2 || mode6))
                        op |= 0x100;
        }

        if (FMA)
                return bi_pack_fma_1src(ins, regs, BIFROST_FMA_CONVERT | op);
        else
                return bi_pack_add_1src(ins, regs, BIFROST_ADD_CONVERT | op);
}
907
/* Packs a vector select: SEL_16 for 16-bit lanes, SEL8 for 8-bit lanes.
 * The swizzle mask is built from the per-source swizzles. */
static unsigned
bi_pack_fma_select(bi_instruction *ins, bi_registers *regs)
{
        unsigned size = nir_alu_type_get_type_size(ins->src_types[0]);

        if (size == 16) {
                unsigned swiz = (ins->swizzle[0][0] | (ins->swizzle[1][0] << 1));
                unsigned op = BIFROST_FMA_SEL_16(swiz);
                return bi_pack_fma_2src(ins, regs, op);
        } else if (size == 8) {
                unsigned swiz = 0;

                for (unsigned c = 0; c < 4; ++c) {
                        if (ins->swizzle[c][0]) {
                                /* Ensure lowering restriction is met */
                                assert(ins->swizzle[c][0] == 2);
                                swiz |= (1 << c);
                        }
                }

                struct bifrost_fma_sel8 pack = {
                        .src0 = bi_get_src(ins, regs, 0),
                        .src1 = bi_get_src(ins, regs, 1),
                        .src2 = bi_get_src(ins, regs, 2),
                        .src3 = bi_get_src(ins, regs, 3),
                        .swizzle = swiz,
                        .op = BIFROST_FMA_OP_SEL8
                };

                RETURN_PACKED(pack);
        } else {
                unreachable("Unimplemented");
        }
}
942
943 static enum bifrost_fcmp_cond
944 bi_fcmp_cond(enum bi_cond cond)
945 {
946 switch (cond) {
947 case BI_COND_LT: return BIFROST_OLT;
948 case BI_COND_LE: return BIFROST_OLE;
949 case BI_COND_GE: return BIFROST_OGE;
950 case BI_COND_GT: return BIFROST_OGT;
951 case BI_COND_EQ: return BIFROST_OEQ;
952 case BI_COND_NE: return BIFROST_UNE;
953 default: unreachable("Unknown bi_cond");
954 }
955 }
956
957 /* a <?> b <==> b <flip(?)> a (TODO: NaN behaviour?) */
958
959 static enum bifrost_fcmp_cond
960 bi_flip_fcmp(enum bifrost_fcmp_cond cond)
961 {
962 switch (cond) {
963 case BIFROST_OGT:
964 return BIFROST_OLT;
965 case BIFROST_OGE:
966 return BIFROST_OLE;
967 case BIFROST_OLT:
968 return BIFROST_OGT;
969 case BIFROST_OLE:
970 return BIFROST_OGE;
971 case BIFROST_OEQ:
972 case BIFROST_UNE:
973 return cond;
974 default:
975 unreachable("Unknown fcmp cond");
976 }
977 }
978
/* Packs a float comparison on the FMA unit, for fp32 (possibly with neg
 * folding, since only src1 carries a negate bit) or fp16 (using the fp16
 * abs ordering trick). */
static unsigned
bi_pack_fma_cmp(bi_instruction *ins, bi_registers *regs)
{
        nir_alu_type Tl = ins->src_types[0];
        nir_alu_type Tr = ins->src_types[1];

        if (Tl == nir_type_float32 || Tr == nir_type_float32) {
                /* TODO: Mixed 32/16 cmp */
                assert(Tl == Tr);

                enum bifrost_fcmp_cond cond = bi_fcmp_cond(ins->cond);

                /* Only src1 has neg, so we arrange:
                 *      a < b   --- native
                 *      a < -b  --- native
                 *      -a < -b <===> a > b
                 *      -a < b  <===> a > -b
                 * TODO: Is this NaN-precise?
                 */

                bool flip = ins->src_neg[0];
                bool neg =  ins->src_neg[0] ^ ins->src_neg[1];

                if (flip)
                        cond = bi_flip_fcmp(cond);

                struct bifrost_fma_fcmp pack = {
                        .src0 = bi_get_src(ins, regs, 0),
                        .src1 = bi_get_src(ins, regs, 1),
                        .src0_abs = ins->src_abs[0],
                        .src1_abs = ins->src_abs[1],
                        .src1_neg = neg,
                        .src_expand = 0,
                        .unk1 = 0,
                        .cond = cond,
                        .op = BIFROST_FMA_OP_FCMP_GL
                };

                RETURN_PACKED(pack);
        } else if (Tl == nir_type_float16 && Tr == nir_type_float16) {
                bool flip = false;
                bool l = bi_pack_fp16_abs(ins, regs, &flip);
                enum bifrost_fcmp_cond cond = bi_fcmp_cond(ins->cond);

                /* Comparisons are not commutative, so flip the condition to
                 * match the operand order chosen by the abs encoding */
                if (flip)
                        cond = bi_flip_fcmp(cond);

                struct bifrost_fma_fcmp16 pack = {
                        .src0 = bi_get_src(ins, regs, flip ? 1 : 0),
                        .src1 = bi_get_src(ins, regs, flip ? 0 : 1),
                        .src0_swizzle = bi_swiz16(ins, flip ? 1 : 0),
                        .src1_swizzle = bi_swiz16(ins, flip ? 0 : 1),
                        .abs1 = l,
                        .unk = 0,
                        .cond = cond,
                        .op = BIFROST_FMA_OP_FCMP_GL_16,
                };

                RETURN_PACKED(pack);
        } else {
                unreachable("Unknown cmp type");
        }
}
1042
1043 static unsigned
1044 bi_fma_bitwise_op(enum bi_bitwise_op op, bool rshift)
1045 {
1046 switch (op) {
1047 case BI_BITWISE_OR:
1048 /* Via De Morgan's */
1049 return rshift ?
1050 BIFROST_FMA_OP_RSHIFT_NAND :
1051 BIFROST_FMA_OP_LSHIFT_NAND;
1052 case BI_BITWISE_AND:
1053 return rshift ?
1054 BIFROST_FMA_OP_RSHIFT_AND :
1055 BIFROST_FMA_OP_LSHIFT_AND;
1056 case BI_BITWISE_XOR:
1057 /* Shift direction handled out of band */
1058 return BIFROST_FMA_OP_RSHIFT_XOR;
1059 default:
1060 unreachable("Unknown op");
1061 }
1062 }
1063
/* Packs a shift-combined bitwise op on the FMA unit. Source inversion bits
 * are massaged to match the hardware op chosen by bi_fma_bitwise_op. */
static unsigned
bi_pack_fma_bitwise(bi_instruction *ins, bi_registers *regs)
{
        unsigned size = nir_alu_type_get_type_size(ins->dest_type);
        assert(size <= 32);

        bool invert_0 = ins->bitwise.src_invert[0];
        bool invert_1 = ins->bitwise.src_invert[1];

        if (ins->op.bitwise == BI_BITWISE_OR) {
                /* Becomes NAND, so via De Morgan's:
                 *      f(A) | f(B) = ~(~f(A) & ~f(B))
                 *                  = NAND(~f(A), ~f(B))
                 */

                invert_0 = !invert_0;
                invert_1 = !invert_1;
        } else if (ins->op.bitwise == BI_BITWISE_XOR) {
                /* ~A ^ ~B = ~(A ^ ~B) = ~(~(A ^ B)) = A ^ B
                 *  ~A ^ B = ~(A ^ B) = A ^ ~B
                 */

                invert_0 ^= invert_1;
                invert_1 = false;
                /* NOTE(review): the store above is immediately dead — for
                 * XOR the bit is repurposed below as the shift direction */

                /* invert_1 ends up specifying shift direction */
                invert_1 = !ins->bitwise.rshift;
        }

        struct bifrost_shift_fma pack = {
                .src0 = bi_get_src(ins, regs, 0),
                .src1 = bi_get_src(ins, regs, 1),
                .src2 = bi_get_src(ins, regs, 2),
                .half = (size == 32) ? 0 : (size == 16) ? 0x7 : (size == 8) ? 0x4 : 0,
                .unk = 1, /* XXX */
                .invert_1 = invert_0,
                .invert_2 = invert_1,
                .op = bi_fma_bitwise_op(ins->op.bitwise, ins->bitwise.rshift)
        };

        RETURN_PACKED(pack);
}
1106
1107 static unsigned
1108 bi_pack_fma_round(bi_instruction *ins, bi_registers *regs)
1109 {
1110 bool fp16 = ins->dest_type == nir_type_float16;
1111 assert(fp16 || ins->dest_type == nir_type_float32);
1112
1113 unsigned op = fp16
1114 ? BIFROST_FMA_ROUND_16(ins->roundmode, bi_swiz16(ins, 0))
1115 : BIFROST_FMA_ROUND_32(ins->roundmode);
1116
1117 return bi_pack_fma_1src(ins, regs, op);
1118 }
1119
1120 static unsigned
1121 bi_pack_fma_imath(bi_instruction *ins, bi_registers *regs)
1122 {
1123 /* Scheduler: only ADD can have 8/16-bit imath */
1124 assert(ins->dest_type == nir_type_int32 || ins->dest_type == nir_type_uint32);
1125
1126 unsigned op = ins->op.imath == BI_IMATH_ADD
1127 ? BIFROST_FMA_IADD_32
1128 : BIFROST_FMA_ISUB_32;
1129
1130 return bi_pack_fma_2src(ins, regs, op);
1131 }
1132
1133 static unsigned
1134 bi_pack_fma(bi_clause *clause, bi_bundle bundle, bi_registers *regs)
1135 {
1136 if (!bundle.fma)
1137 return BIFROST_FMA_NOP;
1138
1139 switch (bundle.fma->type) {
1140 case BI_ADD:
1141 return bi_pack_fma_addmin(bundle.fma, regs);
1142 case BI_CMP:
1143 return bi_pack_fma_cmp(bundle.fma, regs);
1144 case BI_BITWISE:
1145 return bi_pack_fma_bitwise(bundle.fma, regs);
1146 case BI_CONVERT:
1147 return bi_pack_convert(bundle.fma, regs, true);
1148 case BI_CSEL:
1149 return bi_pack_fma_csel(bundle.fma, regs);
1150 case BI_FMA:
1151 return bi_pack_fma_fma(bundle.fma, regs);
1152 case BI_FREXP:
1153 return bi_pack_fma_frexp(bundle.fma, regs);
1154 case BI_IMATH:
1155 return bi_pack_fma_imath(bundle.fma, regs);
1156 case BI_MINMAX:
1157 return bi_pack_fma_addmin(bundle.fma, regs);
1158 case BI_MOV:
1159 return bi_pack_fma_1src(bundle.fma, regs, BIFROST_FMA_OP_MOV);
1160 case BI_SHIFT:
1161 unreachable("Packing todo");
1162 case BI_SELECT:
1163 return bi_pack_fma_select(bundle.fma, regs);
1164 case BI_ROUND:
1165 return bi_pack_fma_round(bundle.fma, regs);
1166 case BI_REDUCE_FMA:
1167 return bi_pack_fma_reduce(bundle.fma, regs);
1168 default:
1169 unreachable("Cannot encode class as FMA");
1170 }
1171 }
1172
/* Packs a varying load (LD_VAR) on the ADD unit. Direct varyings put the
 * location in the address field; indirect varyings read it from a source and
 * tag the address with 0b11000. The result lands in the clause's data
 * register rather than flowing through the normal port machinery. */
static unsigned
bi_pack_add_ld_vary(bi_clause *clause, bi_instruction *ins, bi_registers *regs)
{
        unsigned size = nir_alu_type_get_type_size(ins->dest_type);
        assert(size == 32 || size == 16);

        unsigned op = (size == 32) ?
                BIFROST_ADD_OP_LD_VAR_32 :
                BIFROST_ADD_OP_LD_VAR_16;

        unsigned packed_addr = 0;

        if (ins->src[0] & BIR_INDEX_CONSTANT) {
                /* Direct uses address field directly */
                packed_addr = bi_get_immediate(ins, 0);
        } else {
                /* Indirect gets an extra source */
                packed_addr = bi_get_src(ins, regs, 0) | 0b11000;
        }

        /* The destination is thrown in the data register */
        assert(ins->dest & BIR_INDEX_REGISTER);
        clause->data_register = ins->dest & ~BIR_INDEX_REGISTER;

        unsigned channels = ins->vector_channels;
        assert(channels >= 1 && channels <= 4);

        struct bifrost_ld_var pack = {
                .src0 = bi_get_src(ins, regs, 1),
                .addr = packed_addr,
                .channels = MALI_POSITIVE(channels),
                .interp_mode = ins->load_vary.interp_mode,
                .reuse = ins->load_vary.reuse,
                .flat = ins->load_vary.flat,
                .op = op
        };

        RETURN_PACKED(pack);
}
1212
1213 static unsigned
1214 bi_pack_add_2src(bi_instruction *ins, bi_registers *regs, unsigned op)
1215 {
1216 struct bifrost_add_2src pack = {
1217 .src0 = bi_get_src(ins, regs, 0),
1218 .src1 = bi_get_src(ins, regs, 1),
1219 .op = op
1220 };
1221
1222 RETURN_PACKED(pack);
1223 }
1224
/* Packs fp32 FADD/FMIN/FMAX on the ADD unit. Note the shared `mode` field:
 * it carries the rounding mode for FADD but the min/max tiebreak mode for
 * FMIN/FMAX. */
static unsigned
bi_pack_add_addmin_f32(bi_instruction *ins, bi_registers *regs)
{
        unsigned op =
                (ins->type == BI_ADD) ? BIFROST_ADD_OP_FADD32 :
                (ins->op.minmax == BI_MINMAX_MIN) ? BIFROST_ADD_OP_FMIN32 :
                BIFROST_ADD_OP_FMAX32;

        struct bifrost_add_faddmin pack = {
                .src0 = bi_get_src(ins, regs, 0),
                .src1 = bi_get_src(ins, regs, 1),
                .src0_abs = ins->src_abs[0],
                .src1_abs = ins->src_abs[1],
                .src0_neg = ins->src_neg[0],
                .src1_neg = ins->src_neg[1],
                .outmod = ins->outmod,
                /* mode doubles as roundmode (ADD) or minmax mode (MIN/MAX) */
                .mode = (ins->type == BI_ADD) ? ins->roundmode : ins->minmax,
                .op = op
        };

        RETURN_PACKED(pack);
}
1247
/* Packs v2f16 FADD on the ADD unit. The struct fields `select` and `outmod`
 * are repurposed by this encoding to carry the two source swizzles (hence
 * the assert that no output modifier is requested). */
static unsigned
bi_pack_add_add_f16(bi_instruction *ins, bi_registers *regs)
{
        /* ADD.v2f16 can't have outmod */
        assert(ins->outmod == BIFROST_NONE);

        struct bifrost_add_faddmin pack = {
                .src0 = bi_get_src(ins, regs, 0),
                .src1 = bi_get_src(ins, regs, 1),
                .src0_abs = ins->src_abs[0],
                .src1_abs = ins->src_abs[1],
                .src0_neg = ins->src_neg[0],
                .src1_neg = ins->src_neg[1],
                .select = bi_swiz16(ins, 0), /* swizzle_0 */
                .outmod = bi_swiz16(ins, 1), /* swizzle_1 */
                .mode = ins->roundmode,
                .op = BIFROST_ADD_OP_FADD16
        };

        RETURN_PACKED(pack);
}
1269
1270 static unsigned
1271 bi_pack_add_addmin(bi_instruction *ins, bi_registers *regs)
1272 {
1273 if (ins->dest_type == nir_type_float32)
1274 return bi_pack_add_addmin_f32(ins, regs);
1275 else if (ins->dest_type == nir_type_float16) {
1276 if (ins->type == BI_ADD)
1277 return bi_pack_add_add_f16(ins, regs);
1278 else
1279 return bi_pack_fmadd_min_f16(ins, regs, false);
1280 } else
1281 unreachable("Unknown FMA/ADD type");
1282 }
1283
1284 static unsigned
1285 bi_pack_add_ld_ubo(bi_clause *clause, bi_instruction *ins, bi_registers *regs)
1286 {
1287 assert(ins->vector_channels >= 1 && ins->vector_channels <= 4);
1288
1289 const unsigned ops[4] = {
1290 BIFROST_ADD_OP_LD_UBO_1,
1291 BIFROST_ADD_OP_LD_UBO_2,
1292 BIFROST_ADD_OP_LD_UBO_3,
1293 BIFROST_ADD_OP_LD_UBO_4
1294 };
1295
1296 bi_write_data_register(clause, ins);
1297 return bi_pack_add_2src(ins, regs, ops[ins->vector_channels - 1]);
1298 }
1299
1300 static enum bifrost_ldst_type
1301 bi_pack_ldst_type(nir_alu_type T)
1302 {
1303 switch (T) {
1304 case nir_type_float16: return BIFROST_LDST_F16;
1305 case nir_type_float32: return BIFROST_LDST_F32;
1306 case nir_type_int32: return BIFROST_LDST_I32;
1307 case nir_type_uint32: return BIFROST_LDST_U32;
1308 default: unreachable("Invalid type loaded");
1309 }
1310 }
1311
/* Packs LD_VAR_ADDR on the ADD unit: computes a varying address from two
 * sources plus an immediate location (src 0). The load/store type comes from
 * src slot 3's type; the result is written via the data register. */
static unsigned
bi_pack_add_ld_var_addr(bi_clause *clause, bi_instruction *ins, bi_registers *regs)
{
        struct bifrost_ld_var_addr pack = {
                .src0 = bi_get_src(ins, regs, 1),
                .src1 = bi_get_src(ins, regs, 2),
                .location = bi_get_immediate(ins, 0),
                .type = bi_pack_ldst_type(ins->src_types[3]),
                .op = BIFROST_ADD_OP_LD_VAR_ADDR
        };

        bi_write_data_register(clause, ins);
        RETURN_PACKED(pack);
}
1326
1327 static unsigned
1328 bi_pack_add_ld_attr(bi_clause *clause, bi_instruction *ins, bi_registers *regs)
1329 {
1330 assert(ins->vector_channels >= 0 && ins->vector_channels <= 4);
1331
1332 struct bifrost_ld_attr pack = {
1333 .src0 = bi_get_src(ins, regs, 1),
1334 .src1 = bi_get_src(ins, regs, 2),
1335 .location = bi_get_immediate(ins, 0),
1336 .channels = MALI_POSITIVE(ins->vector_channels),
1337 .type = bi_pack_ldst_type(ins->dest_type),
1338 .op = BIFROST_ADD_OP_LD_ATTR
1339 };
1340
1341 bi_write_data_register(clause, ins);
1342 RETURN_PACKED(pack);
1343 }
1344
/* Packs a varying store (ST_VAR) on the ADD unit. Sources 1-3 carry address
 * components; the value to store is read through the data register. */
static unsigned
bi_pack_add_st_vary(bi_clause *clause, bi_instruction *ins, bi_registers *regs)
{
        assert(ins->vector_channels >= 1 && ins->vector_channels <= 4);

        struct bifrost_st_vary pack = {
                .src0 = bi_get_src(ins, regs, 1),
                .src1 = bi_get_src(ins, regs, 2),
                .src2 = bi_get_src(ins, regs, 3),
                .channels = MALI_POSITIVE(ins->vector_channels),
                .op = BIFROST_ADD_OP_ST_VAR
        };

        bi_read_data_register(clause, ins);
        RETURN_PACKED(pack);
}
1361
/* Packs ATEST (alpha test) on the ADD unit. For fp16 inputs the half-lane is
 * selected from the swizzle; for fp32 the component field is fixed to 1. The
 * result is additionally mirrored into the clause data register. */
static unsigned
bi_pack_add_atest(bi_clause *clause, bi_instruction *ins, bi_registers *regs)
{
        bool fp16 = (ins->src_types[1] == nir_type_float16);

        struct bifrost_add_atest pack = {
                .src0 = bi_get_src(ins, regs, 0),
                .src1 = bi_get_src(ins, regs, 1),
                .half = fp16,
                .component = fp16 ? ins->swizzle[1][0] : 1, /* Set for fp32 */
                .op = BIFROST_ADD_OP_ATEST,
        };

        /* Despite *also* writing with the usual mechanism... quirky and
         * perhaps unnecessary, but let's match the blob */
        clause->data_register = ins->dest & ~BIR_INDEX_REGISTER;

        RETURN_PACKED(pack);
}
1381
/* Packs BLEND on the ADD unit. The color value is read through the data
 * register; only render target 0 is supported until the location is packed
 * into the uniform/constant field. */
static unsigned
bi_pack_add_blend(bi_clause *clause, bi_instruction *ins, bi_registers *regs)
{
        struct bifrost_add_inst pack = {
                .src0 = bi_get_src(ins, regs, 1),
                .op = BIFROST_ADD_OP_BLEND
        };

        /* TODO: Pack location in uniform_const */
        assert(ins->blend_location == 0);

        bi_read_data_register(clause, ins);
        RETURN_PACKED(pack);
}
1396
1397 static unsigned
1398 bi_pack_add_special(bi_instruction *ins, bi_registers *regs)
1399 {
1400 unsigned op = 0;
1401 bool fp16 = ins->dest_type == nir_type_float16;
1402 bool Y = ins->swizzle[0][0];
1403
1404 if (ins->op.special == BI_SPECIAL_FRCP) {
1405 op = fp16 ?
1406 (Y ? BIFROST_ADD_OP_FRCP_FAST_F16_Y :
1407 BIFROST_ADD_OP_FRCP_FAST_F16_X) :
1408 BIFROST_ADD_OP_FRCP_FAST_F32;
1409 } else if (ins->op.special == BI_SPECIAL_FRSQ) {
1410 op = fp16 ?
1411 (Y ? BIFROST_ADD_OP_FRSQ_FAST_F16_Y :
1412 BIFROST_ADD_OP_FRSQ_FAST_F16_X) :
1413 BIFROST_ADD_OP_FRSQ_FAST_F32;
1414
1415 } else if (ins->op.special == BI_SPECIAL_EXP2_LOW) {
1416 assert(!fp16);
1417 op = BIFROST_ADD_OP_FEXP2_FAST;
1418 } else {
1419 unreachable("Unknown special op");
1420 }
1421
1422 return bi_pack_add_1src(ins, regs, op);
1423 }
1424
1425 static unsigned
1426 bi_pack_add_table(bi_instruction *ins, bi_registers *regs)
1427 {
1428 unsigned op = 0;
1429 assert(ins->dest_type == nir_type_float32);
1430
1431 op = BIFROST_ADD_OP_LOG2_HELP;
1432 return bi_pack_add_1src(ins, regs, op);
1433 }
/* Packs a compact texture op on the ADD unit. Fragment shaders compute the
 * LOD implicitly; other stages (vertex, etc.) use the "vtx" opcode variant
 * with no LOD computation. The result goes through the data register. */
static unsigned
bi_pack_add_tex_compact(bi_clause *clause, bi_instruction *ins, bi_registers *regs, gl_shader_stage stage)
{
        bool f16 = ins->dest_type == nir_type_float16;
        bool vtx = stage != MESA_SHADER_FRAGMENT;

        struct bifrost_tex_compact pack = {
                .src0 = bi_get_src(ins, regs, 0),
                .src1 = bi_get_src(ins, regs, 1),
                .op = f16 ? BIFROST_ADD_OP_TEX_COMPACT_F16(vtx) :
                        BIFROST_ADD_OP_TEX_COMPACT_F32(vtx),
                .compute_lod = !vtx,
                .tex_index = ins->texture.texture_index,
                .sampler_index = ins->texture.sampler_index
        };

        bi_write_data_register(clause, ins);
        RETURN_PACKED(pack);
}
1453
1454 static unsigned
1455 bi_pack_add_select(bi_instruction *ins, bi_registers *regs)
1456 {
1457 unsigned size = nir_alu_type_get_type_size(ins->src_types[0]);
1458 assert(size == 16);
1459
1460 unsigned swiz = (ins->swizzle[0][0] | (ins->swizzle[1][0] << 1));
1461 unsigned op = BIFROST_ADD_SEL_16(swiz);
1462 return bi_pack_add_2src(ins, regs, op);
1463 }
1464
1465 static enum bifrost_discard_cond
1466 bi_cond_to_discard(enum bi_cond cond, bool *flip)
1467 {
1468 switch (cond){
1469 case BI_COND_GT:
1470 *flip = true;
1471 /* fallthrough */
1472 case BI_COND_LT:
1473 return BIFROST_DISCARD_FLT;
1474 case BI_COND_GE:
1475 *flip = true;
1476 /* fallthrough */
1477 case BI_COND_LE:
1478 return BIFROST_DISCARD_FLE;
1479 case BI_COND_NE:
1480 return BIFROST_DISCARD_FNE;
1481 case BI_COND_EQ:
1482 return BIFROST_DISCARD_FEQ;
1483 default:
1484 unreachable("Invalid op for discard");
1485 }
1486 }
1487
/* Packs a conditional DISCARD on the ADD unit. When bi_cond_to_discard asks
 * for a flip, the two sources (and their fp16 lane selects) are swapped to
 * express GT/GE through the FLT/FLE hardware conditions. */
static unsigned
bi_pack_add_discard(bi_instruction *ins, bi_registers *regs)
{
        bool fp16 = ins->src_types[0] == nir_type_float16;
        assert(fp16 || ins->src_types[0] == nir_type_float32);

        bool flip = false;
        enum bifrost_discard_cond cond = bi_cond_to_discard(ins->cond, &flip);

        struct bifrost_add_discard pack = {
                .src0 = bi_get_src(ins, regs, flip ? 1 : 0),
                .src1 = bi_get_src(ins, regs, flip ? 0 : 1),
                .cond = cond,
                .src0_select = fp16 ? ins->swizzle[0][0] : 0,
                .src1_select = fp16 ? ins->swizzle[1][0] : 0,
                .fp32 = fp16 ? 0 : 1,
                .op = BIFROST_ADD_OP_DISCARD
        };

        RETURN_PACKED(pack);
}
1509
/* Translates an IR condition into a hardware integer-compare condition.
 * LT/LE are expressed via GT/GE with the operands swapped (*flip set).
 *
 * NOTE(review): the 16-bit unsigned mappings look asymmetric -- GT yields
 * IGE and GE yields UGT, the opposite pairing of the 32-bit path. This may
 * model a quirk of the 16-bit ICMP opcode encodings (cf. the commented-out
 * bi_invert_cond in bi_pack_add_cmp), but it is worth confirming against
 * hardware before relying on it. */
static enum bifrost_icmp_cond
bi_cond_to_icmp(enum bi_cond cond, bool *flip, bool is_unsigned, bool is_16)
{
        switch (cond){
        case BI_COND_LT:
                *flip = true;
                /* fallthrough */
        case BI_COND_GT:
                return is_unsigned ? (is_16 ? BIFROST_ICMP_IGE : BIFROST_ICMP_UGT)
                        : BIFROST_ICMP_IGT;
        case BI_COND_LE:
                *flip = true;
                /* fallthrough */
        case BI_COND_GE:
                return is_unsigned ? BIFROST_ICMP_UGE :
                        (is_16 ? BIFROST_ICMP_UGT : BIFROST_ICMP_IGE);
        case BI_COND_NE:
                return BIFROST_ICMP_NEQ;
        case BI_COND_EQ:
                return BIFROST_ICMP_EQ;
        default:
                unreachable("Invalid op for icmp");
        }
}
1534
/* Packs a 32-bit integer compare on the ADD unit, swapping the sources when
 * the condition translation requested a flip. */
static unsigned
bi_pack_add_icmp32(bi_instruction *ins, bi_registers *regs, bool flip,
                enum bifrost_icmp_cond cond)
{
        struct bifrost_add_icmp pack = {
                .src0 = bi_get_src(ins, regs, flip ? 1 : 0),
                .src1 = bi_get_src(ins, regs, flip ? 0 : 1),
                .cond = cond,
                .sz = 1, /* 1 selects 32-bit in this encoding */
                .d3d = false,
                .op = BIFROST_ADD_OP_ICMP_32
        };

        RETURN_PACKED(pack);
}
1550
/* Packs a 16-bit integer compare on the ADD unit. Sources and their swizzles
 * are swapped together when the condition translation requested a flip. */
static unsigned
bi_pack_add_icmp16(bi_instruction *ins, bi_registers *regs, bool flip,
                enum bifrost_icmp_cond cond)
{
        struct bifrost_add_icmp16 pack = {
                .src0 = bi_get_src(ins, regs, flip ? 1 : 0),
                .src1 = bi_get_src(ins, regs, flip ? 0 : 1),
                .src0_swizzle = bi_swiz16(ins, flip ? 1 : 0),
                .src1_swizzle = bi_swiz16(ins, flip ? 0 : 1),
                .cond = cond,
                .d3d = false,
                .op = BIFROST_ADD_OP_ICMP_16
        };

        RETURN_PACKED(pack);
}
1567
/* Packs a compare on the ADD unit, dispatching on source base type and size.
 * Only integer compares are implemented so far. */
static unsigned
bi_pack_add_cmp(bi_instruction *ins, bi_registers *regs)
{
        nir_alu_type Tl = ins->src_types[0];
        nir_alu_type Tr = ins->src_types[1];
        nir_alu_type Bl = nir_alu_type_get_base_type(Tl);

        if (Bl == nir_type_uint || Bl == nir_type_int) {
                /* Mixed-size integer compares are not supported */
                assert(Tl == Tr);
                unsigned sz = nir_alu_type_get_type_size(Tl);

                bool flip = false;

                /* NOTE(review): the 16-bit path appears to have once
                 * inverted the condition (bi_invert_cond is commented out);
                 * presumably the quirk is now folded into bi_cond_to_icmp's
                 * 16-bit mappings -- confirm before touching either. */
                enum bifrost_icmp_cond cond = bi_cond_to_icmp(
                                sz == 16 ? /*bi_invert_cond*/(ins->cond) : ins->cond,
                                &flip, Bl == nir_type_uint, sz == 16);

                if (sz == 32)
                        return bi_pack_add_icmp32(ins, regs, flip, cond);
                else if (sz == 16)
                        return bi_pack_add_icmp16(ins, regs, flip, cond);
                else
                        unreachable("TODO");
        } else {
                unreachable("TODO");
        }
}
1595
1596 static unsigned
1597 bi_pack_add_imath(bi_instruction *ins, bi_registers *regs)
1598 {
1599 /* TODO: 32+16 add */
1600 assert(ins->src_types[0] == ins->src_types[1]);
1601 unsigned sz = nir_alu_type_get_type_size(ins->src_types[0]);
1602 enum bi_imath_op p = ins->op.imath;
1603
1604 unsigned op = 0;
1605
1606 if (sz == 8) {
1607 op = (p == BI_IMATH_ADD) ? BIFROST_ADD_IADD_8 :
1608 BIFROST_ADD_ISUB_8;
1609 } else if (sz == 16) {
1610 op = (p == BI_IMATH_ADD) ? BIFROST_ADD_IADD_16 :
1611 BIFROST_ADD_ISUB_16;
1612 } else if (sz == 32) {
1613 op = (p == BI_IMATH_ADD) ? BIFROST_ADD_IADD_32 :
1614 BIFROST_ADD_ISUB_32;
1615 } else {
1616 unreachable("64-bit todo");
1617 }
1618
1619 return bi_pack_add_2src(ins, regs, op);
1620 }
1621
1622 static unsigned
1623 bi_pack_add(bi_clause *clause, bi_bundle bundle, bi_registers *regs, gl_shader_stage stage)
1624 {
1625 if (!bundle.add)
1626 return BIFROST_ADD_NOP;
1627
1628 switch (bundle.add->type) {
1629 case BI_ADD:
1630 return bi_pack_add_addmin(bundle.add, regs);
1631 case BI_ATEST:
1632 return bi_pack_add_atest(clause, bundle.add, regs);
1633 case BI_BRANCH:
1634 unreachable("Packing todo");
1635 case BI_CMP:
1636 return bi_pack_add_cmp(bundle.add, regs);
1637 case BI_BLEND:
1638 return bi_pack_add_blend(clause, bundle.add, regs);
1639 case BI_BITWISE:
1640 unreachable("Packing todo");
1641 case BI_CONVERT:
1642 return bi_pack_convert(bundle.add, regs, false);
1643 case BI_DISCARD:
1644 return bi_pack_add_discard(bundle.add, regs);
1645 case BI_FREXP:
1646 unreachable("Packing todo");
1647 case BI_IMATH:
1648 return bi_pack_add_imath(bundle.add, regs);
1649 case BI_LOAD:
1650 unreachable("Packing todo");
1651 case BI_LOAD_ATTR:
1652 return bi_pack_add_ld_attr(clause, bundle.add, regs);
1653 case BI_LOAD_UNIFORM:
1654 return bi_pack_add_ld_ubo(clause, bundle.add, regs);
1655 case BI_LOAD_VAR:
1656 return bi_pack_add_ld_vary(clause, bundle.add, regs);
1657 case BI_LOAD_VAR_ADDRESS:
1658 return bi_pack_add_ld_var_addr(clause, bundle.add, regs);
1659 case BI_MINMAX:
1660 return bi_pack_add_addmin(bundle.add, regs);
1661 case BI_MOV:
1662 case BI_SHIFT:
1663 case BI_STORE:
1664 unreachable("Packing todo");
1665 case BI_STORE_VAR:
1666 return bi_pack_add_st_vary(clause, bundle.add, regs);
1667 case BI_SPECIAL:
1668 return bi_pack_add_special(bundle.add, regs);
1669 case BI_TABLE:
1670 return bi_pack_add_table(bundle.add, regs);
1671 case BI_SELECT:
1672 return bi_pack_add_select(bundle.add, regs);
1673 case BI_TEX:
1674 if (bundle.add->op.texture == BI_TEX_COMPACT)
1675 return bi_pack_add_tex_compact(clause, bundle.add, regs, stage);
1676 else
1677 unreachable("Unknown tex type");
1678 case BI_ROUND:
1679 unreachable("Packing todo");
1680 default:
1681 unreachable("Cannot encode class as ADD");
1682 }
1683 }
1684
/* Result of packing one bundle: the encoded bits split across two 64-bit
 * words. See bi_pack_bundle for the exact register/FMA/ADD bit layout. */
struct bi_packed_bundle {
        uint64_t lo;
        uint64_t hi;
};
1689
1690 /* We must ensure port 1 > port 0 for the 63-x trick to function, so we fix
1691 * this up at pack time. (Scheduling doesn't care.) */
1692
1693 static void
1694 bi_flip_ports(bi_registers *regs)
1695 {
1696 if (regs->enabled[0] && regs->enabled[1] && regs->port[1] < regs->port[0]) {
1697 unsigned temp = regs->port[0];
1698 regs->port[0] = regs->port[1];
1699 regs->port[1] = temp;
1700 }
1701
1702 }
1703
/* Packs one bundle: assigns ports and the uniform/constant field, normalizes
 * port ordering, then encodes the register block plus the FMA and ADD slots.
 * The encodings are split across two 64-bit words: registers in the low
 * bits, FMA starting at bit 35, and the ADD bits straddling the word
 * boundary (low 6 bits at the top of lo, the rest in hi). */
static struct bi_packed_bundle
bi_pack_bundle(bi_clause *clause, bi_bundle bundle, bi_bundle prev, bool first_bundle, gl_shader_stage stage)
{
        bi_assign_ports(&bundle, &prev);
        bi_assign_uniform_constant(clause, &bundle.regs, bundle);
        bundle.regs.first_instruction = first_bundle;

        bi_flip_ports(&bundle.regs);

        uint64_t reg = bi_pack_registers(bundle.regs);
        uint64_t fma = bi_pack_fma(clause, bundle, &bundle.regs);
        uint64_t add = bi_pack_add(clause, bundle, &bundle.regs, stage);

        struct bi_packed_bundle packed = {
                .lo = reg | (fma << 35) | ((add & 0b111111) << 58),
                .hi = add >> 6
        };

        return packed;
}
1724
1725 /* Packs the next two constants as a dedicated constant quadword at the end of
1726 * the clause, returning the number packed. There are two cases to consider:
1727 *
1728 * Case #1: Branching is not used. For a single constant copy the upper nibble
1729 * over, easy.
1730 *
1731 * Case #2: Branching is used. For a single constant, it suffices to set the
1732 * upper nibble to 4 and leave the latter constant 0, which matches what the
1733 * blob does.
1734 *
1735 * Extending to multiple constants is considerably more tricky and left for
1736 * future work.
1737 */
1738
/* Emits one constant quadword for the clause starting at constant `index`,
 * returning how many constant slots were consumed (always 2 here). See the
 * block comment above for the single-constant and branch cases; multiple
 * constants are not yet supported (asserted below). */
static unsigned
bi_pack_constants(bi_context *ctx, bi_clause *clause,
                unsigned index,
                struct util_dynarray *emission)
{
        /* After these two, are we done? Determines tag */
        bool done = clause->constant_count <= (index + 2);
        bool only = clause->constant_count <= (index + 1);

        /* Is the constant we're packing for a branch? */
        bool branches = clause->branch_constant && done;

        /* TODO: Pos */
        assert(index == 0 && clause->bundle_count == 1);
        assert(only);

        /* Top nibble of the constant, needed for the imm_2 workaround below */
        uint64_t hi = clause->constants[index + 0] >> 60ull;

        struct bifrost_fmt_constant quad = {
                .pos = 0, /* TODO */
                .tag = done ? BIFROST_FMTC_FINAL : BIFROST_FMTC_CONSTANTS,
                /* Constants are stored shifted right by their 4-bit tag */
                .imm_1 = clause->constants[index + 0] >> 4,
                .imm_2 = ((hi < 8) ? (hi << 60ull) : 0) >> 4,
        };

        if (branches) {
                /* Branch offsets are less than 60-bits so this should work at
                 * least for now */
                quad.imm_1 |= (4ull << 60ull) >> 4;
                assert (hi == 0);
        }

        /* XXX: On G71, Connor observed that the difference of the top 4 bits
         * of the second constant with the first must be less than 8, otherwise
         * we have to swap them. On G52, I'm able to reproduce a similar issue
         * but with a different workaround (modeled above with a single
         * constant, unclear how to workaround for multiple constants.) Further
         * investigation needed. Possibly an errata. XXX */

        util_dynarray_append(emission, struct bifrost_fmt_constant, quad);

        return 2;
}
1782
/* Packs a whole clause (currently limited to a single bundle, asserted
 * below): emits the header quadword with the bundle's bits distributed
 * across the ins_0/ins_1/ins_2 fields, then appends any constant
 * quadwords. `next` is needed so the header can flag end-of-shader. */
static void
bi_pack_clause(bi_context *ctx, bi_clause *clause, bi_clause *next,
                struct util_dynarray *emission, gl_shader_stage stage)
{
        struct bi_packed_bundle ins_1 = bi_pack_bundle(clause, clause->bundles[0], clause->bundles[0], true, stage);
        assert(clause->bundle_count == 1);

        /* Used to decide if we elide writes */
        bool is_fragment = ctx->stage == MESA_SHADER_FRAGMENT;

        /* State for packing constants throughout */
        unsigned constant_index = 0;

        struct bifrost_fmt1 quad_1 = {
                .tag = clause->constant_count ? BIFROST_FMT1_CONSTANTS : BIFROST_FMT1_FINAL,
                .header = bi_pack_header(clause, next, is_fragment),
                .ins_1 = ins_1.lo,
                /* The bundle's hi word is split: low 11 bits into ins_2,
                 * the next 3 bits into ins_0 */
                .ins_2 = ins_1.hi & ((1 << 11) - 1),
                .ins_0 = (ins_1.hi >> 11) & 0b111,
        };

        util_dynarray_append(emission, struct bifrost_fmt1, quad_1);

        /* Pack the remaining constants */

        while (constant_index < clause->constant_count) {
                constant_index += bi_pack_constants(ctx, clause,
                                constant_index, emission);
        }
}
1813
/* Returns the clause that will execute after `clause` in program order:
 * first the next clause within `block`, otherwise the first clause of the
 * next non-empty block. Returns NULL at the end of the shader (the caller
 * uses this to set the end-of-shader header bit). */
static bi_clause *
bi_next_clause(bi_context *ctx, pan_block *block, bi_clause *clause)
{
        /* Try the next clause in this block */
        if (clause->link.next != &((bi_block *) block)->clauses)
                return list_first_entry(&(clause->link), bi_clause, link);

        /* Try the next block, or the one after that if it's empty, etc. */
        pan_block *next_block = pan_next_block(block);

        bi_foreach_block_from(ctx, next_block, block) {
                bi_block *blk = (bi_block *) block;

                if (!list_is_empty(&blk->clauses))
                        return list_first_entry(&(blk->clauses), bi_clause, link);
        }

        return NULL;
}
1833
/* Top-level entry point of this pass: walks every clause of every block in
 * emission order and appends the packed machine code to `emission`, which
 * is (re)initialized here. */
void
bi_pack(bi_context *ctx, struct util_dynarray *emission)
{
        util_dynarray_init(emission, NULL);

        bi_foreach_block(ctx, _block) {
                bi_block *block = (bi_block *) _block;

                bi_foreach_clause_in_block(block, clause) {
                        /* Each clause needs its successor to know whether it
                         * ends the shader */
                        bi_clause *next = bi_next_clause(ctx, _block, clause);
                        bi_pack_clause(ctx, clause, next, emission, ctx->stage);
                }
        }
}