pan/bi: Pack branch offset constants
[mesa.git] / src / panfrost / bifrost / bi_pack.c
1 /*
2 * Copyright (C) 2020 Collabora, Ltd.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 */
23
24 #include "compiler.h"
25 #include "bi_print.h"
26
27 #define RETURN_PACKED(str) { \
28 uint64_t temp = 0; \
29 memcpy(&temp, &str, sizeof(str)); \
30 return temp; \
31 }
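
/* Editor's note: RETURN_PACKED bit-casts a packed struct of at most 64 bits
 * into the low bits of a zero-initialized uint64_t via memcpy, which sidesteps
 * the strict-aliasing and alignment pitfalls of a pointer cast (assuming a
 * little-endian host, which the rest of this packing code also relies on). */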
32
33 /* This file contains the final passes of the compiler. Running after
34 * scheduling and RA, the IR is now finalized, so we need to emit it to actual
35 * bits on the wire (as well as fix up branches) */
36
37 static uint64_t
38 bi_pack_header(bi_clause *clause, bi_clause *next, bool is_fragment)
39 {
40 struct bifrost_header header = {
41 .back_to_back = clause->back_to_back,
42 .no_end_of_shader = (next != NULL),
43 .elide_writes = is_fragment,
44 .branch_cond = clause->branch_conditional,
45 .datareg_writebarrier = clause->data_register_write_barrier,
46 .datareg = clause->data_register,
47 .scoreboard_deps = next ? next->dependencies : 0,
48 .scoreboard_index = clause->scoreboard_id,
49 .clause_type = clause->clause_type,
50 .next_clause_type = next ? next->clause_type : 0,
51 .suppress_inf = true,
52 .suppress_nan = true,
53 };
54
55 header.branch_cond |= header.back_to_back;
56
57 uint64_t u = 0;
58 memcpy(&u, &header, sizeof(header));
59 return u;
60 }
61
62 /* The uniform/constant slot allows loading a contiguous 64-bit immediate or
63 * pushed uniform per bundle. Figure out which one we need in the bundle (the
64 * scheduler needs to ensure we only have one type per bundle), validate
65 * everything, and rewrite away the register/uniform indices to use 3-bit
66 * sources directly. */
67
68 static unsigned
69 bi_lookup_constant(bi_clause *clause, uint64_t cons, bool *hi, bool b64)
70 {
71 uint64_t want = (cons >> 4);
72
73 for (unsigned i = 0; i < clause->constant_count; ++i) {
74 /* Only check the top 60 bits, since that's what's actually embedded
75 * in the clause; the bottom 4 bits are bundle-inline */
76
77 uint64_t candidates[2] = {
78 clause->constants[i] >> 4,
79 clause->constants[i] >> 36
80 };
81
82 /* For <64-bit mode, we treat lo/hi separately */
83
84 if (!b64)
85 candidates[0] &= (0xFFFFFFFF >> 4);
86
87 if (candidates[0] == want)
88 return i;
89
90 if (candidates[1] == want && !b64) {
91 *hi = true;
92 return i;
93 }
94 }
95
96 unreachable("Invalid constant accessed");
97 }
98
99 static unsigned
100 bi_constant_field(unsigned idx)
101 {
102 assert(idx <= 5);
103
104 const unsigned values[] = {
105 4, 5, 6, 7, 2, 3
106 };
107
108 return values[idx] << 4;
109 }
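
/* Editor's sketch (illustrative only, not driver code): how the two helpers
 * above combine into the 8-bit uniform/constant selector used below. For a
 * clause whose constants[0] is 0xABCD12345678DEAD, an instruction embedding
 * that immediate matches slot 0 by its top 60 bits, and the selector becomes
 * (field code 4 for slot 0) << 4 | (low nibble 0xD) = 0x4D. The hi-half
 * matching for narrow constants is ignored here for brevity, and the helper
 * name is hypothetical. */

static unsigned
demo_uniform_const_byte(const uint64_t *constants, unsigned count, uint64_t imm)
{
        static const unsigned field[] = { 4, 5, 6, 7, 2, 3 };

        assert(count <= 6); /* at most 6 embedded constants per clause */

        for (unsigned i = 0; i < count; ++i) {
                if ((constants[i] >> 4) == (imm >> 4))
                        return (field[i] << 4) | (constants[i] & 0xF);
        }

        return ~0u; /* not found; the real code treats this as unreachable */
}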
110
111 static bool
112 bi_assign_uniform_constant_single(
113 bi_registers *regs,
114 bi_clause *clause,
115 bi_instruction *ins, bool assigned, bool fast_zero)
116 {
117 if (!ins)
118 return assigned;
119
120 if (ins->type == BI_BLEND) {
121 assert(!assigned);
122 regs->uniform_constant = 0x8;
123 return true;
124 }
125
126 bi_foreach_src(ins, s) {
127 if (s == 0 && (ins->type == BI_LOAD_VAR_ADDRESS || ins->type == BI_LOAD_ATTR)) continue;
128 if (s == 1 && (ins->type == BI_BRANCH)) continue;
129
130 if (ins->src[s] & BIR_INDEX_CONSTANT) {
131 /* Let direct addresses through */
132 if (ins->type == BI_LOAD_VAR)
133 continue;
134
135 bool hi = false;
136 bool b64 = nir_alu_type_get_type_size(ins->src_types[s]) > 32;
137 uint64_t cons = bi_get_immediate(ins, s);
138 unsigned idx = bi_lookup_constant(clause, cons, &hi, b64);
139 unsigned lo = clause->constants[idx] & 0xF;
140 unsigned f = bi_constant_field(idx) | lo;
141
142 if (assigned && regs->uniform_constant != f)
143 unreachable("Mismatched uniform/const field: imm");
144
145 regs->uniform_constant = f;
146 ins->src[s] = BIR_INDEX_PASS | (hi ? BIFROST_SRC_CONST_HI : BIFROST_SRC_CONST_LO);
147 assigned = true;
148 } else if (ins->src[s] & BIR_INDEX_ZERO && (ins->type == BI_LOAD_UNIFORM || ins->type == BI_LOAD_VAR)) {
149 /* XXX: HACK UNTIL WE HAVE HI MATCHING DUE TO OVERFLOW XXX */
150 ins->src[s] = BIR_INDEX_PASS | BIFROST_SRC_CONST_HI;
151 } else if (ins->src[s] & BIR_INDEX_ZERO && !fast_zero) {
152 /* FMAs have a fast zero port, ADD needs to use the
153 * uniform/const port's special 0 mode handled here */
154 unsigned f = 0;
155
156 if (assigned && regs->uniform_constant != f)
157 unreachable("Mismatched uniform/const field: 0");
158
159 regs->uniform_constant = f;
160 ins->src[s] = BIR_INDEX_PASS | BIFROST_SRC_CONST_LO;
161 assigned = true;
162 } else if (ins->src[s] & BIR_INDEX_ZERO && fast_zero) {
163 ins->src[s] = BIR_INDEX_PASS | BIFROST_SRC_STAGE;
164 } else if (ins->src[s] & BIR_INDEX_UNIFORM) {
165 unreachable("Push uniforms not implemented yet");
166 }
167 }
168
169 return assigned;
170 }
171
172 static void
173 bi_assign_uniform_constant(
174 bi_clause *clause,
175 bi_registers *regs,
176 bi_bundle bundle)
177 {
178 bool assigned =
179 bi_assign_uniform_constant_single(regs, clause, bundle.fma, false, true);
180
181 bi_assign_uniform_constant_single(regs, clause, bundle.add, assigned, false);
182 }
183
184 /* Assigns a port for reading, before anything is written */
185
186 static void
187 bi_assign_port_read(bi_registers *regs, unsigned src)
188 {
189 /* We only assign for registers */
190 if (!(src & BIR_INDEX_REGISTER))
191 return;
192
193 unsigned reg = src & ~BIR_INDEX_REGISTER;
194
195 /* Check if we already assigned the port */
196 for (unsigned i = 0; i <= 1; ++i) {
197 if (regs->port[i] == reg && regs->enabled[i])
198 return;
199 }
200
201 if (regs->port[3] == reg && regs->read_port3)
202 return;
203
204 /* Assign it now */
205
206 for (unsigned i = 0; i <= 1; ++i) {
207 if (!regs->enabled[i]) {
208 regs->port[i] = reg;
209 regs->enabled[i] = true;
210 return;
211 }
212 }
213
214 if (!regs->read_port3) {
215 regs->port[3] = reg;
216 regs->read_port3 = true;
217 return;
218 }
219
220 bi_print_ports(regs, stderr);
221 unreachable("Failed to find a free port for src");
222 }
223
224 static bi_registers
225 bi_assign_ports(bi_bundle *now, bi_bundle *prev)
226 {
227 /* We assign ports for the main register mechanism. Special ops
228 * use the data register, which has its own mechanism entirely
229 * and thus gets skipped over here. */
230
231 unsigned read_dreg = now->add &&
232 bi_class_props[now->add->type] & BI_DATA_REG_SRC;
233
234 unsigned write_dreg = prev->add &&
235 bi_class_props[prev->add->type] & BI_DATA_REG_DEST;
236
237 /* First, assign reads */
238
239 if (now->fma)
240 bi_foreach_src(now->fma, src)
241 bi_assign_port_read(&now->regs, now->fma->src[src]);
242
243 if (now->add) {
244 bi_foreach_src(now->add, src) {
245 if (!(src == 0 && read_dreg))
246 bi_assign_port_read(&now->regs, now->add->src[src]);
247 }
248 }
249
250 /* Next, assign writes */
251
252 if (prev->add && prev->add->dest & BIR_INDEX_REGISTER && !write_dreg) {
253 now->regs.port[2] = prev->add->dest & ~BIR_INDEX_REGISTER;
254 now->regs.write_add = true;
255 }
256
257 if (prev->fma && prev->fma->dest & BIR_INDEX_REGISTER) {
258 unsigned r = prev->fma->dest & ~BIR_INDEX_REGISTER;
259
260 if (now->regs.write_add) {
261 /* Scheduler constraint: cannot read 3 and write 2 */
262 assert(!now->regs.read_port3);
263 now->regs.port[3] = r;
264 } else {
265 now->regs.port[2] = r;
266 }
267
268 now->regs.write_fma = true;
269 }
270
271 return now->regs;
272 }
273
274 /* Determines the register control field, ignoring the first? flag */
275
276 static enum bifrost_reg_control
277 bi_pack_register_ctrl_lo(bi_registers r)
278 {
279 if (r.write_fma) {
280 if (r.write_add) {
281 assert(!r.read_port3);
282 return BIFROST_WRITE_ADD_P2_FMA_P3;
283 } else {
284 if (r.read_port3)
285 return BIFROST_WRITE_FMA_P2_READ_P3;
286 else
287 return BIFROST_WRITE_FMA_P2;
288 }
289 } else if (r.write_add) {
290 if (r.read_port3)
291 return BIFROST_WRITE_ADD_P2_READ_P3;
292 else
293 return BIFROST_WRITE_ADD_P2;
294 } else if (r.read_port3)
295 return BIFROST_READ_P3;
296 else
297 return BIFROST_REG_NONE;
298 }
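
/* Editor's summary of the mapping above, derived directly from the code (the
 * first-instruction adjustment is applied separately in bi_pack_register_ctrl):
 *
 *   write_fma  write_add  read_port3   control
 *       1          1       (must be 0)  WRITE_ADD_P2_FMA_P3
 *       1          0          1         WRITE_FMA_P2_READ_P3
 *       1          0          0         WRITE_FMA_P2
 *       0          1          1         WRITE_ADD_P2_READ_P3
 *       0          1          0         WRITE_ADD_P2
 *       0          0          1         READ_P3
 *       0          0          0         REG_NONE
 */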
299
300 /* Ditto but account for the first? flag this time */
301
302 static enum bifrost_reg_control
303 bi_pack_register_ctrl(bi_registers r)
304 {
305 enum bifrost_reg_control ctrl = bi_pack_register_ctrl_lo(r);
306
307 if (r.first_instruction) {
308 if (ctrl == BIFROST_REG_NONE)
309 ctrl = BIFROST_FIRST_NONE;
310 else if (ctrl == BIFROST_WRITE_FMA_P2_READ_P3)
311 ctrl = BIFROST_FIRST_WRITE_FMA_P2_READ_P3;
312 else
313 ctrl |= BIFROST_FIRST_NONE;
314 }
315
316 return ctrl;
317 }
318
319 static uint64_t
320 bi_pack_registers(bi_registers regs)
321 {
322 enum bifrost_reg_control ctrl = bi_pack_register_ctrl(regs);
323 struct bifrost_regs s = { 0 };
324 uint64_t packed = 0;
325
326 if (regs.enabled[1]) {
327 /* Gotta save that bit!~ Required by the 63-x trick */
328 assert(regs.port[1] > regs.port[0]);
329 assert(regs.enabled[0]);
330
331 /* Do the 63-x trick, see docs/disasm */
332 if (regs.port[0] > 31) {
333 regs.port[0] = 63 - regs.port[0];
334 regs.port[1] = 63 - regs.port[1];
335 }
336
337 assert(regs.port[0] <= 31);
338 assert(regs.port[1] <= 63);
339
340 s.ctrl = ctrl;
341 s.reg1 = regs.port[1];
342 s.reg0 = regs.port[0];
343 } else {
344 /* Port 1 disabled, so set to zero and use port 1 for ctrl */
345 s.ctrl = 0;
346 s.reg1 = ctrl << 2;
347
348 if (regs.enabled[0]) {
349 /* Bit 0 upper bit of port 0 */
350 s.reg1 |= (regs.port[0] >> 5);
351
352 /* Rest of port 0 in usual spot */
353 s.reg0 = (regs.port[0] & 0b11111);
354 } else {
355 /* Bit 1 set if port 0 also disabled */
356 s.reg1 |= (1 << 1);
357 }
358 }
359
360 /* When port 3 isn't used, we have to set it to port 2, and vice versa,
361 * or INSTR_INVALID_ENC is raised. The reason is unknown. */
362
363 bool has_port2 = regs.write_fma || regs.write_add;
364 bool has_port3 = regs.read_port3 || (regs.write_fma && regs.write_add);
365
366 if (!has_port3)
367 regs.port[3] = regs.port[2];
368
369 if (!has_port2)
370 regs.port[2] = regs.port[3];
371
372 s.reg3 = regs.port[3];
373 s.reg2 = regs.port[2];
374 s.uniform_const = regs.uniform_constant;
375
376 memcpy(&packed, &s, sizeof(s));
377 return packed;
378 }
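
/* Editor's sketch (illustrative only, not driver code): the 63-x trick above.
 * Port 0 only has a 5-bit field, so when it names a register >= 32 both ports
 * are stored as 63 minus their value; because port 1 > port 0 is guaranteed
 * (see bi_flip_ports), the transform inverts the ordering of the stored
 * values, which is how a decoder can tell the two encodings apart. A
 * stand-alone round trip, with a hypothetical helper name: */

static void
demo_63_minus_x(unsigned p0, unsigned p1)
{
        assert(p1 > p0 && p1 <= 63);

        unsigned e0 = p0, e1 = p1;

        /* Encode as the packer does */
        if (e0 > 31) {
                e0 = 63 - e0;
                e1 = 63 - e1;
        }

        assert(e0 <= 31);

        /* Decode by checking the stored ordering */
        unsigned d0 = (e0 < e1) ? e0 : (63 - e0);
        unsigned d1 = (e0 < e1) ? e1 : (63 - e1);

        assert(d0 == p0 && d1 == p1);
}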
379
380 static void
381 bi_set_data_register(bi_clause *clause, unsigned idx)
382 {
383 assert(idx & BIR_INDEX_REGISTER);
384 unsigned reg = idx & ~BIR_INDEX_REGISTER;
385 assert(reg <= 63);
386 clause->data_register = reg;
387 }
388
389 static void
390 bi_read_data_register(bi_clause *clause, bi_instruction *ins)
391 {
392 bi_set_data_register(clause, ins->src[0]);
393 }
394
395 static void
396 bi_write_data_register(bi_clause *clause, bi_instruction *ins)
397 {
398 bi_set_data_register(clause, ins->dest);
399 }
400
401 static enum bifrost_packed_src
402 bi_get_src_reg_port(bi_registers *regs, unsigned src)
403 {
404 unsigned reg = src & ~BIR_INDEX_REGISTER;
405
406 if (regs->port[0] == reg && regs->enabled[0])
407 return BIFROST_SRC_PORT0;
408 else if (regs->port[1] == reg && regs->enabled[1])
409 return BIFROST_SRC_PORT1;
410 else if (regs->port[3] == reg && regs->read_port3)
411 return BIFROST_SRC_PORT3;
412 else
413 unreachable("Tried to access register with no port");
414 }
415
416 static enum bifrost_packed_src
417 bi_get_src(bi_instruction *ins, bi_registers *regs, unsigned s)
418 {
419 unsigned src = ins->src[s];
420
421 if (src & BIR_INDEX_REGISTER)
422 return bi_get_src_reg_port(regs, src);
423 else if (src & BIR_INDEX_PASS)
424 return src & ~BIR_INDEX_PASS;
425 else {
426 bi_print_instruction(ins, stderr);
427 unreachable("Unknown src in above instruction");
428 }
429 }
430
431 /* Constructs a packed 2-bit swizzle for a 16-bit vec2 source. Source must be
432 * 16-bit and written components must correspond to valid swizzles (component x
433 * or y). */
434
435 static unsigned
436 bi_swiz16(bi_instruction *ins, unsigned src)
437 {
438 assert(nir_alu_type_get_type_size(ins->src_types[src]) == 16);
439 unsigned swizzle = 0;
440
441 for (unsigned c = 0; c < 2; ++c) {
442 if (!bi_writes_component(ins, c)) continue;
443
444 unsigned k = ins->swizzle[src][c];
445 assert(k <= 1);
446 swizzle |= (k << c);
447 }
448
449 return swizzle;
450 }
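
/* Editor's example: for a 16-bit source whose swizzle selects (y, x), i.e.
 * ins->swizzle[src] = { 1, 0 }, and an instruction that writes both halves,
 * the loop above yields swizzle = (1 << 0) | (0 << 1) = 0b01. */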
451
452 static unsigned
453 bi_pack_fma_fma(bi_instruction *ins, bi_registers *regs)
454 {
455 /* (-a)(-b) = ab, so we only need one negate bit */
456 bool negate_mul = ins->src_neg[0] ^ ins->src_neg[1];
457
458 if (ins->op.mscale) {
459 assert(!(ins->src_abs[0] && ins->src_abs[1]));
460 assert(!ins->src_abs[2] && !ins->src_neg[3] && !ins->src_abs[3]);
461
462 /* We can have exactly one abs, and can flip the multiplication
463 * to make it fit if we have to */
464 bool flip_ab = ins->src_abs[1];
465
466 struct bifrost_fma_mscale pack = {
467 .src0 = bi_get_src(ins, regs, flip_ab ? 1 : 0),
468 .src1 = bi_get_src(ins, regs, flip_ab ? 0 : 1),
469 .src2 = bi_get_src(ins, regs, 2),
470 .src3 = bi_get_src(ins, regs, 3),
471 .mscale_mode = 0,
472 .mode = ins->outmod,
473 .src0_abs = ins->src_abs[0] || ins->src_abs[1],
474 .src1_neg = negate_mul,
475 .src2_neg = ins->src_neg[2],
476 .op = BIFROST_FMA_OP_MSCALE,
477 };
478
479 RETURN_PACKED(pack);
480 } else if (ins->dest_type == nir_type_float32) {
481 struct bifrost_fma_fma pack = {
482 .src0 = bi_get_src(ins, regs, 0),
483 .src1 = bi_get_src(ins, regs, 1),
484 .src2 = bi_get_src(ins, regs, 2),
485 .src0_abs = ins->src_abs[0],
486 .src1_abs = ins->src_abs[1],
487 .src2_abs = ins->src_abs[2],
488 .src0_neg = negate_mul,
489 .src2_neg = ins->src_neg[2],
490 .outmod = ins->outmod,
491 .roundmode = ins->roundmode,
492 .op = BIFROST_FMA_OP_FMA
493 };
494
495 RETURN_PACKED(pack);
496 } else if (ins->dest_type == nir_type_float16) {
497 struct bifrost_fma_fma16 pack = {
498 .src0 = bi_get_src(ins, regs, 0),
499 .src1 = bi_get_src(ins, regs, 1),
500 .src2 = bi_get_src(ins, regs, 2),
501 .swizzle_0 = bi_swiz16(ins, 0),
502 .swizzle_1 = bi_swiz16(ins, 1),
503 .swizzle_2 = bi_swiz16(ins, 2),
504 .src0_neg = negate_mul,
505 .src2_neg = ins->src_neg[2],
506 .outmod = ins->outmod,
507 .roundmode = ins->roundmode,
508 .op = BIFROST_FMA_OP_FMA16
509 };
510
511 RETURN_PACKED(pack);
512 } else {
513 unreachable("Invalid fma dest type");
514 }
515 }
516
517 static unsigned
518 bi_pack_fma_addmin_f32(bi_instruction *ins, bi_registers *regs)
519 {
520 unsigned op =
521 (ins->type == BI_ADD) ? BIFROST_FMA_OP_FADD32 :
522 (ins->op.minmax == BI_MINMAX_MIN) ? BIFROST_FMA_OP_FMIN32 :
523 BIFROST_FMA_OP_FMAX32;
524
525 struct bifrost_fma_add pack = {
526 .src0 = bi_get_src(ins, regs, 0),
527 .src1 = bi_get_src(ins, regs, 1),
528 .src0_abs = ins->src_abs[0],
529 .src1_abs = ins->src_abs[1],
530 .src0_neg = ins->src_neg[0],
531 .src1_neg = ins->src_neg[1],
532 .unk = 0x0,
533 .outmod = ins->outmod,
534 .roundmode = (ins->type == BI_ADD) ? ins->roundmode : ins->minmax,
535 .op = op
536 };
537
538 RETURN_PACKED(pack);
539 }
540
541 static bool
542 bi_pack_fp16_abs(bi_instruction *ins, bi_registers *regs, bool *flip)
543 {
544 /* Absolute values are packed in a quirky way. Let k = src1 < src0. Let
545 * l be an auxiliary bit we encode. Then the hardware determines:
546 *
547 * abs0 = l || k
548 * abs1 = l && k
549 *
550 * Since add/min/max are commutative, this saves a bit by using the
551 * order of the operands as a bit (k). To pack this, first note:
552 *
553 * (l && k) implies (l || k).
554 *
555 * That is, if the second argument is abs'd, then the first argument
556 * also has abs. So there are three cases:
557 *
558 * Case 0: Neither src has absolute value. Then we have l = k = 0.
559 *
560 * Case 1: Exactly one src has absolute value. Assign that source to
561 * src0 and the other source to src1. Compute k = src1 < src0 based on
562 * that assignment. Then l = ~k.
563 *
564 * Case 2: Both sources have absolute value. Then we have l = k = 1.
565 * Note to force k = 1 requires that (src1 < src0) OR (src0 < src1).
566 * That is, this encoding is only valid if src1 and src0 are distinct.
567 * This is a scheduling restriction (XXX); if an op of this type
568 * requires both identical sources to have abs value, then we must
569 * schedule to ADD (which does not use this ordering trick).
570 */
571
572 unsigned abs_0 = ins->src_abs[0], abs_1 = ins->src_abs[1];
573 unsigned src_0 = bi_get_src(ins, regs, 0);
574 unsigned src_1 = bi_get_src(ins, regs, 1);
575
576 assert(!(abs_0 && abs_1 && src_0 == src_1));
577
578 if (!abs_0 && !abs_1) {
579 /* Force k = 0 <===> NOT(src1 < src0) */
580 *flip = (src_1 < src_0);
581 return false;
582 } else if (abs_0 && !abs_1) {
583 return src_1 >= src_0;
584 } else if (abs_1 && !abs_0) {
585 *flip = true;
586 return src_0 >= src_1;
587 } else {
588 *flip = !(src_1 < src_0);
589 return true;
590 }
591 }
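
/* Editor's sketch (illustrative only, not driver code): the decode direction
 * of the scheme above, i.e. what the hardware recovers from the packed operand
 * order (k = src1 < src0) and the extra bit l returned by bi_pack_fp16_abs: */

static void
demo_fp16_abs_decode(unsigned packed_src0, unsigned packed_src1, bool l,
                     bool *abs0, bool *abs1)
{
        bool k = packed_src1 < packed_src0;

        *abs0 = l || k;
        *abs1 = l && k;
}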
592
593 static unsigned
594 bi_pack_fmadd_min_f16(bi_instruction *ins, bi_registers *regs, bool FMA)
595 {
596 unsigned op =
597 (!FMA) ? ((ins->op.minmax == BI_MINMAX_MIN) ?
598 BIFROST_ADD_OP_FMIN16 : BIFROST_ADD_OP_FMAX16) :
599 (ins->type == BI_ADD) ? BIFROST_FMA_OP_FADD16 :
600 (ins->op.minmax == BI_MINMAX_MIN) ? BIFROST_FMA_OP_FMIN16 :
601 BIFROST_FMA_OP_FMAX16;
602
603 bool flip = false;
604 bool l = bi_pack_fp16_abs(ins, regs, &flip);
605 unsigned src_0 = bi_get_src(ins, regs, 0);
606 unsigned src_1 = bi_get_src(ins, regs, 1);
607
608 if (FMA) {
609 struct bifrost_fma_add_minmax16 pack = {
610 .src0 = flip ? src_1 : src_0,
611 .src1 = flip ? src_0 : src_1,
612 .src0_neg = ins->src_neg[flip ? 1 : 0],
613 .src1_neg = ins->src_neg[flip ? 0 : 1],
614 .src0_swizzle = bi_swiz16(ins, flip ? 1 : 0),
615 .src1_swizzle = bi_swiz16(ins, flip ? 0 : 1),
616 .abs1 = l,
617 .outmod = ins->outmod,
618 .mode = (ins->type == BI_ADD) ? ins->roundmode : ins->minmax,
619 .op = op
620 };
621
622 RETURN_PACKED(pack);
623 } else {
624 /* Can't have modes for fp16 */
625 assert(ins->outmod == 0);
626
627 struct bifrost_add_fmin16 pack = {
628 .src0 = flip ? src_1 : src_0,
629 .src1 = flip ? src_0 : src_1,
630 .src0_neg = ins->src_neg[flip ? 1 : 0],
631 .src1_neg = ins->src_neg[flip ? 0 : 1],
632 .abs1 = l,
633 .src0_swizzle = bi_swiz16(ins, flip ? 1 : 0),
634 .src1_swizzle = bi_swiz16(ins, flip ? 0 : 1),
635 .mode = ins->minmax,
636 .op = op
637 };
638
639 RETURN_PACKED(pack);
640 }
641 }
642
643 static unsigned
644 bi_pack_fma_addmin(bi_instruction *ins, bi_registers *regs)
645 {
646 if (ins->dest_type == nir_type_float32)
647 return bi_pack_fma_addmin_f32(ins, regs);
648 else if (ins->dest_type == nir_type_float16)
649 return bi_pack_fmadd_min_f16(ins, regs, true);
650 else
651 unreachable("Unknown FMA/ADD type");
652 }
653
654 static unsigned
655 bi_pack_fma_1src(bi_instruction *ins, bi_registers *regs, unsigned op)
656 {
657 struct bifrost_fma_inst pack = {
658 .src0 = bi_get_src(ins, regs, 0),
659 .op = op
660 };
661
662 RETURN_PACKED(pack);
663 }
664
665 static unsigned
666 bi_pack_fma_2src(bi_instruction *ins, bi_registers *regs, unsigned op)
667 {
668 struct bifrost_fma_2src pack = {
669 .src0 = bi_get_src(ins, regs, 0),
670 .src1 = bi_get_src(ins, regs, 1),
671 .op = op
672 };
673
674 RETURN_PACKED(pack);
675 }
676
677 static unsigned
678 bi_pack_add_1src(bi_instruction *ins, bi_registers *regs, unsigned op)
679 {
680 struct bifrost_add_inst pack = {
681 .src0 = bi_get_src(ins, regs, 0),
682 .op = op
683 };
684
685 RETURN_PACKED(pack);
686 }
687
688 static enum bifrost_csel_cond
689 bi_cond_to_csel(enum bi_cond cond, bool *flip, bool *invert, nir_alu_type T)
690 {
691 nir_alu_type B = nir_alu_type_get_base_type(T);
692 unsigned idx = (B == nir_type_float) ? 0 :
693 ((B == nir_type_int) ? 1 : 2);
694
695 switch (cond){
696 case BI_COND_LT:
697 *flip = true;
698 case BI_COND_GT: {
699 const enum bifrost_csel_cond ops[] = {
700 BIFROST_FGT_F,
701 BIFROST_IGT_I,
702 BIFROST_UGT_I
703 };
704
705 return ops[idx];
706 }
707 case BI_COND_LE:
708 *flip = true;
709 case BI_COND_GE: {
710 const enum bifrost_csel_cond ops[] = {
711 BIFROST_FGE_F,
712 BIFROST_IGE_I,
713 BIFROST_UGE_I
714 };
715
716 return ops[idx];
717 }
718 case BI_COND_NE:
719 *invert = true;
720 case BI_COND_EQ: {
721 const enum bifrost_csel_cond ops[] = {
722 BIFROST_FEQ_F,
723 BIFROST_IEQ_F,
724 BIFROST_IEQ_F /* sign is irrelevant */
725 };
726
727 return ops[idx];
728 }
729 default:
730 unreachable("Invalid op for csel");
731 }
732 }
733
734 static unsigned
735 bi_pack_fma_csel(bi_instruction *ins, bi_registers *regs)
736 {
737 /* TODO: Use csel3 as well */
738 bool flip = false, invert = false;
739
740 enum bifrost_csel_cond cond =
741 bi_cond_to_csel(ins->cond, &flip, &invert, ins->src_types[0]);
742
743 unsigned size = nir_alu_type_get_type_size(ins->dest_type);
744
745 unsigned cmp_0 = (flip ? 1 : 0);
746 unsigned cmp_1 = (flip ? 0 : 1);
747 unsigned res_0 = (invert ? 3 : 2);
748 unsigned res_1 = (invert ? 2 : 3);
749
750 struct bifrost_csel4 pack = {
751 .src0 = bi_get_src(ins, regs, cmp_0),
752 .src1 = bi_get_src(ins, regs, cmp_1),
753 .src2 = bi_get_src(ins, regs, res_0),
754 .src3 = bi_get_src(ins, regs, res_1),
755 .cond = cond,
756 .op = (size == 16) ? BIFROST_FMA_OP_CSEL4_V16 :
757 BIFROST_FMA_OP_CSEL4
758 };
759
760 RETURN_PACKED(pack);
761 }
762
763 static unsigned
764 bi_pack_fma_frexp(bi_instruction *ins, bi_registers *regs)
765 {
766 unsigned op = BIFROST_FMA_OP_FREXPE_LOG;
767 return bi_pack_fma_1src(ins, regs, op);
768 }
769
770 static unsigned
771 bi_pack_fma_reduce(bi_instruction *ins, bi_registers *regs)
772 {
773 if (ins->op.reduce == BI_REDUCE_ADD_FREXPM) {
774 return bi_pack_fma_2src(ins, regs, BIFROST_FMA_OP_ADD_FREXPM);
775 } else {
776 unreachable("Invalid reduce op");
777 }
778 }
779
780 /* We have a single convert opcode in the IR but a number of opcodes that could
781 * come out. In particular we have native opcodes for:
782 *
783 * [ui]16 --> [fui]32 -- int16_to_32
784 * f16 --> f32 -- float16_to_32
785 * f32 --> f16 -- float32_to_16
786 * f32 --> [ui]32 -- float32_to_int
787 * [ui]32 --> f32 -- int_to_float32
788 * [fui]16 --> [fui]16 -- f2i_i2f16
789 */
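
/* Editor's sketch (illustrative only; the string names below are not driver
 * API): the selection among those native opcode families, restated as a tiny
 * decision function over plain sizes and type flags for readability. */

static const char *
demo_convert_path(unsigned from_size, unsigned to_size,
                  bool from_float, bool to_float)
{
        if (from_float && to_float && from_size == 32 && to_size == 16)
                return "float32_to_16";

        if (from_size == 16 && to_size == 32)
                return from_float ? "float16_to_32" : "int16_to_32";

        if (from_float && !to_float)
                return (from_size == 32) ? "float32_to_int" : "f2i_i2f16";

        if (!from_float && to_float)
                return (to_size == 32) ? "int_to_float32" : "f2i_i2f16";

        return "unsupported by these paths";
}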
790
791 static unsigned
792 bi_pack_convert(bi_instruction *ins, bi_registers *regs, bool FMA)
793 {
794 nir_alu_type from_base = nir_alu_type_get_base_type(ins->src_types[0]);
795 unsigned from_size = nir_alu_type_get_type_size(ins->src_types[0]);
796 bool from_unsigned = from_base == nir_type_uint;
797
798 nir_alu_type to_base = nir_alu_type_get_base_type(ins->dest_type);
799 unsigned to_size = nir_alu_type_get_type_size(ins->dest_type);
800 bool to_unsigned = to_base == nir_type_uint;
801 bool to_float = to_base == nir_type_float;
802
803 /* Sanity check */
804 assert((from_base != to_base) || (from_size != to_size));
805 assert((MAX2(from_size, to_size) / MIN2(from_size, to_size)) <= 2);
806
807 /* f32 to f16 is special */
808 if (from_size == 32 && to_size == 16 && from_base == nir_type_float && to_base == from_base) {
809 /* TODO: second vectorized source? */
810 struct bifrost_fma_2src pfma = {
811 .src0 = bi_get_src(ins, regs, 0),
812 .src1 = BIFROST_SRC_STAGE, /* 0 */
813 .op = BIFROST_FMA_FLOAT32_TO_16
814 };
815
816 struct bifrost_add_2src padd = {
817 .src0 = bi_get_src(ins, regs, 0),
818 .src1 = BIFROST_SRC_STAGE, /* 0 */
819 .op = BIFROST_ADD_FLOAT32_TO_16
820 };
821
822 if (FMA) {
823 RETURN_PACKED(pfma);
824 } else {
825 RETURN_PACKED(padd);
826 }
827 }
828
829 /* Otherwise, figure out the mode */
830 unsigned op = 0;
831
832 if (from_size == 16 && to_size == 32) {
833 unsigned component = ins->swizzle[0][0];
834 assert(component <= 1);
835
836 if (from_base == nir_type_float)
837 op = BIFROST_CONVERT_5(component);
838 else
839 op = BIFROST_CONVERT_4(from_unsigned, component, to_float);
840 } else {
841 unsigned mode = 0;
842 unsigned swizzle = (from_size == 16) ? bi_swiz16(ins, 0) : 0;
843 bool is_unsigned = from_unsigned;
844
845 if (from_base == nir_type_float) {
846 assert(to_base != nir_type_float);
847 is_unsigned = to_unsigned;
848
849 if (from_size == 32 && to_size == 32)
850 mode = BIFROST_CONV_F32_TO_I32;
851 else if (from_size == 16 && to_size == 16)
852 mode = BIFROST_CONV_F16_TO_I16;
853 else
854 unreachable("Invalid float conversion");
855 } else {
856 assert(to_base == nir_type_float);
857 assert(from_size == to_size);
858
859 if (to_size == 32)
860 mode = BIFROST_CONV_I32_TO_F32;
861 else if (to_size == 16)
862 mode = BIFROST_CONV_I16_TO_F16;
863 else
864 unreachable("Invalid int conversion");
865 }
866
867 /* Fixup swizzle for 32-bit only modes */
868
869 if (mode == BIFROST_CONV_I32_TO_F32)
870 swizzle = 0b11;
871 else if (mode == BIFROST_CONV_F32_TO_I32)
872 swizzle = 0b10;
873
874 op = BIFROST_CONVERT(is_unsigned, ins->roundmode, swizzle, mode);
875
876 /* Unclear what the top bit is for... maybe 16-bit related */
877 bool mode2 = mode == BIFROST_CONV_F16_TO_I16;
878 bool mode6 = mode == BIFROST_CONV_I16_TO_F16;
879
880 if (!(mode2 || mode6))
881 op |= 0x100;
882 }
883
884 if (FMA)
885 return bi_pack_fma_1src(ins, regs, BIFROST_FMA_CONVERT | op);
886 else
887 return bi_pack_add_1src(ins, regs, BIFROST_ADD_CONVERT | op);
888 }
889
890 static unsigned
891 bi_pack_fma_select(bi_instruction *ins, bi_registers *regs)
892 {
893 unsigned size = nir_alu_type_get_type_size(ins->src_types[0]);
894
895 if (size == 16) {
896 unsigned swiz = (ins->swizzle[0][0] | (ins->swizzle[1][0] << 1));
897 unsigned op = BIFROST_FMA_SEL_16(swiz);
898 return bi_pack_fma_2src(ins, regs, op);
899 } else if (size == 8) {
900 unsigned swiz = 0;
901
902 for (unsigned c = 0; c < 4; ++c) {
903 if (ins->swizzle[c][0]) {
904 /* Ensure lowering restriction is met */
905 assert(ins->swizzle[c][0] == 2);
906 swiz |= (1 << c);
907 }
908 }
909
910 struct bifrost_fma_sel8 pack = {
911 .src0 = bi_get_src(ins, regs, 0),
912 .src1 = bi_get_src(ins, regs, 1),
913 .src2 = bi_get_src(ins, regs, 2),
914 .src3 = bi_get_src(ins, regs, 3),
915 .swizzle = swiz,
916 .op = BIFROST_FMA_OP_SEL8
917 };
918
919 RETURN_PACKED(pack);
920 } else {
921 unreachable("Unimplemented");
922 }
923 }
924
925 static enum bifrost_fcmp_cond
926 bi_fcmp_cond(enum bi_cond cond)
927 {
928 switch (cond) {
929 case BI_COND_LT: return BIFROST_OLT;
930 case BI_COND_LE: return BIFROST_OLE;
931 case BI_COND_GE: return BIFROST_OGE;
932 case BI_COND_GT: return BIFROST_OGT;
933 case BI_COND_EQ: return BIFROST_OEQ;
934 case BI_COND_NE: return BIFROST_UNE;
935 default: unreachable("Unknown bi_cond");
936 }
937 }
938
939 /* a <?> b <==> b <flip(?)> a (TODO: NaN behaviour?) */
940
941 static enum bifrost_fcmp_cond
942 bi_flip_fcmp(enum bifrost_fcmp_cond cond)
943 {
944 switch (cond) {
945 case BIFROST_OGT:
946 return BIFROST_OLT;
947 case BIFROST_OGE:
948 return BIFROST_OLE;
949 case BIFROST_OLT:
950 return BIFROST_OGT;
951 case BIFROST_OLE:
952 return BIFROST_OGE;
953 case BIFROST_OEQ:
954 case BIFROST_UNE:
955 return cond;
956 default:
957 unreachable("Unknown fcmp cond");
958 }
959 }
960
961 static unsigned
962 bi_pack_fma_cmp(bi_instruction *ins, bi_registers *regs)
963 {
964 nir_alu_type Tl = ins->src_types[0];
965 nir_alu_type Tr = ins->src_types[1];
966
967 if (Tl == nir_type_float32 || Tr == nir_type_float32) {
968 /* TODO: Mixed 32/16 cmp */
969 assert(Tl == Tr);
970
971 enum bifrost_fcmp_cond cond = bi_fcmp_cond(ins->cond);
972
973 /* Only src1 has neg, so we arrange:
974 * a < b --- native
975 * a < -b --- native
976 * -a < -b <===> a > b
977 * -a < b <===> a > -b
978 * TODO: Is this NaN-precise?
979 */
980
981 bool flip = ins->src_neg[0];
982 bool neg = ins->src_neg[0] ^ ins->src_neg[1];
983
984 if (flip)
985 cond = bi_flip_fcmp(cond);
986
987 struct bifrost_fma_fcmp pack = {
988 .src0 = bi_get_src(ins, regs, 0),
989 .src1 = bi_get_src(ins, regs, 1),
990 .src0_abs = ins->src_abs[0],
991 .src1_abs = ins->src_abs[1],
992 .src1_neg = neg,
993 .src_expand = 0,
994 .unk1 = 0,
995 .cond = cond,
996 .op = BIFROST_FMA_OP_FCMP_GL
997 };
998
999 RETURN_PACKED(pack);
1000 } else if (Tl == nir_type_float16 && Tr == nir_type_float16) {
1001 bool flip = false;
1002 bool l = bi_pack_fp16_abs(ins, regs, &flip);
1003 enum bifrost_fcmp_cond cond = bi_fcmp_cond(ins->cond);
1004
1005 if (flip)
1006 cond = bi_flip_fcmp(cond);
1007
1008 struct bifrost_fma_fcmp16 pack = {
1009 .src0 = bi_get_src(ins, regs, flip ? 1 : 0),
1010 .src1 = bi_get_src(ins, regs, flip ? 0 : 1),
1011 .src0_swizzle = bi_swiz16(ins, flip ? 1 : 0),
1012 .src1_swizzle = bi_swiz16(ins, flip ? 0 : 1),
1013 .abs1 = l,
1014 .unk = 0,
1015 .cond = cond,
1016 .op = BIFROST_FMA_OP_FCMP_GL_16,
1017 };
1018
1019 RETURN_PACKED(pack);
1020 } else {
1021 unreachable("Unknown cmp type");
1022 }
1023 }
1024
1025 static unsigned
1026 bi_fma_bitwise_op(enum bi_bitwise_op op, bool rshift)
1027 {
1028 switch (op) {
1029 case BI_BITWISE_OR:
1030 /* Via De Morgan's */
1031 return rshift ?
1032 BIFROST_FMA_OP_RSHIFT_NAND :
1033 BIFROST_FMA_OP_LSHIFT_NAND;
1034 case BI_BITWISE_AND:
1035 return rshift ?
1036 BIFROST_FMA_OP_RSHIFT_AND :
1037 BIFROST_FMA_OP_LSHIFT_AND;
1038 case BI_BITWISE_XOR:
1039 /* Shift direction handled out of band */
1040 return BIFROST_FMA_OP_RSHIFT_XOR;
1041 default:
1042 unreachable("Unknown op");
1043 }
1044 }
1045
1046 static unsigned
1047 bi_pack_fma_bitwise(bi_instruction *ins, bi_registers *regs)
1048 {
1049 unsigned size = nir_alu_type_get_type_size(ins->dest_type);
1050 assert(size <= 32);
1051
1052 bool invert_0 = ins->bitwise.src_invert[0];
1053 bool invert_1 = ins->bitwise.src_invert[1];
1054
1055 if (ins->op.bitwise == BI_BITWISE_OR) {
1056 /* Becomes NAND, so via De Morgan's:
1057 * f(A) | f(B) = ~(~f(A) & ~f(B))
1058 * = NAND(~f(A), ~f(B))
1059 */
1060
1061 invert_0 = !invert_0;
1062 invert_1 = !invert_1;
1063 } else if (ins->op.bitwise == BI_BITWISE_XOR) {
1064 /* ~A ^ ~B = ~(A ^ ~B) = ~(~(A ^ B)) = A ^ B
1065 * ~A ^ B = ~(A ^ B) = A ^ ~B
1066 */
1067
1068 invert_0 ^= invert_1;
1069 invert_1 = false;
1070
1071 /* invert_1 ends up specifying shift direction */
1072 invert_1 = !ins->bitwise.rshift;
1073 }
1074
1075 struct bifrost_shift_fma pack = {
1076 .src0 = bi_get_src(ins, regs, 0),
1077 .src1 = bi_get_src(ins, regs, 1),
1078 .src2 = bi_get_src(ins, regs, 2),
1079 .half = (size == 32) ? 0 : (size == 16) ? 0x7 : (size == 8) ? 0x4 : 0,
1080 .unk = 1, /* XXX */
1081 .invert_1 = invert_0,
1082 .invert_2 = invert_1,
1083 .op = bi_fma_bitwise_op(ins->op.bitwise, ins->bitwise.rshift)
1084 };
1085
1086 RETURN_PACKED(pack);
1087 }
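
/* Editor's sketch (illustrative only, not driver code): a quick check of the
 * two identities used above. OR is lowered to NAND with both inverts toggled
 * (A | B = ~(~A & ~B)), and for XOR a source invert folds into the other
 * operand (~A ^ B = A ^ ~B), so at most one invert is ever needed there. */

static void
demo_bitwise_identities(uint32_t a, uint32_t b)
{
        /* OR via NAND with inverted operands */
        assert((a | b) == (uint32_t) ~(~a & ~b));

        /* XOR invert folding */
        assert((~a ^ b) == (a ^ ~b));
        assert((~a ^ ~b) == (a ^ b));
}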
1088
1089 static unsigned
1090 bi_pack_fma_round(bi_instruction *ins, bi_registers *regs)
1091 {
1092 bool fp16 = ins->dest_type == nir_type_float16;
1093 assert(fp16 || ins->dest_type == nir_type_float32);
1094
1095 unsigned op = fp16
1096 ? BIFROST_FMA_ROUND_16(ins->roundmode, bi_swiz16(ins, 0))
1097 : BIFROST_FMA_ROUND_32(ins->roundmode);
1098
1099 return bi_pack_fma_1src(ins, regs, op);
1100 }
1101
1102 static unsigned
1103 bi_pack_fma_imath(bi_instruction *ins, bi_registers *regs)
1104 {
1105 /* Scheduler: only ADD can have 8/16-bit imath */
1106 assert(ins->dest_type == nir_type_int32 || ins->dest_type == nir_type_uint32);
1107
1108 unsigned op = ins->op.imath == BI_IMATH_ADD
1109 ? BIFROST_FMA_IADD_32
1110 : BIFROST_FMA_ISUB_32;
1111
1112 return bi_pack_fma_2src(ins, regs, op);
1113 }
1114
1115 static unsigned
1116 bi_pack_fma(bi_clause *clause, bi_bundle bundle, bi_registers *regs)
1117 {
1118 if (!bundle.fma)
1119 return BIFROST_FMA_NOP;
1120
1121 switch (bundle.fma->type) {
1122 case BI_ADD:
1123 return bi_pack_fma_addmin(bundle.fma, regs);
1124 case BI_CMP:
1125 return bi_pack_fma_cmp(bundle.fma, regs);
1126 case BI_BITWISE:
1127 return bi_pack_fma_bitwise(bundle.fma, regs);
1128 case BI_CONVERT:
1129 return bi_pack_convert(bundle.fma, regs, true);
1130 case BI_CSEL:
1131 return bi_pack_fma_csel(bundle.fma, regs);
1132 case BI_FMA:
1133 return bi_pack_fma_fma(bundle.fma, regs);
1134 case BI_FREXP:
1135 return bi_pack_fma_frexp(bundle.fma, regs);
1136 case BI_IMATH:
1137 return bi_pack_fma_imath(bundle.fma, regs);
1138 case BI_MINMAX:
1139 return bi_pack_fma_addmin(bundle.fma, regs);
1140 case BI_MOV:
1141 return bi_pack_fma_1src(bundle.fma, regs, BIFROST_FMA_OP_MOV);
1142 case BI_SHIFT:
1143 unreachable("Packing todo");
1144 case BI_SELECT:
1145 return bi_pack_fma_select(bundle.fma, regs);
1146 case BI_ROUND:
1147 return bi_pack_fma_round(bundle.fma, regs);
1148 case BI_REDUCE_FMA:
1149 return bi_pack_fma_reduce(bundle.fma, regs);
1150 default:
1151 unreachable("Cannot encode class as FMA");
1152 }
1153 }
1154
1155 static unsigned
1156 bi_pack_add_ld_vary(bi_clause *clause, bi_instruction *ins, bi_registers *regs)
1157 {
1158 unsigned size = nir_alu_type_get_type_size(ins->dest_type);
1159 assert(size == 32 || size == 16);
1160
1161 unsigned op = (size == 32) ?
1162 BIFROST_ADD_OP_LD_VAR_32 :
1163 BIFROST_ADD_OP_LD_VAR_16;
1164
1165 unsigned packed_addr = 0;
1166
1167 if (ins->src[0] & BIR_INDEX_CONSTANT) {
1168 /* A direct access uses the address field directly */
1169 packed_addr = bi_get_immediate(ins, 0);
1170 } else {
1171 /* Indirect gets an extra source */
1172 packed_addr = bi_get_src(ins, regs, 0) | 0b11000;
1173 }
1174
1175 /* The destination is thrown in the data register */
1176 assert(ins->dest & BIR_INDEX_REGISTER);
1177 clause->data_register = ins->dest & ~BIR_INDEX_REGISTER;
1178
1179 unsigned channels = ins->vector_channels;
1180 assert(channels >= 1 && channels <= 4);
1181
1182 struct bifrost_ld_var pack = {
1183 .src0 = bi_get_src(ins, regs, 1),
1184 .addr = packed_addr,
1185 .channels = MALI_POSITIVE(channels),
1186 .interp_mode = ins->load_vary.interp_mode,
1187 .reuse = ins->load_vary.reuse,
1188 .flat = ins->load_vary.flat,
1189 .op = op
1190 };
1191
1192 RETURN_PACKED(pack);
1193 }
1194
1195 static unsigned
1196 bi_pack_add_2src(bi_instruction *ins, bi_registers *regs, unsigned op)
1197 {
1198 struct bifrost_add_2src pack = {
1199 .src0 = bi_get_src(ins, regs, 0),
1200 .src1 = bi_get_src(ins, regs, 1),
1201 .op = op
1202 };
1203
1204 RETURN_PACKED(pack);
1205 }
1206
1207 static unsigned
1208 bi_pack_add_addmin_f32(bi_instruction *ins, bi_registers *regs)
1209 {
1210 unsigned op =
1211 (ins->type == BI_ADD) ? BIFROST_ADD_OP_FADD32 :
1212 (ins->op.minmax == BI_MINMAX_MIN) ? BIFROST_ADD_OP_FMIN32 :
1213 BIFROST_ADD_OP_FMAX32;
1214
1215 struct bifrost_add_faddmin pack = {
1216 .src0 = bi_get_src(ins, regs, 0),
1217 .src1 = bi_get_src(ins, regs, 1),
1218 .src0_abs = ins->src_abs[0],
1219 .src1_abs = ins->src_abs[1],
1220 .src0_neg = ins->src_neg[0],
1221 .src1_neg = ins->src_neg[1],
1222 .outmod = ins->outmod,
1223 .mode = (ins->type == BI_ADD) ? ins->roundmode : ins->minmax,
1224 .op = op
1225 };
1226
1227 RETURN_PACKED(pack);
1228 }
1229
1230 static unsigned
1231 bi_pack_add_add_f16(bi_instruction *ins, bi_registers *regs)
1232 {
1233 /* ADD.v2f16 can't have outmod */
1234 assert(ins->outmod == BIFROST_NONE);
1235
1236 struct bifrost_add_faddmin pack = {
1237 .src0 = bi_get_src(ins, regs, 0),
1238 .src1 = bi_get_src(ins, regs, 1),
1239 .src0_abs = ins->src_abs[0],
1240 .src1_abs = ins->src_abs[1],
1241 .src0_neg = ins->src_neg[0],
1242 .src1_neg = ins->src_neg[1],
1243 .select = bi_swiz16(ins, 0), /* swizzle_0 */
1244 .outmod = bi_swiz16(ins, 1), /* swizzle_1 */
1245 .mode = ins->roundmode,
1246 .op = BIFROST_ADD_OP_FADD16
1247 };
1248
1249 RETURN_PACKED(pack);
1250 }
1251
1252 static unsigned
1253 bi_pack_add_addmin(bi_instruction *ins, bi_registers *regs)
1254 {
1255 if (ins->dest_type == nir_type_float32)
1256 return bi_pack_add_addmin_f32(ins, regs);
1257 else if (ins->dest_type == nir_type_float16) {
1258 if (ins->type == BI_ADD)
1259 return bi_pack_add_add_f16(ins, regs);
1260 else
1261 return bi_pack_fmadd_min_f16(ins, regs, false);
1262 } else
1263 unreachable("Unknown FMA/ADD type");
1264 }
1265
1266 static unsigned
1267 bi_pack_add_ld_ubo(bi_clause *clause, bi_instruction *ins, bi_registers *regs)
1268 {
1269 assert(ins->vector_channels >= 1 && ins->vector_channels <= 4);
1270
1271 const unsigned ops[4] = {
1272 BIFROST_ADD_OP_LD_UBO_1,
1273 BIFROST_ADD_OP_LD_UBO_2,
1274 BIFROST_ADD_OP_LD_UBO_3,
1275 BIFROST_ADD_OP_LD_UBO_4
1276 };
1277
1278 bi_write_data_register(clause, ins);
1279 return bi_pack_add_2src(ins, regs, ops[ins->vector_channels - 1]);
1280 }
1281
1282 static enum bifrost_ldst_type
1283 bi_pack_ldst_type(nir_alu_type T)
1284 {
1285 switch (T) {
1286 case nir_type_float16: return BIFROST_LDST_F16;
1287 case nir_type_float32: return BIFROST_LDST_F32;
1288 case nir_type_int32: return BIFROST_LDST_I32;
1289 case nir_type_uint32: return BIFROST_LDST_U32;
1290 default: unreachable("Invalid type loaded");
1291 }
1292 }
1293
1294 static unsigned
1295 bi_pack_add_ld_var_addr(bi_clause *clause, bi_instruction *ins, bi_registers *regs)
1296 {
1297 struct bifrost_ld_var_addr pack = {
1298 .src0 = bi_get_src(ins, regs, 1),
1299 .src1 = bi_get_src(ins, regs, 2),
1300 .location = bi_get_immediate(ins, 0),
1301 .type = bi_pack_ldst_type(ins->src_types[3]),
1302 .op = BIFROST_ADD_OP_LD_VAR_ADDR
1303 };
1304
1305 bi_write_data_register(clause, ins);
1306 RETURN_PACKED(pack);
1307 }
1308
1309 static unsigned
1310 bi_pack_add_ld_attr(bi_clause *clause, bi_instruction *ins, bi_registers *regs)
1311 {
1312 assert(ins->vector_channels >= 1 && ins->vector_channels <= 4);
1313
1314 struct bifrost_ld_attr pack = {
1315 .src0 = bi_get_src(ins, regs, 1),
1316 .src1 = bi_get_src(ins, regs, 2),
1317 .location = bi_get_immediate(ins, 0),
1318 .channels = MALI_POSITIVE(ins->vector_channels),
1319 .type = bi_pack_ldst_type(ins->dest_type),
1320 .op = BIFROST_ADD_OP_LD_ATTR
1321 };
1322
1323 bi_write_data_register(clause, ins);
1324 RETURN_PACKED(pack);
1325 }
1326
1327 static unsigned
1328 bi_pack_add_st_vary(bi_clause *clause, bi_instruction *ins, bi_registers *regs)
1329 {
1330 assert(ins->vector_channels >= 1 && ins->vector_channels <= 4);
1331
1332 struct bifrost_st_vary pack = {
1333 .src0 = bi_get_src(ins, regs, 1),
1334 .src1 = bi_get_src(ins, regs, 2),
1335 .src2 = bi_get_src(ins, regs, 3),
1336 .channels = MALI_POSITIVE(ins->vector_channels),
1337 .op = BIFROST_ADD_OP_ST_VAR
1338 };
1339
1340 bi_read_data_register(clause, ins);
1341 RETURN_PACKED(pack);
1342 }
1343
1344 static unsigned
1345 bi_pack_add_atest(bi_clause *clause, bi_instruction *ins, bi_registers *regs)
1346 {
1347 bool fp16 = (ins->src_types[1] == nir_type_float16);
1348
1349 struct bifrost_add_atest pack = {
1350 .src0 = bi_get_src(ins, regs, 0),
1351 .src1 = bi_get_src(ins, regs, 1),
1352 .half = fp16,
1353 .component = fp16 ? ins->swizzle[1][0] : 1, /* Set for fp32 */
1354 .op = BIFROST_ADD_OP_ATEST,
1355 };
1356
1357 /* Despite *also* writing with the usual mechanism... quirky and
1358 * perhaps unnecessary, but let's match the blob */
1359 clause->data_register = ins->dest & ~BIR_INDEX_REGISTER;
1360
1361 RETURN_PACKED(pack);
1362 }
1363
1364 static unsigned
1365 bi_pack_add_blend(bi_clause *clause, bi_instruction *ins, bi_registers *regs)
1366 {
1367 struct bifrost_add_inst pack = {
1368 .src0 = bi_get_src(ins, regs, 1),
1369 .op = BIFROST_ADD_OP_BLEND
1370 };
1371
1372 /* TODO: Pack location in uniform_const */
1373 assert(ins->blend_location == 0);
1374
1375 bi_read_data_register(clause, ins);
1376 RETURN_PACKED(pack);
1377 }
1378
1379 static unsigned
1380 bi_pack_add_special(bi_instruction *ins, bi_registers *regs)
1381 {
1382 unsigned op = 0;
1383 bool fp16 = ins->dest_type == nir_type_float16;
1384 bool Y = ins->swizzle[0][0];
1385
1386 if (ins->op.special == BI_SPECIAL_FRCP) {
1387 op = fp16 ?
1388 (Y ? BIFROST_ADD_OP_FRCP_FAST_F16_Y :
1389 BIFROST_ADD_OP_FRCP_FAST_F16_X) :
1390 BIFROST_ADD_OP_FRCP_FAST_F32;
1391 } else if (ins->op.special == BI_SPECIAL_FRSQ) {
1392 op = fp16 ?
1393 (Y ? BIFROST_ADD_OP_FRSQ_FAST_F16_Y :
1394 BIFROST_ADD_OP_FRSQ_FAST_F16_X) :
1395 BIFROST_ADD_OP_FRSQ_FAST_F32;
1396
1397 } else if (ins->op.special == BI_SPECIAL_EXP2_LOW) {
1398 assert(!fp16);
1399 op = BIFROST_ADD_OP_FEXP2_FAST;
1400 } else {
1401 unreachable("Unknown special op");
1402 }
1403
1404 return bi_pack_add_1src(ins, regs, op);
1405 }
1406
1407 static unsigned
1408 bi_pack_add_table(bi_instruction *ins, bi_registers *regs)
1409 {
1410 unsigned op = 0;
1411 assert(ins->dest_type == nir_type_float32);
1412
1413 op = BIFROST_ADD_OP_LOG2_HELP;
1414 return bi_pack_add_1src(ins, regs, op);
1415 }
1416 static unsigned
1417 bi_pack_add_tex_compact(bi_clause *clause, bi_instruction *ins, bi_registers *regs, gl_shader_stage stage)
1418 {
1419 bool f16 = ins->dest_type == nir_type_float16;
1420 bool vtx = stage != MESA_SHADER_FRAGMENT;
1421
1422 struct bifrost_tex_compact pack = {
1423 .src0 = bi_get_src(ins, regs, 0),
1424 .src1 = bi_get_src(ins, regs, 1),
1425 .op = f16 ? BIFROST_ADD_OP_TEX_COMPACT_F16(vtx) :
1426 BIFROST_ADD_OP_TEX_COMPACT_F32(vtx),
1427 .compute_lod = !vtx,
1428 .tex_index = ins->texture.texture_index,
1429 .sampler_index = ins->texture.sampler_index
1430 };
1431
1432 bi_write_data_register(clause, ins);
1433 RETURN_PACKED(pack);
1434 }
1435
1436 static unsigned
1437 bi_pack_add_select(bi_instruction *ins, bi_registers *regs)
1438 {
1439 unsigned size = nir_alu_type_get_type_size(ins->src_types[0]);
1440 assert(size == 16);
1441
1442 unsigned swiz = (ins->swizzle[0][0] | (ins->swizzle[1][0] << 1));
1443 unsigned op = BIFROST_ADD_SEL_16(swiz);
1444 return bi_pack_add_2src(ins, regs, op);
1445 }
1446
1447 static enum bifrost_discard_cond
1448 bi_cond_to_discard(enum bi_cond cond, bool *flip)
1449 {
1450 switch (cond){
1451 case BI_COND_GT:
1452 *flip = true;
1453 /* fallthrough */
1454 case BI_COND_LT:
1455 return BIFROST_DISCARD_FLT;
1456 case BI_COND_GE:
1457 *flip = true;
1458 /* fallthrough */
1459 case BI_COND_LE:
1460 return BIFROST_DISCARD_FLE;
1461 case BI_COND_NE:
1462 return BIFROST_DISCARD_FNE;
1463 case BI_COND_EQ:
1464 return BIFROST_DISCARD_FEQ;
1465 default:
1466 unreachable("Invalid op for discard");
1467 }
1468 }
1469
1470 static unsigned
1471 bi_pack_add_discard(bi_instruction *ins, bi_registers *regs)
1472 {
1473 bool fp16 = ins->src_types[0] == nir_type_float16;
1474 assert(fp16 || ins->src_types[0] == nir_type_float32);
1475
1476 bool flip = false;
1477 enum bifrost_discard_cond cond = bi_cond_to_discard(ins->cond, &flip);
1478
1479 struct bifrost_add_discard pack = {
1480 .src0 = bi_get_src(ins, regs, flip ? 1 : 0),
1481 .src1 = bi_get_src(ins, regs, flip ? 0 : 1),
1482 .cond = cond,
1483 .src0_select = fp16 ? ins->swizzle[0][0] : 0,
1484 .src1_select = fp16 ? ins->swizzle[1][0] : 0,
1485 .fp32 = fp16 ? 0 : 1,
1486 .op = BIFROST_ADD_OP_DISCARD
1487 };
1488
1489 RETURN_PACKED(pack);
1490 }
1491
1492 static enum bifrost_icmp_cond
1493 bi_cond_to_icmp(enum bi_cond cond, bool *flip, bool is_unsigned, bool is_16)
1494 {
1495 switch (cond){
1496 case BI_COND_LT:
1497 *flip = true;
1498 /* fallthrough */
1499 case BI_COND_GT:
1500 return is_unsigned ? (is_16 ? BIFROST_ICMP_IGE : BIFROST_ICMP_UGT)
1501 : BIFROST_ICMP_IGT;
1502 case BI_COND_LE:
1503 *flip = true;
1504 /* fallthrough */
1505 case BI_COND_GE:
1506 return is_unsigned ? BIFROST_ICMP_UGE :
1507 (is_16 ? BIFROST_ICMP_UGT : BIFROST_ICMP_IGE);
1508 case BI_COND_NE:
1509 return BIFROST_ICMP_NEQ;
1510 case BI_COND_EQ:
1511 return BIFROST_ICMP_EQ;
1512 default:
1513 unreachable("Invalid op for icmp");
1514 }
1515 }
1516
1517 static unsigned
1518 bi_pack_add_icmp32(bi_instruction *ins, bi_registers *regs, bool flip,
1519 enum bifrost_icmp_cond cond)
1520 {
1521 struct bifrost_add_icmp pack = {
1522 .src0 = bi_get_src(ins, regs, flip ? 1 : 0),
1523 .src1 = bi_get_src(ins, regs, flip ? 0 : 1),
1524 .cond = cond,
1525 .sz = 1,
1526 .d3d = false,
1527 .op = BIFROST_ADD_OP_ICMP_32
1528 };
1529
1530 RETURN_PACKED(pack);
1531 }
1532
1533 static unsigned
1534 bi_pack_add_icmp16(bi_instruction *ins, bi_registers *regs, bool flip,
1535 enum bifrost_icmp_cond cond)
1536 {
1537 struct bifrost_add_icmp16 pack = {
1538 .src0 = bi_get_src(ins, regs, flip ? 1 : 0),
1539 .src1 = bi_get_src(ins, regs, flip ? 0 : 1),
1540 .src0_swizzle = bi_swiz16(ins, flip ? 1 : 0),
1541 .src1_swizzle = bi_swiz16(ins, flip ? 0 : 1),
1542 .cond = cond,
1543 .d3d = false,
1544 .op = BIFROST_ADD_OP_ICMP_16
1545 };
1546
1547 RETURN_PACKED(pack);
1548 }
1549
1550 static unsigned
1551 bi_pack_add_cmp(bi_instruction *ins, bi_registers *regs)
1552 {
1553 nir_alu_type Tl = ins->src_types[0];
1554 nir_alu_type Tr = ins->src_types[1];
1555 nir_alu_type Bl = nir_alu_type_get_base_type(Tl);
1556
1557 if (Bl == nir_type_uint || Bl == nir_type_int) {
1558 assert(Tl == Tr);
1559 unsigned sz = nir_alu_type_get_type_size(Tl);
1560
1561 bool flip = false;
1562
1563 enum bifrost_icmp_cond cond = bi_cond_to_icmp(
1564 sz == 16 ? /*bi_invert_cond*/(ins->cond) : ins->cond,
1565 &flip, Bl == nir_type_uint, sz == 16);
1566
1567 if (sz == 32)
1568 return bi_pack_add_icmp32(ins, regs, flip, cond);
1569 else if (sz == 16)
1570 return bi_pack_add_icmp16(ins, regs, flip, cond);
1571 else
1572 unreachable("TODO");
1573 } else {
1574 unreachable("TODO");
1575 }
1576 }
1577
1578 static unsigned
1579 bi_pack_add_imath(bi_instruction *ins, bi_registers *regs)
1580 {
1581 /* TODO: 32+16 add */
1582 assert(ins->src_types[0] == ins->src_types[1]);
1583 unsigned sz = nir_alu_type_get_type_size(ins->src_types[0]);
1584 enum bi_imath_op p = ins->op.imath;
1585
1586 unsigned op = 0;
1587
1588 if (sz == 8) {
1589 op = (p == BI_IMATH_ADD) ? BIFROST_ADD_IADD_8 :
1590 BIFROST_ADD_ISUB_8;
1591 } else if (sz == 16) {
1592 op = (p == BI_IMATH_ADD) ? BIFROST_ADD_IADD_16 :
1593 BIFROST_ADD_ISUB_16;
1594 } else if (sz == 32) {
1595 op = (p == BI_IMATH_ADD) ? BIFROST_ADD_IADD_32 :
1596 BIFROST_ADD_ISUB_32;
1597 } else {
1598 unreachable("64-bit todo");
1599 }
1600
1601 return bi_pack_add_2src(ins, regs, op);
1602 }
1603
1604 static unsigned
1605 bi_pack_add(bi_clause *clause, bi_bundle bundle, bi_registers *regs, gl_shader_stage stage)
1606 {
1607 if (!bundle.add)
1608 return BIFROST_ADD_NOP;
1609
1610 switch (bundle.add->type) {
1611 case BI_ADD:
1612 return bi_pack_add_addmin(bundle.add, regs);
1613 case BI_ATEST:
1614 return bi_pack_add_atest(clause, bundle.add, regs);
1615 case BI_BRANCH:
1616 unreachable("Packing todo");
1617 case BI_CMP:
1618 return bi_pack_add_cmp(bundle.add, regs);
1619 case BI_BLEND:
1620 return bi_pack_add_blend(clause, bundle.add, regs);
1621 case BI_BITWISE:
1622 unreachable("Packing todo");
1623 case BI_CONVERT:
1624 return bi_pack_convert(bundle.add, regs, false);
1625 case BI_DISCARD:
1626 return bi_pack_add_discard(bundle.add, regs);
1627 case BI_FREXP:
1628 unreachable("Packing todo");
1629 case BI_IMATH:
1630 return bi_pack_add_imath(bundle.add, regs);
1631 case BI_LOAD:
1632 unreachable("Packing todo");
1633 case BI_LOAD_ATTR:
1634 return bi_pack_add_ld_attr(clause, bundle.add, regs);
1635 case BI_LOAD_UNIFORM:
1636 return bi_pack_add_ld_ubo(clause, bundle.add, regs);
1637 case BI_LOAD_VAR:
1638 return bi_pack_add_ld_vary(clause, bundle.add, regs);
1639 case BI_LOAD_VAR_ADDRESS:
1640 return bi_pack_add_ld_var_addr(clause, bundle.add, regs);
1641 case BI_MINMAX:
1642 return bi_pack_add_addmin(bundle.add, regs);
1643 case BI_MOV:
1644 case BI_SHIFT:
1645 case BI_STORE:
1646 unreachable("Packing todo");
1647 case BI_STORE_VAR:
1648 return bi_pack_add_st_vary(clause, bundle.add, regs);
1649 case BI_SPECIAL:
1650 return bi_pack_add_special(bundle.add, regs);
1651 case BI_TABLE:
1652 return bi_pack_add_table(bundle.add, regs);
1653 case BI_SELECT:
1654 return bi_pack_add_select(bundle.add, regs);
1655 case BI_TEX:
1656 if (bundle.add->op.texture == BI_TEX_COMPACT)
1657 return bi_pack_add_tex_compact(clause, bundle.add, regs, stage);
1658 else
1659 unreachable("Unknown tex type");
1660 case BI_ROUND:
1661 unreachable("Packing todo");
1662 default:
1663 unreachable("Cannot encode class as ADD");
1664 }
1665 }
1666
1667 struct bi_packed_bundle {
1668 uint64_t lo;
1669 uint64_t hi;
1670 };
1671
1672 /* We must ensure port 1 > port 0 for the 63-x trick to function, so we fix
1673 * this up at pack time. (Scheduling doesn't care.) */
1674
1675 static void
1676 bi_flip_ports(bi_registers *regs)
1677 {
1678 if (regs->enabled[0] && regs->enabled[1] && regs->port[1] < regs->port[0]) {
1679 unsigned temp = regs->port[0];
1680 regs->port[0] = regs->port[1];
1681 regs->port[1] = temp;
1682 }
1683
1684 }
1685
1686 static struct bi_packed_bundle
1687 bi_pack_bundle(bi_clause *clause, bi_bundle bundle, bi_bundle prev, bool first_bundle, gl_shader_stage stage)
1688 {
1689 bi_assign_ports(&bundle, &prev);
1690 bi_assign_uniform_constant(clause, &bundle.regs, bundle);
1691 bundle.regs.first_instruction = first_bundle;
1692
1693 bi_flip_ports(&bundle.regs);
1694
1695 uint64_t reg = bi_pack_registers(bundle.regs);
1696 uint64_t fma = bi_pack_fma(clause, bundle, &bundle.regs);
1697 uint64_t add = bi_pack_add(clause, bundle, &bundle.regs, stage);
1698
1699 struct bi_packed_bundle packed = {
1700 .lo = reg | (fma << 35) | ((add & 0b111111) << 58),
1701 .hi = add >> 6
1702 };
1703
1704 return packed;
1705 }
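
/* Editor's note on the layout above, derived from the shifts as a reading aid:
 * lo carries the 35-bit register block in bits [34:0], the FMA word in bits
 * [57:35], and the low 6 bits of the ADD word in bits [63:58]; hi carries the
 * remaining ADD bits, which bi_pack_clause below splits into the ins_2/ins_0
 * fields of the clause quadword. */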
1706
1707 /* Packs the next two constants as a dedicated constant quadword at the end of
1708 * the clause, returning the number packed. There are two cases to consider:
1709 *
1710 * Case #1: Branching is not used. For a single constant copy the upper nibble
1711 * over, easy.
1712 *
1713 * Case #2: Branching is used. For a single constant, it suffices to set the
1714 * upper nibble to 4 and leave the latter constant 0, which matches what the
1715 * blob does.
1716 *
1717 * Extending to multiple constants is considerably more tricky and left for
1718 * future work.
1719 */
1720
1721 static unsigned
1722 bi_pack_constants(bi_context *ctx, bi_clause *clause,
1723 unsigned index,
1724 struct util_dynarray *emission)
1725 {
1726 /* After these two, are we done? Determines tag */
1727 bool done = clause->constant_count <= (index + 2);
1728 bool only = clause->constant_count <= (index + 1);
1729
1730 /* Is the constant we're packing for a branch? */
1731 bool branches = clause->branch_constant && done;
1732
1733 /* TODO: Pos */
1734 assert(index == 0 && clause->bundle_count == 1);
1735 assert(only);
1736
1737 uint64_t hi = clause->constants[index + 0] >> 60ull;
1738
1739 struct bifrost_fmt_constant quad = {
1740 .pos = 0, /* TODO */
1741 .tag = done ? BIFROST_FMTC_FINAL : BIFROST_FMTC_CONSTANTS,
1742 .imm_1 = clause->constants[index + 0] >> 4,
1743 .imm_2 = ((hi < 8) ? (hi << 60ull) : 0) >> 4,
1744 };
1745
1746 if (branches) {
1747 /* Branch offsets are less than 60 bits, so this should work at
1748 * least for now */
1749 quad.imm_1 |= (4ull << 60ull) >> 4;
1750 assert (hi == 0);
1751 }
1752
1753 /* XXX: On G71, Connor observed that the difference of the top 4 bits
1754 * of the second constant with the first must be less than 8, otherwise
1755 * we have to swap them. On G52, I'm able to reproduce a similar issue
1756 * but with a different workaround (modeled above with a single
1757 * constant, unclear how to workaround for multiple constants.) Further
1758 * investigation needed. Possibly an errata. XXX */
1759
1760 util_dynarray_append(emission, struct bifrost_fmt_constant, quad);
1761
1762 return 2;
1763 }
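
/* Editor's sketch (illustrative only, not driver code): the quadword fields
 * produced for the single-constant cases described above. With no branch, the
 * constant's upper nibble is re-attached through imm_2 (subject to the hi < 8
 * workaround discussed in the XXX comment above); with a branch constant, the
 * upper nibble is forced to 4 in imm_1 instead (and must have been 0),
 * matching the blob. The helper name is hypothetical. */

static void
demo_pack_one_constant(uint64_t c, bool branches,
                       uint64_t *imm_1, uint64_t *imm_2)
{
        uint64_t hi = c >> 60;

        *imm_1 = c >> 4;
        *imm_2 = ((hi < 8) ? (hi << 60) : 0) >> 4;

        if (branches) {
                assert(hi == 0);
                *imm_1 |= (4ull << 60) >> 4;
        }
}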
1764
1765 static void
1766 bi_pack_clause(bi_context *ctx, bi_clause *clause, bi_clause *next,
1767 struct util_dynarray *emission, gl_shader_stage stage)
1768 {
1769 struct bi_packed_bundle ins_1 = bi_pack_bundle(clause, clause->bundles[0], clause->bundles[0], true, stage);
1770 assert(clause->bundle_count == 1);
1771
1772 /* Used to decide if we elide writes */
1773 bool is_fragment = ctx->stage == MESA_SHADER_FRAGMENT;
1774
1775 /* State for packing constants throughout */
1776 unsigned constant_index = 0;
1777
1778 struct bifrost_fmt1 quad_1 = {
1779 .tag = clause->constant_count ? BIFROST_FMT1_CONSTANTS : BIFROST_FMT1_FINAL,
1780 .header = bi_pack_header(clause, next, is_fragment),
1781 .ins_1 = ins_1.lo,
1782 .ins_2 = ins_1.hi & ((1 << 11) - 1),
1783 .ins_0 = (ins_1.hi >> 11) & 0b111,
1784 };
1785
1786 util_dynarray_append(emission, struct bifrost_fmt1, quad_1);
1787
1788 /* Pack the remaining constants */
1789
1790 while (constant_index < clause->constant_count) {
1791 constant_index += bi_pack_constants(ctx, clause,
1792 constant_index, emission);
1793 }
1794 }
1795
1796 static bi_clause *
1797 bi_next_clause(bi_context *ctx, pan_block *block, bi_clause *clause)
1798 {
1799 /* Try the next clause in this block */
1800 if (clause->link.next != &((bi_block *) block)->clauses)
1801 return list_first_entry(&(clause->link), bi_clause, link);
1802
1803 /* Try the next block, or the one after that if it's empty, etc. */
1804 pan_block *next_block = pan_next_block(block);
1805
1806 bi_foreach_block_from(ctx, next_block, block) {
1807 bi_block *blk = (bi_block *) block;
1808
1809 if (!list_is_empty(&blk->clauses))
1810 return list_first_entry(&(blk->clauses), bi_clause, link);
1811 }
1812
1813 return NULL;
1814 }
1815
1816 void
1817 bi_pack(bi_context *ctx, struct util_dynarray *emission)
1818 {
1819 util_dynarray_init(emission, NULL);
1820
1821 bi_foreach_block(ctx, _block) {
1822 bi_block *block = (bi_block *) _block;
1823
1824 bi_foreach_clause_in_block(block, clause) {
1825 bi_clause *next = bi_next_clause(ctx, _block, clause);
1826 bi_pack_clause(ctx, clause, next, emission, ctx->stage);
1827 }
1828 }
1829 }