pan/bi: Allow two successors in header packing
[mesa.git] / src / panfrost / bifrost / bi_pack.c
1 /*
2 * Copyright (C) 2020 Collabora, Ltd.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 */
23
24 #include "compiler.h"
25 #include "bi_print.h"
26
/* Type-puns a packed instruction struct into its 64-bit wire representation
 * and returns it from the enclosing function. memcpy is used to avoid
 * strict-aliasing violations. Wrapped in do/while(0) so the expansion is a
 * single statement and stays safe in unbraced if/else chains (the bare
 * `{...};` form would break before an `else`). */
#define RETURN_PACKED(str) do { \
        uint64_t temp = 0; \
        memcpy(&temp, &str, sizeof(str)); \
        return temp; \
} while (0)
32
33 /* This file contains the final passes of the compiler. Running after
34 * scheduling and RA, the IR is now finalized, so we need to emit it to actual
35 * bits on the wire (as well as fixup branches) */
36
37 static uint64_t
38 bi_pack_header(bi_clause *clause, bi_clause *next_1, bi_clause *next_2, bool is_fragment)
39 {
40 /* next_dependencies are the union of the dependencies of successors'
41 * dependencies */
42
43 unsigned scoreboard_deps = next_1 ? next_1->dependencies : 0;
44 scoreboard_deps |= next_2 ? next_2->dependencies : 0;
45
46 struct bifrost_header header = {
47 .back_to_back = clause->back_to_back,
48 .no_end_of_shader = (next_1 != NULL),
49 .elide_writes = is_fragment,
50 .branch_cond = clause->branch_conditional || clause->back_to_back,
51 .datareg_writebarrier = clause->data_register_write_barrier,
52 .datareg = clause->data_register,
53 .scoreboard_deps = scoreboard_deps,
54 .scoreboard_index = clause->scoreboard_id,
55 .clause_type = clause->clause_type,
56 .next_clause_type = next_1 ? next_1->clause_type : 0,
57 .suppress_inf = true,
58 .suppress_nan = true,
59 };
60
61 header.branch_cond |= header.back_to_back;
62
63 uint64_t u = 0;
64 memcpy(&u, &header, sizeof(header));
65 return u;
66 }
67
68 /* The uniform/constant slot allows loading a contiguous 64-bit immediate or
69 * pushed uniform per bundle. Figure out which one we need in the bundle (the
70 * scheduler needs to ensure we only have one type per bundle), validate
71 * everything, and rewrite away the register/uniform indices to use 3-bit
72 * sources directly. */
73
/* Finds the clause-level constant slot holding the immediate `cons` used by
 * an instruction. Returns the index into clause->constants; for sub-64-bit
 * immediates matching the upper half of a slot, *hi is set so the caller
 * selects CONST_HI. Aborts if nothing matches (a scheduler invariant was
 * violated). */
static unsigned
bi_lookup_constant(bi_clause *clause, uint64_t cons, bool *hi, bool b64)
{
        /* Drop the low nibble: it is encoded bundle-inline, not in the
         * clause-level constant */
        uint64_t want = (cons >> 4);

        for (unsigned i = 0; i < clause->constant_count; ++i) {
                /* Only check top 60-bits since that's what's actually embedded
                 * in the clause, the bottom 4-bits are bundle-inline */

                uint64_t candidates[2] = {
                        clause->constants[i] >> 4,
                        clause->constants[i] >> 36
                };

                /* For <64-bit mode, we treat lo/hi separately */

                if (!b64)
                        candidates[0] &= (0xFFFFFFFF >> 4);

                if (candidates[0] == want)
                        return i;

                /* Upper-half match is only meaningful for narrow immediates */
                if (candidates[1] == want && !b64) {
                        *hi = true;
                        return i;
                }
        }

        unreachable("Invalid constant accessed");
}
104
/* Maps a constant-pool index (0..5) to its hardware slot code, pre-shifted
 * into the upper nibble of the uniform/constant field. */
static unsigned
bi_constant_field(unsigned idx)
{
        static const unsigned slots[6] = { 4, 5, 6, 7, 2, 3 };

        assert(idx < 6);
        return slots[idx] << 4;
}
116
117 static bool
118 bi_assign_uniform_constant_single(
119 bi_registers *regs,
120 bi_clause *clause,
121 bi_instruction *ins, bool assigned, bool fast_zero)
122 {
123 if (!ins)
124 return assigned;
125
126 if (ins->type == BI_BLEND) {
127 assert(!assigned);
128 regs->uniform_constant = 0x8;
129 return true;
130 }
131
132 if (ins->type == BI_BRANCH && clause->branch_constant) {
133 /* By convention branch constant is last */
134 unsigned idx = clause->constant_count - 1;
135
136 /* We can only jump to clauses which are qword aligned so the
137 * bottom 4-bits of the offset are necessarily 0 */
138 unsigned lo = 0;
139
140 /* Build the constant */
141 unsigned C = bi_constant_field(idx) | lo;
142
143 if (assigned && regs->uniform_constant != C)
144 unreachable("Mismatched uniform/const field: branch");
145
146 regs->uniform_constant = C;
147 return true;
148 }
149
150 bi_foreach_src(ins, s) {
151 if (s == 0 && (ins->type == BI_LOAD_VAR_ADDRESS || ins->type == BI_LOAD_ATTR)) continue;
152 if (s == 1 && (ins->type == BI_BRANCH)) continue;
153
154 if (ins->src[s] & BIR_INDEX_CONSTANT) {
155 /* Let direct addresses through */
156 if (ins->type == BI_LOAD_VAR)
157 continue;
158
159 bool hi = false;
160 bool b64 = nir_alu_type_get_type_size(ins->src_types[s]) > 32;
161 uint64_t cons = bi_get_immediate(ins, s);
162 unsigned idx = bi_lookup_constant(clause, cons, &hi, b64);
163 unsigned lo = clause->constants[idx] & 0xF;
164 unsigned f = bi_constant_field(idx) | lo;
165
166 if (assigned && regs->uniform_constant != f)
167 unreachable("Mismatched uniform/const field: imm");
168
169 regs->uniform_constant = f;
170 ins->src[s] = BIR_INDEX_PASS | (hi ? BIFROST_SRC_CONST_HI : BIFROST_SRC_CONST_LO);
171 assigned = true;
172 } else if (ins->src[s] & BIR_INDEX_ZERO && (ins->type == BI_LOAD_UNIFORM || ins->type == BI_LOAD_VAR)) {
173 /* XXX: HACK UNTIL WE HAVE HI MATCHING DUE TO OVERFLOW XXX */
174 ins->src[s] = BIR_INDEX_PASS | BIFROST_SRC_CONST_HI;
175 } else if (ins->src[s] & BIR_INDEX_ZERO && !fast_zero) {
176 /* FMAs have a fast zero port, ADD needs to use the
177 * uniform/const port's special 0 mode handled here */
178 unsigned f = 0;
179
180 if (assigned && regs->uniform_constant != f)
181 unreachable("Mismatched uniform/const field: 0");
182
183 regs->uniform_constant = f;
184 ins->src[s] = BIR_INDEX_PASS | BIFROST_SRC_CONST_LO;
185 assigned = true;
186 } else if (ins->src[s] & BIR_INDEX_ZERO && fast_zero) {
187 ins->src[s] = BIR_INDEX_PASS | BIFROST_SRC_STAGE;
188 } else if (s & BIR_INDEX_UNIFORM) {
189 unreachable("Push uniforms not implemented yet");
190 }
191 }
192
193 return assigned;
194 }
195
196 static void
197 bi_assign_uniform_constant(
198 bi_clause *clause,
199 bi_registers *regs,
200 bi_bundle bundle)
201 {
202 bool assigned =
203 bi_assign_uniform_constant_single(regs, clause, bundle.fma, false, true);
204
205 bi_assign_uniform_constant_single(regs, clause, bundle.add, assigned, false);
206 }
207
208 /* Assigns a port for reading, before anything is written */
209
210 static void
211 bi_assign_port_read(bi_registers *regs, unsigned src)
212 {
213 /* We only assign for registers */
214 if (!(src & BIR_INDEX_REGISTER))
215 return;
216
217 unsigned reg = src & ~BIR_INDEX_REGISTER;
218
219 /* Check if we already assigned the port */
220 for (unsigned i = 0; i <= 1; ++i) {
221 if (regs->port[i] == reg && regs->enabled[i])
222 return;
223 }
224
225 if (regs->port[3] == reg && regs->read_port3)
226 return;
227
228 /* Assign it now */
229
230 for (unsigned i = 0; i <= 1; ++i) {
231 if (!regs->enabled[i]) {
232 regs->port[i] = reg;
233 regs->enabled[i] = true;
234 return;
235 }
236 }
237
238 if (!regs->read_port3) {
239 regs->port[3] = reg;
240 regs->read_port3 = true;
241 return;
242 }
243
244 bi_print_ports(regs, stderr);
245 unreachable("Failed to find a free port for src");
246 }
247
/* Computes the port assignment for one bundle. Reads are assigned for `now`;
 * register writes land one stage later, so the writes routed here belong to
 * `prev`. Returns the completed port state (also stored into now->regs). */
static bi_registers
bi_assign_ports(bi_bundle *now, bi_bundle *prev)
{
        /* We assign ports for the main register mechanism. Special ops
         * use the data registers, which has its own mechanism entirely
         * and thus gets skipped over here. */

        unsigned read_dreg = now->add &&
                bi_class_props[now->add->type] & BI_DATA_REG_SRC;

        unsigned write_dreg = prev->add &&
                bi_class_props[prev->add->type] & BI_DATA_REG_DEST;

        /* First, assign reads */

        if (now->fma)
                bi_foreach_src(now->fma, src)
                        bi_assign_port_read(&now->regs, now->fma->src[src]);

        if (now->add) {
                bi_foreach_src(now->add, src) {
                        /* src 0 of a data-register op is read via the data
                         * register, not a port */
                        if (!(src == 0 && read_dreg))
                                bi_assign_port_read(&now->regs, now->add->src[src]);
                }
        }

        /* Next, assign writes */

        if (prev->add && prev->add->dest & BIR_INDEX_REGISTER && !write_dreg) {
                now->regs.port[2] = prev->add->dest & ~BIR_INDEX_REGISTER;
                now->regs.write_add = true;
        }

        if (prev->fma && prev->fma->dest & BIR_INDEX_REGISTER) {
                unsigned r = prev->fma->dest & ~BIR_INDEX_REGISTER;

                if (now->regs.write_add) {
                        /* Scheduler constraint: cannot read 3 and write 2 */
                        assert(!now->regs.read_port3);
                        /* ADD took port 2, so the FMA write spills to port 3 */
                        now->regs.port[3] = r;
                } else {
                        now->regs.port[2] = r;
                }

                now->regs.write_fma = true;
        }

        return now->regs;
}
297
298 /* Determines the register control field, ignoring the first? flag */
299
300 static enum bifrost_reg_control
301 bi_pack_register_ctrl_lo(bi_registers r)
302 {
303 if (r.write_fma) {
304 if (r.write_add) {
305 assert(!r.read_port3);
306 return BIFROST_WRITE_ADD_P2_FMA_P3;
307 } else {
308 if (r.read_port3)
309 return BIFROST_WRITE_FMA_P2_READ_P3;
310 else
311 return BIFROST_WRITE_FMA_P2;
312 }
313 } else if (r.write_add) {
314 if (r.read_port3)
315 return BIFROST_WRITE_ADD_P2_READ_P3;
316 else
317 return BIFROST_WRITE_ADD_P2;
318 } else if (r.read_port3)
319 return BIFROST_READ_P3;
320 else
321 return BIFROST_REG_NONE;
322 }
323
/* Ditto but account for the first? flag this time */

static enum bifrost_reg_control
bi_pack_register_ctrl(bi_registers r)
{
        enum bifrost_reg_control ctrl = bi_pack_register_ctrl_lo(r);

        /* The first instruction of a shader uses a distinct encoding space;
         * two codes have explicit FIRST_* variants, the rest compose. */
        if (r.first_instruction) {
                if (ctrl == BIFROST_REG_NONE)
                        ctrl = BIFROST_FIRST_NONE;
                else if (ctrl == BIFROST_WRITE_FMA_P2_READ_P3)
                        ctrl = BIFROST_FIRST_WRITE_FMA_P2_READ_P3;
                else
                        /* NOTE(review): assumes BIFROST_FIRST_NONE is a flag
                         * bit that ORs cleanly with the remaining control
                         * codes — confirm against the enum's encoding */
                        ctrl |= BIFROST_FIRST_NONE;
        }

        return ctrl;
}
342
/* Packs the register-control word for a bundle, implementing the hardware's
 * encoding quirks: the "63-x" trick to save a bit when both read ports are
 * live, stashing ctrl in the reg1 field when port 1 is unused, and mirroring
 * unused write/read ports 2 and 3 onto each other. */
static uint64_t
bi_pack_registers(bi_registers regs)
{
        enum bifrost_reg_control ctrl = bi_pack_register_ctrl(regs);
        struct bifrost_regs s = { 0 };
        uint64_t packed = 0;

        if (regs.enabled[1]) {
                /* Gotta save that bit!~ Required by the 63-x trick */
                assert(regs.port[1] > regs.port[0]);
                assert(regs.enabled[0]);

                /* Do the 63-x trick, see docs/disasm */
                if (regs.port[0] > 31) {
                        regs.port[0] = 63 - regs.port[0];
                        regs.port[1] = 63 - regs.port[1];
                }

                assert(regs.port[0] <= 31);
                assert(regs.port[1] <= 63);

                s.ctrl = ctrl;
                s.reg1 = regs.port[1];
                s.reg0 = regs.port[0];
        } else {
                /* Port 1 disabled, so set to zero and use port 1 for ctrl */
                s.ctrl = 0;
                s.reg1 = ctrl << 2;

                if (regs.enabled[0]) {
                        /* Bit 0 upper bit of port 0 */
                        s.reg1 |= (regs.port[0] >> 5);

                        /* Rest of port 0 in usual spot */
                        s.reg0 = (regs.port[0] & 0b11111);
                } else {
                        /* Bit 1 set if port 0 also disabled */
                        s.reg1 |= (1 << 1);
                }
        }

        /* When port 3 isn't used, we have to set it to port 2, and vice versa,
         * or INSTR_INVALID_ENC is raised. The reason is unknown. */

        bool has_port2 = regs.write_fma || regs.write_add;
        bool has_port3 = regs.read_port3 || (regs.write_fma && regs.write_add);

        if (!has_port3)
                regs.port[3] = regs.port[2];

        if (!has_port2)
                regs.port[2] = regs.port[3];

        s.reg3 = regs.port[3];
        s.reg2 = regs.port[2];
        s.uniform_const = regs.uniform_constant;

        /* Type-pun the bitfield struct to its wire word */
        memcpy(&packed, &s, sizeof(s));
        return packed;
}
403
404 static void
405 bi_set_data_register(bi_clause *clause, unsigned idx)
406 {
407 assert(idx & BIR_INDEX_REGISTER);
408 unsigned reg = idx & ~BIR_INDEX_REGISTER;
409 assert(reg <= 63);
410 clause->data_register = reg;
411 }
412
/* Routes the data register from an instruction that reads through it (the
 * value comes from src[0]). */
static void
bi_read_data_register(bi_clause *clause, bi_instruction *ins)
{
        bi_set_data_register(clause, ins->src[0]);
}
418
/* Routes the data register for an instruction that writes through it (the
 * value goes to the instruction's dest). */
static void
bi_write_data_register(bi_clause *clause, bi_instruction *ins)
{
        bi_set_data_register(clause, ins->dest);
}
424
425 static enum bifrost_packed_src
426 bi_get_src_reg_port(bi_registers *regs, unsigned src)
427 {
428 unsigned reg = src & ~BIR_INDEX_REGISTER;
429
430 if (regs->port[0] == reg && regs->enabled[0])
431 return BIFROST_SRC_PORT0;
432 else if (regs->port[1] == reg && regs->enabled[1])
433 return BIFROST_SRC_PORT1;
434 else if (regs->port[3] == reg && regs->read_port3)
435 return BIFROST_SRC_PORT3;
436 else
437 unreachable("Tried to access register with no port");
438 }
439
440 static enum bifrost_packed_src
441 bi_get_src(bi_instruction *ins, bi_registers *regs, unsigned s)
442 {
443 unsigned src = ins->src[s];
444
445 if (src & BIR_INDEX_REGISTER)
446 return bi_get_src_reg_port(regs, src);
447 else if (src & BIR_INDEX_PASS)
448 return src & ~BIR_INDEX_PASS;
449 else {
450 bi_print_instruction(ins, stderr);
451 unreachable("Unknown src in above instruction");
452 }
453 }
454
/* Constructs a packed 2-bit swizzle for a 16-bit vec2 source. Source must be
 * 16-bit and written components must correspond to valid swizzles (component x
 * or y). */

static unsigned
bi_swiz16(bi_instruction *ins, unsigned src)
{
        assert(nir_alu_type_get_type_size(ins->src_types[src]) == 16);
        unsigned swizzle = 0;

        for (unsigned c = 0; c < 2; ++c) {
                /* NOTE(review): this guard is loop-invariant (it passes `src`,
                 * not `c`), so it skips either both components or neither —
                 * presumably a per-component check was intended; confirm
                 * against bi_writes_component's contract */
                if (!bi_writes_component(ins, src)) continue;

                unsigned k = ins->swizzle[src][c];
                assert(k <= 1);
                swizzle |= (k << c);
        }

        return swizzle;
}
475
/* Packs an FMA-family instruction: MSCALE (multiply with scale), fp32 FMA,
 * or fp16 FMA, dispatched on op.mscale / dest_type. Returns the packed
 * encoding word. */
static unsigned
bi_pack_fma_fma(bi_instruction *ins, bi_registers *regs)
{
        /* (-a)(-b) = ab, so we only need one negate bit */
        bool negate_mul = ins->src_neg[0] ^ ins->src_neg[1];

        if (ins->op.mscale) {
                assert(!(ins->src_abs[0] && ins->src_abs[1]));
                /* NOTE(review): this OR-chain only fails when abs[2], neg[3]
                 * and abs[3] all hold simultaneously — verify this matches the
                 * intended scheduler restriction on srcs 2/3 */
                assert(!ins->src_abs[2] || !ins->src_neg[3] || !ins->src_abs[3]);

                /* We can have exactly one abs, and can flip the multiplication
                 * to make it fit if we have to */
                bool flip_ab = ins->src_abs[1];

                struct bifrost_fma_mscale pack = {
                        .src0 = bi_get_src(ins, regs, flip_ab ? 1 : 0),
                        .src1 = bi_get_src(ins, regs, flip_ab ? 0 : 1),
                        .src2 = bi_get_src(ins, regs, 2),
                        .src3 = bi_get_src(ins, regs, 3),
                        .mscale_mode = 0,
                        .mode = ins->outmod,
                        .src0_abs = ins->src_abs[0] || ins->src_abs[1],
                        .src1_neg = negate_mul,
                        .src2_neg = ins->src_neg[2],
                        .op = BIFROST_FMA_OP_MSCALE,
                };

                RETURN_PACKED(pack);
        } else if (ins->dest_type == nir_type_float32) {
                struct bifrost_fma_fma pack = {
                        .src0 = bi_get_src(ins, regs, 0),
                        .src1 = bi_get_src(ins, regs, 1),
                        .src2 = bi_get_src(ins, regs, 2),
                        .src0_abs = ins->src_abs[0],
                        .src1_abs = ins->src_abs[1],
                        .src2_abs = ins->src_abs[2],
                        .src0_neg = negate_mul,
                        .src2_neg = ins->src_neg[2],
                        .outmod = ins->outmod,
                        .roundmode = ins->roundmode,
                        .op = BIFROST_FMA_OP_FMA
                };

                RETURN_PACKED(pack);
        } else if (ins->dest_type == nir_type_float16) {
                /* fp16 operands carry 2-bit vec2 swizzles instead of abs */
                struct bifrost_fma_fma16 pack = {
                        .src0 = bi_get_src(ins, regs, 0),
                        .src1 = bi_get_src(ins, regs, 1),
                        .src2 = bi_get_src(ins, regs, 2),
                        .swizzle_0 = bi_swiz16(ins, 0),
                        .swizzle_1 = bi_swiz16(ins, 1),
                        .swizzle_2 = bi_swiz16(ins, 2),
                        .src0_neg = negate_mul,
                        .src2_neg = ins->src_neg[2],
                        .outmod = ins->outmod,
                        .roundmode = ins->roundmode,
                        .op = BIFROST_FMA_OP_FMA16
                };

                RETURN_PACKED(pack);
        } else {
                unreachable("Invalid fma dest type");
        }
}
540
/* Packs fp32 FADD / FMIN / FMAX on the FMA unit. For min/max the roundmode
 * field is repurposed to carry the min/max (NaN-handling) mode. */
static unsigned
bi_pack_fma_addmin_f32(bi_instruction *ins, bi_registers *regs)
{
        unsigned op =
                (ins->type == BI_ADD) ? BIFROST_FMA_OP_FADD32 :
                (ins->op.minmax == BI_MINMAX_MIN) ? BIFROST_FMA_OP_FMIN32 :
                BIFROST_FMA_OP_FMAX32;

        struct bifrost_fma_add pack = {
                .src0 = bi_get_src(ins, regs, 0),
                .src1 = bi_get_src(ins, regs, 1),
                .src0_abs = ins->src_abs[0],
                .src1_abs = ins->src_abs[1],
                .src0_neg = ins->src_neg[0],
                .src1_neg = ins->src_neg[1],
                .unk = 0x0,
                .outmod = ins->outmod,
                /* roundmode doubles as min/max mode for min/max ops */
                .roundmode = (ins->type == BI_ADD) ? ins->roundmode : ins->minmax,
                .op = op
        };

        RETURN_PACKED(pack);
}
564
/* Computes the auxiliary abs bit (l) for commutative fp16 ops, possibly
 * requesting an operand swap via *flip. See the case analysis below for the
 * encoding; returns l. */
static bool
bi_pack_fp16_abs(bi_instruction *ins, bi_registers *regs, bool *flip)
{
        /* Absolute values are packed in a quirky way. Let k = src1 < src0. Let
         * l be an auxiliary bit we encode. Then the hardware determines:
         *
         *      abs0 = l || k
         *      abs1 = l && k
         *
         * Since add/min/max are commutative, this saves a bit by using the
         * order of the operands as a bit (k). To pack this, first note:
         *
         *      (l && k) implies (l || k).
         *
         * That is, if the second argument is abs'd, then the first argument
         * also has abs. So there are three cases:
         *
         * Case 0: Neither src has absolute value. Then we have l = k = 0.
         *
         * Case 1: Exactly one src has absolute value. Assign that source to
         * src0 and the other source to src1. Compute k = src1 < src0 based on
         * that assignment. Then l = ~k.
         *
         * Case 2: Both sources have absolute value. Then we have l = k = 1.
         * Note to force k = 1 requires that (src1 < src0) OR (src0 < src1).
         * That is, this encoding is only valid if src1 and src0 are distinct.
         * This is a scheduling restriction (XXX); if an op of this type
         * requires both identical sources to have abs value, then we must
         * schedule to ADD (which does not use this ordering trick).
         */

        unsigned abs_0 = ins->src_abs[0], abs_1 = ins->src_abs[1];
        unsigned src_0 = bi_get_src(ins, regs, 0);
        unsigned src_1 = bi_get_src(ins, regs, 1);

        /* Case 2 requires distinct packed sources (see above) */
        assert(!(abs_0 && abs_1 && src_0 == src_1));

        if (!abs_0 && !abs_1) {
                /* Force k = 0 <===> NOT(src1 < src0) */
                *flip = (src_1 < src_0);
                return false;
        } else if (abs_0 && !abs_1) {
                /* Case 1, no swap needed: l = ~k = src1 >= src0 */
                return src_1 >= src_0;
        } else if (abs_1 && !abs_0) {
                /* Case 1 with the abs'd source moved into slot 0 */
                *flip = true;
                return src_0 >= src_1;
        } else {
                /* Case 2: order the operands so that k = 1 */
                *flip = !(src_1 < src_0);
                return true;
        }
}
616
/* Packs fp16 FADD / FMIN / FMAX. FMA selects the FMA-unit encoding;
 * otherwise the ADD unit's fmin/fmax form is used. Operand order plus the l
 * bit encode the abs flags (see bi_pack_fp16_abs); all per-source modifiers
 * swap along with the sources when flip is set. */
static unsigned
bi_pack_fmadd_min_f16(bi_instruction *ins, bi_registers *regs, bool FMA)
{
        unsigned op =
                (!FMA) ? ((ins->op.minmax == BI_MINMAX_MIN) ?
                        BIFROST_ADD_OP_FMIN16 : BIFROST_ADD_OP_FMAX16) :
                (ins->type == BI_ADD) ? BIFROST_FMA_OP_FADD16 :
                (ins->op.minmax == BI_MINMAX_MIN) ? BIFROST_FMA_OP_FMIN16 :
                BIFROST_FMA_OP_FMAX16;

        bool flip = false;
        bool l = bi_pack_fp16_abs(ins, regs, &flip);
        unsigned src_0 = bi_get_src(ins, regs, 0);
        unsigned src_1 = bi_get_src(ins, regs, 1);

        if (FMA) {
                struct bifrost_fma_add_minmax16 pack = {
                        .src0 = flip ? src_1 : src_0,
                        .src1 = flip ? src_0 : src_1,
                        .src0_neg = ins->src_neg[flip ? 1 : 0],
                        .src1_neg = ins->src_neg[flip ? 0 : 1],
                        .src0_swizzle = bi_swiz16(ins, flip ? 1 : 0),
                        .src1_swizzle = bi_swiz16(ins, flip ? 0 : 1),
                        .abs1 = l,
                        .outmod = ins->outmod,
                        /* mode carries roundmode for ADD, min/max mode else */
                        .mode = (ins->type == BI_ADD) ? ins->roundmode : ins->minmax,
                        .op = op
                };

                RETURN_PACKED(pack);
        } else {
                /* Can't have modes for fp16 */
                assert(ins->outmod == 0);

                struct bifrost_add_fmin16 pack = {
                        .src0 = flip ? src_1 : src_0,
                        .src1 = flip ? src_0 : src_1,
                        .src0_neg = ins->src_neg[flip ? 1 : 0],
                        .src1_neg = ins->src_neg[flip ? 0 : 1],
                        .abs1 = l,
                        .src0_swizzle = bi_swiz16(ins, flip ? 1 : 0),
                        .src1_swizzle = bi_swiz16(ins, flip ? 0 : 1),
                        .mode = ins->minmax,
                        .op = op
                };

                RETURN_PACKED(pack);
        }
}
666
667 static unsigned
668 bi_pack_fma_addmin(bi_instruction *ins, bi_registers *regs)
669 {
670 if (ins->dest_type == nir_type_float32)
671 return bi_pack_fma_addmin_f32(ins, regs);
672 else if(ins->dest_type == nir_type_float16)
673 return bi_pack_fmadd_min_f16(ins, regs, true);
674 else
675 unreachable("Unknown FMA/ADD type");
676 }
677
678 static unsigned
679 bi_pack_fma_1src(bi_instruction *ins, bi_registers *regs, unsigned op)
680 {
681 struct bifrost_fma_inst pack = {
682 .src0 = bi_get_src(ins, regs, 0),
683 .op = op
684 };
685
686 RETURN_PACKED(pack);
687 }
688
689 static unsigned
690 bi_pack_fma_2src(bi_instruction *ins, bi_registers *regs, unsigned op)
691 {
692 struct bifrost_fma_2src pack = {
693 .src0 = bi_get_src(ins, regs, 0),
694 .src1 = bi_get_src(ins, regs, 1),
695 .op = op
696 };
697
698 RETURN_PACKED(pack);
699 }
700
701 static unsigned
702 bi_pack_add_1src(bi_instruction *ins, bi_registers *regs, unsigned op)
703 {
704 struct bifrost_add_inst pack = {
705 .src0 = bi_get_src(ins, regs, 0),
706 .op = op
707 };
708
709 RETURN_PACKED(pack);
710 }
711
/* Maps an IR comparison to a hardware csel condition. LT/LE/NE are
 * canonicalized to GT/GE/EQ by setting *flip (swap comparison operands) or
 * *invert (swap result operands); the float/signed/unsigned variant is
 * chosen from T. */
static enum bifrost_csel_cond
bi_cond_to_csel(enum bi_cond cond, bool *flip, bool *invert, nir_alu_type T)
{
        nir_alu_type B = nir_alu_type_get_base_type(T);
        unsigned idx = (B == nir_type_float) ? 0 :
                ((B == nir_type_int) ? 1 : 2);

        switch (cond){
        case BI_COND_LT:
                *flip = true;
                /* fallthrough: LT is GT with swapped operands */
        case BI_COND_GT: {
                const enum bifrost_csel_cond ops[] = {
                        BIFROST_FGT_F,
                        BIFROST_IGT_I,
                        BIFROST_UGT_I
                };

                return ops[idx];
        }
        case BI_COND_LE:
                *flip = true;
                /* fallthrough: LE is GE with swapped operands */
        case BI_COND_GE: {
                const enum bifrost_csel_cond ops[] = {
                        BIFROST_FGE_F,
                        BIFROST_IGE_I,
                        BIFROST_UGE_I
                };

                return ops[idx];
        }
        case BI_COND_NE:
                *invert = true;
                /* fallthrough: NE is EQ with swapped results */
        case BI_COND_EQ: {
                const enum bifrost_csel_cond ops[] = {
                        BIFROST_FEQ_F,
                        BIFROST_IEQ_F,
                        BIFROST_IEQ_F /* sign is irrelevant */
                };

                return ops[idx];
        }
        default:
                unreachable("Invalid op for csel");
        }
}
757
758 static unsigned
759 bi_pack_fma_csel(bi_instruction *ins, bi_registers *regs)
760 {
761 /* TODO: Use csel3 as well */
762 bool flip = false, invert = false;
763
764 enum bifrost_csel_cond cond =
765 bi_cond_to_csel(ins->cond, &flip, &invert, ins->src_types[0]);
766
767 unsigned size = nir_alu_type_get_type_size(ins->dest_type);
768
769 unsigned cmp_0 = (flip ? 1 : 0);
770 unsigned cmp_1 = (flip ? 0 : 1);
771 unsigned res_0 = (invert ? 3 : 2);
772 unsigned res_1 = (invert ? 2 : 3);
773
774 struct bifrost_csel4 pack = {
775 .src0 = bi_get_src(ins, regs, cmp_0),
776 .src1 = bi_get_src(ins, regs, cmp_1),
777 .src2 = bi_get_src(ins, regs, res_0),
778 .src3 = bi_get_src(ins, regs, res_1),
779 .cond = cond,
780 .op = (size == 16) ? BIFROST_FMA_OP_CSEL4_V16 :
781 BIFROST_FMA_OP_CSEL4
782 };
783
784 RETURN_PACKED(pack);
785 }
786
787 static unsigned
788 bi_pack_fma_frexp(bi_instruction *ins, bi_registers *regs)
789 {
790 unsigned op = BIFROST_FMA_OP_FREXPE_LOG;
791 return bi_pack_fma_1src(ins, regs, op);
792 }
793
794 static unsigned
795 bi_pack_fma_reduce(bi_instruction *ins, bi_registers *regs)
796 {
797 if (ins->op.reduce == BI_REDUCE_ADD_FREXPM) {
798 return bi_pack_fma_2src(ins, regs, BIFROST_FMA_OP_ADD_FREXPM);
799 } else {
800 unreachable("Invalid reduce op");
801 }
802 }
803
/* We have a single convert opcode in the IR but a number of opcodes that could
 * come out. In particular we have native opcodes for:
 *
 * [ui]16 --> [fui]32           -- int16_to_32
 * f16     --> f32              -- float16_to_32
 * f32     --> f16              -- float32_to_16
 * f32     --> [ui]32           -- float32_to_int
 * [ui]32  --> f32              -- int_to_float32
 * [fui]16 --> [fui]16          -- f2i_i2f16
 */

static unsigned
bi_pack_convert(bi_instruction *ins, bi_registers *regs, bool FMA)
{
        nir_alu_type from_base = nir_alu_type_get_base_type(ins->src_types[0]);
        unsigned from_size = nir_alu_type_get_type_size(ins->src_types[0]);
        bool from_unsigned = from_base == nir_type_uint;

        nir_alu_type to_base = nir_alu_type_get_base_type(ins->dest_type);
        unsigned to_size = nir_alu_type_get_type_size(ins->dest_type);
        bool to_unsigned = to_base == nir_type_uint;
        bool to_float = to_base == nir_type_float;

        /* Sanity check: the conversion must actually change something, and
         * sizes may only change by at most a factor of 2 */
        assert((from_base != to_base) || (from_size != to_size));
        assert((MAX2(from_size, to_size) / MIN2(from_size, to_size)) <= 2);

        /* f32 to f16 is special */
        if (from_size == 32 && to_size == 16 && from_base == nir_type_float && to_base == from_base) {
                /* TODO: second vectorized source? */
                struct bifrost_fma_2src pfma = {
                        .src0 = bi_get_src(ins, regs, 0),
                        .src1 = BIFROST_SRC_STAGE, /* 0 */
                        .op = BIFROST_FMA_FLOAT32_TO_16
                };

                struct bifrost_add_2src padd = {
                        .src0 = bi_get_src(ins, regs, 0),
                        .src1 = BIFROST_SRC_STAGE, /* 0 */
                        .op = BIFROST_ADD_FLOAT32_TO_16
                };

                if (FMA) {
                        RETURN_PACKED(pfma);
                } else {
                        RETURN_PACKED(padd);
                }
        }

        /* Otherwise, figure out the mode */
        unsigned op = 0;

        if (from_size == 16 && to_size == 32) {
                /* Widening conversions encode the source component directly */
                unsigned component = ins->swizzle[0][0];
                assert(component <= 1);

                if (from_base == nir_type_float)
                        op = BIFROST_CONVERT_5(component);
                else
                        op = BIFROST_CONVERT_4(from_unsigned, component, to_float);
        } else {
                unsigned mode = 0;
                unsigned swizzle = (from_size == 16) ? bi_swiz16(ins, 0) : 0;
                bool is_unsigned = from_unsigned;

                if (from_base == nir_type_float) {
                        /* float -> int: the signedness bit describes the
                         * integer side, i.e. the destination */
                        assert(to_base != nir_type_float);
                        is_unsigned = to_unsigned;

                        if (from_size == 32 && to_size == 32)
                                mode = BIFROST_CONV_F32_TO_I32;
                        else if (from_size == 16 && to_size == 16)
                                mode = BIFROST_CONV_F16_TO_I16;
                        else
                                unreachable("Invalid float conversion");
                } else {
                        /* int -> float, same width */
                        assert(to_base == nir_type_float);
                        assert(from_size == to_size);

                        if (to_size == 32)
                                mode = BIFROST_CONV_I32_TO_F32;
                        else if (to_size == 16)
                                mode = BIFROST_CONV_I16_TO_F16;
                        else
                                unreachable("Invalid int conversion");
                }

                /* Fixup swizzle for 32-bit only modes */

                if (mode == BIFROST_CONV_I32_TO_F32)
                        swizzle = 0b11;
                else if (mode == BIFROST_CONV_F32_TO_I32)
                        swizzle = 0b10;

                op = BIFROST_CONVERT(is_unsigned, ins->roundmode, swizzle, mode);

                /* Unclear what the top bit is for... maybe 16-bit related */
                bool mode2 = mode == BIFROST_CONV_F16_TO_I16;
                bool mode6 = mode == BIFROST_CONV_I16_TO_F16;

                if (!(mode2 || mode6))
                        op |= 0x100;
        }

        if (FMA)
                return bi_pack_fma_1src(ins, regs, BIFROST_FMA_CONVERT | op);
        else
                return bi_pack_add_1src(ins, regs, BIFROST_ADD_CONVERT | op);
}
913
/* Packs a SELECT (lane select): a two-source 16-bit lane select or a
 * four-source 8-bit byte select, chosen by source size. */
static unsigned
bi_pack_fma_select(bi_instruction *ins, bi_registers *regs)
{
        unsigned size = nir_alu_type_get_type_size(ins->src_types[0]);

        if (size == 16) {
                /* One swizzle bit per source selects its lane */
                unsigned swiz = (ins->swizzle[0][0] | (ins->swizzle[1][0] << 1));
                unsigned op = BIFROST_FMA_SEL_16(swiz);
                return bi_pack_fma_2src(ins, regs, op);
        } else if (size == 8) {
                unsigned swiz = 0;

                /* One bit per byte source: set when selecting the high lane */
                for (unsigned c = 0; c < 4; ++c) {
                        if (ins->swizzle[c][0]) {
                                /* Ensure lowering restriction is met */
                                assert(ins->swizzle[c][0] == 2);
                                swiz |= (1 << c);
                        }
                }

                struct bifrost_fma_sel8 pack = {
                        .src0 = bi_get_src(ins, regs, 0),
                        .src1 = bi_get_src(ins, regs, 1),
                        .src2 = bi_get_src(ins, regs, 2),
                        .src3 = bi_get_src(ins, regs, 3),
                        .swizzle = swiz,
                        .op = BIFROST_FMA_OP_SEL8
                };

                RETURN_PACKED(pack);
        } else {
                unreachable("Unimplemented");
        }
}
948
949 static enum bifrost_fcmp_cond
950 bi_fcmp_cond(enum bi_cond cond)
951 {
952 switch (cond) {
953 case BI_COND_LT: return BIFROST_OLT;
954 case BI_COND_LE: return BIFROST_OLE;
955 case BI_COND_GE: return BIFROST_OGE;
956 case BI_COND_GT: return BIFROST_OGT;
957 case BI_COND_EQ: return BIFROST_OEQ;
958 case BI_COND_NE: return BIFROST_UNE;
959 default: unreachable("Unknown bi_cond");
960 }
961 }
962
963 /* a <?> b <==> b <flip(?)> a (TODO: NaN behaviour?) */
964
965 static enum bifrost_fcmp_cond
966 bi_flip_fcmp(enum bifrost_fcmp_cond cond)
967 {
968 switch (cond) {
969 case BIFROST_OGT:
970 return BIFROST_OLT;
971 case BIFROST_OGE:
972 return BIFROST_OLE;
973 case BIFROST_OLT:
974 return BIFROST_OGT;
975 case BIFROST_OLE:
976 return BIFROST_OGE;
977 case BIFROST_OEQ:
978 case BIFROST_UNE:
979 return cond;
980 default:
981 unreachable("Unknown fcmp cond");
982 }
983 }
984
/* Packs a float comparison on the FMA unit: fp32 (with abs/neg modifiers) or
 * fp16 (with swizzles and the operand-order abs trick). Negation on src0 is
 * handled by flipping the condition; see the table below. */
static unsigned
bi_pack_fma_cmp(bi_instruction *ins, bi_registers *regs)
{
        nir_alu_type Tl = ins->src_types[0];
        nir_alu_type Tr = ins->src_types[1];

        if (Tl == nir_type_float32 || Tr == nir_type_float32) {
                /* TODO: Mixed 32/16 cmp */
                assert(Tl == Tr);

                enum bifrost_fcmp_cond cond = bi_fcmp_cond(ins->cond);

                /* Only src1 has neg, so we arrange:
                 *      a < b   --- native
                 *      a < -b  --- native
                 *      - a < -b <===> a > b
                 *      -a < b  <===> a > -b
                 * TODO: Is this NaN-precise?
                 */

                bool flip = ins->src_neg[0];
                bool neg =  ins->src_neg[0] ^ ins->src_neg[1];

                if (flip)
                        cond = bi_flip_fcmp(cond);

                struct bifrost_fma_fcmp pack = {
                        .src0 = bi_get_src(ins, regs, 0),
                        .src1 = bi_get_src(ins, regs, 1),
                        .src0_abs = ins->src_abs[0],
                        .src1_abs = ins->src_abs[1],
                        .src1_neg = neg,
                        .src_expand = 0,
                        .unk1 = 0,
                        .cond = cond,
                        .op = BIFROST_FMA_OP_FCMP_GL
                };

                RETURN_PACKED(pack);
        } else if (Tl == nir_type_float16 && Tr == nir_type_float16) {
                /* fp16 abs is encoded via operand order plus the l bit; a
                 * requested swap also flips the condition */
                bool flip = false;
                bool l = bi_pack_fp16_abs(ins, regs, &flip);
                enum bifrost_fcmp_cond cond = bi_fcmp_cond(ins->cond);

                if (flip)
                        cond = bi_flip_fcmp(cond);

                struct bifrost_fma_fcmp16 pack = {
                        .src0 = bi_get_src(ins, regs, flip ? 1 : 0),
                        .src1 = bi_get_src(ins, regs, flip ? 0 : 1),
                        .src0_swizzle = bi_swiz16(ins, flip ? 1 : 0),
                        .src1_swizzle = bi_swiz16(ins, flip ? 0 : 1),
                        .abs1 = l,
                        .unk = 0,
                        .cond = cond,
                        .op = BIFROST_FMA_OP_FCMP_GL_16,
                };

                RETURN_PACKED(pack);
        } else {
                unreachable("Unknown cmp type");
        }
}
1048
1049 static unsigned
1050 bi_fma_bitwise_op(enum bi_bitwise_op op, bool rshift)
1051 {
1052 switch (op) {
1053 case BI_BITWISE_OR:
1054 /* Via De Morgan's */
1055 return rshift ?
1056 BIFROST_FMA_OP_RSHIFT_NAND :
1057 BIFROST_FMA_OP_LSHIFT_NAND;
1058 case BI_BITWISE_AND:
1059 return rshift ?
1060 BIFROST_FMA_OP_RSHIFT_AND :
1061 BIFROST_FMA_OP_LSHIFT_AND;
1062 case BI_BITWISE_XOR:
1063 /* Shift direction handled out of band */
1064 return BIFROST_FMA_OP_RSHIFT_XOR;
1065 default:
1066 unreachable("Unknown op");
1067 }
1068 }
1069
1070 static unsigned
1071 bi_pack_fma_bitwise(bi_instruction *ins, bi_registers *regs)
1072 {
1073 unsigned size = nir_alu_type_get_type_size(ins->dest_type);
1074 assert(size <= 32);
1075
1076 bool invert_0 = ins->bitwise.src_invert[0];
1077 bool invert_1 = ins->bitwise.src_invert[1];
1078
1079 if (ins->op.bitwise == BI_BITWISE_OR) {
1080 /* Becomes NAND, so via De Morgan's:
1081 * f(A) | f(B) = ~(~f(A) & ~f(B))
1082 * = NAND(~f(A), ~f(B))
1083 */
1084
1085 invert_0 = !invert_0;
1086 invert_1 = !invert_1;
1087 } else if (ins->op.bitwise == BI_BITWISE_XOR) {
1088 /* ~A ^ ~B = ~(A ^ ~B) = ~(~(A ^ B)) = A ^ B
1089 * ~A ^ B = ~(A ^ B) = A ^ ~B
1090 */
1091
1092 invert_0 ^= invert_1;
1093 invert_1 = false;
1094
1095 /* invert_1 ends up specifying shift direction */
1096 invert_1 = !ins->bitwise.rshift;
1097 }
1098
1099 struct bifrost_shift_fma pack = {
1100 .src0 = bi_get_src(ins, regs, 0),
1101 .src1 = bi_get_src(ins, regs, 1),
1102 .src2 = bi_get_src(ins, regs, 2),
1103 .half = (size == 32) ? 0 : (size == 16) ? 0x7 : (size == 8) ? 0x4 : 0,
1104 .unk = 1, /* XXX */
1105 .invert_1 = invert_0,
1106 .invert_2 = invert_1,
1107 .op = bi_fma_bitwise_op(ins->op.bitwise, ins->bitwise.rshift)
1108 };
1109
1110 RETURN_PACKED(pack);
1111 }
1112
1113 static unsigned
1114 bi_pack_fma_round(bi_instruction *ins, bi_registers *regs)
1115 {
1116 bool fp16 = ins->dest_type == nir_type_float16;
1117 assert(fp16 || ins->dest_type == nir_type_float32);
1118
1119 unsigned op = fp16
1120 ? BIFROST_FMA_ROUND_16(ins->roundmode, bi_swiz16(ins, 0))
1121 : BIFROST_FMA_ROUND_32(ins->roundmode);
1122
1123 return bi_pack_fma_1src(ins, regs, op);
1124 }
1125
1126 static unsigned
1127 bi_pack_fma_imath(bi_instruction *ins, bi_registers *regs)
1128 {
1129 /* Scheduler: only ADD can have 8/16-bit imath */
1130 assert(ins->dest_type == nir_type_int32 || ins->dest_type == nir_type_uint32);
1131
1132 unsigned op = ins->op.imath == BI_IMATH_ADD
1133 ? BIFROST_FMA_IADD_32
1134 : BIFROST_FMA_ISUB_32;
1135
1136 return bi_pack_fma_2src(ins, regs, op);
1137 }
1138
1139 static unsigned
1140 bi_pack_fma(bi_clause *clause, bi_bundle bundle, bi_registers *regs)
1141 {
1142 if (!bundle.fma)
1143 return BIFROST_FMA_NOP;
1144
1145 switch (bundle.fma->type) {
1146 case BI_ADD:
1147 return bi_pack_fma_addmin(bundle.fma, regs);
1148 case BI_CMP:
1149 return bi_pack_fma_cmp(bundle.fma, regs);
1150 case BI_BITWISE:
1151 return bi_pack_fma_bitwise(bundle.fma, regs);
1152 case BI_CONVERT:
1153 return bi_pack_convert(bundle.fma, regs, true);
1154 case BI_CSEL:
1155 return bi_pack_fma_csel(bundle.fma, regs);
1156 case BI_FMA:
1157 return bi_pack_fma_fma(bundle.fma, regs);
1158 case BI_FREXP:
1159 return bi_pack_fma_frexp(bundle.fma, regs);
1160 case BI_IMATH:
1161 return bi_pack_fma_imath(bundle.fma, regs);
1162 case BI_MINMAX:
1163 return bi_pack_fma_addmin(bundle.fma, regs);
1164 case BI_MOV:
1165 return bi_pack_fma_1src(bundle.fma, regs, BIFROST_FMA_OP_MOV);
1166 case BI_SHIFT:
1167 unreachable("Packing todo");
1168 case BI_SELECT:
1169 return bi_pack_fma_select(bundle.fma, regs);
1170 case BI_ROUND:
1171 return bi_pack_fma_round(bundle.fma, regs);
1172 case BI_REDUCE_FMA:
1173 return bi_pack_fma_reduce(bundle.fma, regs);
1174 default:
1175 unreachable("Cannot encode class as FMA");
1176 }
1177 }
1178
static unsigned
bi_pack_add_ld_vary(bi_clause *clause, bi_instruction *ins, bi_registers *regs)
{
        /* Pack LD_VAR (varying load) for the ADD unit. Direct varyings
         * place an immediate location in the address field; indirect
         * ones read the address from src0. The result is delivered via
         * the clause's data register instead of a normal port. */

        unsigned size = nir_alu_type_get_type_size(ins->dest_type);
        assert(size == 32 || size == 16);

        unsigned op = (size == 32) ?
                BIFROST_ADD_OP_LD_VAR_32 :
                BIFROST_ADD_OP_LD_VAR_16;

        unsigned packed_addr = 0;

        if (ins->src[0] & BIR_INDEX_CONSTANT) {
                /* Direct uses address field directly */
                packed_addr = bi_get_immediate(ins, 0);
        } else {
                /* Indirect gets an extra source; the 0b11000 bits
                 * appear to tag the address as register-sourced --
                 * TODO confirm against the ISA notes */
                packed_addr = bi_get_src(ins, regs, 0) | 0b11000;
        }

        /* The destination is thrown in the data register */
        assert(ins->dest & BIR_INDEX_REGISTER);
        clause->data_register = ins->dest & ~BIR_INDEX_REGISTER;

        unsigned channels = ins->vector_channels;
        assert(channels >= 1 && channels <= 4);

        struct bifrost_ld_var pack = {
                .src0 = bi_get_src(ins, regs, 1),
                .addr = packed_addr,
                .channels = MALI_POSITIVE(channels), /* off-by-one encoding */
                .interp_mode = ins->load_vary.interp_mode,
                .reuse = ins->load_vary.reuse,
                .flat = ins->load_vary.flat,
                .op = op
        };

        RETURN_PACKED(pack);
}
1218
1219 static unsigned
1220 bi_pack_add_2src(bi_instruction *ins, bi_registers *regs, unsigned op)
1221 {
1222 struct bifrost_add_2src pack = {
1223 .src0 = bi_get_src(ins, regs, 0),
1224 .src1 = bi_get_src(ins, regs, 1),
1225 .op = op
1226 };
1227
1228 RETURN_PACKED(pack);
1229 }
1230
static unsigned
bi_pack_add_addmin_f32(bi_instruction *ins, bi_registers *regs)
{
        /* 32-bit FADD/FMIN/FMAX on the ADD unit. The shared 'mode'
         * field carries the round mode for adds but the min/max mode
         * for min/max. */
        unsigned op =
                (ins->type == BI_ADD) ? BIFROST_ADD_OP_FADD32 :
                (ins->op.minmax == BI_MINMAX_MIN) ? BIFROST_ADD_OP_FMIN32 :
                BIFROST_ADD_OP_FMAX32;

        struct bifrost_add_faddmin pack = {
                .src0 = bi_get_src(ins, regs, 0),
                .src1 = bi_get_src(ins, regs, 1),
                .src0_abs = ins->src_abs[0],
                .src1_abs = ins->src_abs[1],
                .src0_neg = ins->src_neg[0],
                .src1_neg = ins->src_neg[1],
                .outmod = ins->outmod,
                .mode = (ins->type == BI_ADD) ? ins->roundmode : ins->minmax,
                .op = op
        };

        RETURN_PACKED(pack);
}
1253
static unsigned
bi_pack_add_add_f16(bi_instruction *ins, bi_registers *regs)
{
        /* v2f16 add on the ADD unit. Note the encoding reuses fields:
         * 'select' carries swizzle_0 and 'outmod' carries swizzle_1,
         * which is why an output modifier is disallowed below. */

        /* ADD.v2f16 can't have outmod */
        assert(ins->outmod == BIFROST_NONE);

        struct bifrost_add_faddmin pack = {
                .src0 = bi_get_src(ins, regs, 0),
                .src1 = bi_get_src(ins, regs, 1),
                .src0_abs = ins->src_abs[0],
                .src1_abs = ins->src_abs[1],
                .src0_neg = ins->src_neg[0],
                .src1_neg = ins->src_neg[1],
                .select = bi_swiz16(ins, 0), /* swizzle_0 */
                .outmod = bi_swiz16(ins, 1), /* swizzle_1 */
                .mode = ins->roundmode,
                .op = BIFROST_ADD_OP_FADD16
        };

        RETURN_PACKED(pack);
}
1275
1276 static unsigned
1277 bi_pack_add_addmin(bi_instruction *ins, bi_registers *regs)
1278 {
1279 if (ins->dest_type == nir_type_float32)
1280 return bi_pack_add_addmin_f32(ins, regs);
1281 else if (ins->dest_type == nir_type_float16) {
1282 if (ins->type == BI_ADD)
1283 return bi_pack_add_add_f16(ins, regs);
1284 else
1285 return bi_pack_fmadd_min_f16(ins, regs, false);
1286 } else
1287 unreachable("Unknown FMA/ADD type");
1288 }
1289
static unsigned
bi_pack_add_ld_ubo(bi_clause *clause, bi_instruction *ins, bi_registers *regs)
{
        /* UBO load: a distinct opcode per channel count (1-4); the
         * result is delivered through the clause data register. */
        assert(ins->vector_channels >= 1 && ins->vector_channels <= 4);

        /* Opcode lookup indexed by (channel count - 1) */
        const unsigned ops[4] = {
                BIFROST_ADD_OP_LD_UBO_1,
                BIFROST_ADD_OP_LD_UBO_2,
                BIFROST_ADD_OP_LD_UBO_3,
                BIFROST_ADD_OP_LD_UBO_4
        };

        bi_write_data_register(clause, ins);
        return bi_pack_add_2src(ins, regs, ops[ins->vector_channels - 1]);
}
1305
/* Translate a NIR ALU type to the hardware load/store type field.
 * Only f16/f32/i32/u32 are representable. */
static enum bifrost_ldst_type
bi_pack_ldst_type(nir_alu_type T)
{
        switch (T) {
        case nir_type_float16: return BIFROST_LDST_F16;
        case nir_type_float32: return BIFROST_LDST_F32;
        case nir_type_int32:   return BIFROST_LDST_I32;
        case nir_type_uint32:  return BIFROST_LDST_U32;
        default: unreachable("Invalid type loaded");
        }
}
1317
static unsigned
bi_pack_add_ld_var_addr(bi_clause *clause, bi_instruction *ins, bi_registers *regs)
{
        /* LD_VAR_ADDR: varying address computation. The immediate in
         * src 0 is the location; src_types[3] supplies the type of the
         * eventual load; result goes to the clause data register. */
        struct bifrost_ld_var_addr pack = {
                .src0 = bi_get_src(ins, regs, 1),
                .src1 = bi_get_src(ins, regs, 2),
                .location = bi_get_immediate(ins, 0),
                .type = bi_pack_ldst_type(ins->src_types[3]),
                .op = BIFROST_ADD_OP_LD_VAR_ADDR
        };

        bi_write_data_register(clause, ins);
        RETURN_PACKED(pack);
}
1332
1333 static unsigned
1334 bi_pack_add_ld_attr(bi_clause *clause, bi_instruction *ins, bi_registers *regs)
1335 {
1336 assert(ins->vector_channels >= 0 && ins->vector_channels <= 4);
1337
1338 struct bifrost_ld_attr pack = {
1339 .src0 = bi_get_src(ins, regs, 1),
1340 .src1 = bi_get_src(ins, regs, 2),
1341 .location = bi_get_immediate(ins, 0),
1342 .channels = MALI_POSITIVE(ins->vector_channels),
1343 .type = bi_pack_ldst_type(ins->dest_type),
1344 .op = BIFROST_ADD_OP_LD_ATTR
1345 };
1346
1347 bi_write_data_register(clause, ins);
1348 RETURN_PACKED(pack);
1349 }
1350
static unsigned
bi_pack_add_st_vary(bi_clause *clause, bi_instruction *ins, bi_registers *regs)
{
        /* ST_VAR (varying store): three address/index sources, with the
         * stored value routed through the clause data register
         * (bi_read_data_register). Channel count is off-by-one encoded. */
        assert(ins->vector_channels >= 1 && ins->vector_channels <= 4);

        struct bifrost_st_vary pack = {
                .src0 = bi_get_src(ins, regs, 1),
                .src1 = bi_get_src(ins, regs, 2),
                .src2 = bi_get_src(ins, regs, 3),
                .channels = MALI_POSITIVE(ins->vector_channels),
                .op = BIFROST_ADD_OP_ST_VAR
        };

        bi_read_data_register(clause, ins);
        RETURN_PACKED(pack);
}
1367
static unsigned
bi_pack_add_atest(bi_clause *clause, bi_instruction *ins, bi_registers *regs)
{
        /* ATEST (alpha test). For an fp16 alpha source, the swizzle
         * picks which half to test; for fp32 the component field is
         * fixed to 1. */
        bool fp16 = (ins->src_types[1] == nir_type_float16);

        struct bifrost_add_atest pack = {
                .src0 = bi_get_src(ins, regs, 0),
                .src1 = bi_get_src(ins, regs, 1),
                .half = fp16,
                .component = fp16 ? ins->swizzle[1][0] : 1, /* Set for fp32 */
                .op = BIFROST_ADD_OP_ATEST,
        };

        /* Despite *also* writing with the usual mechanism... quirky and
         * perhaps unnecessary, but let's match the blob */
        clause->data_register = ins->dest & ~BIR_INDEX_REGISTER;

        RETURN_PACKED(pack);
}
1387
static unsigned
bi_pack_add_blend(bi_clause *clause, bi_instruction *ins, bi_registers *regs)
{
        /* BLEND: the colour operand is read through the clause data
         * register; only render target 0 is supported for now. */
        struct bifrost_add_inst pack = {
                .src0 = bi_get_src(ins, regs, 1),
                .op = BIFROST_ADD_OP_BLEND
        };

        /* TODO: Pack location in uniform_const */
        assert(ins->blend_location == 0);

        bi_read_data_register(clause, ins);
        RETURN_PACKED(pack);
}
1402
1403 static unsigned
1404 bi_pack_add_special(bi_instruction *ins, bi_registers *regs)
1405 {
1406 unsigned op = 0;
1407 bool fp16 = ins->dest_type == nir_type_float16;
1408 bool Y = ins->swizzle[0][0];
1409
1410 if (ins->op.special == BI_SPECIAL_FRCP) {
1411 op = fp16 ?
1412 (Y ? BIFROST_ADD_OP_FRCP_FAST_F16_Y :
1413 BIFROST_ADD_OP_FRCP_FAST_F16_X) :
1414 BIFROST_ADD_OP_FRCP_FAST_F32;
1415 } else if (ins->op.special == BI_SPECIAL_FRSQ) {
1416 op = fp16 ?
1417 (Y ? BIFROST_ADD_OP_FRSQ_FAST_F16_Y :
1418 BIFROST_ADD_OP_FRSQ_FAST_F16_X) :
1419 BIFROST_ADD_OP_FRSQ_FAST_F32;
1420
1421 } else if (ins->op.special == BI_SPECIAL_EXP2_LOW) {
1422 assert(!fp16);
1423 op = BIFROST_ADD_OP_FEXP2_FAST;
1424 } else {
1425 unreachable("Unknown special op");
1426 }
1427
1428 return bi_pack_add_1src(ins, regs, op);
1429 }
1430
1431 static unsigned
1432 bi_pack_add_table(bi_instruction *ins, bi_registers *regs)
1433 {
1434 unsigned op = 0;
1435 assert(ins->dest_type == nir_type_float32);
1436
1437 op = BIFROST_ADD_OP_LOG2_HELP;
1438 return bi_pack_add_1src(ins, regs, op);
1439 }
static unsigned
bi_pack_add_tex_compact(bi_clause *clause, bi_instruction *ins, bi_registers *regs, gl_shader_stage stage)
{
        /* Compact texture fetch. The opcode differs for fragment vs
         * vertex-class stages, and LOD computation is only enabled in
         * fragment shaders; the result uses the clause data register. */
        bool f16 = ins->dest_type == nir_type_float16;
        bool vtx = stage != MESA_SHADER_FRAGMENT;

        struct bifrost_tex_compact pack = {
                .src0 = bi_get_src(ins, regs, 0),
                .src1 = bi_get_src(ins, regs, 1),
                .op = f16 ? BIFROST_ADD_OP_TEX_COMPACT_F16(vtx) :
                        BIFROST_ADD_OP_TEX_COMPACT_F32(vtx),
                .compute_lod = !vtx, /* implicit LOD only in fragment */
                .tex_index = ins->texture.texture_index,
                .sampler_index = ins->texture.sampler_index
        };

        bi_write_data_register(clause, ins);
        RETURN_PACKED(pack);
}
1459
1460 static unsigned
1461 bi_pack_add_select(bi_instruction *ins, bi_registers *regs)
1462 {
1463 unsigned size = nir_alu_type_get_type_size(ins->src_types[0]);
1464 assert(size == 16);
1465
1466 unsigned swiz = (ins->swizzle[0][0] | (ins->swizzle[1][0] << 1));
1467 unsigned op = BIFROST_ADD_SEL_16(swiz);
1468 return bi_pack_add_2src(ins, regs, op);
1469 }
1470
/* Map an IR condition to a hardware discard condition. The hardware
 * only provides FLT/FLE (plus FEQ/FNE), so GT/GE set *flip and the
 * caller swaps the operands. */
static enum bifrost_discard_cond
bi_cond_to_discard(enum bi_cond cond, bool *flip)
{
        switch (cond){
        case BI_COND_GT:
                *flip = true;
                /* fallthrough */
        case BI_COND_LT:
                return BIFROST_DISCARD_FLT;
        case BI_COND_GE:
                *flip = true;
                /* fallthrough */
        case BI_COND_LE:
                return BIFROST_DISCARD_FLE;
        case BI_COND_NE:
                return BIFROST_DISCARD_FNE;
        case BI_COND_EQ:
                return BIFROST_DISCARD_FEQ;
        default:
                unreachable("Invalid op for discard");
        }
}
1493
static unsigned
bi_pack_add_discard(bi_instruction *ins, bi_registers *regs)
{
        /* Conditional discard on a float compare (f16 or f32). When
         * bi_cond_to_discard requests a flip, the two sources (and
         * their half-selects) are swapped to realize GT/GE. */
        bool fp16 = ins->src_types[0] == nir_type_float16;
        assert(fp16 || ins->src_types[0] == nir_type_float32);

        bool flip = false;
        enum bifrost_discard_cond cond = bi_cond_to_discard(ins->cond, &flip);

        struct bifrost_add_discard pack = {
                .src0 = bi_get_src(ins, regs, flip ? 1 : 0),
                .src1 = bi_get_src(ins, regs, flip ? 0 : 1),
                .cond = cond,
                .src0_select = fp16 ? ins->swizzle[0][0] : 0,
                .src1_select = fp16 ? ins->swizzle[1][0] : 0,
                .fp32 = fp16 ? 0 : 1,
                .op = BIFROST_ADD_OP_DISCARD
        };

        RETURN_PACKED(pack);
}
1515
/* Map an IR condition to a hardware integer compare condition. LT/LE
 * set *flip so the caller swaps operands and uses GT/GE instead.
 *
 * NOTE(review): in the unsigned 16-bit cases, GT maps to IGE and GE to
 * UGT -- the signed/unsigned encodings look crossed relative to the
 * 32-bit path. Presumably a hardware quirk; confirm against the ISA
 * notes before relying on it. */
static enum bifrost_icmp_cond
bi_cond_to_icmp(enum bi_cond cond, bool *flip, bool is_unsigned, bool is_16)
{
        switch (cond){
        case BI_COND_LT:
                *flip = true;
                /* fallthrough */
        case BI_COND_GT:
                return is_unsigned ? (is_16 ? BIFROST_ICMP_IGE : BIFROST_ICMP_UGT)
                        : BIFROST_ICMP_IGT;
        case BI_COND_LE:
                *flip = true;
                /* fallthrough */
        case BI_COND_GE:
                return is_unsigned ? BIFROST_ICMP_UGE :
                        (is_16 ? BIFROST_ICMP_UGT : BIFROST_ICMP_IGE);
        case BI_COND_NE:
                return BIFROST_ICMP_NEQ;
        case BI_COND_EQ:
                return BIFROST_ICMP_EQ;
        default:
                unreachable("Invalid op for icmp");
        }
}
1540
static unsigned
bi_pack_add_icmp32(bi_instruction *ins, bi_registers *regs, bool flip,
                enum bifrost_icmp_cond cond)
{
        /* 32-bit integer compare; 'flip' swaps the operands to realize
         * conditions the hardware lacks (see bi_cond_to_icmp). */
        struct bifrost_add_icmp pack = {
                .src0 = bi_get_src(ins, regs, flip ? 1 : 0),
                .src1 = bi_get_src(ins, regs, flip ? 0 : 1),
                .cond = cond,
                .sz = 1, /* presumably the 32-bit size select -- confirm */
                .d3d = false,
                .op = BIFROST_ADD_OP_ICMP_32
        };

        RETURN_PACKED(pack);
}
1556
static unsigned
bi_pack_add_icmp16(bi_instruction *ins, bi_registers *regs, bool flip,
                enum bifrost_icmp_cond cond)
{
        /* 16-bit integer compare; when flipped, the half-select
         * swizzles are swapped along with the sources. */
        struct bifrost_add_icmp16 pack = {
                .src0 = bi_get_src(ins, regs, flip ? 1 : 0),
                .src1 = bi_get_src(ins, regs, flip ? 0 : 1),
                .src0_swizzle = bi_swiz16(ins, flip ? 1 : 0),
                .src1_swizzle = bi_swiz16(ins, flip ? 0 : 1),
                .cond = cond,
                .d3d = false,
                .op = BIFROST_ADD_OP_ICMP_16
        };

        RETURN_PACKED(pack);
}
1573
static unsigned
bi_pack_add_cmp(bi_instruction *ins, bi_registers *regs)
{
        /* Pack an integer compare on the ADD unit; float compares are
         * not handled here yet. Both sources must share a type. */
        nir_alu_type Tl = ins->src_types[0];
        nir_alu_type Tr = ins->src_types[1];
        nir_alu_type Bl = nir_alu_type_get_base_type(Tl);

        if (Bl == nir_type_uint || Bl == nir_type_int) {
                assert(Tl == Tr);
                unsigned sz = nir_alu_type_get_type_size(Tl);

                bool flip = false;

                /* NOTE(review): the commented-out bi_invert_cond hints
                 * that 16-bit compares may need condition inversion --
                 * confirm whether bi_cond_to_icmp's is_16 handling
                 * fully covers this. */
                enum bifrost_icmp_cond cond = bi_cond_to_icmp(
                        sz == 16 ? /*bi_invert_cond*/(ins->cond) : ins->cond,
                        &flip, Bl == nir_type_uint, sz == 16);

                if (sz == 32)
                        return bi_pack_add_icmp32(ins, regs, flip, cond);
                else if (sz == 16)
                        return bi_pack_add_icmp16(ins, regs, flip, cond);
                else
                        unreachable("TODO");
        } else {
                unreachable("TODO");
        }
}
1601
1602 static unsigned
1603 bi_pack_add_imath(bi_instruction *ins, bi_registers *regs)
1604 {
1605 /* TODO: 32+16 add */
1606 assert(ins->src_types[0] == ins->src_types[1]);
1607 unsigned sz = nir_alu_type_get_type_size(ins->src_types[0]);
1608 enum bi_imath_op p = ins->op.imath;
1609
1610 unsigned op = 0;
1611
1612 if (sz == 8) {
1613 op = (p == BI_IMATH_ADD) ? BIFROST_ADD_IADD_8 :
1614 BIFROST_ADD_ISUB_8;
1615 } else if (sz == 16) {
1616 op = (p == BI_IMATH_ADD) ? BIFROST_ADD_IADD_16 :
1617 BIFROST_ADD_ISUB_16;
1618 } else if (sz == 32) {
1619 op = (p == BI_IMATH_ADD) ? BIFROST_ADD_IADD_32 :
1620 BIFROST_ADD_ISUB_32;
1621 } else {
1622 unreachable("64-bit todo");
1623 }
1624
1625 return bi_pack_add_2src(ins, regs, op);
1626 }
1627
static unsigned
bi_pack_add_branch_cond(bi_instruction *ins, bi_registers *regs)
{
        /* Conditional branch. Only compare-equal-to-zero is supported
         * (src1 must be BIR_INDEX_ZERO); the branch offset arrives via
         * the constant port (see bi_pack_constants). */
        assert(ins->cond == BI_COND_EQ);
        assert(ins->src[1] == BIR_INDEX_ZERO);

        unsigned zero_ctrl = 0;
        unsigned size = nir_alu_type_get_type_size(ins->src_types[0]);

        if (size == 16) {
                /* See BR_SIZE_ZERO swizzle disassembly */
                zero_ctrl = ins->swizzle[0][0] ? 1 : 2;
        } else {
                assert(size == 32);
        }

        /* EQ swap to NE */
        bool port_swapped = false;

        /* We assigned the constant port to fetch the branch offset so we can
         * just passthrough here. We put in the HI slot to match the blob since
         * that's where the magic flags end up */
        struct bifrost_branch pack = {
                .src0 = bi_get_src(ins, regs, 0),
                .src1 = (zero_ctrl << 1) | !port_swapped,
                .src2 = BIFROST_SRC_CONST_HI,
                .cond = BR_COND_EQ,
                .size = BR_SIZE_ZERO,
                .op = BIFROST_ADD_OP_BRANCH
        };

        RETURN_PACKED(pack);
}
1661
static unsigned
bi_pack_add_branch_uncond(bi_instruction *ins, bi_registers *regs)
{
        /* Unconditional branch; the offset is carried by the constant
         * port, matching the blob's encoding. */
        struct bifrost_branch pack = {
                /* It's unclear what these bits actually mean */
                .src0 = BIFROST_SRC_CONST_LO,
                .src1 = BIFROST_SRC_PASS_FMA,

                /* Offset, see above */
                .src2 = BIFROST_SRC_CONST_HI,

                /* BR_ALWAYS is split across the cond and size fields --
                 * all ones in fact */
                .cond = (BR_ALWAYS & 0x7),
                .size = (BR_ALWAYS >> 3),
                .op = BIFROST_ADD_OP_BRANCH
        };

        RETURN_PACKED(pack);
}
1681
1682 static unsigned
1683 bi_pack_add_branch(bi_instruction *ins, bi_registers *regs)
1684 {
1685 if (ins->cond == BI_COND_ALWAYS)
1686 return bi_pack_add_branch_uncond(ins, regs);
1687 else
1688 return bi_pack_add_branch_cond(ins, regs);
1689 }
1690
1691 static unsigned
1692 bi_pack_add(bi_clause *clause, bi_bundle bundle, bi_registers *regs, gl_shader_stage stage)
1693 {
1694 if (!bundle.add)
1695 return BIFROST_ADD_NOP;
1696
1697 switch (bundle.add->type) {
1698 case BI_ADD:
1699 return bi_pack_add_addmin(bundle.add, regs);
1700 case BI_ATEST:
1701 return bi_pack_add_atest(clause, bundle.add, regs);
1702 case BI_BRANCH:
1703 return bi_pack_add_branch(bundle.add, regs);
1704 case BI_CMP:
1705 return bi_pack_add_cmp(bundle.add, regs);
1706 case BI_BLEND:
1707 return bi_pack_add_blend(clause, bundle.add, regs);
1708 case BI_BITWISE:
1709 unreachable("Packing todo");
1710 case BI_CONVERT:
1711 return bi_pack_convert(bundle.add, regs, false);
1712 case BI_DISCARD:
1713 return bi_pack_add_discard(bundle.add, regs);
1714 case BI_FREXP:
1715 unreachable("Packing todo");
1716 case BI_IMATH:
1717 return bi_pack_add_imath(bundle.add, regs);
1718 case BI_LOAD:
1719 unreachable("Packing todo");
1720 case BI_LOAD_ATTR:
1721 return bi_pack_add_ld_attr(clause, bundle.add, regs);
1722 case BI_LOAD_UNIFORM:
1723 return bi_pack_add_ld_ubo(clause, bundle.add, regs);
1724 case BI_LOAD_VAR:
1725 return bi_pack_add_ld_vary(clause, bundle.add, regs);
1726 case BI_LOAD_VAR_ADDRESS:
1727 return bi_pack_add_ld_var_addr(clause, bundle.add, regs);
1728 case BI_MINMAX:
1729 return bi_pack_add_addmin(bundle.add, regs);
1730 case BI_MOV:
1731 case BI_SHIFT:
1732 case BI_STORE:
1733 unreachable("Packing todo");
1734 case BI_STORE_VAR:
1735 return bi_pack_add_st_vary(clause, bundle.add, regs);
1736 case BI_SPECIAL:
1737 return bi_pack_add_special(bundle.add, regs);
1738 case BI_TABLE:
1739 return bi_pack_add_table(bundle.add, regs);
1740 case BI_SELECT:
1741 return bi_pack_add_select(bundle.add, regs);
1742 case BI_TEX:
1743 if (bundle.add->op.texture == BI_TEX_COMPACT)
1744 return bi_pack_add_tex_compact(clause, bundle.add, regs, stage);
1745 else
1746 unreachable("Unknown tex type");
1747 case BI_ROUND:
1748 unreachable("Packing todo");
1749 default:
1750 unreachable("Cannot encode class as ADD");
1751 }
1752 }
1753
/* A packed bundle: the register block and FMA word plus the low bits
 * of the ADD word live in 'lo'; the remaining ADD bits in 'hi' (see
 * bi_pack_bundle for the exact slicing). */
struct bi_packed_bundle {
        uint64_t lo;
        uint64_t hi;
};
1758
1759 /* We must ensure port 1 > port 0 for the 63-x trick to function, so we fix
1760 * this up at pack time. (Scheduling doesn't care.) */
1761
1762 static void
1763 bi_flip_ports(bi_registers *regs)
1764 {
1765 if (regs->enabled[0] && regs->enabled[1] && regs->port[1] < regs->port[0]) {
1766 unsigned temp = regs->port[0];
1767 regs->port[0] = regs->port[1];
1768 regs->port[1] = temp;
1769 }
1770
1771 }
1772
static struct bi_packed_bundle
bi_pack_bundle(bi_clause *clause, bi_bundle bundle, bi_bundle prev, bool first_bundle, gl_shader_stage stage)
{
        /* Pack one bundle: assign ports and uniform/constant state,
         * then encode the register block, FMA word and ADD word, and
         * splice them into two 64-bit halves (registers at bit 0, FMA
         * at bit 35, ADD straddling the lo/hi boundary at bit 58). */
        bi_assign_ports(&bundle, &prev);
        bi_assign_uniform_constant(clause, &bundle.regs, bundle);
        bundle.regs.first_instruction = first_bundle;

        /* Restore port ordering invariant before encoding */
        bi_flip_ports(&bundle.regs);

        uint64_t reg = bi_pack_registers(bundle.regs);
        uint64_t fma = bi_pack_fma(clause, bundle, &bundle.regs);
        uint64_t add = bi_pack_add(clause, bundle, &bundle.regs, stage);

        struct bi_packed_bundle packed = {
                .lo = reg | (fma << 35) | ((add & 0b111111) << 58),
                .hi = add >> 6
        };

        return packed;
}
1793
1794 /* Packs the next two constants as a dedicated constant quadword at the end of
1795 * the clause, returning the number packed. There are two cases to consider:
1796 *
1797 * Case #1: Branching is not used. For a single constant copy the upper nibble
1798 * over, easy.
1799 *
1800 * Case #2: Branching is used. For a single constant, it suffices to set the
1801 * upper nibble to 4 and leave the latter constant 0, which matches what the
1802 * blob does.
1803 *
1804 * Extending to multiple constants is considerably more tricky and left for
1805 * future work.
1806 */
1807
/* Pack up to two clause constants (from 'index') as one constant
 * quadword appended to 'emission', returning how many constant slots
 * were consumed. Branch clauses get their offset computed and patched
 * into the constant here. See the case analysis in the comment above. */
static unsigned
bi_pack_constants(bi_context *ctx, bi_clause *clause,
                unsigned index,
                struct util_dynarray *emission)
{
        /* After these two, are we done? Determines tag */
        bool done = clause->constant_count <= (index + 2);
        bool only = clause->constant_count <= (index + 1);

        /* Is the constant we're packing for a branch? */
        bool branches = clause->branch_constant && done;

        /* TODO: Pos */
        assert(index == 0 && clause->bundle_count == 1);
        assert(only);

        /* Compute branch offset instead of a dummy 0 */
        if (branches) {
                bi_instruction *br = clause->bundles[clause->bundle_count - 1].add;
                assert(br && br->type == BI_BRANCH && br->branch_target);

                /* Put it in the high place */
                int32_t qwords = bi_block_offset(ctx, clause, br->branch_target);
                int32_t bytes = qwords * 16;

                /* Copy so we get proper sign behaviour */
                uint32_t raw = 0;
                memcpy(&raw, &bytes, sizeof(raw));

                /* Clear off top bits for the magic bits */
                raw &= ~0xF0000000;

                /* Put in top 32-bits */
                clause->constants[index + 0] = ((uint64_t) raw) << 32ull;
        }

        /* Top nibble of the first constant, replicated into imm_2 */
        uint64_t hi = clause->constants[index + 0] >> 60ull;

        struct bifrost_fmt_constant quad = {
                .pos = 0, /* TODO */
                .tag = done ? BIFROST_FMTC_FINAL : BIFROST_FMTC_CONSTANTS,
                .imm_1 = clause->constants[index + 0] >> 4,
                .imm_2 = ((hi < 8) ? (hi << 60ull) : 0) >> 4,
        };

        if (branches) {
                /* Branch offsets are less than 60-bits so this should work at
                 * least for now */
                quad.imm_1 |= (4ull << 60ull) >> 4;
                assert (hi == 0);
        }

        /* XXX: On G71, Connor observed that the difference of the top 4 bits
         * of the second constant with the first must be less than 8, otherwise
         * we have to swap them. On G52, I'm able to reproduce a similar issue
         * but with a different workaround (modeled above with a single
         * constant, unclear how to workaround for multiple constants.) Further
         * investigation needed. Possibly an errata. XXX */

        util_dynarray_append(emission, struct bifrost_fmt_constant, quad);

        return 2;
}
1871
/* Emit one clause in FMT1 layout: the header plus the single packed
 * bundle, followed by any constant quadwords. next_1/next_2 are the
 * successor clauses fed to the header's dependency fields. Only
 * single-bundle clauses are supported so far. */
static void
bi_pack_clause(bi_context *ctx, bi_clause *clause,
                bi_clause *next_1, bi_clause *next_2,
                struct util_dynarray *emission, gl_shader_stage stage)
{
        struct bi_packed_bundle ins_1 = bi_pack_bundle(clause, clause->bundles[0], clause->bundles[0], true, stage);
        assert(clause->bundle_count == 1);

        /* Used to decide if we elide writes */
        bool is_fragment = ctx->stage == MESA_SHADER_FRAGMENT;

        /* State for packing constants throughout */
        unsigned constant_index = 0;

        struct bifrost_fmt1 quad_1 = {
                .tag = clause->constant_count ? BIFROST_FMT1_CONSTANTS : BIFROST_FMT1_FINAL,
                .header = bi_pack_header(clause, next_1, next_2, is_fragment),
                .ins_1 = ins_1.lo,
                .ins_2 = ins_1.hi & ((1 << 11) - 1), /* low 11 bits of hi */
                .ins_0 = (ins_1.hi >> 11) & 0b111,   /* next 3 bits of hi */
        };

        util_dynarray_append(emission, struct bifrost_fmt1, quad_1);

        /* Pack the remaining constants */

        while (constant_index < clause->constant_count) {
                constant_index += bi_pack_constants(ctx, clause,
                                constant_index, emission);
        }
}
1903
/* Find the clause executed after 'clause' in layout order: the next
 * clause of this block if any, otherwise the first clause of the next
 * non-empty block. Returns NULL at the end of the shader. */
static bi_clause *
bi_next_clause(bi_context *ctx, pan_block *block, bi_clause *clause)
{
        /* Try the next clause in this block */
        if (clause->link.next != &((bi_block *) block)->clauses)
                return list_first_entry(&(clause->link), bi_clause, link);

        /* Try the next block, or the one after that if it's empty, etc. */
        pan_block *next_block = pan_next_block(block);

        bi_foreach_block_from(ctx, next_block, block) {
                bi_block *blk = (bi_block *) block;

                if (!list_is_empty(&blk->clauses))
                        return list_first_entry(&(blk->clauses), bi_clause, link);
        }

        return NULL;
}
1923
/* Final emission: walk every clause of every block in order, packing
 * each with its successor clause (used by the clause header's
 * dependency fields). Only one successor is supplied for now; the
 * second successor slot is passed as NULL. */
void
bi_pack(bi_context *ctx, struct util_dynarray *emission)
{
        util_dynarray_init(emission, NULL);

        bi_foreach_block(ctx, _block) {
                bi_block *block = (bi_block *) _block;

                bi_foreach_clause_in_block(block, clause) {
                        bi_clause *next = bi_next_clause(ctx, _block, clause);
                        bi_pack_clause(ctx, clause, next, NULL, emission, ctx->stage);
                }
        }
}