2 * Copyright (C) 2020 Collabora, Ltd.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
/* Bit-copies a packed bitfield struct into an integer temporary and returns
 * it — used by all the bi_pack_* helpers below to produce raw encodings.
 * NOTE(review): the extraction dropped interior lines of this macro (the
 * `temp` declaration and the return statement are not visible); confirm
 * against the original file. */
27 #define RETURN_PACKED(str) { \
29 memcpy(&temp, &str, sizeof(str)); \
33 /* This file contains the final passes of the compiler. Running after
34 * scheduling and RA, the IR is now finalized, so we need to emit it to actual
35 * bits on the wire (as well as fixup branches) */
/* Builds the per-clause bifrost_header from the current clause and the next
 * one (next == NULL marks the last clause, hence .no_end_of_shader), then
 * bit-copies the header into an integer via memcpy.
 * NOTE(review): extraction gaps — the return type, opening brace, the end of
 * the initializer, the declaration of `u`, and the return statement are on
 * dropped lines. */
38 bi_pack_header(bi_clause
*clause
, bi_clause
*next
, bool is_fragment
)
40 struct bifrost_header header
= {
41 .back_to_back
= clause
->back_to_back
,
42 .no_end_of_shader
= (next
!= NULL
),
43 .elide_writes
= is_fragment
,
44 .branch_cond
= clause
->branch_conditional
,
45 .datareg_writebarrier
= clause
->data_register_write_barrier
,
46 .datareg
= clause
->data_register
,
47 .scoreboard_deps
= next
? next
->dependencies
: 0,
48 .scoreboard_index
= clause
->scoreboard_id
,
49 .clause_type
= clause
->clause_type
,
50 .next_clause_type
= next
? next
->clause_type
: 0,
/* (initializer terminator dropped by extraction) */
/* Back-to-back clauses imply the branch-conditional bit must be set too. */
55 header
.branch_cond
|= header
.back_to_back
;
/* NOTE(review): `u` is declared on a dropped line; the header bits are
 * copied into it and presumably returned — confirm against the original. */
58 memcpy(&u
, &header
, sizeof(header
));
62 /* The uniform/constant slot allows loading a contiguous 64-bit immediate or
63 * pushed uniform per bundle. Figure out which one we need in the bundle (the
64 * scheduler needs to ensure we only have one type per bundle), validate
65 * everything, and rewrite away the register/uniform indices to use 3-bit
66 * sources directly. */
/* Finds which embedded clause constant matches `cons`, comparing only the
 * top 60 bits (the low 4 bits are carried inline in the bundle). For <64-bit
 * operands the lo/hi 32-bit halves of each constant are candidates
 * separately; *hi presumably reports which half matched (set on a dropped
 * line — confirm). Aborts via unreachable() if no constant matches, since
 * the scheduler must have embedded it.
 * NOTE(review): the opening brace, the candidates[] terminator, the b64
 * conditional and both return paths are on lines dropped by extraction. */
69 bi_lookup_constant(bi_clause
*clause
, uint64_t cons
, bool *hi
, bool b64
)
71 uint64_t want
= (cons
>> 4);
73 for (unsigned i
= 0; i
< clause
->constant_count
; ++i
) {
74 /* Only check top 60-bits since that's what's actually embedded
75 * in the clause, the bottom 4-bits are bundle-inline */
77 uint64_t candidates
[2] = {
78 clause
->constants
[i
] >> 4,
79 clause
->constants
[i
] >> 36
82 /* For <64-bit mode, we treat lo/hi separately */
/* Mask the low candidate to 28 significant bits in the narrow case. */
85 candidates
[0] &= (0xFFFFFFFF >> 4);
87 if (candidates
[0] == want
)
/* (return path dropped by extraction) */
90 if (candidates
[1] == want
&& !b64
) {
/* (return path dropped by extraction) */
96 unreachable("Invalid constant accessed");
/* Maps a clause constant index to its uniform_constant field encoding; the
 * looked-up value occupies the bits above the 4 inline low bits, hence the
 * << 4.
 * NOTE(review): the contents of the values[] table are on dropped lines —
 * confirm the table (and any bounds assert) against the original file. */
100 bi_constant_field(unsigned idx
)
104 const unsigned values
[] = {
108 return values
[idx
] << 4;
112 bi_assign_uniform_constant_single(
113 struct bi_registers
*regs
,
115 bi_instruction
*ins
, bool assigned
, bool fast_zero
)
120 if (ins
->type
== BI_BLEND
) {
122 regs
->uniform_constant
= 0x8;
126 bi_foreach_src(ins
, s
) {
127 if (s
== 0 && (ins
->type
== BI_LOAD_VAR_ADDRESS
|| ins
->type
== BI_LOAD_ATTR
)) continue;
129 if (ins
->src
[s
] & BIR_INDEX_CONSTANT
) {
130 /* Let direct addresses through */
131 if (ins
->type
== BI_LOAD_VAR
)
135 bool b64
= nir_alu_type_get_type_size(ins
->src_types
[s
]) > 32;
136 uint64_t cons
= bi_get_immediate(ins
, s
);
137 unsigned idx
= bi_lookup_constant(clause
, cons
, &hi
, b64
);
138 unsigned lo
= clause
->constants
[idx
] & 0xF;
139 unsigned f
= bi_constant_field(idx
) | lo
;
141 if (assigned
&& regs
->uniform_constant
!= f
)
142 unreachable("Mismatched uniform/const field: imm");
144 regs
->uniform_constant
= f
;
145 ins
->src
[s
] = BIR_INDEX_PASS
| (hi
? BIFROST_SRC_CONST_HI
: BIFROST_SRC_CONST_LO
);
147 } else if (ins
->src
[s
] & BIR_INDEX_ZERO
&& (ins
->type
== BI_LOAD_UNIFORM
|| ins
->type
== BI_LOAD_VAR
)) {
148 /* XXX: HACK UNTIL WE HAVE HI MATCHING DUE TO OVERFLOW XXX */
149 ins
->src
[s
] = BIR_INDEX_PASS
| BIFROST_SRC_CONST_HI
;
150 } else if (ins
->src
[s
] & BIR_INDEX_ZERO
&& !fast_zero
) {
151 /* FMAs have a fast zero port, ADD needs to use the
152 * uniform/const port's special 0 mode handled here */
155 if (assigned
&& regs
->uniform_constant
!= f
)
156 unreachable("Mismatched uniform/const field: 0");
158 regs
->uniform_constant
= f
;
159 ins
->src
[s
] = BIR_INDEX_PASS
| BIFROST_SRC_CONST_LO
;
161 } else if (ins
->src
[s
] & BIR_INDEX_ZERO
&& fast_zero
) {
162 ins
->src
[s
] = BIR_INDEX_PASS
| BIFROST_SRC_STAGE
;
163 } else if (s
& BIR_INDEX_UNIFORM
) {
164 unreachable("Push uniforms not implemented yet");
/* Assigns the single uniform/constant slot for a bundle: first the FMA
 * instruction (which has a fast zero port, hence fast_zero=true), then the
 * ADD instruction.
 * NOTE(review): extraction gaps — the remaining parameters and the capture
 * of `assigned` from the FMA call are on dropped lines; `assigned`
 * presumably records whether the FMA call claimed the slot so the ADD call
 * can validate consistency. */
172 bi_assign_uniform_constant(
174 struct bi_registers
*regs
,
178 bi_assign_uniform_constant_single(regs
, clause
, bundle
.fma
, false, true);
180 bi_assign_uniform_constant_single(regs
, clause
, bundle
.add
, assigned
, false);
183 /* Assigns a port for reading, before anything is written */
/* Assigns a read port for a register source: reuses port 0/1 or port 3 if
 * the register is already mapped, otherwise claims the first free slot of
 * ports 0/1, falling back to port 3. Non-register sources are ignored.
 * Aborts (after dumping port state) if every read port is taken — that
 * would be a scheduler bug.
 * NOTE(review): the early return, the port[] assignments inside each branch,
 * and the returns after claiming a port are on lines dropped by
 * extraction. */
186 bi_assign_port_read(struct bi_registers
*regs
, unsigned src
)
188 /* We only assign for registers */
189 if (!(src
& BIR_INDEX_REGISTER
))
192 unsigned reg
= src
& ~BIR_INDEX_REGISTER
;
194 /* Check if we already assigned the port */
195 for (unsigned i
= 0; i
<= 1; ++i
) {
196 if (regs
->port
[i
] == reg
&& regs
->enabled
[i
])
200 if (regs
->port
[3] == reg
&& regs
->read_port3
)
/* Claim a free read slot: prefer ports 0/1, then port 3. */
205 for (unsigned i
= 0; i
<= 1; ++i
) {
206 if (!regs
->enabled
[i
]) {
208 regs
->enabled
[i
] = true;
213 if (!regs
->read_port3
) {
215 regs
->read_port3
= true;
/* All ports exhausted: dump state and abort. */
219 bi_print_ports(regs
);
220 unreachable("Failed to find a free port for src");
223 static struct bi_registers
224 bi_assign_ports(bi_bundle now
, bi_bundle prev
)
226 struct bi_registers regs
= { 0 };
228 /* We assign ports for the main register mechanism. Special ops
229 * use the data registers, which has its own mechanism entirely
230 * and thus gets skipped over here. */
232 unsigned read_dreg
= now
.add
&&
233 bi_class_props
[now
.add
->type
] & BI_DATA_REG_SRC
;
235 unsigned write_dreg
= prev
.add
&&
236 bi_class_props
[prev
.add
->type
] & BI_DATA_REG_DEST
;
238 /* First, assign reads */
241 bi_foreach_src(now
.fma
, src
)
242 bi_assign_port_read(®s
, now
.fma
->src
[src
]);
245 bi_foreach_src(now
.add
, src
) {
246 if (!(src
== 0 && read_dreg
))
247 bi_assign_port_read(®s
, now
.add
->src
[src
]);
251 /* Next, assign writes */
253 if (prev
.add
&& prev
.add
->dest
& BIR_INDEX_REGISTER
&& !write_dreg
) {
254 regs
.port
[2] = prev
.add
->dest
& ~BIR_INDEX_REGISTER
;
255 regs
.write_add
= true;
258 if (prev
.fma
&& prev
.fma
->dest
& BIR_INDEX_REGISTER
) {
259 unsigned r
= prev
.fma
->dest
& ~BIR_INDEX_REGISTER
;
261 if (regs
.write_add
) {
262 /* Scheduler constraint: cannot read 3 and write 2 */
263 assert(!regs
.read_port3
);
269 regs
.write_fma
= true;
272 /* Finally, ensure port 1 > port 0 for the 63-x trick to function */
274 if (regs
.enabled
[0] && regs
.enabled
[1] && regs
.port
[1] < regs
.port
[0]) {
275 unsigned temp
= regs
.port
[0];
276 regs
.port
[0] = regs
.port
[1];
283 /* Determines the register control field, ignoring the first? flag */
/* Determines the register control field from the write_fma/write_add/
 * read_port3 flags, ignoring the first-instruction flag (handled by the
 * caller, bi_pack_register_ctrl). Writing both FMA and ADD forbids a port-3
 * read, per the leading assert.
 * NOTE(review): the opening brace and the if-conditions guarding each
 * return are on lines dropped by extraction; the visible returns show the
 * mapping but not the exact predicates. */
285 static enum bifrost_reg_control
286 bi_pack_register_ctrl_lo(struct bi_registers r
)
290 assert(!r
.read_port3
);
291 return BIFROST_WRITE_ADD_P2_FMA_P3
;
294 return BIFROST_WRITE_FMA_P2_READ_P3
;
296 return BIFROST_WRITE_FMA_P2
;
298 } else if (r
.write_add
) {
300 return BIFROST_WRITE_ADD_P2_READ_P3
;
302 return BIFROST_WRITE_ADD_P2
;
303 } else if (r
.read_port3
)
304 return BIFROST_READ_P3
;
306 return BIFROST_REG_NONE
;
309 /* Ditto but account for the first? flag this time */
/* Like bi_pack_register_ctrl_lo, but additionally folds in the
 * first-instruction flag: the NONE and WRITE_FMA_P2_READ_P3 encodings have
 * dedicated FIRST_* variants; otherwise the FIRST_NONE bit is OR'd in.
 * NOTE(review): the opening brace, the else branch structure around the
 * final |= and the return of ctrl are on lines dropped by extraction. */
311 static enum bifrost_reg_control
312 bi_pack_register_ctrl(struct bi_registers r
)
314 enum bifrost_reg_control ctrl
= bi_pack_register_ctrl_lo(r
);
316 if (r
.first_instruction
) {
317 if (ctrl
== BIFROST_REG_NONE
)
318 ctrl
= BIFROST_FIRST_NONE
;
319 else if (ctrl
== BIFROST_WRITE_FMA_P2_READ_P3
)
320 ctrl
= BIFROST_FIRST_WRITE_FMA_P2_READ_P3
;
322 ctrl
|= BIFROST_FIRST_NONE
;
329 bi_pack_registers(struct bi_registers regs
)
331 enum bifrost_reg_control ctrl
= bi_pack_register_ctrl(regs
);
332 struct bifrost_regs s
= { 0 };
335 if (regs
.enabled
[1]) {
336 /* Gotta save that bit!~ Required by the 63-x trick */
337 assert(regs
.port
[1] > regs
.port
[0]);
338 assert(regs
.enabled
[0]);
340 /* Do the 63-x trick, see docs/disasm */
341 if (regs
.port
[0] > 31) {
342 regs
.port
[0] = 63 - regs
.port
[0];
343 regs
.port
[1] = 63 - regs
.port
[1];
346 assert(regs
.port
[0] <= 31);
347 assert(regs
.port
[1] <= 63);
350 s
.reg1
= regs
.port
[1];
351 s
.reg0
= regs
.port
[0];
353 /* Port 1 disabled, so set to zero and use port 1 for ctrl */
357 if (regs
.enabled
[0]) {
358 /* Bit 0 upper bit of port 0 */
359 s
.reg1
|= (regs
.port
[0] >> 5);
361 /* Rest of port 0 in usual spot */
362 s
.reg0
= (regs
.port
[0] & 0b11111);
364 /* Bit 1 set if port 0 also disabled */
369 /* When port 3 isn't used, we have to set it to port 2, and vice versa,
370 * or INSTR_INVALID_ENC is raised. The reason is unknown. */
372 bool has_port2
= regs
.write_fma
|| regs
.write_add
;
373 bool has_port3
= regs
.read_port3
|| (regs
.write_fma
&& regs
.write_add
);
376 regs
.port
[3] = regs
.port
[2];
379 regs
.port
[2] = regs
.port
[3];
381 s
.reg3
= regs
.port
[3];
382 s
.reg2
= regs
.port
[2];
383 s
.uniform_const
= regs
.uniform_constant
;
385 memcpy(&packed
, &s
, sizeof(s
));
/* Records the clause's data register from a register-typed index: asserts
 * the index really is a register, strips the BIR_INDEX_REGISTER tag, and
 * stores the raw register number into clause->data_register. */
390 bi_set_data_register(bi_clause
*clause
, unsigned idx
)
392 assert(idx
& BIR_INDEX_REGISTER
);
393 unsigned reg
= idx
& ~BIR_INDEX_REGISTER
;
395 clause
->data_register
= reg
;
/* For instructions that READ via the data-register mechanism: route the
 * instruction's first source into the clause's data register. */
399 bi_read_data_register(bi_clause
*clause
, bi_instruction
*ins
)
401 bi_set_data_register(clause
, ins
->src
[0]);
/* For instructions that WRITE via the data-register mechanism: route the
 * instruction's destination into the clause's data register. */
405 bi_write_data_register(bi_clause
*clause
, bi_instruction
*ins
)
407 bi_set_data_register(clause
, ins
->dest
);
/* Maps a register-typed source back to the read port it was assigned by
 * bi_assign_port_read (port 0, 1 or 3). Aborts if the register was never
 * given a port — that indicates a port-assignment bug upstream. */
410 static enum bifrost_packed_src
411 bi_get_src_reg_port(struct bi_registers
*regs
, unsigned src
)
413 unsigned reg
= src
& ~BIR_INDEX_REGISTER
;
415 if (regs
->port
[0] == reg
&& regs
->enabled
[0])
416 return BIFROST_SRC_PORT0
;
417 else if (regs
->port
[1] == reg
&& regs
->enabled
[1])
418 return BIFROST_SRC_PORT1
;
419 else if (regs
->port
[3] == reg
&& regs
->read_port3
)
420 return BIFROST_SRC_PORT3
;
422 unreachable("Tried to access register with no port");
/* Resolves source slot `s` of an instruction to its packed 3-bit source
 * encoding: registers go through the port lookup, passthrough sources carry
 * their encoding directly in the index. Anything else is a packing bug —
 * dump the instruction and abort. */
425 static enum bifrost_packed_src
426 bi_get_src(bi_instruction
*ins
, struct bi_registers
*regs
, unsigned s
)
428 unsigned src
= ins
->src
[s
];
430 if (src
& BIR_INDEX_REGISTER
)
431 return bi_get_src_reg_port(regs
, src
);
432 else if (src
& BIR_INDEX_PASS
)
433 return src
& ~BIR_INDEX_PASS
;
435 bi_print_instruction(ins
, stderr
);
436 unreachable("Unknown src in above instruction");
/* Packs the 2-bit swizzle for a 16-bit vec2 source, skipping components the
 * instruction does not write.
 * NOTE(review): `bi_writes_component(ins, src)` inside the per-component
 * loop looks suspicious — the loop variable `c` (the component) would be the
 * expected argument, not `src` (the source slot). Verify against the
 * original; as written the check is loop-invariant.
 * NOTE(review): the remainder of the original header comment, the loop body
 * that folds `k` into `swizzle`, and the return are on lines dropped by
 * extraction. */
440 /* Constructs a packed 2-bit swizzle for a 16-bit vec2 source. Source must be
441 * 16-bit and written components must correspond to valid swizzles (component x
445 bi_swiz16(bi_instruction
*ins
, unsigned src
)
447 assert(nir_alu_type_get_type_size(ins
->src_types
[src
]) == 16);
448 unsigned swizzle
= 0;
450 for (unsigned c
= 0; c
< 2; ++c
) {
451 if (!bi_writes_component(ins
, src
)) continue;
453 unsigned k
= ins
->swizzle
[src
][c
];
462 bi_pack_fma_fma(bi_instruction
*ins
, struct bi_registers
*regs
)
464 /* (-a)(-b) = ab, so we only need one negate bit */
465 bool negate_mul
= ins
->src_neg
[0] ^ ins
->src_neg
[1];
467 if (ins
->op
.mscale
) {
468 assert(!(ins
->src_abs
[0] && ins
->src_abs
[1]));
469 assert(!ins
->src_abs
[2] || !ins
->src_neg
[3] || !ins
->src_abs
[3]);
471 /* We can have exactly one abs, and can flip the multiplication
472 * to make it fit if we have to */
473 bool flip_ab
= ins
->src_abs
[1];
475 struct bifrost_fma_mscale pack
= {
476 .src0
= bi_get_src(ins
, regs
, flip_ab
? 1 : 0),
477 .src1
= bi_get_src(ins
, regs
, flip_ab
? 0 : 1),
478 .src2
= bi_get_src(ins
, regs
, 2),
479 .src3
= bi_get_src(ins
, regs
, 3),
482 .src0_abs
= ins
->src_abs
[0] || ins
->src_abs
[1],
483 .src1_neg
= negate_mul
,
484 .src2_neg
= ins
->src_neg
[2],
485 .op
= BIFROST_FMA_OP_MSCALE
,
489 } else if (ins
->dest_type
== nir_type_float32
) {
490 struct bifrost_fma_fma pack
= {
491 .src0
= bi_get_src(ins
, regs
, 0),
492 .src1
= bi_get_src(ins
, regs
, 1),
493 .src2
= bi_get_src(ins
, regs
, 2),
494 .src0_abs
= ins
->src_abs
[0],
495 .src1_abs
= ins
->src_abs
[1],
496 .src2_abs
= ins
->src_abs
[2],
497 .src0_neg
= negate_mul
,
498 .src2_neg
= ins
->src_neg
[2],
499 .outmod
= ins
->outmod
,
500 .roundmode
= ins
->roundmode
,
501 .op
= BIFROST_FMA_OP_FMA
505 } else if (ins
->dest_type
== nir_type_float16
) {
506 struct bifrost_fma_fma16 pack
= {
507 .src0
= bi_get_src(ins
, regs
, 0),
508 .src1
= bi_get_src(ins
, regs
, 1),
509 .src2
= bi_get_src(ins
, regs
, 2),
510 .swizzle_0
= bi_swiz16(ins
, 0),
511 .swizzle_1
= bi_swiz16(ins
, 1),
512 .swizzle_2
= bi_swiz16(ins
, 2),
513 .src0_neg
= negate_mul
,
514 .src2_neg
= ins
->src_neg
[2],
515 .outmod
= ins
->outmod
,
516 .roundmode
= ins
->roundmode
,
517 .op
= BIFROST_FMA_OP_FMA16
522 unreachable("Invalid fma dest type");
527 bi_pack_fma_addmin_f32(bi_instruction
*ins
, struct bi_registers
*regs
)
530 (ins
->type
== BI_ADD
) ? BIFROST_FMA_OP_FADD32
:
531 (ins
->op
.minmax
== BI_MINMAX_MIN
) ? BIFROST_FMA_OP_FMIN32
:
532 BIFROST_FMA_OP_FMAX32
;
534 struct bifrost_fma_add pack
= {
535 .src0
= bi_get_src(ins
, regs
, 0),
536 .src1
= bi_get_src(ins
, regs
, 1),
537 .src0_abs
= ins
->src_abs
[0],
538 .src1_abs
= ins
->src_abs
[1],
539 .src0_neg
= ins
->src_neg
[0],
540 .src1_neg
= ins
->src_neg
[1],
542 .outmod
= ins
->outmod
,
543 .roundmode
= (ins
->type
== BI_ADD
) ? ins
->roundmode
: ins
->minmax
,
551 bi_pack_fp16_abs(bi_instruction
*ins
, struct bi_registers
*regs
, bool *flip
)
553 /* Absolute values are packed in a quirky way. Let k = src1 < src0. Let
554 * l be an auxiliary bit we encode. Then the hardware determines:
559 * Since add/min/max are commutative, this saves a bit by using the
560 * order of the operands as a bit (k). To pack this, first note:
562 * (l && k) implies (l || k).
564 * That is, if the second argument is abs'd, then the first argument
565 * also has abs. So there are three cases:
567 * Case 0: Neither src has absolute value. Then we have l = k = 0.
569 * Case 1: Exactly one src has absolute value. Assign that source to
570 * src0 and the other source to src1. Compute k = src1 < src0 based on
571 * that assignment. Then l = ~k.
573 * Case 2: Both sources have absolute value. Then we have l = k = 1.
574 * Note to force k = 1 requires that (src1 < src0) OR (src0 < src1).
575 * That is, this encoding is only valid if src1 and src0 are distinct.
576 * This is a scheduling restriction (XXX); if an op of this type
577 * requires both identical sources to have abs value, then we must
578 * schedule to ADD (which does not use this ordering trick).
581 unsigned abs_0
= ins
->src_abs
[0], abs_1
= ins
->src_abs
[1];
582 unsigned src_0
= bi_get_src(ins
, regs
, 0);
583 unsigned src_1
= bi_get_src(ins
, regs
, 1);
585 assert(!(abs_0
&& abs_1
&& src_0
== src_1
));
587 if (!abs_0
&& !abs_1
) {
588 /* Force k = 0 <===> NOT(src1 < src0) */
589 *flip
= (src_1
< src_0
);
591 } else if (abs_0
&& !abs_1
) {
592 return src_1
>= src_0
;
593 } else if (abs_1
&& !abs_0
) {
595 return src_0
>= src_1
;
597 *flip
= !(src_1
< src_0
);
603 bi_pack_fmadd_min_f16(bi_instruction
*ins
, struct bi_registers
*regs
, bool FMA
)
606 (!FMA
) ? ((ins
->op
.minmax
== BI_MINMAX_MIN
) ?
607 BIFROST_ADD_OP_FMIN16
: BIFROST_ADD_OP_FMAX16
) :
608 (ins
->type
== BI_ADD
) ? BIFROST_FMA_OP_FADD16
:
609 (ins
->op
.minmax
== BI_MINMAX_MIN
) ? BIFROST_FMA_OP_FMIN16
:
610 BIFROST_FMA_OP_FMAX16
;
613 bool l
= bi_pack_fp16_abs(ins
, regs
, &flip
);
614 unsigned src_0
= bi_get_src(ins
, regs
, 0);
615 unsigned src_1
= bi_get_src(ins
, regs
, 1);
618 struct bifrost_fma_add_minmax16 pack
= {
619 .src0
= flip
? src_1
: src_0
,
620 .src1
= flip
? src_0
: src_1
,
621 .src0_neg
= ins
->src_neg
[flip
? 1 : 0],
622 .src1_neg
= ins
->src_neg
[flip
? 0 : 1],
623 .src0_swizzle
= bi_swiz16(ins
, flip
? 1 : 0),
624 .src1_swizzle
= bi_swiz16(ins
, flip
? 0 : 1),
626 .outmod
= ins
->outmod
,
627 .mode
= (ins
->type
== BI_ADD
) ? ins
->roundmode
: ins
->minmax
,
633 /* Can't have modes for fp16 */
634 assert(ins
->outmod
== 0);
636 struct bifrost_add_fmin16 pack
= {
637 .src0
= flip
? src_1
: src_0
,
638 .src1
= flip
? src_0
: src_1
,
639 .src0_neg
= ins
->src_neg
[flip
? 1 : 0],
640 .src1_neg
= ins
->src_neg
[flip
? 0 : 1],
642 .src0_swizzle
= bi_swiz16(ins
, flip
? 1 : 0),
643 .src1_swizzle
= bi_swiz16(ins
, flip
? 0 : 1),
653 bi_pack_fma_addmin(bi_instruction
*ins
, struct bi_registers
*regs
)
655 if (ins
->dest_type
== nir_type_float32
)
656 return bi_pack_fma_addmin_f32(ins
, regs
);
657 else if(ins
->dest_type
== nir_type_float16
)
658 return bi_pack_fmadd_min_f16(ins
, regs
, true);
660 unreachable("Unknown FMA/ADD type");
664 bi_pack_fma_1src(bi_instruction
*ins
, struct bi_registers
*regs
, unsigned op
)
666 struct bifrost_fma_inst pack
= {
667 .src0
= bi_get_src(ins
, regs
, 0),
675 bi_pack_fma_2src(bi_instruction
*ins
, struct bi_registers
*regs
, unsigned op
)
677 struct bifrost_fma_2src pack
= {
678 .src0
= bi_get_src(ins
, regs
, 0),
679 .src1
= bi_get_src(ins
, regs
, 1),
687 bi_pack_add_1src(bi_instruction
*ins
, struct bi_registers
*regs
, unsigned op
)
689 struct bifrost_add_inst pack
= {
690 .src0
= bi_get_src(ins
, regs
, 0),
697 static enum bifrost_csel_cond
698 bi_cond_to_csel(enum bi_cond cond
, bool *flip
, bool *invert
, nir_alu_type T
)
700 nir_alu_type B
= nir_alu_type_get_base_type(T
);
701 unsigned idx
= (B
== nir_type_float
) ? 0 :
702 ((B
== nir_type_int
) ? 1 : 2);
708 const enum bifrost_csel_cond ops
[] = {
719 const enum bifrost_csel_cond ops
[] = {
730 const enum bifrost_csel_cond ops
[] = {
733 BIFROST_IEQ_F
/* sign is irrelevant */
739 unreachable("Invalid op for csel");
744 bi_pack_fma_csel(bi_instruction
*ins
, struct bi_registers
*regs
)
746 /* TODO: Use csel3 as well */
747 bool flip
= false, invert
= false;
749 enum bifrost_csel_cond cond
=
750 bi_cond_to_csel(ins
->cond
, &flip
, &invert
, ins
->src_types
[0]);
752 unsigned size
= nir_alu_type_get_type_size(ins
->dest_type
);
754 unsigned cmp_0
= (flip
? 1 : 0);
755 unsigned cmp_1
= (flip
? 0 : 1);
756 unsigned res_0
= (invert
? 3 : 2);
757 unsigned res_1
= (invert
? 2 : 3);
759 struct bifrost_csel4 pack
= {
760 .src0
= bi_get_src(ins
, regs
, cmp_0
),
761 .src1
= bi_get_src(ins
, regs
, cmp_1
),
762 .src2
= bi_get_src(ins
, regs
, res_0
),
763 .src3
= bi_get_src(ins
, regs
, res_1
),
765 .op
= (size
== 16) ? BIFROST_FMA_OP_CSEL4_V16
:
773 bi_pack_fma_frexp(bi_instruction
*ins
, struct bi_registers
*regs
)
775 unsigned op
= BIFROST_FMA_OP_FREXPE_LOG
;
776 return bi_pack_fma_1src(ins
, regs
, op
);
780 bi_pack_fma_reduce(bi_instruction
*ins
, struct bi_registers
*regs
)
782 if (ins
->op
.reduce
== BI_REDUCE_ADD_FREXPM
) {
783 return bi_pack_fma_2src(ins
, regs
, BIFROST_FMA_OP_ADD_FREXPM
);
785 unreachable("Invalid reduce op");
789 /* We have a single convert opcode in the IR but a number of opcodes that could
790 * come out. In particular we have native opcodes for:
792 * [ui]16 --> [fui]32 -- int16_to_32
793 * f16 --> f32 -- float16_to_32
794 * f32 --> f16 -- float32_to_16
795 * f32 --> [ui]32 -- float32_to_int
796 * [ui]32 --> f32 -- int_to_float32
797 * [fui]16 --> [fui]16 -- f2i_i2f16
801 bi_pack_convert(bi_instruction
*ins
, struct bi_registers
*regs
, bool FMA
)
803 nir_alu_type from_base
= nir_alu_type_get_base_type(ins
->src_types
[0]);
804 unsigned from_size
= nir_alu_type_get_type_size(ins
->src_types
[0]);
805 bool from_unsigned
= from_base
== nir_type_uint
;
807 nir_alu_type to_base
= nir_alu_type_get_base_type(ins
->dest_type
);
808 unsigned to_size
= nir_alu_type_get_type_size(ins
->dest_type
);
809 bool to_unsigned
= to_base
== nir_type_uint
;
810 bool to_float
= to_base
== nir_type_float
;
813 assert((from_base
!= to_base
) || (from_size
!= to_size
));
814 assert((MAX2(from_size
, to_size
) / MIN2(from_size
, to_size
)) <= 2);
816 /* f32 to f16 is special */
817 if (from_size
== 32 && to_size
== 16 && from_base
== nir_type_float
&& to_base
== from_base
) {
818 /* TODO: second vectorized source? */
819 struct bifrost_fma_2src pfma
= {
820 .src0
= bi_get_src(ins
, regs
, 0),
821 .src1
= BIFROST_SRC_STAGE
, /* 0 */
822 .op
= BIFROST_FMA_FLOAT32_TO_16
825 struct bifrost_add_2src padd
= {
826 .src0
= bi_get_src(ins
, regs
, 0),
827 .src1
= BIFROST_SRC_STAGE
, /* 0 */
828 .op
= BIFROST_ADD_FLOAT32_TO_16
838 /* Otherwise, figure out the mode */
841 if (from_size
== 16 && to_size
== 32) {
842 unsigned component
= ins
->swizzle
[0][0];
843 assert(component
<= 1);
845 if (from_base
== nir_type_float
)
846 op
= BIFROST_CONVERT_5(component
);
848 op
= BIFROST_CONVERT_4(from_unsigned
, component
, to_float
);
851 unsigned swizzle
= (from_size
== 16) ? bi_swiz16(ins
, 0) : 0;
852 bool is_unsigned
= from_unsigned
;
854 if (from_base
== nir_type_float
) {
855 assert(to_base
!= nir_type_float
);
856 is_unsigned
= to_unsigned
;
858 if (from_size
== 32 && to_size
== 32)
859 mode
= BIFROST_CONV_F32_TO_I32
;
860 else if (from_size
== 16 && to_size
== 16)
861 mode
= BIFROST_CONV_F16_TO_I16
;
863 unreachable("Invalid float conversion");
865 assert(to_base
== nir_type_float
);
866 assert(from_size
== to_size
);
869 mode
= BIFROST_CONV_I32_TO_F32
;
870 else if (to_size
== 16)
871 mode
= BIFROST_CONV_I16_TO_F16
;
873 unreachable("Invalid int conversion");
876 /* Fixup swizzle for 32-bit only modes */
878 if (mode
== BIFROST_CONV_I32_TO_F32
)
880 else if (mode
== BIFROST_CONV_F32_TO_I32
)
883 op
= BIFROST_CONVERT(is_unsigned
, ins
->roundmode
, swizzle
, mode
);
885 /* Unclear what the top bit is for... maybe 16-bit related */
886 bool mode2
= mode
== BIFROST_CONV_F16_TO_I16
;
887 bool mode6
= mode
== BIFROST_CONV_I16_TO_F16
;
889 if (!(mode2
|| mode6
))
894 return bi_pack_fma_1src(ins
, regs
, BIFROST_FMA_CONVERT
| op
);
896 return bi_pack_add_1src(ins
, regs
, BIFROST_ADD_CONVERT
| op
);
900 bi_pack_fma_select(bi_instruction
*ins
, struct bi_registers
*regs
)
902 unsigned size
= nir_alu_type_get_type_size(ins
->src_types
[0]);
905 unsigned swiz
= (ins
->swizzle
[0][0] | (ins
->swizzle
[1][0] << 1));
906 unsigned op
= BIFROST_FMA_SEL_16(swiz
);
907 return bi_pack_fma_2src(ins
, regs
, op
);
908 } else if (size
== 8) {
911 for (unsigned c
= 0; c
< 4; ++c
) {
912 if (ins
->swizzle
[c
][0]) {
913 /* Ensure lowering restriction is met */
914 assert(ins
->swizzle
[c
][0] == 2);
919 struct bifrost_fma_sel8 pack
= {
920 .src0
= bi_get_src(ins
, regs
, 0),
921 .src1
= bi_get_src(ins
, regs
, 1),
922 .src2
= bi_get_src(ins
, regs
, 2),
923 .src3
= bi_get_src(ins
, regs
, 3),
925 .op
= BIFROST_FMA_OP_SEL8
930 unreachable("Unimplemented");
/* Translates an IR comparison condition to the hardware float-compare
 * condition. All relations map to the ordered variants except NE, which maps
 * to unordered-not-equal (UNE) so that NaN compares as not-equal. Unknown
 * conditions abort.
 * NOTE(review): the opening brace and switch statement header are on lines
 * dropped by extraction. */
934 static enum bifrost_fcmp_cond
935 bi_fcmp_cond(enum bi_cond cond
)
938 case BI_COND_LT
: return BIFROST_OLT
;
939 case BI_COND_LE
: return BIFROST_OLE
;
940 case BI_COND_GE
: return BIFROST_OGE
;
941 case BI_COND_GT
: return BIFROST_OGT
;
942 case BI_COND_EQ
: return BIFROST_OEQ
;
943 case BI_COND_NE
: return BIFROST_UNE
;
944 default: unreachable("Unknown bi_cond");
948 /* a <?> b <==> b <flip(?)> a (TODO: NaN behaviour?) */
950 static enum bifrost_fcmp_cond
951 bi_flip_fcmp(enum bifrost_fcmp_cond cond
)
966 unreachable("Unknown fcmp cond");
971 bi_pack_fma_cmp(bi_instruction
*ins
, struct bi_registers
*regs
)
973 nir_alu_type Tl
= ins
->src_types
[0];
974 nir_alu_type Tr
= ins
->src_types
[1];
976 if (Tl
== nir_type_float32
|| Tr
== nir_type_float32
) {
977 /* TODO: Mixed 32/16 cmp */
980 enum bifrost_fcmp_cond cond
= bi_fcmp_cond(ins
->cond
);
982 /* Only src1 has neg, so we arrange:
985 * -a < -b <===> a > b
986 * -a < b <===> a > -b
987 * TODO: Is this NaN-precise?
990 bool flip
= ins
->src_neg
[0];
991 bool neg
= ins
->src_neg
[0] ^ ins
->src_neg
[1];
994 cond
= bi_flip_fcmp(cond
);
996 struct bifrost_fma_fcmp pack
= {
997 .src0
= bi_get_src(ins
, regs
, 0),
998 .src1
= bi_get_src(ins
, regs
, 1),
999 .src0_abs
= ins
->src_abs
[0],
1000 .src1_abs
= ins
->src_abs
[1],
1005 .op
= BIFROST_FMA_OP_FCMP_GL
1008 RETURN_PACKED(pack
);
1009 } else if (Tl
== nir_type_float16
&& Tr
== nir_type_float16
) {
1011 bool l
= bi_pack_fp16_abs(ins
, regs
, &flip
);
1012 enum bifrost_fcmp_cond cond
= bi_fcmp_cond(ins
->cond
);
1015 cond
= bi_flip_fcmp(cond
);
1017 struct bifrost_fma_fcmp16 pack
= {
1018 .src0
= bi_get_src(ins
, regs
, flip
? 1 : 0),
1019 .src1
= bi_get_src(ins
, regs
, flip
? 0 : 1),
1020 .src0_swizzle
= bi_swiz16(ins
, flip
? 1 : 0),
1021 .src1_swizzle
= bi_swiz16(ins
, flip
? 0 : 1),
1025 .op
= BIFROST_FMA_OP_FCMP_GL_16
,
1028 RETURN_PACKED(pack
);
1030 unreachable("Unknown cmp type");
1035 bi_fma_bitwise_op(enum bi_bitwise_op op
, bool rshift
)
1039 /* Via De Morgan's */
1041 BIFROST_FMA_OP_RSHIFT_NAND
:
1042 BIFROST_FMA_OP_LSHIFT_NAND
;
1043 case BI_BITWISE_AND
:
1045 BIFROST_FMA_OP_RSHIFT_AND
:
1046 BIFROST_FMA_OP_LSHIFT_AND
;
1047 case BI_BITWISE_XOR
:
1048 /* Shift direction handled out of band */
1049 return BIFROST_FMA_OP_RSHIFT_XOR
;
1051 unreachable("Unknown op");
1056 bi_pack_fma_bitwise(bi_instruction
*ins
, struct bi_registers
*regs
)
1058 unsigned size
= nir_alu_type_get_type_size(ins
->dest_type
);
1061 bool invert_0
= ins
->bitwise
.src_invert
[0];
1062 bool invert_1
= ins
->bitwise
.src_invert
[1];
1064 if (ins
->op
.bitwise
== BI_BITWISE_OR
) {
1065 /* Becomes NAND, so via De Morgan's:
1066 * f(A) | f(B) = ~(~f(A) & ~f(B))
1067 * = NAND(~f(A), ~f(B))
1070 invert_0
= !invert_0
;
1071 invert_1
= !invert_1
;
1072 } else if (ins
->op
.bitwise
== BI_BITWISE_XOR
) {
1073 /* ~A ^ ~B = ~(A ^ ~B) = ~(~(A ^ B)) = A ^ B
1074 * ~A ^ B = ~(A ^ B) = A ^ ~B
1077 invert_0
^= invert_1
;
1080 /* invert_1 ends up specifying shift direction */
1081 invert_1
= !ins
->bitwise
.rshift
;
1084 struct bifrost_shift_fma pack
= {
1085 .src0
= bi_get_src(ins
, regs
, 0),
1086 .src1
= bi_get_src(ins
, regs
, 1),
1087 .src2
= bi_get_src(ins
, regs
, 2),
1088 .half
= (size
== 32) ? 0 : (size
== 16) ? 0x7 : (size
== 8) ? 0x4 : 0,
1090 .invert_1
= invert_0
,
1091 .invert_2
= invert_1
,
1092 .op
= bi_fma_bitwise_op(ins
->op
.bitwise
, ins
->bitwise
.rshift
)
1095 RETURN_PACKED(pack
);
1099 bi_pack_fma_round(bi_instruction
*ins
, struct bi_registers
*regs
)
1101 bool fp16
= ins
->dest_type
== nir_type_float16
;
1102 assert(fp16
|| ins
->dest_type
== nir_type_float32
);
1105 ? BIFROST_FMA_ROUND_16(ins
->roundmode
, bi_swiz16(ins
, 0))
1106 : BIFROST_FMA_ROUND_32(ins
->roundmode
);
1108 return bi_pack_fma_1src(ins
, regs
, op
);
1112 bi_pack_fma_imath(bi_instruction
*ins
, struct bi_registers
*regs
)
1114 /* Scheduler: only ADD can have 8/16-bit imath */
1115 assert(ins
->dest_type
== nir_type_int32
|| ins
->dest_type
== nir_type_uint32
);
1117 unsigned op
= ins
->op
.imath
== BI_IMATH_ADD
1118 ? BIFROST_FMA_IADD_32
1119 : BIFROST_FMA_ISUB_32
;
1121 return bi_pack_fma_2src(ins
, regs
, op
);
1125 bi_pack_fma(bi_clause
*clause
, bi_bundle bundle
, struct bi_registers
*regs
)
1128 return BIFROST_FMA_NOP
;
1130 switch (bundle
.fma
->type
) {
1132 return bi_pack_fma_addmin(bundle
.fma
, regs
);
1134 return bi_pack_fma_cmp(bundle
.fma
, regs
);
1136 return bi_pack_fma_bitwise(bundle
.fma
, regs
);
1138 return bi_pack_convert(bundle
.fma
, regs
, true);
1140 return bi_pack_fma_csel(bundle
.fma
, regs
);
1142 return bi_pack_fma_fma(bundle
.fma
, regs
);
1144 return bi_pack_fma_frexp(bundle
.fma
, regs
);
1146 return bi_pack_fma_imath(bundle
.fma
, regs
);
1148 return bi_pack_fma_addmin(bundle
.fma
, regs
);
1150 return bi_pack_fma_1src(bundle
.fma
, regs
, BIFROST_FMA_OP_MOV
);
1152 unreachable("Packing todo");
1154 return bi_pack_fma_select(bundle
.fma
, regs
);
1156 return bi_pack_fma_round(bundle
.fma
, regs
);
1158 return bi_pack_fma_reduce(bundle
.fma
, regs
);
1160 unreachable("Cannot encode class as FMA");
1165 bi_pack_add_ld_vary(bi_clause
*clause
, bi_instruction
*ins
, struct bi_registers
*regs
)
1167 unsigned size
= nir_alu_type_get_type_size(ins
->dest_type
);
1168 assert(size
== 32 || size
== 16);
1170 unsigned op
= (size
== 32) ?
1171 BIFROST_ADD_OP_LD_VAR_32
:
1172 BIFROST_ADD_OP_LD_VAR_16
;
1174 unsigned packed_addr
= 0;
1176 if (ins
->src
[0] & BIR_INDEX_CONSTANT
) {
1177 /* Direct uses address field directly */
1178 packed_addr
= bi_get_immediate(ins
, 0);
1180 /* Indirect gets an extra source */
1181 packed_addr
= bi_get_src(ins
, regs
, 0) | 0b11000;
1184 /* The destination is thrown in the data register */
1185 assert(ins
->dest
& BIR_INDEX_REGISTER
);
1186 clause
->data_register
= ins
->dest
& ~BIR_INDEX_REGISTER
;
1188 unsigned channels
= ins
->vector_channels
;
1189 assert(channels
>= 1 && channels
<= 4);
1191 struct bifrost_ld_var pack
= {
1192 .src0
= bi_get_src(ins
, regs
, 1),
1193 .addr
= packed_addr
,
1194 .channels
= MALI_POSITIVE(channels
),
1195 .interp_mode
= ins
->load_vary
.interp_mode
,
1196 .reuse
= ins
->load_vary
.reuse
,
1197 .flat
= ins
->load_vary
.flat
,
1201 RETURN_PACKED(pack
);
1205 bi_pack_add_2src(bi_instruction
*ins
, struct bi_registers
*regs
, unsigned op
)
1207 struct bifrost_add_2src pack
= {
1208 .src0
= bi_get_src(ins
, regs
, 0),
1209 .src1
= bi_get_src(ins
, regs
, 1),
1213 RETURN_PACKED(pack
);
1217 bi_pack_add_addmin_f32(bi_instruction
*ins
, struct bi_registers
*regs
)
1220 (ins
->type
== BI_ADD
) ? BIFROST_ADD_OP_FADD32
:
1221 (ins
->op
.minmax
== BI_MINMAX_MIN
) ? BIFROST_ADD_OP_FMIN32
:
1222 BIFROST_ADD_OP_FMAX32
;
1224 struct bifrost_add_faddmin pack
= {
1225 .src0
= bi_get_src(ins
, regs
, 0),
1226 .src1
= bi_get_src(ins
, regs
, 1),
1227 .src0_abs
= ins
->src_abs
[0],
1228 .src1_abs
= ins
->src_abs
[1],
1229 .src0_neg
= ins
->src_neg
[0],
1230 .src1_neg
= ins
->src_neg
[1],
1231 .outmod
= ins
->outmod
,
1232 .mode
= (ins
->type
== BI_ADD
) ? ins
->roundmode
: ins
->minmax
,
1236 RETURN_PACKED(pack
);
1240 bi_pack_add_add_f16(bi_instruction
*ins
, struct bi_registers
*regs
)
1242 /* ADD.v2f16 can't have outmod */
1243 assert(ins
->outmod
== BIFROST_NONE
);
1245 struct bifrost_add_faddmin pack
= {
1246 .src0
= bi_get_src(ins
, regs
, 0),
1247 .src1
= bi_get_src(ins
, regs
, 1),
1248 .src0_abs
= ins
->src_abs
[0],
1249 .src1_abs
= ins
->src_abs
[1],
1250 .src0_neg
= ins
->src_neg
[0],
1251 .src1_neg
= ins
->src_neg
[1],
1252 .select
= bi_swiz16(ins
, 0), /* swizzle_0 */
1253 .outmod
= bi_swiz16(ins
, 1), /* swizzle_1 */
1254 .mode
= ins
->roundmode
,
1255 .op
= BIFROST_ADD_OP_FADD16
1258 RETURN_PACKED(pack
);
1262 bi_pack_add_addmin(bi_instruction
*ins
, struct bi_registers
*regs
)
1264 if (ins
->dest_type
== nir_type_float32
)
1265 return bi_pack_add_addmin_f32(ins
, regs
);
1266 else if (ins
->dest_type
== nir_type_float16
) {
1267 if (ins
->type
== BI_ADD
)
1268 return bi_pack_add_add_f16(ins
, regs
);
1270 return bi_pack_fmadd_min_f16(ins
, regs
, false);
1272 unreachable("Unknown FMA/ADD type");
1276 bi_pack_add_ld_ubo(bi_clause
*clause
, bi_instruction
*ins
, struct bi_registers
*regs
)
1278 assert(ins
->vector_channels
>= 1 && ins
->vector_channels
<= 4);
1280 const unsigned ops
[4] = {
1281 BIFROST_ADD_OP_LD_UBO_1
,
1282 BIFROST_ADD_OP_LD_UBO_2
,
1283 BIFROST_ADD_OP_LD_UBO_3
,
1284 BIFROST_ADD_OP_LD_UBO_4
1287 bi_write_data_register(clause
, ins
);
1288 return bi_pack_add_2src(ins
, regs
, ops
[ins
->vector_channels
- 1]);
/* Translates a NIR ALU type to the hardware load/store type encoding; only
 * f16/f32/i32/u32 are loadable, anything else aborts.
 * NOTE(review): the opening brace and switch statement header are on lines
 * dropped by extraction. */
1291 static enum bifrost_ldst_type
1292 bi_pack_ldst_type(nir_alu_type T
)
1295 case nir_type_float16
: return BIFROST_LDST_F16
;
1296 case nir_type_float32
: return BIFROST_LDST_F32
;
1297 case nir_type_int32
: return BIFROST_LDST_I32
;
1298 case nir_type_uint32
: return BIFROST_LDST_U32
;
1299 default: unreachable("Invalid type loaded");
1304 bi_pack_add_ld_var_addr(bi_clause
*clause
, bi_instruction
*ins
, struct bi_registers
*regs
)
1306 struct bifrost_ld_var_addr pack
= {
1307 .src0
= bi_get_src(ins
, regs
, 1),
1308 .src1
= bi_get_src(ins
, regs
, 2),
1309 .location
= bi_get_immediate(ins
, 0),
1310 .type
= bi_pack_ldst_type(ins
->src_types
[3]),
1311 .op
= BIFROST_ADD_OP_LD_VAR_ADDR
1314 bi_write_data_register(clause
, ins
);
1315 RETURN_PACKED(pack
);
1319 bi_pack_add_ld_attr(bi_clause
*clause
, bi_instruction
*ins
, struct bi_registers
*regs
)
1321 assert(ins
->vector_channels
>= 0 && ins
->vector_channels
<= 4);
1323 struct bifrost_ld_attr pack
= {
1324 .src0
= bi_get_src(ins
, regs
, 1),
1325 .src1
= bi_get_src(ins
, regs
, 2),
1326 .location
= bi_get_immediate(ins
, 0),
1327 .channels
= MALI_POSITIVE(ins
->vector_channels
),
1328 .type
= bi_pack_ldst_type(ins
->dest_type
),
1329 .op
= BIFROST_ADD_OP_LD_ATTR
1332 bi_write_data_register(clause
, ins
);
1333 RETURN_PACKED(pack
);
1337 bi_pack_add_st_vary(bi_clause
*clause
, bi_instruction
*ins
, struct bi_registers
*regs
)
1339 assert(ins
->vector_channels
>= 1 && ins
->vector_channels
<= 4);
1341 struct bifrost_st_vary pack
= {
1342 .src0
= bi_get_src(ins
, regs
, 1),
1343 .src1
= bi_get_src(ins
, regs
, 2),
1344 .src2
= bi_get_src(ins
, regs
, 3),
1345 .channels
= MALI_POSITIVE(ins
->vector_channels
),
1346 .op
= BIFROST_ADD_OP_ST_VAR
1349 bi_read_data_register(clause
, ins
);
1350 RETURN_PACKED(pack
);
/* Packs ATEST (alpha test) on the ADD slot. fp16 alpha values need a
 * component select taken from the swizzle; fp32 uses a fixed component.
 * NOTE(review): this extract is missing tokens -- the opening brace,
 * original line 1361 inside the initializer (possibly another field),
 * and the initializer's closing "};". Verify against the full file. */
bi_pack_add_atest(bi_clause *clause, bi_instruction *ins, struct bi_registers *regs)
        bool fp16 = (ins->src_types[1] == nir_type_float16);

        struct bifrost_add_atest pack = {
                .src0 = bi_get_src(ins, regs, 0),
                .src1 = bi_get_src(ins, regs, 1),
                .component = fp16 ? ins->swizzle[1][0] : 1, /* Set for fp32 */
                .op = BIFROST_ADD_OP_ATEST,

        /* Despite *also* writing with the usual mechanism... quirky and
         * perhaps unnecessary, but let's match the blob */
        clause->data_register = ins->dest & ~BIR_INDEX_REGISTER;

        RETURN_PACKED(pack);
1374 bi_pack_add_blend(bi_clause
*clause
, bi_instruction
*ins
, struct bi_registers
*regs
)
1376 struct bifrost_add_inst pack
= {
1377 .src0
= bi_get_src(ins
, regs
, 1),
1378 .op
= BIFROST_ADD_OP_BLEND
1381 /* TODO: Pack location in uniform_const */
1382 assert(ins
->blend_location
== 0);
1384 bi_read_data_register(clause
, ins
);
1385 RETURN_PACKED(pack
);
1389 bi_pack_add_special(bi_instruction
*ins
, struct bi_registers
*regs
)
1392 bool fp16
= ins
->dest_type
== nir_type_float16
;
1393 bool Y
= ins
->swizzle
[0][0];
1395 if (ins
->op
.special
== BI_SPECIAL_FRCP
) {
1397 (Y
? BIFROST_ADD_OP_FRCP_FAST_F16_Y
:
1398 BIFROST_ADD_OP_FRCP_FAST_F16_X
) :
1399 BIFROST_ADD_OP_FRCP_FAST_F32
;
1400 } else if (ins
->op
.special
== BI_SPECIAL_FRSQ
) {
1402 (Y
? BIFROST_ADD_OP_FRSQ_FAST_F16_Y
:
1403 BIFROST_ADD_OP_FRSQ_FAST_F16_X
) :
1404 BIFROST_ADD_OP_FRSQ_FAST_F32
;
1406 } else if (ins
->op
.special
== BI_SPECIAL_EXP2_LOW
) {
1408 op
= BIFROST_ADD_OP_FEXP2_FAST
;
1410 unreachable("Unknown special op");
1413 return bi_pack_add_1src(ins
, regs
, op
);
1417 bi_pack_add_table(bi_instruction
*ins
, struct bi_registers
*regs
)
1420 assert(ins
->dest_type
== nir_type_float32
);
1422 op
= BIFROST_ADD_OP_LOG2_HELP
;
1423 return bi_pack_add_1src(ins
, regs
, op
);
1426 bi_pack_add_tex_compact(bi_clause
*clause
, bi_instruction
*ins
, struct bi_registers
*regs
, gl_shader_stage stage
)
1428 bool f16
= ins
->dest_type
== nir_type_float16
;
1429 bool vtx
= stage
!= MESA_SHADER_FRAGMENT
;
1431 struct bifrost_tex_compact pack
= {
1432 .src0
= bi_get_src(ins
, regs
, 0),
1433 .src1
= bi_get_src(ins
, regs
, 1),
1434 .op
= f16
? BIFROST_ADD_OP_TEX_COMPACT_F16(vtx
) :
1435 BIFROST_ADD_OP_TEX_COMPACT_F32(vtx
),
1436 .compute_lod
= !vtx
,
1437 .tex_index
= ins
->texture
.texture_index
,
1438 .sampler_index
= ins
->texture
.sampler_index
1441 bi_write_data_register(clause
, ins
);
1442 RETURN_PACKED(pack
);
1446 bi_pack_add_select(bi_instruction
*ins
, struct bi_registers
*regs
)
1448 unsigned size
= nir_alu_type_get_type_size(ins
->src_types
[0]);
1451 unsigned swiz
= (ins
->swizzle
[0][0] | (ins
->swizzle
[1][0] << 1));
1452 unsigned op
= BIFROST_ADD_SEL_16(swiz
);
1453 return bi_pack_add_2src(ins
, regs
, op
);
/* Maps an IR comparison condition to the hardware discard condition,
 * possibly requesting an operand swap via *flip.
 * NOTE(review): the switch scaffolding (case labels and any *flip
 * writes) is elided in this extract -- only the return statements
 * survive. Verify the cond -> opcode mapping against the full file. */
static enum bifrost_discard_cond
bi_cond_to_discard(enum bi_cond cond, bool *flip)
                return BIFROST_DISCARD_FLT;
                return BIFROST_DISCARD_FLE;
                return BIFROST_DISCARD_FNE;
                return BIFROST_DISCARD_FEQ;
        unreachable("Invalid op for discard");
/* Packs a conditional DISCARD on the ADD slot. Operands are swapped when
 * the condition translation sets `flip`; fp16 sources additionally carry
 * per-source lane selects taken from the swizzle.
 * NOTE(review): elided in this extract: the declaration of `flip`
 * (presumably `bool flip = false;`), original line 1491 inside the
 * initializer (likely the field consuming `cond`, which is otherwise
 * unused), and the initializer's closing "};". Verify against the full
 * file. */
bi_pack_add_discard(bi_instruction *ins, struct bi_registers *regs)
        bool fp16 = ins->src_types[0] == nir_type_float16;
        assert(fp16 || ins->src_types[0] == nir_type_float32);

        enum bifrost_discard_cond cond = bi_cond_to_discard(ins->cond, &flip);

        struct bifrost_add_discard pack = {
                /* Swap the operands when the condition required flipping */
                .src0 = bi_get_src(ins, regs, flip ? 1 : 0),
                .src1 = bi_get_src(ins, regs, flip ? 0 : 1),
                .src0_select = fp16 ? ins->swizzle[0][0] : 0,
                .src1_select = fp16 ? ins->swizzle[1][0] : 0,
                .fp32 = fp16 ? 0 : 1,
                .op = BIFROST_ADD_OP_DISCARD

        RETURN_PACKED(pack);
/* Maps an IR comparison to a hardware integer-compare condition,
 * possibly requesting an operand swap via *flip; the opcode choice
 * depends on signedness and on 16- vs 32-bit width.
 * NOTE(review): the case labels, *flip writes, and part of the first
 * ternary are elided in this extract. The IGE/UGT pairings below look
 * deliberately cross-wired for the 16-bit case (possibly a hardware
 * quirk) -- confirm against the full file before relying on this
 * mapping. */
static enum bifrost_icmp_cond
bi_cond_to_icmp(enum bi_cond cond, bool *flip, bool is_unsigned, bool is_16)
                return is_unsigned ? (is_16 ? BIFROST_ICMP_IGE : BIFROST_ICMP_UGT)
                return is_unsigned ? BIFROST_ICMP_UGE :
                        (is_16 ? BIFROST_ICMP_UGT : BIFROST_ICMP_IGE);
                return BIFROST_ICMP_NEQ;
                return BIFROST_ICMP_EQ;
        unreachable("Invalid op for icmp");
/* Packs a 32-bit integer compare on the ADD slot; `flip` swaps the two
 * sources to express conditions the hardware lacks directly.
 * NOTE(review): original lines 1533-1535 inside the initializer are
 * elided -- presumably including a field consuming `cond`, which is
 * otherwise unused here. Verify against the full file. */
bi_pack_add_icmp32(bi_instruction *ins, struct bi_registers *regs, bool flip,
                enum bifrost_icmp_cond cond)
        struct bifrost_add_icmp pack = {
                .src0 = bi_get_src(ins, regs, flip ? 1 : 0),
                .src1 = bi_get_src(ins, regs, flip ? 0 : 1),
                .op = BIFROST_ADD_OP_ICMP_32

        RETURN_PACKED(pack);
/* Packs a 16-bit integer compare on the ADD slot; like the 32-bit
 * variant, but with per-source lane swizzles (which are also swapped
 * along with the sources when `flip` is set).
 * NOTE(review): original lines 1551-1552 inside the initializer are
 * elided -- presumably including the field consuming `cond`. Verify
 * against the full file. */
bi_pack_add_icmp16(bi_instruction *ins, struct bi_registers *regs, bool flip,
                enum bifrost_icmp_cond cond)
        struct bifrost_add_icmp16 pack = {
                .src0 = bi_get_src(ins, regs, flip ? 1 : 0),
                .src1 = bi_get_src(ins, regs, flip ? 0 : 1),
                .src0_swizzle = bi_swiz16(ins, flip ? 1 : 0),
                .src1_swizzle = bi_swiz16(ins, flip ? 0 : 1),
                .op = BIFROST_ADD_OP_ICMP_16

        RETURN_PACKED(pack);
/* Packs a comparison on the ADD slot, dispatching on the source base
 * type. Integer compares translate the condition (possibly flipping the
 * operands) and route to the 32- or 16-bit packer; the commented-out
 * bi_invert_cond on the 16-bit path hints at an unresolved quirk. Float
 * compares are still TODO.
 * NOTE(review): elided in this extract: the declaration of `flip`, the
 * size dispatch (`if (sz == ...)`) selecting between the two returns,
 * and whatever consumes `Tr` (likely an assert that both source types
 * match). Verify against the full file. */
bi_pack_add_cmp(bi_instruction *ins, struct bi_registers *regs)
        nir_alu_type Tl = ins->src_types[0];
        nir_alu_type Tr = ins->src_types[1];
        nir_alu_type Bl = nir_alu_type_get_base_type(Tl);

        if (Bl == nir_type_uint || Bl == nir_type_int) {
                unsigned sz = nir_alu_type_get_type_size(Tl);

                enum bifrost_icmp_cond cond = bi_cond_to_icmp(
                        sz == 16 ? /*bi_invert_cond*/(ins->cond) : ins->cond,
                        &flip, Bl == nir_type_uint, sz == 16);

                return bi_pack_add_icmp32(ins, regs, flip, cond);
                return bi_pack_add_icmp16(ins, regs, flip, cond);
        unreachable("TODO");
        unreachable("TODO");
/* Packs an integer add/subtract on the ADD slot, choosing the opcode by
 * operand size (8/16/32 bits); both sources must share a type and
 * 64-bit is explicitly unimplemented.
 * NOTE(review): elided in this extract: the declaration of `op`, the
 * `if (sz == 8)` branch head, and the ISUB_8 alternative of the first
 * ternary (strongly implied by the parallel 16/32-bit arms). Verify
 * against the full file. */
bi_pack_add_imath(bi_instruction *ins, struct bi_registers *regs)
        /* TODO: 32+16 add */
        assert(ins->src_types[0] == ins->src_types[1]);
        unsigned sz = nir_alu_type_get_type_size(ins->src_types[0]);
        enum bi_imath_op p = ins->op.imath;

                op = (p == BI_IMATH_ADD) ? BIFROST_ADD_IADD_8 :
        } else if (sz == 16) {
                op = (p == BI_IMATH_ADD) ? BIFROST_ADD_IADD_16 :
                        BIFROST_ADD_ISUB_16;
        } else if (sz == 32) {
                op = (p == BI_IMATH_ADD) ? BIFROST_ADD_IADD_32 :
                        BIFROST_ADD_ISUB_32;
                unreachable("64-bit todo");

        return bi_pack_add_2src(ins, regs, op);
/* Packs the ADD slot of a bundle: a NOP when the slot is empty,
 * otherwise a dispatch on the instruction class to the matching packer.
 * Packers that interact with the clause's data register also take the
 * clause.
 * NOTE(review): most `case BI_...:` labels (and the empty-slot check
 * guarding the NOP return) are elided in this extract; only
 * BI_LOAD_UNIFORM and BI_LOAD_VAR_ADDRESS survive. The dispatch targets
 * are kept in source order -- verify the label/target pairing against
 * the full file. */
bi_pack_add(bi_clause *clause, bi_bundle bundle, struct bi_registers *regs, gl_shader_stage stage)
                return BIFROST_ADD_NOP;

        switch (bundle.add->type) {
                return bi_pack_add_addmin(bundle.add, regs);
                return bi_pack_add_atest(clause, bundle.add, regs);
                unreachable("Packing todo");
                return bi_pack_add_cmp(bundle.add, regs);
                return bi_pack_add_blend(clause, bundle.add, regs);
                unreachable("Packing todo");
                return bi_pack_convert(bundle.add, regs, false);
                return bi_pack_add_discard(bundle.add, regs);
                unreachable("Packing todo");
                return bi_pack_add_imath(bundle.add, regs);
                unreachable("Packing todo");
                return bi_pack_add_ld_attr(clause, bundle.add, regs);
        case BI_LOAD_UNIFORM:
                return bi_pack_add_ld_ubo(clause, bundle.add, regs);
                return bi_pack_add_ld_vary(clause, bundle.add, regs);
        case BI_LOAD_VAR_ADDRESS:
                return bi_pack_add_ld_var_addr(clause, bundle.add, regs);
                return bi_pack_add_addmin(bundle.add, regs);
                unreachable("Packing todo");
                return bi_pack_add_st_vary(clause, bundle.add, regs);
                return bi_pack_add_special(bundle.add, regs);
                return bi_pack_add_table(bundle.add, regs);
                return bi_pack_add_select(bundle.add, regs);
                /* Texturing has multiple encodings; only compact is wired up */
                if (bundle.add->op.texture == BI_TEX_COMPACT)
                        return bi_pack_add_tex_compact(clause, bundle.add, regs, stage);
                unreachable("Unknown tex type");
                unreachable("Packing todo");
                unreachable("Cannot encode class as ADD");
/* A bundle packed into its wire representation.
 * NOTE(review): the member list (original lines 1677-1680) is elided in
 * this extract; bi_pack_bundle and bi_pack_clause reference .lo and .hi
 * members. Verify against the full file. */
struct bi_packed_bundle
{
/* Packs one bundle: assigns ports and the uniform/constant state, packs
 * the register block and both execution slots (FMA and ADD), then
 * splices them into the packed bundle's low word (registers at bit 0,
 * FMA at bit 35, the low 6 bits of ADD at bit 58).
 * NOTE(review): the tail of this function (the `.hi` initializer and
 * the return, original lines 1694-1698) is elided in this extract; the
 * `&regs` arguments below were mojibake ("(R)" sign) in the extract and
 * have been restored. Verify against the full file. */
static struct bi_packed_bundle
bi_pack_bundle(bi_clause *clause, bi_bundle bundle, bi_bundle prev, bool first_bundle, gl_shader_stage stage)
        struct bi_registers regs = bi_assign_ports(bundle, prev);
        bi_assign_uniform_constant(clause, &regs, bundle);
        regs.first_instruction = first_bundle;

        uint64_t reg = bi_pack_registers(regs);
        uint64_t fma = bi_pack_fma(clause, bundle, &regs);
        uint64_t add = bi_pack_add(clause, bundle, &regs, stage);

        struct bi_packed_bundle packed = {
                .lo = reg | (fma << 35) | ((add & 0b111111) << 58),
/* Packs the next two constants as a dedicated constant quadword at the end of
 * the clause, returning the number packed. */

/* NOTE(review): elided in this extract: the return type, an `unsigned
 * index` parameter (implied by the `index + N` uses), the braces,
 * whatever consumes `only` (it is otherwise unused here), the
 * initializer's closing "};", and the final return of the packed count.
 * Verify against the full file. */
bi_pack_constants(bi_context *ctx, bi_clause *clause,
                struct util_dynarray *emission)
        /* After these two, are we done? Determines tag */
        bool done = clause->constant_count <= (index + 2);
        bool only = clause->constant_count <= (index + 1);

        /* Only the first quadword of a single-bundle clause is handled here */
        assert(index == 0 && clause->bundle_count == 1);

        /* Top nibble of the first constant, used by the workaround below */
        uint64_t hi = clause->constants[index + 0] >> 60ull;

        struct bifrost_fmt_constant quad = {
                .pos = 0, /* TODO */
                .tag = done ? BIFROST_FMTC_FINAL : BIFROST_FMTC_CONSTANTS,
                .imm_1 = clause->constants[index + 0] >> 4,
                .imm_2 = ((hi < 8) ? (hi << 60ull) : 0) >> 4,

        /* XXX: On G71, Connor observed that the difference of the top 4 bits
         * of the second constant with the first must be less than 8, otherwise
         * we have to swap them. On G52, I'm able to reproduce a similar issue
         * but with a different workaround (modeled above with a single
         * constant, unclear how to workaround for multiple constants.) Further
         * investigation needed. Possibly an errata. XXX */

        util_dynarray_append(emission, struct bifrost_fmt_constant, quad);
/* Emits one packed clause: packs its (single) bundle, builds the fmt1
 * quadword carrying the header and the bundle bits, appends it to the
 * emission stream, then appends any constant quadwords.
 * NOTE(review): elided in this extract: the return type and braces, an
 * `.ins_1` field between `.header` and `.ins_2` (original line 1753,
 * presumably carrying ins_1.lo), the initializer's closing "};", and
 * the loop's closing braces. Verify against the full file. */
bi_pack_clause(bi_context *ctx, bi_clause *clause, bi_clause *next,
        struct util_dynarray *emission, gl_shader_stage stage)
        struct bi_packed_bundle ins_1 = bi_pack_bundle(clause, clause->bundles[0], clause->bundles[0], true, stage);
        assert(clause->bundle_count == 1);

        /* Used to decide if we elide writes */
        bool is_fragment = ctx->stage == MESA_SHADER_FRAGMENT;

        /* State for packing constants throughout */
        unsigned constant_index = 0;

        struct bifrost_fmt1 quad_1 = {
                .tag = clause->constant_count ? BIFROST_FMT1_CONSTANTS : BIFROST_FMT1_FINAL,
                .header = bi_pack_header(clause, next, is_fragment),
                /* The packed bundle's high word is split across ins_2/ins_0 */
                .ins_2 = ins_1.hi & ((1 << 11) - 1),
                .ins_0 = (ins_1.hi >> 11) & 0b111,

        util_dynarray_append(emission, struct bifrost_fmt1, quad_1);

        /* Pack the remaining constants */

        while (constant_index < clause->constant_count) {
                constant_index += bi_pack_constants(ctx, clause,
                        constant_index, emission);
/* Finds the clause executed after `clause`: the next clause in its own
 * block if any, otherwise the first clause of the next non-empty block.
 * NOTE(review): the return type and the function tail (closing braces
 * and the fall-through return -- presumably NULL at end of shader) are
 * elided in this extract; verify against the full file. */
bi_next_clause(bi_context *ctx, pan_block *block, bi_clause *clause)
        /* Try the next clause in this block */
        if (clause->link.next != &((bi_block *) block)->clauses)
                return list_first_entry(&(clause->link), bi_clause, link);

        /* Try the next block, or the one after that if it's empty, etc .*/
        pan_block *next_block = pan_next_block(block);

        bi_foreach_block_from(ctx, next_block, block) {
                bi_block *blk = (bi_block *) block;

                if (!list_is_empty(&blk->clauses))
                        return list_first_entry(&(blk->clauses), bi_clause, link);
1789 bi_pack(bi_context
*ctx
, struct util_dynarray
*emission
)
1791 util_dynarray_init(emission
, NULL
);
1793 bi_foreach_block(ctx
, _block
) {
1794 bi_block
*block
= (bi_block
*) _block
;
1796 bi_foreach_clause_in_block(block
, clause
) {
1797 bi_clause
*next
= bi_next_clause(ctx
, _block
, clause
);
1798 bi_pack_clause(ctx
, clause
, next
, emission
, ctx
->stage
);