2 * Copyright (C) 2020 Collabora, Ltd.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
27 #define RETURN_PACKED(str) { \
29 memcpy(&temp, &str, sizeof(str)); \
33 /* This file contains the final passes of the compiler. Running after
34 * scheduling and RA, the IR is now finalized, so we need to emit it to actual
35 * bits on the wire (as well as fixup branches) */
38 bi_pack_header(bi_clause
*clause
, bi_clause
*next
, bool is_fragment
)
40 struct bifrost_header header
= {
41 .back_to_back
= clause
->back_to_back
,
42 .no_end_of_shader
= (next
!= NULL
),
43 .elide_writes
= is_fragment
,
44 .branch_cond
= clause
->branch_conditional
,
45 .datareg_writebarrier
= clause
->data_register_write_barrier
,
46 .datareg
= clause
->data_register
,
47 .scoreboard_deps
= next
? next
->dependencies
: 0,
48 .scoreboard_index
= clause
->scoreboard_id
,
49 .clause_type
= clause
->clause_type
,
50 .next_clause_type
= next
? next
->clause_type
: 0,
55 header
.branch_cond
|= header
.back_to_back
;
58 memcpy(&u
, &header
, sizeof(header
));
62 /* The uniform/constant slot allows loading a contiguous 64-bit immediate or
63 * pushed uniform per bundle. Figure out which one we need in the bundle (the
64 * scheduler needs to ensure we only have one type per bundle), validate
65 * everything, and rewrite away the register/uniform indices to use 3-bit
66 * sources directly. */
69 bi_lookup_constant(bi_clause
*clause
, uint64_t cons
, bool *hi
, bool b64
)
71 uint64_t want
= (cons
>> 4);
73 for (unsigned i
= 0; i
< clause
->constant_count
; ++i
) {
74 /* Only check top 60-bits since that's what's actually embedded
75 * in the clause, the bottom 4-bits are bundle-inline */
77 uint64_t candidates
[2] = {
78 clause
->constants
[i
] >> 4,
79 clause
->constants
[i
] >> 36
82 /* For <64-bit mode, we treat lo/hi separately */
85 candidates
[0] &= (0xFFFFFFFF >> 4);
87 if (candidates
[0] == want
)
90 if (candidates
[1] == want
&& !b64
) {
96 unreachable("Invalid constant accessed");
100 bi_constant_field(unsigned idx
)
104 const unsigned values
[] = {
108 return values
[idx
] << 4;
112 bi_assign_uniform_constant_single(
115 bi_instruction
*ins
, bool assigned
, bool fast_zero
)
120 if (ins
->type
== BI_BLEND
) {
122 regs
->uniform_constant
= 0x8;
126 if (ins
->type
== BI_BRANCH
&& clause
->branch_constant
) {
127 /* By convention branch constant is last */
128 unsigned idx
= clause
->constant_count
- 1;
130 /* We can only jump to clauses which are qword aligned so the
131 * bottom 4-bits of the offset are necessarily 0 */
134 /* Build the constant */
135 unsigned C
= bi_constant_field(idx
) | lo
;
137 if (assigned
&& regs
->uniform_constant
!= C
)
138 unreachable("Mismatched uniform/const field: branch");
140 regs
->uniform_constant
= C
;
144 bi_foreach_src(ins
, s
) {
145 if (s
== 0 && (ins
->type
== BI_LOAD_VAR_ADDRESS
|| ins
->type
== BI_LOAD_ATTR
)) continue;
146 if (s
== 1 && (ins
->type
== BI_BRANCH
)) continue;
148 if (ins
->src
[s
] & BIR_INDEX_CONSTANT
) {
149 /* Let direct addresses through */
150 if (ins
->type
== BI_LOAD_VAR
)
154 bool b64
= nir_alu_type_get_type_size(ins
->src_types
[s
]) > 32;
155 uint64_t cons
= bi_get_immediate(ins
, s
);
156 unsigned idx
= bi_lookup_constant(clause
, cons
, &hi
, b64
);
157 unsigned lo
= clause
->constants
[idx
] & 0xF;
158 unsigned f
= bi_constant_field(idx
) | lo
;
160 if (assigned
&& regs
->uniform_constant
!= f
)
161 unreachable("Mismatched uniform/const field: imm");
163 regs
->uniform_constant
= f
;
164 ins
->src
[s
] = BIR_INDEX_PASS
| (hi
? BIFROST_SRC_CONST_HI
: BIFROST_SRC_CONST_LO
);
166 } else if (ins
->src
[s
] & BIR_INDEX_ZERO
&& (ins
->type
== BI_LOAD_UNIFORM
|| ins
->type
== BI_LOAD_VAR
)) {
167 /* XXX: HACK UNTIL WE HAVE HI MATCHING DUE TO OVERFLOW XXX */
168 ins
->src
[s
] = BIR_INDEX_PASS
| BIFROST_SRC_CONST_HI
;
169 } else if (ins
->src
[s
] & BIR_INDEX_ZERO
&& !fast_zero
) {
170 /* FMAs have a fast zero port, ADD needs to use the
171 * uniform/const port's special 0 mode handled here */
174 if (assigned
&& regs
->uniform_constant
!= f
)
175 unreachable("Mismatched uniform/const field: 0");
177 regs
->uniform_constant
= f
;
178 ins
->src
[s
] = BIR_INDEX_PASS
| BIFROST_SRC_CONST_LO
;
180 } else if (ins
->src
[s
] & BIR_INDEX_ZERO
&& fast_zero
) {
181 ins
->src
[s
] = BIR_INDEX_PASS
| BIFROST_SRC_STAGE
;
182 } else if (s
& BIR_INDEX_UNIFORM
) {
183 unreachable("Push uniforms not implemented yet");
191 bi_assign_uniform_constant(
197 bi_assign_uniform_constant_single(regs
, clause
, bundle
.fma
, false, true);
199 bi_assign_uniform_constant_single(regs
, clause
, bundle
.add
, assigned
, false);
202 /* Assigns a port for reading, before anything is written */
205 bi_assign_port_read(bi_registers
*regs
, unsigned src
)
207 /* We only assign for registers */
208 if (!(src
& BIR_INDEX_REGISTER
))
211 unsigned reg
= src
& ~BIR_INDEX_REGISTER
;
213 /* Check if we already assigned the port */
214 for (unsigned i
= 0; i
<= 1; ++i
) {
215 if (regs
->port
[i
] == reg
&& regs
->enabled
[i
])
219 if (regs
->port
[3] == reg
&& regs
->read_port3
)
224 for (unsigned i
= 0; i
<= 1; ++i
) {
225 if (!regs
->enabled
[i
]) {
227 regs
->enabled
[i
] = true;
232 if (!regs
->read_port3
) {
234 regs
->read_port3
= true;
238 bi_print_ports(regs
, stderr
);
239 unreachable("Failed to find a free port for src");
243 bi_assign_ports(bi_bundle
*now
, bi_bundle
*prev
)
245 /* We assign ports for the main register mechanism. Special ops
246 * use the data registers, which has its own mechanism entirely
247 * and thus gets skipped over here. */
249 unsigned read_dreg
= now
->add
&&
250 bi_class_props
[now
->add
->type
] & BI_DATA_REG_SRC
;
252 unsigned write_dreg
= prev
->add
&&
253 bi_class_props
[prev
->add
->type
] & BI_DATA_REG_DEST
;
255 /* First, assign reads */
258 bi_foreach_src(now
->fma
, src
)
259 bi_assign_port_read(&now
->regs
, now
->fma
->src
[src
]);
262 bi_foreach_src(now
->add
, src
) {
263 if (!(src
== 0 && read_dreg
))
264 bi_assign_port_read(&now
->regs
, now
->add
->src
[src
]);
268 /* Next, assign writes */
270 if (prev
->add
&& prev
->add
->dest
& BIR_INDEX_REGISTER
&& !write_dreg
) {
271 now
->regs
.port
[2] = prev
->add
->dest
& ~BIR_INDEX_REGISTER
;
272 now
->regs
.write_add
= true;
275 if (prev
->fma
&& prev
->fma
->dest
& BIR_INDEX_REGISTER
) {
276 unsigned r
= prev
->fma
->dest
& ~BIR_INDEX_REGISTER
;
278 if (now
->regs
.write_add
) {
279 /* Scheduler constraint: cannot read 3 and write 2 */
280 assert(!now
->regs
.read_port3
);
281 now
->regs
.port
[3] = r
;
283 now
->regs
.port
[2] = r
;
286 now
->regs
.write_fma
= true;
292 /* Determines the register control field, ignoring the first? flag */
294 static enum bifrost_reg_control
295 bi_pack_register_ctrl_lo(bi_registers r
)
299 assert(!r
.read_port3
);
300 return BIFROST_WRITE_ADD_P2_FMA_P3
;
303 return BIFROST_WRITE_FMA_P2_READ_P3
;
305 return BIFROST_WRITE_FMA_P2
;
307 } else if (r
.write_add
) {
309 return BIFROST_WRITE_ADD_P2_READ_P3
;
311 return BIFROST_WRITE_ADD_P2
;
312 } else if (r
.read_port3
)
313 return BIFROST_READ_P3
;
315 return BIFROST_REG_NONE
;
318 /* Ditto but account for the first? flag this time */
320 static enum bifrost_reg_control
321 bi_pack_register_ctrl(bi_registers r
)
323 enum bifrost_reg_control ctrl
= bi_pack_register_ctrl_lo(r
);
325 if (r
.first_instruction
) {
326 if (ctrl
== BIFROST_REG_NONE
)
327 ctrl
= BIFROST_FIRST_NONE
;
328 else if (ctrl
== BIFROST_WRITE_FMA_P2_READ_P3
)
329 ctrl
= BIFROST_FIRST_WRITE_FMA_P2_READ_P3
;
331 ctrl
|= BIFROST_FIRST_NONE
;
338 bi_pack_registers(bi_registers regs
)
340 enum bifrost_reg_control ctrl
= bi_pack_register_ctrl(regs
);
341 struct bifrost_regs s
= { 0 };
344 if (regs
.enabled
[1]) {
345 /* Gotta save that bit!~ Required by the 63-x trick */
346 assert(regs
.port
[1] > regs
.port
[0]);
347 assert(regs
.enabled
[0]);
349 /* Do the 63-x trick, see docs/disasm */
350 if (regs
.port
[0] > 31) {
351 regs
.port
[0] = 63 - regs
.port
[0];
352 regs
.port
[1] = 63 - regs
.port
[1];
355 assert(regs
.port
[0] <= 31);
356 assert(regs
.port
[1] <= 63);
359 s
.reg1
= regs
.port
[1];
360 s
.reg0
= regs
.port
[0];
362 /* Port 1 disabled, so set to zero and use port 1 for ctrl */
366 if (regs
.enabled
[0]) {
367 /* Bit 0 upper bit of port 0 */
368 s
.reg1
|= (regs
.port
[0] >> 5);
370 /* Rest of port 0 in usual spot */
371 s
.reg0
= (regs
.port
[0] & 0b11111);
373 /* Bit 1 set if port 0 also disabled */
378 /* When port 3 isn't used, we have to set it to port 2, and vice versa,
379 * or INSTR_INVALID_ENC is raised. The reason is unknown. */
381 bool has_port2
= regs
.write_fma
|| regs
.write_add
;
382 bool has_port3
= regs
.read_port3
|| (regs
.write_fma
&& regs
.write_add
);
385 regs
.port
[3] = regs
.port
[2];
388 regs
.port
[2] = regs
.port
[3];
390 s
.reg3
= regs
.port
[3];
391 s
.reg2
= regs
.port
[2];
392 s
.uniform_const
= regs
.uniform_constant
;
394 memcpy(&packed
, &s
, sizeof(s
));
399 bi_set_data_register(bi_clause
*clause
, unsigned idx
)
401 assert(idx
& BIR_INDEX_REGISTER
);
402 unsigned reg
= idx
& ~BIR_INDEX_REGISTER
;
404 clause
->data_register
= reg
;
408 bi_read_data_register(bi_clause
*clause
, bi_instruction
*ins
)
410 bi_set_data_register(clause
, ins
->src
[0]);
414 bi_write_data_register(bi_clause
*clause
, bi_instruction
*ins
)
416 bi_set_data_register(clause
, ins
->dest
);
419 static enum bifrost_packed_src
420 bi_get_src_reg_port(bi_registers
*regs
, unsigned src
)
422 unsigned reg
= src
& ~BIR_INDEX_REGISTER
;
424 if (regs
->port
[0] == reg
&& regs
->enabled
[0])
425 return BIFROST_SRC_PORT0
;
426 else if (regs
->port
[1] == reg
&& regs
->enabled
[1])
427 return BIFROST_SRC_PORT1
;
428 else if (regs
->port
[3] == reg
&& regs
->read_port3
)
429 return BIFROST_SRC_PORT3
;
431 unreachable("Tried to access register with no port");
434 static enum bifrost_packed_src
435 bi_get_src(bi_instruction
*ins
, bi_registers
*regs
, unsigned s
)
437 unsigned src
= ins
->src
[s
];
439 if (src
& BIR_INDEX_REGISTER
)
440 return bi_get_src_reg_port(regs
, src
);
441 else if (src
& BIR_INDEX_PASS
)
442 return src
& ~BIR_INDEX_PASS
;
444 bi_print_instruction(ins
, stderr
);
445 unreachable("Unknown src in above instruction");
449 /* Constructs a packed 2-bit swizzle for a 16-bit vec2 source. Source must be
450 * 16-bit and written components must correspond to valid swizzles (component x
454 bi_swiz16(bi_instruction
*ins
, unsigned src
)
456 assert(nir_alu_type_get_type_size(ins
->src_types
[src
]) == 16);
457 unsigned swizzle
= 0;
459 for (unsigned c
= 0; c
< 2; ++c
) {
460 if (!bi_writes_component(ins
, src
)) continue;
462 unsigned k
= ins
->swizzle
[src
][c
];
471 bi_pack_fma_fma(bi_instruction
*ins
, bi_registers
*regs
)
473 /* (-a)(-b) = ab, so we only need one negate bit */
474 bool negate_mul
= ins
->src_neg
[0] ^ ins
->src_neg
[1];
476 if (ins
->op
.mscale
) {
477 assert(!(ins
->src_abs
[0] && ins
->src_abs
[1]));
478 assert(!ins
->src_abs
[2] || !ins
->src_neg
[3] || !ins
->src_abs
[3]);
480 /* We can have exactly one abs, and can flip the multiplication
481 * to make it fit if we have to */
482 bool flip_ab
= ins
->src_abs
[1];
484 struct bifrost_fma_mscale pack
= {
485 .src0
= bi_get_src(ins
, regs
, flip_ab
? 1 : 0),
486 .src1
= bi_get_src(ins
, regs
, flip_ab
? 0 : 1),
487 .src2
= bi_get_src(ins
, regs
, 2),
488 .src3
= bi_get_src(ins
, regs
, 3),
491 .src0_abs
= ins
->src_abs
[0] || ins
->src_abs
[1],
492 .src1_neg
= negate_mul
,
493 .src2_neg
= ins
->src_neg
[2],
494 .op
= BIFROST_FMA_OP_MSCALE
,
498 } else if (ins
->dest_type
== nir_type_float32
) {
499 struct bifrost_fma_fma pack
= {
500 .src0
= bi_get_src(ins
, regs
, 0),
501 .src1
= bi_get_src(ins
, regs
, 1),
502 .src2
= bi_get_src(ins
, regs
, 2),
503 .src0_abs
= ins
->src_abs
[0],
504 .src1_abs
= ins
->src_abs
[1],
505 .src2_abs
= ins
->src_abs
[2],
506 .src0_neg
= negate_mul
,
507 .src2_neg
= ins
->src_neg
[2],
508 .outmod
= ins
->outmod
,
509 .roundmode
= ins
->roundmode
,
510 .op
= BIFROST_FMA_OP_FMA
514 } else if (ins
->dest_type
== nir_type_float16
) {
515 struct bifrost_fma_fma16 pack
= {
516 .src0
= bi_get_src(ins
, regs
, 0),
517 .src1
= bi_get_src(ins
, regs
, 1),
518 .src2
= bi_get_src(ins
, regs
, 2),
519 .swizzle_0
= bi_swiz16(ins
, 0),
520 .swizzle_1
= bi_swiz16(ins
, 1),
521 .swizzle_2
= bi_swiz16(ins
, 2),
522 .src0_neg
= negate_mul
,
523 .src2_neg
= ins
->src_neg
[2],
524 .outmod
= ins
->outmod
,
525 .roundmode
= ins
->roundmode
,
526 .op
= BIFROST_FMA_OP_FMA16
531 unreachable("Invalid fma dest type");
536 bi_pack_fma_addmin_f32(bi_instruction
*ins
, bi_registers
*regs
)
539 (ins
->type
== BI_ADD
) ? BIFROST_FMA_OP_FADD32
:
540 (ins
->op
.minmax
== BI_MINMAX_MIN
) ? BIFROST_FMA_OP_FMIN32
:
541 BIFROST_FMA_OP_FMAX32
;
543 struct bifrost_fma_add pack
= {
544 .src0
= bi_get_src(ins
, regs
, 0),
545 .src1
= bi_get_src(ins
, regs
, 1),
546 .src0_abs
= ins
->src_abs
[0],
547 .src1_abs
= ins
->src_abs
[1],
548 .src0_neg
= ins
->src_neg
[0],
549 .src1_neg
= ins
->src_neg
[1],
551 .outmod
= ins
->outmod
,
552 .roundmode
= (ins
->type
== BI_ADD
) ? ins
->roundmode
: ins
->minmax
,
560 bi_pack_fp16_abs(bi_instruction
*ins
, bi_registers
*regs
, bool *flip
)
562 /* Absolute values are packed in a quirky way. Let k = src1 < src0. Let
563 * l be an auxiliary bit we encode. Then the hardware determines:
568 * Since add/min/max are commutative, this saves a bit by using the
569 * order of the operands as a bit (k). To pack this, first note:
571 * (l && k) implies (l || k).
573 * That is, if the second argument is abs'd, then the first argument
574 * also has abs. So there are three cases:
576 * Case 0: Neither src has absolute value. Then we have l = k = 0.
578 * Case 1: Exactly one src has absolute value. Assign that source to
579 * src0 and the other source to src1. Compute k = src1 < src0 based on
580 * that assignment. Then l = ~k.
582 * Case 2: Both sources have absolute value. Then we have l = k = 1.
583 * Note to force k = 1 requires that (src1 < src0) OR (src0 < src1).
584 * That is, this encoding is only valid if src1 and src0 are distinct.
585 * This is a scheduling restriction (XXX); if an op of this type
586 * requires both identical sources to have abs value, then we must
587 * schedule to ADD (which does not use this ordering trick).
590 unsigned abs_0
= ins
->src_abs
[0], abs_1
= ins
->src_abs
[1];
591 unsigned src_0
= bi_get_src(ins
, regs
, 0);
592 unsigned src_1
= bi_get_src(ins
, regs
, 1);
594 assert(!(abs_0
&& abs_1
&& src_0
== src_1
));
596 if (!abs_0
&& !abs_1
) {
597 /* Force k = 0 <===> NOT(src1 < src0) */
598 *flip
= (src_1
< src_0
);
600 } else if (abs_0
&& !abs_1
) {
601 return src_1
>= src_0
;
602 } else if (abs_1
&& !abs_0
) {
604 return src_0
>= src_1
;
606 *flip
= !(src_1
< src_0
);
612 bi_pack_fmadd_min_f16(bi_instruction
*ins
, bi_registers
*regs
, bool FMA
)
615 (!FMA
) ? ((ins
->op
.minmax
== BI_MINMAX_MIN
) ?
616 BIFROST_ADD_OP_FMIN16
: BIFROST_ADD_OP_FMAX16
) :
617 (ins
->type
== BI_ADD
) ? BIFROST_FMA_OP_FADD16
:
618 (ins
->op
.minmax
== BI_MINMAX_MIN
) ? BIFROST_FMA_OP_FMIN16
:
619 BIFROST_FMA_OP_FMAX16
;
622 bool l
= bi_pack_fp16_abs(ins
, regs
, &flip
);
623 unsigned src_0
= bi_get_src(ins
, regs
, 0);
624 unsigned src_1
= bi_get_src(ins
, regs
, 1);
627 struct bifrost_fma_add_minmax16 pack
= {
628 .src0
= flip
? src_1
: src_0
,
629 .src1
= flip
? src_0
: src_1
,
630 .src0_neg
= ins
->src_neg
[flip
? 1 : 0],
631 .src1_neg
= ins
->src_neg
[flip
? 0 : 1],
632 .src0_swizzle
= bi_swiz16(ins
, flip
? 1 : 0),
633 .src1_swizzle
= bi_swiz16(ins
, flip
? 0 : 1),
635 .outmod
= ins
->outmod
,
636 .mode
= (ins
->type
== BI_ADD
) ? ins
->roundmode
: ins
->minmax
,
642 /* Can't have modes for fp16 */
643 assert(ins
->outmod
== 0);
645 struct bifrost_add_fmin16 pack
= {
646 .src0
= flip
? src_1
: src_0
,
647 .src1
= flip
? src_0
: src_1
,
648 .src0_neg
= ins
->src_neg
[flip
? 1 : 0],
649 .src1_neg
= ins
->src_neg
[flip
? 0 : 1],
651 .src0_swizzle
= bi_swiz16(ins
, flip
? 1 : 0),
652 .src1_swizzle
= bi_swiz16(ins
, flip
? 0 : 1),
662 bi_pack_fma_addmin(bi_instruction
*ins
, bi_registers
*regs
)
664 if (ins
->dest_type
== nir_type_float32
)
665 return bi_pack_fma_addmin_f32(ins
, regs
);
666 else if(ins
->dest_type
== nir_type_float16
)
667 return bi_pack_fmadd_min_f16(ins
, regs
, true);
669 unreachable("Unknown FMA/ADD type");
673 bi_pack_fma_1src(bi_instruction
*ins
, bi_registers
*regs
, unsigned op
)
675 struct bifrost_fma_inst pack
= {
676 .src0
= bi_get_src(ins
, regs
, 0),
684 bi_pack_fma_2src(bi_instruction
*ins
, bi_registers
*regs
, unsigned op
)
686 struct bifrost_fma_2src pack
= {
687 .src0
= bi_get_src(ins
, regs
, 0),
688 .src1
= bi_get_src(ins
, regs
, 1),
696 bi_pack_add_1src(bi_instruction
*ins
, bi_registers
*regs
, unsigned op
)
698 struct bifrost_add_inst pack
= {
699 .src0
= bi_get_src(ins
, regs
, 0),
706 static enum bifrost_csel_cond
707 bi_cond_to_csel(enum bi_cond cond
, bool *flip
, bool *invert
, nir_alu_type T
)
709 nir_alu_type B
= nir_alu_type_get_base_type(T
);
710 unsigned idx
= (B
== nir_type_float
) ? 0 :
711 ((B
== nir_type_int
) ? 1 : 2);
717 const enum bifrost_csel_cond ops
[] = {
728 const enum bifrost_csel_cond ops
[] = {
739 const enum bifrost_csel_cond ops
[] = {
742 BIFROST_IEQ_F
/* sign is irrelevant */
748 unreachable("Invalid op for csel");
753 bi_pack_fma_csel(bi_instruction
*ins
, bi_registers
*regs
)
755 /* TODO: Use csel3 as well */
756 bool flip
= false, invert
= false;
758 enum bifrost_csel_cond cond
=
759 bi_cond_to_csel(ins
->cond
, &flip
, &invert
, ins
->src_types
[0]);
761 unsigned size
= nir_alu_type_get_type_size(ins
->dest_type
);
763 unsigned cmp_0
= (flip
? 1 : 0);
764 unsigned cmp_1
= (flip
? 0 : 1);
765 unsigned res_0
= (invert
? 3 : 2);
766 unsigned res_1
= (invert
? 2 : 3);
768 struct bifrost_csel4 pack
= {
769 .src0
= bi_get_src(ins
, regs
, cmp_0
),
770 .src1
= bi_get_src(ins
, regs
, cmp_1
),
771 .src2
= bi_get_src(ins
, regs
, res_0
),
772 .src3
= bi_get_src(ins
, regs
, res_1
),
774 .op
= (size
== 16) ? BIFROST_FMA_OP_CSEL4_V16
:
782 bi_pack_fma_frexp(bi_instruction
*ins
, bi_registers
*regs
)
784 unsigned op
= BIFROST_FMA_OP_FREXPE_LOG
;
785 return bi_pack_fma_1src(ins
, regs
, op
);
789 bi_pack_fma_reduce(bi_instruction
*ins
, bi_registers
*regs
)
791 if (ins
->op
.reduce
== BI_REDUCE_ADD_FREXPM
) {
792 return bi_pack_fma_2src(ins
, regs
, BIFROST_FMA_OP_ADD_FREXPM
);
794 unreachable("Invalid reduce op");
798 /* We have a single convert opcode in the IR but a number of opcodes that could
799 * come out. In particular we have native opcodes for:
801 * [ui]16 --> [fui]32 -- int16_to_32
802 * f16 --> f32 -- float16_to_32
803 * f32 --> f16 -- float32_to_16
804 * f32 --> [ui]32 -- float32_to_int
805 * [ui]32 --> f32 -- int_to_float32
806 * [fui]16 --> [fui]16 -- f2i_i2f16
810 bi_pack_convert(bi_instruction
*ins
, bi_registers
*regs
, bool FMA
)
812 nir_alu_type from_base
= nir_alu_type_get_base_type(ins
->src_types
[0]);
813 unsigned from_size
= nir_alu_type_get_type_size(ins
->src_types
[0]);
814 bool from_unsigned
= from_base
== nir_type_uint
;
816 nir_alu_type to_base
= nir_alu_type_get_base_type(ins
->dest_type
);
817 unsigned to_size
= nir_alu_type_get_type_size(ins
->dest_type
);
818 bool to_unsigned
= to_base
== nir_type_uint
;
819 bool to_float
= to_base
== nir_type_float
;
822 assert((from_base
!= to_base
) || (from_size
!= to_size
));
823 assert((MAX2(from_size
, to_size
) / MIN2(from_size
, to_size
)) <= 2);
825 /* f32 to f16 is special */
826 if (from_size
== 32 && to_size
== 16 && from_base
== nir_type_float
&& to_base
== from_base
) {
827 /* TODO: second vectorized source? */
828 struct bifrost_fma_2src pfma
= {
829 .src0
= bi_get_src(ins
, regs
, 0),
830 .src1
= BIFROST_SRC_STAGE
, /* 0 */
831 .op
= BIFROST_FMA_FLOAT32_TO_16
834 struct bifrost_add_2src padd
= {
835 .src0
= bi_get_src(ins
, regs
, 0),
836 .src1
= BIFROST_SRC_STAGE
, /* 0 */
837 .op
= BIFROST_ADD_FLOAT32_TO_16
847 /* Otherwise, figure out the mode */
850 if (from_size
== 16 && to_size
== 32) {
851 unsigned component
= ins
->swizzle
[0][0];
852 assert(component
<= 1);
854 if (from_base
== nir_type_float
)
855 op
= BIFROST_CONVERT_5(component
);
857 op
= BIFROST_CONVERT_4(from_unsigned
, component
, to_float
);
860 unsigned swizzle
= (from_size
== 16) ? bi_swiz16(ins
, 0) : 0;
861 bool is_unsigned
= from_unsigned
;
863 if (from_base
== nir_type_float
) {
864 assert(to_base
!= nir_type_float
);
865 is_unsigned
= to_unsigned
;
867 if (from_size
== 32 && to_size
== 32)
868 mode
= BIFROST_CONV_F32_TO_I32
;
869 else if (from_size
== 16 && to_size
== 16)
870 mode
= BIFROST_CONV_F16_TO_I16
;
872 unreachable("Invalid float conversion");
874 assert(to_base
== nir_type_float
);
875 assert(from_size
== to_size
);
878 mode
= BIFROST_CONV_I32_TO_F32
;
879 else if (to_size
== 16)
880 mode
= BIFROST_CONV_I16_TO_F16
;
882 unreachable("Invalid int conversion");
885 /* Fixup swizzle for 32-bit only modes */
887 if (mode
== BIFROST_CONV_I32_TO_F32
)
889 else if (mode
== BIFROST_CONV_F32_TO_I32
)
892 op
= BIFROST_CONVERT(is_unsigned
, ins
->roundmode
, swizzle
, mode
);
894 /* Unclear what the top bit is for... maybe 16-bit related */
895 bool mode2
= mode
== BIFROST_CONV_F16_TO_I16
;
896 bool mode6
= mode
== BIFROST_CONV_I16_TO_F16
;
898 if (!(mode2
|| mode6
))
903 return bi_pack_fma_1src(ins
, regs
, BIFROST_FMA_CONVERT
| op
);
905 return bi_pack_add_1src(ins
, regs
, BIFROST_ADD_CONVERT
| op
);
909 bi_pack_fma_select(bi_instruction
*ins
, bi_registers
*regs
)
911 unsigned size
= nir_alu_type_get_type_size(ins
->src_types
[0]);
914 unsigned swiz
= (ins
->swizzle
[0][0] | (ins
->swizzle
[1][0] << 1));
915 unsigned op
= BIFROST_FMA_SEL_16(swiz
);
916 return bi_pack_fma_2src(ins
, regs
, op
);
917 } else if (size
== 8) {
920 for (unsigned c
= 0; c
< 4; ++c
) {
921 if (ins
->swizzle
[c
][0]) {
922 /* Ensure lowering restriction is met */
923 assert(ins
->swizzle
[c
][0] == 2);
928 struct bifrost_fma_sel8 pack
= {
929 .src0
= bi_get_src(ins
, regs
, 0),
930 .src1
= bi_get_src(ins
, regs
, 1),
931 .src2
= bi_get_src(ins
, regs
, 2),
932 .src3
= bi_get_src(ins
, regs
, 3),
934 .op
= BIFROST_FMA_OP_SEL8
939 unreachable("Unimplemented");
943 static enum bifrost_fcmp_cond
944 bi_fcmp_cond(enum bi_cond cond
)
947 case BI_COND_LT
: return BIFROST_OLT
;
948 case BI_COND_LE
: return BIFROST_OLE
;
949 case BI_COND_GE
: return BIFROST_OGE
;
950 case BI_COND_GT
: return BIFROST_OGT
;
951 case BI_COND_EQ
: return BIFROST_OEQ
;
952 case BI_COND_NE
: return BIFROST_UNE
;
953 default: unreachable("Unknown bi_cond");
957 /* a <?> b <==> b <flip(?)> a (TODO: NaN behaviour?) */
959 static enum bifrost_fcmp_cond
960 bi_flip_fcmp(enum bifrost_fcmp_cond cond
)
975 unreachable("Unknown fcmp cond");
980 bi_pack_fma_cmp(bi_instruction
*ins
, bi_registers
*regs
)
982 nir_alu_type Tl
= ins
->src_types
[0];
983 nir_alu_type Tr
= ins
->src_types
[1];
985 if (Tl
== nir_type_float32
|| Tr
== nir_type_float32
) {
986 /* TODO: Mixed 32/16 cmp */
989 enum bifrost_fcmp_cond cond
= bi_fcmp_cond(ins
->cond
);
991 /* Only src1 has neg, so we arrange:
994 * -a < -b <===> a > b
995 * -a < b <===> a > -b
996 * TODO: Is this NaN-precise?
999 bool flip
= ins
->src_neg
[0];
1000 bool neg
= ins
->src_neg
[0] ^ ins
->src_neg
[1];
1003 cond
= bi_flip_fcmp(cond
);
1005 struct bifrost_fma_fcmp pack
= {
1006 .src0
= bi_get_src(ins
, regs
, 0),
1007 .src1
= bi_get_src(ins
, regs
, 1),
1008 .src0_abs
= ins
->src_abs
[0],
1009 .src1_abs
= ins
->src_abs
[1],
1014 .op
= BIFROST_FMA_OP_FCMP_GL
1017 RETURN_PACKED(pack
);
1018 } else if (Tl
== nir_type_float16
&& Tr
== nir_type_float16
) {
1020 bool l
= bi_pack_fp16_abs(ins
, regs
, &flip
);
1021 enum bifrost_fcmp_cond cond
= bi_fcmp_cond(ins
->cond
);
1024 cond
= bi_flip_fcmp(cond
);
1026 struct bifrost_fma_fcmp16 pack
= {
1027 .src0
= bi_get_src(ins
, regs
, flip
? 1 : 0),
1028 .src1
= bi_get_src(ins
, regs
, flip
? 0 : 1),
1029 .src0_swizzle
= bi_swiz16(ins
, flip
? 1 : 0),
1030 .src1_swizzle
= bi_swiz16(ins
, flip
? 0 : 1),
1034 .op
= BIFROST_FMA_OP_FCMP_GL_16
,
1037 RETURN_PACKED(pack
);
1039 unreachable("Unknown cmp type");
1044 bi_fma_bitwise_op(enum bi_bitwise_op op
, bool rshift
)
1048 /* Via De Morgan's */
1050 BIFROST_FMA_OP_RSHIFT_NAND
:
1051 BIFROST_FMA_OP_LSHIFT_NAND
;
1052 case BI_BITWISE_AND
:
1054 BIFROST_FMA_OP_RSHIFT_AND
:
1055 BIFROST_FMA_OP_LSHIFT_AND
;
1056 case BI_BITWISE_XOR
:
1057 /* Shift direction handled out of band */
1058 return BIFROST_FMA_OP_RSHIFT_XOR
;
1060 unreachable("Unknown op");
1065 bi_pack_fma_bitwise(bi_instruction
*ins
, bi_registers
*regs
)
1067 unsigned size
= nir_alu_type_get_type_size(ins
->dest_type
);
1070 bool invert_0
= ins
->bitwise
.src_invert
[0];
1071 bool invert_1
= ins
->bitwise
.src_invert
[1];
1073 if (ins
->op
.bitwise
== BI_BITWISE_OR
) {
1074 /* Becomes NAND, so via De Morgan's:
1075 * f(A) | f(B) = ~(~f(A) & ~f(B))
1076 * = NAND(~f(A), ~f(B))
1079 invert_0
= !invert_0
;
1080 invert_1
= !invert_1
;
1081 } else if (ins
->op
.bitwise
== BI_BITWISE_XOR
) {
1082 /* ~A ^ ~B = ~(A ^ ~B) = ~(~(A ^ B)) = A ^ B
1083 * ~A ^ B = ~(A ^ B) = A ^ ~B
1086 invert_0
^= invert_1
;
1089 /* invert_1 ends up specifying shift direction */
1090 invert_1
= !ins
->bitwise
.rshift
;
1093 struct bifrost_shift_fma pack
= {
1094 .src0
= bi_get_src(ins
, regs
, 0),
1095 .src1
= bi_get_src(ins
, regs
, 1),
1096 .src2
= bi_get_src(ins
, regs
, 2),
1097 .half
= (size
== 32) ? 0 : (size
== 16) ? 0x7 : (size
== 8) ? 0x4 : 0,
1099 .invert_1
= invert_0
,
1100 .invert_2
= invert_1
,
1101 .op
= bi_fma_bitwise_op(ins
->op
.bitwise
, ins
->bitwise
.rshift
)
1104 RETURN_PACKED(pack
);
1108 bi_pack_fma_round(bi_instruction
*ins
, bi_registers
*regs
)
1110 bool fp16
= ins
->dest_type
== nir_type_float16
;
1111 assert(fp16
|| ins
->dest_type
== nir_type_float32
);
1114 ? BIFROST_FMA_ROUND_16(ins
->roundmode
, bi_swiz16(ins
, 0))
1115 : BIFROST_FMA_ROUND_32(ins
->roundmode
);
1117 return bi_pack_fma_1src(ins
, regs
, op
);
1121 bi_pack_fma_imath(bi_instruction
*ins
, bi_registers
*regs
)
1123 /* Scheduler: only ADD can have 8/16-bit imath */
1124 assert(ins
->dest_type
== nir_type_int32
|| ins
->dest_type
== nir_type_uint32
);
1126 unsigned op
= ins
->op
.imath
== BI_IMATH_ADD
1127 ? BIFROST_FMA_IADD_32
1128 : BIFROST_FMA_ISUB_32
;
1130 return bi_pack_fma_2src(ins
, regs
, op
);
1134 bi_pack_fma(bi_clause
*clause
, bi_bundle bundle
, bi_registers
*regs
)
1137 return BIFROST_FMA_NOP
;
1139 switch (bundle
.fma
->type
) {
1141 return bi_pack_fma_addmin(bundle
.fma
, regs
);
1143 return bi_pack_fma_cmp(bundle
.fma
, regs
);
1145 return bi_pack_fma_bitwise(bundle
.fma
, regs
);
1147 return bi_pack_convert(bundle
.fma
, regs
, true);
1149 return bi_pack_fma_csel(bundle
.fma
, regs
);
1151 return bi_pack_fma_fma(bundle
.fma
, regs
);
1153 return bi_pack_fma_frexp(bundle
.fma
, regs
);
1155 return bi_pack_fma_imath(bundle
.fma
, regs
);
1157 return bi_pack_fma_addmin(bundle
.fma
, regs
);
1159 return bi_pack_fma_1src(bundle
.fma
, regs
, BIFROST_FMA_OP_MOV
);
1161 unreachable("Packing todo");
1163 return bi_pack_fma_select(bundle
.fma
, regs
);
1165 return bi_pack_fma_round(bundle
.fma
, regs
);
1167 return bi_pack_fma_reduce(bundle
.fma
, regs
);
1169 unreachable("Cannot encode class as FMA");
1174 bi_pack_add_ld_vary(bi_clause
*clause
, bi_instruction
*ins
, bi_registers
*regs
)
1176 unsigned size
= nir_alu_type_get_type_size(ins
->dest_type
);
1177 assert(size
== 32 || size
== 16);
1179 unsigned op
= (size
== 32) ?
1180 BIFROST_ADD_OP_LD_VAR_32
:
1181 BIFROST_ADD_OP_LD_VAR_16
;
1183 unsigned packed_addr
= 0;
1185 if (ins
->src
[0] & BIR_INDEX_CONSTANT
) {
1186 /* Direct uses address field directly */
1187 packed_addr
= bi_get_immediate(ins
, 0);
1189 /* Indirect gets an extra source */
1190 packed_addr
= bi_get_src(ins
, regs
, 0) | 0b11000;
1193 /* The destination is thrown in the data register */
1194 assert(ins
->dest
& BIR_INDEX_REGISTER
);
1195 clause
->data_register
= ins
->dest
& ~BIR_INDEX_REGISTER
;
1197 unsigned channels
= ins
->vector_channels
;
1198 assert(channels
>= 1 && channels
<= 4);
1200 struct bifrost_ld_var pack
= {
1201 .src0
= bi_get_src(ins
, regs
, 1),
1202 .addr
= packed_addr
,
1203 .channels
= MALI_POSITIVE(channels
),
1204 .interp_mode
= ins
->load_vary
.interp_mode
,
1205 .reuse
= ins
->load_vary
.reuse
,
1206 .flat
= ins
->load_vary
.flat
,
1210 RETURN_PACKED(pack
);
1214 bi_pack_add_2src(bi_instruction
*ins
, bi_registers
*regs
, unsigned op
)
1216 struct bifrost_add_2src pack
= {
1217 .src0
= bi_get_src(ins
, regs
, 0),
1218 .src1
= bi_get_src(ins
, regs
, 1),
1222 RETURN_PACKED(pack
);
1226 bi_pack_add_addmin_f32(bi_instruction
*ins
, bi_registers
*regs
)
1229 (ins
->type
== BI_ADD
) ? BIFROST_ADD_OP_FADD32
:
1230 (ins
->op
.minmax
== BI_MINMAX_MIN
) ? BIFROST_ADD_OP_FMIN32
:
1231 BIFROST_ADD_OP_FMAX32
;
1233 struct bifrost_add_faddmin pack
= {
1234 .src0
= bi_get_src(ins
, regs
, 0),
1235 .src1
= bi_get_src(ins
, regs
, 1),
1236 .src0_abs
= ins
->src_abs
[0],
1237 .src1_abs
= ins
->src_abs
[1],
1238 .src0_neg
= ins
->src_neg
[0],
1239 .src1_neg
= ins
->src_neg
[1],
1240 .outmod
= ins
->outmod
,
1241 .mode
= (ins
->type
== BI_ADD
) ? ins
->roundmode
: ins
->minmax
,
1245 RETURN_PACKED(pack
);
1249 bi_pack_add_add_f16(bi_instruction
*ins
, bi_registers
*regs
)
1251 /* ADD.v2f16 can't have outmod */
1252 assert(ins
->outmod
== BIFROST_NONE
);
1254 struct bifrost_add_faddmin pack
= {
1255 .src0
= bi_get_src(ins
, regs
, 0),
1256 .src1
= bi_get_src(ins
, regs
, 1),
1257 .src0_abs
= ins
->src_abs
[0],
1258 .src1_abs
= ins
->src_abs
[1],
1259 .src0_neg
= ins
->src_neg
[0],
1260 .src1_neg
= ins
->src_neg
[1],
1261 .select
= bi_swiz16(ins
, 0), /* swizzle_0 */
1262 .outmod
= bi_swiz16(ins
, 1), /* swizzle_1 */
1263 .mode
= ins
->roundmode
,
1264 .op
= BIFROST_ADD_OP_FADD16
1267 RETURN_PACKED(pack
);
1271 bi_pack_add_addmin(bi_instruction
*ins
, bi_registers
*regs
)
1273 if (ins
->dest_type
== nir_type_float32
)
1274 return bi_pack_add_addmin_f32(ins
, regs
);
1275 else if (ins
->dest_type
== nir_type_float16
) {
1276 if (ins
->type
== BI_ADD
)
1277 return bi_pack_add_add_f16(ins
, regs
);
1279 return bi_pack_fmadd_min_f16(ins
, regs
, false);
1281 unreachable("Unknown FMA/ADD type");
1285 bi_pack_add_ld_ubo(bi_clause
*clause
, bi_instruction
*ins
, bi_registers
*regs
)
1287 assert(ins
->vector_channels
>= 1 && ins
->vector_channels
<= 4);
1289 const unsigned ops
[4] = {
1290 BIFROST_ADD_OP_LD_UBO_1
,
1291 BIFROST_ADD_OP_LD_UBO_2
,
1292 BIFROST_ADD_OP_LD_UBO_3
,
1293 BIFROST_ADD_OP_LD_UBO_4
1296 bi_write_data_register(clause
, ins
);
1297 return bi_pack_add_2src(ins
, regs
, ops
[ins
->vector_channels
- 1]);
1300 static enum bifrost_ldst_type
1301 bi_pack_ldst_type(nir_alu_type T
)
1304 case nir_type_float16
: return BIFROST_LDST_F16
;
1305 case nir_type_float32
: return BIFROST_LDST_F32
;
1306 case nir_type_int32
: return BIFROST_LDST_I32
;
1307 case nir_type_uint32
: return BIFROST_LDST_U32
;
1308 default: unreachable("Invalid type loaded");
/* Packs LD_VAR_ADDR (varying address load). Sources 1/2 are the packed
 * operands; source 0 is elsewhere and the location is an immediate from
 * slot 0. The type field reflects src_types[3]. Result is returned via
 * the clause data register. */
static unsigned
bi_pack_add_ld_var_addr(bi_clause *clause, bi_instruction *ins, bi_registers *regs)
{
        struct bifrost_ld_var_addr pack = {
                .src0 = bi_get_src(ins, regs, 1),
                .src1 = bi_get_src(ins, regs, 2),
                .location = bi_get_immediate(ins, 0),
                .type = bi_pack_ldst_type(ins->src_types[3]),
                .op = BIFROST_ADD_OP_LD_VAR_ADDR
        };

        bi_write_data_register(clause, ins);
        RETURN_PACKED(pack);
}
/* Packs an attribute load. Channels are biased via MALI_POSITIVE (n-1
 * encoding); type comes from the destination. Result goes through the
 * clause data register. NOTE(review): the lower bound check '>= 0' is
 * vacuous if vector_channels is unsigned — confirm intent. */
static unsigned
bi_pack_add_ld_attr(bi_clause *clause, bi_instruction *ins, bi_registers *regs)
{
        assert(ins->vector_channels >= 0 && ins->vector_channels <= 4);

        struct bifrost_ld_attr pack = {
                .src0 = bi_get_src(ins, regs, 1),
                .src1 = bi_get_src(ins, regs, 2),
                .location = bi_get_immediate(ins, 0),
                .channels = MALI_POSITIVE(ins->vector_channels),
                .type = bi_pack_ldst_type(ins->dest_type),
                .op = BIFROST_ADD_OP_LD_ATTR
        };

        bi_write_data_register(clause, ins);
        RETURN_PACKED(pack);
}
/* Packs a varying store. Three address/operand sources (1-3); the value
 * being stored travels through the clause data register, hence the
 * bi_read_data_register call. Channel count uses n-1 encoding. */
static unsigned
bi_pack_add_st_vary(bi_clause *clause, bi_instruction *ins, bi_registers *regs)
{
        assert(ins->vector_channels >= 1 && ins->vector_channels <= 4);

        struct bifrost_st_vary pack = {
                .src0 = bi_get_src(ins, regs, 1),
                .src1 = bi_get_src(ins, regs, 2),
                .src2 = bi_get_src(ins, regs, 3),
                .channels = MALI_POSITIVE(ins->vector_channels),
                .op = BIFROST_ADD_OP_ST_VAR
        };

        bi_read_data_register(clause, ins);
        RETURN_PACKED(pack);
}
/* Packs ATEST (alpha test) for fragment shaders. src1 carries the alpha
 * value; for fp16 the component select comes from its swizzle, for fp32
 * it is fixed to 1. */
static unsigned
bi_pack_add_atest(bi_clause *clause, bi_instruction *ins, bi_registers *regs)
{
        bool fp16 = (ins->src_types[1] == nir_type_float16);

        struct bifrost_add_atest pack = {
                .src0 = bi_get_src(ins, regs, 0),
                .src1 = bi_get_src(ins, regs, 1),
                .half = fp16, /* NOTE(review): reconstructed field — confirm encoding */
                .component = fp16 ? ins->swizzle[1][0] : 1, /* Set for fp32 */
                .op = BIFROST_ADD_OP_ATEST,
        };

        /* Despite *also* writing with the usual mechanism... quirky and
         * perhaps unnecessary, but let's match the blob */
        clause->data_register = ins->dest & ~BIR_INDEX_REGISTER;

        RETURN_PACKED(pack);
}
/* Packs a BLEND op. Only render target 0 is supported for now (location
 * would otherwise be packed in uniform_const); the colour being blended
 * is read through the clause data register. */
static unsigned
bi_pack_add_blend(bi_clause *clause, bi_instruction *ins, bi_registers *regs)
{
        struct bifrost_add_inst pack = {
                .src0 = bi_get_src(ins, regs, 1),
                .op = BIFROST_ADD_OP_BLEND
        };

        /* TODO: Pack location in uniform_const */
        assert(ins->blend_location == 0);

        bi_read_data_register(clause, ins);
        RETURN_PACKED(pack);
}
/* Packs special transcendental ops (FRCP/FRSQ/FEXP2) using the fast
 * variants. For fp16 the opcode additionally encodes which half (X/Y) is
 * selected, taken from the first swizzle component. */
static unsigned
bi_pack_add_special(bi_instruction *ins, bi_registers *regs)
{
        unsigned op = 0;
        bool fp16 = ins->dest_type == nir_type_float16;
        bool Y = ins->swizzle[0][0]; /* half-select for fp16 variants */

        if (ins->op.special == BI_SPECIAL_FRCP) {
                op = fp16 ?
                        (Y ? BIFROST_ADD_OP_FRCP_FAST_F16_Y :
                        BIFROST_ADD_OP_FRCP_FAST_F16_X) :
                        BIFROST_ADD_OP_FRCP_FAST_F32;
        } else if (ins->op.special == BI_SPECIAL_FRSQ) {
                op = fp16 ?
                        (Y ? BIFROST_ADD_OP_FRSQ_FAST_F16_Y :
                        BIFROST_ADD_OP_FRSQ_FAST_F16_X) :
                        BIFROST_ADD_OP_FRSQ_FAST_F32;
        } else if (ins->op.special == BI_SPECIAL_EXP2_LOW) {
                op = BIFROST_ADD_OP_FEXP2_FAST;
        } else {
                unreachable("Unknown special op");
        }

        return bi_pack_add_1src(ins, regs, op);
}
/* Packs a table op; currently only the fp32 log2 helper is emitted. */
static unsigned
bi_pack_add_table(bi_instruction *ins, bi_registers *regs)
{
        unsigned op = 0;
        assert(ins->dest_type == nir_type_float32);

        op = BIFROST_ADD_OP_LOG2_HELP;
        return bi_pack_add_1src(ins, regs, op);
}
/* Packs a compact texture instruction. The opcode is selected by result
 * precision (f16/f32) and shader stage; LOD is computed implicitly only
 * in fragment shaders (!vtx). Result arrives via the clause data
 * register. */
static unsigned
bi_pack_add_tex_compact(bi_clause *clause, bi_instruction *ins, bi_registers *regs, gl_shader_stage stage)
{
        bool f16 = ins->dest_type == nir_type_float16;
        bool vtx = stage != MESA_SHADER_FRAGMENT;

        struct bifrost_tex_compact pack = {
                .src0 = bi_get_src(ins, regs, 0),
                .src1 = bi_get_src(ins, regs, 1),
                .op = f16 ? BIFROST_ADD_OP_TEX_COMPACT_F16(vtx) :
                        BIFROST_ADD_OP_TEX_COMPACT_F32(vtx),
                .compute_lod = !vtx,
                .tex_index = ins->texture.texture_index,
                .sampler_index = ins->texture.sampler_index
        };

        bi_write_data_register(clause, ins);
        RETURN_PACKED(pack);
}
/* Packs a 16-bit SEL: the two source lane selects are folded into a
 * 2-bit swizzle that parameterizes the opcode. */
static unsigned
bi_pack_add_select(bi_instruction *ins, bi_registers *regs)
{
        unsigned size = nir_alu_type_get_type_size(ins->src_types[0]);
        assert(size == 16); /* NOTE(review): only the 16-bit form is handled */

        unsigned swiz = (ins->swizzle[0][0] | (ins->swizzle[1][0] << 1));
        unsigned op = BIFROST_ADD_SEL_16(swiz);
        return bi_pack_add_2src(ins, regs, op);
}
/* Maps an IR comparison to a hardware DISCARD condition. The hardware
 * only has <, <=, !=, == — strict/non-strict "greater" forms are handled
 * by setting *flip so the caller swaps the operands. */
static enum bifrost_discard_cond
bi_cond_to_discard(enum bi_cond cond, bool *flip)
{
        switch (cond) {
        case BI_COND_GT:
                *flip = true;
                /* fallthrough */
        case BI_COND_LT:
                return BIFROST_DISCARD_FLT;
        case BI_COND_GE:
                *flip = true;
                /* fallthrough */
        case BI_COND_LE:
                return BIFROST_DISCARD_FLE;
        case BI_COND_NE:
                return BIFROST_DISCARD_FNE;
        case BI_COND_EQ:
                return BIFROST_DISCARD_FEQ;
        default:
                unreachable("Invalid op for discard");
        }
}
/* Packs a conditional DISCARD (fragment kill). Operands may be swapped
 * (flip) to express conditions the hardware lacks; fp16 sources carry a
 * per-source lane select from the swizzle. */
static unsigned
bi_pack_add_discard(bi_instruction *ins, bi_registers *regs)
{
        bool fp16 = ins->src_types[0] == nir_type_float16;
        assert(fp16 || ins->src_types[0] == nir_type_float32);

        bool flip = false;
        enum bifrost_discard_cond cond = bi_cond_to_discard(ins->cond, &flip);

        struct bifrost_add_discard pack = {
                .src0 = bi_get_src(ins, regs, flip ? 1 : 0),
                .src1 = bi_get_src(ins, regs, flip ? 0 : 1),
                .cond = cond,
                .src0_select = fp16 ? ins->swizzle[0][0] : 0,
                .src1_select = fp16 ? ins->swizzle[1][0] : 0,
                .fp32 = fp16 ? 0 : 1,
                .op = BIFROST_ADD_OP_DISCARD
        };

        RETURN_PACKED(pack);
}
/* Maps an IR comparison to a hardware integer-compare condition, setting
 * *flip when operands must be swapped. NOTE(review): the is_16 special
 * cases look like a workaround for quirky 16-bit condition encodings —
 * confirm against the ISA before touching. */
static enum bifrost_icmp_cond
bi_cond_to_icmp(enum bi_cond cond, bool *flip, bool is_unsigned, bool is_16)
{
        switch (cond) {
        case BI_COND_LT:
                *flip = true;
                /* fallthrough */
        case BI_COND_GT:
                return is_unsigned ? (is_16 ? BIFROST_ICMP_IGE : BIFROST_ICMP_UGT)
                        : BIFROST_ICMP_IGT;
        case BI_COND_LE:
                *flip = true;
                /* fallthrough */
        case BI_COND_GE:
                return is_unsigned ? BIFROST_ICMP_UGE :
                        (is_16 ? BIFROST_ICMP_UGT : BIFROST_ICMP_IGE);
        case BI_COND_NE:
                return BIFROST_ICMP_NEQ;
        case BI_COND_EQ:
                return BIFROST_ICMP_EQ;
        default:
                unreachable("Invalid op for icmp");
        }
}
/* Packs a 32-bit integer compare; operand order may be flipped by the
 * caller to express conditions the hardware lacks. */
static unsigned
bi_pack_add_icmp32(bi_instruction *ins, bi_registers *regs, bool flip,
                enum bifrost_icmp_cond cond)
{
        struct bifrost_add_icmp pack = {
                .src0 = bi_get_src(ins, regs, flip ? 1 : 0),
                .src1 = bi_get_src(ins, regs, flip ? 0 : 1),
                .cond = cond, /* NOTE(review): reconstructed fields — confirm */
                .sz = 1,
                .d3d = 0,
                .op = BIFROST_ADD_OP_ICMP_32
        };

        RETURN_PACKED(pack);
}
/* Packs a 16-bit integer compare; like the 32-bit form but with per-source
 * lane swizzles, which follow the same flip as the operands. */
static unsigned
bi_pack_add_icmp16(bi_instruction *ins, bi_registers *regs, bool flip,
                enum bifrost_icmp_cond cond)
{
        struct bifrost_add_icmp16 pack = {
                .src0 = bi_get_src(ins, regs, flip ? 1 : 0),
                .src1 = bi_get_src(ins, regs, flip ? 0 : 1),
                .src0_swizzle = bi_swiz16(ins, flip ? 1 : 0),
                .src1_swizzle = bi_swiz16(ins, flip ? 0 : 1),
                .cond = cond, /* NOTE(review): reconstructed fields — confirm */
                .d3d = 0,
                .op = BIFROST_ADD_OP_ICMP_16
        };

        RETURN_PACKED(pack);
}
/* Packs a comparison on the ADD unit. Only integer compares are handled
 * so far, dispatched by size to the 32- or 16-bit packer; float compares
 * are TODO. The commented-out bi_invert_cond for sz==16 is a deliberate
 * upstream quirk — do not "fix" without hardware verification. */
static unsigned
bi_pack_add_cmp(bi_instruction *ins, bi_registers *regs)
{
        nir_alu_type Tl = ins->src_types[0];
        nir_alu_type Tr = ins->src_types[1];
        nir_alu_type Bl = nir_alu_type_get_base_type(Tl);

        if (Bl == nir_type_uint || Bl == nir_type_int) {
                assert(Tl == Tr);
                unsigned sz = nir_alu_type_get_type_size(Tl);

                bool flip = false;

                enum bifrost_icmp_cond cond = bi_cond_to_icmp(
                                sz == 16 ? /*bi_invert_cond*/(ins->cond) : ins->cond,
                                &flip, Bl == nir_type_uint, sz == 16);

                if (sz == 32)
                        return bi_pack_add_icmp32(ins, regs, flip, cond);
                else if (sz == 16)
                        return bi_pack_add_icmp16(ins, regs, flip, cond);
                else
                        unreachable("TODO");
        } else {
                unreachable("TODO");
        }
}
/* Packs integer add/sub, choosing the opcode by operand size (8/16/32);
 * mixed-width (32+16) adds and 64-bit are not supported yet. */
static unsigned
bi_pack_add_imath(bi_instruction *ins, bi_registers *regs)
{
        /* TODO: 32+16 add */
        assert(ins->src_types[0] == ins->src_types[1]);
        unsigned sz = nir_alu_type_get_type_size(ins->src_types[0]);
        enum bi_imath_op p = ins->op.imath;

        unsigned op = 0;

        if (sz == 8) {
                op = (p == BI_IMATH_ADD) ? BIFROST_ADD_IADD_8 :
                        BIFROST_ADD_ISUB_8;
        } else if (sz == 16) {
                op = (p == BI_IMATH_ADD) ? BIFROST_ADD_IADD_16 :
                        BIFROST_ADD_ISUB_16;
        } else if (sz == 32) {
                op = (p == BI_IMATH_ADD) ? BIFROST_ADD_IADD_32 :
                        BIFROST_ADD_ISUB_32;
        } else {
                unreachable("64-bit todo");
        }

        return bi_pack_add_2src(ins, regs, op);
}
/* Packs the ADD slot of a bundle, dispatching on instruction class. A
 * missing ADD instruction packs as NOP. Classes marked "Packing todo"
 * are scheduled but not yet encodable.
 * NOTE(review): case labels reconstructed from the handlers they call —
 * verify against the bi_class enum. */
static unsigned
bi_pack_add(bi_clause *clause, bi_bundle bundle, bi_registers *regs, gl_shader_stage stage)
{
        if (!bundle.add)
                return BIFROST_ADD_NOP;

        switch (bundle.add->type) {
        case BI_ADD:
                return bi_pack_add_addmin(bundle.add, regs);
        case BI_ATEST:
                return bi_pack_add_atest(clause, bundle.add, regs);
        case BI_BRANCH:
                unreachable("Packing todo");
        case BI_CMP:
                return bi_pack_add_cmp(bundle.add, regs);
        case BI_BLEND:
                return bi_pack_add_blend(clause, bundle.add, regs);
        case BI_BITWISE:
                unreachable("Packing todo");
        case BI_CONVERT:
                return bi_pack_convert(bundle.add, regs, false);
        case BI_DISCARD:
                return bi_pack_add_discard(bundle.add, regs);
        case BI_FREXP:
                unreachable("Packing todo");
        case BI_IMATH:
                return bi_pack_add_imath(bundle.add, regs);
        case BI_ISUB:
                unreachable("Packing todo");
        case BI_LOAD_ATTR:
                return bi_pack_add_ld_attr(clause, bundle.add, regs);
        case BI_LOAD_UNIFORM:
                return bi_pack_add_ld_ubo(clause, bundle.add, regs);
        case BI_LOAD_VAR:
                return bi_pack_add_ld_vary(clause, bundle.add, regs);
        case BI_LOAD_VAR_ADDRESS:
                return bi_pack_add_ld_var_addr(clause, bundle.add, regs);
        case BI_MINMAX:
                return bi_pack_add_addmin(bundle.add, regs);
        case BI_MOV:
        case BI_SHIFT:
        case BI_STORE:
                unreachable("Packing todo");
        case BI_STORE_VAR:
                return bi_pack_add_st_vary(clause, bundle.add, regs);
        case BI_SPECIAL:
                return bi_pack_add_special(bundle.add, regs);
        case BI_TABLE:
                return bi_pack_add_table(bundle.add, regs);
        case BI_SELECT:
                return bi_pack_add_select(bundle.add, regs);
        case BI_TEX:
                if (bundle.add->op.texture == BI_TEX_COMPACT)
                        return bi_pack_add_tex_compact(clause, bundle.add, regs, stage);
                else
                        unreachable("Unknown tex type");
        case BI_ROUND:
                unreachable("Packing todo");
        default:
                unreachable("Cannot encode class as ADD");
        }
}
/* A fully packed 78-bit bundle split across two words: .lo holds the
 * register block plus FMA plus the low ADD bits; .hi holds the remaining
 * ADD bits (see bi_pack_bundle / bi_pack_clause for the splicing). */
struct bi_packed_bundle {
        uint64_t lo;
        uint64_t hi;
};
1690 /* We must ensure port 1 > port 0 for the 63-x trick to function, so we fix
1691 * this up at pack time. (Scheduling doesn't care.) */
/* We must ensure port 1 > port 0 for the 63-x trick to function, so we fix
 * this up at pack time. (Scheduling doesn't care.) Plain swap when both
 * ports are enabled and out of order. */
static void
bi_flip_ports(bi_registers *regs)
{
        if (regs->enabled[0] && regs->enabled[1] && regs->port[1] < regs->port[0]) {
                unsigned temp = regs->port[0];
                regs->port[0] = regs->port[1];
                regs->port[1] = temp;
        }
}
/* Packs one bundle: assigns ports and the uniform/constant slot, fixes
 * port ordering, then packs the register block, FMA and ADD instructions
 * and splices them into the lo/hi words (FMA at bit 35, low 6 ADD bits at
 * bit 58, remaining ADD bits in .hi). */
static struct bi_packed_bundle
bi_pack_bundle(bi_clause *clause, bi_bundle bundle, bi_bundle prev, bool first_bundle, gl_shader_stage stage)
{
        bi_assign_ports(&bundle, &prev);
        bi_assign_uniform_constant(clause, &bundle.regs, bundle);
        bundle.regs.first_instruction = first_bundle;

        bi_flip_ports(&bundle.regs);

        uint64_t reg = bi_pack_registers(bundle.regs);
        uint64_t fma = bi_pack_fma(clause, bundle, &bundle.regs);
        uint64_t add = bi_pack_add(clause, bundle, &bundle.regs, stage);

        struct bi_packed_bundle packed = {
                .lo = reg | (fma << 35) | ((add & 0b111111) << 58),
                .hi = add >> 6 /* NOTE(review): reconstructed — confirm split */
        };

        return packed;
}
1725 /* Packs the next two constants as a dedicated constant quadword at the end of
1726 * the clause, returning the number packed. There are two cases to consider:
1728 * Case #1: Branching is not used. For a single constant copy the upper nibble
1731 * Case #2: Branching is used. For a single constant, it suffices to set the
1732 * upper nibble to 4 and leave the latter constant 0, which matches what the
1735 * Extending to multiple constants is considerably more tricky and left for
/* Emits one constant quadword for the clause starting at 'index' and
 * returns how many constants were consumed. Only the single-constant,
 * single-bundle case is implemented (see the asserts); the branch case
 * sets the upper nibble to 4 per the comment block above. */
static unsigned
bi_pack_constants(bi_context *ctx, bi_clause *clause,
                unsigned index,
                struct util_dynarray *emission)
{
        /* After these two, are we done? Determines tag */
        bool done = clause->constant_count <= (index + 2);
        bool only = clause->constant_count <= (index + 1);

        /* Is the constant we're packing for a branch? */
        bool branches = clause->branch_constant && done;

        /* Only the simple case is implemented so far */
        assert(index == 0 && clause->bundle_count == 1);
        assert(only);

        uint64_t hi = clause->constants[index + 0] >> 60ull;

        struct bifrost_fmt_constant quad = {
                .pos = 0, /* TODO */
                .tag = done ? BIFROST_FMTC_FINAL : BIFROST_FMTC_CONSTANTS,
                .imm_1 = clause->constants[index + 0] >> 4,
                .imm_2 = ((hi < 8) ? (hi << 60ull) : 0) >> 4,
        };

        if (branches) {
                /* Branch offsets are less than 60-bits so this should work at
                 * least for now */
                quad.imm_1 |= (4ull << 60ull) >> 4;
        }

        /* XXX: On G71, Connor observed that the difference of the top 4 bits
         * of the second constant with the first must be less than 8, otherwise
         * we have to swap them. On G52, I'm able to reproduce a similar issue
         * but with a different workaround (modeled above with a single
         * constant, unclear how to workaround for multiple constants.) Further
         * investigation needed. Possibly an errata. XXX */

        util_dynarray_append(emission, struct bifrost_fmt_constant, quad);

        return 2;
}
/* Packs a whole clause into 'emission': the single bundle is packed and
 * split across the fmt1 quadword fields (ins_1/ins_2/ins_0), the header
 * is attached, and any clause constants are appended afterwards. */
static void
bi_pack_clause(bi_context *ctx, bi_clause *clause, bi_clause *next,
                struct util_dynarray *emission, gl_shader_stage stage)
{
        struct bi_packed_bundle ins_1 = bi_pack_bundle(clause, clause->bundles[0], clause->bundles[0], true, stage);
        assert(clause->bundle_count == 1);

        /* Used to decide if we elide writes */
        bool is_fragment = ctx->stage == MESA_SHADER_FRAGMENT;

        /* State for packing constants throughout */
        unsigned constant_index = 0;

        struct bifrost_fmt1 quad_1 = {
                .tag = clause->constant_count ? BIFROST_FMT1_CONSTANTS : BIFROST_FMT1_FINAL,
                .header = bi_pack_header(clause, next, is_fragment),
                .ins_1 = ins_1.lo, /* NOTE(review): reconstructed — confirm */
                .ins_2 = ins_1.hi & ((1 << 11) - 1),
                .ins_0 = (ins_1.hi >> 11) & 0b111,
        };

        util_dynarray_append(emission, struct bifrost_fmt1, quad_1);

        /* Pack the remaining constants */

        while (constant_index < clause->constant_count) {
                constant_index += bi_pack_constants(ctx, clause,
                                constant_index, emission);
        }
}
/* Returns the clause executed after 'clause': the next one in its block,
 * otherwise the first clause of the next non-empty block, or NULL at the
 * end of the shader (used to pack scoreboard deps into the header). */
static bi_clause *
bi_next_clause(bi_context *ctx, pan_block *block, bi_clause *clause)
{
        /* Try the next clause in this block */
        if (clause->link.next != &((bi_block *) block)->clauses)
                return list_first_entry(&(clause->link), bi_clause, link);

        /* Try the next block, or the one after that if it's empty, etc .*/
        pan_block *next_block = pan_next_block(block);

        bi_foreach_block_from(ctx, next_block, block) {
                bi_block *blk = (bi_block *) block;

                if (!list_is_empty(&blk->clauses))
                        return list_first_entry(&(blk->clauses), bi_clause, link);
        }

        return NULL;
}
1835 bi_pack(bi_context
*ctx
, struct util_dynarray
*emission
)
1837 util_dynarray_init(emission
, NULL
);
1839 bi_foreach_block(ctx
, _block
) {
1840 bi_block
*block
= (bi_block
*) _block
;
1842 bi_foreach_clause_in_block(block
, clause
) {
1843 bi_clause
*next
= bi_next_clause(ctx
, _block
, clause
);
1844 bi_pack_clause(ctx
, clause
, next
, emission
, ctx
->stage
);