2 * Copyright (C) 2019 Connor Abbott <cwabbott0@gmail.com>
3 * Copyright (C) 2019 Lyude Paul <thatslyude@gmail.com>
4 * Copyright (C) 2019 Ryan Houdek <Sonicadvance1@gmail.com>
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
34 #include "bifrost_ops.h"
35 #include "disassemble.h"
36 #include "util/macros.h"
// return bits (high, lo]
static uint64_t bits(uint32_t word, unsigned lo, unsigned high)
{
        // (1 << 32) on a 32-bit int is undefined behavior, so the
        // full-word case must be handled before building the mask.
        if (high == 32)
                return word >> lo;
        // Mask off everything at or above 'high', then drop the bits
        // below 'lo'. A 64-bit constant keeps the shift well-defined
        // for any high < 32.
        return (word & ((1ull << high) - 1)) >> lo;
}
46 // each of these structs represents an instruction that's dispatched in one
47 // cycle. Note that these instructions are packed in funny ways within the
48 // clause, hence the need for a separate struct.
49 struct bifrost_alu_inst
{
56 unsigned uniform_const
: 8;
64 static unsigned get_reg0(struct bifrost_regs regs
)
67 return regs
.reg0
| ((regs
.reg1
& 0x1) << 5);
69 return regs
.reg0
<= regs
.reg1
? regs
.reg0
: 63 - regs
.reg0
;
72 static unsigned get_reg1(struct bifrost_regs regs
)
74 return regs
.reg0
<= regs
.reg1
? regs
.reg1
: 63 - regs
.reg1
;
// Which write port (if any) an execution unit uses for its result.
enum bifrost_reg_write_unit {
        REG_WRITE_NONE = 0, // don't write
        REG_WRITE_TWO,      // write using reg2
        REG_WRITE_THREE,    // write using reg3
};
83 // this represents the decoded version of the ctrl register field.
84 struct bifrost_reg_ctrl
{
88 enum bifrost_reg_write_unit fma_write_unit
;
89 enum bifrost_reg_write_unit add_write_unit
;
113 enum fma_src_type src_type
;
127 ADD_TEX_COMPACT
, // texture instruction with embedded sampler
128 ADD_TEX
, // texture instruction with sampler/etc. in uniform port
139 enum add_src_type src_type
;
143 struct bifrost_tex_ctrl
{
144 unsigned sampler_index
: 4; // also used to signal indirects
145 unsigned tex_index
: 7;
146 bool no_merge_index
: 1; // whether to merge (direct) sampler & texture indices
147 bool filter
: 1; // use the usual filtering pipeline (0 for texelFetch & textureGather)
149 bool texel_offset
: 1; // *Offset()
152 unsigned tex_type
: 2; // 2D, 3D, Cube, Buffer
153 bool compute_lod
: 1; // 0 for *Lod()
154 bool not_supply_lod
: 1; // 0 for *Lod() or when a bias is applied
155 bool calc_gradients
: 1; // 0 for *Grad()
157 unsigned result_type
: 4; // integer, unsigned, float TODO: why is this 4 bits?
161 struct bifrost_dual_tex_ctrl
{
162 unsigned sampler_index0
: 2;
164 unsigned tex_index0
: 2;
165 unsigned sampler_index1
: 2;
166 unsigned tex_index1
: 2;
170 enum branch_bit_size
{
174 // For the above combinations of bitsize and location, an extra bit is
175 // encoded via comparing the sources. The only possible source of ambiguity
176 // would be if the sources were the same, but then the branch condition
177 // would be always true or always false anyways, so we can ignore it. But
178 // this no longer works when comparing the y component to the x component,
179 // since it's valid to compare the y component of a source against its own
180 // x component. Instead, the extra bit is encoded via an extra bitsize.
183 BR_SIZE_32_AND_16X
= 5,
184 BR_SIZE_32_AND_16Y
= 6,
185 // Used for comparisons with zero and always-true, see below. I think this
186 // only works for integer comparisons.
190 void dump_header(FILE *fp
, struct bifrost_header header
, bool verbose
);
191 void dump_instr(FILE *fp
, const struct bifrost_alu_inst
*instr
,
192 struct bifrost_regs next_regs
, uint64_t *consts
,
193 unsigned data_reg
, unsigned offset
, bool verbose
);
194 bool dump_clause(FILE *fp
, uint32_t *words
, unsigned *size
, unsigned offset
, bool verbose
);
196 void dump_header(FILE *fp
, struct bifrost_header header
, bool verbose
)
198 if (header
.clause_type
!= 0) {
199 fprintf(fp
, "id(%du) ", header
.scoreboard_index
);
202 if (header
.scoreboard_deps
!= 0) {
203 fprintf(fp
, "next-wait(");
205 for (unsigned i
= 0; i
< 8; i
++) {
206 if (header
.scoreboard_deps
& (1 << i
)) {
210 fprintf(fp
, "%d", i
);
217 if (header
.datareg_writebarrier
)
218 fprintf(fp
, "data-reg-barrier ");
220 if (!header
.no_end_of_shader
)
223 if (!header
.back_to_back
) {
225 if (header
.branch_cond
)
226 fprintf(fp
, "branch-cond ");
228 fprintf(fp
, "branch-uncond ");
231 if (header
.elide_writes
)
234 if (header
.suppress_inf
)
235 fprintf(fp
, "suppress-inf ");
236 if (header
.suppress_nan
)
237 fprintf(fp
, "suppress-nan ");
240 fprintf(fp
, "unk0 ");
242 fprintf(fp
, "unk1 ");
244 fprintf(fp
, "unk2 ");
246 fprintf(fp
, "unk3 ");
248 fprintf(fp
, "unk4 ");
253 fprintf(fp
, "# clause type %d, next clause type %d\n",
254 header
.clause_type
, header
.next_clause_type
);
258 static struct bifrost_reg_ctrl
DecodeRegCtrl(FILE *fp
, struct bifrost_regs regs
)
260 struct bifrost_reg_ctrl decoded
= {};
262 if (regs
.ctrl
== 0) {
263 ctrl
= regs
.reg1
>> 2;
264 decoded
.read_reg0
= !(regs
.reg1
& 0x2);
265 decoded
.read_reg1
= false;
268 decoded
.read_reg0
= decoded
.read_reg1
= true;
272 decoded
.fma_write_unit
= REG_WRITE_TWO
;
276 decoded
.fma_write_unit
= REG_WRITE_TWO
;
277 decoded
.read_reg3
= true;
280 decoded
.read_reg3
= true;
283 decoded
.add_write_unit
= REG_WRITE_TWO
;
286 decoded
.add_write_unit
= REG_WRITE_TWO
;
287 decoded
.read_reg3
= true;
290 decoded
.clause_start
= true;
293 decoded
.fma_write_unit
= REG_WRITE_TWO
;
294 decoded
.clause_start
= true;
299 decoded
.read_reg3
= true;
300 decoded
.clause_start
= true;
303 decoded
.add_write_unit
= REG_WRITE_TWO
;
304 decoded
.clause_start
= true;
309 decoded
.fma_write_unit
= REG_WRITE_THREE
;
310 decoded
.add_write_unit
= REG_WRITE_TWO
;
313 fprintf(fp
, "# unknown reg ctrl %d\n", ctrl
);
319 // Pass in the add_write_unit or fma_write_unit, and this returns which register
320 // the ADD/FMA units are writing to
321 static unsigned GetRegToWrite(enum bifrost_reg_write_unit unit
, struct bifrost_regs regs
)
326 case REG_WRITE_THREE
:
328 default: /* REG_WRITE_NONE */
334 static void dump_regs(FILE *fp
, struct bifrost_regs srcs
)
336 struct bifrost_reg_ctrl ctrl
= DecodeRegCtrl(fp
, srcs
);
339 fprintf(fp
, "port 0: R%d ", get_reg0(srcs
));
341 fprintf(fp
, "port 1: R%d ", get_reg1(srcs
));
343 if (ctrl
.fma_write_unit
== REG_WRITE_TWO
)
344 fprintf(fp
, "port 2: R%d (write FMA) ", srcs
.reg2
);
345 else if (ctrl
.add_write_unit
== REG_WRITE_TWO
)
346 fprintf(fp
, "port 2: R%d (write ADD) ", srcs
.reg2
);
348 if (ctrl
.fma_write_unit
== REG_WRITE_THREE
)
349 fprintf(fp
, "port 3: R%d (write FMA) ", srcs
.reg3
);
350 else if (ctrl
.add_write_unit
== REG_WRITE_THREE
)
351 fprintf(fp
, "port 3: R%d (write ADD) ", srcs
.reg3
);
352 else if (ctrl
.read_reg3
)
353 fprintf(fp
, "port 3: R%d (read) ", srcs
.reg3
);
355 if (srcs
.uniform_const
) {
356 if (srcs
.uniform_const
& 0x80) {
357 fprintf(fp
, "uniform: U%d", (srcs
.uniform_const
& 0x7f) * 2);
// Print a 32-bit immediate as hex, followed by a comment giving the same
// bit pattern reinterpreted as a float (embedded constants are often f32).
static void dump_const_imm(FILE *fp, uint32_t imm)
{
        // Type-pun through a union, which is well-defined in C, rather
        // than a pointer cast that would violate strict aliasing.
        union {
                uint32_t i;
                float f;
        } fi;
        fi.i = imm;
        fprintf(fp, "0x%08x /* %f */", imm, fi.f);
}
373 static uint64_t get_const(uint64_t *consts
, struct bifrost_regs srcs
)
375 unsigned low_bits
= srcs
.uniform_const
& 0xf;
377 switch (srcs
.uniform_const
>> 4) {
400 return imm
| low_bits
;
403 static void dump_uniform_const_src(FILE *fp
, struct bifrost_regs srcs
, uint64_t *consts
, bool high32
)
405 if (srcs
.uniform_const
& 0x80) {
406 unsigned uniform
= (srcs
.uniform_const
& 0x7f) * 2;
407 fprintf(fp
, "U%d", uniform
+ (high32
? 1 : 0));
408 } else if (srcs
.uniform_const
>= 0x20) {
409 uint64_t imm
= get_const(consts
, srcs
);
411 dump_const_imm(fp
, imm
>> 32);
413 dump_const_imm(fp
, imm
);
415 switch (srcs
.uniform_const
) {
420 fprintf(fp
, "atest-data");
423 fprintf(fp
, "sample-ptr");
433 fprintf(fp
, "blend-descriptor%u", (unsigned) srcs
.uniform_const
- 8);
436 fprintf(fp
, "unkConst%u", (unsigned) srcs
.uniform_const
);
447 static void dump_src(FILE *fp
, unsigned src
, struct bifrost_regs srcs
, uint64_t *consts
, bool isFMA
)
451 fprintf(fp
, "R%d", get_reg0(srcs
));
454 fprintf(fp
, "R%d", get_reg1(srcs
));
457 fprintf(fp
, "R%d", srcs
.reg3
);
463 fprintf(fp
, "T"); // i.e. the output of FMA this cycle
466 dump_uniform_const_src(fp
, srcs
, consts
, false);
469 dump_uniform_const_src(fp
, srcs
, consts
, true);
480 static void dump_output_mod(FILE *fp
, unsigned mod
)
486 fprintf(fp
, ".clamp_0_inf");
487 break; // max(out, 0)
489 fprintf(fp
, ".clamp_m1_1");
490 break; // clamp(out, -1, 1)
492 fprintf(fp
, ".clamp_0_1");
493 break; // clamp(out, 0, 1)
499 static void dump_minmax_mode(FILE *fp
, unsigned mod
)
503 /* Same as fmax() and fmin() -- return the other number if any
504 * number is NaN. Also always return +0 if one argument is +0 and
509 /* Instead of never returning a NaN, always return one. The
510 * "greater"/"lesser" NaN is always returned, first by checking the
511 * sign and then the mantissa bits.
513 fprintf(fp
, ".nan_wins");
516 /* For max, implement src0 > src1 ? src0 : src1
517 * For min, implement src0 < src1 ? src0 : src1
519 * This includes handling NaN's and signedness of 0 differently
520 * from above, since +0 and -0 compare equal and comparisons always
521 * return false for NaN's. As a result, this mode is *not*
524 fprintf(fp
, ".src1_wins");
527 /* For max, implement src0 < src1 ? src1 : src0
528 * For min, implement src0 > src1 ? src1 : src0
530 fprintf(fp
, ".src0_wins");
537 static void dump_round_mode(FILE *fp
, unsigned mod
)
541 /* roundTiesToEven, the IEEE default. */
544 /* roundTowardPositive in the IEEE spec. */
545 fprintf(fp
, ".round_pos");
548 /* roundTowardNegative in the IEEE spec. */
549 fprintf(fp
, ".round_neg");
552 /* roundTowardZero in the IEEE spec. */
553 fprintf(fp
, ".round_zero");
560 static const struct fma_op_info FMAOpInfos
[] = {
561 { 0x00000, "FMA.f32", FMA_FMA
},
562 { 0x40000, "MAX.f32", FMA_FMINMAX
},
563 { 0x44000, "MIN.f32", FMA_FMINMAX
},
564 { 0x48000, "FCMP.GL", FMA_FCMP
},
565 { 0x4c000, "FCMP.D3D", FMA_FCMP
},
566 { 0x4ff98, "ADD.i32", FMA_TWO_SRC
},
567 { 0x4ffd8, "SUB.i32", FMA_TWO_SRC
},
568 { 0x4fff0, "SUBB.i32", FMA_TWO_SRC
},
569 { 0x50000, "FMA_MSCALE", FMA_FMA_MSCALE
},
570 { 0x58000, "ADD.f32", FMA_FADD
},
571 { 0x5c000, "CSEL.FEQ.f32", FMA_FOUR_SRC
},
572 { 0x5c200, "CSEL.FGT.f32", FMA_FOUR_SRC
},
573 { 0x5c400, "CSEL.FGE.f32", FMA_FOUR_SRC
},
574 { 0x5c600, "CSEL.IEQ.f32", FMA_FOUR_SRC
},
575 { 0x5c800, "CSEL.IGT.i32", FMA_FOUR_SRC
},
576 { 0x5ca00, "CSEL.IGE.i32", FMA_FOUR_SRC
},
577 { 0x5cc00, "CSEL.UGT.i32", FMA_FOUR_SRC
},
578 { 0x5ce00, "CSEL.UGE.i32", FMA_FOUR_SRC
},
579 { 0x5d8d0, "ICMP.D3D.GT.v2i16", FMA_TWO_SRC
},
580 { 0x5d9d0, "UCMP.D3D.GT.v2i16", FMA_TWO_SRC
},
581 { 0x5dad0, "ICMP.D3D.GE.v2i16", FMA_TWO_SRC
},
582 { 0x5dbd0, "UCMP.D3D.GE.v2i16", FMA_TWO_SRC
},
583 { 0x5dcd0, "ICMP.D3D.EQ.v2i16", FMA_TWO_SRC
},
584 { 0x5de40, "ICMP.GL.GT.i32", FMA_TWO_SRC
}, // src0 > src1 ? 1 : 0
585 { 0x5de48, "ICMP.GL.GE.i32", FMA_TWO_SRC
},
586 { 0x5de50, "UCMP.GL.GT.i32", FMA_TWO_SRC
},
587 { 0x5de58, "UCMP.GL.GE.i32", FMA_TWO_SRC
},
588 { 0x5de60, "ICMP.GL.EQ.i32", FMA_TWO_SRC
},
589 { 0x5dec0, "ICMP.D3D.GT.i32", FMA_TWO_SRC
}, // src0 > src1 ? ~0 : 0
590 { 0x5dec8, "ICMP.D3D.GE.i32", FMA_TWO_SRC
},
591 { 0x5ded0, "UCMP.D3D.GT.i32", FMA_TWO_SRC
},
592 { 0x5ded8, "UCMP.D3D.GE.i32", FMA_TWO_SRC
},
593 { 0x5dee0, "ICMP.D3D.EQ.i32", FMA_TWO_SRC
},
594 { 0x60200, "RSHIFT_NAND.i32", FMA_THREE_SRC
},
595 { 0x603c0, "RSHIFT_NAND.v2i16", FMA_THREE_SRC
},
596 { 0x60e00, "RSHIFT_OR.i32", FMA_THREE_SRC
},
597 { 0x60fc0, "RSHIFT_OR.v2i16", FMA_THREE_SRC
},
598 { 0x61200, "RSHIFT_AND.i32", FMA_THREE_SRC
},
599 { 0x613c0, "RSHIFT_AND.v2i16", FMA_THREE_SRC
},
600 { 0x61e00, "RSHIFT_NOR.i32", FMA_THREE_SRC
}, // ~((src0 << src2) | src1)
601 { 0x61fc0, "RSHIFT_NOR.v2i16", FMA_THREE_SRC
}, // ~((src0 << src2) | src1)
602 { 0x62200, "LSHIFT_NAND.i32", FMA_THREE_SRC
},
603 { 0x623c0, "LSHIFT_NAND.v2i16", FMA_THREE_SRC
},
604 { 0x62e00, "LSHIFT_OR.i32", FMA_THREE_SRC
}, // (src0 << src2) | src1
605 { 0x62fc0, "LSHIFT_OR.v2i16", FMA_THREE_SRC
}, // (src0 << src2) | src1
606 { 0x63200, "LSHIFT_AND.i32", FMA_THREE_SRC
}, // (src0 << src2) & src1
607 { 0x633c0, "LSHIFT_AND.v2i16", FMA_THREE_SRC
},
608 { 0x63e00, "LSHIFT_NOR.i32", FMA_THREE_SRC
},
609 { 0x63fc0, "LSHIFT_NOR.v2i16", FMA_THREE_SRC
},
610 { 0x64200, "RSHIFT_XOR.i32", FMA_THREE_SRC
},
611 { 0x643c0, "RSHIFT_XOR.v2i16", FMA_THREE_SRC
},
612 { 0x64600, "RSHIFT_XNOR.i32", FMA_THREE_SRC
}, // ~((src0 >> src2) ^ src1)
613 { 0x647c0, "RSHIFT_XNOR.v2i16", FMA_THREE_SRC
}, // ~((src0 >> src2) ^ src1)
614 { 0x64a00, "LSHIFT_XOR.i32", FMA_THREE_SRC
},
615 { 0x64bc0, "LSHIFT_XOR.v2i16", FMA_THREE_SRC
},
616 { 0x64e00, "LSHIFT_XNOR.i32", FMA_THREE_SRC
}, // ~((src0 >> src2) ^ src1)
617 { 0x64fc0, "LSHIFT_XNOR.v2i16", FMA_THREE_SRC
}, // ~((src0 >> src2) ^ src1)
618 { 0x65200, "LSHIFT_ADD.i32", FMA_THREE_SRC
},
619 { 0x65600, "LSHIFT_SUB.i32", FMA_THREE_SRC
}, // (src0 << src2) - src1
620 { 0x65a00, "LSHIFT_RSUB.i32", FMA_THREE_SRC
}, // src1 - (src0 << src2)
621 { 0x65e00, "RSHIFT_ADD.i32", FMA_THREE_SRC
},
622 { 0x66200, "RSHIFT_SUB.i32", FMA_THREE_SRC
},
623 { 0x66600, "RSHIFT_RSUB.i32", FMA_THREE_SRC
},
624 { 0x66a00, "ARSHIFT_ADD.i32", FMA_THREE_SRC
},
625 { 0x66e00, "ARSHIFT_SUB.i32", FMA_THREE_SRC
},
626 { 0x67200, "ARSHIFT_RSUB.i32", FMA_THREE_SRC
},
627 { 0x80000, "FMA.v2f16", FMA_FMA16
},
628 { 0xc0000, "MAX.v2f16", FMA_FMINMAX16
},
629 { 0xc4000, "MIN.v2f16", FMA_FMINMAX16
},
630 { 0xc8000, "FCMP.GL", FMA_FCMP16
},
631 { 0xcc000, "FCMP.D3D", FMA_FCMP16
},
632 { 0xcf900, "ADD.v2i16", FMA_TWO_SRC
},
633 { 0xcfc10, "ADDC.i32", FMA_TWO_SRC
},
634 { 0xcfd80, "ADD.i32.i16.X", FMA_TWO_SRC
},
635 { 0xcfd90, "ADD.i32.u16.X", FMA_TWO_SRC
},
636 { 0xcfdc0, "ADD.i32.i16.Y", FMA_TWO_SRC
},
637 { 0xcfdd0, "ADD.i32.u16.Y", FMA_TWO_SRC
},
638 { 0xd8000, "ADD.v2f16", FMA_FADD16
},
639 { 0xdc000, "CSEL.FEQ.v2f16", FMA_FOUR_SRC
},
640 { 0xdc200, "CSEL.FGT.v2f16", FMA_FOUR_SRC
},
641 { 0xdc400, "CSEL.FGE.v2f16", FMA_FOUR_SRC
},
642 { 0xdc600, "CSEL.IEQ.v2f16", FMA_FOUR_SRC
},
643 { 0xdc800, "CSEL.IGT.v2i16", FMA_FOUR_SRC
},
644 { 0xdca00, "CSEL.IGE.v2i16", FMA_FOUR_SRC
},
645 { 0xdcc00, "CSEL.UGT.v2i16", FMA_FOUR_SRC
},
646 { 0xdce00, "CSEL.UGE.v2i16", FMA_FOUR_SRC
},
647 { 0xdd000, "F32_TO_F16", FMA_TWO_SRC
},
648 { 0xe0046, "F16_TO_I16.XX", FMA_ONE_SRC
},
649 { 0xe0047, "F16_TO_U16.XX", FMA_ONE_SRC
},
650 { 0xe004e, "F16_TO_I16.YX", FMA_ONE_SRC
},
651 { 0xe004f, "F16_TO_U16.YX", FMA_ONE_SRC
},
652 { 0xe0056, "F16_TO_I16.XY", FMA_ONE_SRC
},
653 { 0xe0057, "F16_TO_U16.XY", FMA_ONE_SRC
},
654 { 0xe005e, "F16_TO_I16.YY", FMA_ONE_SRC
},
655 { 0xe005f, "F16_TO_U16.YY", FMA_ONE_SRC
},
656 { 0xe00c0, "I16_TO_F16.XX", FMA_ONE_SRC
},
657 { 0xe00c1, "U16_TO_F16.XX", FMA_ONE_SRC
},
658 { 0xe00c8, "I16_TO_F16.YX", FMA_ONE_SRC
},
659 { 0xe00c9, "U16_TO_F16.YX", FMA_ONE_SRC
},
660 { 0xe00d0, "I16_TO_F16.XY", FMA_ONE_SRC
},
661 { 0xe00d1, "U16_TO_F16.XY", FMA_ONE_SRC
},
662 { 0xe00d8, "I16_TO_F16.YY", FMA_ONE_SRC
},
663 { 0xe00d9, "U16_TO_F16.YY", FMA_ONE_SRC
},
664 { 0xe0136, "F32_TO_I32", FMA_ONE_SRC
},
665 { 0xe0137, "F32_TO_U32", FMA_ONE_SRC
},
666 { 0xe0178, "I32_TO_F32", FMA_ONE_SRC
},
667 { 0xe0179, "U32_TO_F32", FMA_ONE_SRC
},
668 { 0xe0198, "I16_TO_I32.X", FMA_ONE_SRC
},
669 { 0xe0199, "U16_TO_U32.X", FMA_ONE_SRC
},
670 { 0xe019a, "I16_TO_I32.Y", FMA_ONE_SRC
},
671 { 0xe019b, "U16_TO_U32.Y", FMA_ONE_SRC
},
672 { 0xe019c, "I16_TO_F32.X", FMA_ONE_SRC
},
673 { 0xe019d, "U16_TO_F32.X", FMA_ONE_SRC
},
674 { 0xe019e, "I16_TO_F32.Y", FMA_ONE_SRC
},
675 { 0xe019f, "U16_TO_F32.Y", FMA_ONE_SRC
},
676 { 0xe01a2, "F16_TO_F32.X", FMA_ONE_SRC
},
677 { 0xe01a3, "F16_TO_F32.Y", FMA_ONE_SRC
},
678 { 0xe032c, "NOP", FMA_ONE_SRC
},
679 { 0xe032d, "MOV", FMA_ONE_SRC
},
680 { 0xe032f, "SWZ.YY.v2i16", FMA_ONE_SRC
},
681 // From the ARM patent US20160364209A1:
682 // "Decompose v (the input) into numbers x1 and s such that v = x1 * 2^s,
683 // and x1 is a floating point value in a predetermined range where the
684 // value 1 is within the range and not at one extremity of the range (e.g.
685 // choose a range where 1 is towards middle of range)."
688 { 0xe0345, "LOG_FREXPM", FMA_ONE_SRC
},
689 // Given a floating point number m * 2^e, returns m * 2^{-1}. This is
690 // exactly the same as the mantissa part of frexp().
691 { 0xe0365, "FRCP_FREXPM", FMA_ONE_SRC
},
692 // Given a floating point number m * 2^e, returns m * 2^{-2} if e is even,
693 // and m * 2^{-1} if e is odd. In other words, scales by powers of 4 until
694 // within the range [0.25, 1). Used for square-root and reciprocal
696 { 0xe0375, "FSQRT_FREXPM", FMA_ONE_SRC
},
697 // Given a floating point number m * 2^e, computes -e - 1 as an integer.
698 // Zero and infinity/NaN return 0.
699 { 0xe038d, "FRCP_FREXPE", FMA_ONE_SRC
},
700 // Computes floor(e/2) + 1.
701 { 0xe03a5, "FSQRT_FREXPE", FMA_ONE_SRC
},
702 // Given a floating point number m * 2^e, computes -floor(e/2) - 1 as an
704 { 0xe03ad, "FRSQ_FREXPE", FMA_ONE_SRC
},
705 { 0xe03c5, "LOG_FREXPE", FMA_ONE_SRC
},
706 { 0xe03fa, "CLZ", FMA_ONE_SRC
},
707 { 0xe0b80, "IMAX3", FMA_THREE_SRC
},
708 { 0xe0bc0, "UMAX3", FMA_THREE_SRC
},
709 { 0xe0c00, "IMIN3", FMA_THREE_SRC
},
710 { 0xe0c40, "UMIN3", FMA_THREE_SRC
},
711 { 0xe0ec5, "ROUND", FMA_ONE_SRC
},
712 { 0xe0f40, "CSEL", FMA_THREE_SRC
}, // src2 != 0 ? src1 : src0
713 { 0xe0fc0, "MUX.i32", FMA_THREE_SRC
}, // see ADD comment
714 { 0xe1805, "ROUNDEVEN", FMA_ONE_SRC
},
715 { 0xe1845, "CEIL", FMA_ONE_SRC
},
716 { 0xe1885, "FLOOR", FMA_ONE_SRC
},
717 { 0xe18c5, "TRUNC", FMA_ONE_SRC
},
718 { 0xe19b0, "ATAN_LDEXP.Y.f32", FMA_TWO_SRC
},
719 { 0xe19b8, "ATAN_LDEXP.X.f32", FMA_TWO_SRC
},
720 // These instructions in the FMA slot, together with LSHIFT_ADD_HIGH32.i32
721 // in the ADD slot, allow one to do a 64-bit addition with an extra small
722 // shift on one of the sources. There are three possible scenarios:
724 // 1) Full 64-bit addition. Do:
725 // out.x = LSHIFT_ADD_LOW32.i64 src1.x, src2.x, shift
726 // out.y = LSHIFT_ADD_HIGH32.i32 src1.y, src2.y
728 // The shift amount is applied to src2 before adding. The shift amount, and
729 // any extra bits from src2 plus the overflow bit, are sent directly from
730 // FMA to ADD instead of being passed explicitly. Hence, these two must be
731 // bundled together into the same instruction.
733 // 2) Add a 64-bit value src1 to a zero-extended 32-bit value src2. Do:
734 // out.x = LSHIFT_ADD_LOW32.u32 src1.x, src2, shift
735 // out.y = LSHIFT_ADD_HIGH32.i32 src1.x, 0
737 // Note that in this case, the second argument to LSHIFT_ADD_HIGH32 is
738 // ignored, so it can actually be anything. As before, the shift is applied
739 // to src2 before adding.
741 // 3) Add a 64-bit value to a sign-extended 32-bit value src2. Do:
742 // out.x = LSHIFT_ADD_LOW32.i32 src1.x, src2, shift
743 // out.y = LSHIFT_ADD_HIGH32.i32 src1.x, 0
745 // The only difference is the .i32 instead of .u32. Otherwise, this is
746 // exactly the same as before.
748 // In all these instructions, the shift amount is stored where the third
749 // source would be, so the shift has to be a small immediate from 0 to 7.
750 // This is fine for the expected use-case of these instructions, which is
751 // manipulating 64-bit pointers.
753 // These instructions can also be combined with various load/store
754 // instructions which normally take a 64-bit pointer in order to add a
755 // 32-bit or 64-bit offset to the pointer before doing the operation,
756 // optionally shifting the offset. The load/store op implicity does
757 // LSHIFT_ADD_HIGH32.i32 internally. Letting ptr be the pointer, and offset
758 // the desired offset, the cases go as follows:
760 // 1) Add a 64-bit offset:
761 // LSHIFT_ADD_LOW32.i64 ptr.x, offset.x, shift
762 // ld_st_op ptr.y, offset.y, ...
764 // Note that the output of LSHIFT_ADD_LOW32.i64 is not used, instead being
765 // implicitly sent to the load/store op to serve as the low 32 bits of the
768 // 2) Add a 32-bit unsigned offset:
769 // temp = LSHIFT_ADD_LOW32.u32 ptr.x, offset, shift
770 // ld_st_op temp, ptr.y, ...
772 // Now, the low 32 bits of offset << shift + ptr are passed explicitly to
773 // the ld_st_op, to match the case where there is no offset and ld_st_op is
776 // 3) Add a 32-bit signed offset:
777 // temp = LSHIFT_ADD_LOW32.i32 ptr.x, offset, shift
778 // ld_st_op temp, ptr.y, ...
780 // Again, the same as the unsigned case except for the offset.
781 { 0xe1c80, "LSHIFT_ADD_LOW32.u32", FMA_SHIFT_ADD64
},
782 { 0xe1cc0, "LSHIFT_ADD_LOW32.i64", FMA_SHIFT_ADD64
},
783 { 0xe1d80, "LSHIFT_ADD_LOW32.i32", FMA_SHIFT_ADD64
},
784 { 0xe1e00, "SEL.XX.i16", FMA_TWO_SRC
},
785 { 0xe1e08, "SEL.YX.i16", FMA_TWO_SRC
},
786 { 0xe1e10, "SEL.XY.i16", FMA_TWO_SRC
},
787 { 0xe1e18, "SEL.YY.i16", FMA_TWO_SRC
},
788 { 0xe7800, "IMAD", FMA_THREE_SRC
},
789 { 0xe78db, "POPCNT", FMA_ONE_SRC
},
792 static struct fma_op_info
find_fma_op_info(unsigned op
)
794 for (unsigned i
= 0; i
< ARRAY_SIZE(FMAOpInfos
); i
++) {
796 switch (FMAOpInfos
[i
].src_type
) {
805 opCmp
= op
& ~0x1fff;
808 case FMA_SHIFT_ADD64
:
815 opCmp
= op
& ~0x3fff;
819 opCmp
= op
& ~0x3ffff;
825 opCmp
= op
& ~0x7fff;
831 if (FMAOpInfos
[i
].op
== opCmp
)
832 return FMAOpInfos
[i
];
835 struct fma_op_info info
;
836 snprintf(info
.name
, sizeof(info
.name
), "op%04x", op
);
838 info
.src_type
= FMA_THREE_SRC
;
842 static void dump_fcmp(FILE *fp
, unsigned op
)
864 fprintf(fp
, ".unk%d", op
);
869 static void dump_16swizzle(FILE *fp
, unsigned swiz
)
873 fprintf(fp
, ".%c%c", "xy"[swiz
& 1], "xy"[(swiz
>> 1) & 1]);
876 static void dump_fma_expand_src0(FILE *fp
, unsigned ctrl
)
898 static void dump_fma_expand_src1(FILE *fp
, unsigned ctrl
)
920 static void dump_fma(FILE *fp
, uint64_t word
, struct bifrost_regs regs
, struct bifrost_regs next_regs
, uint64_t *consts
, bool verbose
)
923 fprintf(fp
, "# FMA: %016" PRIx64
"\n", word
);
925 struct bifrost_fma_inst FMA
;
926 memcpy((char *) &FMA
, (char *) &word
, sizeof(struct bifrost_fma_inst
));
927 struct fma_op_info info
= find_fma_op_info(FMA
.op
);
929 fprintf(fp
, "%s", info
.name
);
930 if (info
.src_type
== FMA_FADD
||
931 info
.src_type
== FMA_FMINMAX
||
932 info
.src_type
== FMA_FMA
||
933 info
.src_type
== FMA_FADD16
||
934 info
.src_type
== FMA_FMINMAX16
||
935 info
.src_type
== FMA_FMA16
) {
936 dump_output_mod(fp
, bits(FMA
.op
, 12, 14));
937 switch (info
.src_type
) {
942 dump_round_mode(fp
, bits(FMA
.op
, 10, 12));
946 dump_minmax_mode(fp
, bits(FMA
.op
, 10, 12));
951 } else if (info
.src_type
== FMA_FCMP
|| info
.src_type
== FMA_FCMP16
) {
952 dump_fcmp(fp
, bits(FMA
.op
, 10, 13));
953 if (info
.src_type
== FMA_FCMP
)
956 fprintf(fp
, ".v2f16");
957 } else if (info
.src_type
== FMA_FMA_MSCALE
) {
958 if (FMA
.op
& (1 << 11)) {
959 switch ((FMA
.op
>> 9) & 0x3) {
961 /* This mode seems to do a few things:
962 * - Makes 0 * infinity (and incidentally 0 * nan) return 0,
963 * since generating a nan would poison the result of
964 * 1/infinity and 1/0.
965 * - Fiddles with which nan is returned in nan * nan,
966 * presumably to make sure that the same exact nan is
967 * returned for 1/nan.
969 fprintf(fp
, ".rcp_mode");
972 /* Similar to the above, but src0 always wins when multiplying
975 fprintf(fp
, ".sqrt_mode");
978 fprintf(fp
, ".unk%d_mode", (int) (FMA
.op
>> 9) & 0x3);
981 dump_output_mod(fp
, bits(FMA
.op
, 9, 11));
987 struct bifrost_reg_ctrl next_ctrl
= DecodeRegCtrl(fp
, next_regs
);
988 if (next_ctrl
.fma_write_unit
!= REG_WRITE_NONE
) {
989 fprintf(fp
, "{R%d, T0}, ", GetRegToWrite(next_ctrl
.fma_write_unit
, next_regs
));
994 switch (info
.src_type
) {
996 dump_src(fp
, FMA
.src0
, regs
, consts
, true);
999 dump_src(fp
, FMA
.src0
, regs
, consts
, true);
1001 dump_src(fp
, FMA
.op
& 0x7, regs
, consts
, true);
1008 fprintf(fp
, "abs(");
1009 dump_src(fp
, FMA
.src0
, regs
, consts
, true);
1010 dump_fma_expand_src0(fp
, (FMA
.op
>> 6) & 0x7);
1017 fprintf(fp
, "abs(");
1018 dump_src(fp
, FMA
.op
& 0x7, regs
, consts
, true);
1019 dump_fma_expand_src1(fp
, (FMA
.op
>> 6) & 0x7);
1024 case FMA_FMINMAX16
: {
1025 bool abs1
= FMA
.op
& 0x8;
1026 bool abs2
= (FMA
.op
& 0x7) < FMA
.src0
;
1030 fprintf(fp
, "abs(");
1031 dump_src(fp
, FMA
.src0
, regs
, consts
, true);
1032 dump_16swizzle(fp
, (FMA
.op
>> 6) & 0x3);
1039 fprintf(fp
, "abs(");
1040 dump_src(fp
, FMA
.op
& 0x7, regs
, consts
, true);
1041 dump_16swizzle(fp
, (FMA
.op
>> 8) & 0x3);
1048 fprintf(fp
, "abs(");
1049 dump_src(fp
, FMA
.src0
, regs
, consts
, true);
1050 dump_fma_expand_src0(fp
, (FMA
.op
>> 6) & 0x7);
1057 fprintf(fp
, "abs(");
1058 dump_src(fp
, FMA
.op
& 0x7, regs
, consts
, true);
1059 dump_fma_expand_src1(fp
, (FMA
.op
>> 6) & 0x7);
1064 dump_src(fp
, FMA
.src0
, regs
, consts
, true);
1065 // Note: this is kinda a guess, I haven't seen the blob set this to
1066 // anything other than the identity, but it matches FMA_TWO_SRCFmod16
1067 dump_16swizzle(fp
, (FMA
.op
>> 6) & 0x3);
1069 dump_src(fp
, FMA
.op
& 0x7, regs
, consts
, true);
1070 dump_16swizzle(fp
, (FMA
.op
>> 8) & 0x3);
1072 case FMA_SHIFT_ADD64
:
1073 dump_src(fp
, FMA
.src0
, regs
, consts
, true);
1075 dump_src(fp
, FMA
.op
& 0x7, regs
, consts
, true);
1077 fprintf(fp
, "shift:%u", (FMA
.op
>> 3) & 0x7);
1080 dump_src(fp
, FMA
.src0
, regs
, consts
, true);
1082 dump_src(fp
, FMA
.op
& 0x7, regs
, consts
, true);
1084 dump_src(fp
, (FMA
.op
>> 3) & 0x7, regs
, consts
, true);
1087 if (FMA
.op
& (1 << 14))
1089 if (FMA
.op
& (1 << 9))
1090 fprintf(fp
, "abs(");
1091 dump_src(fp
, FMA
.src0
, regs
, consts
, true);
1092 dump_fma_expand_src0(fp
, (FMA
.op
>> 6) & 0x7);
1093 if (FMA
.op
& (1 << 9))
1096 if (FMA
.op
& (1 << 16))
1097 fprintf(fp
, "abs(");
1098 dump_src(fp
, FMA
.op
& 0x7, regs
, consts
, true);
1099 dump_fma_expand_src1(fp
, (FMA
.op
>> 6) & 0x7);
1100 if (FMA
.op
& (1 << 16))
1103 if (FMA
.op
& (1 << 15))
1105 if (FMA
.op
& (1 << 17))
1106 fprintf(fp
, "abs(");
1107 dump_src(fp
, (FMA
.op
>> 3) & 0x7, regs
, consts
, true);
1108 if (FMA
.op
& (1 << 17))
1112 if (FMA
.op
& (1 << 14))
1114 dump_src(fp
, FMA
.src0
, regs
, consts
, true);
1115 dump_16swizzle(fp
, (FMA
.op
>> 6) & 0x3);
1117 dump_src(fp
, FMA
.op
& 0x7, regs
, consts
, true);
1118 dump_16swizzle(fp
, (FMA
.op
>> 8) & 0x3);
1120 if (FMA
.op
& (1 << 15))
1122 dump_src(fp
, (FMA
.op
>> 3) & 0x7, regs
, consts
, true);
1123 dump_16swizzle(fp
, (FMA
.op
>> 16) & 0x3);
1126 dump_src(fp
, FMA
.src0
, regs
, consts
, true);
1128 dump_src(fp
, FMA
.op
& 0x7, regs
, consts
, true);
1130 dump_src(fp
, (FMA
.op
>> 3) & 0x7, regs
, consts
, true);
1132 dump_src(fp
, (FMA
.op
>> 6) & 0x7, regs
, consts
, true);
1134 case FMA_FMA_MSCALE
:
1135 if (FMA
.op
& (1 << 12))
1136 fprintf(fp
, "abs(");
1137 dump_src(fp
, FMA
.src0
, regs
, consts
, true);
1138 if (FMA
.op
& (1 << 12))
1141 if (FMA
.op
& (1 << 13))
1143 dump_src(fp
, FMA
.op
& 0x7, regs
, consts
, true);
1145 if (FMA
.op
& (1 << 14))
1147 dump_src(fp
, (FMA
.op
>> 3) & 0x7, regs
, consts
, true);
1149 dump_src(fp
, (FMA
.op
>> 6) & 0x7, regs
, consts
, true);
1155 static const struct add_op_info add_op_infos
[] = {
1156 { 0x00000, "MAX.f32", ADD_FMINMAX
},
1157 { 0x02000, "MIN.f32", ADD_FMINMAX
},
1158 { 0x04000, "ADD.f32", ADD_FADD
},
1159 { 0x06000, "FCMP.GL", ADD_FCMP
},
1160 { 0x07000, "FCMP.D3D", ADD_FCMP
},
1161 { 0x07856, "F16_TO_I16", ADD_ONE_SRC
},
1162 { 0x07857, "F16_TO_U16", ADD_ONE_SRC
},
1163 { 0x078c0, "I16_TO_F16.XX", ADD_ONE_SRC
},
1164 { 0x078c1, "U16_TO_F16.XX", ADD_ONE_SRC
},
1165 { 0x078c8, "I16_TO_F16.YX", ADD_ONE_SRC
},
1166 { 0x078c9, "U16_TO_F16.YX", ADD_ONE_SRC
},
1167 { 0x078d0, "I16_TO_F16.XY", ADD_ONE_SRC
},
1168 { 0x078d1, "U16_TO_F16.XY", ADD_ONE_SRC
},
1169 { 0x078d8, "I16_TO_F16.YY", ADD_ONE_SRC
},
1170 { 0x078d9, "U16_TO_F16.YY", ADD_ONE_SRC
},
1171 { 0x07936, "F32_TO_I32", ADD_ONE_SRC
},
1172 { 0x07937, "F32_TO_U32", ADD_ONE_SRC
},
1173 { 0x07978, "I32_TO_F32", ADD_ONE_SRC
},
1174 { 0x07979, "U32_TO_F32", ADD_ONE_SRC
},
1175 { 0x07998, "I16_TO_I32.X", ADD_ONE_SRC
},
1176 { 0x07999, "U16_TO_U32.X", ADD_ONE_SRC
},
1177 { 0x0799a, "I16_TO_I32.Y", ADD_ONE_SRC
},
1178 { 0x0799b, "U16_TO_U32.Y", ADD_ONE_SRC
},
1179 { 0x0799c, "I16_TO_F32.X", ADD_ONE_SRC
},
1180 { 0x0799d, "U16_TO_F32.X", ADD_ONE_SRC
},
1181 { 0x0799e, "I16_TO_F32.Y", ADD_ONE_SRC
},
1182 { 0x0799f, "U16_TO_F32.Y", ADD_ONE_SRC
},
1183 // take the low 16 bits, and expand it to a 32-bit float
1184 { 0x079a2, "F16_TO_F32.X", ADD_ONE_SRC
},
1185 // take the high 16 bits, ...
1186 { 0x079a3, "F16_TO_F32.Y", ADD_ONE_SRC
},
1187 { 0x07b2b, "SWZ.YX.v2i16", ADD_ONE_SRC
},
1188 { 0x07b2c, "NOP", ADD_ONE_SRC
},
1189 { 0x07b29, "SWZ.XX.v2i16", ADD_ONE_SRC
},
1190 // Logically, this should be SWZ.XY, but that's equivalent to a move, and
1191 // this seems to be the canonical way the blob generates a MOV.
1192 { 0x07b2d, "MOV", ADD_ONE_SRC
},
1193 { 0x07b2f, "SWZ.YY.v2i16", ADD_ONE_SRC
},
1194 // Given a floating point number m * 2^e, returns m ^ 2^{-1}.
1195 { 0x07b65, "FRCP_FREXPM", ADD_ONE_SRC
},
1196 { 0x07b75, "FSQRT_FREXPM", ADD_ONE_SRC
},
1197 { 0x07b8d, "FRCP_FREXPE", ADD_ONE_SRC
},
1198 { 0x07ba5, "FSQRT_FREXPE", ADD_ONE_SRC
},
1199 { 0x07bad, "FRSQ_FREXPE", ADD_ONE_SRC
},
1200 // From the ARM patent US20160364209A1:
1201 // "Decompose v (the input) into numbers x1 and s such that v = x1 * 2^s,
1202 // and x1 is a floating point value in a predetermined range where the
1203 // value 1 is within the range and not at one extremity of the range (e.g.
1204 // choose a range where 1 is towards middle of range)."
1207 { 0x07bc5, "FLOG_FREXPE", ADD_ONE_SRC
},
1208 { 0x07d45, "CEIL", ADD_ONE_SRC
},
1209 { 0x07d85, "FLOOR", ADD_ONE_SRC
},
1210 { 0x07dc5, "TRUNC", ADD_ONE_SRC
},
1211 { 0x07f18, "LSHIFT_ADD_HIGH32.i32", ADD_TWO_SRC
},
1212 { 0x08000, "LD_ATTR.f16", ADD_LOAD_ATTR
, true },
1213 { 0x08100, "LD_ATTR.v2f16", ADD_LOAD_ATTR
, true },
1214 { 0x08200, "LD_ATTR.v3f16", ADD_LOAD_ATTR
, true },
1215 { 0x08300, "LD_ATTR.v4f16", ADD_LOAD_ATTR
, true },
1216 { 0x08400, "LD_ATTR.f32", ADD_LOAD_ATTR
, true },
1217 { 0x08500, "LD_ATTR.v3f32", ADD_LOAD_ATTR
, true },
1218 { 0x08600, "LD_ATTR.v3f32", ADD_LOAD_ATTR
, true },
1219 { 0x08700, "LD_ATTR.v4f32", ADD_LOAD_ATTR
, true },
1220 { 0x08800, "LD_ATTR.i32", ADD_LOAD_ATTR
, true },
1221 { 0x08900, "LD_ATTR.v3i32", ADD_LOAD_ATTR
, true },
1222 { 0x08a00, "LD_ATTR.v3i32", ADD_LOAD_ATTR
, true },
1223 { 0x08b00, "LD_ATTR.v4i32", ADD_LOAD_ATTR
, true },
1224 { 0x08c00, "LD_ATTR.u32", ADD_LOAD_ATTR
, true },
1225 { 0x08d00, "LD_ATTR.v3u32", ADD_LOAD_ATTR
, true },
1226 { 0x08e00, "LD_ATTR.v3u32", ADD_LOAD_ATTR
, true },
1227 { 0x08f00, "LD_ATTR.v4u32", ADD_LOAD_ATTR
, true },
1228 { 0x0a000, "LD_VAR.32", ADD_VARYING_INTERP
, true },
1229 { 0x0b000, "TEX", ADD_TEX_COMPACT
, true },
1230 { 0x0c188, "LOAD.i32", ADD_TWO_SRC
, true },
1231 { 0x0c1a0, "LD_UBO.i32", ADD_TWO_SRC
, true },
1232 { 0x0c1b8, "LD_SCRATCH.v2i32", ADD_TWO_SRC
, true },
1233 { 0x0c1c8, "LOAD.v2i32", ADD_TWO_SRC
, true },
1234 { 0x0c1e0, "LD_UBO.v2i32", ADD_TWO_SRC
, true },
1235 { 0x0c1f8, "LD_SCRATCH.v2i32", ADD_TWO_SRC
, true },
1236 { 0x0c208, "LOAD.v4i32", ADD_TWO_SRC
, true },
1237 // src0 = offset, src1 = binding
1238 { 0x0c220, "LD_UBO.v4i32", ADD_TWO_SRC
, true },
1239 { 0x0c238, "LD_SCRATCH.v4i32", ADD_TWO_SRC
, true },
1240 { 0x0c248, "STORE.v4i32", ADD_TWO_SRC
, true },
1241 { 0x0c278, "ST_SCRATCH.v4i32", ADD_TWO_SRC
, true },
1242 { 0x0c588, "STORE.i32", ADD_TWO_SRC
, true },
1243 { 0x0c5b8, "ST_SCRATCH.i32", ADD_TWO_SRC
, true },
1244 { 0x0c5c8, "STORE.v2i32", ADD_TWO_SRC
, true },
1245 { 0x0c5f8, "ST_SCRATCH.v2i32", ADD_TWO_SRC
, true },
1246 { 0x0c648, "LOAD.u16", ADD_TWO_SRC
, true }, // zero-extends
1247 { 0x0ca88, "LOAD.v3i32", ADD_TWO_SRC
, true },
1248 { 0x0caa0, "LD_UBO.v3i32", ADD_TWO_SRC
, true },
1249 { 0x0cab8, "LD_SCRATCH.v3i32", ADD_TWO_SRC
, true },
1250 { 0x0cb88, "STORE.v3i32", ADD_TWO_SRC
, true },
1251 { 0x0cbb8, "ST_SCRATCH.v3i32", ADD_TWO_SRC
, true },
1252 // *_FAST does not exist on G71 (added to G51, G72, and everything after)
1253 { 0x0cc00, "FRCP_FAST.f32", ADD_ONE_SRC
},
1254 { 0x0cc20, "FRSQ_FAST.f32", ADD_ONE_SRC
},
1255 // Given a floating point number m * 2^e, produces a table-based
1256 // approximation of 2/m using the top 17 bits. Includes special cases for
1257 // infinity, NaN, and zero, and copies the sign bit.
1258 { 0x0ce00, "FRCP_TABLE", ADD_ONE_SRC
},
1260 { 0x0ce10, "FRCP_FAST.f16.X", ADD_ONE_SRC
},
1261 // A similar table for inverse square root, using the high 17 bits of the
1262 // mantissa as well as the low bit of the exponent.
1263 { 0x0ce20, "FRSQ_TABLE", ADD_ONE_SRC
},
1264 { 0x0ce30, "FRCP_FAST.f16.Y", ADD_ONE_SRC
},
1265 { 0x0ce50, "FRSQ_FAST.f16.X", ADD_ONE_SRC
},
1266 // Used in the argument reduction for log. Given a floating-point number
1267 // m * 2^e, uses the top 4 bits of m to produce an approximation to 1/m
1268 // with the exponent forced to 0 and only the top 5 bits are nonzero. 0,
1269 // infinity, and NaN all return 1.0.
1270 // See the ARM patent for more information.
1271 { 0x0ce60, "FRCP_APPROX", ADD_ONE_SRC
},
1272 { 0x0ce70, "FRSQ_FAST.f16.Y", ADD_ONE_SRC
},
1273 { 0x0cf40, "ATAN_ASSIST", ADD_TWO_SRC
},
1274 { 0x0cf48, "ATAN_TABLE", ADD_TWO_SRC
},
1275 { 0x0cf50, "SIN_TABLE", ADD_ONE_SRC
},
1276 { 0x0cf51, "COS_TABLE", ADD_ONE_SRC
},
1277 { 0x0cf58, "EXP_TABLE", ADD_ONE_SRC
},
1278 { 0x0cf60, "FLOG2_TABLE", ADD_ONE_SRC
},
1279 { 0x0cf64, "FLOGE_TABLE", ADD_ONE_SRC
},
1280 { 0x0d000, "BRANCH", ADD_BRANCH
},
1281 // For each bit i, return src2[i] ? src0[i] : src1[i]. In other words, this
1282 // is the same as (src2 & src0) | (~src2 & src1).
1283 { 0x0e8c0, "MUX", ADD_THREE_SRC
},
1284 { 0x0e9b0, "ATAN_LDEXP.Y.f32", ADD_TWO_SRC
},
1285 { 0x0e9b8, "ATAN_LDEXP.X.f32", ADD_TWO_SRC
},
1286 { 0x0ea60, "SEL.XX.i16", ADD_TWO_SRC
},
1287 { 0x0ea70, "SEL.XY.i16", ADD_TWO_SRC
},
1288 { 0x0ea68, "SEL.YX.i16", ADD_TWO_SRC
},
1289 { 0x0ea78, "SEL.YY.i16", ADD_TWO_SRC
},
1290 { 0x0ec00, "F32_TO_F16", ADD_TWO_SRC
},
1291 { 0x0f640, "ICMP.GL.GT", ADD_TWO_SRC
}, // src0 > src1 ? 1 : 0
1292 { 0x0f648, "ICMP.GL.GE", ADD_TWO_SRC
},
1293 { 0x0f650, "UCMP.GL.GT", ADD_TWO_SRC
},
1294 { 0x0f658, "UCMP.GL.GE", ADD_TWO_SRC
},
1295 { 0x0f660, "ICMP.GL.EQ", ADD_TWO_SRC
},
1296 { 0x0f6c0, "ICMP.D3D.GT", ADD_TWO_SRC
}, // src0 > src1 ? ~0 : 0
1297 { 0x0f6c8, "ICMP.D3D.GE", ADD_TWO_SRC
},
1298 { 0x0f6d0, "UCMP.D3D.GT", ADD_TWO_SRC
},
1299 { 0x0f6d8, "UCMP.D3D.GE", ADD_TWO_SRC
},
1300 { 0x0f6e0, "ICMP.D3D.EQ", ADD_TWO_SRC
},
1301 { 0x10000, "MAX.v2f16", ADD_FMINMAX16
},
1302 { 0x11000, "ADD_MSCALE.f32", ADD_FADDMscale
},
1303 { 0x12000, "MIN.v2f16", ADD_FMINMAX16
},
1304 { 0x14000, "ADD.v2f16", ADD_FADD16
},
1305 { 0x17000, "FCMP.D3D", ADD_FCMP16
},
1306 { 0x178c0, "ADD.i32", ADD_TWO_SRC
},
1307 { 0x17900, "ADD.v2i16", ADD_TWO_SRC
},
1308 { 0x17ac0, "SUB.i32", ADD_TWO_SRC
},
1309 { 0x17c10, "ADDC.i32", ADD_TWO_SRC
}, // adds src0 to the bottom bit of src1
1310 { 0x17d80, "ADD.i32.i16.X", ADD_TWO_SRC
},
1311 { 0x17d90, "ADD.i32.u16.X", ADD_TWO_SRC
},
1312 { 0x17dc0, "ADD.i32.i16.Y", ADD_TWO_SRC
},
1313 { 0x17dd0, "ADD.i32.u16.Y", ADD_TWO_SRC
},
1314 // Compute varying address and datatype (for storing in the vertex shader),
1315 // and store the vec3 result in the data register. The result is passed as
1316 // the 3 normal arguments to ST_VAR.
1317 { 0x18000, "LD_VAR_ADDR.f16", ADD_VARYING_ADDRESS
, true },
1318 { 0x18100, "LD_VAR_ADDR.f32", ADD_VARYING_ADDRESS
, true },
1319 { 0x18200, "LD_VAR_ADDR.i32", ADD_VARYING_ADDRESS
, true },
1320 { 0x18300, "LD_VAR_ADDR.u32", ADD_VARYING_ADDRESS
, true },
1321 // Conditional discards (discard_if) in NIR. Compares the first two
1322 // sources and discards if the result is true
1323 { 0x19181, "DISCARD.FEQ.f32", ADD_TWO_SRC
, true },
1324 { 0x19189, "DISCARD.FNE.f32", ADD_TWO_SRC
, true },
1325 { 0x1918C, "DISCARD.GL.f32", ADD_TWO_SRC
, true }, /* Consumes ICMP.GL/etc with fixed 0 argument */
1326 { 0x19190, "DISCARD.FLE.f32", ADD_TWO_SRC
, true },
1327 { 0x19198, "DISCARD.FLT.f32", ADD_TWO_SRC
, true },
1328 // Implements alpha-to-coverage, as well as possibly the late depth and
1329 // stencil tests. The first source is the existing sample mask in R60
1330 // (possibly modified by gl_SampleMask), and the second source is the alpha
1331 // value. The sample mask is written right away based on the
1332 // alpha-to-coverage result using the normal register write mechanism,
1333 // since that doesn't need to read from any memory, and then written again
1334 // later based on the result of the stencil and depth tests using the
1335 // special register.
1336 { 0x191e8, "ATEST.f32", ADD_TWO_SRC
, true },
1337 { 0x191f0, "ATEST.X.f16", ADD_TWO_SRC
, true },
1338 { 0x191f8, "ATEST.Y.f16", ADD_TWO_SRC
, true },
1339 // store a varying given the address and datatype from LD_VAR_ADDR
1340 { 0x19300, "ST_VAR.v1", ADD_THREE_SRC
, true },
1341 { 0x19340, "ST_VAR.v2", ADD_THREE_SRC
, true },
1342 { 0x19380, "ST_VAR.v3", ADD_THREE_SRC
, true },
1343 { 0x193c0, "ST_VAR.v4", ADD_THREE_SRC
, true },
1344 // This takes the sample coverage mask (computed by ATEST above) as a
1345 // regular argument, in addition to the vec4 color in the special register.
1346 { 0x1952c, "BLEND", ADD_BLENDING
, true },
1347 { 0x1a000, "LD_VAR.16", ADD_VARYING_INTERP
, true },
1348 { 0x1ae60, "TEX", ADD_TEX
, true },
1349 { 0x1c000, "RSHIFT_NAND.i32", ADD_THREE_SRC
},
1350 { 0x1c300, "RSHIFT_OR.i32", ADD_THREE_SRC
},
1351 { 0x1c400, "RSHIFT_AND.i32", ADD_THREE_SRC
},
1352 { 0x1c700, "RSHIFT_NOR.i32", ADD_THREE_SRC
},
1353 { 0x1c800, "LSHIFT_NAND.i32", ADD_THREE_SRC
},
1354 { 0x1cb00, "LSHIFT_OR.i32", ADD_THREE_SRC
},
1355 { 0x1cc00, "LSHIFT_AND.i32", ADD_THREE_SRC
},
1356 { 0x1cf00, "LSHIFT_NOR.i32", ADD_THREE_SRC
},
1357 { 0x1d000, "RSHIFT_XOR.i32", ADD_THREE_SRC
},
1358 { 0x1d100, "RSHIFT_XNOR.i32", ADD_THREE_SRC
},
1359 { 0x1d200, "LSHIFT_XOR.i32", ADD_THREE_SRC
},
1360 { 0x1d300, "LSHIFT_XNOR.i32", ADD_THREE_SRC
},
1361 { 0x1d400, "LSHIFT_ADD.i32", ADD_THREE_SRC
},
1362 { 0x1d500, "LSHIFT_SUB.i32", ADD_THREE_SRC
},
1363 { 0x1d500, "LSHIFT_RSUB.i32", ADD_THREE_SRC
},
1364 { 0x1d700, "RSHIFT_ADD.i32", ADD_THREE_SRC
},
1365 { 0x1d800, "RSHIFT_SUB.i32", ADD_THREE_SRC
},
1366 { 0x1d900, "RSHIFT_RSUB.i32", ADD_THREE_SRC
},
1367 { 0x1da00, "ARSHIFT_ADD.i32", ADD_THREE_SRC
},
1368 { 0x1db00, "ARSHIFT_SUB.i32", ADD_THREE_SRC
},
1369 { 0x1dc00, "ARSHIFT_RSUB.i32", ADD_THREE_SRC
},
1370 { 0x1dd18, "OR.i32", ADD_TWO_SRC
},
1371 { 0x1dd20, "AND.i32", ADD_TWO_SRC
},
1372 { 0x1dd60, "LSHIFT.i32", ADD_TWO_SRC
},
1373 { 0x1dd50, "XOR.i32", ADD_TWO_SRC
},
1374 { 0x1dd80, "RSHIFT.i32", ADD_TWO_SRC
},
1375 { 0x1dda0, "ARSHIFT.i32", ADD_TWO_SRC
},
1378 static struct add_op_info
// Look up the ADD-slot opcode table entry for `op`.
// Each src_type packs a different number of operand/modifier bits into the
// low bits of the opcode, so each switch case masks those bits off before
// comparing against the table key.
// NOTE(review): this excerpt is elided — several case labels and breaks of
// the switch are not visible here; verify masks against the full file.
find_add_op_info(unsigned op
)
1380 for (unsigned i
= 0; i
< ARRAY_SIZE(add_op_infos
); i
++) {
// Default compare value that matches nothing until a case sets it.
1381 unsigned opCmp
= ~0;
1382 switch (add_op_infos
[i
].src_type
) {
1399 opCmp
= op
& ~0x1fff;
1402 case ADD_FADDMscale
:
1403 opCmp
= op
& ~0xfff;
1407 opCmp
= op
& ~0x7ff;
1409 case ADD_TEX_COMPACT
:
1410 opCmp
= op
& ~0x3ff;
1412 case ADD_VARYING_INTERP
:
1413 opCmp
= op
& ~0x7ff;
1415 case ADD_VARYING_ADDRESS
:
1422 opCmp
= op
& ~0xfff;
// First table entry whose key equals the masked opcode wins.
1428 if (add_op_infos
[i
].op
== opCmp
)
1429 return add_op_infos
[i
];
// No match: synthesize a placeholder entry named "opXXXX" so callers can
// still print something for unknown opcodes.
1432 struct add_op_info info
;
1433 snprintf(info
.name
, sizeof(info
.name
), "op%04x", op
);
1435 info
.src_type
= ADD_TWO_SRC
;
1436 info
.has_data_reg
= true;
1440 static void dump_add(FILE *fp
, uint64_t word
, struct bifrost_regs regs
,
1441 struct bifrost_regs next_regs
, uint64_t *consts
,
1442 unsigned data_reg
, unsigned offset
, bool verbose
)
// Disassemble and print one ADD-slot instruction (`word`) of a cycle.
// `regs` are this instruction's port assignments, `next_regs` the next
// instruction's (needed to decode the writeback), `consts` the clause's
// embedded constants, `data_reg` the clause data register, and `offset`
// the clause offset used to resolve branch targets.
// NOTE(review): this excerpt is heavily elided — many case labels, braces
// and else-branches are missing from view.
1445 fprintf(fp
, "# ADD: %016" PRIx64
"\n", word
);
// Reinterpret the raw 64-bit word as the bifrost_add_inst bitfield layout.
1447 struct bifrost_add_inst ADD
;
1448 memcpy((char *) &ADD
, (char *) &word
, sizeof(ADD
));
1449 struct add_op_info info
= find_add_op_info(ADD
.op
);
1451 fprintf(fp
, "%s", info
.name
);
// Per-type suffixes (output modifier, rounding/minmax mode, compare op).
1453 // float16 seems like it doesn't support output modifiers
1454 if (info
.src_type
== ADD_FADD
|| info
.src_type
== ADD_FMINMAX
) {
1456 dump_output_mod(fp
, bits(ADD
.op
, 8, 10));
1457 if (info
.src_type
== ADD_FADD
)
1458 dump_round_mode(fp
, bits(ADD
.op
, 10, 12));
1460 dump_minmax_mode(fp
, bits(ADD
.op
, 10, 12));
1461 } else if (info
.src_type
== ADD_FCMP
|| info
.src_type
== ADD_FCMP16
) {
1462 dump_fcmp(fp
, bits(ADD
.op
, 3, 6));
1463 if (info
.src_type
== ADD_FCMP
)
1464 fprintf(fp
, ".f32");
1466 fprintf(fp
, ".v2f16");
1467 } else if (info
.src_type
== ADD_FADDMscale
) {
// Bits 6..8 select the special-value handling mode for FADD_MSCALE.
1468 switch ((ADD
.op
>> 6) & 0x7) {
1471 // causes GPU hangs on G71
1473 fprintf(fp
, ".invalid");
1475 // Same as usual outmod value.
1477 fprintf(fp
, ".clamp_0_1");
1479 // If src0 is infinite or NaN, flush it to zero so that the other
1480 // source is passed through unmodified.
1482 fprintf(fp
, ".flush_src0_inf_nan");
1486 fprintf(fp
, ".flush_src1_inf_nan");
1488 // Every other case seems to behave the same as the above?
1490 fprintf(fp
, ".unk%d", (ADD
.op
>> 6) & 0x7);
1493 } else if (info
.src_type
== ADD_VARYING_INTERP
) {
1495 fprintf(fp
, ".reuse");
1497 fprintf(fp
, ".flat");
// Bits 7..9 select the interpolation mode.
1498 switch ((ADD
.op
>> 7) & 0x3) {
1500 fprintf(fp
, ".per_frag");
1503 fprintf(fp
, ".centroid");
1508 fprintf(fp
, ".explicit");
// Bits 5..7 encode the vector size minus one.
1511 fprintf(fp
, ".v%d", ((ADD
.op
>> 5) & 0x3) + 1);
1512 } else if (info
.src_type
== ADD_BRANCH
) {
// Decode the branch condition/size fields packed into the opcode.
1513 enum branch_code branchCode
= (enum branch_code
) ((ADD
.op
>> 6) & 0x3f);
1514 if (branchCode
== BR_ALWAYS
) {
1515 // unconditional branch
1517 enum branch_cond cond
= (enum branch_cond
) ((ADD
.op
>> 6) & 0x7);
1518 enum branch_bit_size size
= (enum branch_bit_size
) ((ADD
.op
>> 9) & 0x7);
// Operand order can be swapped in the encoding; track that here so the
// printed condition reads in source order.
1519 bool portSwapped
= (ADD
.op
& 0x7) < ADD
.src0
;
1520 // See the comment in branch_bit_size
1521 if (size
== BR_SIZE_16YX0
)
1523 if (size
== BR_SIZE_16YX1
)
1524 portSwapped
= false;
1525 // These sizes are only for floating point comparisons, so the
1526 // non-floating-point comparisons are reused to encode the flipped
1528 if (size
== BR_SIZE_32_AND_16X
|| size
== BR_SIZE_32_AND_16Y
)
1529 portSwapped
= false;
1530 // There's only one argument, so we reuse the extra argument to
1532 if (size
== BR_SIZE_ZERO
)
1533 portSwapped
= !(ADD
.op
& 1);
// Print the comparison mnemonic; pairs below are the signed/unsigned and
// swapped/unswapped variants of each condition.
1538 fprintf(fp
, ".LT.u");
1540 fprintf(fp
, ".LT.i");
1543 if (size
== BR_SIZE_32_AND_16X
|| size
== BR_SIZE_32_AND_16Y
) {
1544 fprintf(fp
, ".UNE.f");
1547 fprintf(fp
, ".LE.u");
1549 fprintf(fp
, ".LE.i");
1554 fprintf(fp
, ".GT.u");
1556 fprintf(fp
, ".GT.i");
1560 fprintf(fp
, ".GE.u");
1562 fprintf(fp
, ".GE.i");
1566 fprintf(fp
, ".NE.i");
1568 fprintf(fp
, ".EQ.i");
1572 fprintf(fp
, ".UNE.f");
1574 fprintf(fp
, ".OEQ.f");
1578 fprintf(fp
, ".OGT.unk.f");
1580 fprintf(fp
, ".OGT.f");
1584 fprintf(fp
, ".OLT.unk.f");
1586 fprintf(fp
, ".OLT.f");
1591 case BR_SIZE_32_AND_16X
:
1592 case BR_SIZE_32_AND_16Y
:
1601 case BR_SIZE_ZERO
: {
1602 unsigned ctrl
= (ADD
.op
>> 1) & 0x3;
1604 fprintf(fp
, "32.Z");
1606 fprintf(fp
, "16.Z");
// Destination: a real register via the next instruction's ADD write unit,
// or the temporary T1 when nothing consumes the result through a port.
1614 struct bifrost_reg_ctrl next_ctrl
= DecodeRegCtrl(fp
, next_regs
);
1615 if (next_ctrl
.add_write_unit
!= REG_WRITE_NONE
) {
1616 fprintf(fp
, "{R%d, T1}, ", GetRegToWrite(next_ctrl
.add_write_unit
, next_regs
));
1618 fprintf(fp
, "T1, ");
// Operand printing varies per source type from here on.
1621 switch (info
.src_type
) {
1623 // Note: in this case, regs.uniform_const == location | 0x8
1624 // This probably means we can't load uniforms or immediates in the
1625 // same instruction. This re-uses the encoding that normally means
1626 // "disabled", where the low 4 bits are ignored. Perhaps the extra
1627 // 0x8 or'd in indicates this is happening.
1628 fprintf(fp
, "location:%d, ", regs
.uniform_const
& 0x7);
1631 dump_src(fp
, ADD
.src0
, regs
, consts
, false);
1634 case ADD_TEX_COMPACT
: {
// Texture ops: the compact form packs tex/sampler indices in the opcode;
// the full form reads a control word from the clause constants.
1637 bool dualTex
= false;
1638 if (info
.src_type
== ADD_TEX_COMPACT
) {
1639 tex_index
= (ADD
.op
>> 3) & 0x7;
1640 sampler_index
= (ADD
.op
>> 7) & 0x7;
1641 bool unknown
= (ADD
.op
& 0x40);
1642 // TODO: figure out if the unknown bit is ever 0
1644 fprintf(fp
, "unknown ");
1646 uint64_t constVal
= get_const(consts
, regs
);
1647 uint32_t controlBits
= (ADD
.op
& 0x8) ? (constVal
>> 32) : constVal
;
1648 struct bifrost_tex_ctrl ctrl
;
1649 memcpy((char *) &ctrl
, (char *) &controlBits
, sizeof(ctrl
));
1651 // TODO: figure out what actually triggers dual-tex
1652 if (ctrl
.result_type
== 9) {
1653 struct bifrost_dual_tex_ctrl dualCtrl
;
1654 memcpy((char *) &dualCtrl
, (char *) &controlBits
, sizeof(ctrl
));
1655 fprintf(fp
, "(dualtex) tex0:%d samp0:%d tex1:%d samp1:%d ",
1656 dualCtrl
.tex_index0
, dualCtrl
.sampler_index0
,
1657 dualCtrl
.tex_index1
, dualCtrl
.sampler_index1
);
1658 if (dualCtrl
.unk0
!= 3)
1659 fprintf(fp
, "unk:%d ", dualCtrl
.unk0
);
1662 if (ctrl
.no_merge_index
) {
1663 tex_index
= ctrl
.tex_index
;
1664 sampler_index
= ctrl
.sampler_index
;
1666 tex_index
= sampler_index
= ctrl
.tex_index
;
1667 unsigned unk
= ctrl
.sampler_index
>> 2;
1669 fprintf(fp
, "unk:%d ", unk
);
1670 if (ctrl
.sampler_index
& 1)
1672 if (ctrl
.sampler_index
& 2)
1677 fprintf(fp
, "unk0:%d ", ctrl
.unk0
);
1679 fprintf(fp
, "unk1 ");
1680 if (ctrl
.unk2
!= 0xf)
1681 fprintf(fp
, "unk2:%x ", ctrl
.unk2
);
1683 switch (ctrl
.result_type
) {
1685 fprintf(fp
, "f32 ");
1688 fprintf(fp
, "i32 ");
1691 fprintf(fp
, "u32 ");
1694 fprintf(fp
, "unktype(%x) ", ctrl
.result_type
);
1697 switch (ctrl
.tex_type
) {
1699 fprintf(fp
, "cube ");
1702 fprintf(fp
, "buffer ");
1713 fprintf(fp
, "shadow ");
1715 fprintf(fp
, "array ");
1718 if (ctrl
.calc_gradients
) {
1719 int comp
= (controlBits
>> 20) & 0x3;
1720 fprintf(fp
, "txg comp:%d ", comp
);
1722 fprintf(fp
, "txf ");
1725 if (!ctrl
.not_supply_lod
) {
1726 if (ctrl
.compute_lod
)
1727 fprintf(fp
, "lod_bias ");
1729 fprintf(fp
, "lod ");
1732 if (!ctrl
.calc_gradients
)
1733 fprintf(fp
, "grad ");
1736 if (ctrl
.texel_offset
)
1737 fprintf(fp
, "offset ");
// -1 means the index comes from a register (indirect) rather than the op.
1742 if (tex_index
== -1)
1743 fprintf(fp
, "tex:indirect ");
1745 fprintf(fp
, "tex:%d ", tex_index
);
1747 if (sampler_index
== -1)
1748 fprintf(fp
, "samp:indirect ");
1750 fprintf(fp
, "samp:%d ", sampler_index
);
1754 case ADD_VARYING_INTERP
: {
// Low 5 bits select the varying address: a slot number, or one of the
// special fragw/fragz inputs.
1755 unsigned addr
= ADD
.op
& 0x1f;
1756 if (addr
< 0b10100) {
1758 fprintf(fp
, "%d", addr
);
1759 } else if (addr
< 0b11000) {
1761 fprintf(fp
, "fragw");
1762 else if (addr
== 23)
1763 fprintf(fp
, "fragz");
1765 fprintf(fp
, "unk%d", addr
);
1767 dump_src(fp
, ADD
.op
& 0x7, regs
, consts
, false);
1770 dump_src(fp
, ADD
.src0
, regs
, consts
, false);
1773 case ADD_VARYING_ADDRESS
: {
1774 dump_src(fp
, ADD
.src0
, regs
, consts
, false);
1776 dump_src(fp
, ADD
.op
& 0x7, regs
, consts
, false);
// Location 20/21 fetch the address from the low/high half of a constant.
1778 unsigned location
= (ADD
.op
>> 3) & 0x1f;
1779 if (location
< 16) {
1780 fprintf(fp
, "location:%d", location
);
1781 } else if (location
== 20) {
1782 fprintf(fp
, "location:%u", (uint32_t) get_const(consts
, regs
));
1783 } else if (location
== 21) {
1784 fprintf(fp
, "location:%u", (uint32_t) (get_const(consts
, regs
) >> 32));
1786 fprintf(fp
, "location:%d(unk)", location
);
1791 fprintf(fp
, "location:%d, ", (ADD
.op
>> 3) & 0xf);
1793 dump_src(fp
, ADD
.src0
, regs
, consts
, false);
1795 dump_src(fp
, ADD
.op
& 0x7, regs
, consts
, false);
1798 dump_src(fp
, ADD
.src0
, regs
, consts
, false);
1800 dump_src(fp
, ADD
.op
& 0x7, regs
, consts
, false);
1802 dump_src(fp
, (ADD
.op
>> 3) & 0x7, regs
, consts
, false);
// Float two-source forms: bit 12 selects abs() on a source, bits 6..8
// select negate/swizzle modifiers.
1808 if (ADD
.op
& 0x1000)
1809 fprintf(fp
, "abs(");
1810 dump_src(fp
, ADD
.src0
, regs
, consts
, false);
1811 switch ((ADD
.op
>> 6) & 0x3) {
1818 if (ADD
.op
& 0x1000)
1824 fprintf(fp
, "abs(");
1825 dump_src(fp
, ADD
.op
& 0x7, regs
, consts
, false);
1826 switch ((ADD
.op
>> 6) & 0x3) {
1837 fprintf(fp
, ".unk");
1846 if (ADD
.op
& 0x1000)
1847 fprintf(fp
, "abs(");
1848 dump_src(fp
, ADD
.src0
, regs
, consts
, false);
1849 if (ADD
.op
& 0x1000)
1851 dump_16swizzle(fp
, (ADD
.op
>> 6) & 0x3);
1856 fprintf(fp
, "abs(");
1857 dump_src(fp
, ADD
.op
& 0x7, regs
, consts
, false);
1858 dump_16swizzle(fp
, (ADD
.op
>> 8) & 0x3);
1862 case ADD_FMINMAX16
: {
// fp16 min/max: abs flags are packed differently (abs2 via port order).
1863 bool abs1
= ADD
.op
& 0x8;
1864 bool abs2
= (ADD
.op
& 0x7) < ADD
.src0
;
1868 fprintf(fp
, "abs(");
1869 dump_src(fp
, ADD
.src0
, regs
, consts
, false);
1870 dump_16swizzle(fp
, (ADD
.op
>> 6) & 0x3);
1877 fprintf(fp
, "abs(");
1878 dump_src(fp
, ADD
.op
& 0x7, regs
, consts
, false);
1879 dump_16swizzle(fp
, (ADD
.op
>> 8) & 0x3);
1884 case ADD_FADDMscale
: {
1888 fprintf(fp
, "abs(");
1889 dump_src(fp
, ADD
.src0
, regs
, consts
, false);
1897 dump_src(fp
, ADD
.op
& 0x7, regs
, consts
, false);
1901 dump_src(fp
, (ADD
.op
>> 3) & 0x7, regs
, consts
, false);
// fp16 compare: bits 8-10 control abs/negate per source, 6..8 swizzles.
1905 if (ADD
.op
& 0x400) {
1908 if (ADD
.op
& 0x100) {
1909 fprintf(fp
, "abs(");
1911 dump_src(fp
, ADD
.src0
, regs
, consts
, false);
1912 switch ((ADD
.op
>> 6) & 0x3) {
1919 if (ADD
.op
& 0x100) {
1923 if (ADD
.op
& 0x200) {
1924 fprintf(fp
, "abs(");
1926 dump_src(fp
, ADD
.op
& 0x7, regs
, consts
, false);
1927 switch ((ADD
.op
>> 6) & 0x3) {
1938 fprintf(fp
, ".unk");
1941 if (ADD
.op
& 0x200) {
1946 dump_src(fp
, ADD
.src0
, regs
, consts
, false);
1947 dump_16swizzle(fp
, (ADD
.op
>> 6) & 0x3);
1949 dump_src(fp
, ADD
.op
& 0x7, regs
, consts
, false);
1950 dump_16swizzle(fp
, (ADD
.op
>> 8) & 0x3);
// ADD_BRANCH operands: condition sources (unless unconditional), then the
// branch target, decoded from a constant slot when possible.
1953 enum branch_code code
= (enum branch_code
) ((ADD
.op
>> 6) & 0x3f);
1954 enum branch_bit_size size
= (enum branch_bit_size
) ((ADD
.op
>> 9) & 0x7);
1955 if (code
!= BR_ALWAYS
) {
1956 dump_src(fp
, ADD
.src0
, regs
, consts
, false);
1966 case BR_SIZE_ZERO
: {
1967 unsigned ctrl
= (ADD
.op
>> 1) & 0x3;
1984 if (code
!= BR_ALWAYS
&& size
!= BR_SIZE_ZERO
) {
1985 dump_src(fp
, ADD
.op
& 0x7, regs
, consts
, false);
1990 case BR_SIZE_32_AND_16X
:
1994 case BR_SIZE_32_AND_16Y
:
2002 // I haven't had the chance to test if this actually specifies the
2003 // branch offset, since I couldn't get it to produce values other
2004 // than 5 (uniform/const high), but these three bits are always
2005 // consistent across branch instructions, so it makes sense...
2006 int offsetSrc
= (ADD
.op
>> 3) & 0x7;
2007 if (offsetSrc
== 4 || offsetSrc
== 5) {
2008 // If the offset is known/constant, we can decode it
2009 uint32_t raw_offset
;
2011 raw_offset
= get_const(consts
, regs
);
2013 raw_offset
= get_const(consts
, regs
) >> 32;
2014 // The high 4 bits are flags, while the rest is the
2015 // twos-complement offset in bytes (here we convert to
2017 int32_t branch_offset
= ((int32_t) raw_offset
<< 4) >> 8;
2019 // If high4 is the high 4 bits of the last 64-bit constant,
2020 // this is calculated as (high4 + 4) & 0xf, or 0 if the branch
2021 // offset itself is the last constant. Not sure if this is
2022 // actually used, or just garbage in unused bits, but in any
2023 // case, we can just ignore it here since it's redundant. Note
2024 // that if there is any padding, this will be 4 since the
2025 // padding counts as the last constant.
2026 unsigned flags
= raw_offset
>> 28;
2029 // Note: the offset is in bytes, relative to the beginning of the
2030 // current clause, so a zero offset would be a loop back to the
2031 // same clause (annoyingly different from Midgard).
2032 fprintf(fp
, "clause_%d", offset
+ branch_offset
);
2034 dump_src(fp
, offsetSrc
, regs
, consts
, false);
// Ops flagged has_data_reg also consume the clause data register.
2038 if (info
.has_data_reg
) {
2039 fprintf(fp
, ", R%d", data_reg
);
2044 void dump_instr(FILE *fp
, const struct bifrost_alu_inst
*instr
,
2045 struct bifrost_regs next_regs
, uint64_t *consts
,
2046 unsigned data_reg
, unsigned offset
, bool verbose
)
// Print one packed ALU instruction: decode its register-port bits, then
// dump the FMA slot and the ADD slot that execute together in this cycle.
// `next_regs` belongs to the following instruction, which is where this
// instruction's results are written back.
2048 struct bifrost_regs regs
;
// NOTE(review): "®s" below is a mojibake of "&regs" introduced by the
// extraction of this file — confirm against the upstream source.
2049 memcpy((char *) ®s
, (char *) &instr
->reg_bits
, sizeof(regs
));
2052 fprintf(fp
, "# regs: %016" PRIx64
"\n", instr
->reg_bits
);
2053 dump_regs(fp
, regs
);
2055 dump_fma(fp
, instr
->fma_bits
, regs
, next_regs
, consts
, verbose
);
2056 dump_add(fp
, instr
->add_bits
, regs
, next_regs
, consts
, data_reg
, offset
, verbose
);
2059 bool dump_clause(FILE *fp
, uint32_t *words
, unsigned *size
, unsigned offset
, bool verbose
)
// Decode and print one clause: walk the 128-bit quadwords, unpack the
// per-format instruction/constant fields into `instrs`/`consts`, then dump
// the header, each instruction, and the embedded constants.
// NOTE(review): this excerpt is elided — several clause-format cases and
// the return path are not visible here.
2061 // State for a decoded clause
2062 struct bifrost_alu_inst instrs
[8] = {};
2063 uint64_t consts
[6] = {};
2064 unsigned num_instrs
= 0;
2065 unsigned num_consts
= 0;
2066 uint64_t header_bits
= 0;
2067 bool stopbit
= false;
2070 for (i
= 0; ; i
++, words
+= 4) {
2073 for (int j
= 0; j
< 4; j
++)
2074 fprintf(fp
, "%08x ", words
[3 - j
]); // low bit on the right
// The low byte of the quadword selects the packing format.
2077 unsigned tag
= bits(words
[0], 0, 8);
2079 // speculatively decode some things that are common between many formats, so we can share some code
2080 struct bifrost_alu_inst main_instr
= {};
2082 main_instr
.add_bits
= bits(words
[2], 2, 32 - 13);
2084 main_instr
.fma_bits
= bits(words
[1], 11, 32) | bits(words
[2], 0, 2) << (32 - 11);
2086 main_instr
.reg_bits
= ((uint64_t) bits(words
[1], 0, 11)) << 24 | (uint64_t) bits(words
[0], 8, 32);
// Candidate 60-bit constants, in case this quadword carries a constant pair.
2088 uint64_t const0
= bits(words
[0], 8, 32) << 4 | (uint64_t) words
[1] << 28 | bits(words
[2], 0, 4) << 60;
2089 uint64_t const1
= bits(words
[2], 4, 32) << 4 | (uint64_t) words
[3] << 32;
2091 bool stop
= tag
& 0x40;
2094 fprintf(fp
, "# tag: 0x%02x\n", tag
);
// Below: per-tag unpacking; the slot index depends on the stop bit and
// the high tag bits, since instruction positions are format-specific.
2097 unsigned idx
= stop
? 5 : 2;
2098 main_instr
.add_bits
|= ((tag
>> 3) & 0x7) << 17;
2099 instrs
[idx
+ 1] = main_instr
;
2100 instrs
[idx
].add_bits
= bits(words
[3], 0, 17) | ((tag
& 0x7) << 17);
2101 instrs
[idx
].fma_bits
|= bits(words
[2], 19, 32) << 10;
2102 consts
[0] = bits(words
[3], 17, 32) << 4;
2105 switch ((tag
>> 3) & 0x7) {
2107 switch (tag
& 0x7) {
2109 main_instr
.add_bits
|= bits(words
[3], 29, 32) << 17;
2110 instrs
[1] = main_instr
;
2115 instrs
[2].add_bits
= bits(words
[3], 0, 17) | bits(words
[3], 29, 32) << 17;
2116 instrs
[2].fma_bits
|= bits(words
[2], 19, 32) << 10;
2124 instrs
[2].add_bits
= bits(words
[3], 0, 17) | bits(words
[3], 29, 32) << 17;
2125 instrs
[2].fma_bits
|= bits(words
[2], 19, 32) << 10;
2126 main_instr
.add_bits
|= bits(words
[3], 26, 29) << 17;
2127 instrs
[3] = main_instr
;
2128 if ((tag
& 0x7) == 0x5) {
2134 instrs
[5].add_bits
= bits(words
[3], 0, 17) | bits(words
[3], 29, 32) << 17;
2135 instrs
[5].fma_bits
|= bits(words
[2], 19, 32) << 10;
2142 instrs
[5].add_bits
= bits(words
[3], 0, 17) | bits(words
[3], 29, 32) << 17;
2143 instrs
[5].fma_bits
|= bits(words
[2], 19, 32) << 10;
2144 main_instr
.add_bits
|= bits(words
[3], 26, 29) << 17;
2145 instrs
[6] = main_instr
;
2150 fprintf(fp
, "unknown tag bits 0x%02x\n", tag
);
2155 unsigned idx
= ((tag
>> 3) & 0x7) == 2 ? 4 : 7;
2156 main_instr
.add_bits
|= (tag
& 0x7) << 17;
2157 instrs
[idx
] = main_instr
;
2158 consts
[0] |= (bits(words
[2], 19, 32) | ((uint64_t) words
[3] << 13)) << 19;
2160 num_instrs
= idx
+ 1;
2165 unsigned idx
= stop
? 4 : 1;
2166 main_instr
.add_bits
|= (tag
& 0x7) << 17;
2167 instrs
[idx
] = main_instr
;
2168 instrs
[idx
+ 1].fma_bits
|= bits(words
[3], 22, 32);
2169 instrs
[idx
+ 1].reg_bits
= bits(words
[2], 19, 32) | (bits(words
[3], 0, 22) << (32 - 19));
2173 // only constants can come after this
2177 header_bits
= bits(words
[2], 19, 32) | ((uint64_t) words
[3] << (32 - 19));
2178 main_instr
.add_bits
|= (tag
& 0x7) << 17;
2179 instrs
[0] = main_instr
;
2183 unsigned pos
= tag
& 0xf;
2184 // note that `pos' encodes both the total number of
2185 // instructions and the position in the constant stream,
2186 // presumably because decoded constants and instructions
2187 // share a buffer in the decoder, but we only care about
2188 // the position in the constant stream; the total number of
2189 // instructions is redundant.
2190 unsigned const_idx
= 0;
2217 fprintf(fp
, "# unknown pos 0x%x\n", pos
);
2221 if (num_consts
< const_idx
+ 2)
2222 num_consts
= const_idx
+ 2;
2224 consts
[const_idx
] = const0
;
2225 consts
[const_idx
+ 1] = const1
;
2241 fprintf(fp
, "# header: %012" PRIx64
"\n", header_bits
);
// Reinterpret the collected header bits and dump the clause header.
2244 struct bifrost_header header
;
2245 memcpy((char *) &header
, (char *) &header_bits
, sizeof(struct bifrost_header
));
2246 dump_header(fp
, header
, verbose
);
2247 if (!header
.no_end_of_shader
)
// Dump each instruction; the "next" registers wrap to instruction 0 for
// the last instruction of the clause.
2251 for (i
= 0; i
< num_instrs
; i
++) {
2252 struct bifrost_regs next_regs
;
2253 if (i
+ 1 == num_instrs
) {
2254 memcpy((char *) &next_regs
, (char *) &instrs
[0].reg_bits
,
2257 memcpy((char *) &next_regs
, (char *) &instrs
[i
+ 1].reg_bits
,
2261 dump_instr(fp
, &instrs
[i
], next_regs
, consts
, header
.datareg
, offset
, verbose
);
// Print each 64-bit constant as two 32-bit halves.
2266 for (unsigned i
= 0; i
< num_consts
; i
++) {
2267 fprintf(fp
, "# const%d: %08" PRIx64
"\n", 2 * i
, consts
[i
] & 0xffffffff);
2268 fprintf(fp
, "# const%d: %08" PRIx64
"\n", 2 * i
+ 1, consts
[i
] >> 32);
2274 void disassemble_bifrost(FILE *fp
, uint8_t *code
, size_t size
, bool verbose
)
2276 uint32_t *words
= (uint32_t *) code
;
2277 uint32_t *words_end
= words
+ (size
/ 4);
2278 // used for displaying branch targets
2279 unsigned offset
= 0;
2280 while (words
!= words_end
) {
2281 // we don't know what the program-end bit is quite yet, so for now just
2282 // assume that an all-0 quadword is padding
2283 uint32_t zero
[4] = {};
2284 if (memcmp(words
, zero
, 4 * sizeof(uint32_t)) == 0)
2286 fprintf(fp
, "clause_%d:\n", offset
);
2288 if (dump_clause(fp
, words
, &size
, offset
, verbose
) == true) {