5 * Copyright (c) 2013 Connor Abbott (connor@abbott.cx)
6 * Copyright (c) 2018 Alyssa Rosenzweig (alyssa@rosenzweig.io)
8 * Permission is hereby granted, free of charge, to any person obtaining a copy
9 * of this software and associated documentation files (the "Software"), to deal
10 * in the Software without restriction, including without limitation the rights
11 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
12 * copies of the Software, and to permit persons to whom the Software is
13 * furnished to do so, subject to the following conditions:
15 * The above copyright notice and this permission notice shall be included in
16 * all copies or substantial portions of the Software.
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
21 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
23 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
35 #include "midgard_ops.h"
36 #include "midgard_quirks.h"
37 #include "disassemble.h"
39 #include "util/bitscan.h"
40 #include "util/half_float.h"
41 #include "util/u_math.h"
43 #define DEFINE_CASE(define, str) case define: { fprintf(fp, str); break; }
45 static unsigned *midg_tags
;
46 static bool is_instruction_int
= false;
50 static struct midgard_disasm_stats midg_stats
;
/* Transform an expanded writemask (duplicated 8-bit format) into its condensed
 * form (one bit per component) */

static inline unsigned
condense_writemask(unsigned expanded_mask,
                   unsigned bits_per_component)
{
        if (bits_per_component == 8)
                unreachable("XXX TODO: sort out how 8-bit constant encoding works");

        /* Each component spans a fixed number of 16-bit slots in the expanded
         * mask; sampling the first slot of each component is sufficient */
        unsigned slots_per_component = bits_per_component / 16;
        unsigned max_comp = (16 * 8) / bits_per_component;
        unsigned condensed_mask = 0;

        for (unsigned i = 0; i < max_comp; ++i) {
                if (expanded_mask & (1 << (i * slots_per_component)))
                        condensed_mask |= (1 << i);
        }

        return condensed_mask;
}
75 print_alu_opcode(FILE *fp
, midgard_alu_op op
)
79 if (alu_opcode_props
[op
].name
) {
80 fprintf(fp
, "%s", alu_opcode_props
[op
].name
);
82 int_op
= midgard_is_integer_op(op
);
84 fprintf(fp
, "alu_op_%02X", op
);
86 /* For constant analysis */
87 is_instruction_int
= int_op
;
91 print_ld_st_opcode(FILE *fp
, midgard_load_store_op op
)
93 if (load_store_opcode_props
[op
].name
)
94 fprintf(fp
, "%s", load_store_opcode_props
[op
].name
);
96 fprintf(fp
, "ldst_op_%02X", op
);
99 static bool is_embedded_constant_half
= false;
100 static bool is_embedded_constant_int
= false;
103 prefix_for_bits(unsigned bits
)
117 /* For static analysis to ensure all registers are written at least once before
118 * use along the source code path (TODO: does this break down for complex CF?)
121 uint16_t midg_ever_written
= 0;
124 print_reg(FILE *fp
, unsigned reg
, unsigned bits
)
126 /* Perform basic static analysis for expanding constants correctly */
129 is_embedded_constant_int
= is_instruction_int
;
130 is_embedded_constant_half
= (bits
< 32);
133 unsigned uniform_reg
= 23 - reg
;
134 bool is_uniform
= false;
136 /* For r8-r15, it could be a work or uniform. We distinguish based on
137 * the fact work registers are ALWAYS written before use, but uniform
138 * registers are NEVER written before use. */
140 if ((reg
>= 8 && reg
< 16) && !(midg_ever_written
& (1 << reg
)))
143 /* r16-r23 are always uniform */
145 if (reg
>= 16 && reg
<= 23)
148 /* Update the uniform count appropriately */
151 midg_stats
.uniform_count
=
152 MAX2(uniform_reg
+ 1, midg_stats
.uniform_count
);
154 char prefix
= prefix_for_bits(bits
);
159 fprintf(fp
, "r%u", reg
);
162 static char *outmod_names_float
[4] = {
169 static char *outmod_names_int
[4] = {
176 static char *srcmod_names_int
[4] = {
184 print_outmod(FILE *fp
, unsigned outmod
, bool is_int
)
186 fprintf(fp
, "%s", is_int
? outmod_names_int
[outmod
] :
187 outmod_names_float
[outmod
]);
/* Hex-dump one 128-bit bundle as four comma-separated 32-bit words */
static void
print_quad_word(FILE *fp, uint32_t *words, unsigned tabs)
{
        unsigned i;

        for (i = 0; i < 4; i++)
                fprintf(fp, "0x%08X%s ", words[i], i == 3 ? "" : ",");

        fprintf(fp, "\n");
}
/* Component alphabet: x-w name the low vec4, e-p extend to a full vec16 */
static const char components[16] = "xyzwefghijklmnop";

/* Helper to print 4 chars of a swizzle */
static void
print_swizzle_helper(FILE *fp, unsigned swizzle, unsigned offset)
{
        for (unsigned i = 0; i < 4; ++i) {
                /* Two bits of swizzle per component, biased by the caller's
                 * offset into the component alphabet */
                unsigned c = (swizzle >> (i * 2)) & 3;
                c += offset;

                fprintf(fp, "%c", components[c]);
        }
}
214 /* Helper to print 8 chars of a swizzle, duplicating over */
216 print_swizzle_helper_8(FILE *fp
, unsigned swizzle
, bool upper
)
218 for (unsigned i
= 0; i
< 4; ++i
) {
219 unsigned c
= (swizzle
>> (i
* 2)) & 3;
222 fprintf(fp
, "%c%c", components
[c
], components
[c
+1]);
227 print_swizzle_vec16(FILE *fp
, unsigned swizzle
, bool rep_high
, bool rep_low
,
228 midgard_dest_override override
)
232 if (override
== midgard_dest_override_upper
) {
234 fprintf(fp
, " /* rep_high */ ");
236 fprintf(fp
, " /* rep_low */ ");
238 if (!rep_high
&& rep_low
)
239 print_swizzle_helper_8(fp
, swizzle
, true);
241 print_swizzle_helper_8(fp
, swizzle
, false);
243 print_swizzle_helper_8(fp
, swizzle
, rep_high
& 1);
244 print_swizzle_helper_8(fp
, swizzle
, !(rep_low
& 1));
/* Print a vec8 (16-bit mode) swizzle as two groups of four components */
static void
print_swizzle_vec8(FILE *fp, unsigned swizzle, bool rep_high, bool rep_low, bool half)
{
        fprintf(fp, ".");

        /* TODO: Is it possible to unify half/full? */

        if (half) {
                print_swizzle_helper(fp, swizzle, (rep_low * 8));
                print_swizzle_helper(fp, swizzle, (rep_low * 8) + !rep_high * 4);
        } else {
                print_swizzle_helper(fp, swizzle, rep_high * 4);
                print_swizzle_helper(fp, swizzle, !rep_low * 4);
        }
}
/* Print a vec4 (32-bit mode) swizzle; the identity swizzle prints nothing */
static void
print_swizzle_vec4(FILE *fp, unsigned swizzle, bool rep_high, bool rep_low, bool half)
{
        if (rep_high)
                fprintf(fp, " /* rep_high */ ");

        if (!half && rep_low)
                fprintf(fp, " /* rep_low */ ");

        if (swizzle == 0xE4 && !half) return; /* xyzw */

        fprintf(fp, ".");
        print_swizzle_helper(fp, swizzle, rep_low * 4);
}
279 print_swizzle_vec2(FILE *fp
, unsigned swizzle
, bool rep_high
, bool rep_low
, bool half
)
281 char *alphabet
= "XY";
284 alphabet
= rep_low
? "zw" : "xy";
286 fprintf(fp
, " /* rep_low */ ");
289 fprintf(fp
, " /* rep_high */ ");
291 if (swizzle
== 0xE4 && !half
) return; /* XY */
295 for (unsigned i
= 0; i
< 4; i
+= 2) {
296 unsigned a
= (swizzle
>> (i
* 2)) & 3;
297 unsigned b
= (swizzle
>> ((i
+1) * 2)) & 3;
299 /* Normally we're adjacent, but if there's an issue, don't make
303 fprintf(fp
, "%c", alphabet
[a
>> 1]);
305 fprintf(fp
, "[%c%c]", components
[a
], components
[b
]);
310 bits_for_mode(midgard_reg_mode mode
)
313 case midgard_reg_mode_8
:
315 case midgard_reg_mode_16
:
317 case midgard_reg_mode_32
:
319 case midgard_reg_mode_64
:
322 unreachable("Invalid reg mode");
328 bits_for_mode_halved(midgard_reg_mode mode
, bool half
)
330 unsigned bits
= bits_for_mode(mode
);
339 print_scalar_constant(FILE *fp
, unsigned src_binary
,
340 const midgard_constants
*consts
,
341 midgard_scalar_alu
*alu
)
343 midgard_scalar_alu_src
*src
= (midgard_scalar_alu_src
*)&src_binary
;
344 assert(consts
!= NULL
);
347 mir_print_constant_component(fp
, consts
, src
->component
,
349 midgard_reg_mode_32
: midgard_reg_mode_16
,
350 false, src
->mod
, alu
->op
);
354 print_vector_constants(FILE *fp
, unsigned src_binary
,
355 const midgard_constants
*consts
,
356 midgard_vector_alu
*alu
)
358 midgard_vector_alu_src
*src
= (midgard_vector_alu_src
*)&src_binary
;
359 unsigned bits
= bits_for_mode_halved(alu
->reg_mode
, src
->half
);
360 unsigned max_comp
= (sizeof(*consts
) * 8) / bits
;
361 unsigned comp_mask
, num_comp
= 0;
364 assert(max_comp
<= 16);
366 comp_mask
= effective_writemask(alu
, condense_writemask(alu
->mask
, bits
));
367 num_comp
= util_bitcount(comp_mask
);
372 for (unsigned i
= 0; i
< max_comp
; ++i
) {
373 if (!(comp_mask
& (1 << i
))) continue;
375 unsigned c
= (src
->swizzle
>> (i
* 2)) & 3;
377 if (bits
== 16 && !src
->half
) {
379 c
+= (src
->rep_high
* 4);
381 c
+= (!src
->rep_low
* 4);
382 } else if (bits
== 32 && !src
->half
) {
384 } else if (bits
== 8) {
386 unsigned index
= (i
>> 1) & 3;
387 unsigned base
= (src
->swizzle
>> (index
* 2)) & 3;
391 c
+= (src
->rep_high
) * 8;
393 c
+= (!src
->rep_low
) * 8;
395 /* We work on twos, actually */
399 printf(" (%d%d%d)", src
->rep_low
, src
->rep_high
, src
->half
);
407 mir_print_constant_component(fp
, consts
, c
, alu
->reg_mode
,
408 src
->half
, src
->mod
, alu
->op
);
416 print_srcmod(FILE *fp
, bool is_int
, unsigned mod
, bool scalar
)
418 /* Modifiers change meaning depending on the op's context */
420 midgard_int_mod int_mod
= mod
;
423 if (scalar
&& mod
== 2) {
427 fprintf(fp
, "%s", srcmod_names_int
[int_mod
]);
429 if (mod
& MIDGARD_FLOAT_MOD_NEG
)
432 if (mod
& MIDGARD_FLOAT_MOD_ABS
)
438 print_srcmod_end(FILE *fp
, bool is_int
, unsigned mod
, unsigned bits
)
440 /* Since we wrapped with a function-looking thing */
442 if (is_int
&& mod
== midgard_int_shift
)
443 fprintf(fp
, ") << %u", bits
);
444 else if ((is_int
&& (mod
!= midgard_int_normal
))
445 || (!is_int
&& mod
& MIDGARD_FLOAT_MOD_ABS
))
450 print_vector_src(FILE *fp
, unsigned src_binary
,
451 midgard_reg_mode mode
, unsigned reg
,
452 midgard_dest_override override
, bool is_int
)
454 midgard_vector_alu_src
*src
= (midgard_vector_alu_src
*)&src_binary
;
455 print_srcmod(fp
, is_int
, src
->mod
, false);
458 unsigned bits
= bits_for_mode_halved(mode
, src
->half
);
459 print_reg(fp
, reg
, bits
);
461 /* When the source was stepped down via `half`, rep_low means "higher
462 * half" and rep_high is never seen. When it's not native,
463 * rep_low/rep_high are for, well, replication */
465 if (mode
== midgard_reg_mode_8
) {
467 print_swizzle_vec16(fp
, src
->swizzle
, src
->rep_high
, src
->rep_low
, override
);
468 } else if (mode
== midgard_reg_mode_16
) {
469 print_swizzle_vec8(fp
, src
->swizzle
, src
->rep_high
, src
->rep_low
, src
->half
);
470 } else if (mode
== midgard_reg_mode_32
) {
471 print_swizzle_vec4(fp
, src
->swizzle
, src
->rep_high
, src
->rep_low
, src
->half
);
472 } else if (mode
== midgard_reg_mode_64
) {
473 print_swizzle_vec2(fp
, src
->swizzle
, src
->rep_high
, src
->rep_low
, src
->half
);
476 print_srcmod_end(fp
, is_int
, src
->mod
, bits
);
/* Reassemble a vector inline immediate from the register field (upper bits)
 * and the split immediate bits */
static uint16_t
decode_vector_imm(unsigned src2_reg, unsigned imm)
{
        uint16_t ret = (src2_reg << 11)
                       | ((imm & 0x7) << 8)
                       | ((imm >> 3) & 0xFF);

        return ret;
}
490 print_immediate(FILE *fp
, uint16_t imm
)
492 if (is_instruction_int
)
493 fprintf(fp
, "#%u", imm
);
495 fprintf(fp
, "#%g", _mesa_half_to_float(imm
));
499 update_dest(unsigned reg
)
501 /* We should record writes as marking this as a work register. Store
502 * the max register in work_count; we'll add one at the end */
505 midg_stats
.work_count
= MAX2(reg
, midg_stats
.work_count
);
506 midg_ever_written
|= (1 << reg
);
511 print_dest(FILE *fp
, unsigned reg
, midgard_reg_mode mode
, midgard_dest_override override
)
513 /* Depending on the mode and override, we determine the type of
514 * destination addressed. Absent an override, we address just the
515 * type of the operation itself */
517 unsigned bits
= bits_for_mode(mode
);
519 if (override
!= midgard_dest_override_none
)
523 print_reg(fp
, reg
, bits
);
527 print_mask_vec16(FILE *fp
, uint8_t mask
, midgard_dest_override override
)
531 for (unsigned i
= 0; i
< 8; i
++) {
535 components
[i
*2 + 1]);
539 /* For 16-bit+ masks, we read off from the 8-bit mask field. For 16-bit (vec8),
540 * it's just one bit per channel, easy peasy. For 32-bit (vec4), it's one bit
541 * per channel with one duplicate bit in the middle. For 64-bit (vec2), it's
542 * one-bit per channel with _3_ duplicate bits in the middle. Basically, just
543 * subdividing the 128-bit word in 16-bit increments. For 64-bit, we uppercase
544 * the mask to make it obvious what happened */
547 print_mask(FILE *fp
, uint8_t mask
, unsigned bits
, midgard_dest_override override
)
550 print_mask_vec16(fp
, mask
, override
);
554 /* Skip 'complete' masks */
556 if (override
== midgard_dest_override_none
)
557 if (bits
>= 32 && mask
== 0xFF) return;
561 unsigned skip
= (bits
/ 16);
562 bool uppercase
= bits
> 32;
563 bool tripped
= false;
565 /* To apply an upper destination override, we "shift" the alphabet.
566 * E.g. with an upper override on 32-bit, instead of xyzw, print efgh.
567 * For upper 16-bit, instead of xyzwefgh, print ijklmnop */
569 const char *alphabet
= components
;
571 if (override
== midgard_dest_override_upper
)
572 alphabet
+= (128 / bits
);
574 for (unsigned i
= 0; i
< 8; i
+= skip
) {
575 bool a
= (mask
& (1 << i
)) != 0;
577 for (unsigned j
= 1; j
< skip
; ++j
) {
578 bool dupe
= (mask
& (1 << (i
+ j
))) != 0;
579 tripped
|= (dupe
!= a
);
583 char c
= alphabet
[i
/ skip
];
588 fprintf(fp
, "%c", c
);
593 fprintf(fp
, " /* %X */", mask
);
596 /* Prints the 4-bit masks found in texture and load/store ops, as opposed to
597 * the 8-bit masks found in (vector) ALU ops. Supports texture-style 16-bit
598 * mode as well, but not load/store-style 16-bit mode. */
601 print_mask_4(FILE *fp
, unsigned mask
, bool upper
)
612 for (unsigned i
= 0; i
< 4; ++i
) {
613 bool a
= (mask
& (1 << i
)) != 0;
615 fprintf(fp
, "%c", components
[i
+ (upper
? 4 : 0)]);
620 print_vector_field(FILE *fp
, const char *name
, uint16_t *words
, uint16_t reg_word
,
621 const midgard_constants
*consts
, unsigned tabs
)
623 midgard_reg_info
*reg_info
= (midgard_reg_info
*)®_word
;
624 midgard_vector_alu
*alu_field
= (midgard_vector_alu
*) words
;
625 midgard_reg_mode mode
= alu_field
->reg_mode
;
626 unsigned override
= alu_field
->dest_override
;
628 /* For now, prefix instruction names with their unit, until we
629 * understand how this works on a deeper level */
630 fprintf(fp
, "%s.", name
);
632 print_alu_opcode(fp
, alu_field
->op
);
634 /* Postfix with the size to disambiguate if necessary */
635 char postfix
= prefix_for_bits(bits_for_mode(mode
));
636 bool size_ambiguous
= override
!= midgard_dest_override_none
;
639 fprintf(fp
, "%c", postfix
? postfix
: 'r');
641 /* Print the outmod, if there is one */
642 print_outmod(fp
, alu_field
->outmod
,
643 midgard_is_integer_out_op(alu_field
->op
));
647 /* Mask denoting status of 8-lanes */
648 uint8_t mask
= alu_field
->mask
;
650 /* First, print the destination */
651 print_dest(fp
, reg_info
->out_reg
, mode
, alu_field
->dest_override
);
653 if (override
!= midgard_dest_override_none
) {
654 bool modeable
= (mode
!= midgard_reg_mode_8
);
655 bool known
= override
!= 0x3; /* Unused value */
657 if (!(modeable
&& known
))
658 fprintf(fp
, "/* do%u */ ", override
);
661 /* Instructions like fdot4 do *not* replicate, ensure the
662 * mask is of only a single component */
664 unsigned rep
= GET_CHANNEL_COUNT(alu_opcode_props
[alu_field
->op
].props
);
667 unsigned comp_mask
= condense_writemask(mask
, bits_for_mode(mode
));
668 unsigned num_comp
= util_bitcount(comp_mask
);
670 fprintf(fp
, "/* err too many components */");
672 print_mask(fp
, mask
, bits_for_mode(mode
), override
);
676 bool is_int
= midgard_is_integer_op(alu_field
->op
);
678 if (reg_info
->src1_reg
== 26)
679 print_vector_constants(fp
, alu_field
->src1
, consts
, alu_field
);
681 print_vector_src(fp
, alu_field
->src1
, mode
, reg_info
->src1_reg
, override
, is_int
);
685 if (reg_info
->src2_imm
) {
686 uint16_t imm
= decode_vector_imm(reg_info
->src2_reg
, alu_field
->src2
>> 2);
687 print_immediate(fp
, imm
);
688 } else if (reg_info
->src2_reg
== 26) {
689 print_vector_constants(fp
, alu_field
->src2
, consts
, alu_field
);
691 print_vector_src(fp
, alu_field
->src2
, mode
,
692 reg_info
->src2_reg
, override
, is_int
);
695 midg_stats
.instruction_count
++;
700 print_scalar_src(FILE *fp
, bool is_int
, unsigned src_binary
, unsigned reg
)
702 midgard_scalar_alu_src
*src
= (midgard_scalar_alu_src
*)&src_binary
;
704 print_srcmod(fp
, is_int
, src
->mod
, true);
705 print_reg(fp
, reg
, src
->full
? 32 : 16);
707 unsigned c
= src
->component
;
710 assert((c
& 1) == 0);
714 fprintf(fp
, ".%c", components
[c
]);
716 print_srcmod_end(fp
, is_int
, src
->mod
, src
->full
? 32 : 16);
/* Reassemble a scalar inline immediate from its scattered encoding */
static uint16_t
decode_scalar_imm(unsigned src2_reg, unsigned imm)
{
        uint16_t ret = (src2_reg << 11)
                       | ((imm & 3) << 9)
                       | ((imm & 4) << 6)
                       | ((imm & 0x38) << 2)
                       | (imm >> 6);

        return ret;
}
732 print_scalar_field(FILE *fp
, const char *name
, uint16_t *words
, uint16_t reg_word
,
733 const midgard_constants
*consts
, unsigned tabs
)
735 midgard_reg_info
*reg_info
= (midgard_reg_info
*)®_word
;
736 midgard_scalar_alu
*alu_field
= (midgard_scalar_alu
*) words
;
738 if (alu_field
->unknown
)
739 fprintf(fp
, "scalar ALU unknown bit set\n");
741 fprintf(fp
, "%s.", name
);
742 print_alu_opcode(fp
, alu_field
->op
);
743 print_outmod(fp
, alu_field
->outmod
,
744 midgard_is_integer_out_op(alu_field
->op
));
747 bool full
= alu_field
->output_full
;
748 update_dest(reg_info
->out_reg
);
749 print_reg(fp
, reg_info
->out_reg
, full
? 32 : 16);
750 unsigned c
= alu_field
->output_component
;
751 bool is_int
= midgard_is_integer_op(alu_field
->op
);
754 assert((c
& 1) == 0);
758 fprintf(fp
, ".%c, ", components
[c
]);
760 if (reg_info
->src1_reg
== 26)
761 print_scalar_constant(fp
, alu_field
->src1
, consts
, alu_field
);
763 print_scalar_src(fp
, is_int
, alu_field
->src1
, reg_info
->src1_reg
);
767 if (reg_info
->src2_imm
) {
768 uint16_t imm
= decode_scalar_imm(reg_info
->src2_reg
,
770 print_immediate(fp
, imm
);
771 } else if (reg_info
->src2_reg
== 26) {
772 print_scalar_constant(fp
, alu_field
->src2
, consts
, alu_field
);
774 print_scalar_src(fp
, is_int
, alu_field
->src2
, reg_info
->src2_reg
);
776 midg_stats
.instruction_count
++;
781 print_branch_op(FILE *fp
, unsigned op
)
784 case midgard_jmp_writeout_op_branch_uncond
:
785 fprintf(fp
, "uncond.");
788 case midgard_jmp_writeout_op_branch_cond
:
789 fprintf(fp
, "cond.");
792 case midgard_jmp_writeout_op_writeout
:
793 fprintf(fp
, "write.");
796 case midgard_jmp_writeout_op_tilebuffer_pending
:
797 fprintf(fp
, "tilebuffer.");
800 case midgard_jmp_writeout_op_discard
:
801 fprintf(fp
, "discard.");
805 fprintf(fp
, "unk%u.", op
);
811 print_branch_cond(FILE *fp
, int cond
)
814 case midgard_condition_write0
:
815 fprintf(fp
, "write0");
818 case midgard_condition_false
:
819 fprintf(fp
, "false");
822 case midgard_condition_true
:
826 case midgard_condition_always
:
827 fprintf(fp
, "always");
831 fprintf(fp
, "unk%X", cond
);
837 print_compact_branch_writeout_field(FILE *fp
, uint16_t word
)
839 midgard_jmp_writeout_op op
= word
& 0x7;
840 midg_stats
.instruction_count
++;
843 case midgard_jmp_writeout_op_branch_uncond
: {
844 midgard_branch_uncond br_uncond
;
845 memcpy((char *) &br_uncond
, (char *) &word
, sizeof(br_uncond
));
846 fprintf(fp
, "br.uncond ");
848 if (br_uncond
.unknown
!= 1)
849 fprintf(fp
, "unknown:%u, ", br_uncond
.unknown
);
851 if (br_uncond
.offset
>= 0)
854 fprintf(fp
, "%d -> %s", br_uncond
.offset
,
855 midgard_tag_props
[br_uncond
.dest_tag
].name
);
858 return br_uncond
.offset
>= 0;
861 case midgard_jmp_writeout_op_branch_cond
:
862 case midgard_jmp_writeout_op_writeout
:
863 case midgard_jmp_writeout_op_discard
:
865 midgard_branch_cond br_cond
;
866 memcpy((char *) &br_cond
, (char *) &word
, sizeof(br_cond
));
870 print_branch_op(fp
, br_cond
.op
);
871 print_branch_cond(fp
, br_cond
.cond
);
875 if (br_cond
.offset
>= 0)
878 fprintf(fp
, "%d -> %s", br_cond
.offset
,
879 midgard_tag_props
[br_cond
.dest_tag
].name
);
882 return br_cond
.offset
>= 0;
890 print_extended_branch_writeout_field(FILE *fp
, uint8_t *words
, unsigned next
)
892 midgard_branch_extended br
;
893 memcpy((char *) &br
, (char *) words
, sizeof(br
));
897 print_branch_op(fp
, br
.op
);
899 /* Condition codes are a LUT in the general case, but simply repeated 8 times for single-channel conditions.. Check this. */
901 bool single_channel
= true;
903 for (unsigned i
= 0; i
< 16; i
+= 2) {
904 single_channel
&= (((br
.cond
>> i
) & 0x3) == (br
.cond
& 0x3));
908 print_branch_cond(fp
, br
.cond
& 0x3);
910 fprintf(fp
, "lut%X", br
.cond
);
913 fprintf(fp
, ".unknown%u", br
.unknown
);
920 fprintf(fp
, "%d -> %s\n", br
.offset
,
921 midgard_tag_props
[br
.dest_tag
].name
);
923 unsigned I
= next
+ br
.offset
* 4;
925 if (midg_tags
[I
] && midg_tags
[I
] != br
.dest_tag
) {
926 fprintf(fp
, "\t/* XXX TAG ERROR: jumping to %s but tagged %s \n",
927 midgard_tag_props
[br
.dest_tag
].name
,
928 midgard_tag_props
[midg_tags
[I
]].name
);
931 midg_tags
[I
] = br
.dest_tag
;
933 midg_stats
.instruction_count
++;
934 return br
.offset
>= 0;
/* Count enabled ALU fields; the vmul/sadd/vadd/smul/lut units are flagged at
 * bits 17, 19, 21, 23 and 25 of the control word respectively */
static unsigned
num_alu_fields_enabled(uint32_t control_word)
{
        static const unsigned unit_bits[] = { 17, 19, 21, 23, 25 };
        unsigned ret = 0;

        for (unsigned i = 0; i < 5; ++i) {
                if ((control_word >> unit_bits[i]) & 1)
                        ret++;
        }

        return ret;
}
961 print_alu_word(FILE *fp
, uint32_t *words
, unsigned num_quad_words
,
962 unsigned tabs
, unsigned next
)
964 uint32_t control_word
= words
[0];
965 uint16_t *beginning_ptr
= (uint16_t *)(words
+ 1);
966 unsigned num_fields
= num_alu_fields_enabled(control_word
);
967 uint16_t *word_ptr
= beginning_ptr
+ num_fields
;
968 unsigned num_words
= 2 + num_fields
;
969 const midgard_constants
*consts
= NULL
;
970 bool branch_forward
= false;
972 if ((control_word
>> 17) & 1)
975 if ((control_word
>> 19) & 1)
978 if ((control_word
>> 21) & 1)
981 if ((control_word
>> 23) & 1)
984 if ((control_word
>> 25) & 1)
987 if ((control_word
>> 26) & 1)
990 if ((control_word
>> 27) & 1)
993 if (num_quad_words
> (num_words
+ 7) / 8) {
994 assert(num_quad_words
== (num_words
+ 15) / 8);
995 //Assume that the extra quadword is constants
996 consts
= (midgard_constants
*)(words
+ (4 * num_quad_words
- 4));
999 if ((control_word
>> 16) & 1)
1000 fprintf(fp
, "unknown bit 16 enabled\n");
1002 if ((control_word
>> 17) & 1) {
1003 print_vector_field(fp
, "vmul", word_ptr
, *beginning_ptr
, consts
, tabs
);
1008 if ((control_word
>> 18) & 1)
1009 fprintf(fp
, "unknown bit 18 enabled\n");
1011 if ((control_word
>> 19) & 1) {
1012 print_scalar_field(fp
, "sadd", word_ptr
, *beginning_ptr
, consts
, tabs
);
1017 if ((control_word
>> 20) & 1)
1018 fprintf(fp
, "unknown bit 20 enabled\n");
1020 if ((control_word
>> 21) & 1) {
1021 print_vector_field(fp
, "vadd", word_ptr
, *beginning_ptr
, consts
, tabs
);
1026 if ((control_word
>> 22) & 1)
1027 fprintf(fp
, "unknown bit 22 enabled\n");
1029 if ((control_word
>> 23) & 1) {
1030 print_scalar_field(fp
, "smul", word_ptr
, *beginning_ptr
, consts
, tabs
);
1035 if ((control_word
>> 24) & 1)
1036 fprintf(fp
, "unknown bit 24 enabled\n");
1038 if ((control_word
>> 25) & 1) {
1039 print_vector_field(fp
, "lut", word_ptr
, *beginning_ptr
, consts
, tabs
);
1043 if ((control_word
>> 26) & 1) {
1044 branch_forward
|= print_compact_branch_writeout_field(fp
, *word_ptr
);
1048 if ((control_word
>> 27) & 1) {
1049 branch_forward
|= print_extended_branch_writeout_field(fp
, (uint8_t *) word_ptr
, next
);
1054 fprintf(fp
, "uconstants 0x%X, 0x%X, 0x%X, 0x%X\n",
1055 consts
->u32
[0], consts
->u32
[1],
1056 consts
->u32
[2], consts
->u32
[3]);
1058 return branch_forward
;
1062 print_varying_parameters(FILE *fp
, midgard_load_store_word
*word
)
1064 midgard_varying_parameter param
;
1065 unsigned v
= word
->varying_parameters
;
1066 memcpy(¶m
, &v
, sizeof(param
));
1068 if (param
.is_varying
) {
1069 /* If a varying, there are qualifiers */
1071 fprintf(fp
, ".flat");
1073 if (param
.interpolation
!= midgard_interp_default
) {
1074 if (param
.interpolation
== midgard_interp_centroid
)
1075 fprintf(fp
, ".centroid");
1077 fprintf(fp
, ".interp%d", param
.interpolation
);
1080 if (param
.modifier
!= midgard_varying_mod_none
) {
1081 if (param
.modifier
== midgard_varying_mod_perspective_w
)
1082 fprintf(fp
, ".perspectivew");
1083 else if (param
.modifier
== midgard_varying_mod_perspective_z
)
1084 fprintf(fp
, ".perspectivez");
1086 fprintf(fp
, ".mod%d", param
.modifier
);
1088 } else if (param
.flat
|| param
.interpolation
|| param
.modifier
) {
1089 fprintf(fp
, " /* is_varying not set but varying metadata attached */");
1092 if (param
.zero0
|| param
.zero1
|| param
.zero2
)
1093 fprintf(fp
, " /* zero tripped, %u %u %u */ ", param
.zero0
, param
.zero1
, param
.zero2
);
1097 is_op_varying(unsigned op
)
1100 case midgard_op_st_vary_16
:
1101 case midgard_op_st_vary_32
:
1102 case midgard_op_st_vary_32i
:
1103 case midgard_op_st_vary_32u
:
1104 case midgard_op_ld_vary_16
:
1105 case midgard_op_ld_vary_32
:
1106 case midgard_op_ld_vary_32i
:
1107 case midgard_op_ld_vary_32u
:
1115 is_op_attribute(unsigned op
)
1118 case midgard_op_ld_attr_16
:
1119 case midgard_op_ld_attr_32
:
1120 case midgard_op_ld_attr_32i
:
1121 case midgard_op_ld_attr_32u
:
1129 print_load_store_arg(FILE *fp
, uint8_t arg
, unsigned index
)
1131 /* Try to interpret as a register */
1132 midgard_ldst_register_select sel
;
1133 memcpy(&sel
, &arg
, sizeof(arg
));
1135 /* If unknown is set, we're not sure what this is or how to
1136 * interpret it. But if it's zero, we get it. */
1139 fprintf(fp
, "0x%02X", arg
);
1143 unsigned reg
= REGISTER_LDST_BASE
+ sel
.select
;
1144 char comp
= components
[sel
.component
];
1146 fprintf(fp
, "r%u.%c", reg
, comp
);
1148 /* Only print a shift if it's non-zero. Shifts only make sense for the
1149 * second index. For the first, we're not sure what it means yet */
1153 fprintf(fp
, " << %u", sel
.shift
);
1155 fprintf(fp
, " /* %X */", sel
.shift
);
/* Bump a resource high-water mark. Stats poisoned to a negative sentinel
 * (indirect access seen) are left alone. */
static void
update_stats(signed *stat, unsigned address)
{
        if (*stat >= 0)
                *stat = MAX2(*stat, address + 1);
}
1167 print_load_store_instr(FILE *fp
, uint64_t data
,
1170 midgard_load_store_word
*word
= (midgard_load_store_word
*) &data
;
1172 print_ld_st_opcode(fp
, word
->op
);
1174 unsigned address
= word
->address
;
1176 if (is_op_varying(word
->op
)) {
1177 print_varying_parameters(fp
, word
);
1179 /* Do some analysis: check if direct access */
1181 if ((word
->arg_2
== 0x1E) && midg_stats
.varying_count
>= 0)
1182 update_stats(&midg_stats
.varying_count
, address
);
1184 midg_stats
.varying_count
= -16;
1185 } else if (is_op_attribute(word
->op
)) {
1186 if ((word
->arg_2
== 0x1E) && midg_stats
.attribute_count
>= 0)
1187 update_stats(&midg_stats
.attribute_count
, address
);
1189 midg_stats
.attribute_count
= -16;
1192 fprintf(fp
, " r%u", word
->reg
+ (OP_IS_STORE(word
->op
) ? 26 : 0));
1193 print_mask_4(fp
, word
->mask
, false);
1195 if (!OP_IS_STORE(word
->op
))
1196 update_dest(word
->reg
);
1198 bool is_ubo
= OP_IS_UBO_READ(word
->op
);
1201 /* UBOs use their own addressing scheme */
1203 int lo
= word
->varying_parameters
>> 7;
1204 int hi
= word
->address
;
1206 /* TODO: Combine fields logically */
1207 address
= (hi
<< 3) | lo
;
1210 fprintf(fp
, ", %u", address
);
1212 print_swizzle_vec4(fp
, word
->swizzle
, false, false, false);
1217 fprintf(fp
, "ubo%u", word
->arg_1
);
1218 update_stats(&midg_stats
.uniform_buffer_count
, word
->arg_1
);
1220 print_load_store_arg(fp
, word
->arg_1
, 0);
1223 print_load_store_arg(fp
, word
->arg_2
, 1);
1224 fprintf(fp
, " /* %X */\n", word
->varying_parameters
);
1226 midg_stats
.instruction_count
++;
1230 print_load_store_word(FILE *fp
, uint32_t *word
, unsigned tabs
)
1232 midgard_load_store
*load_store
= (midgard_load_store
*) word
;
1234 if (load_store
->word1
!= 3) {
1235 print_load_store_instr(fp
, load_store
->word1
, tabs
);
1238 if (load_store
->word2
!= 3) {
1239 print_load_store_instr(fp
, load_store
->word2
, tabs
);
1244 print_texture_reg_select(FILE *fp
, uint8_t u
, unsigned base
)
1246 midgard_tex_register_select sel
;
1247 memcpy(&sel
, &u
, sizeof(u
));
1252 fprintf(fp
, "r%u", base
+ sel
.select
);
1254 unsigned component
= sel
.component
;
1256 /* Use the upper half in half-reg mode */
1262 fprintf(fp
, ".%c", components
[component
]);
1264 assert(sel
.zero
== 0);
1268 print_texture_format(FILE *fp
, int format
)
1270 /* Act like a modifier */
1274 DEFINE_CASE(MALI_TEX_1D
, "1d");
1275 DEFINE_CASE(MALI_TEX_2D
, "2d");
1276 DEFINE_CASE(MALI_TEX_3D
, "3d");
1277 DEFINE_CASE(MALI_TEX_CUBE
, "cube");
1280 unreachable("Bad format");
1285 midgard_op_has_helpers(unsigned op
, bool gather
)
1291 case TEXTURE_OP_NORMAL
:
1292 case TEXTURE_OP_DFDX
:
1293 case TEXTURE_OP_DFDY
:
1301 print_texture_op(FILE *fp
, unsigned op
, bool gather
)
1303 /* Act like a bare name, like ESSL functions */
1306 fprintf(fp
, "textureGather");
1308 unsigned component
= op
>> 4;
1309 unsigned bottom
= op
& 0xF;
1312 fprintf(fp
, "_unk%u", bottom
);
1314 fprintf(fp
, ".%c", components
[component
]);
1319 DEFINE_CASE(TEXTURE_OP_NORMAL
, "texture");
1320 DEFINE_CASE(TEXTURE_OP_LOD
, "textureLod");
1321 DEFINE_CASE(TEXTURE_OP_TEXEL_FETCH
, "texelFetch");
1322 DEFINE_CASE(TEXTURE_OP_BARRIER
, "barrier");
1323 DEFINE_CASE(TEXTURE_OP_DFDX
, "dFdx");
1324 DEFINE_CASE(TEXTURE_OP_DFDY
, "dFdy");
1327 fprintf(fp
, "tex_%X", op
);
1333 texture_op_takes_bias(unsigned op
)
1335 return op
== TEXTURE_OP_NORMAL
;
1339 sampler_type_name(enum mali_sampler_type t
)
1342 case MALI_SAMPLER_FLOAT
:
1344 case MALI_SAMPLER_UNSIGNED
:
1346 case MALI_SAMPLER_SIGNED
:
1355 print_texture_barrier(FILE *fp
, uint32_t *word
)
1357 midgard_texture_barrier_word
*barrier
= (midgard_texture_barrier_word
*) word
;
1359 if (barrier
->type
!= TAG_TEXTURE_4_BARRIER
)
1360 fprintf(fp
, "/* barrier tag %X != tex/bar */ ", barrier
->type
);
1363 fprintf(fp
, "/* cont missing? */");
1366 fprintf(fp
, "/* last missing? */");
1369 fprintf(fp
, "/* zero1 = 0x%X */ ", barrier
->zero1
);
1372 fprintf(fp
, "/* zero2 = 0x%X */ ", barrier
->zero2
);
1375 fprintf(fp
, "/* zero3 = 0x%X */ ", barrier
->zero3
);
1378 fprintf(fp
, "/* zero4 = 0x%X */ ", barrier
->zero4
);
1381 fprintf(fp
, "/* zero4 = 0x%" PRIx64
" */ ", barrier
->zero5
);
1384 /* Control barriers are always implied, so include for obviousness */
1385 fprintf(fp
, " control");
1387 if (barrier
->buffer
)
1388 fprintf(fp
, " | buffer");
1390 if (barrier
->shared
)
1391 fprintf(fp
, " | shared");
1394 fprintf(fp
, " | stack");
1402 print_texture_word(FILE *fp
, uint32_t *word
, unsigned tabs
, unsigned in_reg_base
, unsigned out_reg_base
)
1404 midgard_texture_word
*texture
= (midgard_texture_word
*) word
;
1406 midg_stats
.helper_invocations
|=
1407 midgard_op_has_helpers(texture
->op
, texture
->is_gather
);
1409 /* Broad category of texture operation in question */
1410 print_texture_op(fp
, texture
->op
, texture
->is_gather
);
1412 /* Barriers use a dramatically different code path */
1413 if (texture
->op
== TEXTURE_OP_BARRIER
) {
1414 print_texture_barrier(fp
, word
);
1416 } else if (texture
->type
== TAG_TEXTURE_4_BARRIER
)
1417 fprintf (fp
, "/* nonbarrier had tex/bar tag */ ");
1418 else if (texture
->type
== TAG_TEXTURE_4_VTX
)
1419 fprintf (fp
, ".vtx");
1421 /* Specific format in question */
1422 print_texture_format(fp
, texture
->format
);
1424 /* Instruction "modifiers" parallel the ALU instructions. */
1426 if (texture
->shadow
)
1427 fprintf(fp
, ".shadow");
1430 fprintf(fp
, ".cont");
1433 fprintf(fp
, ".last");
1435 if (texture
->barrier_buffer
)
1436 fprintf(fp
, ".barrier_buffer /* XXX */");
1438 if (texture
->barrier_shared
)
1439 fprintf(fp
, ".barrier_shared /* XXX */");
1441 /* Output modifiers are always interpreted floatly */
1442 print_outmod(fp
, texture
->outmod
, false);
1444 fprintf(fp
, " %sr%u", texture
->out_full
? "" : "h",
1445 out_reg_base
+ texture
->out_reg_select
);
1446 print_mask_4(fp
, texture
->mask
, texture
->out_upper
);
1447 assert(!(texture
->out_full
&& texture
->out_upper
));
1450 /* Depending on whether we read from textures directly or indirectly,
1451 * we may be able to update our analysis */
1453 if (texture
->texture_register
) {
1454 fprintf(fp
, "texture[");
1455 print_texture_reg_select(fp
, texture
->texture_handle
, in_reg_base
);
1458 /* Indirect, tut tut */
1459 midg_stats
.texture_count
= -16;
1461 fprintf(fp
, "texture%u, ", texture
->texture_handle
);
1462 update_stats(&midg_stats
.texture_count
, texture
->texture_handle
);
1465 /* Print the type, GL style */
1466 fprintf(fp
, "%csampler", sampler_type_name(texture
->sampler_type
));
1468 if (texture
->sampler_register
) {
1470 print_texture_reg_select(fp
, texture
->sampler_handle
, in_reg_base
);
1473 midg_stats
.sampler_count
= -16;
1475 fprintf(fp
, "%u", texture
->sampler_handle
);
1476 update_stats(&midg_stats
.sampler_count
, texture
->sampler_handle
);
1479 print_swizzle_vec4(fp
, texture
->swizzle
, false, false, false);
1480 fprintf(fp
, ", %sr%u", texture
->in_reg_full
? "" : "h", in_reg_base
+ texture
->in_reg_select
);
1481 assert(!(texture
->in_reg_full
&& texture
->in_reg_upper
));
1483 /* TODO: integrate with swizzle */
1484 if (texture
->in_reg_upper
)
1487 print_swizzle_vec4(fp
, texture
->in_reg_swizzle
, false, false, false);
1489 /* There is *always* an offset attached. Of
1490 * course, that offset is just immediate #0 for a
1491 * GLES call that doesn't take an offset. If there
1492 * is a non-negative non-zero offset, this is
1493 * specified in immediate offset mode, with the
1494 * values in the offset_* fields as immediates. If
1495 * this is a negative offset, we instead switch to
1496 * a register offset mode, where the offset_*
1497 * fields become register triplets */
1499 if (texture
->offset_register
) {
1502 bool full
= texture
->offset
& 1;
1503 bool select
= texture
->offset
& 2;
1504 bool upper
= texture
->offset
& 4;
1506 fprintf(fp
, "%sr%u", full
? "" : "h", in_reg_base
+ select
);
1507 assert(!(texture
->out_full
&& texture
->out_upper
));
1509 /* TODO: integrate with swizzle */
1513 print_swizzle_vec4(fp
, texture
->offset
>> 3, false, false, false);
1516 } else if (texture
->offset
) {
1517 /* Only select ops allow negative immediate offsets, verify */
1519 signed offset_x
= (texture
->offset
& 0xF);
1520 signed offset_y
= ((texture
->offset
>> 4) & 0xF);
1521 signed offset_z
= ((texture
->offset
>> 8) & 0xF);
1523 bool neg_x
= offset_x
< 0;
1524 bool neg_y
= offset_y
< 0;
1525 bool neg_z
= offset_z
< 0;
1526 bool any_neg
= neg_x
|| neg_y
|| neg_z
;
1528 if (any_neg
&& texture
->op
!= TEXTURE_OP_TEXEL_FETCH
)
1529 fprintf(fp
, "/* invalid negative */ ");
1531 /* Regardless, just print the immediate offset */
1533 fprintf(fp
, " + <%d, %d, %d>, ", offset_x
, offset_y
, offset_z
);
1538 char lod_operand
= texture_op_takes_bias(texture
->op
) ? '+' : '=';
1540 if (texture
->lod_register
) {
1541 fprintf(fp
, "lod %c ", lod_operand
);
1542 print_texture_reg_select(fp
, texture
->bias
, in_reg_base
);
1545 if (texture
->bias_int
)
1546 fprintf(fp
, " /* bias_int = 0x%X */", texture
->bias_int
);
1547 } else if (texture
->op
== TEXTURE_OP_TEXEL_FETCH
) {
1548 /* For texel fetch, the int LOD is in the fractional place and
1549 * there is no fraction / possibility of bias. We *always* have
1550 * an explicit LOD, even if it's zero. */
1552 if (texture
->bias_int
)
1553 fprintf(fp
, " /* bias_int = 0x%X */ ", texture
->bias_int
);
1555 fprintf(fp
, "lod = %u, ", texture
->bias
);
1556 } else if (texture
->bias
|| texture
->bias_int
) {
1557 signed bias_int
= texture
->bias_int
;
1558 float bias_frac
= texture
->bias
/ 256.0f
;
1559 float bias
= bias_int
+ bias_frac
;
1561 bool is_bias
= texture_op_takes_bias(texture
->op
);
1562 char sign
= (bias
>= 0.0) ? '+' : '-';
1563 char operand
= is_bias
? sign
: '=';
1565 fprintf(fp
, "lod %c %f, ", operand
, fabsf(bias
));
1570 /* While not zero in general, for these simple instructions the
1571 * following unknowns are zero, so we don't include them */
1573 if (texture
->unknown4
||
1574 texture
->unknown8
) {
1575 fprintf(fp
, "// unknown4 = 0x%x\n", texture
->unknown4
);
1576 fprintf(fp
, "// unknown8 = 0x%x\n", texture
->unknown8
);
1579 midg_stats
.instruction_count
++;
1582 struct midgard_disasm_stats
1583 disassemble_midgard(FILE *fp
, uint8_t *code
, size_t size
, unsigned gpu_id
, gl_shader_stage stage
)
1585 uint32_t *words
= (uint32_t *) code
;
1586 unsigned num_words
= size
/ 4;
1589 bool branch_forward
= false;
1591 int last_next_tag
= -1;
1595 midg_tags
= calloc(sizeof(midg_tags
[0]), num_words
);
1597 /* Stats for shader-db */
1598 memset(&midg_stats
, 0, sizeof(midg_stats
));
1599 midg_ever_written
= 0;
1601 while (i
< num_words
) {
1602 unsigned tag
= words
[i
] & 0xF;
1603 unsigned next_tag
= (words
[i
] >> 4) & 0xF;
1604 unsigned num_quad_words
= midgard_tag_props
[tag
].size
;
1606 if (midg_tags
[i
] && midg_tags
[i
] != tag
) {
1607 fprintf(fp
, "\t/* XXX: TAG ERROR branch, got %s expected %s */\n",
1608 midgard_tag_props
[tag
].name
,
1609 midgard_tag_props
[midg_tags
[i
]].name
);
1614 /* Check the tag. The idea is to ensure that next_tag is
1615 * *always* recoverable from the disassembly, such that we may
1616 * safely omit printing next_tag. To show this, we first
1617 * consider that next tags are semantically off-byone -- we end
1618 * up parsing tag n during step n+1. So, we ensure after we're
1619 * done disassembling the next tag of the final bundle is BREAK
1620 * and warn otherwise. We also ensure that the next tag is
1621 * never INVALID. Beyond that, since the last tag is checked
1622 * outside the loop, we can check one tag prior. If equal to
1623 * the current tag (which is unique), we're done. Otherwise, we
1624 * print if that tag was > TAG_BREAK, which implies the tag was
1625 * not TAG_BREAK or TAG_INVALID. But we already checked for
1626 * TAG_INVALID, so it's just if the last tag was TAG_BREAK that
1627 * we're silent. So we throw in a print for break-next on at
1628 * the end of the bundle (if it's not the final bundle, which
1629 * we already check for above), disambiguating this case as
1630 * well. Hence in all cases we are unambiguous, QED. */
1632 if (next_tag
== TAG_INVALID
)
1633 fprintf(fp
, "\t/* XXX: invalid next tag */\n");
1635 if (last_next_tag
> TAG_BREAK
&& last_next_tag
!= tag
) {
1636 fprintf(fp
, "\t/* XXX: TAG ERROR sequence, got %s expexted %s */\n",
1637 midgard_tag_props
[tag
].name
,
1638 midgard_tag_props
[last_next_tag
].name
);
1641 last_next_tag
= next_tag
;
1643 /* Tags are unique in the following way:
1645 * INVALID, BREAK, UNKNOWN_*: verbosely printed
1646 * TEXTURE_4_BARRIER: verified by barrier/!barrier op
1647 * TEXTURE_4_VTX: .vtx tag printed
1648 * TEXTURE_4: tetxure lack of barriers or .vtx
1649 * TAG_LOAD_STORE_4: only load/store
1650 * TAG_ALU_4/8/12/16: by number of instructions/constants
1651 * TAG_ALU_4_8/12/16_WRITEOUT: ^^ with .writeout tag
1655 case TAG_TEXTURE_4_VTX
... TAG_TEXTURE_4_BARRIER
: {
1656 bool interpipe_aliasing
=
1657 midgard_get_quirks(gpu_id
) & MIDGARD_INTERPIPE_REG_ALIASING
;
1659 print_texture_word(fp
, &words
[i
], tabs
,
1660 interpipe_aliasing
? 0 : REG_TEX_BASE
,
1661 interpipe_aliasing
? REGISTER_LDST_BASE
: REG_TEX_BASE
);
1665 case TAG_LOAD_STORE_4
:
1666 print_load_store_word(fp
, &words
[i
], tabs
);
1669 case TAG_ALU_4
... TAG_ALU_16_WRITEOUT
:
1670 branch_forward
= print_alu_word(fp
, &words
[i
], num_quad_words
, tabs
, i
+ 4*num_quad_words
);
1672 /* Reset word static analysis state */
1673 is_embedded_constant_half
= false;
1674 is_embedded_constant_int
= false;
1676 /* TODO: infer/verify me */
1677 if (tag
>= TAG_ALU_4_WRITEOUT
)
1678 fprintf(fp
, "writeout\n");
1683 fprintf(fp
, "Unknown word type %u:\n", words
[i
] & 0xF);
1685 print_quad_word(fp
, &words
[i
], tabs
);
1690 /* We are parsing per bundle anyway. Add before we start
1691 * breaking out so we don't miss the final bundle. */
1693 midg_stats
.bundle_count
++;
1694 midg_stats
.quadword_count
+= num_quad_words
;
1696 /* Include a synthetic "break" instruction at the end of the
1697 * bundle to signify that if, absent a branch, the shader
1698 * execution will stop here. Stop disassembly at such a break
1699 * based on a heuristic */
1701 if (next_tag
== TAG_BREAK
) {
1702 if (branch_forward
) {
1703 fprintf(fp
, "break\n");
1712 i
+= 4 * num_quad_words
;
1715 if (last_next_tag
!= TAG_BREAK
) {
1716 fprintf(fp
, "/* XXX: shader ended with tag %s */\n",
1717 midgard_tag_props
[last_next_tag
].name
);
1722 /* We computed work_count as max_work_registers, so add one to get the
1723 * count. If no work registers are written, you still have one work
1724 * reported, which is exactly what the hardware expects */
1726 midg_stats
.work_count
++;