5 * Copyright (c) 2013 Connor Abbott (connor@abbott.cx)
6 * Copyright (c) 2018 Alyssa Rosenzweig (alyssa@rosenzweig.io)
8 * Permission is hereby granted, free of charge, to any person obtaining a copy
9 * of this software and associated documentation files (the "Software"), to deal
10 * in the Software without restriction, including without limitation the rights
11 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
12 * copies of the Software, and to permit persons to whom the Software is
13 * furnished to do so, subject to the following conditions:
15 * The above copyright notice and this permission notice shall be included in
16 * all copies or substantial portions of the Software.
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
21 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
23 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
35 #include "midgard_ops.h"
36 #include "midgard_quirks.h"
37 #include "disassemble.h"
39 #include "util/bitscan.h"
40 #include "util/half_float.h"
41 #include "util/u_math.h"
43 #define DEFINE_CASE(define, str) case define: { fprintf(fp, str); break; }
45 static unsigned *midg_tags
;
46 static bool is_instruction_int
= false;
50 static struct midgard_disasm_stats midg_stats
;
52 /* Transform an expanded writemask (duplicated 8-bit format) into its condensed
53 * form (one bit per component) */
/*
 * Collapse an expanded writemask (one duplicated bit per 16-bit slot of the
 * 128-bit word) into its condensed form: one bit per logical component.
 *
 * bits_per_component reflects the register mode (16/32/64). The 8-bit
 * encoding is different and deliberately unhandled here.
 */
static inline unsigned
condense_writemask(unsigned expanded_mask,
                   unsigned bits_per_component)
{
        if (bits_per_component == 8)
                unreachable("XXX TODO: sort out how 8-bit constant encoding works");

        /* Each logical component occupies this many 16-bit mask slots */
        unsigned slots_per_component = bits_per_component / 16;
        unsigned max_comp = (16 * 8) / bits_per_component;
        unsigned condensed_mask = 0;

        for (unsigned i = 0; i < max_comp; i++) {
                /* Sample the first slot of each component; duplicates carry
                 * no extra information */
                if (expanded_mask & (1 << (i * slots_per_component)))
                        condensed_mask |= (1 << i);
        }

        return condensed_mask;
}
75 print_alu_opcode(FILE *fp
, midgard_alu_op op
)
79 if (alu_opcode_props
[op
].name
) {
80 fprintf(fp
, "%s", alu_opcode_props
[op
].name
);
82 int_op
= midgard_is_integer_op(op
);
84 fprintf(fp
, "alu_op_%02X", op
);
86 /* For constant analysis */
87 is_instruction_int
= int_op
;
91 print_ld_st_opcode(FILE *fp
, midgard_load_store_op op
)
93 if (load_store_opcode_props
[op
].name
)
94 fprintf(fp
, "%s", load_store_opcode_props
[op
].name
);
96 fprintf(fp
, "ldst_op_%02X", op
);
99 static bool is_embedded_constant_half
= false;
100 static bool is_embedded_constant_int
= false;
103 prefix_for_bits(unsigned bits
)
117 /* For static analysis to ensure all registers are written at least once before
118 * use along the source code path (TODO: does this break done for complex CF?)
121 uint16_t midg_ever_written
= 0;
124 print_reg(FILE *fp
, unsigned reg
, unsigned bits
)
126 /* Perform basic static analysis for expanding constants correctly */
129 is_embedded_constant_int
= is_instruction_int
;
130 is_embedded_constant_half
= (bits
< 32);
133 unsigned uniform_reg
= 23 - reg
;
134 bool is_uniform
= false;
136 /* For r8-r15, it could be a work or uniform. We distinguish based on
137 * the fact work registers are ALWAYS written before use, but uniform
138 * registers are NEVER written before use. */
140 if ((reg
>= 8 && reg
< 16) && !(midg_ever_written
& (1 << reg
)))
143 /* r16-r23 are always uniform */
145 if (reg
>= 16 && reg
<= 23)
148 /* Update the uniform count appropriately */
151 midg_stats
.uniform_count
=
152 MAX2(uniform_reg
+ 1, midg_stats
.uniform_count
);
154 char prefix
= prefix_for_bits(bits
);
159 fprintf(fp
, "r%u", reg
);
162 static char *outmod_names_float
[4] = {
169 static char *outmod_names_int
[4] = {
176 static char *srcmod_names_int
[4] = {
184 print_outmod(FILE *fp
, unsigned outmod
, bool is_int
)
186 fprintf(fp
, "%s", is_int
? outmod_names_int
[outmod
] :
187 outmod_names_float
[outmod
]);
/* Dump one 128-bit (quad) word as four comma-separated hex words, for raw
 * inspection alongside the decoded output. `tabs` is accepted for symmetry
 * with the other printers but unused. */
static void
print_quad_word(FILE *fp, uint32_t *words, unsigned tabs)
{
        unsigned i;

        for (i = 0; i < 4; i++)
                fprintf(fp, "0x%08X%s ", words[i], i == 3 ? "" : ",");

        fprintf(fp, "\n");
}
201 static const char components
[16] = "xyzwefghijklmnop";
203 /* Helper to print 4 chars of a swizzle */
205 print_swizzle_helper(FILE *fp
, unsigned swizzle
, unsigned offset
)
207 for (unsigned i
= 0; i
< 4; ++i
) {
208 unsigned c
= (swizzle
>> (i
* 2)) & 3;
210 fprintf(fp
, "%c", components
[c
]);
214 /* Helper to print 8 chars of a swizzle, duplicating over */
216 print_swizzle_helper_8(FILE *fp
, unsigned swizzle
, bool upper
)
218 for (unsigned i
= 0; i
< 4; ++i
) {
219 unsigned c
= (swizzle
>> (i
* 2)) & 3;
222 fprintf(fp
, "%c%c", components
[c
], components
[c
+1]);
227 print_swizzle_vec16(FILE *fp
, unsigned swizzle
, bool rep_high
, bool rep_low
,
228 midgard_dest_override override
)
232 if (override
== midgard_dest_override_upper
) {
234 fprintf(fp
, " /* rep_high */ ");
236 fprintf(fp
, " /* rep_low */ ");
238 if (!rep_high
&& rep_low
)
239 print_swizzle_helper_8(fp
, swizzle
, true);
241 print_swizzle_helper_8(fp
, swizzle
, false);
243 print_swizzle_helper_8(fp
, swizzle
, rep_high
& 1);
244 print_swizzle_helper_8(fp
, swizzle
, !(rep_low
& 1));
249 print_swizzle_vec8(FILE *fp
, unsigned swizzle
, bool rep_high
, bool rep_low
, bool half
)
253 /* TODO: Is it possible to unify half/full? */
256 print_swizzle_helper(fp
, swizzle
, (rep_low
* 8));
257 print_swizzle_helper(fp
, swizzle
, (rep_low
* 8) + !rep_high
* 4);
259 print_swizzle_helper(fp
, swizzle
, rep_high
* 4);
260 print_swizzle_helper(fp
, swizzle
, !rep_low
* 4);
265 print_swizzle_vec4(FILE *fp
, unsigned swizzle
, bool rep_high
, bool rep_low
, bool half
)
268 fprintf(fp
, " /* rep_high */ ");
270 if (!half
&& rep_low
)
271 fprintf(fp
, " /* rep_low */ ");
273 if (swizzle
== 0xE4 && !half
) return; /* xyzw */
276 print_swizzle_helper(fp
, swizzle
, rep_low
* 4);
279 print_swizzle_vec2(FILE *fp
, unsigned swizzle
, bool rep_high
, bool rep_low
, bool half
)
281 char *alphabet
= "XY";
284 alphabet
= rep_low
? "zw" : "xy";
286 fprintf(fp
, " /* rep_low */ ");
289 fprintf(fp
, " /* rep_high */ ");
291 if (swizzle
== 0xE4 && !half
) return; /* XY */
295 for (unsigned i
= 0; i
< 4; i
+= 2) {
296 unsigned a
= (swizzle
>> (i
* 2)) & 3;
297 unsigned b
= (swizzle
>> ((i
+1) * 2)) & 3;
299 /* Normally we're adjacent, but if there's an issue, don't make
303 fprintf(fp
, "%c", alphabet
[a
>> 1]);
305 fprintf(fp
, "[%c%c]", components
[a
], components
[b
]);
310 bits_for_mode(midgard_reg_mode mode
)
313 case midgard_reg_mode_8
:
315 case midgard_reg_mode_16
:
317 case midgard_reg_mode_32
:
319 case midgard_reg_mode_64
:
322 unreachable("Invalid reg mode");
328 bits_for_mode_halved(midgard_reg_mode mode
, bool half
)
330 unsigned bits
= bits_for_mode(mode
);
339 print_scalar_constant(FILE *fp
, unsigned src_binary
,
340 const midgard_constants
*consts
,
341 midgard_scalar_alu
*alu
)
343 midgard_scalar_alu_src
*src
= (midgard_scalar_alu_src
*)&src_binary
;
344 assert(consts
!= NULL
);
347 mir_print_constant_component(fp
, consts
, src
->component
,
349 midgard_reg_mode_32
: midgard_reg_mode_16
,
350 false, src
->mod
, alu
->op
);
354 print_vector_constants(FILE *fp
, unsigned src_binary
,
355 const midgard_constants
*consts
,
356 midgard_vector_alu
*alu
)
358 midgard_vector_alu_src
*src
= (midgard_vector_alu_src
*)&src_binary
;
359 unsigned bits
= bits_for_mode_halved(alu
->reg_mode
, src
->half
);
360 unsigned max_comp
= MIN2((sizeof(*consts
) * 8) / bits
, 8);
361 unsigned comp_mask
, num_comp
= 0;
365 comp_mask
= effective_writemask(alu
, condense_writemask(alu
->mask
, bits
));
366 num_comp
= util_bitcount(comp_mask
);
370 fprintf(fp
, "vec%d(", num_comp
);
374 for (unsigned i
= 0; i
< max_comp
; ++i
) {
375 if (!(comp_mask
& (1 << i
))) continue;
377 unsigned c
= (src
->swizzle
>> (i
* 2)) & 3;
384 mir_print_constant_component(fp
, consts
, c
, alu
->reg_mode
,
385 src
->half
, src
->mod
, alu
->op
);
393 print_srcmod(FILE *fp
, bool is_int
, unsigned mod
, bool scalar
)
395 /* Modifiers change meaning depending on the op's context */
397 midgard_int_mod int_mod
= mod
;
400 if (scalar
&& mod
== 2) {
404 fprintf(fp
, "%s", srcmod_names_int
[int_mod
]);
406 if (mod
& MIDGARD_FLOAT_MOD_NEG
)
409 if (mod
& MIDGARD_FLOAT_MOD_ABS
)
415 print_srcmod_end(FILE *fp
, bool is_int
, unsigned mod
, unsigned bits
)
417 /* Since we wrapped with a function-looking thing */
419 if (is_int
&& mod
== midgard_int_shift
)
420 fprintf(fp
, ") << %u", bits
);
421 else if ((is_int
&& (mod
!= midgard_int_normal
))
422 || (!is_int
&& mod
& MIDGARD_FLOAT_MOD_ABS
))
427 print_vector_src(FILE *fp
, unsigned src_binary
,
428 midgard_reg_mode mode
, unsigned reg
,
429 midgard_dest_override override
, bool is_int
)
431 midgard_vector_alu_src
*src
= (midgard_vector_alu_src
*)&src_binary
;
432 print_srcmod(fp
, is_int
, src
->mod
, false);
435 unsigned bits
= bits_for_mode_halved(mode
, src
->half
);
436 print_reg(fp
, reg
, bits
);
438 /* When the source was stepped down via `half`, rep_low means "higher
439 * half" and rep_high is never seen. When it's not native,
440 * rep_low/rep_high are for, well, replication */
442 if (mode
== midgard_reg_mode_8
) {
444 print_swizzle_vec16(fp
, src
->swizzle
, src
->rep_high
, src
->rep_low
, override
);
445 } else if (mode
== midgard_reg_mode_16
) {
446 print_swizzle_vec8(fp
, src
->swizzle
, src
->rep_high
, src
->rep_low
, src
->half
);
447 } else if (mode
== midgard_reg_mode_32
) {
448 print_swizzle_vec4(fp
, src
->swizzle
, src
->rep_high
, src
->rep_low
, src
->half
);
449 } else if (mode
== midgard_reg_mode_64
) {
450 print_swizzle_vec2(fp
, src
->swizzle
, src
->rep_high
, src
->rep_low
, src
->half
);
453 print_srcmod_end(fp
, is_int
, src
->mod
, bits
);
/* Reassemble the 16-bit inline constant of a vector ALU op from its
 * scattered encoding: src2_reg supplies the top 5 bits, while the imm field
 * carries the low three bits of the high byte and the full low byte. */
static uint16_t
decode_vector_imm(unsigned src2_reg, unsigned imm)
{
        uint16_t ret;
        ret = src2_reg << 11;
        ret |= (imm & 0x7) << 8;
        ret |= (imm >> 3) & 0xFF;
        return ret;
}
467 print_immediate(FILE *fp
, uint16_t imm
)
469 if (is_instruction_int
)
470 fprintf(fp
, "#%u", imm
);
472 fprintf(fp
, "#%g", _mesa_half_to_float(imm
));
476 update_dest(unsigned reg
)
478 /* We should record writes as marking this as a work register. Store
479 * the max register in work_count; we'll add one at the end */
482 midg_stats
.work_count
= MAX2(reg
, midg_stats
.work_count
);
483 midg_ever_written
|= (1 << reg
);
488 print_dest(FILE *fp
, unsigned reg
, midgard_reg_mode mode
, midgard_dest_override override
)
490 /* Depending on the mode and override, we determine the type of
491 * destination addressed. Absent an override, we address just the
492 * type of the operation itself */
494 unsigned bits
= bits_for_mode(mode
);
496 if (override
!= midgard_dest_override_none
)
500 print_reg(fp
, reg
, bits
);
504 print_mask_vec16(FILE *fp
, uint8_t mask
, midgard_dest_override override
)
508 for (unsigned i
= 0; i
< 8; i
++) {
512 components
[i
*2 + 1]);
516 /* For 16-bit+ masks, we read off from the 8-bit mask field. For 16-bit (vec8),
517 * it's just one bit per channel, easy peasy. For 32-bit (vec4), it's one bit
518 * per channel with one duplicate bit in the middle. For 64-bit (vec2), it's
519 * one-bit per channel with _3_ duplicate bits in the middle. Basically, just
520 * subdividing the 128-bit word in 16-bit increments. For 64-bit, we uppercase
521 * the mask to make it obvious what happened */
524 print_mask(FILE *fp
, uint8_t mask
, unsigned bits
, midgard_dest_override override
)
527 print_mask_vec16(fp
, mask
, override
);
531 /* Skip 'complete' masks */
533 if (override
== midgard_dest_override_none
)
534 if (bits
>= 32 && mask
== 0xFF) return;
538 unsigned skip
= (bits
/ 16);
539 bool uppercase
= bits
> 32;
540 bool tripped
= false;
542 /* To apply an upper destination override, we "shift" the alphabet.
543 * E.g. with an upper override on 32-bit, instead of xyzw, print efgh.
544 * For upper 16-bit, instead of xyzwefgh, print ijklmnop */
546 const char *alphabet
= components
;
548 if (override
== midgard_dest_override_upper
)
549 alphabet
+= (128 / bits
);
551 for (unsigned i
= 0; i
< 8; i
+= skip
) {
552 bool a
= (mask
& (1 << i
)) != 0;
554 for (unsigned j
= 1; j
< skip
; ++j
) {
555 bool dupe
= (mask
& (1 << (i
+ j
))) != 0;
556 tripped
|= (dupe
!= a
);
560 char c
= alphabet
[i
/ skip
];
565 fprintf(fp
, "%c", c
);
570 fprintf(fp
, " /* %X */", mask
);
573 /* Prints the 4-bit masks found in texture and load/store ops, as opposed to
574 * the 8-bit masks found in (vector) ALU ops. Supports texture-style 16-bit
575 * mode as well, but not load/store-style 16-bit mode. */
578 print_mask_4(FILE *fp
, unsigned mask
, bool upper
)
589 for (unsigned i
= 0; i
< 4; ++i
) {
590 bool a
= (mask
& (1 << i
)) != 0;
592 fprintf(fp
, "%c", components
[i
+ (upper
? 4 : 0)]);
597 print_vector_field(FILE *fp
, const char *name
, uint16_t *words
, uint16_t reg_word
,
598 const midgard_constants
*consts
, unsigned tabs
)
600 midgard_reg_info
*reg_info
= (midgard_reg_info
*)®_word
;
601 midgard_vector_alu
*alu_field
= (midgard_vector_alu
*) words
;
602 midgard_reg_mode mode
= alu_field
->reg_mode
;
603 unsigned override
= alu_field
->dest_override
;
605 /* For now, prefix instruction names with their unit, until we
606 * understand how this works on a deeper level */
607 fprintf(fp
, "%s.", name
);
609 print_alu_opcode(fp
, alu_field
->op
);
611 /* Postfix with the size to disambiguate if necessary */
612 char postfix
= prefix_for_bits(bits_for_mode(mode
));
613 bool size_ambiguous
= override
!= midgard_dest_override_none
;
616 fprintf(fp
, "%c", postfix
? postfix
: 'r');
618 /* Print the outmod, if there is one */
619 print_outmod(fp
, alu_field
->outmod
,
620 midgard_is_integer_out_op(alu_field
->op
));
624 /* Mask denoting status of 8-lanes */
625 uint8_t mask
= alu_field
->mask
;
627 /* First, print the destination */
628 print_dest(fp
, reg_info
->out_reg
, mode
, alu_field
->dest_override
);
630 if (override
!= midgard_dest_override_none
) {
631 bool modeable
= (mode
!= midgard_reg_mode_8
);
632 bool known
= override
!= 0x3; /* Unused value */
634 if (!(modeable
&& known
))
635 fprintf(fp
, "/* do%u */ ", override
);
638 /* Instructions like fdot4 do *not* replicate, ensure the
639 * mask is of only a single component */
641 unsigned rep
= GET_CHANNEL_COUNT(alu_opcode_props
[alu_field
->op
].props
);
644 unsigned comp_mask
= condense_writemask(mask
, bits_for_mode(mode
));
645 unsigned num_comp
= util_bitcount(comp_mask
);
647 fprintf(fp
, "/* err too many components */");
649 print_mask(fp
, mask
, bits_for_mode(mode
), override
);
653 bool is_int
= midgard_is_integer_op(alu_field
->op
);
655 if (reg_info
->src1_reg
== 26)
656 print_vector_constants(fp
, alu_field
->src1
, consts
, alu_field
);
658 print_vector_src(fp
, alu_field
->src1
, mode
, reg_info
->src1_reg
, override
, is_int
);
662 if (reg_info
->src2_imm
) {
663 uint16_t imm
= decode_vector_imm(reg_info
->src2_reg
, alu_field
->src2
>> 2);
664 print_immediate(fp
, imm
);
665 } else if (reg_info
->src2_reg
== 26) {
666 print_vector_constants(fp
, alu_field
->src2
, consts
, alu_field
);
668 print_vector_src(fp
, alu_field
->src2
, mode
,
669 reg_info
->src2_reg
, override
, is_int
);
672 midg_stats
.instruction_count
++;
677 print_scalar_src(FILE *fp
, bool is_int
, unsigned src_binary
, unsigned reg
)
679 midgard_scalar_alu_src
*src
= (midgard_scalar_alu_src
*)&src_binary
;
681 print_srcmod(fp
, is_int
, src
->mod
, true);
682 print_reg(fp
, reg
, src
->full
? 32 : 16);
684 unsigned c
= src
->component
;
687 assert((c
& 1) == 0);
691 fprintf(fp
, ".%c", components
[c
]);
693 print_srcmod_end(fp
, is_int
, src
->mod
, src
->full
? 32 : 16);
/* Reassemble the 16-bit inline constant of a scalar ALU op. The scalar
 * encoding scatters the immediate differently from the vector one:
 * src2_reg again supplies the top 5 bits, and the imm bits are spread
 * across several non-contiguous fields. */
static uint16_t
decode_scalar_imm(unsigned src2_reg, unsigned imm)
{
        uint16_t ret;
        ret = src2_reg << 11;
        ret |= (imm & 3) << 9;
        ret |= (imm & 4) << 6;
        ret |= (imm & 0x38) << 2;
        ret |= (imm >> 6) & 3;
        return ret;
}
709 print_scalar_field(FILE *fp
, const char *name
, uint16_t *words
, uint16_t reg_word
,
710 const midgard_constants
*consts
, unsigned tabs
)
712 midgard_reg_info
*reg_info
= (midgard_reg_info
*)®_word
;
713 midgard_scalar_alu
*alu_field
= (midgard_scalar_alu
*) words
;
715 if (alu_field
->unknown
)
716 fprintf(fp
, "scalar ALU unknown bit set\n");
718 fprintf(fp
, "%s.", name
);
719 print_alu_opcode(fp
, alu_field
->op
);
720 print_outmod(fp
, alu_field
->outmod
,
721 midgard_is_integer_out_op(alu_field
->op
));
724 bool full
= alu_field
->output_full
;
725 update_dest(reg_info
->out_reg
);
726 print_reg(fp
, reg_info
->out_reg
, full
? 32 : 16);
727 unsigned c
= alu_field
->output_component
;
728 bool is_int
= midgard_is_integer_op(alu_field
->op
);
731 assert((c
& 1) == 0);
735 fprintf(fp
, ".%c, ", components
[c
]);
737 if (reg_info
->src1_reg
== 26)
738 print_scalar_constant(fp
, alu_field
->src1
, consts
, alu_field
);
740 print_scalar_src(fp
, is_int
, alu_field
->src1
, reg_info
->src1_reg
);
744 if (reg_info
->src2_imm
) {
745 uint16_t imm
= decode_scalar_imm(reg_info
->src2_reg
,
747 print_immediate(fp
, imm
);
748 } else if (reg_info
->src2_reg
== 26) {
749 print_scalar_constant(fp
, alu_field
->src2
, consts
, alu_field
);
751 print_scalar_src(fp
, is_int
, alu_field
->src2
, reg_info
->src2_reg
);
753 midg_stats
.instruction_count
++;
758 print_branch_op(FILE *fp
, unsigned op
)
761 case midgard_jmp_writeout_op_branch_uncond
:
762 fprintf(fp
, "uncond.");
765 case midgard_jmp_writeout_op_branch_cond
:
766 fprintf(fp
, "cond.");
769 case midgard_jmp_writeout_op_writeout
:
770 fprintf(fp
, "write.");
773 case midgard_jmp_writeout_op_tilebuffer_pending
:
774 fprintf(fp
, "tilebuffer.");
777 case midgard_jmp_writeout_op_discard
:
778 fprintf(fp
, "discard.");
782 fprintf(fp
, "unk%u.", op
);
788 print_branch_cond(FILE *fp
, int cond
)
791 case midgard_condition_write0
:
792 fprintf(fp
, "write0");
795 case midgard_condition_false
:
796 fprintf(fp
, "false");
799 case midgard_condition_true
:
803 case midgard_condition_always
:
804 fprintf(fp
, "always");
808 fprintf(fp
, "unk%X", cond
);
814 print_compact_branch_writeout_field(FILE *fp
, uint16_t word
)
816 midgard_jmp_writeout_op op
= word
& 0x7;
817 midg_stats
.instruction_count
++;
820 case midgard_jmp_writeout_op_branch_uncond
: {
821 midgard_branch_uncond br_uncond
;
822 memcpy((char *) &br_uncond
, (char *) &word
, sizeof(br_uncond
));
823 fprintf(fp
, "br.uncond ");
825 if (br_uncond
.unknown
!= 1)
826 fprintf(fp
, "unknown:%u, ", br_uncond
.unknown
);
828 if (br_uncond
.offset
>= 0)
831 fprintf(fp
, "%d -> %s", br_uncond
.offset
,
832 midgard_tag_props
[br_uncond
.dest_tag
].name
);
835 return br_uncond
.offset
>= 0;
838 case midgard_jmp_writeout_op_branch_cond
:
839 case midgard_jmp_writeout_op_writeout
:
840 case midgard_jmp_writeout_op_discard
:
842 midgard_branch_cond br_cond
;
843 memcpy((char *) &br_cond
, (char *) &word
, sizeof(br_cond
));
847 print_branch_op(fp
, br_cond
.op
);
848 print_branch_cond(fp
, br_cond
.cond
);
852 if (br_cond
.offset
>= 0)
855 fprintf(fp
, "%d -> %s", br_cond
.offset
,
856 midgard_tag_props
[br_cond
.dest_tag
].name
);
859 return br_cond
.offset
>= 0;
867 print_extended_branch_writeout_field(FILE *fp
, uint8_t *words
, unsigned next
)
869 midgard_branch_extended br
;
870 memcpy((char *) &br
, (char *) words
, sizeof(br
));
874 print_branch_op(fp
, br
.op
);
876 /* Condition codes are a LUT in the general case, but simply repeated 8 times for single-channel conditions.. Check this. */
878 bool single_channel
= true;
880 for (unsigned i
= 0; i
< 16; i
+= 2) {
881 single_channel
&= (((br
.cond
>> i
) & 0x3) == (br
.cond
& 0x3));
885 print_branch_cond(fp
, br
.cond
& 0x3);
887 fprintf(fp
, "lut%X", br
.cond
);
890 fprintf(fp
, ".unknown%u", br
.unknown
);
897 fprintf(fp
, "%d -> %s\n", br
.offset
,
898 midgard_tag_props
[br
.dest_tag
].name
);
900 unsigned I
= next
+ br
.offset
* 4;
902 if (midg_tags
[I
] && midg_tags
[I
] != br
.dest_tag
) {
903 fprintf(fp
, "\t/* XXX TAG ERROR: jumping to %s but tagged %s \n",
904 midgard_tag_props
[br
.dest_tag
].name
,
905 midgard_tag_props
[midg_tags
[I
]].name
);
908 midg_tags
[I
] = br
.dest_tag
;
910 midg_stats
.instruction_count
++;
911 return br
.offset
>= 0;
/* Count how many ALU execution fields (vmul/sadd/vadd/smul/lut) are enabled
 * in an ALU control word by testing their respective enable bits. Used to
 * locate the start of the instruction payload after the register words. */
static unsigned
num_alu_fields_enabled(uint32_t control_word)
{
        unsigned ret = 0;

        if ((control_word >> 17) & 1)
                ret++;

        if ((control_word >> 19) & 1)
                ret++;

        if ((control_word >> 21) & 1)
                ret++;

        if ((control_word >> 23) & 1)
                ret++;

        if ((control_word >> 25) & 1)
                ret++;

        return ret;
}
938 print_alu_word(FILE *fp
, uint32_t *words
, unsigned num_quad_words
,
939 unsigned tabs
, unsigned next
)
941 uint32_t control_word
= words
[0];
942 uint16_t *beginning_ptr
= (uint16_t *)(words
+ 1);
943 unsigned num_fields
= num_alu_fields_enabled(control_word
);
944 uint16_t *word_ptr
= beginning_ptr
+ num_fields
;
945 unsigned num_words
= 2 + num_fields
;
946 const midgard_constants
*consts
= NULL
;
947 bool branch_forward
= false;
949 if ((control_word
>> 17) & 1)
952 if ((control_word
>> 19) & 1)
955 if ((control_word
>> 21) & 1)
958 if ((control_word
>> 23) & 1)
961 if ((control_word
>> 25) & 1)
964 if ((control_word
>> 26) & 1)
967 if ((control_word
>> 27) & 1)
970 if (num_quad_words
> (num_words
+ 7) / 8) {
971 assert(num_quad_words
== (num_words
+ 15) / 8);
972 //Assume that the extra quadword is constants
973 consts
= (midgard_constants
*)(words
+ (4 * num_quad_words
- 4));
976 if ((control_word
>> 16) & 1)
977 fprintf(fp
, "unknown bit 16 enabled\n");
979 if ((control_word
>> 17) & 1) {
980 print_vector_field(fp
, "vmul", word_ptr
, *beginning_ptr
, consts
, tabs
);
985 if ((control_word
>> 18) & 1)
986 fprintf(fp
, "unknown bit 18 enabled\n");
988 if ((control_word
>> 19) & 1) {
989 print_scalar_field(fp
, "sadd", word_ptr
, *beginning_ptr
, consts
, tabs
);
994 if ((control_word
>> 20) & 1)
995 fprintf(fp
, "unknown bit 20 enabled\n");
997 if ((control_word
>> 21) & 1) {
998 print_vector_field(fp
, "vadd", word_ptr
, *beginning_ptr
, consts
, tabs
);
1003 if ((control_word
>> 22) & 1)
1004 fprintf(fp
, "unknown bit 22 enabled\n");
1006 if ((control_word
>> 23) & 1) {
1007 print_scalar_field(fp
, "smul", word_ptr
, *beginning_ptr
, consts
, tabs
);
1012 if ((control_word
>> 24) & 1)
1013 fprintf(fp
, "unknown bit 24 enabled\n");
1015 if ((control_word
>> 25) & 1) {
1016 print_vector_field(fp
, "lut", word_ptr
, *beginning_ptr
, consts
, tabs
);
1020 if ((control_word
>> 26) & 1) {
1021 branch_forward
|= print_compact_branch_writeout_field(fp
, *word_ptr
);
1025 if ((control_word
>> 27) & 1) {
1026 branch_forward
|= print_extended_branch_writeout_field(fp
, (uint8_t *) word_ptr
, next
);
1031 fprintf(fp
, "uconstants 0x%X, 0x%X, 0x%X, 0x%X\n",
1032 consts
->u32
[0], consts
->u32
[1],
1033 consts
->u32
[2], consts
->u32
[3]);
1035 return branch_forward
;
1039 print_varying_parameters(FILE *fp
, midgard_load_store_word
*word
)
1041 midgard_varying_parameter param
;
1042 unsigned v
= word
->varying_parameters
;
1043 memcpy(¶m
, &v
, sizeof(param
));
1045 if (param
.is_varying
) {
1046 /* If a varying, there are qualifiers */
1048 fprintf(fp
, ".flat");
1050 if (param
.interpolation
!= midgard_interp_default
) {
1051 if (param
.interpolation
== midgard_interp_centroid
)
1052 fprintf(fp
, ".centroid");
1054 fprintf(fp
, ".interp%d", param
.interpolation
);
1057 if (param
.modifier
!= midgard_varying_mod_none
) {
1058 if (param
.modifier
== midgard_varying_mod_perspective_w
)
1059 fprintf(fp
, ".perspectivew");
1060 else if (param
.modifier
== midgard_varying_mod_perspective_z
)
1061 fprintf(fp
, ".perspectivez");
1063 fprintf(fp
, ".mod%d", param
.modifier
);
1065 } else if (param
.flat
|| param
.interpolation
|| param
.modifier
) {
1066 fprintf(fp
, " /* is_varying not set but varying metadata attached */");
1069 if (param
.zero0
|| param
.zero1
|| param
.zero2
)
1070 fprintf(fp
, " /* zero tripped, %u %u %u */ ", param
.zero0
, param
.zero1
, param
.zero2
);
1074 is_op_varying(unsigned op
)
1077 case midgard_op_st_vary_16
:
1078 case midgard_op_st_vary_32
:
1079 case midgard_op_st_vary_32i
:
1080 case midgard_op_st_vary_32u
:
1081 case midgard_op_ld_vary_16
:
1082 case midgard_op_ld_vary_32
:
1083 case midgard_op_ld_vary_32i
:
1084 case midgard_op_ld_vary_32u
:
1092 is_op_attribute(unsigned op
)
1095 case midgard_op_ld_attr_16
:
1096 case midgard_op_ld_attr_32
:
1097 case midgard_op_ld_attr_32i
:
1098 case midgard_op_ld_attr_32u
:
1106 print_load_store_arg(FILE *fp
, uint8_t arg
, unsigned index
)
1108 /* Try to interpret as a register */
1109 midgard_ldst_register_select sel
;
1110 memcpy(&sel
, &arg
, sizeof(arg
));
1112 /* If unknown is set, we're not sure what this is or how to
1113 * interpret it. But if it's zero, we get it. */
1116 fprintf(fp
, "0x%02X", arg
);
1120 unsigned reg
= REGISTER_LDST_BASE
+ sel
.select
;
1121 char comp
= components
[sel
.component
];
1123 fprintf(fp
, "r%u.%c", reg
, comp
);
1125 /* Only print a shift if it's non-zero. Shifts only make sense for the
1126 * second index. For the first, we're not sure what it means yet */
1130 fprintf(fp
, " << %u", sel
.shift
);
1132 fprintf(fp
, " /* %X */", sel
.shift
);
/* Grow a resource-count statistic to cover a direct access at `address`.
 * Negative counts are a sentinel meaning "indirect access seen, exact count
 * unknowable", so those are left untouched. */
static void
update_stats(signed *stat, unsigned address)
{
        if (*stat >= 0)
                *stat = MAX2(*stat, address + 1);
}
1144 print_load_store_instr(FILE *fp
, uint64_t data
,
1147 midgard_load_store_word
*word
= (midgard_load_store_word
*) &data
;
1149 print_ld_st_opcode(fp
, word
->op
);
1151 unsigned address
= word
->address
;
1153 if (is_op_varying(word
->op
)) {
1154 print_varying_parameters(fp
, word
);
1156 /* Do some analysis: check if direct cacess */
1158 if ((word
->arg_2
== 0x1E) && midg_stats
.varying_count
>= 0)
1159 update_stats(&midg_stats
.varying_count
, address
);
1161 midg_stats
.varying_count
= -16;
1162 } else if (is_op_attribute(word
->op
)) {
1163 if ((word
->arg_2
== 0x1E) && midg_stats
.attribute_count
>= 0)
1164 update_stats(&midg_stats
.attribute_count
, address
);
1166 midg_stats
.attribute_count
= -16;
1169 fprintf(fp
, " r%u", word
->reg
+ (OP_IS_STORE(word
->op
) ? 26 : 0));
1170 print_mask_4(fp
, word
->mask
, false);
1172 if (!OP_IS_STORE(word
->op
))
1173 update_dest(word
->reg
);
1175 bool is_ubo
= OP_IS_UBO_READ(word
->op
);
1178 /* UBOs use their own addressing scheme */
1180 int lo
= word
->varying_parameters
>> 7;
1181 int hi
= word
->address
;
1183 /* TODO: Combine fields logically */
1184 address
= (hi
<< 3) | lo
;
1187 fprintf(fp
, ", %u", address
);
1189 print_swizzle_vec4(fp
, word
->swizzle
, false, false, false);
1194 fprintf(fp
, "ubo%u", word
->arg_1
);
1195 update_stats(&midg_stats
.uniform_buffer_count
, word
->arg_1
);
1197 print_load_store_arg(fp
, word
->arg_1
, 0);
1200 print_load_store_arg(fp
, word
->arg_2
, 1);
1201 fprintf(fp
, " /* %X */\n", word
->varying_parameters
);
1203 midg_stats
.instruction_count
++;
1207 print_load_store_word(FILE *fp
, uint32_t *word
, unsigned tabs
)
1209 midgard_load_store
*load_store
= (midgard_load_store
*) word
;
1211 if (load_store
->word1
!= 3) {
1212 print_load_store_instr(fp
, load_store
->word1
, tabs
);
1215 if (load_store
->word2
!= 3) {
1216 print_load_store_instr(fp
, load_store
->word2
, tabs
);
1221 print_texture_reg_select(FILE *fp
, uint8_t u
, unsigned base
)
1223 midgard_tex_register_select sel
;
1224 memcpy(&sel
, &u
, sizeof(u
));
1229 fprintf(fp
, "r%u", base
+ sel
.select
);
1231 unsigned component
= sel
.component
;
1233 /* Use the upper half in half-reg mode */
1239 fprintf(fp
, ".%c", components
[component
]);
1241 assert(sel
.zero
== 0);
1245 print_texture_format(FILE *fp
, int format
)
1247 /* Act like a modifier */
1251 DEFINE_CASE(MALI_TEX_1D
, "1d");
1252 DEFINE_CASE(MALI_TEX_2D
, "2d");
1253 DEFINE_CASE(MALI_TEX_3D
, "3d");
1254 DEFINE_CASE(MALI_TEX_CUBE
, "cube");
1257 unreachable("Bad format");
1262 midgard_op_has_helpers(unsigned op
, bool gather
)
1268 case TEXTURE_OP_NORMAL
:
1269 case TEXTURE_OP_DFDX
:
1270 case TEXTURE_OP_DFDY
:
1278 print_texture_op(FILE *fp
, unsigned op
, bool gather
)
1280 /* Act like a bare name, like ESSL functions */
1283 fprintf(fp
, "textureGather");
1285 unsigned component
= op
>> 4;
1286 unsigned bottom
= op
& 0xF;
1289 fprintf(fp
, "_unk%u", bottom
);
1291 fprintf(fp
, ".%c", components
[component
]);
1296 DEFINE_CASE(TEXTURE_OP_NORMAL
, "texture");
1297 DEFINE_CASE(TEXTURE_OP_LOD
, "textureLod");
1298 DEFINE_CASE(TEXTURE_OP_TEXEL_FETCH
, "texelFetch");
1299 DEFINE_CASE(TEXTURE_OP_BARRIER
, "barrier");
1300 DEFINE_CASE(TEXTURE_OP_DFDX
, "dFdx");
1301 DEFINE_CASE(TEXTURE_OP_DFDY
, "dFdy");
1304 fprintf(fp
, "tex_%X", op
);
1310 texture_op_takes_bias(unsigned op
)
1312 return op
== TEXTURE_OP_NORMAL
;
1316 sampler_type_name(enum mali_sampler_type t
)
1319 case MALI_SAMPLER_FLOAT
:
1321 case MALI_SAMPLER_UNSIGNED
:
1323 case MALI_SAMPLER_SIGNED
:
1332 print_texture_barrier(FILE *fp
, uint32_t *word
)
1334 midgard_texture_barrier_word
*barrier
= (midgard_texture_barrier_word
*) word
;
1336 if (barrier
->type
!= TAG_TEXTURE_4_BARRIER
)
1337 fprintf(fp
, "/* barrier tag %X != tex/bar */ ", barrier
->type
);
1340 fprintf(fp
, "/* cont missing? */");
1343 fprintf(fp
, "/* last missing? */");
1346 fprintf(fp
, "/* zero1 = 0x%X */ ", barrier
->zero1
);
1349 fprintf(fp
, "/* zero2 = 0x%X */ ", barrier
->zero2
);
1352 fprintf(fp
, "/* zero3 = 0x%X */ ", barrier
->zero3
);
1355 fprintf(fp
, "/* zero4 = 0x%X */ ", barrier
->zero4
);
1358 fprintf(fp
, "/* zero4 = 0x%" PRIx64
" */ ", barrier
->zero5
);
1361 /* Control barriers are always implied, so include for obviousness */
1362 fprintf(fp
, " control");
1364 if (barrier
->buffer
)
1365 fprintf(fp
, " | buffer");
1367 if (barrier
->shared
)
1368 fprintf(fp
, " | shared");
1371 fprintf(fp
, " | stack");
1379 print_texture_word(FILE *fp
, uint32_t *word
, unsigned tabs
, unsigned in_reg_base
, unsigned out_reg_base
)
1381 midgard_texture_word
*texture
= (midgard_texture_word
*) word
;
1383 midg_stats
.helper_invocations
|=
1384 midgard_op_has_helpers(texture
->op
, texture
->is_gather
);
1386 /* Broad category of texture operation in question */
1387 print_texture_op(fp
, texture
->op
, texture
->is_gather
);
1389 /* Barriers use a dramatically different code path */
1390 if (texture
->op
== TEXTURE_OP_BARRIER
) {
1391 print_texture_barrier(fp
, word
);
1393 } else if (texture
->type
== TAG_TEXTURE_4_BARRIER
)
1394 fprintf (fp
, "/* nonbarrier had tex/bar tag */ ");
1395 else if (texture
->type
== TAG_TEXTURE_4_VTX
)
1396 fprintf (fp
, ".vtx");
1398 /* Specific format in question */
1399 print_texture_format(fp
, texture
->format
);
1401 /* Instruction "modifiers" parallel the ALU instructions. */
1403 if (texture
->shadow
)
1404 fprintf(fp
, ".shadow");
1407 fprintf(fp
, ".cont");
1410 fprintf(fp
, ".last");
1412 if (texture
->barrier_buffer
)
1413 fprintf(fp
, ".barrier_buffer /* XXX */");
1415 if (texture
->barrier_shared
)
1416 fprintf(fp
, ".barrier_shared /* XXX */");
1418 /* Output modifiers are always interpreted floatly */
1419 print_outmod(fp
, texture
->outmod
, false);
1421 fprintf(fp
, " %sr%u", texture
->out_full
? "" : "h",
1422 out_reg_base
+ texture
->out_reg_select
);
1423 print_mask_4(fp
, texture
->mask
, texture
->out_upper
);
1424 assert(!(texture
->out_full
&& texture
->out_upper
));
1427 /* Depending on whether we read from textures directly or indirectly,
1428 * we may be able to update our analysis */
1430 if (texture
->texture_register
) {
1431 fprintf(fp
, "texture[");
1432 print_texture_reg_select(fp
, texture
->texture_handle
, in_reg_base
);
1435 /* Indirect, tut tut */
1436 midg_stats
.texture_count
= -16;
1438 fprintf(fp
, "texture%u, ", texture
->texture_handle
);
1439 update_stats(&midg_stats
.texture_count
, texture
->texture_handle
);
1442 /* Print the type, GL style */
1443 fprintf(fp
, "%csampler", sampler_type_name(texture
->sampler_type
));
1445 if (texture
->sampler_register
) {
1447 print_texture_reg_select(fp
, texture
->sampler_handle
, in_reg_base
);
1450 midg_stats
.sampler_count
= -16;
1452 fprintf(fp
, "%u", texture
->sampler_handle
);
1453 update_stats(&midg_stats
.sampler_count
, texture
->sampler_handle
);
1456 print_swizzle_vec4(fp
, texture
->swizzle
, false, false, false);
1457 fprintf(fp
, ", %sr%u", texture
->in_reg_full
? "" : "h", in_reg_base
+ texture
->in_reg_select
);
1458 assert(!(texture
->in_reg_full
&& texture
->in_reg_upper
));
1460 /* TODO: integrate with swizzle */
1461 if (texture
->in_reg_upper
)
1464 print_swizzle_vec4(fp
, texture
->in_reg_swizzle
, false, false, false);
1466 /* There is *always* an offset attached. Of
1467 * course, that offset is just immediate #0 for a
1468 * GLES call that doesn't take an offset. If there
1469 * is a non-negative non-zero offset, this is
1470 * specified in immediate offset mode, with the
1471 * values in the offset_* fields as immediates. If
1472 * this is a negative offset, we instead switch to
1473 * a register offset mode, where the offset_*
1474 * fields become register triplets */
1476 if (texture
->offset_register
) {
1479 bool full
= texture
->offset
& 1;
1480 bool select
= texture
->offset
& 2;
1481 bool upper
= texture
->offset
& 4;
1483 fprintf(fp
, "%sr%u", full
? "" : "h", in_reg_base
+ select
);
1484 assert(!(texture
->out_full
&& texture
->out_upper
));
1486 /* TODO: integrate with swizzle */
1490 print_swizzle_vec4(fp
, texture
->offset
>> 3, false, false, false);
1493 } else if (texture
->offset
) {
1494 /* Only select ops allow negative immediate offsets, verify */
1496 signed offset_x
= (texture
->offset
& 0xF);
1497 signed offset_y
= ((texture
->offset
>> 4) & 0xF);
1498 signed offset_z
= ((texture
->offset
>> 8) & 0xF);
1500 bool neg_x
= offset_x
< 0;
1501 bool neg_y
= offset_y
< 0;
1502 bool neg_z
= offset_z
< 0;
1503 bool any_neg
= neg_x
|| neg_y
|| neg_z
;
1505 if (any_neg
&& texture
->op
!= TEXTURE_OP_TEXEL_FETCH
)
1506 fprintf(fp
, "/* invalid negative */ ");
1508 /* Regardless, just print the immediate offset */
1510 fprintf(fp
, " + <%d, %d, %d>, ", offset_x
, offset_y
, offset_z
);
1515 char lod_operand
= texture_op_takes_bias(texture
->op
) ? '+' : '=';
1517 if (texture
->lod_register
) {
1518 fprintf(fp
, "lod %c ", lod_operand
);
1519 print_texture_reg_select(fp
, texture
->bias
, in_reg_base
);
1522 if (texture
->bias_int
)
1523 fprintf(fp
, " /* bias_int = 0x%X */", texture
->bias_int
);
1524 } else if (texture
->op
== TEXTURE_OP_TEXEL_FETCH
) {
1525 /* For texel fetch, the int LOD is in the fractional place and
1526 * there is no fraction / possibility of bias. We *always* have
1527 * an explicit LOD, even if it's zero. */
1529 if (texture
->bias_int
)
1530 fprintf(fp
, " /* bias_int = 0x%X */ ", texture
->bias_int
);
1532 fprintf(fp
, "lod = %u, ", texture
->bias
);
1533 } else if (texture
->bias
|| texture
->bias_int
) {
1534 signed bias_int
= texture
->bias_int
;
1535 float bias_frac
= texture
->bias
/ 256.0f
;
1536 float bias
= bias_int
+ bias_frac
;
1538 bool is_bias
= texture_op_takes_bias(texture
->op
);
1539 char sign
= (bias
>= 0.0) ? '+' : '-';
1540 char operand
= is_bias
? sign
: '=';
1542 fprintf(fp
, "lod %c %f, ", operand
, fabsf(bias
));
1547 /* While not zero in general, for these simple instructions the
1548 * following unknowns are zero, so we don't include them */
1550 if (texture
->unknown4
||
1551 texture
->unknown8
) {
1552 fprintf(fp
, "// unknown4 = 0x%x\n", texture
->unknown4
);
1553 fprintf(fp
, "// unknown8 = 0x%x\n", texture
->unknown8
);
1556 midg_stats
.instruction_count
++;
1559 struct midgard_disasm_stats
1560 disassemble_midgard(FILE *fp
, uint8_t *code
, size_t size
, unsigned gpu_id
, gl_shader_stage stage
)
1562 uint32_t *words
= (uint32_t *) code
;
1563 unsigned num_words
= size
/ 4;
1566 bool branch_forward
= false;
1568 int last_next_tag
= -1;
1572 midg_tags
= calloc(sizeof(midg_tags
[0]), num_words
);
1574 /* Stats for shader-db */
1575 memset(&midg_stats
, 0, sizeof(midg_stats
));
1576 midg_ever_written
= 0;
1578 while (i
< num_words
) {
1579 unsigned tag
= words
[i
] & 0xF;
1580 unsigned next_tag
= (words
[i
] >> 4) & 0xF;
1581 unsigned num_quad_words
= midgard_tag_props
[tag
].size
;
1583 if (midg_tags
[i
] && midg_tags
[i
] != tag
) {
1584 fprintf(fp
, "\t/* XXX: TAG ERROR branch, got %s expected %s */\n",
1585 midgard_tag_props
[tag
].name
,
1586 midgard_tag_props
[midg_tags
[i
]].name
);
1591 /* Check the tag. The idea is to ensure that next_tag is
1592 * *always* recoverable from the disassembly, such that we may
1593 * safely omit printing next_tag. To show this, we first
1594 * consider that next tags are semantically off-by-one -- we end
1595 * up parsing tag n during step n+1. So, we ensure after we're
1596 * done disassembling the next tag of the final bundle is BREAK
1597 * and warn otherwise. We also ensure that the next tag is
1598 * never INVALID. Beyond that, since the last tag is checked
1599 * outside the loop, we can check one tag prior. If equal to
1600 * the current tag (which is unique), we're done. Otherwise, we
1601 * print if that tag was > TAG_BREAK, which implies the tag was
1602 * not TAG_BREAK or TAG_INVALID. But we already checked for
1603 * TAG_INVALID, so it's just if the last tag was TAG_BREAK that
1604 * we're silent. So we throw in a print for break-next on at
1605 * the end of the bundle (if it's not the final bundle, which
1606 * we already check for above), disambiguating this case as
1607 * well. Hence in all cases we are unambiguous, QED. */
1609 if (next_tag
== TAG_INVALID
)
1610 fprintf(fp
, "\t/* XXX: invalid next tag */\n");
1612 if (last_next_tag
> TAG_BREAK
&& last_next_tag
!= tag
) {
1613 fprintf(fp
, "\t/* XXX: TAG ERROR sequence, got %s expexted %s */\n",
1614 midgard_tag_props
[tag
].name
,
1615 midgard_tag_props
[last_next_tag
].name
);
1618 last_next_tag
= next_tag
;
1620 /* Tags are unique in the following way:
1622 * INVALID, BREAK, UNKNOWN_*: verbosely printed
1623 * TEXTURE_4_BARRIER: verified by barrier/!barrier op
1624 * TEXTURE_4_VTX: .vtx tag printed
1625 * TEXTURE_4: texture lack of barriers or .vtx
1626 * TAG_LOAD_STORE_4: only load/store
1627 * TAG_ALU_4/8/12/16: by number of instructions/constants
1628 * TAG_ALU_4_8/12/16_WRITEOUT: ^^ with .writeout tag
1632 case TAG_TEXTURE_4_VTX
... TAG_TEXTURE_4_BARRIER
: {
1633 bool interpipe_aliasing
=
1634 midgard_get_quirks(gpu_id
) & MIDGARD_INTERPIPE_REG_ALIASING
;
1636 print_texture_word(fp
, &words
[i
], tabs
,
1637 interpipe_aliasing
? 0 : REG_TEX_BASE
,
1638 interpipe_aliasing
? REGISTER_LDST_BASE
: REG_TEX_BASE
);
1642 case TAG_LOAD_STORE_4
:
1643 print_load_store_word(fp
, &words
[i
], tabs
);
1646 case TAG_ALU_4
... TAG_ALU_16_WRITEOUT
:
1647 branch_forward
= print_alu_word(fp
, &words
[i
], num_quad_words
, tabs
, i
+ 4*num_quad_words
);
1649 /* Reset word static analysis state */
1650 is_embedded_constant_half
= false;
1651 is_embedded_constant_int
= false;
1653 /* TODO: infer/verify me */
1654 if (tag
>= TAG_ALU_4_WRITEOUT
)
1655 fprintf(fp
, "writeout\n");
1660 fprintf(fp
, "Unknown word type %u:\n", words
[i
] & 0xF);
1662 print_quad_word(fp
, &words
[i
], tabs
);
1667 /* We are parsing per bundle anyway. Add before we start
1668 * breaking out so we don't miss the final bundle. */
1670 midg_stats
.bundle_count
++;
1671 midg_stats
.quadword_count
+= num_quad_words
;
1673 /* Include a synthetic "break" instruction at the end of the
1674 * bundle to signify that if, absent a branch, the shader
1675 * execution will stop here. Stop disassembly at such a break
1676 * based on a heuristic */
1678 if (next_tag
== TAG_BREAK
) {
1679 if (branch_forward
) {
1680 fprintf(fp
, "break\n");
1689 i
+= 4 * num_quad_words
;
1692 if (last_next_tag
!= TAG_BREAK
) {
1693 fprintf(fp
, "/* XXX: shader ended with tag %s */\n",
1694 midgard_tag_props
[last_next_tag
].name
);
1699 /* We computed work_count as max_work_registers, so add one to get the
1700 * count. If no work registers are written, you still have one work
1701 * reported, which is exactly what the hardware expects */
1703 midg_stats
.work_count
++;