5 * Copyright (c) 2013 Connor Abbott (connor@abbott.cx)
6 * Copyright (c) 2018 Alyssa Rosenzweig (alyssa@rosenzweig.io)
8 * Permission is hereby granted, free of charge, to any person obtaining a copy
9 * of this software and associated documentation files (the "Software"), to deal
10 * in the Software without restriction, including without limitation the rights
11 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
12 * copies of the Software, and to permit persons to whom the Software is
13 * furnished to do so, subject to the following conditions:
15 * The above copyright notice and this permission notice shall be included in
16 * all copies or substantial portions of the Software.
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
21 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
23 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
35 #include "midgard_ops.h"
36 #include "midgard_quirks.h"
37 #include "disassemble.h"
39 #include "util/bitscan.h"
40 #include "util/half_float.h"
41 #include "util/u_math.h"
43 #define DEFINE_CASE(define, str) case define: { fprintf(fp, str); break; }
45 static unsigned *midg_tags
;
46 static bool is_instruction_int
= false;
50 static struct midgard_disasm_stats midg_stats
;
/* Transform an expanded writemask (duplicated 8-bit format) into its condensed
 * form (one bit per component) */

static inline unsigned
condense_writemask(unsigned expanded_mask,
                   unsigned bits_per_component)
{
        if (bits_per_component == 8) {
                /* Duplicate every bit to go from 8 to 16-channel wrmask */
                unsigned omask = 0;

                for (unsigned i = 0; i < 8; ++i) {
                        if (expanded_mask & (1 << i))
                                omask |= (3 << (2 * i));
                }

                return omask;
        }

        /* Each component occupies bits_per_component/16 slots of the 8-slot
         * expanded mask; sample the first slot of each component */
        unsigned slots_per_component = bits_per_component / 16;
        unsigned max_comp = (16 * 8) / bits_per_component;
        unsigned condensed_mask = 0;

        for (unsigned i = 0; i < max_comp; ++i) {
                if (expanded_mask & (1 << (i * slots_per_component)))
                        condensed_mask |= (1 << i);
        }

        return condensed_mask;
}
84 print_alu_opcode(FILE *fp
, midgard_alu_op op
)
88 if (alu_opcode_props
[op
].name
) {
89 fprintf(fp
, "%s", alu_opcode_props
[op
].name
);
91 int_op
= midgard_is_integer_op(op
);
93 fprintf(fp
, "alu_op_%02X", op
);
95 /* For constant analysis */
96 is_instruction_int
= int_op
;
100 print_ld_st_opcode(FILE *fp
, midgard_load_store_op op
)
102 if (load_store_opcode_props
[op
].name
)
103 fprintf(fp
, "%s", load_store_opcode_props
[op
].name
);
105 fprintf(fp
, "ldst_op_%02X", op
);
108 static bool is_embedded_constant_half
= false;
109 static bool is_embedded_constant_int
= false;
112 prefix_for_bits(unsigned bits
)
126 /* For static analysis to ensure all registers are written at least once before
127 * use along the source code path (TODO: does this break done for complex CF?)
130 uint16_t midg_ever_written
= 0;
133 print_reg(FILE *fp
, unsigned reg
, unsigned bits
)
135 /* Perform basic static analysis for expanding constants correctly */
138 is_embedded_constant_int
= is_instruction_int
;
139 is_embedded_constant_half
= (bits
< 32);
142 unsigned uniform_reg
= 23 - reg
;
143 bool is_uniform
= false;
145 /* For r8-r15, it could be a work or uniform. We distinguish based on
146 * the fact work registers are ALWAYS written before use, but uniform
147 * registers are NEVER written before use. */
149 if ((reg
>= 8 && reg
< 16) && !(midg_ever_written
& (1 << reg
)))
152 /* r16-r23 are always uniform */
154 if (reg
>= 16 && reg
<= 23)
157 /* Update the uniform count appropriately */
160 midg_stats
.uniform_count
=
161 MAX2(uniform_reg
+ 1, midg_stats
.uniform_count
);
163 char prefix
= prefix_for_bits(bits
);
168 fprintf(fp
, "r%u", reg
);
171 static char *outmod_names_float
[4] = {
178 static char *outmod_names_int
[4] = {
185 static char *srcmod_names_int
[4] = {
193 print_outmod(FILE *fp
, unsigned outmod
, bool is_int
)
195 fprintf(fp
, "%s", is_int
? outmod_names_int
[outmod
] :
196 outmod_names_float
[outmod
]);
/* Dumps a 128-bit (quad) word as four comma-separated 32-bit hex words.
 * NOTE(review): trailing newline reconstructed from mangled source --
 * confirm against the original. */

static void
print_quad_word(FILE *fp, uint32_t *words, unsigned tabs)
{
        unsigned i;

        for (i = 0; i < 4; i++)
                fprintf(fp, "0x%08X%s ", words[i], i == 3 ? "" : ",");

        fprintf(fp, "\n");
}
210 static const char components
[16] = "xyzwefghijklmnop";
212 /* Helper to print 4 chars of a swizzle */
214 print_swizzle_helper(FILE *fp
, unsigned swizzle
, unsigned offset
)
216 for (unsigned i
= 0; i
< 4; ++i
) {
217 unsigned c
= (swizzle
>> (i
* 2)) & 3;
219 fprintf(fp
, "%c", components
[c
]);
223 /* Helper to print 8 chars of a swizzle, duplicating over */
225 print_swizzle_helper_8(FILE *fp
, unsigned swizzle
, bool upper
)
227 for (unsigned i
= 0; i
< 4; ++i
) {
228 unsigned c
= (swizzle
>> (i
* 2)) & 3;
231 fprintf(fp
, "%c%c", components
[c
], components
[c
+1]);
236 print_swizzle_vec16(FILE *fp
, unsigned swizzle
, bool rep_high
, bool rep_low
,
237 midgard_dest_override override
)
241 if (override
== midgard_dest_override_upper
) {
243 fprintf(fp
, " /* rep_high */ ");
245 fprintf(fp
, " /* rep_low */ ");
247 if (!rep_high
&& rep_low
)
248 print_swizzle_helper_8(fp
, swizzle
, true);
250 print_swizzle_helper_8(fp
, swizzle
, false);
252 print_swizzle_helper_8(fp
, swizzle
, rep_high
& 1);
253 print_swizzle_helper_8(fp
, swizzle
, !(rep_low
& 1));
258 print_swizzle_vec8(FILE *fp
, unsigned swizzle
, bool rep_high
, bool rep_low
, bool half
)
262 /* TODO: Is it possible to unify half/full? */
265 print_swizzle_helper(fp
, swizzle
, (rep_low
* 8));
266 print_swizzle_helper(fp
, swizzle
, (rep_low
* 8) + !rep_high
* 4);
268 print_swizzle_helper(fp
, swizzle
, rep_high
* 4);
269 print_swizzle_helper(fp
, swizzle
, !rep_low
* 4);
274 print_swizzle_vec4(FILE *fp
, unsigned swizzle
, bool rep_high
, bool rep_low
, bool half
)
277 fprintf(fp
, " /* rep_high */ ");
279 if (!half
&& rep_low
)
280 fprintf(fp
, " /* rep_low */ ");
282 if (swizzle
== 0xE4 && !half
) return; /* xyzw */
285 print_swizzle_helper(fp
, swizzle
, rep_low
* 4);
288 print_swizzle_vec2(FILE *fp
, unsigned swizzle
, bool rep_high
, bool rep_low
, bool half
)
290 char *alphabet
= "XY";
293 alphabet
= rep_low
? "zw" : "xy";
295 fprintf(fp
, " /* rep_low */ ");
298 fprintf(fp
, " /* rep_high */ ");
300 if (swizzle
== 0xE4 && !half
) return; /* XY */
304 for (unsigned i
= 0; i
< 4; i
+= 2) {
305 unsigned a
= (swizzle
>> (i
* 2)) & 3;
306 unsigned b
= (swizzle
>> ((i
+1) * 2)) & 3;
308 /* Normally we're adjacent, but if there's an issue, don't make
312 fprintf(fp
, "%c", alphabet
[a
>> 1]);
314 fprintf(fp
, "[%c%c]", components
[a
], components
[b
]);
319 bits_for_mode(midgard_reg_mode mode
)
322 case midgard_reg_mode_8
:
324 case midgard_reg_mode_16
:
326 case midgard_reg_mode_32
:
328 case midgard_reg_mode_64
:
331 unreachable("Invalid reg mode");
337 bits_for_mode_halved(midgard_reg_mode mode
, bool half
)
339 unsigned bits
= bits_for_mode(mode
);
348 print_scalar_constant(FILE *fp
, unsigned src_binary
,
349 const midgard_constants
*consts
,
350 midgard_scalar_alu
*alu
)
352 midgard_scalar_alu_src
*src
= (midgard_scalar_alu_src
*)&src_binary
;
353 assert(consts
!= NULL
);
356 mir_print_constant_component(fp
, consts
, src
->component
,
358 midgard_reg_mode_32
: midgard_reg_mode_16
,
359 false, src
->mod
, alu
->op
);
363 print_vector_constants(FILE *fp
, unsigned src_binary
,
364 const midgard_constants
*consts
,
365 midgard_vector_alu
*alu
)
367 midgard_vector_alu_src
*src
= (midgard_vector_alu_src
*)&src_binary
;
368 unsigned bits
= bits_for_mode_halved(alu
->reg_mode
, src
->half
);
369 unsigned max_comp
= (sizeof(*consts
) * 8) / bits
;
370 unsigned comp_mask
, num_comp
= 0;
373 assert(max_comp
<= 16);
375 comp_mask
= effective_writemask(alu
, condense_writemask(alu
->mask
, bits
));
376 num_comp
= util_bitcount(comp_mask
);
381 for (unsigned i
= 0; i
< max_comp
; ++i
) {
382 if (!(comp_mask
& (1 << i
))) continue;
384 unsigned c
= (src
->swizzle
>> (i
* 2)) & 3;
386 if (bits
== 16 && !src
->half
) {
388 c
+= (src
->rep_high
* 4);
390 c
+= (!src
->rep_low
* 4);
391 } else if (bits
== 32 && !src
->half
) {
393 } else if (bits
== 8) {
395 unsigned index
= (i
>> 1) & 3;
396 unsigned base
= (src
->swizzle
>> (index
* 2)) & 3;
400 c
+= (src
->rep_high
) * 8;
402 c
+= (!src
->rep_low
) * 8;
404 /* We work on twos, actually */
408 printf(" (%d%d%d)", src
->rep_low
, src
->rep_high
, src
->half
);
416 mir_print_constant_component(fp
, consts
, c
, alu
->reg_mode
,
417 src
->half
, src
->mod
, alu
->op
);
425 print_srcmod(FILE *fp
, bool is_int
, unsigned mod
, bool scalar
)
427 /* Modifiers change meaning depending on the op's context */
429 midgard_int_mod int_mod
= mod
;
432 if (scalar
&& mod
== 2) {
436 fprintf(fp
, "%s", srcmod_names_int
[int_mod
]);
438 if (mod
& MIDGARD_FLOAT_MOD_NEG
)
441 if (mod
& MIDGARD_FLOAT_MOD_ABS
)
447 print_srcmod_end(FILE *fp
, bool is_int
, unsigned mod
, unsigned bits
)
449 /* Since we wrapped with a function-looking thing */
451 if (is_int
&& mod
== midgard_int_shift
)
452 fprintf(fp
, ") << %u", bits
);
453 else if ((is_int
&& (mod
!= midgard_int_normal
))
454 || (!is_int
&& mod
& MIDGARD_FLOAT_MOD_ABS
))
459 print_vector_src(FILE *fp
, unsigned src_binary
,
460 midgard_reg_mode mode
, unsigned reg
,
461 midgard_dest_override override
, bool is_int
)
463 midgard_vector_alu_src
*src
= (midgard_vector_alu_src
*)&src_binary
;
464 print_srcmod(fp
, is_int
, src
->mod
, false);
467 unsigned bits
= bits_for_mode_halved(mode
, src
->half
);
468 print_reg(fp
, reg
, bits
);
470 /* When the source was stepped down via `half`, rep_low means "higher
471 * half" and rep_high is never seen. When it's not native,
472 * rep_low/rep_high are for, well, replication */
474 if (mode
== midgard_reg_mode_8
) {
476 print_swizzle_vec16(fp
, src
->swizzle
, src
->rep_high
, src
->rep_low
, override
);
477 } else if (mode
== midgard_reg_mode_16
) {
478 print_swizzle_vec8(fp
, src
->swizzle
, src
->rep_high
, src
->rep_low
, src
->half
);
479 } else if (mode
== midgard_reg_mode_32
) {
480 print_swizzle_vec4(fp
, src
->swizzle
, src
->rep_high
, src
->rep_low
, src
->half
);
481 } else if (mode
== midgard_reg_mode_64
) {
482 print_swizzle_vec2(fp
, src
->swizzle
, src
->rep_high
, src
->rep_low
, src
->half
);
485 print_srcmod_end(fp
, is_int
, src
->mod
, bits
);
/* Reassembles the 16-bit inline immediate that is scattered across the src2
 * register field (top 5 bits) and the vector src bits. */

static uint16_t
decode_vector_imm(unsigned src2_reg, unsigned imm)
{
        uint16_t ret;
        ret = src2_reg << 11;
        ret |= (imm & 0x7) << 8;
        ret |= (imm >> 3) & 0xFF;

        return ret;
}
499 print_immediate(FILE *fp
, uint16_t imm
)
501 if (is_instruction_int
)
502 fprintf(fp
, "#%u", imm
);
504 fprintf(fp
, "#%g", _mesa_half_to_float(imm
));
508 update_dest(unsigned reg
)
510 /* We should record writes as marking this as a work register. Store
511 * the max register in work_count; we'll add one at the end */
514 midg_stats
.work_count
= MAX2(reg
, midg_stats
.work_count
);
515 midg_ever_written
|= (1 << reg
);
520 print_dest(FILE *fp
, unsigned reg
, midgard_reg_mode mode
, midgard_dest_override override
)
522 /* Depending on the mode and override, we determine the type of
523 * destination addressed. Absent an override, we address just the
524 * type of the operation itself */
526 unsigned bits
= bits_for_mode(mode
);
528 if (override
!= midgard_dest_override_none
)
532 print_reg(fp
, reg
, bits
);
536 print_mask_vec16(FILE *fp
, uint8_t mask
, midgard_dest_override override
)
540 for (unsigned i
= 0; i
< 8; i
++) {
544 components
[i
*2 + 1]);
548 /* For 16-bit+ masks, we read off from the 8-bit mask field. For 16-bit (vec8),
549 * it's just one bit per channel, easy peasy. For 32-bit (vec4), it's one bit
550 * per channel with one duplicate bit in the middle. For 64-bit (vec2), it's
551 * one-bit per channel with _3_ duplicate bits in the middle. Basically, just
552 * subdividing the 128-bit word in 16-bit increments. For 64-bit, we uppercase
553 * the mask to make it obvious what happened */
556 print_mask(FILE *fp
, uint8_t mask
, unsigned bits
, midgard_dest_override override
)
559 print_mask_vec16(fp
, mask
, override
);
563 /* Skip 'complete' masks */
565 if (override
== midgard_dest_override_none
)
566 if (bits
>= 32 && mask
== 0xFF) return;
570 unsigned skip
= (bits
/ 16);
571 bool uppercase
= bits
> 32;
572 bool tripped
= false;
574 /* To apply an upper destination override, we "shift" the alphabet.
575 * E.g. with an upper override on 32-bit, instead of xyzw, print efgh.
576 * For upper 16-bit, instead of xyzwefgh, print ijklmnop */
578 const char *alphabet
= components
;
580 if (override
== midgard_dest_override_upper
)
581 alphabet
+= (128 / bits
);
583 for (unsigned i
= 0; i
< 8; i
+= skip
) {
584 bool a
= (mask
& (1 << i
)) != 0;
586 for (unsigned j
= 1; j
< skip
; ++j
) {
587 bool dupe
= (mask
& (1 << (i
+ j
))) != 0;
588 tripped
|= (dupe
!= a
);
592 char c
= alphabet
[i
/ skip
];
597 fprintf(fp
, "%c", c
);
602 fprintf(fp
, " /* %X */", mask
);
605 /* Prints the 4-bit masks found in texture and load/store ops, as opposed to
606 * the 8-bit masks found in (vector) ALU ops. Supports texture-style 16-bit
607 * mode as well, but not load/store-style 16-bit mode. */
610 print_mask_4(FILE *fp
, unsigned mask
, bool upper
)
621 for (unsigned i
= 0; i
< 4; ++i
) {
622 bool a
= (mask
& (1 << i
)) != 0;
624 fprintf(fp
, "%c", components
[i
+ (upper
? 4 : 0)]);
629 print_vector_field(FILE *fp
, const char *name
, uint16_t *words
, uint16_t reg_word
,
630 const midgard_constants
*consts
, unsigned tabs
)
632 midgard_reg_info
*reg_info
= (midgard_reg_info
*)®_word
;
633 midgard_vector_alu
*alu_field
= (midgard_vector_alu
*) words
;
634 midgard_reg_mode mode
= alu_field
->reg_mode
;
635 unsigned override
= alu_field
->dest_override
;
637 /* For now, prefix instruction names with their unit, until we
638 * understand how this works on a deeper level */
639 fprintf(fp
, "%s.", name
);
641 print_alu_opcode(fp
, alu_field
->op
);
643 /* Postfix with the size to disambiguate if necessary */
644 char postfix
= prefix_for_bits(bits_for_mode(mode
));
645 bool size_ambiguous
= override
!= midgard_dest_override_none
;
648 fprintf(fp
, "%c", postfix
? postfix
: 'r');
650 /* Print the outmod, if there is one */
651 print_outmod(fp
, alu_field
->outmod
,
652 midgard_is_integer_out_op(alu_field
->op
));
656 /* Mask denoting status of 8-lanes */
657 uint8_t mask
= alu_field
->mask
;
659 /* First, print the destination */
660 print_dest(fp
, reg_info
->out_reg
, mode
, alu_field
->dest_override
);
662 if (override
!= midgard_dest_override_none
) {
663 bool modeable
= (mode
!= midgard_reg_mode_8
);
664 bool known
= override
!= 0x3; /* Unused value */
666 if (!(modeable
&& known
))
667 fprintf(fp
, "/* do%u */ ", override
);
670 /* Instructions like fdot4 do *not* replicate, ensure the
671 * mask is of only a single component */
673 unsigned rep
= GET_CHANNEL_COUNT(alu_opcode_props
[alu_field
->op
].props
);
676 unsigned comp_mask
= condense_writemask(mask
, bits_for_mode(mode
));
677 unsigned num_comp
= util_bitcount(comp_mask
);
679 fprintf(fp
, "/* err too many components */");
681 print_mask(fp
, mask
, bits_for_mode(mode
), override
);
685 bool is_int
= midgard_is_integer_op(alu_field
->op
);
687 if (reg_info
->src1_reg
== 26)
688 print_vector_constants(fp
, alu_field
->src1
, consts
, alu_field
);
690 print_vector_src(fp
, alu_field
->src1
, mode
, reg_info
->src1_reg
, override
, is_int
);
694 if (reg_info
->src2_imm
) {
695 uint16_t imm
= decode_vector_imm(reg_info
->src2_reg
, alu_field
->src2
>> 2);
696 print_immediate(fp
, imm
);
697 } else if (reg_info
->src2_reg
== 26) {
698 print_vector_constants(fp
, alu_field
->src2
, consts
, alu_field
);
700 print_vector_src(fp
, alu_field
->src2
, mode
,
701 reg_info
->src2_reg
, override
, is_int
);
704 midg_stats
.instruction_count
++;
709 print_scalar_src(FILE *fp
, bool is_int
, unsigned src_binary
, unsigned reg
)
711 midgard_scalar_alu_src
*src
= (midgard_scalar_alu_src
*)&src_binary
;
713 print_srcmod(fp
, is_int
, src
->mod
, true);
714 print_reg(fp
, reg
, src
->full
? 32 : 16);
716 unsigned c
= src
->component
;
719 assert((c
& 1) == 0);
723 fprintf(fp
, ".%c", components
[c
]);
725 print_srcmod_end(fp
, is_int
, src
->mod
, src
->full
? 32 : 16);
/* Reassembles the 16-bit inline immediate scattered across the src2 register
 * field and the scalar src bits.
 * NOTE(review): the trailing `ret |= 0x40` was missing from the mangled
 * source and is reconstructed from the original's line spacing -- confirm. */

static uint16_t
decode_scalar_imm(unsigned src2_reg, unsigned imm)
{
        uint16_t ret;
        ret = src2_reg << 11;
        ret |= (imm & 3) << 9;
        ret |= (imm & 4) << 6;
        ret |= (imm & 0x38) << 2;
        ret |= 0x40;

        return ret;
}
741 print_scalar_field(FILE *fp
, const char *name
, uint16_t *words
, uint16_t reg_word
,
742 const midgard_constants
*consts
, unsigned tabs
)
744 midgard_reg_info
*reg_info
= (midgard_reg_info
*)®_word
;
745 midgard_scalar_alu
*alu_field
= (midgard_scalar_alu
*) words
;
747 if (alu_field
->unknown
)
748 fprintf(fp
, "scalar ALU unknown bit set\n");
750 fprintf(fp
, "%s.", name
);
751 print_alu_opcode(fp
, alu_field
->op
);
752 print_outmod(fp
, alu_field
->outmod
,
753 midgard_is_integer_out_op(alu_field
->op
));
756 bool full
= alu_field
->output_full
;
757 update_dest(reg_info
->out_reg
);
758 print_reg(fp
, reg_info
->out_reg
, full
? 32 : 16);
759 unsigned c
= alu_field
->output_component
;
760 bool is_int
= midgard_is_integer_op(alu_field
->op
);
763 assert((c
& 1) == 0);
767 fprintf(fp
, ".%c, ", components
[c
]);
769 if (reg_info
->src1_reg
== 26)
770 print_scalar_constant(fp
, alu_field
->src1
, consts
, alu_field
);
772 print_scalar_src(fp
, is_int
, alu_field
->src1
, reg_info
->src1_reg
);
776 if (reg_info
->src2_imm
) {
777 uint16_t imm
= decode_scalar_imm(reg_info
->src2_reg
,
779 print_immediate(fp
, imm
);
780 } else if (reg_info
->src2_reg
== 26) {
781 print_scalar_constant(fp
, alu_field
->src2
, consts
, alu_field
);
783 print_scalar_src(fp
, is_int
, alu_field
->src2
, reg_info
->src2_reg
);
785 midg_stats
.instruction_count
++;
790 print_branch_op(FILE *fp
, unsigned op
)
793 case midgard_jmp_writeout_op_branch_uncond
:
794 fprintf(fp
, "uncond.");
797 case midgard_jmp_writeout_op_branch_cond
:
798 fprintf(fp
, "cond.");
801 case midgard_jmp_writeout_op_writeout
:
802 fprintf(fp
, "write.");
805 case midgard_jmp_writeout_op_tilebuffer_pending
:
806 fprintf(fp
, "tilebuffer.");
809 case midgard_jmp_writeout_op_discard
:
810 fprintf(fp
, "discard.");
814 fprintf(fp
, "unk%u.", op
);
820 print_branch_cond(FILE *fp
, int cond
)
823 case midgard_condition_write0
:
824 fprintf(fp
, "write0");
827 case midgard_condition_false
:
828 fprintf(fp
, "false");
831 case midgard_condition_true
:
835 case midgard_condition_always
:
836 fprintf(fp
, "always");
840 fprintf(fp
, "unk%X", cond
);
846 print_compact_branch_writeout_field(FILE *fp
, uint16_t word
)
848 midgard_jmp_writeout_op op
= word
& 0x7;
849 midg_stats
.instruction_count
++;
852 case midgard_jmp_writeout_op_branch_uncond
: {
853 midgard_branch_uncond br_uncond
;
854 memcpy((char *) &br_uncond
, (char *) &word
, sizeof(br_uncond
));
855 fprintf(fp
, "br.uncond ");
857 if (br_uncond
.unknown
!= 1)
858 fprintf(fp
, "unknown:%u, ", br_uncond
.unknown
);
860 if (br_uncond
.offset
>= 0)
863 fprintf(fp
, "%d -> %s", br_uncond
.offset
,
864 midgard_tag_props
[br_uncond
.dest_tag
].name
);
867 return br_uncond
.offset
>= 0;
870 case midgard_jmp_writeout_op_branch_cond
:
871 case midgard_jmp_writeout_op_writeout
:
872 case midgard_jmp_writeout_op_discard
:
874 midgard_branch_cond br_cond
;
875 memcpy((char *) &br_cond
, (char *) &word
, sizeof(br_cond
));
879 print_branch_op(fp
, br_cond
.op
);
880 print_branch_cond(fp
, br_cond
.cond
);
884 if (br_cond
.offset
>= 0)
887 fprintf(fp
, "%d -> %s", br_cond
.offset
,
888 midgard_tag_props
[br_cond
.dest_tag
].name
);
891 return br_cond
.offset
>= 0;
899 print_extended_branch_writeout_field(FILE *fp
, uint8_t *words
, unsigned next
)
901 midgard_branch_extended br
;
902 memcpy((char *) &br
, (char *) words
, sizeof(br
));
906 print_branch_op(fp
, br
.op
);
908 /* Condition codes are a LUT in the general case, but simply repeated 8 times for single-channel conditions.. Check this. */
910 bool single_channel
= true;
912 for (unsigned i
= 0; i
< 16; i
+= 2) {
913 single_channel
&= (((br
.cond
>> i
) & 0x3) == (br
.cond
& 0x3));
917 print_branch_cond(fp
, br
.cond
& 0x3);
919 fprintf(fp
, "lut%X", br
.cond
);
922 fprintf(fp
, ".unknown%u", br
.unknown
);
929 fprintf(fp
, "%d -> %s\n", br
.offset
,
930 midgard_tag_props
[br
.dest_tag
].name
);
932 unsigned I
= next
+ br
.offset
* 4;
934 if (midg_tags
[I
] && midg_tags
[I
] != br
.dest_tag
) {
935 fprintf(fp
, "\t/* XXX TAG ERROR: jumping to %s but tagged %s \n",
936 midgard_tag_props
[br
.dest_tag
].name
,
937 midgard_tag_props
[midg_tags
[I
]].name
);
940 midg_tags
[I
] = br
.dest_tag
;
942 midg_stats
.instruction_count
++;
943 return br
.offset
>= 0;
/* Counts how many of the five ALU execution fields (bits 17/19/21/23/25 of
 * the bundle control word: vmul, sadd, vadd, smul, lut) are enabled. */

static unsigned
num_alu_fields_enabled(uint32_t control_word)
{
        unsigned ret = 0;

        if ((control_word >> 17) & 1)
                ret++;

        if ((control_word >> 19) & 1)
                ret++;

        if ((control_word >> 21) & 1)
                ret++;

        if ((control_word >> 23) & 1)
                ret++;

        if ((control_word >> 25) & 1)
                ret++;

        return ret;
}
970 print_alu_word(FILE *fp
, uint32_t *words
, unsigned num_quad_words
,
971 unsigned tabs
, unsigned next
)
973 uint32_t control_word
= words
[0];
974 uint16_t *beginning_ptr
= (uint16_t *)(words
+ 1);
975 unsigned num_fields
= num_alu_fields_enabled(control_word
);
976 uint16_t *word_ptr
= beginning_ptr
+ num_fields
;
977 unsigned num_words
= 2 + num_fields
;
978 const midgard_constants
*consts
= NULL
;
979 bool branch_forward
= false;
981 if ((control_word
>> 17) & 1)
984 if ((control_word
>> 19) & 1)
987 if ((control_word
>> 21) & 1)
990 if ((control_word
>> 23) & 1)
993 if ((control_word
>> 25) & 1)
996 if ((control_word
>> 26) & 1)
999 if ((control_word
>> 27) & 1)
1002 if (num_quad_words
> (num_words
+ 7) / 8) {
1003 assert(num_quad_words
== (num_words
+ 15) / 8);
1004 //Assume that the extra quadword is constants
1005 consts
= (midgard_constants
*)(words
+ (4 * num_quad_words
- 4));
1008 if ((control_word
>> 16) & 1)
1009 fprintf(fp
, "unknown bit 16 enabled\n");
1011 if ((control_word
>> 17) & 1) {
1012 print_vector_field(fp
, "vmul", word_ptr
, *beginning_ptr
, consts
, tabs
);
1017 if ((control_word
>> 18) & 1)
1018 fprintf(fp
, "unknown bit 18 enabled\n");
1020 if ((control_word
>> 19) & 1) {
1021 print_scalar_field(fp
, "sadd", word_ptr
, *beginning_ptr
, consts
, tabs
);
1026 if ((control_word
>> 20) & 1)
1027 fprintf(fp
, "unknown bit 20 enabled\n");
1029 if ((control_word
>> 21) & 1) {
1030 print_vector_field(fp
, "vadd", word_ptr
, *beginning_ptr
, consts
, tabs
);
1035 if ((control_word
>> 22) & 1)
1036 fprintf(fp
, "unknown bit 22 enabled\n");
1038 if ((control_word
>> 23) & 1) {
1039 print_scalar_field(fp
, "smul", word_ptr
, *beginning_ptr
, consts
, tabs
);
1044 if ((control_word
>> 24) & 1)
1045 fprintf(fp
, "unknown bit 24 enabled\n");
1047 if ((control_word
>> 25) & 1) {
1048 print_vector_field(fp
, "lut", word_ptr
, *beginning_ptr
, consts
, tabs
);
1052 if ((control_word
>> 26) & 1) {
1053 branch_forward
|= print_compact_branch_writeout_field(fp
, *word_ptr
);
1057 if ((control_word
>> 27) & 1) {
1058 branch_forward
|= print_extended_branch_writeout_field(fp
, (uint8_t *) word_ptr
, next
);
1063 fprintf(fp
, "uconstants 0x%X, 0x%X, 0x%X, 0x%X\n",
1064 consts
->u32
[0], consts
->u32
[1],
1065 consts
->u32
[2], consts
->u32
[3]);
1067 return branch_forward
;
1071 print_varying_parameters(FILE *fp
, midgard_load_store_word
*word
)
1073 midgard_varying_parameter param
;
1074 unsigned v
= word
->varying_parameters
;
1075 memcpy(¶m
, &v
, sizeof(param
));
1077 if (param
.is_varying
) {
1078 /* If a varying, there are qualifiers */
1080 fprintf(fp
, ".flat");
1082 if (param
.interpolation
!= midgard_interp_default
) {
1083 if (param
.interpolation
== midgard_interp_centroid
)
1084 fprintf(fp
, ".centroid");
1085 else if (param
.interpolation
== midgard_interp_sample
)
1086 fprintf(fp
, ".sample");
1088 fprintf(fp
, ".interp%d", param
.interpolation
);
1091 if (param
.modifier
!= midgard_varying_mod_none
) {
1092 if (param
.modifier
== midgard_varying_mod_perspective_w
)
1093 fprintf(fp
, ".perspectivew");
1094 else if (param
.modifier
== midgard_varying_mod_perspective_z
)
1095 fprintf(fp
, ".perspectivez");
1097 fprintf(fp
, ".mod%d", param
.modifier
);
1099 } else if (param
.flat
|| param
.interpolation
|| param
.modifier
) {
1100 fprintf(fp
, " /* is_varying not set but varying metadata attached */");
1103 if (param
.zero0
|| param
.zero1
|| param
.zero2
)
1104 fprintf(fp
, " /* zero tripped, %u %u %u */ ", param
.zero0
, param
.zero1
, param
.zero2
);
1108 is_op_varying(unsigned op
)
1111 case midgard_op_st_vary_16
:
1112 case midgard_op_st_vary_32
:
1113 case midgard_op_st_vary_32i
:
1114 case midgard_op_st_vary_32u
:
1115 case midgard_op_ld_vary_16
:
1116 case midgard_op_ld_vary_32
:
1117 case midgard_op_ld_vary_32i
:
1118 case midgard_op_ld_vary_32u
:
1126 is_op_attribute(unsigned op
)
1129 case midgard_op_ld_attr_16
:
1130 case midgard_op_ld_attr_32
:
1131 case midgard_op_ld_attr_32i
:
1132 case midgard_op_ld_attr_32u
:
1140 print_load_store_arg(FILE *fp
, uint8_t arg
, unsigned index
)
1142 /* Try to interpret as a register */
1143 midgard_ldst_register_select sel
;
1144 memcpy(&sel
, &arg
, sizeof(arg
));
1146 /* If unknown is set, we're not sure what this is or how to
1147 * interpret it. But if it's zero, we get it. */
1150 fprintf(fp
, "0x%02X", arg
);
1154 unsigned reg
= REGISTER_LDST_BASE
+ sel
.select
;
1155 char comp
= components
[sel
.component
];
1157 fprintf(fp
, "r%u.%c", reg
, comp
);
1159 /* Only print a shift if it's non-zero. Shifts only make sense for the
1160 * second index. For the first, we're not sure what it means yet */
1164 fprintf(fp
, " << %u", sel
.shift
);
1166 fprintf(fp
, " /* %X */", sel
.shift
);
/* Bumps an address-derived statistic to cover `address` (counts are "highest
 * index seen, plus one"). Negative values are sentinels meaning the resource
 * was accessed indirectly and the count is unknowable, so leave those alone.
 * NOTE(review): the non-negative guard was missing from the mangled source;
 * inferred from the -16 sentinel assignments at the call sites -- confirm. */

static void
update_stats(signed *stat, unsigned address)
{
        if (*stat >= 0 && (signed) (address + 1) > *stat)
                *stat = (signed) (address + 1);
}
1178 print_load_store_instr(FILE *fp
, uint64_t data
,
1181 midgard_load_store_word
*word
= (midgard_load_store_word
*) &data
;
1183 print_ld_st_opcode(fp
, word
->op
);
1185 unsigned address
= word
->address
;
1187 if (is_op_varying(word
->op
)) {
1188 print_varying_parameters(fp
, word
);
1190 /* Do some analysis: check if direct cacess */
1192 if ((word
->arg_2
== 0x1E) && midg_stats
.varying_count
>= 0)
1193 update_stats(&midg_stats
.varying_count
, address
);
1195 midg_stats
.varying_count
= -16;
1196 } else if (is_op_attribute(word
->op
)) {
1197 if ((word
->arg_2
== 0x1E) && midg_stats
.attribute_count
>= 0)
1198 update_stats(&midg_stats
.attribute_count
, address
);
1200 midg_stats
.attribute_count
= -16;
1203 fprintf(fp
, " r%u", word
->reg
+ (OP_IS_STORE(word
->op
) ? 26 : 0));
1204 print_mask_4(fp
, word
->mask
, false);
1206 if (!OP_IS_STORE(word
->op
))
1207 update_dest(word
->reg
);
1209 bool is_ubo
= OP_IS_UBO_READ(word
->op
);
1212 /* UBOs use their own addressing scheme */
1214 int lo
= word
->varying_parameters
>> 7;
1215 int hi
= word
->address
;
1217 /* TODO: Combine fields logically */
1218 address
= (hi
<< 3) | lo
;
1221 fprintf(fp
, ", %u", address
);
1223 print_swizzle_vec4(fp
, word
->swizzle
, false, false, false);
1228 fprintf(fp
, "ubo%u", word
->arg_1
);
1229 update_stats(&midg_stats
.uniform_buffer_count
, word
->arg_1
);
1231 print_load_store_arg(fp
, word
->arg_1
, 0);
1234 print_load_store_arg(fp
, word
->arg_2
, 1);
1235 fprintf(fp
, " /* %X */\n", word
->varying_parameters
);
1237 midg_stats
.instruction_count
++;
1241 print_load_store_word(FILE *fp
, uint32_t *word
, unsigned tabs
)
1243 midgard_load_store
*load_store
= (midgard_load_store
*) word
;
1245 if (load_store
->word1
!= 3) {
1246 print_load_store_instr(fp
, load_store
->word1
, tabs
);
1249 if (load_store
->word2
!= 3) {
1250 print_load_store_instr(fp
, load_store
->word2
, tabs
);
1255 print_texture_reg_select(FILE *fp
, uint8_t u
, unsigned base
)
1257 midgard_tex_register_select sel
;
1258 memcpy(&sel
, &u
, sizeof(u
));
1263 fprintf(fp
, "r%u", base
+ sel
.select
);
1265 unsigned component
= sel
.component
;
1267 /* Use the upper half in half-reg mode */
1273 fprintf(fp
, ".%c", components
[component
]);
1275 assert(sel
.zero
== 0);
1279 print_texture_format(FILE *fp
, int format
)
1281 /* Act like a modifier */
1285 DEFINE_CASE(MALI_TEX_1D
, "1d");
1286 DEFINE_CASE(MALI_TEX_2D
, "2d");
1287 DEFINE_CASE(MALI_TEX_3D
, "3d");
1288 DEFINE_CASE(MALI_TEX_CUBE
, "cube");
1291 unreachable("Bad format");
1296 midgard_op_has_helpers(unsigned op
, bool gather
)
1302 case TEXTURE_OP_NORMAL
:
1303 case TEXTURE_OP_DFDX
:
1304 case TEXTURE_OP_DFDY
:
1312 print_texture_op(FILE *fp
, unsigned op
, bool gather
)
1314 /* Act like a bare name, like ESSL functions */
1317 fprintf(fp
, "textureGather");
1319 unsigned component
= op
>> 4;
1320 unsigned bottom
= op
& 0xF;
1323 fprintf(fp
, "_unk%u", bottom
);
1325 fprintf(fp
, ".%c", components
[component
]);
1330 DEFINE_CASE(TEXTURE_OP_NORMAL
, "texture");
1331 DEFINE_CASE(TEXTURE_OP_LOD
, "textureLod");
1332 DEFINE_CASE(TEXTURE_OP_TEXEL_FETCH
, "texelFetch");
1333 DEFINE_CASE(TEXTURE_OP_BARRIER
, "barrier");
1334 DEFINE_CASE(TEXTURE_OP_DFDX
, "dFdx");
1335 DEFINE_CASE(TEXTURE_OP_DFDY
, "dFdy");
1338 fprintf(fp
, "tex_%X", op
);
1344 texture_op_takes_bias(unsigned op
)
1346 return op
== TEXTURE_OP_NORMAL
;
1350 sampler_type_name(enum mali_sampler_type t
)
1353 case MALI_SAMPLER_FLOAT
:
1355 case MALI_SAMPLER_UNSIGNED
:
1357 case MALI_SAMPLER_SIGNED
:
1366 print_texture_barrier(FILE *fp
, uint32_t *word
)
1368 midgard_texture_barrier_word
*barrier
= (midgard_texture_barrier_word
*) word
;
1370 if (barrier
->type
!= TAG_TEXTURE_4_BARRIER
)
1371 fprintf(fp
, "/* barrier tag %X != tex/bar */ ", barrier
->type
);
1374 fprintf(fp
, "/* cont missing? */");
1377 fprintf(fp
, "/* last missing? */");
1380 fprintf(fp
, "/* zero1 = 0x%X */ ", barrier
->zero1
);
1383 fprintf(fp
, "/* zero2 = 0x%X */ ", barrier
->zero2
);
1386 fprintf(fp
, "/* zero3 = 0x%X */ ", barrier
->zero3
);
1389 fprintf(fp
, "/* zero4 = 0x%X */ ", barrier
->zero4
);
1392 fprintf(fp
, "/* zero4 = 0x%" PRIx64
" */ ", barrier
->zero5
);
1395 /* Control barriers are always implied, so include for obviousness */
1396 fprintf(fp
, " control");
1398 if (barrier
->buffer
)
1399 fprintf(fp
, " | buffer");
1401 if (barrier
->shared
)
1402 fprintf(fp
, " | shared");
1405 fprintf(fp
, " | stack");
/* Disassembles a single Midgard texture-pipeline bundle to `fp`.
 *
 * NOTE(review): this region is a whitespace-mangled extract. The embedded
 * decimal numbers (1413, 1415, ...) are original source line numbers fused
 * into the text, and gaps in that numbering (e.g. 1439-1440, 1442-1443,
 * 1456-1457, 1506-1509, plus the `static void` return type and all closing
 * braces) indicate lines dropped by the extraction. The code below is
 * preserved byte-for-byte rather than reconstructed; do not assume the
 * visible control flow is complete -- e.g. the ".cont"/".last" fprintf
 * calls presumably sit under dropped `if (texture->...)` guards (TODO
 * confirm against the original file).
 *
 * Visible behavior: decodes `word` as a midgard_texture_word, prints the
 * op, format, modifiers, destination/source registers, texture/sampler
 * selection (direct or register-indirect), offset (register or immediate
 * mode), and LOD/bias, while updating the global `midg_stats`.
 * `in_reg_base`/`out_reg_base` bias the printed register indices (they
 * differ across GPU generations; see the caller's quirk handling). */
1413 print_texture_word(FILE *fp
, uint32_t *word
, unsigned tabs
, unsigned in_reg_base
, unsigned out_reg_base
)
/* Reinterpret the raw bundle as the packed texture-word struct. */
1415 midgard_texture_word
*texture
= (midgard_texture_word
*) word
;
/* Record whether this op implies helper invocations (for shader-db stats). */
1417 midg_stats
.helper_invocations
|=
1418 midgard_op_has_helpers(texture
->op
, texture
->is_gather
);
1420 /* Broad category of texture operation in question */
1421 print_texture_op(fp
, texture
->op
, texture
->is_gather
);
1423 /* Barriers use a dramatically different code path */
1424 if (texture
->op
== TEXTURE_OP_BARRIER
) {
1425 print_texture_barrier(fp
, word
);
/* Sanity-check the bundle tag against the op: a non-barrier op must not
 * carry the tex/bar tag, and a vertex-texturing tag gets a ".vtx" note. */
1427 } else if (texture
->type
== TAG_TEXTURE_4_BARRIER
)
1428 fprintf (fp
, "/* nonbarrier had tex/bar tag */ ");
1429 else if (texture
->type
== TAG_TEXTURE_4_VTX
)
1430 fprintf (fp
, ".vtx");
1432 /* Specific format in question */
1433 print_texture_format(fp
, texture
->format
);
1435 /* Instruction "modifiers" parallel the ALU instructions. */
1437 if (texture
->shadow
)
1438 fprintf(fp
, ".shadow");
/* NOTE(review): the guards for ".cont" (orig. line ~1440) and ".last"
 * (orig. line ~1443) were dropped by the extraction -- presumably
 * `if (texture->cont)` / `if (texture->last)`; confirm upstream. */
1441 fprintf(fp
, ".cont");
1444 fprintf(fp
, ".last");
/* Out-of-order execution hint, printed with its numeric level. */
1446 if (texture
->out_of_order
)
1447 fprintf(fp
, ".ooo%u", texture
->out_of_order
);
1449 /* Output modifiers are always interpreted floatly */
1450 print_outmod(fp
, texture
->outmod
, false);
/* Destination: half-register prefix "h" when not full-width, biased by
 * out_reg_base, followed by the 4-component write mask. */
1452 fprintf(fp
, " %sr%u", texture
->out_full
? "" : "h",
1453 out_reg_base
+ texture
->out_reg_select
);
1454 print_mask_4(fp
, texture
->mask
, texture
->out_upper
);
/* A full-width destination cannot also select the upper half. */
1455 assert(!(texture
->out_full
&& texture
->out_upper
));
1458 /* Depending on whether we read from textures directly or indirectly,
1459 * we may be able to update our analysis */
1461 if (texture
->texture_register
) {
1462 fprintf(fp
, "texture[");
1463 print_texture_reg_select(fp
, texture
->texture_handle
, in_reg_base
);
1466 /* Indirect, tut tut */
/* -16 is the sentinel for "indirect access, count unknowable". */
1467 midg_stats
.texture_count
= -16;
/* Direct texture index: print it and fold it into the stats. */
1469 fprintf(fp
, "texture%u, ", texture
->texture_handle
);
1470 update_stats(&midg_stats
.texture_count
, texture
->texture_handle
);
1473 /* Print the type, GL style */
1474 fprintf(fp
, "%csampler", sampler_type_name(texture
->sampler_type
));
/* Sampler selection mirrors the texture selection above: register-indirect
 * (stats sentinel -16) versus direct index (stats updated). */
1476 if (texture
->sampler_register
) {
1478 print_texture_reg_select(fp
, texture
->sampler_handle
, in_reg_base
);
1481 midg_stats
.sampler_count
= -16;
1483 fprintf(fp
, "%u", texture
->sampler_handle
);
1484 update_stats(&midg_stats
.sampler_count
, texture
->sampler_handle
);
/* Destination swizzle, then the coordinate source register. */
1487 print_swizzle_vec4(fp
, texture
->swizzle
, false, false, false);
1488 fprintf(fp
, ", %sr%u", texture
->in_reg_full
? "" : "h", in_reg_base
+ texture
->in_reg_select
);
1489 assert(!(texture
->in_reg_full
&& texture
->in_reg_upper
));
1491 /* TODO: integrate with swizzle */
1492 if (texture
->in_reg_upper
)
1495 print_swizzle_vec4(fp
, texture
->in_reg_swizzle
, false, false, false);
1497 /* There is *always* an offset attached. Of
1498 * course, that offset is just immediate #0 for a
1499 * GLES call that doesn't take an offset. If there
1500 * is a non-negative non-zero offset, this is
1501 * specified in immediate offset mode, with the
1502 * values in the offset_* fields as immediates. If
1503 * this is a negative offset, we instead switch to
1504 * a register offset mode, where the offset_*
1505 * fields become register triplets */
1507 if (texture
->offset_register
) {
/* Register-offset mode: bits 0/1/2 of `offset` encode full-width,
 * register select, and upper-half; bits 3+ are the swizzle. */
1510 bool full
= texture
->offset
& 1;
1511 bool select
= texture
->offset
& 2;
1512 bool upper
= texture
->offset
& 4;
1514 fprintf(fp
, "%sr%u", full
? "" : "h", in_reg_base
+ select
);
/* NOTE(review): asserts out_full/out_upper, not the just-decoded
 * full/upper locals -- looks like a copy-paste from orig. line 1455;
 * verify against upstream. */
1515 assert(!(texture
->out_full
&& texture
->out_upper
));
1517 /* TODO: integrate with swizzle */
1521 print_swizzle_vec4(fp
, texture
->offset
>> 3, false, false, false);
1524 } else if (texture
->offset
) {
1525 /* Only select ops allow negative immediate offsets, verify */
/* Unpack the three 4-bit immediate offset components.
 * NOTE(review): lines ~1526/1530 were dropped here; as written these
 * unsigned masks can never be negative, so the neg_* flags below look
 * like they depended on dropped sign-extension -- confirm upstream. */
1527 signed offset_x
= (texture
->offset
& 0xF);
1528 signed offset_y
= ((texture
->offset
>> 4) & 0xF);
1529 signed offset_z
= ((texture
->offset
>> 8) & 0xF);
1531 bool neg_x
= offset_x
< 0;
1532 bool neg_y
= offset_y
< 0;
1533 bool neg_z
= offset_z
< 0;
1534 bool any_neg
= neg_x
|| neg_y
|| neg_z
;
/* Negative immediate offsets are only valid for texel fetch. */
1536 if (any_neg
&& texture
->op
!= TEXTURE_OP_TEXEL_FETCH
)
1537 fprintf(fp
, "/* invalid negative */ ");
1539 /* Regardless, just print the immediate offset */
1541 fprintf(fp
, " + <%d, %d, %d>, ", offset_x
, offset_y
, offset_z
);
/* LOD/bias: '+' for ops that take a bias, '=' for an absolute LOD. */
1546 char lod_operand
= texture_op_takes_bias(texture
->op
) ? '+' : '=';
1548 if (texture
->lod_register
) {
/* LOD supplied in a register: `bias` holds the register select. */
1549 fprintf(fp
, "lod %c ", lod_operand
);
1550 print_texture_reg_select(fp
, texture
->bias
, in_reg_base
);
/* bias_int should be unused in register mode; print it if nonzero. */
1553 if (texture
->bias_int
)
1554 fprintf(fp
, " /* bias_int = 0x%X */", texture
->bias_int
);
1555 } else if (texture
->op
== TEXTURE_OP_TEXEL_FETCH
) {
1556 /* For texel fetch, the int LOD is in the fractional place and
1557 * there is no fraction. We *always* have an explicit LOD, even
1560 if (texture
->bias_int
)
1561 fprintf(fp
, " /* bias_int = 0x%X */ ", texture
->bias_int
);
1563 fprintf(fp
, "lod = %u, ", texture
->bias
);
1564 } else if (texture
->bias
|| texture
->bias_int
) {
/* General case: signed integer part plus 8-bit fractional part
 * (bias / 256) combine into a float LOD or bias. */
1565 signed bias_int
= texture
->bias_int
;
1566 float bias_frac
= texture
->bias
/ 256.0f
;
1567 float bias
= bias_int
+ bias_frac
;
1569 bool is_bias
= texture_op_takes_bias(texture
->op
);
1570 char sign
= (bias
>= 0.0) ? '+' : '-';
1571 char operand
= is_bias
? sign
: '=';
1573 fprintf(fp
, "lod %c %f, ", operand
, fabsf(bias
));
1578 /* While not zero in general, for these simple instructions the
1579 * following unknowns are zero, so we don't include them */
1581 if (texture
->unknown4
||
1582 texture
->unknown8
) {
1583 fprintf(fp
, "// unknown4 = 0x%x\n", texture
->unknown4
);
1584 fprintf(fp
, "// unknown8 = 0x%x\n", texture
->unknown8
);
/* One texture word == one instruction for shader-db accounting. */
1587 midg_stats
.instruction_count
++;
1590 struct midgard_disasm_stats
1591 disassemble_midgard(FILE *fp
, uint8_t *code
, size_t size
, unsigned gpu_id
, gl_shader_stage stage
)
1593 uint32_t *words
= (uint32_t *) code
;
1594 unsigned num_words
= size
/ 4;
1597 bool branch_forward
= false;
1599 int last_next_tag
= -1;
1603 midg_tags
= calloc(sizeof(midg_tags
[0]), num_words
);
1605 /* Stats for shader-db */
1606 memset(&midg_stats
, 0, sizeof(midg_stats
));
1607 midg_ever_written
= 0;
1609 while (i
< num_words
) {
1610 unsigned tag
= words
[i
] & 0xF;
1611 unsigned next_tag
= (words
[i
] >> 4) & 0xF;
1612 unsigned num_quad_words
= midgard_tag_props
[tag
].size
;
1614 if (midg_tags
[i
] && midg_tags
[i
] != tag
) {
1615 fprintf(fp
, "\t/* XXX: TAG ERROR branch, got %s expected %s */\n",
1616 midgard_tag_props
[tag
].name
,
1617 midgard_tag_props
[midg_tags
[i
]].name
);
1622 /* Check the tag. The idea is to ensure that next_tag is
1623 * *always* recoverable from the disassembly, such that we may
1624 * safely omit printing next_tag. To show this, we first
1625 * consider that next tags are semantically off-byone -- we end
1626 * up parsing tag n during step n+1. So, we ensure after we're
1627 * done disassembling the next tag of the final bundle is BREAK
1628 * and warn otherwise. We also ensure that the next tag is
1629 * never INVALID. Beyond that, since the last tag is checked
1630 * outside the loop, we can check one tag prior. If equal to
1631 * the current tag (which is unique), we're done. Otherwise, we
1632 * print if that tag was > TAG_BREAK, which implies the tag was
1633 * not TAG_BREAK or TAG_INVALID. But we already checked for
1634 * TAG_INVALID, so it's just if the last tag was TAG_BREAK that
1635 * we're silent. So we throw in a print for break-next on at
1636 * the end of the bundle (if it's not the final bundle, which
1637 * we already check for above), disambiguating this case as
1638 * well. Hence in all cases we are unambiguous, QED. */
1640 if (next_tag
== TAG_INVALID
)
1641 fprintf(fp
, "\t/* XXX: invalid next tag */\n");
1643 if (last_next_tag
> TAG_BREAK
&& last_next_tag
!= tag
) {
1644 fprintf(fp
, "\t/* XXX: TAG ERROR sequence, got %s expexted %s */\n",
1645 midgard_tag_props
[tag
].name
,
1646 midgard_tag_props
[last_next_tag
].name
);
1649 last_next_tag
= next_tag
;
1651 /* Tags are unique in the following way:
1653 * INVALID, BREAK, UNKNOWN_*: verbosely printed
1654 * TEXTURE_4_BARRIER: verified by barrier/!barrier op
1655 * TEXTURE_4_VTX: .vtx tag printed
1656 * TEXTURE_4: tetxure lack of barriers or .vtx
1657 * TAG_LOAD_STORE_4: only load/store
1658 * TAG_ALU_4/8/12/16: by number of instructions/constants
1659 * TAG_ALU_4_8/12/16_WRITEOUT: ^^ with .writeout tag
1663 case TAG_TEXTURE_4_VTX
... TAG_TEXTURE_4_BARRIER
: {
1664 bool interpipe_aliasing
=
1665 midgard_get_quirks(gpu_id
) & MIDGARD_INTERPIPE_REG_ALIASING
;
1667 print_texture_word(fp
, &words
[i
], tabs
,
1668 interpipe_aliasing
? 0 : REG_TEX_BASE
,
1669 interpipe_aliasing
? REGISTER_LDST_BASE
: REG_TEX_BASE
);
1673 case TAG_LOAD_STORE_4
:
1674 print_load_store_word(fp
, &words
[i
], tabs
);
1677 case TAG_ALU_4
... TAG_ALU_16_WRITEOUT
:
1678 branch_forward
= print_alu_word(fp
, &words
[i
], num_quad_words
, tabs
, i
+ 4*num_quad_words
);
1680 /* Reset word static analysis state */
1681 is_embedded_constant_half
= false;
1682 is_embedded_constant_int
= false;
1684 /* TODO: infer/verify me */
1685 if (tag
>= TAG_ALU_4_WRITEOUT
)
1686 fprintf(fp
, "writeout\n");
1691 fprintf(fp
, "Unknown word type %u:\n", words
[i
] & 0xF);
1693 print_quad_word(fp
, &words
[i
], tabs
);
1698 /* We are parsing per bundle anyway. Add before we start
1699 * breaking out so we don't miss the final bundle. */
1701 midg_stats
.bundle_count
++;
1702 midg_stats
.quadword_count
+= num_quad_words
;
1704 /* Include a synthetic "break" instruction at the end of the
1705 * bundle to signify that if, absent a branch, the shader
1706 * execution will stop here. Stop disassembly at such a break
1707 * based on a heuristic */
1709 if (next_tag
== TAG_BREAK
) {
1710 if (branch_forward
) {
1711 fprintf(fp
, "break\n");
1720 i
+= 4 * num_quad_words
;
1723 if (last_next_tag
!= TAG_BREAK
) {
1724 fprintf(fp
, "/* XXX: shader ended with tag %s */\n",
1725 midgard_tag_props
[last_next_tag
].name
);
1730 /* We computed work_count as max_work_registers, so add one to get the
1731 * count. If no work registers are written, you still have one work
1732 * reported, which is exactly what the hardware expects */
1734 midg_stats
.work_count
++;