 * Copyright (c) 2013 Connor Abbott (connor@abbott.cx)
 * Copyright (c) 2018 Alyssa Rosenzweig (alyssa@rosenzweig.io)
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
35 #include "midgard_ops.h"
36 #include "midgard_quirks.h"
37 #include "disassemble.h"
39 #include "util/bitscan.h"
40 #include "util/half_float.h"
41 #include "util/u_math.h"
/* Shorthand for switch cases that just print a fixed string for an enum
 * value. Relies on a `FILE *fp` being in scope at the use site. */
#define DEFINE_CASE(define, str) case define: { fprintf(fp, str); break; }
45 static unsigned *midg_tags
;
46 static bool is_instruction_int
= false;
50 static struct midgard_disasm_stats midg_stats
;
/* Transform an expanded writemask (duplicated 8-bit format) into its condensed
 * form (one bit per component). */

static inline unsigned
condense_writemask(unsigned expanded_mask,
                   unsigned bits_per_component)
{
        if (bits_per_component == 8)
                unreachable("XXX TODO: sort out how 8-bit constant encoding works");

        /* Each component occupies bits_per_component/16 slots of the
         * expanded mask; sample the first slot of each component. */
        unsigned slots_per_component = bits_per_component / 16;
        unsigned max_comp = (16 * 8) / bits_per_component;
        unsigned condensed_mask = 0;

        for (unsigned comp = 0; comp < max_comp; comp++) {
                unsigned slot_bit = 1u << (comp * slots_per_component);

                if (expanded_mask & slot_bit)
                        condensed_mask |= (1u << comp);
        }

        return condensed_mask;
}
75 print_alu_opcode(FILE *fp
, midgard_alu_op op
)
79 if (alu_opcode_props
[op
].name
) {
80 fprintf(fp
, "%s", alu_opcode_props
[op
].name
);
82 int_op
= midgard_is_integer_op(op
);
84 fprintf(fp
, "alu_op_%02X", op
);
86 /* For constant analysis */
87 is_instruction_int
= int_op
;
91 print_ld_st_opcode(FILE *fp
, midgard_load_store_op op
)
93 if (load_store_opcode_props
[op
].name
)
94 fprintf(fp
, "%s", load_store_opcode_props
[op
].name
);
96 fprintf(fp
, "ldst_op_%02X", op
);
/* Whether the embedded constant referenced by the current instruction was
 * accessed as half-precision / as an integer — set by print_reg, consumed by
 * the constant-expansion analysis. */
static bool is_embedded_constant_half = false;
static bool is_embedded_constant_int = false;
/* Register-size prefix used in the disassembly: quarter (8-bit), half
 * (16-bit) or double (64-bit) registers get a letter; 32-bit gets none
 * (returns 0). */

static char
prefix_for_bits(unsigned bits)
{
        switch (bits) {
        case 8:
                return 'q';
        case 16:
                return 'h';
        case 64:
                return 'd';
        default:
                return 0;
        }
}
/* For static analysis to ensure all registers are written at least once before
 * use along the source code path (TODO: does this break done for complex CF?)
 * Bit i is set once register ri has been observed as a destination. */

uint16_t midg_ever_written = 0;
124 print_reg(FILE *fp
, unsigned reg
, unsigned bits
)
126 /* Perform basic static analysis for expanding constants correctly */
129 is_embedded_constant_int
= is_instruction_int
;
130 is_embedded_constant_half
= (bits
< 32);
133 unsigned uniform_reg
= 23 - reg
;
134 bool is_uniform
= false;
136 /* For r8-r15, it could be a work or uniform. We distinguish based on
137 * the fact work registers are ALWAYS written before use, but uniform
138 * registers are NEVER written before use. */
140 if ((reg
>= 8 && reg
< 16) && !(midg_ever_written
& (1 << reg
)))
143 /* r16-r23 are always uniform */
145 if (reg
>= 16 && reg
<= 23)
148 /* Update the uniform count appropriately */
151 midg_stats
.uniform_count
=
152 MAX2(uniform_reg
+ 1, midg_stats
.uniform_count
);
154 char prefix
= prefix_for_bits(bits
);
159 fprintf(fp
, "r%u", reg
);
/* Output-modifier suffixes, indexed by the 2-bit outmod field. Separate
 * tables for float and integer ops since the encoding is op-type dependent.
 * NOTE(review): table contents reconstructed from the upstream Midgard
 * disassembler — confirm against midgard.h's outmod enums. */

static char *outmod_names_float[4] = {
        "",
        ".pos",
        ".unk2",
        ".sat"
};

static char *outmod_names_int[4] = {
        ".isat",
        ".usat",
        "",
        ".hi"
};

/* Integer source modifiers print as function-style wrappers; the trailing
 * ")" (or shift) is emitted after the operand. */

static char *srcmod_names_int[4] = {
        "sext(",
        "zext(",
        "",
        "("
};
184 print_outmod(FILE *fp
, unsigned outmod
, bool is_int
)
186 fprintf(fp
, "%s", is_int
? outmod_names_int
[outmod
] :
187 outmod_names_float
[outmod
]);
/* Dump a raw 128-bit (quad) word as four comma-separated hex words. */

static void
print_quad_word(FILE *fp, uint32_t *words, unsigned tabs)
{
        unsigned i;

        for (i = 0; i < 4; i++)
                fprintf(fp, "0x%08X%s ", words[i], i == 3 ? "" : ",");

        fprintf(fp, "\n");
}
/* Component name alphabet: the low vec4 is xyzw, and higher lanes continue
 * through e..p for the wider 16-lane views. No NUL terminator — indexed
 * only, never printed as a string. */
static const char components[16] = "xyzwefghijklmnop";
203 /* Helper to print 4 chars of a swizzle */
205 print_swizzle_helper(FILE *fp
, unsigned swizzle
, unsigned offset
)
207 for (unsigned i
= 0; i
< 4; ++i
) {
208 unsigned c
= (swizzle
>> (i
* 2)) & 3;
210 fprintf(fp
, "%c", components
[c
]);
214 /* Helper to print 8 chars of a swizzle, duplicating over */
216 print_swizzle_helper_8(FILE *fp
, unsigned swizzle
, bool upper
)
218 for (unsigned i
= 0; i
< 4; ++i
) {
219 unsigned c
= (swizzle
>> (i
* 2)) & 3;
222 fprintf(fp
, "%c%c", components
[c
], components
[c
+1]);
227 print_swizzle_vec16(FILE *fp
, unsigned swizzle
, bool rep_high
, bool rep_low
,
228 midgard_dest_override override
)
232 if (override
== midgard_dest_override_upper
) {
234 fprintf(fp
, " /* rep_high */ ");
236 fprintf(fp
, " /* rep_low */ ");
238 if (!rep_high
&& rep_low
)
239 print_swizzle_helper_8(fp
, swizzle
, true);
241 print_swizzle_helper_8(fp
, swizzle
, false);
243 print_swizzle_helper_8(fp
, swizzle
, rep_high
& 1);
244 print_swizzle_helper_8(fp
, swizzle
, !(rep_low
& 1));
/* Print a swizzle for 16-bit (8-lane) mode. In half mode rep_low selects the
 * upper 8-name window and rep_high suppresses the second group; otherwise
 * rep_high/rep_low shift each 4-lane group independently. */

static void
print_swizzle_vec8(FILE *fp, unsigned swizzle, bool rep_high, bool rep_low, bool half)
{
        fprintf(fp, ".");

        /* TODO: Is it possible to unify half/full? */

        if (half) {
                print_swizzle_helper(fp, swizzle, rep_low * 8);
                print_swizzle_helper(fp, swizzle, (rep_low * 8) + !rep_high * 4);
        } else {
                print_swizzle_helper(fp, swizzle, rep_high * 4);
                print_swizzle_helper(fp, swizzle, !rep_low * 4);
        }
}
/* Print a swizzle for 32-bit (vec4) mode. The identity swizzle 0xE4 (xyzw)
 * in full mode prints nothing. */

static void
print_swizzle_vec4(FILE *fp, unsigned swizzle, bool rep_high, bool rep_low, bool half)
{
        if (rep_high)
                fprintf(fp, " /* rep_high */ ");

        if (!half && rep_low)
                fprintf(fp, " /* rep_low */ ");

        if (swizzle == 0xE4 && !half)
                return; /* xyzw */

        fprintf(fp, ".");
        print_swizzle_helper(fp, swizzle, rep_low * 4);
}
279 print_swizzle_vec2(FILE *fp
, unsigned swizzle
, bool rep_high
, bool rep_low
, bool half
)
281 char *alphabet
= "XY";
284 alphabet
= rep_low
? "zw" : "xy";
286 fprintf(fp
, " /* rep_low */ ");
289 fprintf(fp
, " /* rep_high */ ");
291 if (swizzle
== 0xE4 && !half
) return; /* XY */
295 for (unsigned i
= 0; i
< 4; i
+= 2) {
296 unsigned a
= (swizzle
>> (i
* 2)) & 3;
297 unsigned b
= (swizzle
>> ((i
+1) * 2)) & 3;
299 /* Normally we're adjacent, but if there's an issue, don't make
303 fprintf(fp
, "%c", alphabet
[a
>> 1]);
305 fprintf(fp
, "[%c%c]", components
[a
], components
[b
]);
310 bits_for_mode(midgard_reg_mode mode
)
313 case midgard_reg_mode_8
:
315 case midgard_reg_mode_16
:
317 case midgard_reg_mode_32
:
319 case midgard_reg_mode_64
:
322 unreachable("Invalid reg mode");
328 bits_for_mode_halved(midgard_reg_mode mode
, bool half
)
330 unsigned bits
= bits_for_mode(mode
);
339 print_scalar_constant(FILE *fp
, unsigned src_binary
,
340 const midgard_constants
*consts
,
341 midgard_scalar_alu
*alu
)
343 midgard_scalar_alu_src
*src
= (midgard_scalar_alu_src
*)&src_binary
;
346 assert(consts
!= NULL
);
348 if (!midgard_is_integer_op(alu
->op
)) {
350 mod
|= MIDGARD_FLOAT_MOD_ABS
;
352 mod
|= MIDGARD_FLOAT_MOD_NEG
;
354 mod
= midgard_int_normal
;
358 mir_print_constant_component(fp
, consts
, src
->component
,
360 midgard_reg_mode_32
: midgard_reg_mode_16
,
361 false, mod
, alu
->op
);
365 print_vector_constants(FILE *fp
, unsigned src_binary
,
366 const midgard_constants
*consts
,
367 midgard_vector_alu
*alu
)
369 midgard_vector_alu_src
*src
= (midgard_vector_alu_src
*)&src_binary
;
370 unsigned bits
= bits_for_mode_halved(alu
->reg_mode
, src
->half
);
371 unsigned max_comp
= MIN2((sizeof(*consts
) * 8) / bits
, 8);
372 unsigned comp_mask
, num_comp
= 0;
376 comp_mask
= effective_writemask(alu
, condense_writemask(alu
->mask
, bits
));
377 num_comp
= util_bitcount(comp_mask
);
381 fprintf(fp
, "vec%d(", num_comp
);
385 for (unsigned i
= 0; i
< max_comp
; ++i
) {
386 if (!(comp_mask
& (1 << i
))) continue;
388 unsigned c
= (src
->swizzle
>> (i
* 2)) & 3;
395 mir_print_constant_component(fp
, consts
, c
, alu
->reg_mode
,
396 src
->half
, src
->mod
, alu
->op
);
404 print_vector_src(FILE *fp
, unsigned src_binary
,
405 midgard_reg_mode mode
, unsigned reg
,
406 midgard_dest_override override
, bool is_int
)
408 midgard_vector_alu_src
*src
= (midgard_vector_alu_src
*)&src_binary
;
410 /* Modifiers change meaning depending on the op's context */
412 midgard_int_mod int_mod
= src
->mod
;
415 fprintf(fp
, "%s", srcmod_names_int
[int_mod
]);
417 if (src
->mod
& MIDGARD_FLOAT_MOD_NEG
)
420 if (src
->mod
& MIDGARD_FLOAT_MOD_ABS
)
425 unsigned bits
= bits_for_mode_halved(mode
, src
->half
);
426 print_reg(fp
, reg
, bits
);
428 /* When the source was stepped down via `half`, rep_low means "higher
429 * half" and rep_high is never seen. When it's not native,
430 * rep_low/rep_high are for, well, replication */
432 if (mode
== midgard_reg_mode_8
) {
434 print_swizzle_vec16(fp
, src
->swizzle
, src
->rep_high
, src
->rep_low
, override
);
435 } else if (mode
== midgard_reg_mode_16
) {
436 print_swizzle_vec8(fp
, src
->swizzle
, src
->rep_high
, src
->rep_low
, src
->half
);
437 } else if (mode
== midgard_reg_mode_32
) {
438 print_swizzle_vec4(fp
, src
->swizzle
, src
->rep_high
, src
->rep_low
, src
->half
);
439 } else if (mode
== midgard_reg_mode_64
) {
440 print_swizzle_vec2(fp
, src
->swizzle
, src
->rep_high
, src
->rep_low
, src
->half
);
443 /* Since we wrapped with a function-looking thing */
445 if (is_int
&& int_mod
== midgard_int_shift
)
446 fprintf(fp
, ") << %u", bits
);
447 else if ((is_int
&& (int_mod
!= midgard_int_normal
))
448 || (!is_int
&& src
->mod
& MIDGARD_FLOAT_MOD_ABS
))
/* Reassemble the 16-bit inline immediate of a vector ALU op from the
 * scattered src2_reg and imm fields. */

static uint16_t
decode_vector_imm(unsigned src2_reg, unsigned imm)
{
        uint16_t ret = src2_reg << 11;

        ret |= (imm & 0x7) << 8;
        ret |= (imm >> 3) & 0xFF;

        return ret;
}
463 print_immediate(FILE *fp
, uint16_t imm
)
465 if (is_instruction_int
)
466 fprintf(fp
, "#%u", imm
);
468 fprintf(fp
, "#%g", _mesa_half_to_float(imm
));
472 update_dest(unsigned reg
)
474 /* We should record writes as marking this as a work register. Store
475 * the max register in work_count; we'll add one at the end */
478 midg_stats
.work_count
= MAX2(reg
, midg_stats
.work_count
);
479 midg_ever_written
|= (1 << reg
);
484 print_dest(FILE *fp
, unsigned reg
, midgard_reg_mode mode
, midgard_dest_override override
)
486 /* Depending on the mode and override, we determine the type of
487 * destination addressed. Absent an override, we address just the
488 * type of the operation itself */
490 unsigned bits
= bits_for_mode(mode
);
492 if (override
!= midgard_dest_override_none
)
496 print_reg(fp
, reg
, bits
);
500 print_mask_vec16(FILE *fp
, uint8_t mask
, midgard_dest_override override
)
504 for (unsigned i
= 0; i
< 8; i
++) {
508 components
[i
*2 + 1]);
512 /* For 16-bit+ masks, we read off from the 8-bit mask field. For 16-bit (vec8),
513 * it's just one bit per channel, easy peasy. For 32-bit (vec4), it's one bit
514 * per channel with one duplicate bit in the middle. For 64-bit (vec2), it's
515 * one-bit per channel with _3_ duplicate bits in the middle. Basically, just
516 * subdividing the 128-bit word in 16-bit increments. For 64-bit, we uppercase
517 * the mask to make it obvious what happened */
520 print_mask(FILE *fp
, uint8_t mask
, unsigned bits
, midgard_dest_override override
)
523 print_mask_vec16(fp
, mask
, override
);
527 /* Skip 'complete' masks */
529 if (override
== midgard_dest_override_none
)
530 if (bits
>= 32 && mask
== 0xFF) return;
534 unsigned skip
= (bits
/ 16);
535 bool uppercase
= bits
> 32;
536 bool tripped
= false;
538 /* To apply an upper destination override, we "shift" the alphabet.
539 * E.g. with an upper override on 32-bit, instead of xyzw, print efgh.
540 * For upper 16-bit, instead of xyzwefgh, print ijklmnop */
542 const char *alphabet
= components
;
544 if (override
== midgard_dest_override_upper
)
545 alphabet
+= (128 / bits
);
547 for (unsigned i
= 0; i
< 8; i
+= skip
) {
548 bool a
= (mask
& (1 << i
)) != 0;
550 for (unsigned j
= 1; j
< skip
; ++j
) {
551 bool dupe
= (mask
& (1 << (i
+ j
))) != 0;
552 tripped
|= (dupe
!= a
);
556 char c
= alphabet
[i
/ skip
];
561 fprintf(fp
, "%c", c
);
566 fprintf(fp
, " /* %X */", mask
);
569 /* Prints the 4-bit masks found in texture and load/store ops, as opposed to
570 * the 8-bit masks found in (vector) ALU ops. Supports texture-style 16-bit
571 * mode as well, but not load/store-style 16-bit mode. */
574 print_mask_4(FILE *fp
, unsigned mask
, bool upper
)
585 for (unsigned i
= 0; i
< 4; ++i
) {
586 bool a
= (mask
& (1 << i
)) != 0;
588 fprintf(fp
, "%c", components
[i
+ (upper
? 4 : 0)]);
593 print_vector_field(FILE *fp
, const char *name
, uint16_t *words
, uint16_t reg_word
,
594 const midgard_constants
*consts
, unsigned tabs
)
596 midgard_reg_info
*reg_info
= (midgard_reg_info
*)®_word
;
597 midgard_vector_alu
*alu_field
= (midgard_vector_alu
*) words
;
598 midgard_reg_mode mode
= alu_field
->reg_mode
;
599 unsigned override
= alu_field
->dest_override
;
601 /* For now, prefix instruction names with their unit, until we
602 * understand how this works on a deeper level */
603 fprintf(fp
, "%s.", name
);
605 print_alu_opcode(fp
, alu_field
->op
);
607 /* Postfix with the size to disambiguate if necessary */
608 char postfix
= prefix_for_bits(bits_for_mode(mode
));
609 bool size_ambiguous
= override
!= midgard_dest_override_none
;
612 fprintf(fp
, "%c", postfix
? postfix
: 'r');
614 /* Print the outmod, if there is one */
615 print_outmod(fp
, alu_field
->outmod
,
616 midgard_is_integer_out_op(alu_field
->op
));
620 /* Mask denoting status of 8-lanes */
621 uint8_t mask
= alu_field
->mask
;
623 /* First, print the destination */
624 print_dest(fp
, reg_info
->out_reg
, mode
, alu_field
->dest_override
);
626 if (override
!= midgard_dest_override_none
) {
627 bool modeable
= (mode
!= midgard_reg_mode_8
);
628 bool known
= override
!= 0x3; /* Unused value */
630 if (!(modeable
&& known
))
631 fprintf(fp
, "/* do%u */ ", override
);
634 print_mask(fp
, mask
, bits_for_mode(mode
), override
);
638 bool is_int
= midgard_is_integer_op(alu_field
->op
);
640 if (reg_info
->src1_reg
== 26)
641 print_vector_constants(fp
, alu_field
->src1
, consts
, alu_field
);
643 print_vector_src(fp
, alu_field
->src1
, mode
, reg_info
->src1_reg
, override
, is_int
);
647 if (reg_info
->src2_imm
) {
648 uint16_t imm
= decode_vector_imm(reg_info
->src2_reg
, alu_field
->src2
>> 2);
649 print_immediate(fp
, imm
);
650 } else if (reg_info
->src2_reg
== 26) {
651 print_vector_constants(fp
, alu_field
->src2
, consts
, alu_field
);
653 print_vector_src(fp
, alu_field
->src2
, mode
,
654 reg_info
->src2_reg
, override
, is_int
);
657 midg_stats
.instruction_count
++;
662 print_scalar_src(FILE *fp
, unsigned src_binary
, unsigned reg
)
664 midgard_scalar_alu_src
*src
= (midgard_scalar_alu_src
*)&src_binary
;
672 print_reg(fp
, reg
, src
->full
? 32 : 16);
674 unsigned c
= src
->component
;
677 assert((c
& 1) == 0);
681 fprintf(fp
, ".%c", components
[c
]);
/* Reassemble the 16-bit inline immediate of a scalar ALU op from the
 * scattered src2_reg and imm fields. */

static uint16_t
decode_scalar_imm(unsigned src2_reg, unsigned imm)
{
        uint16_t ret = src2_reg << 11;

        ret |= (imm & 3) << 9;
        ret |= (imm & 4) << 6;
        ret |= (imm & 0x38) << 2;
        ret |= imm >> 6;

        return ret;
}
701 print_scalar_field(FILE *fp
, const char *name
, uint16_t *words
, uint16_t reg_word
,
702 const midgard_constants
*consts
, unsigned tabs
)
704 midgard_reg_info
*reg_info
= (midgard_reg_info
*)®_word
;
705 midgard_scalar_alu
*alu_field
= (midgard_scalar_alu
*) words
;
707 if (alu_field
->unknown
)
708 fprintf(fp
, "scalar ALU unknown bit set\n");
710 fprintf(fp
, "%s.", name
);
711 print_alu_opcode(fp
, alu_field
->op
);
712 print_outmod(fp
, alu_field
->outmod
,
713 midgard_is_integer_out_op(alu_field
->op
));
716 bool full
= alu_field
->output_full
;
717 update_dest(reg_info
->out_reg
);
718 print_reg(fp
, reg_info
->out_reg
, full
? 32 : 16);
719 unsigned c
= alu_field
->output_component
;
722 assert((c
& 1) == 0);
726 fprintf(fp
, ".%c, ", components
[c
]);
728 if (reg_info
->src1_reg
== 26)
729 print_scalar_constant(fp
, alu_field
->src1
, consts
, alu_field
);
731 print_scalar_src(fp
, alu_field
->src1
, reg_info
->src1_reg
);
735 if (reg_info
->src2_imm
) {
736 uint16_t imm
= decode_scalar_imm(reg_info
->src2_reg
,
738 print_immediate(fp
, imm
);
739 } else if (reg_info
->src2_reg
== 26) {
740 print_scalar_constant(fp
, alu_field
->src2
, consts
, alu_field
);
742 print_scalar_src(fp
, alu_field
->src2
, reg_info
->src2_reg
);
744 midg_stats
.instruction_count
++;
749 print_branch_op(FILE *fp
, unsigned op
)
752 case midgard_jmp_writeout_op_branch_uncond
:
753 fprintf(fp
, "uncond.");
756 case midgard_jmp_writeout_op_branch_cond
:
757 fprintf(fp
, "cond.");
760 case midgard_jmp_writeout_op_writeout
:
761 fprintf(fp
, "write.");
764 case midgard_jmp_writeout_op_tilebuffer_pending
:
765 fprintf(fp
, "tilebuffer.");
768 case midgard_jmp_writeout_op_discard
:
769 fprintf(fp
, "discard.");
773 fprintf(fp
, "unk%u.", op
);
779 print_branch_cond(FILE *fp
, int cond
)
782 case midgard_condition_write0
:
783 fprintf(fp
, "write0");
786 case midgard_condition_false
:
787 fprintf(fp
, "false");
790 case midgard_condition_true
:
794 case midgard_condition_always
:
795 fprintf(fp
, "always");
799 fprintf(fp
, "unk%X", cond
);
805 print_compact_branch_writeout_field(FILE *fp
, uint16_t word
)
807 midgard_jmp_writeout_op op
= word
& 0x7;
808 midg_stats
.instruction_count
++;
811 case midgard_jmp_writeout_op_branch_uncond
: {
812 midgard_branch_uncond br_uncond
;
813 memcpy((char *) &br_uncond
, (char *) &word
, sizeof(br_uncond
));
814 fprintf(fp
, "br.uncond ");
816 if (br_uncond
.unknown
!= 1)
817 fprintf(fp
, "unknown:%u, ", br_uncond
.unknown
);
819 if (br_uncond
.offset
>= 0)
822 fprintf(fp
, "%d -> %s", br_uncond
.offset
,
823 midgard_tag_props
[br_uncond
.dest_tag
].name
);
826 return br_uncond
.offset
>= 0;
829 case midgard_jmp_writeout_op_branch_cond
:
830 case midgard_jmp_writeout_op_writeout
:
831 case midgard_jmp_writeout_op_discard
:
833 midgard_branch_cond br_cond
;
834 memcpy((char *) &br_cond
, (char *) &word
, sizeof(br_cond
));
838 print_branch_op(fp
, br_cond
.op
);
839 print_branch_cond(fp
, br_cond
.cond
);
843 if (br_cond
.offset
>= 0)
846 fprintf(fp
, "%d -> %s", br_cond
.offset
,
847 midgard_tag_props
[br_cond
.dest_tag
].name
);
850 return br_cond
.offset
>= 0;
858 print_extended_branch_writeout_field(FILE *fp
, uint8_t *words
, unsigned next
)
860 midgard_branch_extended br
;
861 memcpy((char *) &br
, (char *) words
, sizeof(br
));
865 print_branch_op(fp
, br
.op
);
867 /* Condition codes are a LUT in the general case, but simply repeated 8 times for single-channel conditions.. Check this. */
869 bool single_channel
= true;
871 for (unsigned i
= 0; i
< 16; i
+= 2) {
872 single_channel
&= (((br
.cond
>> i
) & 0x3) == (br
.cond
& 0x3));
876 print_branch_cond(fp
, br
.cond
& 0x3);
878 fprintf(fp
, "lut%X", br
.cond
);
881 fprintf(fp
, ".unknown%u", br
.unknown
);
888 fprintf(fp
, "%d -> %s\n", br
.offset
,
889 midgard_tag_props
[br
.dest_tag
].name
);
891 unsigned I
= next
+ br
.offset
* 4;
893 if (midg_tags
[I
] && midg_tags
[I
] != br
.dest_tag
) {
894 fprintf(fp
, "\t/* XXX TAG ERROR: jumping to %s but tagged %s \n",
895 midgard_tag_props
[br
.dest_tag
].name
,
896 midgard_tag_props
[midg_tags
[I
]].name
);
899 midg_tags
[I
] = br
.dest_tag
;
901 midg_stats
.instruction_count
++;
902 return br
.offset
>= 0;
/* Count the ALU execution fields (vmul/sadd/vadd/smul/lut) enabled in the
 * control word — one enable bit each at positions 17, 19, 21, 23, 25. */

static unsigned
num_alu_fields_enabled(uint32_t control_word)
{
        unsigned ret = 0;

        static const unsigned enable_bits[] = { 17, 19, 21, 23, 25 };

        for (unsigned i = 0; i < 5; ++i) {
                if ((control_word >> enable_bits[i]) & 1)
                        ret++;
        }

        return ret;
}
929 print_alu_word(FILE *fp
, uint32_t *words
, unsigned num_quad_words
,
930 unsigned tabs
, unsigned next
)
932 uint32_t control_word
= words
[0];
933 uint16_t *beginning_ptr
= (uint16_t *)(words
+ 1);
934 unsigned num_fields
= num_alu_fields_enabled(control_word
);
935 uint16_t *word_ptr
= beginning_ptr
+ num_fields
;
936 unsigned num_words
= 2 + num_fields
;
937 const midgard_constants
*consts
= NULL
;
938 bool branch_forward
= false;
940 if ((control_word
>> 17) & 1)
943 if ((control_word
>> 19) & 1)
946 if ((control_word
>> 21) & 1)
949 if ((control_word
>> 23) & 1)
952 if ((control_word
>> 25) & 1)
955 if ((control_word
>> 26) & 1)
958 if ((control_word
>> 27) & 1)
961 if (num_quad_words
> (num_words
+ 7) / 8) {
962 assert(num_quad_words
== (num_words
+ 15) / 8);
963 //Assume that the extra quadword is constants
964 consts
= (midgard_constants
*)(words
+ (4 * num_quad_words
- 4));
967 if ((control_word
>> 16) & 1)
968 fprintf(fp
, "unknown bit 16 enabled\n");
970 if ((control_word
>> 17) & 1) {
971 print_vector_field(fp
, "vmul", word_ptr
, *beginning_ptr
, consts
, tabs
);
976 if ((control_word
>> 18) & 1)
977 fprintf(fp
, "unknown bit 18 enabled\n");
979 if ((control_word
>> 19) & 1) {
980 print_scalar_field(fp
, "sadd", word_ptr
, *beginning_ptr
, consts
, tabs
);
985 if ((control_word
>> 20) & 1)
986 fprintf(fp
, "unknown bit 20 enabled\n");
988 if ((control_word
>> 21) & 1) {
989 print_vector_field(fp
, "vadd", word_ptr
, *beginning_ptr
, consts
, tabs
);
994 if ((control_word
>> 22) & 1)
995 fprintf(fp
, "unknown bit 22 enabled\n");
997 if ((control_word
>> 23) & 1) {
998 print_scalar_field(fp
, "smul", word_ptr
, *beginning_ptr
, consts
, tabs
);
1003 if ((control_word
>> 24) & 1)
1004 fprintf(fp
, "unknown bit 24 enabled\n");
1006 if ((control_word
>> 25) & 1) {
1007 print_vector_field(fp
, "lut", word_ptr
, *beginning_ptr
, consts
, tabs
);
1011 if ((control_word
>> 26) & 1) {
1012 branch_forward
|= print_compact_branch_writeout_field(fp
, *word_ptr
);
1016 if ((control_word
>> 27) & 1) {
1017 branch_forward
|= print_extended_branch_writeout_field(fp
, (uint8_t *) word_ptr
, next
);
1022 fprintf(fp
, "uconstants 0x%X, 0x%X, 0x%X, 0x%X\n",
1023 consts
->u32
[0], consts
->u32
[1],
1024 consts
->u32
[2], consts
->u32
[3]);
1026 return branch_forward
;
1030 print_varying_parameters(FILE *fp
, midgard_load_store_word
*word
)
1032 midgard_varying_parameter param
;
1033 unsigned v
= word
->varying_parameters
;
1034 memcpy(¶m
, &v
, sizeof(param
));
1036 if (param
.is_varying
) {
1037 /* If a varying, there are qualifiers */
1039 fprintf(fp
, ".flat");
1041 if (param
.interpolation
!= midgard_interp_default
) {
1042 if (param
.interpolation
== midgard_interp_centroid
)
1043 fprintf(fp
, ".centroid");
1045 fprintf(fp
, ".interp%d", param
.interpolation
);
1048 if (param
.modifier
!= midgard_varying_mod_none
) {
1049 if (param
.modifier
== midgard_varying_mod_perspective_w
)
1050 fprintf(fp
, ".perspectivew");
1051 else if (param
.modifier
== midgard_varying_mod_perspective_z
)
1052 fprintf(fp
, ".perspectivez");
1054 fprintf(fp
, ".mod%d", param
.modifier
);
1056 } else if (param
.flat
|| param
.interpolation
|| param
.modifier
) {
1057 fprintf(fp
, " /* is_varying not set but varying metadata attached */");
1060 if (param
.zero0
|| param
.zero1
|| param
.zero2
)
1061 fprintf(fp
, " /* zero tripped, %u %u %u */ ", param
.zero0
, param
.zero1
, param
.zero2
);
1065 is_op_varying(unsigned op
)
1068 case midgard_op_st_vary_16
:
1069 case midgard_op_st_vary_32
:
1070 case midgard_op_st_vary_32i
:
1071 case midgard_op_st_vary_32u
:
1072 case midgard_op_ld_vary_16
:
1073 case midgard_op_ld_vary_32
:
1074 case midgard_op_ld_vary_32i
:
1075 case midgard_op_ld_vary_32u
:
1083 is_op_attribute(unsigned op
)
1086 case midgard_op_ld_attr_16
:
1087 case midgard_op_ld_attr_32
:
1088 case midgard_op_ld_attr_32i
:
1089 case midgard_op_ld_attr_32u
:
1097 print_load_store_arg(FILE *fp
, uint8_t arg
, unsigned index
)
1099 /* Try to interpret as a register */
1100 midgard_ldst_register_select sel
;
1101 memcpy(&sel
, &arg
, sizeof(arg
));
1103 /* If unknown is set, we're not sure what this is or how to
1104 * interpret it. But if it's zero, we get it. */
1107 fprintf(fp
, "0x%02X", arg
);
1111 unsigned reg
= REGISTER_LDST_BASE
+ sel
.select
;
1112 char comp
= components
[sel
.component
];
1114 fprintf(fp
, "r%u.%c", reg
, comp
);
1116 /* Only print a shift if it's non-zero. Shifts only make sense for the
1117 * second index. For the first, we're not sure what it means yet */
1121 fprintf(fp
, " << %u", sel
.shift
);
1123 fprintf(fp
, " /* %X */", sel
.shift
);
/* Raise a resource-count statistic to cover `address`; negative counters
 * mark indirect access and are left untouched. */

static void
update_stats(signed *stat, unsigned address)
{
        if (*stat >= 0)
                *stat = MAX2(*stat, (signed) address + 1);
}
1135 print_load_store_instr(FILE *fp
, uint64_t data
,
1138 midgard_load_store_word
*word
= (midgard_load_store_word
*) &data
;
1140 print_ld_st_opcode(fp
, word
->op
);
1142 unsigned address
= word
->address
;
1144 if (is_op_varying(word
->op
)) {
1145 print_varying_parameters(fp
, word
);
1147 /* Do some analysis: check if direct cacess */
1149 if ((word
->arg_2
== 0x1E) && midg_stats
.varying_count
>= 0)
1150 update_stats(&midg_stats
.varying_count
, address
);
1152 midg_stats
.varying_count
= -16;
1153 } else if (is_op_attribute(word
->op
)) {
1154 if ((word
->arg_2
== 0x1E) && midg_stats
.attribute_count
>= 0)
1155 update_stats(&midg_stats
.attribute_count
, address
);
1157 midg_stats
.attribute_count
= -16;
1160 fprintf(fp
, " r%u", word
->reg
+ (OP_IS_STORE(word
->op
) ? 26 : 0));
1161 print_mask_4(fp
, word
->mask
, false);
1163 if (!OP_IS_STORE(word
->op
))
1164 update_dest(word
->reg
);
1166 bool is_ubo
= OP_IS_UBO_READ(word
->op
);
1169 /* UBOs use their own addressing scheme */
1171 int lo
= word
->varying_parameters
>> 7;
1172 int hi
= word
->address
;
1174 /* TODO: Combine fields logically */
1175 address
= (hi
<< 3) | lo
;
1178 fprintf(fp
, ", %u", address
);
1180 print_swizzle_vec4(fp
, word
->swizzle
, false, false, false);
1185 fprintf(fp
, "ubo%u", word
->arg_1
);
1186 update_stats(&midg_stats
.uniform_buffer_count
, word
->arg_1
);
1188 print_load_store_arg(fp
, word
->arg_1
, 0);
1191 print_load_store_arg(fp
, word
->arg_2
, 1);
1192 fprintf(fp
, " /* %X */\n", word
->varying_parameters
);
1194 midg_stats
.instruction_count
++;
1198 print_load_store_word(FILE *fp
, uint32_t *word
, unsigned tabs
)
1200 midgard_load_store
*load_store
= (midgard_load_store
*) word
;
1202 if (load_store
->word1
!= 3) {
1203 print_load_store_instr(fp
, load_store
->word1
, tabs
);
1206 if (load_store
->word2
!= 3) {
1207 print_load_store_instr(fp
, load_store
->word2
, tabs
);
1212 print_texture_reg_select(FILE *fp
, uint8_t u
, unsigned base
)
1214 midgard_tex_register_select sel
;
1215 memcpy(&sel
, &u
, sizeof(u
));
1220 fprintf(fp
, "r%u", base
+ sel
.select
);
1222 unsigned component
= sel
.component
;
1224 /* Use the upper half in half-reg mode */
1230 fprintf(fp
, ".%c", components
[component
]);
1232 assert(sel
.zero
== 0);
1236 print_texture_format(FILE *fp
, int format
)
1238 /* Act like a modifier */
1242 DEFINE_CASE(MALI_TEX_1D
, "1d");
1243 DEFINE_CASE(MALI_TEX_2D
, "2d");
1244 DEFINE_CASE(MALI_TEX_3D
, "3d");
1245 DEFINE_CASE(MALI_TEX_CUBE
, "cube");
1248 unreachable("Bad format");
1253 midgard_op_has_helpers(unsigned op
, bool gather
)
1259 case TEXTURE_OP_NORMAL
:
1260 case TEXTURE_OP_DFDX
:
1261 case TEXTURE_OP_DFDY
:
1269 print_texture_op(FILE *fp
, unsigned op
, bool gather
)
1271 /* Act like a bare name, like ESSL functions */
1274 fprintf(fp
, "textureGather");
1276 unsigned component
= op
>> 4;
1277 unsigned bottom
= op
& 0xF;
1280 fprintf(fp
, "_unk%u", bottom
);
1282 fprintf(fp
, ".%c", components
[component
]);
1287 DEFINE_CASE(TEXTURE_OP_NORMAL
, "texture");
1288 DEFINE_CASE(TEXTURE_OP_LOD
, "textureLod");
1289 DEFINE_CASE(TEXTURE_OP_TEXEL_FETCH
, "texelFetch");
1290 DEFINE_CASE(TEXTURE_OP_BARRIER
, "barrier");
1291 DEFINE_CASE(TEXTURE_OP_DFDX
, "dFdx");
1292 DEFINE_CASE(TEXTURE_OP_DFDY
, "dFdy");
1295 fprintf(fp
, "tex_%X", op
);
1301 texture_op_takes_bias(unsigned op
)
1303 return op
== TEXTURE_OP_NORMAL
;
1307 sampler_type_name(enum mali_sampler_type t
)
1310 case MALI_SAMPLER_FLOAT
:
1312 case MALI_SAMPLER_UNSIGNED
:
1314 case MALI_SAMPLER_SIGNED
:
1323 print_texture_barrier(FILE *fp
, uint32_t *word
)
1325 midgard_texture_barrier_word
*barrier
= (midgard_texture_barrier_word
*) word
;
1327 if (barrier
->type
!= TAG_TEXTURE_4_BARRIER
)
1328 fprintf(fp
, "/* barrier tag %X != tex/bar */ ", barrier
->type
);
1331 fprintf(fp
, "/* cont missing? */");
1334 fprintf(fp
, "/* last missing? */");
1337 fprintf(fp
, "/* zero1 = 0x%X */ ", barrier
->zero1
);
1340 fprintf(fp
, "/* zero2 = 0x%X */ ", barrier
->zero2
);
1343 fprintf(fp
, "/* zero3 = 0x%X */ ", barrier
->zero3
);
1346 fprintf(fp
, "/* zero4 = 0x%X */ ", barrier
->zero4
);
1349 fprintf(fp
, "/* zero4 = 0x%" PRIx64
" */ ", barrier
->zero5
);
1352 /* Control barriers are always implied, so include for obviousness */
1353 fprintf(fp
, " control");
1355 if (barrier
->buffer
)
1356 fprintf(fp
, " | buffer");
1358 if (barrier
->shared
)
1359 fprintf(fp
, " | shared");
1362 fprintf(fp
, " | stack");
1370 print_texture_word(FILE *fp
, uint32_t *word
, unsigned tabs
, unsigned in_reg_base
, unsigned out_reg_base
)
1372 midgard_texture_word
*texture
= (midgard_texture_word
*) word
;
1374 midg_stats
.helper_invocations
|=
1375 midgard_op_has_helpers(texture
->op
, texture
->is_gather
);
1377 /* Broad category of texture operation in question */
1378 print_texture_op(fp
, texture
->op
, texture
->is_gather
);
1380 /* Barriers use a dramatically different code path */
1381 if (texture
->op
== TEXTURE_OP_BARRIER
) {
1382 print_texture_barrier(fp
, word
);
1384 } else if (texture
->type
== TAG_TEXTURE_4_BARRIER
)
1385 fprintf (fp
, "/* nonbarrier had tex/bar tag */ ");
1386 else if (texture
->type
== TAG_TEXTURE_4_VTX
)
1387 fprintf (fp
, ".vtx");
1389 /* Specific format in question */
1390 print_texture_format(fp
, texture
->format
);
1392 /* Instruction "modifiers" parallel the ALU instructions. */
1394 if (texture
->shadow
)
1395 fprintf(fp
, ".shadow");
1398 fprintf(fp
, ".cont");
1401 fprintf(fp
, ".last");
1403 if (texture
->barrier_buffer
)
1404 fprintf(fp
, ".barrier_buffer /* XXX */");
1406 if (texture
->barrier_shared
)
1407 fprintf(fp
, ".barrier_shared /* XXX */");
1409 /* Output modifiers are always interpreted floatly */
1410 print_outmod(fp
, texture
->outmod
, false);
1412 fprintf(fp
, " %sr%u", texture
->out_full
? "" : "h",
1413 out_reg_base
+ texture
->out_reg_select
);
1414 print_mask_4(fp
, texture
->mask
, texture
->out_upper
);
1415 assert(!(texture
->out_full
&& texture
->out_upper
));
1418 /* Depending on whether we read from textures directly or indirectly,
1419 * we may be able to update our analysis */
1421 if (texture
->texture_register
) {
1422 fprintf(fp
, "texture[");
1423 print_texture_reg_select(fp
, texture
->texture_handle
, in_reg_base
);
1426 /* Indirect, tut tut */
1427 midg_stats
.texture_count
= -16;
1429 fprintf(fp
, "texture%u, ", texture
->texture_handle
);
1430 update_stats(&midg_stats
.texture_count
, texture
->texture_handle
);
1433 /* Print the type, GL style */
1434 fprintf(fp
, "%csampler", sampler_type_name(texture
->sampler_type
));
1436 if (texture
->sampler_register
) {
1438 print_texture_reg_select(fp
, texture
->sampler_handle
, in_reg_base
);
1441 midg_stats
.sampler_count
= -16;
1443 fprintf(fp
, "%u", texture
->sampler_handle
);
1444 update_stats(&midg_stats
.sampler_count
, texture
->sampler_handle
);
1447 print_swizzle_vec4(fp
, texture
->swizzle
, false, false, false);
1448 fprintf(fp
, ", %sr%u", texture
->in_reg_full
? "" : "h", in_reg_base
+ texture
->in_reg_select
);
1449 assert(!(texture
->in_reg_full
&& texture
->in_reg_upper
));
1451 /* TODO: integrate with swizzle */
1452 if (texture
->in_reg_upper
)
1455 print_swizzle_vec4(fp
, texture
->in_reg_swizzle
, false, false, false);
1457 /* There is *always* an offset attached. Of
1458 * course, that offset is just immediate #0 for a
1459 * GLES call that doesn't take an offset. If there
1460 * is a non-negative non-zero offset, this is
1461 * specified in immediate offset mode, with the
1462 * values in the offset_* fields as immediates. If
1463 * this is a negative offset, we instead switch to
1464 * a register offset mode, where the offset_*
1465 * fields become register triplets */
1467 if (texture
->offset_register
) {
1470 bool full
= texture
->offset
& 1;
1471 bool select
= texture
->offset
& 2;
1472 bool upper
= texture
->offset
& 4;
1474 fprintf(fp
, "%sr%u", full
? "" : "h", in_reg_base
+ select
);
1475 assert(!(texture
->out_full
&& texture
->out_upper
));
1477 /* TODO: integrate with swizzle */
1481 print_swizzle_vec4(fp
, texture
->offset
>> 3, false, false, false);
1484 } else if (texture
->offset
) {
1485 /* Only select ops allow negative immediate offsets, verify */
1487 signed offset_x
= (texture
->offset
& 0xF);
1488 signed offset_y
= ((texture
->offset
>> 4) & 0xF);
1489 signed offset_z
= ((texture
->offset
>> 8) & 0xF);
1491 bool neg_x
= offset_x
< 0;
1492 bool neg_y
= offset_y
< 0;
1493 bool neg_z
= offset_z
< 0;
1494 bool any_neg
= neg_x
|| neg_y
|| neg_z
;
1496 if (any_neg
&& texture
->op
!= TEXTURE_OP_TEXEL_FETCH
)
1497 fprintf(fp
, "/* invalid negative */ ");
1499 /* Regardless, just print the immediate offset */
1501 fprintf(fp
, " + <%d, %d, %d>, ", offset_x
, offset_y
, offset_z
);
1506 char lod_operand
= texture_op_takes_bias(texture
->op
) ? '+' : '=';
1508 if (texture
->lod_register
) {
1509 fprintf(fp
, "lod %c ", lod_operand
);
1510 print_texture_reg_select(fp
, texture
->bias
, in_reg_base
);
1513 if (texture
->bias_int
)
1514 fprintf(fp
, " /* bias_int = 0x%X */", texture
->bias_int
);
1515 } else if (texture
->op
== TEXTURE_OP_TEXEL_FETCH
) {
1516 /* For texel fetch, the int LOD is in the fractional place and
1517 * there is no fraction / possibility of bias. We *always* have
1518 * an explicit LOD, even if it's zero. */
1520 if (texture
->bias_int
)
1521 fprintf(fp
, " /* bias_int = 0x%X */ ", texture
->bias_int
);
1523 fprintf(fp
, "lod = %u, ", texture
->bias
);
1524 } else if (texture
->bias
|| texture
->bias_int
) {
1525 signed bias_int
= texture
->bias_int
;
1526 float bias_frac
= texture
->bias
/ 256.0f
;
1527 float bias
= bias_int
+ bias_frac
;
1529 bool is_bias
= texture_op_takes_bias(texture
->op
);
1530 char sign
= (bias
>= 0.0) ? '+' : '-';
1531 char operand
= is_bias
? sign
: '=';
1533 fprintf(fp
, "lod %c %f, ", operand
, fabsf(bias
));
1538 /* While not zero in general, for these simple instructions the
1539 * following unknowns are zero, so we don't include them */
1541 if (texture
->unknown4
||
1542 texture
->unknown8
) {
1543 fprintf(fp
, "// unknown4 = 0x%x\n", texture
->unknown4
);
1544 fprintf(fp
, "// unknown8 = 0x%x\n", texture
->unknown8
);
1547 midg_stats
.instruction_count
++;
1550 struct midgard_disasm_stats
1551 disassemble_midgard(FILE *fp
, uint8_t *code
, size_t size
, unsigned gpu_id
, gl_shader_stage stage
)
1553 uint32_t *words
= (uint32_t *) code
;
1554 unsigned num_words
= size
/ 4;
1557 bool branch_forward
= false;
1559 int last_next_tag
= -1;
1563 midg_tags
= calloc(sizeof(midg_tags
[0]), num_words
);
1565 /* Stats for shader-db */
1566 memset(&midg_stats
, 0, sizeof(midg_stats
));
1567 midg_ever_written
= 0;
1569 while (i
< num_words
) {
1570 unsigned tag
= words
[i
] & 0xF;
1571 unsigned next_tag
= (words
[i
] >> 4) & 0xF;
1572 unsigned num_quad_words
= midgard_tag_props
[tag
].size
;
1574 if (midg_tags
[i
] && midg_tags
[i
] != tag
) {
1575 fprintf(fp
, "\t/* XXX: TAG ERROR branch, got %s expected %s */\n",
1576 midgard_tag_props
[tag
].name
,
1577 midgard_tag_props
[midg_tags
[i
]].name
);
1582 /* Check the tag. The idea is to ensure that next_tag is
1583 * *always* recoverable from the disassembly, such that we may
1584 * safely omit printing next_tag. To show this, we first
1585 * consider that next tags are semantically off-by-one -- we end
1586 * up parsing tag n during step n+1. So, we ensure after we're
1587 * done disassembling the next tag of the final bundle is BREAK
1588 * and warn otherwise. We also ensure that the next tag is
1589 * never INVALID. Beyond that, since the last tag is checked
1590 * outside the loop, we can check one tag prior. If equal to
1591 * the current tag (which is unique), we're done. Otherwise, we
1592 * print if that tag was > TAG_BREAK, which implies the tag was
1593 * not TAG_BREAK or TAG_INVALID. But we already checked for
1594 * TAG_INVALID, so it's just if the last tag was TAG_BREAK that
1595 * we're silent. So we throw in a print for break-next on at
1596 * the end of the bundle (if it's not the final bundle, which
1597 * we already check for above), disambiguating this case as
1598 * well. Hence in all cases we are unambiguous, QED. */
1600 if (next_tag
== TAG_INVALID
)
1601 fprintf(fp
, "\t/* XXX: invalid next tag */\n");
1603 if (last_next_tag
> TAG_BREAK
&& last_next_tag
!= tag
) {
1604 fprintf(fp
, "\t/* XXX: TAG ERROR sequence, got %s expexted %s */\n",
1605 midgard_tag_props
[tag
].name
,
1606 midgard_tag_props
[last_next_tag
].name
);
1609 last_next_tag
= next_tag
;
1611 /* Tags are unique in the following way:
1613 * INVALID, BREAK, UNKNOWN_*: verbosely printed
1614 * TEXTURE_4_BARRIER: verified by barrier/!barrier op
1615 * TEXTURE_4_VTX: .vtx tag printed
1616 * TEXTURE_4: texture lack of barriers or .vtx
1617 * TAG_LOAD_STORE_4: only load/store
1618 * TAG_ALU_4/8/12/16: by number of instructions/constants
1619 * TAG_ALU_4_8/12/16_WRITEOUT: ^^ with .writeout tag
1623 case TAG_TEXTURE_4_VTX
... TAG_TEXTURE_4_BARRIER
: {
1624 bool interpipe_aliasing
=
1625 midgard_get_quirks(gpu_id
) & MIDGARD_INTERPIPE_REG_ALIASING
;
1627 print_texture_word(fp
, &words
[i
], tabs
,
1628 interpipe_aliasing
? 0 : REG_TEX_BASE
,
1629 interpipe_aliasing
? REGISTER_LDST_BASE
: REG_TEX_BASE
);
1633 case TAG_LOAD_STORE_4
:
1634 print_load_store_word(fp
, &words
[i
], tabs
);
1637 case TAG_ALU_4
... TAG_ALU_16_WRITEOUT
:
1638 branch_forward
= print_alu_word(fp
, &words
[i
], num_quad_words
, tabs
, i
+ 4*num_quad_words
);
1640 /* Reset word static analysis state */
1641 is_embedded_constant_half
= false;
1642 is_embedded_constant_int
= false;
1644 /* TODO: infer/verify me */
1645 if (tag
>= TAG_ALU_4_WRITEOUT
)
1646 fprintf(fp
, "writeout\n");
1651 fprintf(fp
, "Unknown word type %u:\n", words
[i
] & 0xF);
1653 print_quad_word(fp
, &words
[i
], tabs
);
1658 /* We are parsing per bundle anyway. Add before we start
1659 * breaking out so we don't miss the final bundle. */
1661 midg_stats
.bundle_count
++;
1662 midg_stats
.quadword_count
+= num_quad_words
;
1664 /* Include a synthetic "break" instruction at the end of the
1665 * bundle to signify that if, absent a branch, the shader
1666 * execution will stop here. Stop disassembly at such a break
1667 * based on a heuristic */
1669 if (next_tag
== TAG_BREAK
) {
1670 if (branch_forward
) {
1671 fprintf(fp
, "break\n");
1680 i
+= 4 * num_quad_words
;
1683 if (last_next_tag
!= TAG_BREAK
) {
1684 fprintf(fp
, "/* XXX: shader ended with tag %s */\n",
1685 midgard_tag_props
[last_next_tag
].name
);
1690 /* We computed work_count as max_work_registers, so add one to get the
1691 * count. If no work registers are written, you still have one work
1692 * reported, which is exactly what the hardware expects */
1694 midg_stats
.work_count
++;