#include "pan_encoder.h"
-int pandecode_jc(mali_ptr jc_gpu_va, bool bifrost);
+static void pandecode_swizzle(unsigned swizzle, enum mali_format format);
#define MEMORY_PROP(obj, p) {\
if (obj->p) { \
} \
}
+FILE *pandecode_dump_stream;
+
/* Semantic logging type.
*
* Raw: for raw messages to be printed as is.
pandecode_make_indent(void)
{
for (unsigned i = 0; i < pandecode_indent; ++i)
- printf(" ");
+ fprintf(pandecode_dump_stream, " ");
}
static void
pandecode_make_indent();
if (type == PANDECODE_MESSAGE)
- printf("// ");
+ fprintf(pandecode_dump_stream, "// ");
else if (type == PANDECODE_PROPERTY)
- printf(".");
+ fprintf(pandecode_dump_stream, ".");
va_start(ap, format);
- vprintf(format, ap);
+ vfprintf(pandecode_dump_stream, format, ap);
va_end(ap);
if (type == PANDECODE_PROPERTY)
- printf(",\n");
+ fprintf(pandecode_dump_stream, ",\n");
}
static void
va_list ap;
va_start(ap, format);
- vprintf(format, ap);
+ vfprintf(pandecode_dump_stream, format, ap);
va_end(ap);
}
unsigned total = offset + sz;
if (total > bo->length) {
- pandecode_msg("XXX: buffer overrun."
- "Chunk of size %d at offset %d in buffer of size %d. "
- "Overrun by %d bytes.",
+ pandecode_msg("XXX: buffer overrun. "
+ "Chunk of size %zu at offset %d in buffer of size %zu. "
+ "Overrun by %zu bytes. \n",
sz, offset, bo->length, total - bo->length);
return;
}
FLAG_INFO(HAS_MSAA),
FLAG_INFO(CAN_DISCARD),
FLAG_INFO(HAS_BLEND_SHADER),
- FLAG_INFO(DEPTH_TEST),
+ FLAG_INFO(DEPTH_WRITEMASK),
{}
};
};
#undef FLAG_INFO
-#define FLAG_INFO(flag) { MALI_FRAMEBUFFER_##flag, "MALI_FRAMEBUFFER_" #flag }
-static const struct pandecode_flag_info fb_fmt_flag_info[] = {
- FLAG_INFO(MSAA_A),
- FLAG_INFO(MSAA_B),
- FLAG_INFO(MSAA_8),
- {}
-};
-#undef FLAG_INFO
-
#define FLAG_INFO(flag) { MALI_MFBD_FORMAT_##flag, "MALI_MFBD_FORMAT_" #flag }
static const struct pandecode_flag_info mfbd_fmt_flag_info[] = {
FLAG_INFO(MSAA),
#undef FLAG_INFO
#define FLAG_INFO(flag) { MALI_EXTRA_##flag, "MALI_EXTRA_" #flag }
-static const struct pandecode_flag_info mfbd_extra_flag_info[] = {
+static const struct pandecode_flag_info mfbd_extra_flag_hi_info[] = {
FLAG_INFO(PRESENT),
- FLAG_INFO(AFBC),
+ {}
+};
+#undef FLAG_INFO
+
+#define FLAG_INFO(flag) { MALI_EXTRA_##flag, "MALI_EXTRA_" #flag }
+static const struct pandecode_flag_info mfbd_extra_flag_lo_info[] = {
FLAG_INFO(ZS),
{}
};
#undef FLAG_INFO
#define FLAG_INFO(flag) { MALI_##flag, "MALI_" #flag }
-static const struct pandecode_flag_info shader_midgard1_flag_info [] = {
+static const struct pandecode_flag_info shader_midgard1_flag_lo_info [] = {
+ FLAG_INFO(WRITES_Z),
FLAG_INFO(EARLY_Z),
- FLAG_INFO(HELPER_INVOCATIONS),
FLAG_INFO(READS_TILEBUFFER),
FLAG_INFO(READS_ZS),
{}
};
+
+static const struct pandecode_flag_info shader_midgard1_flag_hi_info [] = {
+ FLAG_INFO(WRITES_S),
+ {}
+};
#undef FLAG_INFO
#define FLAG_INFO(flag) { MALI_MFBD_##flag, "MALI_MFBD_" #flag }
};
#undef FLAG_INFO
+#define FLAG_INFO(flag) { MALI_SFBD_FORMAT_##flag, "MALI_SFBD_FORMAT_" #flag }
+static const struct pandecode_flag_info sfbd_unk1_info [] = {
+ FLAG_INFO(MSAA_8),
+ FLAG_INFO(MSAA_A),
+ {}
+};
+#undef FLAG_INFO
+
+#define FLAG_INFO(flag) { MALI_SFBD_FORMAT_##flag, "MALI_SFBD_FORMAT_" #flag }
+static const struct pandecode_flag_info sfbd_unk2_info [] = {
+ FLAG_INFO(MSAA_B),
+ FLAG_INFO(SRGB),
+ {}
+};
+#undef FLAG_INFO
+
extern char *replace_fragment;
extern char *replace_vertex;
switch (type) {
DEFINE_CASE(NULL);
- DEFINE_CASE(SET_VALUE);
+ DEFINE_CASE(WRITE_VALUE);
DEFINE_CASE(CACHE_FLUSH);
DEFINE_CASE(COMPUTE);
DEFINE_CASE(VERTEX);
}
#undef DEFINE_CASE
-/* Why is this duplicated? Who knows... */
-#define DEFINE_CASE(name) case MALI_ALT_FUNC_ ## name: return "MALI_ALT_FUNC_" #name
-static char *
-pandecode_alt_func(enum mali_alt_func mode)
-{
- switch (mode) {
- DEFINE_CASE(NEVER);
- DEFINE_CASE(LESS);
- DEFINE_CASE(EQUAL);
- DEFINE_CASE(LEQUAL);
- DEFINE_CASE(GREATER);
- DEFINE_CASE(NOTEQUAL);
- DEFINE_CASE(GEQUAL);
- DEFINE_CASE(ALWAYS);
-
- default:
- pandecode_msg("XXX: invalid alt func %X\n", mode);
- return "";
- }
-}
-#undef DEFINE_CASE
-
#define DEFINE_CASE(name) case MALI_STENCIL_ ## name: return "MALI_STENCIL_" #name
static char *
pandecode_stencil_op(enum mali_stencil_op op)
#undef DEFINE_CASE
-#define DEFINE_CASE(name) case MALI_ATTR_ ## name: return "MALI_ATTR_" #name
static char *pandecode_attr_mode_short(enum mali_attr_mode mode)
{
switch(mode) {
return "instanced_npot";
case MALI_ATTR_IMAGE:
return "image";
- case MALI_ATTR_INTERNAL:
- return "internal";
default:
pandecode_msg("XXX: invalid attribute mode %X\n", mode);
return "";
}
}
-#undef DEFINE_CASE
+static const char *
+pandecode_special_record(uint64_t v, bool* attribute)
+{
+ switch(v) {
+ case MALI_ATTR_VERTEXID:
+ *attribute = true;
+ return "gl_VertexID";
+ case MALI_ATTR_INSTANCEID:
+ *attribute = true;
+ return "gl_InstanceID";
+ case MALI_VARYING_FRAG_COORD:
+ return "gl_FragCoord";
+ case MALI_VARYING_FRONT_FACING:
+ return "gl_FrontFacing";
+ case MALI_VARYING_POINT_COORD:
+ return "gl_PointCoord";
+ default:
+ pandecode_msg("XXX: invalid special record %" PRIx64 "\n", v);
+ return "";
+ }
+}
#define DEFINE_CASE(name) case MALI_WRAP_## name: return "MALI_WRAP_" #name
static char *
}
#undef DEFINE_CASE
-#define DEFINE_CASE(name) case MALI_MFBD_BLOCK_## name: return "MALI_MFBD_BLOCK_" #name
+#define DEFINE_CASE(name) case MALI_BLOCK_## name: return "MALI_BLOCK_" #name
static char *
-pandecode_mfbd_block_format(enum mali_mfbd_block_format fmt)
+pandecode_block_format(enum mali_block_format fmt)
{
switch (fmt) {
DEFINE_CASE(TILED);
#undef DEFINE_CASE
#define DEFINE_CASE(name) case MALI_EXCEPTION_ACCESS_## name: return ""#name
-static char *
-pandecode_exception_access(enum mali_exception_access access)
+char *
+pandecode_exception_access(unsigned access)
{
switch (access) {
DEFINE_CASE(NONE);
const struct midgard_tiler_descriptor *t,
unsigned width,
unsigned height,
- bool is_fragment)
+ bool is_fragment,
+ bool has_hierarchy)
{
pandecode_log(".tiler = {\n");
pandecode_indent++;
/* Now that we've sanity checked, we'll try to calculate the sizes
* ourselves for comparison */
- unsigned ref_header = panfrost_tiler_header_size(width, height, t->hierarchy_mask);
- unsigned ref_size = panfrost_tiler_full_size(width, height, t->hierarchy_mask);
+ unsigned ref_header = panfrost_tiler_header_size(width, height, t->hierarchy_mask, has_hierarchy);
+ unsigned ref_size = panfrost_tiler_full_size(width, height, t->hierarchy_mask, has_hierarchy);
if (!((ref_header == body_offset) && (ref_size == t->polygon_list_size))) {
pandecode_msg("XXX: bad polygon list size (expected %d / 0x%x)\n",
/* When tiling is enabled, the heap should be a tight fit */
unsigned heap_offset = t->heap_start - heap->gpu_va;
if ((heap_offset + heap_size) != heap->length) {
- pandecode_msg("XXX: heap size %d (expected %d)\n",
+ pandecode_msg("XXX: heap size %u (expected %zu)\n",
heap_size, heap->length - heap_offset);
}
bool has_extra;
};
+static void
+pandecode_sfbd_format(struct mali_sfbd_format format)
+{
+ pandecode_log(".format = {\n");
+ pandecode_indent++;
+
+ pandecode_log(".unk1 = ");
+ pandecode_log_decoded_flags(sfbd_unk1_info, format.unk1);
+ pandecode_log_cont(",\n");
+
+ /* TODO: Map formats so we can check swizzles and print nicely */
+ pandecode_log("swizzle");
+ pandecode_swizzle(format.swizzle, MALI_RGBA8_UNORM);
+ pandecode_log_cont(",\n");
+
+ pandecode_prop("nr_channels = MALI_POSITIVE(%d)",
+ (format.nr_channels + 1));
+
+ pandecode_log(".unk2 = ");
+ pandecode_log_decoded_flags(sfbd_unk2_info, format.unk2);
+ pandecode_log_cont(",\n");
+
+ pandecode_prop("block = %s", pandecode_block_format(format.block));
+
+ pandecode_prop("unk3 = 0x%" PRIx32, format.unk3);
+
+ pandecode_indent--;
+ pandecode_log("},\n");
+}
+
+static void
+pandecode_shared_memory(const struct mali_shared_memory *desc, bool is_compute)
+{
+ pandecode_prop("stack_shift = 0x%x", desc->stack_shift);
+
+ if (desc->unk0)
+ pandecode_prop("unk0 = 0x%x", desc->unk0);
+
+ if (desc->shared_workgroup_count != 0x1F) {
+ pandecode_prop("shared_workgroup_count = %d", desc->shared_workgroup_count);
+ if (!is_compute)
+ pandecode_msg("XXX: wrong workgroup count for noncompute\n");
+ }
+
+ if (desc->shared_unk1 || desc->shared_shift) {
+ pandecode_prop("shared_unk1 = %X", desc->shared_unk1);
+ pandecode_prop("shared_shift = %X", desc->shared_shift);
+
+ if (!is_compute)
+ pandecode_msg("XXX: shared memory configured in noncompute shader");
+ }
+
+ if (desc->shared_zero) {
+ pandecode_msg("XXX: shared memory zero tripped\n");
+ pandecode_prop("shared_zero = 0x%" PRIx32, desc->shared_zero);
+ }
+
+ if (desc->shared_memory && !is_compute)
+ pandecode_msg("XXX: shared memory used in noncompute shader\n");
+
+ MEMORY_PROP(desc, scratchpad);
+ MEMORY_PROP(desc, shared_memory);
+ MEMORY_PROP(desc, unknown1);
+}
+
static struct pandecode_fbd
-pandecode_sfbd(uint64_t gpu_va, int job_no, bool is_fragment)
+pandecode_sfbd(uint64_t gpu_va, int job_no, bool is_fragment, unsigned gpu_id)
{
struct pandecode_mapped_memory *mem = pandecode_find_mapped_gpu_mem_containing(gpu_va);
const struct mali_single_framebuffer *PANDECODE_PTR_VAR(s, mem, (mali_ptr) gpu_va);
- struct pandecode_fbd info;
+ struct pandecode_fbd info = {
+ .has_extra = false,
+ .rt_count = 1
+ };
pandecode_log("struct mali_single_framebuffer framebuffer_%"PRIx64"_%d = {\n", gpu_va, job_no);
pandecode_indent++;
- pandecode_prop("unknown1 = 0x%" PRIx32, s->unknown1);
- pandecode_prop("unknown2 = 0x%" PRIx32, s->unknown2);
+ pandecode_log(".shared_memory = {\n");
+ pandecode_indent++;
+ pandecode_shared_memory(&s->shared_memory, false);
+ pandecode_indent--;
+ pandecode_log("},\n");
- pandecode_log(".format = ");
- pandecode_log_decoded_flags(fb_fmt_flag_info, s->format);
- pandecode_log_cont(",\n");
+ pandecode_sfbd_format(s->format);
info.width = s->width + 1;
info.height = s->height + 1;
- info.rt_count = 1;
pandecode_prop("width = MALI_POSITIVE(%" PRId16 ")", info.width);
pandecode_prop("height = MALI_POSITIVE(%" PRId16 ")", info.height);
+ MEMORY_PROP(s, checksum);
+
+ if (s->checksum_stride)
+ pandecode_prop("checksum_stride = %d", s->checksum_stride);
+
MEMORY_PROP(s, framebuffer);
pandecode_prop("stride = %d", s->stride);
pandecode_log_decoded_flags(clear_flag_info, s->clear_flags);
pandecode_log_cont(",\n");
- if (s->depth_buffer | s->depth_buffer_enable) {
+ if (s->depth_buffer) {
MEMORY_PROP(s, depth_buffer);
- pandecode_prop("depth_buffer_enable = %s", DS_ENABLE(s->depth_buffer_enable));
+ pandecode_prop("depth_stride = %d", s->depth_stride);
}
- if (s->stencil_buffer | s->stencil_buffer_enable) {
+ if (s->stencil_buffer) {
MEMORY_PROP(s, stencil_buffer);
- pandecode_prop("stencil_buffer_enable = %s", DS_ENABLE(s->stencil_buffer_enable));
+ pandecode_prop("stencil_stride = %d", s->stencil_stride);
+ }
+
+ if (s->depth_stride_zero ||
+ s->stencil_stride_zero ||
+ s->zero7 || s->zero8) {
+ pandecode_msg("XXX: Depth/stencil zeros tripped\n");
+ pandecode_prop("depth_stride_zero = 0x%x",
+ s->depth_stride_zero);
+ pandecode_prop("stencil_stride_zero = 0x%x",
+ s->stencil_stride_zero);
+ pandecode_prop("zero7 = 0x%" PRIx32,
+ s->zero7);
+ pandecode_prop("zero8 = 0x%" PRIx32,
+ s->zero8);
}
if (s->clear_color_1 | s->clear_color_2 | s->clear_color_3 | s->clear_color_4) {
pandecode_prop("clear_stencil = 0x%x", s->clear_stencil);
}
- MEMORY_PROP(s, unknown_address_0);
const struct midgard_tiler_descriptor t = s->tiler;
- pandecode_midgard_tiler_descriptor(&t, s->width + 1, s->height + 1, is_fragment);
+
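+ /* T720/T820/T830 appear to use the simpler non-hierarchical tiler, so
+ * skip the hierarchy-based size maths for those models */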
+ bool has_hierarchy = !(gpu_id == 0x0720 || gpu_id == 0x0820 || gpu_id == 0x0830);
+ pandecode_midgard_tiler_descriptor(&t, s->width + 1, s->height + 1, is_fragment, has_hierarchy);
pandecode_indent--;
pandecode_log("};\n");
- pandecode_prop("zero0 = 0x%" PRIx64, s->zero0);
- pandecode_prop("zero1 = 0x%" PRIx64, s->zero1);
pandecode_prop("zero2 = 0x%" PRIx32, s->zero2);
pandecode_prop("zero4 = 0x%" PRIx32, s->zero4);
+ pandecode_prop("zero5 = 0x%" PRIx32, s->zero5);
- printf(".zero3 = {");
+ pandecode_log_cont(".zero3 = {");
for (int i = 0; i < sizeof(s->zero3) / sizeof(s->zero3[0]); ++i)
- printf("%X, ", s->zero3[i]);
+ pandecode_log_cont("%X, ", s->zero3[i]);
- printf("},\n");
+ pandecode_log_cont("},\n");
- printf(".zero6 = {");
+ pandecode_log_cont(".zero6 = {");
for (int i = 0; i < sizeof(s->zero6) / sizeof(s->zero6[0]); ++i)
- printf("%X, ", s->zero6[i]);
+ pandecode_log_cont("%X, ", s->zero6[i]);
- printf("},\n");
+ pandecode_log_cont("},\n");
return info;
}
-static void
-pandecode_u32_slide(unsigned name, const u32 *slide, unsigned count)
-{
- pandecode_log(".unknown%d = {", name);
-
- for (int i = 0; i < count; ++i)
- printf("%X, ", slide[i]);
-
- pandecode_log("},\n");
-}
-
-#define SHORT_SLIDE(num) \
- pandecode_u32_slide(num, s->unknown ## num, ARRAY_SIZE(s->unknown ## num))
-
static void
pandecode_compute_fbd(uint64_t gpu_va, int job_no)
{
struct pandecode_mapped_memory *mem = pandecode_find_mapped_gpu_mem_containing(gpu_va);
- const struct mali_compute_fbd *PANDECODE_PTR_VAR(s, mem, (mali_ptr) gpu_va);
+ const struct mali_shared_memory *PANDECODE_PTR_VAR(s, mem, (mali_ptr) gpu_va);
- pandecode_log("struct mali_compute_fbd framebuffer_%"PRIx64"_%d = {\n", gpu_va, job_no);
+ pandecode_log("struct mali_shared_memory shared_%"PRIx64"_%d = {\n", gpu_va, job_no);
pandecode_indent++;
-
- SHORT_SLIDE(1);
-
+ pandecode_shared_memory(s, true);
pandecode_indent--;
- printf("},\n");
+ pandecode_log("},\n");
}
/* Extracts the number of components associated with a Mali format */
pandecode_prop("unk2 = 0x%" PRIx32, format.unk2);
pandecode_prop("unk3 = 0x%" PRIx32, format.unk3);
- pandecode_prop("block = %s",
- pandecode_mfbd_block_format(format.block));
+ pandecode_prop("block = %s", pandecode_block_format(format.block));
/* TODO: Map formats so we can check swizzles and print nicely */
pandecode_log("swizzle");
pandecode_log_cont(",\n");
pandecode_prop("nr_channels = MALI_POSITIVE(%d)",
- MALI_NEGATIVE(format.nr_channels));
+ (format.nr_channels + 1));
pandecode_log(".flags = ");
pandecode_log_decoded_flags(mfbd_fmt_flag_info, format.flags);
}
static void
-pandecode_render_target(uint64_t gpu_va, unsigned job_no, const struct bifrost_framebuffer *fb)
+pandecode_render_target(uint64_t gpu_va, unsigned job_no, const struct mali_framebuffer *fb)
{
- pandecode_log("struct bifrost_render_target rts_list_%"PRIx64"_%d[] = {\n", gpu_va, job_no);
+ pandecode_log("struct mali_render_target rts_list_%"PRIx64"_%d[] = {\n", gpu_va, job_no);
pandecode_indent++;
- for (int i = 0; i < MALI_NEGATIVE(fb->rt_count_1); i++) {
- mali_ptr rt_va = gpu_va + i * sizeof(struct bifrost_render_target);
+ for (int i = 0; i < (fb->rt_count_1 + 1); i++) {
+ mali_ptr rt_va = gpu_va + i * sizeof(struct mali_render_target);
struct pandecode_mapped_memory *mem =
pandecode_find_mapped_gpu_mem_containing(rt_va);
- const struct bifrost_render_target *PANDECODE_PTR_VAR(rt, mem, (mali_ptr) rt_va);
+ const struct mali_render_target *PANDECODE_PTR_VAR(rt, mem, (mali_ptr) rt_va);
pandecode_log("{\n");
pandecode_indent++;
pandecode_rt_format(rt->format);
- if (rt->format.block == MALI_MFBD_BLOCK_AFBC) {
+ if (rt->format.block == MALI_BLOCK_AFBC) {
pandecode_log(".afbc = {\n");
pandecode_indent++;
}
static struct pandecode_fbd
-pandecode_mfbd_bfr(uint64_t gpu_va, int job_no, bool is_fragment)
+pandecode_mfbd_bfr(uint64_t gpu_va, int job_no, bool is_fragment, bool is_compute)
{
struct pandecode_mapped_memory *mem = pandecode_find_mapped_gpu_mem_containing(gpu_va);
- const struct bifrost_framebuffer *PANDECODE_PTR_VAR(fb, mem, (mali_ptr) gpu_va);
+ const struct mali_framebuffer *PANDECODE_PTR_VAR(fb, mem, (mali_ptr) gpu_va);
struct pandecode_fbd info;
- if (fb->sample_locations) {
- /* The blob stores all possible sample locations in a single buffer
- * allocated on startup, and just switches the pointer when switching
- * MSAA state. For now, we just put the data into the cmdstream, but we
- * should do something like what the blob does with a real driver.
- *
- * There seem to be 32 slots for sample locations, followed by another
- * 16. The second 16 is just the center location followed by 15 zeros
- * in all the cases I've identified (maybe shader vs. depth/color
- * samples?).
- */
-
- struct pandecode_mapped_memory *smem = pandecode_find_mapped_gpu_mem_containing(fb->sample_locations);
-
- const u16 *PANDECODE_PTR_VAR(samples, smem, fb->sample_locations);
-
- pandecode_log("uint16_t sample_locations_%d[] = {\n", job_no);
- pandecode_indent++;
-
- for (int i = 0; i < 32 + 16; i++) {
- pandecode_log("%d, %d,\n", samples[2 * i], samples[2 * i + 1]);
- }
-
- pandecode_indent--;
- pandecode_log("};\n");
- }
-
- pandecode_log("struct bifrost_framebuffer framebuffer_%"PRIx64"_%d = {\n", gpu_va, job_no);
+ pandecode_log("struct mali_framebuffer framebuffer_%"PRIx64"_%d = {\n", gpu_va, job_no);
pandecode_indent++;
- pandecode_prop("unk0 = 0x%x", fb->unk0);
-
- if (fb->sample_locations)
- pandecode_prop("sample_locations = sample_locations_%d", job_no);
-
- /* Assume that unknown1 was emitted in the last job for
- * now */
- MEMORY_PROP(fb, unknown1);
+ pandecode_log(".shared_memory = {\n");
+ pandecode_indent++;
+ pandecode_shared_memory(&fb->shared_memory, is_compute);
+ pandecode_indent--;
+ pandecode_log("},\n");
info.width = fb->width1 + 1;
info.height = fb->height1 + 1;
if (fb->clear_depth)
pandecode_prop("clear_depth = %f", fb->clear_depth);
- /* TODO: What is this? Let's not blow up.. */
- if (fb->unknown2 != 0x1F)
- pandecode_prop("unknown2 = 0x%x", fb->unknown2);
-
- pandecode_prop("unknown2 = 0x%x", fb->unknown2);
- MEMORY_PROP(fb, scratchpad);
const struct midgard_tiler_descriptor t = fb->tiler;
- pandecode_midgard_tiler_descriptor(&t, fb->width1 + 1, fb->height1 + 1, is_fragment);
+ if (!is_compute)
+ pandecode_midgard_tiler_descriptor(&t, fb->width1 + 1, fb->height1 + 1, is_fragment, true);
+ else
+ pandecode_msg("XXX: skipping compute MFBD, fixme\n");
if (fb->zero3 || fb->zero4) {
pandecode_msg("XXX: framebuffer zeros tripped\n");
pandecode_indent--;
pandecode_log("};\n");
- gpu_va += sizeof(struct bifrost_framebuffer);
+ gpu_va += sizeof(struct mali_framebuffer);
info.has_extra = (fb->mfbd_flags & MALI_MFBD_EXTRA) && is_fragment;
if (info.has_extra) {
mem = pandecode_find_mapped_gpu_mem_containing(gpu_va);
- const struct bifrost_fb_extra *PANDECODE_PTR_VAR(fbx, mem, (mali_ptr) gpu_va);
+ const struct mali_framebuffer_extra *PANDECODE_PTR_VAR(fbx, mem, (mali_ptr) gpu_va);
- pandecode_log("struct bifrost_fb_extra fb_extra_%"PRIx64"_%d = {\n", gpu_va, job_no);
+ pandecode_log("struct mali_framebuffer_extra fb_extra_%"PRIx64"_%d = {\n", gpu_va, job_no);
pandecode_indent++;
MEMORY_PROP(fbx, checksum);
if (fbx->checksum_stride)
pandecode_prop("checksum_stride = %d", fbx->checksum_stride);
- pandecode_log(".flags = ");
- pandecode_log_decoded_flags(mfbd_extra_flag_info, fbx->flags);
+ pandecode_log(".flags_hi = ");
+ pandecode_log_decoded_flags(mfbd_extra_flag_hi_info, fbx->flags_hi);
+ pandecode_log_cont(",\n");
+
+ pandecode_log(".flags_lo = ");
+ pandecode_log_decoded_flags(mfbd_extra_flag_lo_info, fbx->flags_lo);
pandecode_log_cont(",\n");
- if (fbx->flags & MALI_EXTRA_AFBC_ZS) {
+ pandecode_prop("zs_block = %s", pandecode_block_format(fbx->zs_block));
+
+ if (fbx->zs_block == MALI_BLOCK_AFBC) {
pandecode_log(".ds_afbc = {\n");
pandecode_indent++;
MEMORY_PROP_DIR(fbx->ds_linear, depth);
pandecode_prop("depth_stride = %d",
fbx->ds_linear.depth_stride);
+ } else if (fbx->ds_linear.depth_stride) {
+ pandecode_msg("XXX: depth stride zero tripped %d\n", fbx->ds_linear.depth_stride);
}
if (fbx->ds_linear.stencil) {
MEMORY_PROP_DIR(fbx->ds_linear, stencil);
pandecode_prop("stencil_stride = %d",
fbx->ds_linear.stencil_stride);
+ } else if (fbx->ds_linear.stencil_stride) {
+ pandecode_msg("XXX: stencil stride zero tripped %d\n", fbx->ds_linear.stencil_stride);
}
if (fbx->ds_linear.depth_stride_zero ||
pandecode_indent--;
pandecode_log("};\n");
- gpu_va += sizeof(struct bifrost_fb_extra);
+ gpu_va += sizeof(struct mali_framebuffer_extra);
}
if (is_fragment)
static void
pandecode_attributes(const struct pandecode_mapped_memory *mem,
mali_ptr addr, int job_no, char *suffix,
- int count, bool varying)
+ int count, bool varying, enum mali_job_type job_type)
{
char *prefix = varying ? "varying" : "attribute";
+ assert(addr);
- if (!addr) {
- pandecode_msg("no %s\n", prefix);
+ if (!count) {
+ pandecode_msg("warn: No %s records\n", prefix);
return;
}
union mali_attr *attr = pandecode_fetch_gpu_mem(mem, addr, sizeof(union mali_attr) * count);
for (int i = 0; i < count; ++i) {
+ /* First, check for special records */
+ if (attr[i].elements < MALI_RECORD_SPECIAL) {
+ if (attr[i].size)
+ pandecode_msg("XXX: tripped size=%d\n", attr[i].size);
+
+ if (attr[i].stride) {
+ /* gl_InstanceID passes a magic divisor in the
+ * stride field to divide by the padded vertex
+ * count. No other records should do so, so
+ * stride should otherwise be zero. Note that
+ * stride in the usual attribute sense doesn't
+ * apply to special records. */
+
+ bool has_divisor = attr[i].elements == MALI_ATTR_INSTANCEID;
+
+ pandecode_log_cont("/* %smagic divisor = %X */ ",
+ has_divisor ? "" : "XXX: ", attr[i].stride);
+ }
+
+ if (attr[i].shift || attr[i].extra_flags) {
+ /* Attributes use these fields for
+ * instancing/padding/etc type issues, but
+ * varyings don't */
+
+ pandecode_log_cont("/* %sshift=%d, extra=%d */ ",
+ varying ? "XXX: " : "",
+ attr[i].shift, attr[i].extra_flags);
+ }
+
+ /* Print the special record name */
+ bool attribute = false;
+ pandecode_log("%s_%d = %s;\n", prefix, i, pandecode_special_record(attr[i].elements, &attribute));
+
+ /* Sanity check */
+ if (attribute == varying)
+ pandecode_msg("XXX: mismatched special record\n");
+
+ continue;
+ }
+
enum mali_attr_mode mode = attr[i].elements & 7;
if (mode == MALI_ATTR_UNUSED)
/* Print the stride and size */
pandecode_log_cont("<%u>[%u]", attr[i].stride, attr[i].size);
- /* Check: the size must be divisible by the stride */
- if (attr[i].size % attr[i].stride)
- pandecode_msg("XXX: size not divisible by stride\n");
-
- /* TODO: Sanity check the quotient itself -- it should equal
- * vertex count (or something computed from it for instanced)
- * which means we can check and elide */
+ /* TODO: Sanity check the quotient itself. It must be equal to
+ * (or be greater than, if the driver added padding) the padded
+ * vertex count. */
/* Finally, print the pointer */
mali_ptr raw_elements = attr[i].elements & ~7;
/* shift/extra_flags exist only for instanced */
if (attr[i].shift | attr[i].extra_flags) {
+ /* These are set to random values by the blob for
+ * varyings, most likely a symptom of uninitialized
+ * memory where the hardware masked the bug. As such we
+ * put this at a warning, not an error. */
+
if (mode == MALI_ATTR_LINEAR)
- pandecode_msg("XXX: instancing fields set for linear\n");
+ pandecode_msg("warn: instancing fields set for linear\n");
pandecode_prop("shift = %d", attr[i].shift);
pandecode_prop("extra_flags = %d", attr[i].extra_flags);
snprintf(base, sizeof(base), "%s_meta", prefix);
struct mali_attr_meta *attr_meta;
- mali_ptr p = varying ? (v->varying_meta & ~0xF) : v->attribute_meta;
+ mali_ptr p = varying ? v->varying_meta : v->attribute_meta;
struct pandecode_mapped_memory *attr_mem = pandecode_find_mapped_gpu_mem_containing(p);
return count ? (max_index + 1) : 0;
}
-static void
-pandecode_indices(uintptr_t pindices, uint32_t index_count, int job_no)
-{
- struct pandecode_mapped_memory *imem = pandecode_find_mapped_gpu_mem_containing(pindices);
-
- if (imem) {
- /* Indices are literally just a u32 array :) */
-
- uint32_t *PANDECODE_PTR_VAR(indices, imem, pindices);
-
- pandecode_log("uint32_t indices_%d[] = {\n", job_no);
- pandecode_indent++;
-
- for (unsigned i = 0; i < (index_count + 1); i += 3)
- pandecode_log("%d, %d, %d,\n",
- indices[i],
- indices[i + 1],
- indices[i + 2]);
-
- pandecode_indent--;
- pandecode_log("};\n");
- }
-}
-
/* return bits [lo, hi) of word */
static u32
bits(u32 word, u32 lo, u32 hi)
}
static void
-pandecode_vertex_tiler_prefix(struct mali_vertex_tiler_prefix *p, int job_no, bool noninstanced)
+pandecode_vertex_tiler_prefix(struct mali_vertex_tiler_prefix *p, int job_no, bool graphics)
{
pandecode_log_cont("{\n");
pandecode_indent++;
* invocation_count for an explanation.
*/
- unsigned size_x = bits(p->invocation_count, 0, p->size_y_shift) + 1;
- unsigned size_y = bits(p->invocation_count, p->size_y_shift, p->size_z_shift) + 1;
- unsigned size_z = bits(p->invocation_count, p->size_z_shift, p->workgroups_x_shift) + 1;
+ unsigned size_y_shift = bits(p->invocation_shifts, 0, 5);
+ unsigned size_z_shift = bits(p->invocation_shifts, 5, 10);
+ unsigned workgroups_x_shift = bits(p->invocation_shifts, 10, 16);
+ unsigned workgroups_y_shift = bits(p->invocation_shifts, 16, 22);
+ unsigned workgroups_z_shift = bits(p->invocation_shifts, 22, 28);
+ unsigned workgroups_x_shift_2 = bits(p->invocation_shifts, 28, 32);
- unsigned groups_x = bits(p->invocation_count, p->workgroups_x_shift, p->workgroups_y_shift) + 1;
- unsigned groups_y = bits(p->invocation_count, p->workgroups_y_shift, p->workgroups_z_shift) + 1;
- unsigned groups_z = bits(p->invocation_count, p->workgroups_z_shift, 32) + 1;
+ unsigned size_x = bits(p->invocation_count, 0, size_y_shift) + 1;
+ unsigned size_y = bits(p->invocation_count, size_y_shift, size_z_shift) + 1;
+ unsigned size_z = bits(p->invocation_count, size_z_shift, workgroups_x_shift) + 1;
+
+ unsigned groups_x = bits(p->invocation_count, workgroups_x_shift, workgroups_y_shift) + 1;
+ unsigned groups_y = bits(p->invocation_count, workgroups_y_shift, workgroups_z_shift) + 1;
+ unsigned groups_z = bits(p->invocation_count, workgroups_z_shift, 32) + 1;
/* Even though we have this decoded, we want to ensure that the
* representation is "unique" so we don't lose anything by printing only
* decoded, we're good to go. */
struct mali_vertex_tiler_prefix ref;
- panfrost_pack_work_groups_compute(&ref, groups_x, groups_y, groups_z, size_x, size_y, size_z, noninstanced);
+ panfrost_pack_work_groups_compute(&ref, groups_x, groups_y, groups_z, size_x, size_y, size_z, graphics);
bool canonical =
(p->invocation_count == ref.invocation_count) &&
- (p->size_y_shift == ref.size_y_shift) &&
- (p->size_z_shift == ref.size_z_shift) &&
- (p->workgroups_x_shift == ref.workgroups_x_shift) &&
- (p->workgroups_y_shift == ref.workgroups_y_shift) &&
- (p->workgroups_z_shift == ref.workgroups_z_shift) &&
- (p->workgroups_x_shift_2 == ref.workgroups_x_shift_2);
+ (p->invocation_shifts == ref.invocation_shifts);
if (!canonical) {
pandecode_msg("XXX: non-canonical workgroups packing\n");
- pandecode_msg("expected: %X, %d, %d, %d, %d, %d\n",
+ pandecode_msg("expected: %X, %X",
ref.invocation_count,
- ref.size_y_shift,
- ref.size_z_shift,
- ref.workgroups_x_shift,
- ref.workgroups_y_shift,
- ref.workgroups_z_shift,
- ref.workgroups_x_shift_2);
+ ref.invocation_shifts);
pandecode_prop("invocation_count = 0x%" PRIx32, p->invocation_count);
- pandecode_prop("size_y_shift = %d", p->size_y_shift);
- pandecode_prop("size_z_shift = %d", p->size_z_shift);
- pandecode_prop("workgroups_x_shift = %d", p->workgroups_x_shift);
- pandecode_prop("workgroups_y_shift = %d", p->workgroups_y_shift);
- pandecode_prop("workgroups_z_shift = %d", p->workgroups_z_shift);
- pandecode_prop("workgroups_x_shift_2 = %d", p->workgroups_x_shift_2);
+ pandecode_prop("size_y_shift = %d", size_y_shift);
+ pandecode_prop("size_z_shift = %d", size_z_shift);
+ pandecode_prop("workgroups_x_shift = %d", workgroups_x_shift);
+ pandecode_prop("workgroups_y_shift = %d", workgroups_y_shift);
+ pandecode_prop("workgroups_z_shift = %d", workgroups_z_shift);
+ pandecode_prop("workgroups_x_shift_2 = %d", workgroups_x_shift_2);
}
/* Regardless, print the decode */
if (p->index_count)
pandecode_prop("index_count = MALI_POSITIVE(%" PRId32 ")", p->index_count + 1);
+
+ unsigned index_raw_size = (p->unknown_draw & MALI_DRAW_INDEXED_SIZE);
+ index_raw_size >>= MALI_DRAW_INDEXED_SHIFT;
+
+ /* Validate an index buffer is present if we need one. TODO: verify
+ * relationship between invocation_count and index_count */
+
+ if (p->indices) {
+ unsigned count = p->index_count;
+
+ /* Grab the size */
+ unsigned size = (index_raw_size == 0x3) ? 4 : index_raw_size;
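+
+ /* The raw field seems to store the index size in bytes directly for
+ * 8-bit/16-bit indices, with 0x3 denoting 32-bit (4-byte) indices,
+ * hence the special case above */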
+
+ /* Ensure we got a size, and if so, validate the index buffer
+ * is large enough to hold a full set of indices of the given
+ * size */
+
+ if (!index_raw_size)
+ pandecode_msg("XXX: index size missing\n");
+ else
+ pandecode_validate_buffer(p->indices, count * size);
+ } else if (index_raw_size)
+ pandecode_msg("XXX: unexpected index size %u\n", index_raw_size);
+
if (p->offset_bias_correction)
pandecode_prop("offset_bias_correction = %d", p->offset_bias_correction);
pandecode_uniform_buffers(mali_ptr pubufs, int ubufs_count, int job_no)
{
struct pandecode_mapped_memory *umem = pandecode_find_mapped_gpu_mem_containing(pubufs);
- struct mali_uniform_buffer_meta *PANDECODE_PTR_VAR(ubufs, umem, pubufs);
+ uint64_t *PANDECODE_PTR_VAR(ubufs, umem, pubufs);
for (int i = 0; i < ubufs_count; i++) {
- unsigned size = (ubufs[i].size + 1) * 16;
- mali_ptr addr = ubufs[i].ptr << 2;
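+ /* Each UBO is now a raw 64-bit word: the low 10 bits give the size in
+ * units of 16 bytes and the remaining bits give the address shifted
+ * right by 2, as decoded below */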
+ unsigned size = (ubufs[i] & ((1 << 10) - 1)) * 16;
+ mali_ptr addr = (ubufs[i] >> 10) << 2;
pandecode_validate_buffer(addr, size);
- char *ptr = pointer_as_memory_reference(ubufs[i].ptr << 2);
+ char *ptr = pointer_as_memory_reference(addr);
pandecode_log("ubuf_%d[%u] = %s;\n", i, size, ptr);
free(ptr);
}
}
static void
-pandecode_scratchpad(uintptr_t pscratchpad, int job_no, char *suffix)
+pandecode_uniforms(mali_ptr uniforms, unsigned uniform_count)
{
+ pandecode_validate_buffer(uniforms, uniform_count * 16);
- struct pandecode_mapped_memory *mem = pandecode_find_mapped_gpu_mem_containing(pscratchpad);
-
- struct bifrost_scratchpad *PANDECODE_PTR_VAR(scratchpad, mem, pscratchpad);
+ char *ptr = pointer_as_memory_reference(uniforms);
+ pandecode_log("vec4 uniforms[%u] = %s;\n", uniform_count, ptr);
+ free(ptr);
+}
- if (scratchpad->zero) {
- pandecode_msg("XXX: scratchpad zero tripped");
- pandecode_prop("zero = 0x%x\n", scratchpad->zero);
+static const char *
+shader_type_for_job(unsigned type)
+{
+ switch (type) {
+ case JOB_TYPE_VERTEX: return "VERTEX";
+ case JOB_TYPE_TILER: return "FRAGMENT";
+ case JOB_TYPE_COMPUTE: return "COMPUTE";
+ default:
+ return "UNKNOWN";
}
-
- pandecode_log("struct bifrost_scratchpad scratchpad_%"PRIx64"_%d%s = {\n", pscratchpad, job_no, suffix);
- pandecode_indent++;
-
- pandecode_prop("flags = 0x%x", scratchpad->flags);
- MEMORY_PROP(scratchpad, gpu_scratchpad);
-
- pandecode_indent--;
- pandecode_log("};\n");
}
static unsigned shader_id = 0;
static struct midgard_disasm_stats
pandecode_shader_disassemble(mali_ptr shader_ptr, int shader_no, int type,
- bool is_bifrost, unsigned nr_regs)
+ bool is_bifrost, unsigned gpu_id)
{
struct pandecode_mapped_memory *mem = pandecode_find_mapped_gpu_mem_containing(shader_ptr);
uint8_t *PANDECODE_PTR_VAR(code, mem, shader_ptr);
/* Print some boilerplate to clearly denote the assembly (which doesn't
* obey indentation rules), and actually do the disassembly! */
- printf("\n\n");
+ pandecode_log_cont("\n\n");
struct midgard_disasm_stats stats;
if (is_bifrost) {
- disassemble_bifrost(code, sz, false);
+ disassemble_bifrost(pandecode_dump_stream, code, sz, false);
/* TODO: Extend stats to Bifrost */
- stats.texture_count = -1;
- stats.sampler_count = -1;
- stats.attribute_count = -1;
- stats.varying_count = -1;
- stats.uniform_count = -1;
- stats.uniform_buffer_count = -1;
- stats.work_count = -1;
+ stats.texture_count = -128;
+ stats.sampler_count = -128;
+ stats.attribute_count = -128;
+ stats.varying_count = -128;
+ stats.uniform_count = -128;
+ stats.uniform_buffer_count = -128;
+ stats.work_count = -128;
stats.instruction_count = 0;
stats.bundle_count = 0;
stats.quadword_count = 0;
+ stats.helper_invocations = false;
} else {
- stats = disassemble_midgard(code, sz);
- stats.work_count = nr_regs;
+ stats = disassemble_midgard(pandecode_dump_stream,
+ code, sz, gpu_id,
+ type == JOB_TYPE_TILER ?
+ MESA_SHADER_FRAGMENT : MESA_SHADER_VERTEX);
}
- /* Print shader-db stats */
+ /* Print shader-db stats. Skip COMPUTE jobs since they are used for
+ * driver-internal purposes with the blob and would interfere with the stats */
- unsigned nr_threads =
- (stats.work_count <= 4) ? 4 :
- (stats.work_count <= 8) ? 2 :
- 1;
+ bool should_shaderdb = type != JOB_TYPE_COMPUTE;
- printf("shader%d - %s shader: "
- "%u inst, %u bundles, %u quadwords, "
- "%u registers, %u threads, 0 loops\n\n\n",
- shader_id++,
- (type == JOB_TYPE_TILER) ? "FRAGMENT" : "VERTEX",
- stats.instruction_count, stats.bundle_count, stats.quadword_count,
- stats.work_count, nr_threads);
+ if (should_shaderdb) {
+ unsigned nr_threads =
+ (stats.work_count <= 4) ? 4 :
+ (stats.work_count <= 8) ? 2 :
+ 1;
+
+ pandecode_log_cont("shader%d - MESA_SHADER_%s shader: "
+ "%u inst, %u bundles, %u quadwords, "
+ "%u registers, %u threads, 0 loops, 0:0 spills:fills\n\n\n",
+ shader_id++,
+ shader_type_for_job(type),
+ stats.instruction_count, stats.bundle_count, stats.quadword_count,
+ stats.work_count, nr_threads);
+ }
return stats;
if (f.layout == MALI_TEXTURE_AFBC)
pandecode_log_cont("afbc");
else if (f.layout == MALI_TEXTURE_TILED)
- pandecode_log_cont(is_zs ? "linear" : "tiled");
+ pandecode_log_cont("tiled");
else if (f.layout == MALI_TEXTURE_LINEAR)
pandecode_log_cont("linear");
else
if (!f.unknown2) {
pandecode_msg("XXX: expected unknown texture bit set\n");
- pandecode_prop("unknown2 = %" PRId32, f.unknown1);
+ pandecode_prop("unknown2 = %" PRId32, f.unknown2);
}
if (t->swizzle_zero) {
* properties, but dump extra
* possibilities to futureproof */
- int bitmap_count = MALI_NEGATIVE(t->levels);
+ int bitmap_count = t->levels + 1;
/* Miptree for each face */
if (f.type == MALI_TEX_CUBE)
bitmap_count *= 6;
+ else if (f.type == MALI_TEX_3D && f.layout == MALI_TEXTURE_LINEAR)
+ bitmap_count *= (t->depth + 1);
/* Array of textures */
- bitmap_count *= MALI_NEGATIVE(t->array_size);
+ bitmap_count *= (t->array_size + 1);
/* Stride for each element */
if (f.manual_stride)
bitmap_count *= 2;
- /* Sanity check the size */
- int max_count = sizeof(t->payload) / sizeof(t->payload[0]);
- assert (bitmap_count <= max_count);
-
+ mali_ptr *pointers_and_strides = pandecode_fetch_gpu_mem(tmem,
+ u + sizeof(*t), sizeof(mali_ptr) * bitmap_count);
for (int i = 0; i < bitmap_count; ++i) {
/* How we dump depends if this is a stride or a pointer */
if (f.manual_stride && (i & 1)) {
/* signed 32-bit snuck in as a 64-bit pointer */
- uint64_t stride_set = t->payload[i];
+ uint64_t stride_set = pointers_and_strides[i];
uint32_t clamped_stride = stride_set;
int32_t stride = clamped_stride;
assert(stride_set == clamped_stride);
pandecode_log("(mali_ptr) %d /* stride */, \n", stride);
} else {
- char *a = pointer_as_memory_reference(t->payload[i]);
+ char *a = pointer_as_memory_reference(pointers_and_strides[i]);
pandecode_log("%s, \n", a);
free(a);
}
pandecode_log("};\n");
}
+/* For shader properties like texture_count, we have a claimed property in the
+ * shader_meta, and the actual Truth from static analysis (this may just be an
+ * upper limit). We validate accordingly */
+
+static void
+pandecode_shader_prop(const char *name, unsigned claim, signed truth, bool fuzzy)
+{
+ /* Nothing to do */
+ if (claim == truth)
+ return;
+
+ if (fuzzy)
+ assert(truth >= 0);
+
+ if ((truth >= 0) && !fuzzy) {
+ pandecode_msg("%s: expected %s = %d, claimed %u\n",
+ (truth < claim) ? "warn" : "XXX",
+ name, truth, claim);
+ } else if ((claim > -truth) && !fuzzy) {
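+ /* A negative, non-fuzzy truth acts as an upper bound: claims up to
+ * -truth are accepted */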
+ pandecode_msg("XXX: expected %s <= %u, claimed %u\n",
+ name, -truth, claim);
+ } else if (fuzzy && (claim < truth))
+ pandecode_msg("XXX: expected %s >= %u, claimed %u\n",
+ name, truth, claim);
+
+ pandecode_log(".%s = %" PRId16, name, claim);
+
+ if (fuzzy)
+ pandecode_log_cont(" /* %u used */", truth);
+
+ pandecode_log_cont(",\n");
+}
+
+static void
+pandecode_blend_shader_disassemble(mali_ptr shader, int job_no, int job_type,
+ bool is_bifrost, unsigned gpu_id)
+{
+ struct midgard_disasm_stats stats =
+ pandecode_shader_disassemble(shader, job_no, job_type, is_bifrost, gpu_id);
+
+ bool has_texture = (stats.texture_count > 0);
+ bool has_sampler = (stats.sampler_count > 0);
+ bool has_attribute = (stats.attribute_count > 0);
+ bool has_varying = (stats.varying_count > 0);
+ bool has_uniform = (stats.uniform_count > 0);
+ bool has_ubo = (stats.uniform_buffer_count > 0);
+
+ if (has_texture || has_sampler)
+ pandecode_msg("XXX: blend shader accessing textures\n");
+
+ if (has_attribute || has_varying)
+ pandecode_msg("XXX: blend shader accessing interstage\n");
+
+ if (has_uniform || has_ubo)
+ pandecode_msg("XXX: blend shader accessing uniforms\n");
+}
+
static void
pandecode_vertex_tiler_postfix_pre(
const struct mali_vertex_tiler_postfix *p,
int job_no, enum mali_job_type job_type,
- char *suffix, bool is_bifrost)
+ char *suffix, bool is_bifrost, unsigned gpu_id)
{
- mali_ptr shader_meta_ptr = (u64) (uintptr_t) (p->_shader_upper << 4);
struct pandecode_mapped_memory *attr_mem;
/* On Bifrost, since the tiler heap (for tiler jobs) and the scratchpad
.rt_count = 1
};
- if (is_bifrost)
- pandecode_scratchpad(p->framebuffer & ~FBD_TYPE, job_no, suffix);
- else if (p->framebuffer & MALI_MFBD)
- fbd_info = pandecode_mfbd_bfr((u64) ((uintptr_t) p->framebuffer) & FBD_MASK, job_no, false);
+ if (is_bifrost) {
+ pandecode_log_cont("\t/* %X %/\n", p->shared_memory & 1);
+ pandecode_compute_fbd(p->shared_memory & ~1, job_no);
+ } else if (p->shared_memory & MALI_MFBD)
+ fbd_info = pandecode_mfbd_bfr((u64) ((uintptr_t) p->shared_memory) & FBD_MASK, job_no, false, job_type == JOB_TYPE_COMPUTE);
else if (job_type == JOB_TYPE_COMPUTE)
- pandecode_compute_fbd((u64) (uintptr_t) p->framebuffer, job_no);
+ pandecode_compute_fbd((u64) (uintptr_t) p->shared_memory, job_no);
else
- fbd_info = pandecode_sfbd((u64) (uintptr_t) p->framebuffer, job_no, false);
+ fbd_info = pandecode_sfbd((u64) (uintptr_t) p->shared_memory, job_no, false, gpu_id);
int varying_count = 0, attribute_count = 0, uniform_count = 0, uniform_buffer_count = 0;
int texture_count = 0, sampler_count = 0;
- if (shader_meta_ptr) {
- struct pandecode_mapped_memory *smem = pandecode_find_mapped_gpu_mem_containing(shader_meta_ptr);
- struct mali_shader_meta *PANDECODE_PTR_VAR(s, smem, shader_meta_ptr);
+ if (p->shader) {
+ struct pandecode_mapped_memory *smem = pandecode_find_mapped_gpu_mem_containing(p->shader);
+ struct mali_shader_meta *PANDECODE_PTR_VAR(s, smem, p->shader);
+
+ /* Disassemble ahead-of-time to get stats. Initialize with
+ * stats for the missing-shader case so we get validation
+ * there, too */
+
+ struct midgard_disasm_stats info = {
+ .texture_count = 0,
+ .sampler_count = 0,
+ .attribute_count = 0,
+ .varying_count = 0,
+ .work_count = 1,
+
+ .uniform_count = -128,
+ .uniform_buffer_count = 0
+ };
+
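+ /* The low nibble of the shader pointer appears to be a tag; mask it
+ * off to get the actual code address for disassembly */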
+ if (s->shader & ~0xF)
+ info = pandecode_shader_disassemble(s->shader & ~0xF, job_no, job_type, is_bifrost, gpu_id);
- pandecode_log("struct mali_shader_meta shader_meta_%"PRIx64"_%d%s = {\n", shader_meta_ptr, job_no, suffix);
+ pandecode_log("struct mali_shader_meta shader_meta_%"PRIx64"_%d%s = {\n", p->shader, job_no, suffix);
pandecode_indent++;
/* Save for dumps */
uniform_count = s->bifrost2.uniform_count;
uniform_buffer_count = s->bifrost1.uniform_buffer_count;
} else {
- uniform_count = s->midgard1.uniform_buffer_count;
+ uniform_count = s->midgard1.uniform_count;
uniform_buffer_count = s->midgard1.uniform_buffer_count;
}
- mali_ptr shader_ptr = pandecode_shader_address("shader", s->shader);
+ pandecode_shader_address("shader", s->shader);
- pandecode_prop("texture_count = %" PRId16, s->texture_count);
- pandecode_prop("sampler_count = %" PRId16, s->sampler_count);
- pandecode_prop("attribute_count = %" PRId16, s->attribute_count);
- pandecode_prop("varying_count = %" PRId16, s->varying_count);
+ pandecode_shader_prop("texture_count", s->texture_count, info.texture_count, false);
+ pandecode_shader_prop("sampler_count", s->sampler_count, info.sampler_count, false);
+ pandecode_shader_prop("attribute_count", s->attribute_count, info.attribute_count, false);
+ pandecode_shader_prop("varying_count", s->varying_count, info.varying_count, false);
+ pandecode_shader_prop("uniform_buffer_count",
+ uniform_buffer_count,
+ info.uniform_buffer_count, true);
- unsigned nr_registers = 0;
-
- if (is_bifrost) {
- pandecode_log(".bifrost1 = {\n");
- pandecode_indent++;
+ if (!is_bifrost) {
+ pandecode_shader_prop("uniform_count",
+ uniform_count,
+ info.uniform_count, false);
- pandecode_prop("uniform_buffer_count = %" PRId32, s->bifrost1.uniform_buffer_count);
- pandecode_prop("unk1 = 0x%" PRIx32, s->bifrost1.unk1);
+ pandecode_shader_prop("work_count",
+ s->midgard1.work_count, info.work_count, false);
+ }
- pandecode_indent--;
- pandecode_log("},\n");
+ if (is_bifrost) {
+ pandecode_prop("bifrost1.unk1 = 0x%" PRIx32, s->bifrost1.unk1);
} else {
- pandecode_log(".midgard1 = {\n");
- pandecode_indent++;
+ bool helpers = s->midgard1.flags_lo & MALI_HELPER_INVOCATIONS;
+ s->midgard1.flags_lo &= ~MALI_HELPER_INVOCATIONS;
- pandecode_prop("uniform_count = %" PRId16, s->midgard1.uniform_count);
- pandecode_prop("uniform_buffer_count = %" PRId16, s->midgard1.uniform_buffer_count);
- pandecode_prop("work_count = %" PRId16, s->midgard1.work_count);
- nr_registers = s->midgard1.work_count;
+ if (helpers != info.helper_invocations) {
+ pandecode_msg("XXX: expected helpers %u but got %u\n",
+ info.helper_invocations, helpers);
+ }
- pandecode_log(".flags = ");
- pandecode_log_decoded_flags(shader_midgard1_flag_info, s->midgard1.flags);
+ pandecode_log(".midgard1.flags_lo = ");
+ pandecode_log_decoded_flags(shader_midgard1_flag_lo_info, s->midgard1.flags_lo);
pandecode_log_cont(",\n");
- pandecode_prop("unknown2 = 0x%" PRIx32, s->midgard1.unknown2);
-
- pandecode_indent--;
- pandecode_log("},\n");
+ pandecode_log(".midgard1.flags_hi = ");
+ pandecode_log_decoded_flags(shader_midgard1_flag_hi_info, s->midgard1.flags_hi);
+ pandecode_log_cont(",\n");
}
if (s->depth_units || s->depth_factor) {
/* We're not quite sure what these flags mean without the depth test, if anything */
- if (unknown2_3 & (MALI_DEPTH_TEST | MALI_DEPTH_FUNC_MASK)) {
+ if (unknown2_3 & (MALI_DEPTH_WRITEMASK | MALI_DEPTH_FUNC_MASK)) {
const char *func = pandecode_func(MALI_GET_DEPTH_FUNC(unknown2_3));
unknown2_3 &= ~MALI_DEPTH_FUNC_MASK;
pandecode_log("},\n");
}
- if (s->unknown2_8)
- pandecode_prop("unknown2_8 = 0x%" PRIx32, s->unknown2_8);
+ if (s->padding) {
+ pandecode_msg("XXX: shader padding tripped\n");
+ pandecode_prop("padding = 0x%" PRIx32, s->padding);
+ }
if (!is_bifrost) {
/* TODO: Blend shaders routing/disasm */
-
union midgard_blend blend = s->blend;
- pandecode_midgard_blend(&blend, false);
+ mali_ptr shader = pandecode_midgard_blend(&blend, s->unknown2_3 & MALI_HAS_BLEND_SHADER);
+ if (shader & ~0xF)
+ pandecode_blend_shader_disassemble(shader, job_no, job_type, false, gpu_id);
}
pandecode_indent--;
/* MRT blend fields are used whenever MFBD is used, with
* per-RT descriptors */
- if (job_type == JOB_TYPE_TILER) {
+ if (job_type == JOB_TYPE_TILER && p->shared_memory & MALI_MFBD) {
void* blend_base = (void *) (s + 1);
for (unsigned i = 0; i < fbd_info.rt_count; i++) {
else
shader = pandecode_midgard_blend_mrt(blend_base, job_no, i);
- if (shader & ~0xF) {
- struct midgard_disasm_stats stats =
- pandecode_shader_disassemble(shader, job_no, job_type, false, 0);
-
- bool has_texture = (stats.texture_count > 0);
- bool has_sampler = (stats.sampler_count > 0);
- bool has_attribute = (stats.attribute_count > 0);
- bool has_varying = (stats.varying_count > 0);
- bool has_uniform = (stats.uniform_count > 0);
- bool has_ubo = (stats.uniform_buffer_count > 0);
-
- if (has_texture || has_sampler)
- pandecode_msg("XXX: blend shader accessing textures\n");
-
- if (has_attribute || has_varying)
- pandecode_msg("XXX: blend shader accessing interstage\n");
-
- if (has_uniform || has_ubo)
- pandecode_msg("XXX: blend shader accessing uniforms\n");
- }
+ if (shader & ~0xF)
+ pandecode_blend_shader_disassemble(shader, job_no, job_type, false, gpu_id);
}
}
-
- if (shader_ptr & ~0xF)
- pandecode_shader_disassemble(shader_ptr, job_no, job_type, is_bifrost, nr_registers);
} else
- pandecode_msg("<no shader>\n");
+ pandecode_msg("XXX: missing shader descriptor\n");
if (p->viewport) {
struct pandecode_mapped_memory *fmem = pandecode_find_mapped_gpu_mem_containing(p->viewport);
pandecode_log("};\n");
}
- if (p->attribute_meta) {
- unsigned max_attr_index = pandecode_attribute_meta(job_no, attribute_count, p, false, suffix);
+ unsigned max_attr_index = 0;
+ if (p->attribute_meta)
+ max_attr_index = pandecode_attribute_meta(job_no, attribute_count, p, false, suffix);
+
+ if (p->attributes) {
attr_mem = pandecode_find_mapped_gpu_mem_containing(p->attributes);
- pandecode_attributes(attr_mem, p->attributes, job_no, suffix, max_attr_index, false);
+ pandecode_attributes(attr_mem, p->attributes, job_no, suffix, max_attr_index, false, job_type);
}
/* Varyings are encoded like attributes but not actually sent; we just
/* Number of descriptors depends on whether there are
* non-internal varyings */
- pandecode_attributes(attr_mem, p->varyings, job_no, suffix, varying_count, true);
+ pandecode_attributes(attr_mem, p->varyings, job_no, suffix, varying_count, true, job_type);
}
if (p->uniform_buffers) {
if (uniform_buffer_count)
pandecode_uniform_buffers(p->uniform_buffers, uniform_buffer_count, job_no);
else
- pandecode_msg("XXX: UBOs specified but not referenced\n");
+ pandecode_msg("warn: UBOs specified but not referenced\n");
} else if (uniform_buffer_count)
pandecode_msg("XXX: UBOs referenced but not specified\n");
if (p->uniforms) {
if (uniform_count)
- pandecode_validate_buffer(p->uniforms, uniform_count * 16);
+ pandecode_uniforms(p->uniforms, uniform_count);
else
- pandecode_msg("XXX: Uniforms specified but not referenced");
+ pandecode_msg("warn: Uniforms specified but not referenced\n");
} else if (uniform_count)
- pandecode_msg("XXX: UBOs referenced but not specified\n");
+ pandecode_msg("XXX: Uniforms referenced but not specified\n");
if (p->texture_trampoline) {
struct pandecode_mapped_memory *mmem = pandecode_find_mapped_gpu_mem_containing(p->texture_trampoline);
pandecode_prop("min_lod = FIXED_16(%f)", DECODE_FIXED_16(s->min_lod));
pandecode_prop("max_lod = FIXED_16(%f)", DECODE_FIXED_16(s->max_lod));
+ if (s->lod_bias)
+ pandecode_prop("lod_bias = FIXED_16(%f)", DECODE_FIXED_16(s->lod_bias));
+
pandecode_prop("wrap_s = %s", pandecode_wrap_mode(s->wrap_s));
pandecode_prop("wrap_t = %s", pandecode_wrap_mode(s->wrap_t));
pandecode_prop("wrap_r = %s", pandecode_wrap_mode(s->wrap_r));
- pandecode_prop("compare_func = %s", pandecode_alt_func(s->compare_func));
+ pandecode_prop("compare_func = %s", pandecode_func(s->compare_func));
if (s->zero || s->zero2) {
pandecode_msg("XXX: sampler zero tripped\n");
static void
pandecode_vertex_tiler_postfix(const struct mali_vertex_tiler_postfix *p, int job_no, bool is_bifrost)
{
- if (!(p->position_varying || p->occlusion_counter || p->flags))
+ if (p->shader & 0xF)
+ pandecode_msg("warn: shader tagged %X\n", (unsigned) (p->shader & 0xF));
+
+ if (!(p->position_varying || p->occlusion_counter))
return;
pandecode_log(".postfix = {\n");
MEMORY_PROP(p, position_varying);
MEMORY_PROP(p, occlusion_counter);
- if (p->flags)
- pandecode_prop("flags = %d", p->flags);
-
pandecode_indent--;
pandecode_log("},\n");
}
static int
pandecode_vertex_job_bfr(const struct mali_job_descriptor_header *h,
const struct pandecode_mapped_memory *mem,
- mali_ptr payload, int job_no)
+ mali_ptr payload, int job_no, unsigned gpu_id)
{
struct bifrost_payload_vertex *PANDECODE_PTR_VAR(v, mem, payload);
- pandecode_vertex_tiler_postfix_pre(&v->postfix, job_no, h->job_type, "", true);
+ pandecode_vertex_tiler_postfix_pre(&v->postfix, job_no, h->job_type, "", true, gpu_id);
pandecode_log("struct bifrost_payload_vertex payload_%d = {\n", job_no);
pandecode_indent++;
static int
pandecode_tiler_job_bfr(const struct mali_job_descriptor_header *h,
const struct pandecode_mapped_memory *mem,
- mali_ptr payload, int job_no)
+ mali_ptr payload, int job_no, unsigned gpu_id)
{
struct bifrost_payload_tiler *PANDECODE_PTR_VAR(t, mem, payload);
- pandecode_vertex_tiler_postfix_pre(&t->postfix, job_no, h->job_type, "", true);
-
- pandecode_indices(t->prefix.indices, t->prefix.index_count, job_no);
+ pandecode_vertex_tiler_postfix_pre(&t->postfix, job_no, h->job_type, "", true, gpu_id);
pandecode_tiler_meta(t->tiler.tiler_meta, job_no);
pandecode_log("struct bifrost_payload_tiler payload_%d = {\n", job_no);
static int
pandecode_vertex_or_tiler_job_mdg(const struct mali_job_descriptor_header *h,
const struct pandecode_mapped_memory *mem,
- mali_ptr payload, int job_no)
+ mali_ptr payload, int job_no, unsigned gpu_id)
{
struct midgard_payload_vertex_tiler *PANDECODE_PTR_VAR(v, mem, payload);
- pandecode_vertex_tiler_postfix_pre(&v->postfix, job_no, h->job_type, "", false);
-
- pandecode_indices(v->prefix.indices, v->prefix.index_count, job_no);
+ pandecode_vertex_tiler_postfix_pre(&v->postfix, job_no, h->job_type, "", false, gpu_id);
pandecode_log("struct midgard_payload_vertex_tiler payload_%d = {\n", job_no);
pandecode_indent++;
bool has_primitive_pointer = v->prefix.unknown_draw & MALI_DRAW_VARYING_SIZE;
pandecode_primitive_size(v->primitive_size, !has_primitive_pointer);
- bool instanced = v->instance_shift || v->instance_odd;
bool is_graphics = (h->job_type == JOB_TYPE_VERTEX) || (h->job_type == JOB_TYPE_TILER);
pandecode_log(".prefix = ");
- pandecode_vertex_tiler_prefix(&v->prefix, job_no, !instanced && is_graphics);
+ pandecode_vertex_tiler_prefix(&v->prefix, job_no, is_graphics);
pandecode_gl_enables(v->gl_enables, h->job_type);
static int
pandecode_fragment_job(const struct pandecode_mapped_memory *mem,
mali_ptr payload, int job_no,
- bool is_bifrost)
+ bool is_bifrost, unsigned gpu_id)
{
const struct mali_payload_fragment *PANDECODE_PTR_VAR(s, mem, payload);
- bool is_mfbd = (s->framebuffer & FBD_TYPE) == MALI_MFBD;
-
- /* Bifrost theoretically may retain support for SFBD on compute jobs,
- * but for graphics workloads with a FRAGMENT payload, use MFBD */
+ bool is_mfbd = s->framebuffer & MALI_MFBD;
if (!is_mfbd && is_bifrost)
pandecode_msg("XXX: Bifrost fragment must use MFBD\n");
struct pandecode_fbd info;
if (is_mfbd)
- info = pandecode_mfbd_bfr(s->framebuffer & FBD_MASK, job_no, true);
+ info = pandecode_mfbd_bfr(s->framebuffer & FBD_MASK, job_no, true, false);
else
- info = pandecode_sfbd(s->framebuffer & FBD_MASK, job_no, true);
+ info = pandecode_sfbd(s->framebuffer & FBD_MASK, job_no, true, gpu_id);
/* Compute the tag for the tagged pointer. This contains the type of
* FBD (MFBD/SFBD), and in the case of an MFBD, information about which
* additional structures follow the MFBD header (an extra payload or
* not, as well as a count of render targets) */
- unsigned expected_tag = is_mfbd ? MALI_MFBD : MALI_SFBD;
+ unsigned expected_tag = is_mfbd ? MALI_MFBD : 0;
if (is_mfbd) {
if (info.has_extra)
return sizeof(*s);
}
-static int job_descriptor_number = 0;
-
-int
-pandecode_jc(mali_ptr jc_gpu_va, bool bifrost)
+/* Entrypoint to start tracing. jc_gpu_va is the GPU address for the first job
+ * in the chain; later jobs are found by walking the chain. Bifrost is, well,
+ * whether it's Bifrost or not. GPU ID is the more fine-grained ID (at some
+ * point, we might wish to combine this with the bifrost parameter) because
+ * some details are model-specific even within a particular architecture.
+ * Minimal traces *only* examine the job descriptors, skipping printing
+ * entirely if there are no faults, and only descend into the payload if there
+ * are faults. This is useful for looking for faults without the overhead of
+ * invasive traces. */
+
+void
+pandecode_jc(mali_ptr jc_gpu_va, bool bifrost, unsigned gpu_id, bool minimal)
{
struct mali_job_descriptor_header *h;
-
- int start_number = 0;
-
- bool first = true;
- bool last_size;
+ unsigned job_descriptor_number = 0;
do {
struct pandecode_mapped_memory *mem =
h->job_type != JOB_TYPE_FRAGMENT ? 4 : 0;
mali_ptr payload_ptr = jc_gpu_va + sizeof(*h) - offset;
- payload = pandecode_fetch_gpu_mem(mem, payload_ptr,
- MALI_PAYLOAD_SIZE);
+ payload = pandecode_fetch_gpu_mem(mem, payload_ptr, 256);
int job_no = job_descriptor_number++;
- if (first)
- start_number = job_no;
+ /* If the job is good to go, skip it in minimal mode */
+ if (minimal && (h->exception_status == 0x0 || h->exception_status == 0x1))
+ continue;
pandecode_log("struct mali_job_descriptor_header job_%"PRIx64"_%d = {\n", jc_gpu_va, job_no);
pandecode_indent++;
pandecode_prop("job_type = %s", pandecode_job_type(h->job_type));
- /* Save for next job fixing */
- last_size = h->job_descriptor_size;
-
if (h->job_descriptor_size)
pandecode_prop("job_descriptor_size = %d", h->job_descriptor_size);
pandecode_indent--;
pandecode_log("};\n");
- /* Do not touch the field yet -- decode the payload first, and
- * don't touch that either. This is essential for the uploads
- * to occur in sequence and therefore be dynamically allocated
- * correctly. Do note the size, however, for that related
- * reason. */
-
switch (h->job_type) {
- case JOB_TYPE_SET_VALUE: {
- struct mali_payload_set_value *s = payload;
- pandecode_log("struct mali_payload_set_value payload_%"PRIx64"_%d = {\n", payload_ptr, job_no);
+ case JOB_TYPE_WRITE_VALUE: {
+ struct mali_payload_write_value *s = payload;
+ pandecode_log("struct mali_payload_write_value payload_%"PRIx64"_%d = {\n", payload_ptr, job_no);
pandecode_indent++;
- MEMORY_PROP(s, out);
- pandecode_prop("unknown = 0x%" PRIX64, s->unknown);
+ MEMORY_PROP(s, address);
+
+ if (s->value_descriptor != MALI_WRITE_VALUE_ZERO) {
+ pandecode_msg("XXX: unknown value descriptor\n");
+ pandecode_prop("value_descriptor = 0x%" PRIX32, s->value_descriptor);
+ }
+
+ if (s->reserved) {
+ pandecode_msg("XXX: set value tripped\n");
+ pandecode_prop("reserved = 0x%" PRIX32, s->reserved);
+ }
+
+ pandecode_prop("immediate = 0x%" PRIX64, s->immediate);
pandecode_indent--;
pandecode_log("};\n");
case JOB_TYPE_COMPUTE:
if (bifrost) {
if (h->job_type == JOB_TYPE_TILER)
- pandecode_tiler_job_bfr(h, mem, payload_ptr, job_no);
+ pandecode_tiler_job_bfr(h, mem, payload_ptr, job_no, gpu_id);
else
- pandecode_vertex_job_bfr(h, mem, payload_ptr, job_no);
+ pandecode_vertex_job_bfr(h, mem, payload_ptr, job_no, gpu_id);
} else
- pandecode_vertex_or_tiler_job_mdg(h, mem, payload_ptr, job_no);
+ pandecode_vertex_or_tiler_job_mdg(h, mem, payload_ptr, job_no, gpu_id);
break;
case JOB_TYPE_FRAGMENT:
- pandecode_fragment_job(mem, payload_ptr, job_no, bifrost);
+ pandecode_fragment_job(mem, payload_ptr, job_no, bifrost, gpu_id);
break;
default:
break;
}
-
- /* Handle linkage */
-
- if (!first) {
- pandecode_log("((struct mali_job_descriptor_header *) (uintptr_t) job_%d_p)->", job_no - 1);
-
- if (last_size)
- pandecode_log_cont("next_job_64 = job_%d_p;\n\n", job_no);
- else
- pandecode_log_cont("next_job_32 = (u32) (uintptr_t) job_%d_p;\n\n", job_no);
- }
-
- first = false;
-
- } while ((jc_gpu_va = h->job_descriptor_size ? h->next_job_64 : h->next_job_32));
-
- return start_number;
+ } while ((jc_gpu_va = h->next_job));
}