X-Git-Url: https://git.libre-soc.org/?a=blobdiff_plain;f=src%2Fpanfrost%2Fpandecode%2Fdecode.c;h=80b9a66978b979218707974ae60e024ada1d0983;hb=ed3eede296e09a1c779d0d8f89ed50765c26b2dc;hp=de7365b3932148d15464898e70b38a8f516d46f2;hpb=9ce45ac808e34bced2df346057beb8ab10313a87;p=mesa.git diff --git a/src/panfrost/pandecode/decode.c b/src/panfrost/pandecode/decode.c index de7365b3932..80b9a66978b 100644 --- a/src/panfrost/pandecode/decode.c +++ b/src/panfrost/pandecode/decode.c @@ -40,7 +40,7 @@ #include "pan_encoder.h" -int pandecode_jc(mali_ptr jc_gpu_va, bool bifrost); +static void pandecode_swizzle(unsigned swizzle, enum mali_format format); #define MEMORY_PROP(obj, p) {\ if (obj->p) { \ @@ -146,9 +146,9 @@ pandecode_validate_buffer(mali_ptr addr, size_t sz) unsigned total = offset + sz; if (total > bo->length) { - pandecode_msg("XXX: buffer overrun." - "Chunk of size %d at offset %d in buffer of size %d. " - "Overrun by %d bytes.", + pandecode_msg("XXX: buffer overrun. " + "Chunk of size %zu at offset %d in buffer of size %zu. " + "Overrun by %zu bytes. \n", sz, offset, bo->length, total - bo->length); return; } @@ -223,7 +223,7 @@ static const struct pandecode_flag_info u3_flag_info[] = { FLAG_INFO(HAS_MSAA), FLAG_INFO(CAN_DISCARD), FLAG_INFO(HAS_BLEND_SHADER), - FLAG_INFO(DEPTH_TEST), + FLAG_INFO(DEPTH_WRITEMASK), {} }; @@ -238,15 +238,6 @@ static const struct pandecode_flag_info u4_flag_info[] = { }; #undef FLAG_INFO -#define FLAG_INFO(flag) { MALI_FRAMEBUFFER_##flag, "MALI_FRAMEBUFFER_" #flag } -static const struct pandecode_flag_info fb_fmt_flag_info[] = { - FLAG_INFO(MSAA_A), - FLAG_INFO(MSAA_B), - FLAG_INFO(MSAA_8), - {} -}; -#undef FLAG_INFO - #define FLAG_INFO(flag) { MALI_MFBD_FORMAT_##flag, "MALI_MFBD_FORMAT_" #flag } static const struct pandecode_flag_info mfbd_fmt_flag_info[] = { FLAG_INFO(MSAA), @@ -267,7 +258,6 @@ static const struct pandecode_flag_info mfbd_extra_flag_info[] = { #define FLAG_INFO(flag) { MALI_##flag, "MALI_" #flag } static const struct pandecode_flag_info shader_midgard1_flag_info [] = { FLAG_INFO(EARLY_Z), - FLAG_INFO(HELPER_INVOCATIONS), FLAG_INFO(READS_TILEBUFFER), FLAG_INFO(READS_ZS), {} @@ -293,6 +283,22 @@ static const struct pandecode_flag_info sampler_flag_info [] = { }; #undef FLAG_INFO +#define FLAG_INFO(flag) { MALI_SFBD_FORMAT_##flag, "MALI_SFBD_FORMAT_" #flag } +static const struct pandecode_flag_info sfbd_unk1_info [] = { + FLAG_INFO(MSAA_8), + FLAG_INFO(MSAA_A), + {} +}; +#undef FLAG_INFO + +#define FLAG_INFO(flag) { MALI_SFBD_FORMAT_##flag, "MALI_SFBD_FORMAT_" #flag } +static const struct pandecode_flag_info sfbd_unk2_info [] = { + FLAG_INFO(MSAA_B), + FLAG_INFO(SRGB), + {} +}; +#undef FLAG_INFO + extern char *replace_fragment; extern char *replace_vertex; @@ -303,7 +309,7 @@ pandecode_job_type(enum mali_job_type type) switch (type) { DEFINE_CASE(NULL); - DEFINE_CASE(SET_VALUE); + DEFINE_CASE(WRITE_VALUE); DEFINE_CASE(CACHE_FLUSH); DEFINE_CASE(COMPUTE); DEFINE_CASE(VERTEX); @@ -369,28 +375,6 @@ pandecode_func(enum mali_func mode) } #undef DEFINE_CASE -/* Why is this duplicated? Who knows... 
*/ -#define DEFINE_CASE(name) case MALI_ALT_FUNC_ ## name: return "MALI_ALT_FUNC_" #name -static char * -pandecode_alt_func(enum mali_alt_func mode) -{ - switch (mode) { - DEFINE_CASE(NEVER); - DEFINE_CASE(LESS); - DEFINE_CASE(EQUAL); - DEFINE_CASE(LEQUAL); - DEFINE_CASE(GREATER); - DEFINE_CASE(NOTEQUAL); - DEFINE_CASE(GEQUAL); - DEFINE_CASE(ALWAYS); - - default: - pandecode_msg("XXX: invalid alt func %X\n", mode); - return ""; - } -} -#undef DEFINE_CASE - #define DEFINE_CASE(name) case MALI_STENCIL_ ## name: return "MALI_STENCIL_" #name static char * pandecode_stencil_op(enum mali_stencil_op op) @@ -413,7 +397,6 @@ pandecode_stencil_op(enum mali_stencil_op op) #undef DEFINE_CASE -#define DEFINE_CASE(name) case MALI_ATTR_ ## name: return "MALI_ATTR_" #name static char *pandecode_attr_mode_short(enum mali_attr_mode mode) { switch(mode) { @@ -427,15 +410,33 @@ static char *pandecode_attr_mode_short(enum mali_attr_mode mode) return "instanced_npot"; case MALI_ATTR_IMAGE: return "image"; - case MALI_ATTR_INTERNAL: - return "internal"; default: pandecode_msg("XXX: invalid attribute mode %X\n", mode); return ""; } } -#undef DEFINE_CASE +static const char * +pandecode_special_record(uint64_t v, bool* attribute) +{ + switch(v) { + case MALI_ATTR_VERTEXID: + *attribute = true; + return "gl_VertexID"; + case MALI_ATTR_INSTANCEID: + *attribute = true; + return "gl_InstanceID"; + case MALI_VARYING_FRAG_COORD: + return "gl_FragCoord"; + case MALI_VARYING_FRONT_FACING: + return "gl_FrontFacing"; + case MALI_VARYING_POINT_COORD: + return "gl_PointCoord"; + default: + pandecode_msg("XXX: invalid special record %" PRIx64 "\n", v); + return ""; + } +} #define DEFINE_CASE(name) case MALI_WRAP_## name: return "MALI_WRAP_" #name static char * @@ -454,9 +455,9 @@ pandecode_wrap_mode(enum mali_wrap_mode op) } #undef DEFINE_CASE -#define DEFINE_CASE(name) case MALI_MFBD_BLOCK_## name: return "MALI_MFBD_BLOCK_" #name +#define DEFINE_CASE(name) case MALI_BLOCK_## name: return "MALI_BLOCK_" #name static char * -pandecode_mfbd_block_format(enum mali_mfbd_block_format fmt) +pandecode_block_format(enum mali_block_format fmt) { switch (fmt) { DEFINE_CASE(TILED); @@ -471,7 +472,7 @@ pandecode_mfbd_block_format(enum mali_mfbd_block_format fmt) #undef DEFINE_CASE #define DEFINE_CASE(name) case MALI_EXCEPTION_ACCESS_## name: return ""#name -static char * +char * pandecode_exception_access(enum mali_exception_access access) { switch (access) { @@ -494,7 +495,8 @@ pandecode_midgard_tiler_descriptor( const struct midgard_tiler_descriptor *t, unsigned width, unsigned height, - bool is_fragment) + bool is_fragment, + bool has_hierarchy) { pandecode_log(".tiler = {\n"); pandecode_indent++; @@ -527,8 +529,8 @@ pandecode_midgard_tiler_descriptor( /* Now that we've sanity checked, we'll try to calculate the sizes * ourselves for comparison */ - unsigned ref_header = panfrost_tiler_header_size(width, height, t->hierarchy_mask); - unsigned ref_size = panfrost_tiler_full_size(width, height, t->hierarchy_mask); + unsigned ref_header = panfrost_tiler_header_size(width, height, t->hierarchy_mask, has_hierarchy); + unsigned ref_size = panfrost_tiler_full_size(width, height, t->hierarchy_mask, has_hierarchy); if (!((ref_header == body_offset) && (ref_size == t->polygon_list_size))) { pandecode_msg("XXX: bad polygon list size (expected %d / 0x%x)\n", @@ -559,7 +561,7 @@ pandecode_midgard_tiler_descriptor( /* When tiling is enabled, the heap should be a tight fit */ unsigned heap_offset = t->heap_start - heap->gpu_va; if ((heap_offset + 
heap_size) != heap->length) { - pandecode_msg("XXX: heap size %d (expected %d)\n", + pandecode_msg("XXX: heap size %u (expected %zu)\n", heap_size, heap->length - heap_offset); } @@ -611,24 +613,75 @@ pandecode_midgard_tiler_descriptor( pandecode_log("}\n"); } +/* Information about the framebuffer passed back for + * additional analysis */ + +struct pandecode_fbd { + unsigned width; + unsigned height; + unsigned rt_count; + bool has_extra; +}; + static void -pandecode_sfbd(uint64_t gpu_va, int job_no, bool is_fragment) +pandecode_sfbd_format(struct mali_sfbd_format format) +{ + pandecode_log(".format = {\n"); + pandecode_indent++; + + pandecode_log(".unk1 = "); + pandecode_log_decoded_flags(sfbd_unk1_info, format.unk1); + pandecode_log_cont(",\n"); + + /* TODO: Map formats so we can check swizzles and print nicely */ + pandecode_log("swizzle"); + pandecode_swizzle(format.swizzle, MALI_RGBA8_UNORM); + pandecode_log_cont(",\n"); + + pandecode_prop("nr_channels = MALI_POSITIVE(%d)", + (format.nr_channels + 1)); + + pandecode_log(".unk2 = "); + pandecode_log_decoded_flags(sfbd_unk2_info, format.unk2); + pandecode_log_cont(",\n"); + + pandecode_prop("block = %s", pandecode_block_format(format.block)); + + pandecode_prop("unk3 = 0x%" PRIx32, format.unk3); + + pandecode_indent--; + pandecode_log("},\n"); +} + +static struct pandecode_fbd +pandecode_sfbd(uint64_t gpu_va, int job_no, bool is_fragment, unsigned gpu_id) { struct pandecode_mapped_memory *mem = pandecode_find_mapped_gpu_mem_containing(gpu_va); const struct mali_single_framebuffer *PANDECODE_PTR_VAR(s, mem, (mali_ptr) gpu_va); + struct pandecode_fbd info = { + .has_extra = false, + .rt_count = 1 + }; + pandecode_log("struct mali_single_framebuffer framebuffer_%"PRIx64"_%d = {\n", gpu_va, job_no); pandecode_indent++; pandecode_prop("unknown1 = 0x%" PRIx32, s->unknown1); pandecode_prop("unknown2 = 0x%" PRIx32, s->unknown2); - pandecode_log(".format = "); - pandecode_log_decoded_flags(fb_fmt_flag_info, s->format); - pandecode_log_cont(",\n"); + pandecode_sfbd_format(s->format); + + info.width = s->width + 1; + info.height = s->height + 1; + + pandecode_prop("width = MALI_POSITIVE(%" PRId16 ")", info.width); + pandecode_prop("height = MALI_POSITIVE(%" PRId16 ")", info.height); + + MEMORY_PROP(s, checksum); - pandecode_prop("width = MALI_POSITIVE(%" PRId16 ")", s->width + 1); - pandecode_prop("height = MALI_POSITIVE(%" PRId16 ")", s->height + 1); + if (s->checksum_stride) + pandecode_prop("checksum_stride = %d", s->checksum_stride); MEMORY_PROP(s, framebuffer); pandecode_prop("stride = %d", s->stride); @@ -640,14 +693,28 @@ pandecode_sfbd(uint64_t gpu_va, int job_no, bool is_fragment) pandecode_log_decoded_flags(clear_flag_info, s->clear_flags); pandecode_log_cont(",\n"); - if (s->depth_buffer | s->depth_buffer_enable) { + if (s->depth_buffer) { MEMORY_PROP(s, depth_buffer); - pandecode_prop("depth_buffer_enable = %s", DS_ENABLE(s->depth_buffer_enable)); + pandecode_prop("depth_stride = %d", s->depth_stride); } - if (s->stencil_buffer | s->stencil_buffer_enable) { + if (s->stencil_buffer) { MEMORY_PROP(s, stencil_buffer); - pandecode_prop("stencil_buffer_enable = %s", DS_ENABLE(s->stencil_buffer_enable)); + pandecode_prop("stencil_stride = %d", s->stencil_stride); + } + + if (s->depth_stride_zero || + s->stencil_stride_zero || + s->zero7 || s->zero8) { + pandecode_msg("XXX: Depth/stencil zeros tripped\n"); + pandecode_prop("depth_stride_zero = 0x%x", + s->depth_stride_zero); + pandecode_prop("stencil_stride_zero = 0x%x", + 
s->stencil_stride_zero); + pandecode_prop("zero7 = 0x%" PRIx32, + s->zero7); + pandecode_prop("zero8 = 0x%" PRIx32, + s->zero8); } if (s->clear_color_1 | s->clear_color_2 | s->clear_color_3 | s->clear_color_4) { @@ -668,9 +735,11 @@ pandecode_sfbd(uint64_t gpu_va, int job_no, bool is_fragment) pandecode_prop("clear_stencil = 0x%x", s->clear_stencil); } - MEMORY_PROP(s, unknown_address_0); + MEMORY_PROP(s, scratchpad); const struct midgard_tiler_descriptor t = s->tiler; - pandecode_midgard_tiler_descriptor(&t, s->width + 1, s->height + 1, is_fragment); + + bool has_hierarchy = !(gpu_id == 0x0720 || gpu_id == 0x0820 || gpu_id == 0x0830); + pandecode_midgard_tiler_descriptor(&t, s->width + 1, s->height + 1, is_fragment, has_hierarchy); pandecode_indent--; pandecode_log("};\n"); @@ -679,6 +748,7 @@ pandecode_sfbd(uint64_t gpu_va, int job_no, bool is_fragment) pandecode_prop("zero1 = 0x%" PRIx64, s->zero1); pandecode_prop("zero2 = 0x%" PRIx32, s->zero2); pandecode_prop("zero4 = 0x%" PRIx32, s->zero4); + pandecode_prop("zero5 = 0x%" PRIx32, s->zero5); printf(".zero3 = {"); @@ -693,6 +763,8 @@ pandecode_sfbd(uint64_t gpu_va, int job_no, bool is_fragment) printf("%X, ", s->zero6[i]); printf("},\n"); + + return info; } static void @@ -856,8 +928,7 @@ pandecode_rt_format(struct mali_rt_format format) pandecode_prop("unk2 = 0x%" PRIx32, format.unk2); pandecode_prop("unk3 = 0x%" PRIx32, format.unk3); - pandecode_prop("block = %s", - pandecode_mfbd_block_format(format.block)); + pandecode_prop("block = %s", pandecode_block_format(format.block)); /* TODO: Map formats so we can check swizzles and print nicely */ pandecode_log("swizzle"); @@ -865,7 +936,7 @@ pandecode_rt_format(struct mali_rt_format format) pandecode_log_cont(",\n"); pandecode_prop("nr_channels = MALI_POSITIVE(%d)", - MALI_NEGATIVE(format.nr_channels)); + (format.nr_channels + 1)); pandecode_log(".flags = "); pandecode_log_decoded_flags(mfbd_fmt_flag_info, format.flags); @@ -895,7 +966,7 @@ pandecode_render_target(uint64_t gpu_va, unsigned job_no, const struct bifrost_f pandecode_log("struct bifrost_render_target rts_list_%"PRIx64"_%d[] = {\n", gpu_va, job_no); pandecode_indent++; - for (int i = 0; i < MALI_NEGATIVE(fb->rt_count_1); i++) { + for (int i = 0; i < (fb->rt_count_1 + 1); i++) { mali_ptr rt_va = gpu_va + i * sizeof(struct bifrost_render_target); struct pandecode_mapped_memory *mem = pandecode_find_mapped_gpu_mem_containing(rt_va); @@ -906,7 +977,7 @@ pandecode_render_target(uint64_t gpu_va, unsigned job_no, const struct bifrost_f pandecode_rt_format(rt->format); - if (rt->format.block == MALI_MFBD_BLOCK_AFBC) { + if (rt->format.block == MALI_BLOCK_AFBC) { pandecode_log(".afbc = {\n"); pandecode_indent++; @@ -951,12 +1022,14 @@ pandecode_render_target(uint64_t gpu_va, unsigned job_no, const struct bifrost_f pandecode_log("};\n"); } -static unsigned +static struct pandecode_fbd pandecode_mfbd_bfr(uint64_t gpu_va, int job_no, bool is_fragment) { struct pandecode_mapped_memory *mem = pandecode_find_mapped_gpu_mem_containing(gpu_va); const struct bifrost_framebuffer *PANDECODE_PTR_VAR(fb, mem, (mali_ptr) gpu_va); + struct pandecode_fbd info; + if (fb->sample_locations) { /* The blob stores all possible sample locations in a single buffer * allocated on startup, and just switches the pointer when switching @@ -987,6 +1060,7 @@ pandecode_mfbd_bfr(uint64_t gpu_va, int job_no, bool is_fragment) pandecode_log("struct bifrost_framebuffer framebuffer_%"PRIx64"_%d = {\n", gpu_va, job_no); pandecode_indent++; + pandecode_prop("stack_shift = 
0x%x", fb->stack_shift); pandecode_prop("unk0 = 0x%x", fb->unk0); if (fb->sample_locations) @@ -996,6 +1070,10 @@ pandecode_mfbd_bfr(uint64_t gpu_va, int job_no, bool is_fragment) * now */ MEMORY_PROP(fb, unknown1); + info.width = fb->width1 + 1; + info.height = fb->height1 + 1; + info.rt_count = fb->rt_count_1 + 1; + pandecode_prop("width1 = MALI_POSITIVE(%d)", fb->width1 + 1); pandecode_prop("height1 = MALI_POSITIVE(%d)", fb->height1 + 1); pandecode_prop("width2 = MALI_POSITIVE(%d)", fb->width2 + 1); @@ -1023,7 +1101,7 @@ pandecode_mfbd_bfr(uint64_t gpu_va, int job_no, bool is_fragment) pandecode_prop("unknown2 = 0x%x", fb->unknown2); MEMORY_PROP(fb, scratchpad); const struct midgard_tiler_descriptor t = fb->tiler; - pandecode_midgard_tiler_descriptor(&t, fb->width1 + 1, fb->height1 + 1, is_fragment); + pandecode_midgard_tiler_descriptor(&t, fb->width1 + 1, fb->height1 + 1, is_fragment, true); if (fb->zero3 || fb->zero4) { pandecode_msg("XXX: framebuffer zeros tripped\n"); @@ -1036,7 +1114,9 @@ pandecode_mfbd_bfr(uint64_t gpu_va, int job_no, bool is_fragment) gpu_va += sizeof(struct bifrost_framebuffer); - if ((fb->mfbd_flags & MALI_MFBD_EXTRA) && is_fragment) { + info.has_extra = (fb->mfbd_flags & MALI_MFBD_EXTRA) && is_fragment; + + if (info.has_extra) { mem = pandecode_find_mapped_gpu_mem_containing(gpu_va); const struct bifrost_fb_extra *PANDECODE_PTR_VAR(fbx, mem, (mali_ptr) gpu_va); @@ -1120,8 +1200,7 @@ pandecode_mfbd_bfr(uint64_t gpu_va, int job_no, bool is_fragment) if (is_fragment) pandecode_render_target(gpu_va, job_no, fb); - /* Passback the render target count */ - return MALI_NEGATIVE(fb->rt_count_1); + return info; } /* Just add a comment decoding the shift/odd fields forming the padded vertices @@ -1223,28 +1302,64 @@ pandecode_magic_divisor(uint32_t magic, unsigned shift, unsigned orig_divisor, u static void pandecode_attributes(const struct pandecode_mapped_memory *mem, mali_ptr addr, int job_no, char *suffix, - int count, bool varying) + int count, bool varying, enum mali_job_type job_type) { char *prefix = varying ? "varying" : "attribute"; + assert(addr); - if (!addr) { - pandecode_msg("no %s\n", prefix); + if (!count) { + pandecode_msg("warn: No %s records\n", prefix); return; } union mali_attr *attr = pandecode_fetch_gpu_mem(mem, addr, sizeof(union mali_attr) * count); - pandecode_log("union mali_attr %s_%d[] = {\n", prefix, job_no); - pandecode_indent++; - for (int i = 0; i < count; ++i) { + /* First, check for special records */ + if (attr[i].elements < MALI_RECORD_SPECIAL) { + if (attr[i].size) + pandecode_msg("XXX: tripped size=%d\n", attr[i].size); + + if (attr[i].stride) { + /* gl_InstanceID passes a magic divisor in the + * stride field to divide by the padded vertex + * count. No other records should do so, so + * stride should otherwise be zero. Note that + * stride in the usual attribute sense doesn't + * apply to special records. */ + + bool has_divisor = attr[i].elements == MALI_ATTR_INSTANCEID; + + pandecode_log_cont("/* %smagic divisor = %X */ ", + has_divisor ? "" : "XXX: ", attr[i].stride); + } + + if (attr[i].shift || attr[i].extra_flags) { + /* Attributes use these fields for + * instancing/padding/etc type issues, but + * varyings don't */ + + pandecode_log_cont("/* %sshift=%d, extra=%d */ ", + varying ? 
"XXX: " : "", + attr[i].shift, attr[i].extra_flags); + } + + /* Print the special record name */ + bool attribute = false; + pandecode_log("%s_%d = %s;\n", prefix, i, pandecode_special_record(attr[i].elements, &attribute)); + + /* Sanity check */ + if (attribute == varying) + pandecode_msg("XXX: mismatched special record\n"); + + continue; + } + enum mali_attr_mode mode = attr[i].elements & 7; if (mode == MALI_ATTR_UNUSED) pandecode_msg("XXX: unused attribute record\n"); - pandecode_make_indent(); - /* For non-linear records, we need to print the type of record */ if (mode != MALI_ATTR_LINEAR) pandecode_log_cont("%s ", pandecode_attr_mode_short(mode)); @@ -1255,13 +1370,9 @@ pandecode_attributes(const struct pandecode_mapped_memory *mem, /* Print the stride and size */ pandecode_log_cont("<%u>[%u]", attr[i].stride, attr[i].size); - /* Check: the size must be divisible by the stride */ - if (attr[i].size % attr[i].stride) - pandecode_msg("XXX: size not divisible by stride\n"); - - /* TODO: Sanity check the quotient itself -- it should equal - * vertex count (or something computed from it for instanced) - * which means we can check and elide */ + /* TODO: Sanity check the quotient itself. It must be equal to + * (or be greater than, if the driver added padding) the padded + * vertex count. */ /* Finally, print the pointer */ mali_ptr raw_elements = attr[i].elements & ~7; @@ -1274,8 +1385,13 @@ pandecode_attributes(const struct pandecode_mapped_memory *mem, /* shift/extra_flags exist only for instanced */ if (attr[i].shift | attr[i].extra_flags) { + /* These are set to random values by the blob for + * varyings, most likely a symptom of uninitialized + * memory where the hardware masked the bug. As such we + * put this at a warning, not an error. */ + if (mode == MALI_ATTR_LINEAR) - pandecode_msg("XXX: instancing fields set for linear\n"); + pandecode_msg("warn: instancing fields set for linear\n"); pandecode_prop("shift = %d", attr[i].shift); pandecode_prop("extra_flags = %d", attr[i].extra_flags); @@ -1305,8 +1421,7 @@ pandecode_attributes(const struct pandecode_mapped_memory *mem, } - pandecode_indent--; - pandecode_log("};\n"); + pandecode_log("\n"); } static mali_ptr @@ -1468,11 +1583,8 @@ pandecode_attribute_meta(int job_no, int count, const struct mali_vertex_tiler_p unsigned max_index = 0; snprintf(base, sizeof(base), "%s_meta", prefix); - pandecode_log("struct mali_attr_meta %s_%d%s[] = {\n", base, job_no, suffix); - pandecode_indent++; - struct mali_attr_meta *attr_meta; - mali_ptr p = varying ? (v->varying_meta & ~0xF) : v->attribute_meta; + mali_ptr p = varying ? v->varying_meta : v->attribute_meta; struct pandecode_mapped_memory *attr_mem = pandecode_find_mapped_gpu_mem_containing(p); @@ -1525,7 +1637,6 @@ pandecode_attribute_meta(int job_no, int count, const struct mali_vertex_tiler_p pandecode_prop("unknown3 = 0x%" PRIx64, (u64) attr_meta->unknown3); } - pandecode_make_indent(); pandecode_format_short(attr_meta->format, false); pandecode_log_cont(" %s_%u", prefix, attr_meta->index); @@ -1537,36 +1648,11 @@ pandecode_attribute_meta(int job_no, int count, const struct mali_vertex_tiler_p pandecode_log_cont(";\n"); } - pandecode_indent--; - pandecode_log("};\n"); + pandecode_log("\n"); return count ? 
(max_index + 1) : 0; } -static void -pandecode_indices(uintptr_t pindices, uint32_t index_count, int job_no) -{ - struct pandecode_mapped_memory *imem = pandecode_find_mapped_gpu_mem_containing(pindices); - - if (imem) { - /* Indices are literally just a u32 array :) */ - - uint32_t *PANDECODE_PTR_VAR(indices, imem, pindices); - - pandecode_log("uint32_t indices_%d[] = {\n", job_no); - pandecode_indent++; - - for (unsigned i = 0; i < (index_count + 1); i += 3) - pandecode_log("%d, %d, %d,\n", - indices[i], - indices[i + 1], - indices[i + 2]); - - pandecode_indent--; - pandecode_log("};\n"); - } -} - /* return bits [lo, hi) of word */ static u32 bits(u32 word, u32 lo, u32 hi) @@ -1578,7 +1664,7 @@ bits(u32 word, u32 lo, u32 hi) } static void -pandecode_vertex_tiler_prefix(struct mali_vertex_tiler_prefix *p, int job_no, bool noninstanced) +pandecode_vertex_tiler_prefix(struct mali_vertex_tiler_prefix *p, int job_no, bool graphics) { pandecode_log_cont("{\n"); pandecode_indent++; @@ -1587,13 +1673,20 @@ pandecode_vertex_tiler_prefix(struct mali_vertex_tiler_prefix *p, int job_no, bo * invocation_count for an explanation. */ - unsigned size_x = bits(p->invocation_count, 0, p->size_y_shift) + 1; - unsigned size_y = bits(p->invocation_count, p->size_y_shift, p->size_z_shift) + 1; - unsigned size_z = bits(p->invocation_count, p->size_z_shift, p->workgroups_x_shift) + 1; + unsigned size_y_shift = bits(p->invocation_shifts, 0, 5); + unsigned size_z_shift = bits(p->invocation_shifts, 5, 10); + unsigned workgroups_x_shift = bits(p->invocation_shifts, 10, 16); + unsigned workgroups_y_shift = bits(p->invocation_shifts, 16, 22); + unsigned workgroups_z_shift = bits(p->invocation_shifts, 22, 28); + unsigned workgroups_x_shift_2 = bits(p->invocation_shifts, 28, 32); - unsigned groups_x = bits(p->invocation_count, p->workgroups_x_shift, p->workgroups_y_shift) + 1; - unsigned groups_y = bits(p->invocation_count, p->workgroups_y_shift, p->workgroups_z_shift) + 1; - unsigned groups_z = bits(p->invocation_count, p->workgroups_z_shift, 32) + 1; + unsigned size_x = bits(p->invocation_count, 0, size_y_shift) + 1; + unsigned size_y = bits(p->invocation_count, size_y_shift, size_z_shift) + 1; + unsigned size_z = bits(p->invocation_count, size_z_shift, workgroups_x_shift) + 1; + + unsigned groups_x = bits(p->invocation_count, workgroups_x_shift, workgroups_y_shift) + 1; + unsigned groups_y = bits(p->invocation_count, workgroups_y_shift, workgroups_z_shift) + 1; + unsigned groups_z = bits(p->invocation_count, workgroups_z_shift, 32) + 1; /* Even though we have this decoded, we want to ensure that the * representation is "unique" so we don't lose anything by printing only @@ -1604,35 +1697,25 @@ pandecode_vertex_tiler_prefix(struct mali_vertex_tiler_prefix *p, int job_no, bo * decoded, we're good to go. 
*/ struct mali_vertex_tiler_prefix ref; - panfrost_pack_work_groups_compute(&ref, groups_x, groups_y, groups_z, size_x, size_y, size_z, noninstanced); + panfrost_pack_work_groups_compute(&ref, groups_x, groups_y, groups_z, size_x, size_y, size_z, graphics); bool canonical = (p->invocation_count == ref.invocation_count) && - (p->size_y_shift == ref.size_y_shift) && - (p->size_z_shift == ref.size_z_shift) && - (p->workgroups_x_shift == ref.workgroups_x_shift) && - (p->workgroups_y_shift == ref.workgroups_y_shift) && - (p->workgroups_z_shift == ref.workgroups_z_shift) && - (p->workgroups_x_shift_2 == ref.workgroups_x_shift_2); + (p->invocation_shifts == ref.invocation_shifts); if (!canonical) { pandecode_msg("XXX: non-canonical workgroups packing\n"); - pandecode_msg("expected: %X, %d, %d, %d, %d, %d\n", + pandecode_msg("expected: %X, %X", ref.invocation_count, - ref.size_y_shift, - ref.size_z_shift, - ref.workgroups_x_shift, - ref.workgroups_y_shift, - ref.workgroups_z_shift, - ref.workgroups_x_shift_2); + ref.invocation_shifts); pandecode_prop("invocation_count = 0x%" PRIx32, p->invocation_count); - pandecode_prop("size_y_shift = %d", p->size_y_shift); - pandecode_prop("size_z_shift = %d", p->size_z_shift); - pandecode_prop("workgroups_x_shift = %d", p->workgroups_x_shift); - pandecode_prop("workgroups_y_shift = %d", p->workgroups_y_shift); - pandecode_prop("workgroups_z_shift = %d", p->workgroups_z_shift); - pandecode_prop("workgroups_x_shift_2 = %d", p->workgroups_x_shift_2); + pandecode_prop("size_y_shift = %d", size_y_shift); + pandecode_prop("size_z_shift = %d", size_z_shift); + pandecode_prop("workgroups_x_shift = %d", workgroups_x_shift); + pandecode_prop("workgroups_y_shift = %d", workgroups_y_shift); + pandecode_prop("workgroups_z_shift = %d", workgroups_z_shift); + pandecode_prop("workgroups_x_shift_2 = %d", workgroups_x_shift_2); } /* Regardless, print the decode */ @@ -1654,6 +1737,30 @@ pandecode_vertex_tiler_prefix(struct mali_vertex_tiler_prefix *p, int job_no, bo if (p->index_count) pandecode_prop("index_count = MALI_POSITIVE(%" PRId32 ")", p->index_count + 1); + + unsigned index_raw_size = (p->unknown_draw & MALI_DRAW_INDEXED_SIZE); + index_raw_size >>= MALI_DRAW_INDEXED_SHIFT; + + /* Validate an index buffer is present if we need one. TODO: verify + * relationship between invocation_count and index_count */ + + if (p->indices) { + unsigned count = p->index_count; + + /* Grab the size */ + unsigned size = (index_raw_size == 0x3) ? 
4 : index_raw_size; + + /* Ensure we got a size, and if so, validate the index buffer + * is large enough to hold a full set of indices of the given + * size */ + + if (!index_raw_size) + pandecode_msg("XXX: index size missing\n"); + else + pandecode_validate_buffer(p->indices, count * size); + } else if (index_raw_size) + pandecode_msg("XXX: unexpected index size %u\n", index_raw_size); + if (p->offset_bias_correction) pandecode_prop("offset_bias_correction = %d", p->offset_bias_correction); @@ -1670,29 +1777,28 @@ pandecode_uniform_buffers(mali_ptr pubufs, int ubufs_count, int job_no) struct pandecode_mapped_memory *umem = pandecode_find_mapped_gpu_mem_containing(pubufs); struct mali_uniform_buffer_meta *PANDECODE_PTR_VAR(ubufs, umem, pubufs); - pandecode_log("struct mali_uniform_buffer_meta uniform_buffers_%"PRIx64"_%d[] = {\n", - pubufs, job_no); - pandecode_indent++; - for (int i = 0; i < ubufs_count; i++) { - pandecode_log("{\n"); - pandecode_indent++; - unsigned size = (ubufs[i].size + 1) * 16; mali_ptr addr = ubufs[i].ptr << 2; pandecode_validate_buffer(addr, size); char *ptr = pointer_as_memory_reference(ubufs[i].ptr << 2); - pandecode_prop("size = %u", size); - pandecode_prop("ptr = (%s) >> 2", ptr); - pandecode_indent--; - pandecode_log("},\n"); + pandecode_log("ubuf_%d[%u] = %s;\n", i, size, ptr); free(ptr); } - pandecode_indent--; - pandecode_log("};\n"); + pandecode_log("\n"); +} + +static void +pandecode_uniforms(mali_ptr uniforms, unsigned uniform_count) +{ + pandecode_validate_buffer(uniforms, uniform_count * 16); + + char *ptr = pointer_as_memory_reference(uniforms); + pandecode_log("vec4 uniforms[%u] = %s;\n", uniform_count, ptr); + free(ptr); } static void @@ -1718,11 +1824,23 @@ pandecode_scratchpad(uintptr_t pscratchpad, int job_no, char *suffix) pandecode_log("};\n"); } +static const char * +shader_type_for_job(unsigned type) +{ + switch (type) { + case JOB_TYPE_VERTEX: return "VERTEX"; + case JOB_TYPE_TILER: return "FRAGMENT"; + case JOB_TYPE_COMPUTE: return "COMPUTE"; + default: + return "UNKNOWN"; + } +} + static unsigned shader_id = 0; -static void +static struct midgard_disasm_stats pandecode_shader_disassemble(mali_ptr shader_ptr, int shader_no, int type, - bool is_bifrost, unsigned nr_regs) + bool is_bifrost, unsigned gpu_id) { struct pandecode_mapped_memory *mem = pandecode_find_mapped_gpu_mem_containing(shader_ptr); uint8_t *PANDECODE_PTR_VAR(code, mem, shader_ptr); @@ -1735,19 +1853,52 @@ pandecode_shader_disassemble(mali_ptr shader_ptr, int shader_no, int type, printf("\n\n"); - char prefix[512]; - - snprintf(prefix, sizeof(prefix) - 1, "shader%d - %s shader: ", - shader_id++, - (type == JOB_TYPE_TILER) ? "FRAGMENT" : "VERTEX"); + struct midgard_disasm_stats stats; if (is_bifrost) { disassemble_bifrost(code, sz, false); + + /* TODO: Extend stats to Bifrost */ + stats.texture_count = -128; + stats.sampler_count = -128; + stats.attribute_count = -128; + stats.varying_count = -128; + stats.uniform_count = -128; + stats.uniform_buffer_count = -128; + stats.work_count = -128; + + stats.instruction_count = 0; + stats.bundle_count = 0; + stats.quadword_count = 0; + stats.helper_invocations = false; } else { - disassemble_midgard(code, sz, true, nr_regs, prefix); + stats = disassemble_midgard(code, sz, gpu_id, + type == JOB_TYPE_TILER ? + MESA_SHADER_FRAGMENT : MESA_SHADER_VERTEX); } - printf("\n\n"); + /* Print shader-db stats. 
Skip COMPUTE jobs since they are used for + * driver-internal purposes with the blob and interfere */ + + bool should_shaderdb = type != JOB_TYPE_COMPUTE; + + if (should_shaderdb) { + unsigned nr_threads = + (stats.work_count <= 4) ? 4 : + (stats.work_count <= 8) ? 2 : + 1; + + printf("shader%d - MESA_SHADER_%s shader: " + "%u inst, %u bundles, %u quadwords, " + "%u registers, %u threads, 0 loops, 0:0 spills:fills\n\n\n", + shader_id++, + shader_type_for_job(type), + stats.instruction_count, stats.bundle_count, stats.quadword_count, + stats.work_count, nr_threads); + } + + + return stats; } static void @@ -1862,35 +2013,35 @@ pandecode_texture(mali_ptr u, * properties, but dump extra * possibilities to futureproof */ - int bitmap_count = MALI_NEGATIVE(t->levels); + int bitmap_count = t->levels + 1; /* Miptree for each face */ if (f.type == MALI_TEX_CUBE) bitmap_count *= 6; + else if (f.type == MALI_TEX_3D) + bitmap_count *= t->depth; /* Array of textures */ - bitmap_count *= MALI_NEGATIVE(t->array_size); + bitmap_count *= (t->array_size + 1); /* Stride for each element */ if (f.manual_stride) bitmap_count *= 2; - /* Sanity check the size */ - int max_count = sizeof(t->payload) / sizeof(t->payload[0]); - assert (bitmap_count <= max_count); - + mali_ptr *pointers_and_strides = pandecode_fetch_gpu_mem(tmem, + u + sizeof(*t), sizeof(mali_ptr) * bitmap_count); for (int i = 0; i < bitmap_count; ++i) { /* How we dump depends if this is a stride or a pointer */ if (f.manual_stride && (i & 1)) { /* signed 32-bit snuck in as a 64-bit pointer */ - uint64_t stride_set = t->payload[i]; + uint64_t stride_set = pointers_and_strides[i]; uint32_t clamped_stride = stride_set; int32_t stride = clamped_stride; assert(stride_set == clamped_stride); pandecode_log("(mali_ptr) %d /* stride */, \n", stride); } else { - char *a = pointer_as_memory_reference(t->payload[i]); + char *a = pointer_as_memory_reference(pointers_and_strides[i]); pandecode_log("%s, \n", a); free(a); } @@ -1903,39 +2054,115 @@ pandecode_texture(mali_ptr u, pandecode_log("};\n"); } +/* For shader properties like texture_count, we have a claimed property in the shader_meta, and the actual Truth from static analysis (this may just be an upper limit). We validate accordingly */ + +static void +pandecode_shader_prop(const char *name, unsigned claim, signed truth, bool fuzzy) +{ + /* Nothing to do */ + if (claim == truth) + return; + + if (fuzzy) + assert(truth >= 0); + + if ((truth >= 0) && !fuzzy) { + pandecode_msg("%s: expected %s = %d, claimed %u\n", + (truth < claim) ? 
"warn" : "XXX", + name, truth, claim); + } else if ((claim > -truth) && !fuzzy) { + pandecode_msg("XXX: expected %s <= %u, claimed %u\n", + name, -truth, claim); + } else if (fuzzy && (claim < truth)) + pandecode_msg("XXX: expected %s >= %u, claimed %u\n", + name, truth, claim); + + pandecode_log(".%s = %" PRId16, name, claim); + + if (fuzzy) + pandecode_log_cont(" /* %u used */", truth); + + pandecode_log_cont(",\n"); +} + +static void +pandecode_blend_shader_disassemble(mali_ptr shader, int job_no, int job_type, + bool is_bifrost, unsigned gpu_id) +{ + struct midgard_disasm_stats stats = + pandecode_shader_disassemble(shader, job_no, job_type, is_bifrost, gpu_id); + + bool has_texture = (stats.texture_count > 0); + bool has_sampler = (stats.sampler_count > 0); + bool has_attribute = (stats.attribute_count > 0); + bool has_varying = (stats.varying_count > 0); + bool has_uniform = (stats.uniform_count > 0); + bool has_ubo = (stats.uniform_buffer_count > 0); + + if (has_texture || has_sampler) + pandecode_msg("XXX: blend shader accessing textures\n"); + + if (has_attribute || has_varying) + pandecode_msg("XXX: blend shader accessing interstage\n"); + + if (has_uniform || has_ubo) + pandecode_msg("XXX: blend shader accessing uniforms\n"); +} + static void pandecode_vertex_tiler_postfix_pre( const struct mali_vertex_tiler_postfix *p, int job_no, enum mali_job_type job_type, - char *suffix, bool is_bifrost) + char *suffix, bool is_bifrost, unsigned gpu_id) { - mali_ptr shader_meta_ptr = (u64) (uintptr_t) (p->_shader_upper << 4); struct pandecode_mapped_memory *attr_mem; - unsigned rt_count = 1; - /* On Bifrost, since the tiler heap (for tiler jobs) and the scratchpad * are the only things actually needed from the FBD, vertex/tiler jobs * no longer reference the FBD -- instead, this field points to some * info about the scratchpad. */ + + struct pandecode_fbd fbd_info = { + /* Default for Bifrost */ + .rt_count = 1 + }; + if (is_bifrost) - pandecode_scratchpad(p->framebuffer & ~FBD_TYPE, job_no, suffix); + pandecode_scratchpad(p->framebuffer & ~1, job_no, suffix); else if (p->framebuffer & MALI_MFBD) - rt_count = pandecode_mfbd_bfr((u64) ((uintptr_t) p->framebuffer) & FBD_MASK, job_no, false); + fbd_info = pandecode_mfbd_bfr((u64) ((uintptr_t) p->framebuffer) & FBD_MASK, job_no, false); else if (job_type == JOB_TYPE_COMPUTE) pandecode_compute_fbd((u64) (uintptr_t) p->framebuffer, job_no); else - pandecode_sfbd((u64) (uintptr_t) p->framebuffer, job_no, false); + fbd_info = pandecode_sfbd((u64) (uintptr_t) p->framebuffer, job_no, false, gpu_id); int varying_count = 0, attribute_count = 0, uniform_count = 0, uniform_buffer_count = 0; int texture_count = 0, sampler_count = 0; - if (shader_meta_ptr) { - struct pandecode_mapped_memory *smem = pandecode_find_mapped_gpu_mem_containing(shader_meta_ptr); - struct mali_shader_meta *PANDECODE_PTR_VAR(s, smem, shader_meta_ptr); + if (p->shader) { + struct pandecode_mapped_memory *smem = pandecode_find_mapped_gpu_mem_containing(p->shader); + struct mali_shader_meta *PANDECODE_PTR_VAR(s, smem, p->shader); + + /* Disassemble ahead-of-time to get stats. 
Initialize with + * stats for the missing-shader case so we get validation + * there, too */ - pandecode_log("struct mali_shader_meta shader_meta_%"PRIx64"_%d%s = {\n", shader_meta_ptr, job_no, suffix); + struct midgard_disasm_stats info = { + .texture_count = 0, + .sampler_count = 0, + .attribute_count = 0, + .varying_count = 0, + .work_count = 1, + + .uniform_count = -128, + .uniform_buffer_count = 0 + }; + + if (s->shader & ~0xF) + info = pandecode_shader_disassemble(s->shader & ~0xF, job_no, job_type, is_bifrost, gpu_id); + + pandecode_log("struct mali_shader_meta shader_meta_%"PRIx64"_%d%s = {\n", p->shader, job_no, suffix); pandecode_indent++; /* Save for dumps */ @@ -1948,45 +2175,45 @@ pandecode_vertex_tiler_postfix_pre( uniform_count = s->bifrost2.uniform_count; uniform_buffer_count = s->bifrost1.uniform_buffer_count; } else { - uniform_count = s->midgard1.uniform_buffer_count; + uniform_count = s->midgard1.uniform_count; uniform_buffer_count = s->midgard1.uniform_buffer_count; } - mali_ptr shader_ptr = pandecode_shader_address("shader", s->shader); - - pandecode_prop("texture_count = %" PRId16, s->texture_count); - pandecode_prop("sampler_count = %" PRId16, s->sampler_count); - pandecode_prop("attribute_count = %" PRId16, s->attribute_count); - pandecode_prop("varying_count = %" PRId16, s->varying_count); + pandecode_shader_address("shader", s->shader); - unsigned nr_registers = 0; + pandecode_shader_prop("texture_count", s->texture_count, info.texture_count, false); + pandecode_shader_prop("sampler_count", s->sampler_count, info.sampler_count, false); + pandecode_shader_prop("attribute_count", s->attribute_count, info.attribute_count, false); + pandecode_shader_prop("varying_count", s->varying_count, info.varying_count, false); + pandecode_shader_prop("uniform_buffer_count", + uniform_buffer_count, + info.uniform_buffer_count, true); - if (is_bifrost) { - pandecode_log(".bifrost1 = {\n"); - pandecode_indent++; + if (!is_bifrost) { + pandecode_shader_prop("uniform_count", + uniform_count, + info.uniform_count, false); - pandecode_prop("uniform_buffer_count = %" PRId32, s->bifrost1.uniform_buffer_count); - pandecode_prop("unk1 = 0x%" PRIx32, s->bifrost1.unk1); + pandecode_shader_prop("work_count", + s->midgard1.work_count, info.work_count, false); + } - pandecode_indent--; - pandecode_log("},\n"); + if (is_bifrost) { + pandecode_prop("bifrost1.unk1 = 0x%" PRIx32, s->bifrost1.unk1); } else { - pandecode_log(".midgard1 = {\n"); - pandecode_indent++; + bool helpers = s->midgard1.flags & MALI_HELPER_INVOCATIONS; + s->midgard1.flags &= ~MALI_HELPER_INVOCATIONS; - pandecode_prop("uniform_count = %" PRId16, s->midgard1.uniform_count); - pandecode_prop("uniform_buffer_count = %" PRId16, s->midgard1.uniform_buffer_count); - pandecode_prop("work_count = %" PRId16, s->midgard1.work_count); - nr_registers = s->midgard1.work_count; + if (helpers != info.helper_invocations) { + pandecode_msg("XXX: expected helpers %u but got %u\n", + info.helper_invocations, helpers); + } - pandecode_log(".flags = "); + pandecode_log(".midgard1.flags = "); pandecode_log_decoded_flags(shader_midgard1_flag_info, s->midgard1.flags); pandecode_log_cont(",\n"); - pandecode_prop("unknown2 = 0x%" PRIx32, s->midgard1.unknown2); - - pandecode_indent--; - pandecode_log("},\n"); + pandecode_prop("midgard1.unknown2 = 0x%" PRIx32, s->midgard1.unknown2); } if (s->depth_units || s->depth_factor) { @@ -2011,7 +2238,7 @@ pandecode_vertex_tiler_postfix_pre( /* We're not quite sure what these flags mean without the depth test, if 
anything */ - if (unknown2_3 & (MALI_DEPTH_TEST | MALI_DEPTH_FUNC_MASK)) { + if (unknown2_3 & (MALI_DEPTH_WRITEMASK | MALI_DEPTH_FUNC_MASK)) { const char *func = pandecode_func(MALI_GET_DEPTH_FUNC(unknown2_3)); unknown2_3 &= ~MALI_DEPTH_FUNC_MASK; @@ -2059,9 +2286,10 @@ pandecode_vertex_tiler_postfix_pre( if (!is_bifrost) { /* TODO: Blend shaders routing/disasm */ - union midgard_blend blend = s->blend; - pandecode_midgard_blend(&blend, false); + mali_ptr shader = pandecode_midgard_blend(&blend, s->unknown2_3 & MALI_HAS_BLEND_SHADER); + if (shader & ~0xF) + pandecode_blend_shader_disassemble(shader, job_no, job_type, false, gpu_id); } pandecode_indent--; @@ -2070,10 +2298,10 @@ pandecode_vertex_tiler_postfix_pre( /* MRT blend fields are used whenever MFBD is used, with * per-RT descriptors */ - if (job_type == JOB_TYPE_TILER) { + if (job_type == JOB_TYPE_TILER && p->framebuffer & MALI_MFBD) { void* blend_base = (void *) (s + 1); - for (unsigned i = 0; i < rt_count; i++) { + for (unsigned i = 0; i < fbd_info.rt_count; i++) { mali_ptr shader = 0; if (is_bifrost) @@ -2082,14 +2310,12 @@ pandecode_vertex_tiler_postfix_pre( shader = pandecode_midgard_blend_mrt(blend_base, job_no, i); if (shader & ~0xF) - pandecode_shader_disassemble(shader, job_no, job_type, false, 0); + pandecode_blend_shader_disassemble(shader, job_no, job_type, false, gpu_id); + } } - - if (shader_ptr & ~0xF) - pandecode_shader_disassemble(shader_ptr, job_no, job_type, is_bifrost, nr_registers); } else - pandecode_msg("\n"); + pandecode_msg("XXX: missing shader descriptor\n"); if (p->viewport) { struct pandecode_mapped_memory *fmem = pandecode_find_mapped_gpu_mem_containing(p->viewport); @@ -2117,11 +2343,14 @@ pandecode_vertex_tiler_postfix_pre( pandecode_log("};\n"); } - if (p->attribute_meta) { - unsigned max_attr_index = pandecode_attribute_meta(job_no, attribute_count, p, false, suffix); + unsigned max_attr_index = 0; + if (p->attribute_meta) + max_attr_index = pandecode_attribute_meta(job_no, attribute_count, p, false, suffix); + + if (p->attributes) { attr_mem = pandecode_find_mapped_gpu_mem_containing(p->attributes); - pandecode_attributes(attr_mem, p->attributes, job_no, suffix, max_attr_index, false); + pandecode_attributes(attr_mem, p->attributes, job_no, suffix, max_attr_index, false, job_type); } /* Varyings are encoded like attributes but not actually sent; we just @@ -2138,14 +2367,14 @@ pandecode_vertex_tiler_postfix_pre( /* Number of descriptors depends on whether there are * non-internal varyings */ - pandecode_attributes(attr_mem, p->varyings, job_no, suffix, varying_count, true); + pandecode_attributes(attr_mem, p->varyings, job_no, suffix, varying_count, true, job_type); } if (p->uniform_buffers) { if (uniform_buffer_count) pandecode_uniform_buffers(p->uniform_buffers, uniform_buffer_count, job_no); else - pandecode_msg("XXX: UBOs specified but not referenced\n"); + pandecode_msg("warn: UBOs specified but not referenced\n"); } else if (uniform_buffer_count) pandecode_msg("XXX: UBOs referenced but not specified\n"); @@ -2154,11 +2383,11 @@ pandecode_vertex_tiler_postfix_pre( if (p->uniforms) { if (uniform_count) - pandecode_validate_buffer(p->uniforms, uniform_count * 16); + pandecode_uniforms(p->uniforms, uniform_count); else - pandecode_msg("XXX: Uniforms specified but not referenced"); + pandecode_msg("warn: Uniforms specified but not referenced\n"); } else if (uniform_count) - pandecode_msg("XXX: UBOs referenced but not specified\n"); + pandecode_msg("XXX: Uniforms referenced but not specified\n"); if 
(p->texture_trampoline) { struct pandecode_mapped_memory *mmem = pandecode_find_mapped_gpu_mem_containing(p->texture_trampoline); @@ -2210,11 +2439,14 @@ pandecode_vertex_tiler_postfix_pre( pandecode_prop("min_lod = FIXED_16(%f)", DECODE_FIXED_16(s->min_lod)); pandecode_prop("max_lod = FIXED_16(%f)", DECODE_FIXED_16(s->max_lod)); + if (s->lod_bias) + pandecode_prop("lod_bias = FIXED_16(%f)", DECODE_FIXED_16(s->lod_bias)); + pandecode_prop("wrap_s = %s", pandecode_wrap_mode(s->wrap_s)); pandecode_prop("wrap_t = %s", pandecode_wrap_mode(s->wrap_t)); pandecode_prop("wrap_r = %s", pandecode_wrap_mode(s->wrap_r)); - pandecode_prop("compare_func = %s", pandecode_alt_func(s->compare_func)); + pandecode_prop("compare_func = %s", pandecode_func(s->compare_func)); if (s->zero || s->zero2) { pandecode_msg("XXX: sampler zero tripped\n"); @@ -2239,7 +2471,10 @@ pandecode_vertex_tiler_postfix_pre( static void pandecode_vertex_tiler_postfix(const struct mali_vertex_tiler_postfix *p, int job_no, bool is_bifrost) { - if (!(p->position_varying || p->occlusion_counter || p->flags)) + if (p->shader & 0xF) + pandecode_msg("warn: shader tagged %X\n", (unsigned) (p->shader & 0xF)); + + if (!(p->position_varying || p->occlusion_counter)) return; pandecode_log(".postfix = {\n"); @@ -2248,9 +2483,6 @@ pandecode_vertex_tiler_postfix(const struct mali_vertex_tiler_postfix *p, int jo MEMORY_PROP(p, position_varying); MEMORY_PROP(p, occlusion_counter); - if (p->flags) - pandecode_prop("flags = %d", p->flags); - pandecode_indent--; pandecode_log("},\n"); } @@ -2410,11 +2642,11 @@ pandecode_tiler_only_bfr(const struct bifrost_tiler_only *t, int job_no) static int pandecode_vertex_job_bfr(const struct mali_job_descriptor_header *h, const struct pandecode_mapped_memory *mem, - mali_ptr payload, int job_no) + mali_ptr payload, int job_no, unsigned gpu_id) { struct bifrost_payload_vertex *PANDECODE_PTR_VAR(v, mem, payload); - pandecode_vertex_tiler_postfix_pre(&v->postfix, job_no, h->job_type, "", true); + pandecode_vertex_tiler_postfix_pre(&v->postfix, job_no, h->job_type, "", true, gpu_id); pandecode_log("struct bifrost_payload_vertex payload_%d = {\n", job_no); pandecode_indent++; @@ -2436,13 +2668,11 @@ pandecode_vertex_job_bfr(const struct mali_job_descriptor_header *h, static int pandecode_tiler_job_bfr(const struct mali_job_descriptor_header *h, const struct pandecode_mapped_memory *mem, - mali_ptr payload, int job_no) + mali_ptr payload, int job_no, unsigned gpu_id) { struct bifrost_payload_tiler *PANDECODE_PTR_VAR(t, mem, payload); - pandecode_vertex_tiler_postfix_pre(&t->postfix, job_no, h->job_type, "", true); - - pandecode_indices(t->prefix.indices, t->prefix.index_count, job_no); + pandecode_vertex_tiler_postfix_pre(&t->postfix, job_no, h->job_type, "", true, gpu_id); pandecode_tiler_meta(t->tiler.tiler_meta, job_no); pandecode_log("struct bifrost_payload_tiler payload_%d = {\n", job_no); @@ -2465,13 +2695,11 @@ pandecode_tiler_job_bfr(const struct mali_job_descriptor_header *h, static int pandecode_vertex_or_tiler_job_mdg(const struct mali_job_descriptor_header *h, const struct pandecode_mapped_memory *mem, - mali_ptr payload, int job_no) + mali_ptr payload, int job_no, unsigned gpu_id) { struct midgard_payload_vertex_tiler *PANDECODE_PTR_VAR(v, mem, payload); - pandecode_vertex_tiler_postfix_pre(&v->postfix, job_no, h->job_type, "", false); - - pandecode_indices(v->prefix.indices, v->prefix.index_count, job_no); + pandecode_vertex_tiler_postfix_pre(&v->postfix, job_no, h->job_type, "", false, gpu_id); 
pandecode_log("struct midgard_payload_vertex_tiler payload_%d = {\n", job_no); pandecode_indent++; @@ -2479,11 +2707,10 @@ pandecode_vertex_or_tiler_job_mdg(const struct mali_job_descriptor_header *h, bool has_primitive_pointer = v->prefix.unknown_draw & MALI_DRAW_VARYING_SIZE; pandecode_primitive_size(v->primitive_size, !has_primitive_pointer); - bool instanced = v->instance_shift || v->instance_odd; bool is_graphics = (h->job_type == JOB_TYPE_VERTEX) || (h->job_type == JOB_TYPE_TILER); pandecode_log(".prefix = "); - pandecode_vertex_tiler_prefix(&v->prefix, job_no, !instanced && is_graphics); + pandecode_vertex_tiler_prefix(&v->prefix, job_no, is_graphics); pandecode_gl_enables(v->gl_enables, h->job_type); @@ -2515,73 +2742,91 @@ pandecode_vertex_or_tiler_job_mdg(const struct mali_job_descriptor_header *h, static int pandecode_fragment_job(const struct pandecode_mapped_memory *mem, mali_ptr payload, int job_no, - bool is_bifrost) + bool is_bifrost, unsigned gpu_id) { const struct mali_payload_fragment *PANDECODE_PTR_VAR(s, mem, payload); - bool fbd_dumped = false; - - if (!is_bifrost && (s->framebuffer & FBD_TYPE) == MALI_SFBD) { - /* Only SFBDs are understood, not MFBDs. We're speculating, - * based on the versioning, kernel code, etc, that the - * difference is between Single FrameBuffer Descriptor and - * Multiple FrmaeBuffer Descriptor; the change apparently lines - * up with multi-framebuffer support being added (T7xx onwards, - * including Gxx). In any event, there's some field shuffling - * that we haven't looked into yet. */ - - pandecode_sfbd(s->framebuffer & FBD_MASK, job_no, true); - fbd_dumped = true; - } else if ((s->framebuffer & FBD_TYPE) == MALI_MFBD) { - /* We don't know if Bifrost supports SFBD's at all, since the - * driver never uses them. And the format is different from - * Midgard anyways, due to the tiler heap and scratchpad being - * moved out into separate structures, so it's not clear what a - * Bifrost SFBD would even look like without getting an actual - * trace, which appears impossible. - */ + bool is_mfbd = s->framebuffer & MALI_MFBD; - pandecode_mfbd_bfr(s->framebuffer & FBD_MASK, job_no, true); - fbd_dumped = true; - } + /* Bifrost theoretically may retain support for SFBD on compute jobs, + * but for graphics workloads with a FRAGMENT payload, use MFBD */ - uintptr_t p = (uintptr_t) s->framebuffer & FBD_MASK; - pandecode_log("struct mali_payload_fragment payload_%"PRIx64"_%d = {\n", payload, job_no); - pandecode_indent++; + if (!is_mfbd && is_bifrost) + pandecode_msg("XXX: Bifrost fragment must use MFBD\n"); - /* See the comments by the macro definitions for mathematical context - * on why this is so weird */ + struct pandecode_fbd info; - if (MALI_TILE_COORD_FLAGS(s->max_tile_coord) || MALI_TILE_COORD_FLAGS(s->min_tile_coord)) - pandecode_msg("Tile coordinate flag missed, replay wrong\n"); + if (is_mfbd) + info = pandecode_mfbd_bfr(s->framebuffer & FBD_MASK, job_no, true); + else + info = pandecode_sfbd(s->framebuffer & FBD_MASK, job_no, true, gpu_id); - pandecode_prop("min_tile_coord = MALI_COORDINATE_TO_TILE_MIN(%d, %d)", - MALI_TILE_COORD_X(s->min_tile_coord) << MALI_TILE_SHIFT, - MALI_TILE_COORD_Y(s->min_tile_coord) << MALI_TILE_SHIFT); + /* Compute the tag for the tagged pointer. 
This contains the type of + * FBD (MFBD/SFBD), and in the case of an MFBD, information about which + * additional structures follow the MFBD header (an extra payload or + * not, as well as a count of render targets) */ - pandecode_prop("max_tile_coord = MALI_COORDINATE_TO_TILE_MAX(%d, %d)", - (MALI_TILE_COORD_X(s->max_tile_coord) + 1) << MALI_TILE_SHIFT, - (MALI_TILE_COORD_Y(s->max_tile_coord) + 1) << MALI_TILE_SHIFT); + unsigned expected_tag = is_mfbd ? MALI_MFBD : 0; - /* If the FBD was just decoded, we can refer to it by pointer. If not, - * we have to fallback on offsets. */ + if (is_mfbd) { + if (info.has_extra) + expected_tag |= MALI_MFBD_TAG_EXTRA; - const char *fbd_type = s->framebuffer & MALI_MFBD ? "MALI_MFBD" : "MALI_SFBD"; + expected_tag |= (MALI_POSITIVE(info.rt_count) << 2); + } - /* TODO: Decode */ - unsigned extra_flags = (s->framebuffer & ~FBD_MASK) & ~MALI_MFBD; - - if (fbd_dumped) - pandecode_prop("framebuffer = framebuffer_%d_p | %s | 0x%X", job_no, - fbd_type, extra_flags); - else { - char *a = pointer_as_memory_reference(p); - pandecode_prop("framebuffer = %s | %s | 0x%X", a, fbd_type, extra_flags); - free(a); + if ((s->min_tile_coord | s->max_tile_coord) & ~(MALI_X_COORD_MASK | MALI_Y_COORD_MASK)) { + pandecode_msg("XXX: unexpected tile coordinate bits\n"); + pandecode_prop("min_tile_coord = 0x%X\n", s->min_tile_coord); + pandecode_prop("max_tile_coord = 0x%X\n", s->min_tile_coord); } - pandecode_indent--; - pandecode_log("};\n"); + /* Extract tile coordinates */ + + unsigned min_x = MALI_TILE_COORD_X(s->min_tile_coord) << MALI_TILE_SHIFT; + unsigned min_y = MALI_TILE_COORD_Y(s->min_tile_coord) << MALI_TILE_SHIFT; + + unsigned max_x = (MALI_TILE_COORD_X(s->max_tile_coord) + 1) << MALI_TILE_SHIFT; + unsigned max_y = (MALI_TILE_COORD_Y(s->max_tile_coord) + 1) << MALI_TILE_SHIFT; + + /* For the max, we also want the floored (rather than ceiled) version for checking */ + + unsigned max_x_f = (MALI_TILE_COORD_X(s->max_tile_coord)) << MALI_TILE_SHIFT; + unsigned max_y_f = (MALI_TILE_COORD_Y(s->max_tile_coord)) << MALI_TILE_SHIFT; + + /* Validate the coordinates are well-ordered */ + + if (min_x == max_x) + pandecode_msg("XXX: empty X coordinates (%u = %u)\n", min_x, max_x); + else if (min_x > max_x) + pandecode_msg("XXX: misordered X coordinates (%u > %u)\n", min_x, max_x); + + if (min_y == max_y) + pandecode_msg("XXX: empty X coordinates (%u = %u)\n", min_x, max_x); + else if (min_y > max_y) + pandecode_msg("XXX: misordered X coordinates (%u > %u)\n", min_x, max_x); + + /* Validate the coordinates fit inside the framebuffer. We use floor, + * rather than ceil, for the max coordinates, since the tile + * coordinates for something like an 800x600 framebuffer will actually + * resolve to 800x608, which would otherwise trigger a Y-overflow */ + + if ((min_x > info.width) || (max_x_f > info.width)) + pandecode_msg("XXX: tile coordinates overflow in X direction\n"); + + if ((min_y > info.height) || (max_y_f > info.height)) + pandecode_msg("XXX: tile coordinates overflow in Y direction\n"); + + /* After validation, we print */ + + pandecode_log("fragment (%u, %u) ... 
(%u, %u)\n\n", min_x, min_y, max_x, max_y); + + /* The FBD is a tagged pointer */ + + unsigned tag = (s->framebuffer & ~FBD_MASK); + + if (tag != expected_tag) + pandecode_msg("XXX: expected FBD tag %X but got %X\n", expected_tag, tag); return sizeof(*s); } @@ -2589,7 +2834,7 @@ pandecode_fragment_job(const struct pandecode_mapped_memory *mem, static int job_descriptor_number = 0; int -pandecode_jc(mali_ptr jc_gpu_va, bool bifrost) +pandecode_jc(mali_ptr jc_gpu_va, bool bifrost, unsigned gpu_id) { struct mali_job_descriptor_header *h; @@ -2614,8 +2859,7 @@ pandecode_jc(mali_ptr jc_gpu_va, bool bifrost) h->job_type != JOB_TYPE_FRAGMENT ? 4 : 0; mali_ptr payload_ptr = jc_gpu_va + sizeof(*h) - offset; - payload = pandecode_fetch_gpu_mem(mem, payload_ptr, - MALI_PAYLOAD_SIZE); + payload = pandecode_fetch_gpu_mem(mem, payload_ptr, 256); int job_no = job_descriptor_number++; @@ -2670,12 +2914,23 @@ pandecode_jc(mali_ptr jc_gpu_va, bool bifrost) * reason. */ switch (h->job_type) { - case JOB_TYPE_SET_VALUE: { - struct mali_payload_set_value *s = payload; - pandecode_log("struct mali_payload_set_value payload_%"PRIx64"_%d = {\n", payload_ptr, job_no); + case JOB_TYPE_WRITE_VALUE: { + struct mali_payload_write_value *s = payload; + pandecode_log("struct mali_payload_write_value payload_%"PRIx64"_%d = {\n", payload_ptr, job_no); pandecode_indent++; - MEMORY_PROP(s, out); - pandecode_prop("unknown = 0x%" PRIX64, s->unknown); + MEMORY_PROP(s, address); + + if (s->value_descriptor != MALI_WRITE_VALUE_ZERO) { + pandecode_msg("XXX: unknown value descriptor\n"); + pandecode_prop("value_descriptor = 0x%" PRIX32, s->value_descriptor); + } + + if (s->reserved) { + pandecode_msg("XXX: set value tripped\n"); + pandecode_prop("reserved = 0x%" PRIX32, s->reserved); + } + + pandecode_prop("immediate = 0x%" PRIX64, s->immediate); pandecode_indent--; pandecode_log("};\n"); @@ -2687,16 +2942,16 @@ pandecode_jc(mali_ptr jc_gpu_va, bool bifrost) case JOB_TYPE_COMPUTE: if (bifrost) { if (h->job_type == JOB_TYPE_TILER) - pandecode_tiler_job_bfr(h, mem, payload_ptr, job_no); + pandecode_tiler_job_bfr(h, mem, payload_ptr, job_no, gpu_id); else - pandecode_vertex_job_bfr(h, mem, payload_ptr, job_no); + pandecode_vertex_job_bfr(h, mem, payload_ptr, job_no, gpu_id); } else - pandecode_vertex_or_tiler_job_mdg(h, mem, payload_ptr, job_no); + pandecode_vertex_or_tiler_job_mdg(h, mem, payload_ptr, job_no, gpu_id); break; case JOB_TYPE_FRAGMENT: - pandecode_fragment_job(mem, payload_ptr, job_no, bifrost); + pandecode_fragment_job(mem, payload_ptr, job_no, bifrost, gpu_id); break; default: @@ -2707,16 +2962,12 @@ pandecode_jc(mali_ptr jc_gpu_va, bool bifrost) if (!first) { pandecode_log("((struct mali_job_descriptor_header *) (uintptr_t) job_%d_p)->", job_no - 1); - - if (last_size) - pandecode_log_cont("next_job_64 = job_%d_p;\n\n", job_no); - else - pandecode_log_cont("next_job_32 = (u32) (uintptr_t) job_%d_p;\n\n", job_no); + pandecode_log_cont("next_job = job_%d_p;\n\n", job_no); } first = false; - } while ((jc_gpu_va = h->job_descriptor_size ? h->next_job_64 : h->next_job_32)); + } while ((jc_gpu_va = h->next_job)); return start_number; }
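
The reworked prefix decode above no longer reads per-field size_y_shift/workgroups_*_shift members; it pulls all six shift fields out of a single packed invocation_shifts word and then slices invocation_count with them. Below is a minimal stand-alone sketch of that unpacking, assuming the bit ranges the decoder uses ([0,5), [5,10), [10,16), [16,22), [22,28), [28,32)); the struct and function names are illustrative stand-ins, not the real Mali job definitions.

#include <stdint.h>

/* return bits [lo, hi) of word, as the bits() helper in decode.c does */
static uint32_t
bits(uint32_t word, uint32_t lo, uint32_t hi)
{
        if (hi - lo >= 32)
                return word;

        return (word >> lo) & ((1u << (hi - lo)) - 1);
}

struct invocation_info {
        unsigned size[3];   /* local size (x, y, z), stored off-by-one */
        unsigned groups[3]; /* workgroup counts (x, y, z), stored off-by-one */
};

static struct invocation_info
unpack_invocations(uint32_t invocation_count, uint32_t invocation_shifts)
{
        unsigned size_y_shift       = bits(invocation_shifts,  0,  5);
        unsigned size_z_shift       = bits(invocation_shifts,  5, 10);
        unsigned workgroups_x_shift = bits(invocation_shifts, 10, 16);
        unsigned workgroups_y_shift = bits(invocation_shifts, 16, 22);
        unsigned workgroups_z_shift = bits(invocation_shifts, 22, 28);
        /* bits [28, 32) hold workgroups_x_shift_2, not needed for the decode */

        struct invocation_info i;

        i.size[0]   = bits(invocation_count, 0, size_y_shift) + 1;
        i.size[1]   = bits(invocation_count, size_y_shift, size_z_shift) + 1;
        i.size[2]   = bits(invocation_count, size_z_shift, workgroups_x_shift) + 1;

        i.groups[0] = bits(invocation_count, workgroups_x_shift, workgroups_y_shift) + 1;
        i.groups[1] = bits(invocation_count, workgroups_y_shift, workgroups_z_shift) + 1;
        i.groups[2] = bits(invocation_count, workgroups_z_shift, 32) + 1;

        return i;
}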
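
The prefix decode also validates the index buffer: a two-bit size field extracted from unknown_draw encodes the index type, and the buffer must hold index_count entries of that size. The sketch below spells out the size mapping the decoder implies (0 = non-indexed, 1 = one byte, 2 = two bytes, 3 = four bytes); the helper names are hypothetical.

#include <stddef.h>

/* Byte size of one index, given the raw two-bit field from unknown_draw */
static unsigned
index_size_bytes(unsigned index_raw_size)
{
        if (!index_raw_size)
                return 0; /* not an indexed draw */

        return (index_raw_size == 0x3) ? 4 : index_raw_size;
}

/* Minimum buffer size the decoder checks for an indexed draw */
static size_t
index_buffer_min_size(unsigned index_raw_size, unsigned index_count)
{
        return (size_t) index_count * index_size_bytes(index_raw_size);
}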
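
pandecode_uniform_buffers now prints each uniform buffer as a single line, ubuf_i[size] = pointer, instead of a full struct dump. The arithmetic it performs is spelled out below; the struct is a simplified stand-in for mali_uniform_buffer_meta (the real descriptor packs these fields as bitfields), and mali_ptr is typedef'd locally so the sketch is self-contained.

#include <stdint.h>

typedef uint64_t mali_ptr;

struct ubo_meta_example {
        uint64_t size; /* encoded as (bytes / 16) - 1 */
        uint64_t ptr;  /* encoded as (address >> 2) */
};

/* Size in bytes: stored in 16-byte (vec4) units, off by one */
static unsigned
ubo_size_bytes(const struct ubo_meta_example *u)
{
        return (u->size + 1) * 16;
}

/* GPU address: the low two bits are not stored, so the buffer address
 * is assumed to be at least 4-byte aligned */
static mali_ptr
ubo_address(const struct ubo_meta_example *u)
{
        return u->ptr << 2;
}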
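
At the other end of the patch, pandecode_fragment_job reconstructs the tag it expects on the fragment payload's framebuffer pointer: the MFBD type bit, an extra bit when a bifrost_fb_extra block follows the header, and the render-target count minus one starting at bit 2. The sketch below composes that expected tag; the EXAMPLE_* constants are assumed stand-ins for MALI_MFBD and MALI_MFBD_TAG_EXTRA, and only the layout is taken from the decoder.

#include <stdbool.h>

#define EXAMPLE_MFBD_BIT       (1 << 0) /* stand-in for MALI_MFBD */
#define EXAMPLE_MFBD_EXTRA_BIT (1 << 1) /* stand-in for MALI_MFBD_TAG_EXTRA */

static unsigned
expected_fbd_tag(bool is_mfbd, bool has_extra, unsigned rt_count)
{
        /* SFBD framebuffers are expected to carry a zero tag */
        if (!is_mfbd)
                return 0;

        unsigned tag = EXAMPLE_MFBD_BIT;

        if (has_extra)
                tag |= EXAMPLE_MFBD_EXTRA_BIT;

        /* Render-target count is biased by one (MALI_POSITIVE) */
        tag |= (rt_count - 1) << 2;

        return tag;
}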