#include <memory.h>
#include <stdbool.h>
#include <stdarg.h>
+#include <ctype.h>
#include "decode.h"
+#include "util/macros.h"
#include "util/u_math.h"
#include "pan_pretty_print.h"
#include "midgard/disassemble.h"
#include "bifrost/disassemble.h"
-int pandecode_jc(mali_ptr jc_gpu_va, bool bifrost);
+#include "pan_encoder.h"
+
+static void pandecode_swizzle(unsigned swizzle, enum mali_format format);
#define MEMORY_PROP(obj, p) {\
if (obj->p) { \
} \
}
-#define DYN_MEMORY_PROP(obj, no, p) { \
- if (obj->p) \
- pandecode_prop("%s = %s_%d_p", #p, #p, no); \
-}
-
/* Semantic logging type.
*
* Raw: for raw messages to be printed as is.
va_end(ap);
}
+/* To check for memory safety issues, validates that the given pointer in GPU
+ * memory is valid, containing at least sz bytes. The goal is to eliminate
+ * GPU-side memory bugs (NULL pointer dereferences, buffer overflows, or buffer
+ * overruns) by statically validating pointers.
+ */
+
+static void
+pandecode_validate_buffer(mali_ptr addr, size_t sz)
+{
+ if (!addr) {
+ pandecode_msg("XXX: null pointer deref\n");
+ return;
+ }
+
+ /* Find a BO */
+
+ struct pandecode_mapped_memory *bo =
+ pandecode_find_mapped_gpu_mem_containing(addr);
+
+ if (!bo) {
+ pandecode_msg("XXX: invalid memory dereference\n");
+ return;
+ }
+
+ /* Bounds check */
+
+ unsigned offset = addr - bo->gpu_va;
+ unsigned total = offset + sz;
+
+ if (total > bo->length) {
+ pandecode_msg("XXX: buffer overrun. "
+ "Chunk of size %zu at offset %u in buffer of size %zu. "
+ "Overrun by %u bytes. \n",
+ sz, offset, bo->length, total - bo->length);
+ return;
+ }
+}
+
struct pandecode_flag_info {
u64 flag;
const char *name;
FLAG_INFO(HAS_MSAA),
FLAG_INFO(CAN_DISCARD),
FLAG_INFO(HAS_BLEND_SHADER),
- FLAG_INFO(DEPTH_TEST),
+ FLAG_INFO(DEPTH_WRITEMASK),
{}
};
};
#undef FLAG_INFO
-#define FLAG_INFO(flag) { MALI_FRAMEBUFFER_##flag, "MALI_FRAMEBUFFER_" #flag }
-static const struct pandecode_flag_info fb_fmt_flag_info[] = {
- FLAG_INFO(MSAA_A),
- FLAG_INFO(MSAA_B),
- FLAG_INFO(MSAA_8),
- {}
-};
-#undef FLAG_INFO
-
#define FLAG_INFO(flag) { MALI_MFBD_FORMAT_##flag, "MALI_MFBD_FORMAT_" #flag }
static const struct pandecode_flag_info mfbd_fmt_flag_info[] = {
FLAG_INFO(MSAA),
#define FLAG_INFO(flag) { MALI_##flag, "MALI_" #flag }
static const struct pandecode_flag_info shader_midgard1_flag_info [] = {
FLAG_INFO(EARLY_Z),
- FLAG_INFO(HELPER_INVOCATIONS),
FLAG_INFO(READS_TILEBUFFER),
FLAG_INFO(READS_ZS),
{}
};
#undef FLAG_INFO
+#define FLAG_INFO(flag) { MALI_SFBD_FORMAT_##flag, "MALI_SFBD_FORMAT_" #flag }
+static const struct pandecode_flag_info sfbd_unk1_info [] = {
+ FLAG_INFO(MSAA_8),
+ FLAG_INFO(MSAA_A),
+ {}
+};
+#undef FLAG_INFO
+
+#define FLAG_INFO(flag) { MALI_SFBD_FORMAT_##flag, "MALI_SFBD_FORMAT_" #flag }
+static const struct pandecode_flag_info sfbd_unk2_info [] = {
+ FLAG_INFO(MSAA_B),
+ FLAG_INFO(SRGB),
+ {}
+};
+#undef FLAG_INFO
+
extern char *replace_fragment;
extern char *replace_vertex;
DEFINE_CASE(QUAD_STRIP);
default:
- return "MALI_TRIANGLES /* XXX: Unknown GL mode, check dump */";
+ pandecode_msg("XXX: invalid draw mode %X\n", mode);
+ return "";
}
#undef DEFINE_CASE
DEFINE_CASE(ALWAYS);
default:
- return "MALI_FUNC_NEVER /* XXX: Unknown function, check dump */";
+ pandecode_msg("XXX: invalid func %X\n", mode);
+ return "";
}
}
#undef DEFINE_CASE
DEFINE_CASE(ALWAYS);
default:
- return "MALI_FUNC_NEVER /* XXX: Unknown function, check dump */";
+ pandecode_msg("XXX: invalid alt func %X\n", mode);
+ return "";
}
}
#undef DEFINE_CASE
DEFINE_CASE(DECR);
default:
- return "MALI_STENCIL_KEEP /* XXX: Unknown stencil op, check dump */";
+ pandecode_msg("XXX: invalid stencil op %X\n", op);
+ return "";
}
}
#undef DEFINE_CASE
-#define DEFINE_CASE(name) case MALI_ATTR_ ## name: return "MALI_ATTR_" #name
-static char *pandecode_attr_mode(enum mali_attr_mode mode)
+static char *pandecode_attr_mode_short(enum mali_attr_mode mode)
{
switch(mode) {
- DEFINE_CASE(UNUSED);
- DEFINE_CASE(LINEAR);
- DEFINE_CASE(POT_DIVIDE);
- DEFINE_CASE(MODULO);
- DEFINE_CASE(NPOT_DIVIDE);
- DEFINE_CASE(IMAGE);
- DEFINE_CASE(INTERNAL);
+ /* TODO: Combine to just "instanced" once this can be done
+ * unambiguously in all known cases */
+ case MALI_ATTR_POT_DIVIDE:
+ return "instanced_pot";
+ case MALI_ATTR_MODULO:
+ return "instanced_mod";
+ case MALI_ATTR_NPOT_DIVIDE:
+ return "instanced_npot";
+ case MALI_ATTR_IMAGE:
+ return "image";
+ case MALI_ATTR_INTERNAL:
+ return "internal";
default:
- return "MALI_ATTR_UNUSED /* XXX: Unknown stencil op, check dump */";
+ pandecode_msg("XXX: invalid attribute mode %X\n", mode);
+ return "";
}
}
-#undef DEFINE_CASE
-
-#define DEFINE_CASE(name) case MALI_CHANNEL_## name: return "MALI_CHANNEL_" #name
-static char *
-pandecode_channel(enum mali_channel channel)
+static const char *
+pandecode_special_varying(uint64_t v)
{
- switch (channel) {
- DEFINE_CASE(RED);
- DEFINE_CASE(GREEN);
- DEFINE_CASE(BLUE);
- DEFINE_CASE(ALPHA);
- DEFINE_CASE(ZERO);
- DEFINE_CASE(ONE);
- DEFINE_CASE(RESERVED_0);
- DEFINE_CASE(RESERVED_1);
-
+ switch(v) {
+ case MALI_VARYING_FRAG_COORD:
+ return "gl_FragCoord";
+ case MALI_VARYING_FRONT_FACING:
+ return "gl_FrontFacing";
+ case MALI_VARYING_POINT_COORD:
+ return "gl_PointCoord";
default:
- return "MALI_CHANNEL_ZERO /* XXX: Unknown channel, check dump */";
+ pandecode_msg("XXX: invalid special varying %" PRIx64 "\n", v);
+ return "";
}
}
-#undef DEFINE_CASE
#define DEFINE_CASE(name) case MALI_WRAP_## name: return "MALI_WRAP_" #name
static char *
DEFINE_CASE(MIRRORED_REPEAT);
default:
- return "MALI_WRAP_REPEAT /* XXX: Unknown wrap mode, check dump */";
+ pandecode_msg("XXX: invalid wrap mode %X\n", op);
+ return "";
}
}
#undef DEFINE_CASE
-#define DEFINE_CASE(name) case MALI_TEX_## name: return "MALI_TEX_" #name
+#define DEFINE_CASE(name) case MALI_BLOCK_## name: return "MALI_BLOCK_" #name
static char *
-pandecode_texture_type(enum mali_texture_type type)
+pandecode_block_format(enum mali_block_format fmt)
{
- switch (type) {
- DEFINE_CASE(1D);
- DEFINE_CASE(2D);
- DEFINE_CASE(3D);
- DEFINE_CASE(CUBE);
+ switch (fmt) {
+ DEFINE_CASE(TILED);
+ DEFINE_CASE(UNKNOWN);
+ DEFINE_CASE(LINEAR);
+ DEFINE_CASE(AFBC);
default:
- unreachable("Unknown case");
+ unreachable("Invalid case");
}
}
#undef DEFINE_CASE
-#define DEFINE_CASE(name) case MALI_MFBD_BLOCK_## name: return "MALI_MFBD_BLOCK_" #name
+#define DEFINE_CASE(name) case MALI_EXCEPTION_ACCESS_## name: return ""#name
static char *
-pandecode_mfbd_block_format(enum mali_mfbd_block_format fmt)
+pandecode_exception_access(enum mali_exception_access access)
{
- switch (fmt) {
- DEFINE_CASE(TILED);
- DEFINE_CASE(UNKNOWN);
- DEFINE_CASE(LINEAR);
- DEFINE_CASE(AFBC);
+ switch (access) {
+ DEFINE_CASE(NONE);
+ DEFINE_CASE(EXECUTE);
+ DEFINE_CASE(READ);
+ DEFINE_CASE(WRITE);
default:
unreachable("Invalid case");
* larger FBD */
static void
-pandecode_midgard_tiler_descriptor(const struct midgard_tiler_descriptor *t)
+pandecode_midgard_tiler_descriptor(
+ const struct midgard_tiler_descriptor *t,
+ unsigned width,
+ unsigned height,
+ bool is_fragment)
{
pandecode_log(".tiler = {\n");
pandecode_indent++;
- pandecode_prop("hierarchy_mask = 0x%" PRIx16, t->hierarchy_mask);
- pandecode_prop("flags = 0x%" PRIx16, t->flags);
- pandecode_prop("polygon_list_size = 0x%x", t->polygon_list_size);
+ if (t->hierarchy_mask == MALI_TILER_DISABLED)
+ pandecode_prop("hierarchy_mask = MALI_TILER_DISABLED");
+ else
+ pandecode_prop("hierarchy_mask = 0x%" PRIx16, t->hierarchy_mask);
+
+ /* We know this name from the kernel, but we never see it nonzero */
+
+ if (t->flags)
+ pandecode_msg("XXX: unexpected tiler flags 0x%" PRIx16 "\n", t->flags);
MEMORY_PROP(t, polygon_list);
- MEMORY_PROP(t, polygon_list_body);
+
+ /* The body is offset from the base of the polygon list */
+ assert(t->polygon_list_body > t->polygon_list);
+ unsigned body_offset = t->polygon_list_body - t->polygon_list;
+
+ /* It needs to fit inside the reported size */
+ assert(t->polygon_list_size >= body_offset);
+
+ /* Check that we fit */
+ struct pandecode_mapped_memory *plist =
+ pandecode_find_mapped_gpu_mem_containing(t->polygon_list);
+
+ assert(t->polygon_list_size <= plist->length);
+
+ /* Now that we've sanity checked, we'll try to calculate the sizes
+ * ourselves for comparison */
+
+ unsigned ref_header = panfrost_tiler_header_size(width, height, t->hierarchy_mask);
+ unsigned ref_size = panfrost_tiler_full_size(width, height, t->hierarchy_mask);
+
+ if (!((ref_header == body_offset) && (ref_size == t->polygon_list_size))) {
+ pandecode_msg("XXX: bad polygon list size (expected %d / 0x%x)\n",
+ ref_header, ref_size);
+ pandecode_prop("polygon_list_size = 0x%x", t->polygon_list_size);
+ pandecode_msg("body offset %d\n", body_offset);
+ }
+
+ /* The tiler heap has a start and end specified -- it should be
+ * identical to what we have in the BO. The exception is if tiling is
+ * disabled. */
MEMORY_PROP(t, heap_start);
+ assert(t->heap_end >= t->heap_start);
+
+ struct pandecode_mapped_memory *heap =
+ pandecode_find_mapped_gpu_mem_containing(t->heap_start);
+
+ unsigned heap_size = t->heap_end - t->heap_start;
- if (t->heap_start == t->heap_end) {
- /* Print identically to show symmetry for empty tiler heaps */
- MEMORY_PROP(t, heap_start);
+ /* Tiling is enabled with a special flag */
+ unsigned hierarchy_mask = t->hierarchy_mask & MALI_HIERARCHY_MASK;
+ unsigned tiler_flags = t->hierarchy_mask ^ hierarchy_mask;
+
+ bool tiling_enabled = hierarchy_mask;
+
+ if (tiling_enabled) {
+ /* When tiling is enabled, the heap should be a tight fit */
+ unsigned heap_offset = t->heap_start - heap->gpu_va;
+ if ((heap_offset + heap_size) != heap->length) {
+ pandecode_msg("XXX: heap size %u (expected %zu)\n",
+ heap_size, heap->length - heap_offset);
+ }
+
+ /* We should also have no other flags */
+ if (tiler_flags)
+ pandecode_msg("XXX: unexpected tiler %X\n", tiler_flags);
} else {
- /* Points to the end of a buffer */
- char *a = pointer_as_memory_reference(t->heap_end - 1);
- pandecode_prop("heap_end = %s + 1", a);
- free(a);
+ /* When tiling is disabled, we should have that flag and no others */
+
+ if (tiler_flags != MALI_TILER_DISABLED) {
+ pandecode_msg("XXX: unexpected tiler flag %X, expected MALI_TILER_DISABLED\n",
+ tiler_flags);
+ }
+
+ /* We should also have an empty heap */
+ if (heap_size) {
+ pandecode_msg("XXX: tiler heap size %d given, expected empty\n",
+ heap_size);
+ }
+
+ /* Disabled tiling is used only for clear-only jobs, which are
+ * purely FRAGMENT, so we should never see this for
+ * non-FRAGMENT descriptors. */
+
+ if (!is_fragment)
+ pandecode_msg("XXX: tiler disabled for non-FRAGMENT job\n");
}
+ /* We've never seen weights used in practice, but we know from the
+ * kernel these fields are there */
+
bool nonzero_weights = false;
for (unsigned w = 0; w < ARRAY_SIZE(t->weights); ++w) {
pandecode_log("}\n");
}
+/* Information about the framebuffer passed back for
+ * additional analysis */
+
+struct pandecode_fbd {
+ unsigned width;
+ unsigned height;
+ unsigned rt_count;
+ bool has_extra;
+};
+
static void
-pandecode_sfbd(uint64_t gpu_va, int job_no)
+pandecode_sfbd_format(struct mali_sfbd_format format)
+{
+ pandecode_log(".format = {\n");
+ pandecode_indent++;
+
+ pandecode_log(".unk1 = ");
+ pandecode_log_decoded_flags(sfbd_unk1_info, format.unk1);
+ pandecode_log_cont(",\n");
+
+ /* TODO: Map formats so we can check swizzles and print nicely */
+ pandecode_log("swizzle");
+ pandecode_swizzle(format.swizzle, MALI_RGBA8_UNORM);
+ pandecode_log_cont(",\n");
+
+ pandecode_prop("nr_channels = MALI_POSITIVE(%d)",
+ MALI_NEGATIVE(format.nr_channels));
+
+ pandecode_log(".unk2 = ");
+ pandecode_log_decoded_flags(sfbd_unk2_info, format.unk2);
+ pandecode_log_cont(",\n");
+
+ pandecode_prop("block = %s", pandecode_block_format(format.block));
+
+ pandecode_prop("unk3 = 0x%" PRIx32, format.unk3);
+
+ pandecode_indent--;
+ pandecode_log("},\n");
+}
+
+static struct pandecode_fbd
+pandecode_sfbd(uint64_t gpu_va, int job_no, bool is_fragment)
{
struct pandecode_mapped_memory *mem = pandecode_find_mapped_gpu_mem_containing(gpu_va);
const struct mali_single_framebuffer *PANDECODE_PTR_VAR(s, mem, (mali_ptr) gpu_va);
+ struct pandecode_fbd info = {
+ .has_extra = false,
+ .rt_count = 1
+ };
+
pandecode_log("struct mali_single_framebuffer framebuffer_%"PRIx64"_%d = {\n", gpu_va, job_no);
pandecode_indent++;
pandecode_prop("unknown1 = 0x%" PRIx32, s->unknown1);
pandecode_prop("unknown2 = 0x%" PRIx32, s->unknown2);
- pandecode_log(".format = ");
- pandecode_log_decoded_flags(fb_fmt_flag_info, s->format);
- pandecode_log_cont(",\n");
+ pandecode_sfbd_format(s->format);
- pandecode_prop("width = MALI_POSITIVE(%" PRId16 ")", s->width + 1);
- pandecode_prop("height = MALI_POSITIVE(%" PRId16 ")", s->height + 1);
+ info.width = s->width + 1;
+ info.height = s->height + 1;
+
+ pandecode_prop("width = MALI_POSITIVE(%" PRId16 ")", info.width);
+ pandecode_prop("height = MALI_POSITIVE(%" PRId16 ")", info.height);
+
+ MEMORY_PROP(s, checksum);
+
+ if (s->checksum_stride)
+ pandecode_prop("checksum_stride = %d", s->checksum_stride);
MEMORY_PROP(s, framebuffer);
pandecode_prop("stride = %d", s->stride);
pandecode_log_decoded_flags(clear_flag_info, s->clear_flags);
pandecode_log_cont(",\n");
- if (s->depth_buffer | s->depth_buffer_enable) {
+ if (s->depth_buffer) {
MEMORY_PROP(s, depth_buffer);
- pandecode_prop("depth_buffer_enable = %s", DS_ENABLE(s->depth_buffer_enable));
+ pandecode_prop("depth_stride = %d", s->depth_stride);
}
- if (s->stencil_buffer | s->stencil_buffer_enable) {
+ if (s->stencil_buffer) {
MEMORY_PROP(s, stencil_buffer);
- pandecode_prop("stencil_buffer_enable = %s", DS_ENABLE(s->stencil_buffer_enable));
+ pandecode_prop("stencil_stride = %d", s->stencil_stride);
+ }
+
+ if (s->depth_stride_zero ||
+ s->stencil_stride_zero ||
+ s->zero7 || s->zero8) {
+ pandecode_msg("XXX: Depth/stencil zeros tripped\n");
+ pandecode_prop("depth_stride_zero = 0x%x",
+ s->depth_stride_zero);
+ pandecode_prop("stencil_stride_zero = 0x%x",
+ s->stencil_stride_zero);
+ pandecode_prop("zero7 = 0x%" PRIx32,
+ s->zero7);
+ pandecode_prop("zero8 = 0x%" PRIx32,
+ s->zero8);
}
if (s->clear_color_1 | s->clear_color_2 | s->clear_color_3 | s->clear_color_4) {
MEMORY_PROP(s, unknown_address_0);
const struct midgard_tiler_descriptor t = s->tiler;
- pandecode_midgard_tiler_descriptor(&t);
+ pandecode_midgard_tiler_descriptor(&t, s->width + 1, s->height + 1, is_fragment);
pandecode_indent--;
pandecode_log("};\n");
pandecode_prop("zero1 = 0x%" PRIx64, s->zero1);
pandecode_prop("zero2 = 0x%" PRIx32, s->zero2);
pandecode_prop("zero4 = 0x%" PRIx32, s->zero4);
+ pandecode_prop("zero5 = 0x%" PRIx32, s->zero5);
printf(".zero3 = {");
printf("%X, ", s->zero6[i]);
printf("},\n");
+
+ return info;
}
static void
printf("},\n");
}
+/* Extracts the number of components associated with a Mali format */
+
+static unsigned
+pandecode_format_component_count(enum mali_format fmt)
+{
+ /* Mask out the format class */
+ unsigned top = fmt & 0b11100000;
+
+ switch (top) {
+ case MALI_FORMAT_SNORM:
+ case MALI_FORMAT_UINT:
+ case MALI_FORMAT_UNORM:
+ case MALI_FORMAT_SINT:
+ return ((fmt >> 3) & 3) + 1;
+ default:
+ /* TODO: Validate */
+ return 4;
+ }
+}
+
+/* Extracts a mask of accessed components from a 12-bit Mali swizzle */
+
+static unsigned
+pandecode_access_mask_from_channel_swizzle(unsigned swizzle)
+{
+ unsigned mask = 0;
+ assert(MALI_CHANNEL_RED == 0);
+
+ for (unsigned c = 0; c < 4; ++c) {
+ enum mali_channel chan = (swizzle >> (3*c)) & 0x7;
+
+ if (chan <= MALI_CHANNEL_ALPHA)
+ mask |= (1 << chan);
+ }
+
+ return mask;
+}
+
+/* Validates that a (format, swizzle) pair is valid, in the sense that the
+ * swizzle doesn't access any components that are undefined in the format.
+ * Returns whether the swizzle is trivial (doesn't do any swizzling) and can be
+ * omitted */
+
+static bool
+pandecode_validate_format_swizzle(enum mali_format fmt, unsigned swizzle)
+{
+ unsigned nr_comp = pandecode_format_component_count(fmt);
+ unsigned access_mask = pandecode_access_mask_from_channel_swizzle(swizzle);
+ unsigned valid_mask = (1 << nr_comp) - 1;
+ unsigned invalid_mask = ~valid_mask;
+
+ if (access_mask & invalid_mask) {
+ pandecode_msg("XXX: invalid components accessed\n");
+ return false;
+ }
+
+ /* Check for the default non-swizzling swizzle so we can suppress
+ * useless printing for the defaults */
+
+ unsigned default_swizzles[4] = {
+ MALI_CHANNEL_RED | (MALI_CHANNEL_ZERO << 3) | (MALI_CHANNEL_ZERO << 6) | (MALI_CHANNEL_ONE << 9),
+ MALI_CHANNEL_RED | (MALI_CHANNEL_GREEN << 3) | (MALI_CHANNEL_ZERO << 6) | (MALI_CHANNEL_ONE << 9),
+ MALI_CHANNEL_RED | (MALI_CHANNEL_GREEN << 3) | (MALI_CHANNEL_BLUE << 6) | (MALI_CHANNEL_ONE << 9),
+ MALI_CHANNEL_RED | (MALI_CHANNEL_GREEN << 3) | (MALI_CHANNEL_BLUE << 6) | (MALI_CHANNEL_ALPHA << 9)
+ };
+
+ return (swizzle == default_swizzles[nr_comp - 1]);
+}
+
+/* Maps MALI_RGBA32F to rgba32f, etc */
+
+static void
+pandecode_format_short(enum mali_format fmt, bool srgb)
+{
+ /* We want a type-like format, so cut off the initial MALI_ */
+ char *format = pandecode_format(fmt);
+ format += strlen("MALI_");
+
+ unsigned len = strlen(format);
+ char *lower_format = calloc(1, len + 1);
+
+ for (unsigned i = 0; i < len; ++i)
+ lower_format[i] = tolower(format[i]);
+
+ /* Sanity check sRGB flag is applied to RGB, per the name */
+ if (srgb && lower_format[0] != 'r')
+ pandecode_msg("XXX: sRGB applied to non-colour format\n");
+
+ /* Just prefix with an s, so you get formats like srgba8_unorm */
+ if (srgb)
+ pandecode_log_cont("s");
+
+ pandecode_log_cont("%s", lower_format);
+ free(lower_format);
+}
+
static void
-pandecode_swizzle(unsigned swizzle)
+pandecode_swizzle(unsigned swizzle, enum mali_format format)
{
- pandecode_prop("swizzle = %s | (%s << 3) | (%s << 6) | (%s << 9)",
- pandecode_channel((swizzle >> 0) & 0x7),
- pandecode_channel((swizzle >> 3) & 0x7),
- pandecode_channel((swizzle >> 6) & 0x7),
- pandecode_channel((swizzle >> 9) & 0x7));
+ /* First, do some validation */
+ bool trivial_swizzle = pandecode_validate_format_swizzle(
+ format, swizzle);
+
+ if (trivial_swizzle)
+ return;
+
+ /* Next, print the swizzle */
+ pandecode_log_cont(".");
+
+ static const char components[] = "rgba01";
+
+ for (unsigned c = 0; c < 4; ++c) {
+ enum mali_channel chan = (swizzle >> (3 * c)) & 0x7;
+
+ if (chan >= MALI_CHANNEL_RESERVED_0) {
+ pandecode_log("XXX: invalid swizzle channel %d\n", chan);
+ continue;
+ }
+ pandecode_log_cont("%c", components[chan]);
+ }
}
static void
pandecode_prop("unk2 = 0x%" PRIx32, format.unk2);
pandecode_prop("unk3 = 0x%" PRIx32, format.unk3);
- pandecode_prop("block = %s",
- pandecode_mfbd_block_format(format.block));
+ pandecode_prop("block = %s", pandecode_block_format(format.block));
+
+ /* TODO: Map formats so we can check swizzles and print nicely */
+ pandecode_log("swizzle");
+ pandecode_swizzle(format.swizzle, MALI_RGBA8_UNORM);
+ pandecode_log_cont(",\n");
pandecode_prop("nr_channels = MALI_POSITIVE(%d)",
MALI_NEGATIVE(format.nr_channels));
pandecode_log_decoded_flags(mfbd_fmt_flag_info, format.flags);
pandecode_log_cont(",\n");
- pandecode_swizzle(format.swizzle);
+ /* In theory, the no_preload bit can be cleared to enable MFBD preload,
+ * which is a faster hardware-based alternative to the wallpaper method
+ * to preserve framebuffer contents across frames. In practice, MFBD
+ * preload is buggy on Midgard, and so this is a chicken bit. If this
+ * bit isn't set, most likely something broke unrelated to preload */
- pandecode_prop("unk4 = 0x%" PRIx32, format.unk4);
+ if (!format.no_preload) {
+ pandecode_msg("XXX: buggy MFBD preload enabled - chicken bit should be clear\n");
+ pandecode_prop("no_preload = 0x%" PRIx32, format.no_preload);
+ }
+
+ if (format.zero)
+ pandecode_prop("zero = 0x%" PRIx32, format.zero);
pandecode_indent--;
pandecode_log("},\n");
pandecode_rt_format(rt->format);
- if (rt->format.block == MALI_MFBD_BLOCK_AFBC) {
+ if (rt->format.block == MALI_BLOCK_AFBC) {
pandecode_log(".afbc = {\n");
pandecode_indent++;
pandecode_indent--;
pandecode_log("},\n");
- } else {
- pandecode_log(".chunknown = {\n");
- pandecode_indent++;
-
- pandecode_prop("unk = 0x%" PRIx64, rt->chunknown.unk);
-
- char *a = pointer_as_memory_reference(rt->chunknown.pointer);
- pandecode_prop("pointer = %s", a);
- free(a);
-
- pandecode_indent--;
- pandecode_log("},\n");
+ } else if (rt->afbc.metadata || rt->afbc.stride || rt->afbc.unk) {
+ pandecode_msg("XXX: AFBC disabled but AFBC field set (0x%lX, 0x%x, 0x%x)\n",
+ rt->afbc.metadata,
+ rt->afbc.stride,
+ rt->afbc.unk);
}
MEMORY_PROP(rt, framebuffer);
}
if (rt->zero1 || rt->zero2 || rt->zero3) {
- pandecode_msg("render target zeros tripped\n");
+ pandecode_msg("XXX: render target zeros tripped\n");
pandecode_prop("zero1 = 0x%" PRIx64, rt->zero1);
pandecode_prop("zero2 = 0x%" PRIx32, rt->zero2);
pandecode_prop("zero3 = 0x%" PRIx32, rt->zero3);
pandecode_log("};\n");
}
-static unsigned
-pandecode_mfbd_bfr(uint64_t gpu_va, int job_no, bool with_render_targets)
+static struct pandecode_fbd
+pandecode_mfbd_bfr(uint64_t gpu_va, int job_no, bool is_fragment)
{
struct pandecode_mapped_memory *mem = pandecode_find_mapped_gpu_mem_containing(gpu_va);
const struct bifrost_framebuffer *PANDECODE_PTR_VAR(fb, mem, (mali_ptr) gpu_va);
+ struct pandecode_fbd info;
+
if (fb->sample_locations) {
/* The blob stores all possible sample locations in a single buffer
* allocated on startup, and just switches the pointer when switching
* now */
MEMORY_PROP(fb, unknown1);
+ info.width = fb->width1 + 1;
+ info.height = fb->height1 + 1;
+ info.rt_count = fb->rt_count_1 + 1;
+
pandecode_prop("width1 = MALI_POSITIVE(%d)", fb->width1 + 1);
pandecode_prop("height1 = MALI_POSITIVE(%d)", fb->height1 + 1);
pandecode_prop("width2 = MALI_POSITIVE(%d)", fb->width2 + 1);
pandecode_log_decoded_flags(mfbd_flag_info, fb->mfbd_flags);
pandecode_log_cont(",\n");
- pandecode_prop("clear_stencil = 0x%x", fb->clear_stencil);
- pandecode_prop("clear_depth = %f", fb->clear_depth);
+ if (fb->clear_stencil)
+ pandecode_prop("clear_stencil = 0x%x", fb->clear_stencil);
+
+ if (fb->clear_depth)
+ pandecode_prop("clear_depth = %f", fb->clear_depth);
+
+ /* TODO: What is this? Let's not blow up.. */
+ if (fb->unknown2 != 0x1F)
+ pandecode_prop("unknown2 = 0x%x", fb->unknown2);
pandecode_prop("unknown2 = 0x%x", fb->unknown2);
MEMORY_PROP(fb, scratchpad);
const struct midgard_tiler_descriptor t = fb->tiler;
- pandecode_midgard_tiler_descriptor(&t);
+ pandecode_midgard_tiler_descriptor(&t, fb->width1 + 1, fb->height1 + 1, is_fragment);
if (fb->zero3 || fb->zero4) {
- pandecode_msg("framebuffer zeros tripped\n");
+ pandecode_msg("XXX: framebuffer zeros tripped\n");
pandecode_prop("zero3 = 0x%" PRIx32, fb->zero3);
pandecode_prop("zero4 = 0x%" PRIx32, fb->zero4);
}
gpu_va += sizeof(struct bifrost_framebuffer);
- if ((fb->mfbd_flags & MALI_MFBD_EXTRA) && with_render_targets) {
+ info.has_extra = (fb->mfbd_flags & MALI_MFBD_EXTRA) && is_fragment;
+
+ if (info.has_extra) {
mem = pandecode_find_mapped_gpu_mem_containing(gpu_va);
const struct bifrost_fb_extra *PANDECODE_PTR_VAR(fbx, mem, (mali_ptr) gpu_va);
MEMORY_PROP_DIR(fbx->ds_afbc, depth_stencil);
if (fbx->ds_afbc.zero1 || fbx->ds_afbc.padding) {
- pandecode_msg("Depth/stencil AFBC zeros tripped\n");
+ pandecode_msg("XXX: Depth/stencil AFBC zeros tripped\n");
pandecode_prop("zero1 = 0x%" PRIx32,
fbx->ds_afbc.zero1);
pandecode_prop("padding = 0x%" PRIx64,
if (fbx->ds_linear.depth_stride_zero ||
fbx->ds_linear.stencil_stride_zero ||
fbx->ds_linear.zero1 || fbx->ds_linear.zero2) {
- pandecode_msg("Depth/stencil zeros tripped\n");
+ pandecode_msg("XXX: Depth/stencil zeros tripped\n");
pandecode_prop("depth_stride_zero = 0x%x",
fbx->ds_linear.depth_stride_zero);
pandecode_prop("stencil_stride_zero = 0x%x",
}
if (fbx->zero3 || fbx->zero4) {
- pandecode_msg("fb_extra zeros tripped\n");
+ pandecode_msg("XXX: fb_extra zeros tripped\n");
pandecode_prop("zero3 = 0x%" PRIx64, fbx->zero3);
pandecode_prop("zero4 = 0x%" PRIx64, fbx->zero4);
}
gpu_va += sizeof(struct bifrost_fb_extra);
}
- if (with_render_targets)
+ if (is_fragment)
pandecode_render_target(gpu_va, job_no, fb);
- /* Passback the render target count */
- return MALI_NEGATIVE(fb->rt_count_1);
+ return info;
}
/* Just add a comment decoding the shift/odd fields forming the padded vertices
static void
pandecode_attributes(const struct pandecode_mapped_memory *mem,
mali_ptr addr, int job_no, char *suffix,
- int count, bool varying)
+ int count, bool varying, enum mali_job_type job_type)
{
- char *prefix = varying ? "varyings" : "attributes";
+ char *prefix = varying ? "varying" : "attribute";
+ assert(addr);
- union mali_attr *attr = pandecode_fetch_gpu_mem(mem, addr, sizeof(union mali_attr) * count);
+ if (!count) {
+ pandecode_msg("warn: No %s records\n", prefix);
+ return;
+ }
- char base[128];
- snprintf(base, sizeof(base), "%s_data_%d%s", prefix, job_no, suffix);
+ union mali_attr *attr = pandecode_fetch_gpu_mem(mem, addr, sizeof(union mali_attr) * count);
for (int i = 0; i < count; ++i) {
+ /* First, check for special records */
+ if (attr[i].elements < MALI_VARYING_SPECIAL) {
+ /* Special records are always varyings */
+
+ if (!varying)
+ pandecode_msg("XXX: Special varying in attribute field\n");
+
+ if (job_type != JOB_TYPE_TILER)
+ pandecode_msg("XXX: Special varying in non-FS\n");
+
+ /* We're special, so all fields should be zero */
+ unsigned zero = attr[i].stride | attr[i].size;
+ zero |= attr[i].shift | attr[i].extra_flags;
+
+ if (zero)
+ pandecode_msg("XXX: Special varying has non-zero fields\n");
+ else {
+ /* Print the special varying name */
+ pandecode_log("varying_%d = %s;", i, pandecode_special_varying(attr[i].elements));
+ continue;
+ }
+ }
+
enum mali_attr_mode mode = attr[i].elements & 7;
if (mode == MALI_ATTR_UNUSED)
- continue;
+ pandecode_msg("XXX: unused attribute record\n");
- mali_ptr raw_elements = attr[i].elements & ~7;
+ /* For non-linear records, we need to print the type of record */
+ if (mode != MALI_ATTR_LINEAR)
+ pandecode_log_cont("%s ", pandecode_attr_mode_short(mode));
+
+ /* Print the name to link with attr_meta */
+ pandecode_log_cont("%s_%d", prefix, i);
- /* TODO: Do we maybe want to dump the attribute values
- * themselves given the specified format? Or is that too hard?
- * */
+ /* Print the stride and size */
+ pandecode_log_cont("<%u>[%u]", attr[i].stride, attr[i].size);
+ /* TODO: Sanity check the quotient itself. It must be equal to
+ * (or be greater than, if the driver added padding) the padded
+ * vertex count. */
+
+ /* Finally, print the pointer */
+ mali_ptr raw_elements = attr[i].elements & ~7;
char *a = pointer_as_memory_reference(raw_elements);
- pandecode_log("mali_ptr %s_%d_p = %s;\n", base, i, a);
+ pandecode_log_cont(" = (%s);\n", a);
free(a);
- }
- pandecode_log("union mali_attr %s_%d[] = {\n", prefix, job_no);
- pandecode_indent++;
+ /* Check the pointer */
+ pandecode_validate_buffer(raw_elements, attr[i].size);
- for (int i = 0; i < count; ++i) {
- pandecode_log("{\n");
- pandecode_indent++;
+ /* shift/extra_flags exist only for instanced */
+ if (attr[i].shift | attr[i].extra_flags) {
+ /* These are set to random values by the blob for
+ * varyings, most likely a symptom of uninitialized
+ * memory where the hardware masked the bug. As such we
+ * put this at a warning, not an error. */
- unsigned mode = attr[i].elements & 7;
- pandecode_prop("elements = (%s_%d_p) | %s", base, i, pandecode_attr_mode(mode));
- pandecode_prop("shift = %d", attr[i].shift);
- pandecode_prop("extra_flags = %d", attr[i].extra_flags);
- pandecode_prop("stride = 0x%" PRIx32, attr[i].stride);
- pandecode_prop("size = 0x%" PRIx32, attr[i].size);
+ if (mode == MALI_ATTR_LINEAR)
+ pandecode_msg("warn: instancing fields set for linear\n");
+
+ pandecode_prop("shift = %d", attr[i].shift);
+ pandecode_prop("extra_flags = %d", attr[i].extra_flags);
+ }
/* Decode further where possible */
attr[i].extra_flags);
}
- pandecode_indent--;
- pandecode_log("}, \n");
-
if (mode == MALI_ATTR_NPOT_DIVIDE) {
i++;
pandecode_log("{\n");
pandecode_prop("unk = 0x%x", attr[i].unk);
pandecode_prop("magic_divisor = 0x%08x", attr[i].magic_divisor);
if (attr[i].zero != 0)
- pandecode_prop("zero = 0x%x /* XXX zero tripped */", attr[i].zero);
+ pandecode_prop("XXX: zero tripped (0x%x)\n", attr[i].zero);
pandecode_prop("divisor = %d", attr[i].divisor);
pandecode_magic_divisor(attr[i].magic_divisor, attr[i - 1].shift, attr[i].divisor, attr[i - 1].extra_flags);
pandecode_indent--;
}
- pandecode_indent--;
- pandecode_log("};\n");
+ pandecode_log("\n");
}
static mali_ptr
return shader_ptr;
}
-static bool
-all_zero(unsigned *buffer, unsigned count)
-{
- for (unsigned i = 0; i < count; ++i) {
- if (buffer[i])
- return false;
- }
-
- return true;
-}
-
static void
pandecode_stencil(const char *name, const struct mali_stencil_test *stencil)
{
- if (all_zero((unsigned *) stencil, sizeof(stencil) / sizeof(unsigned)))
+ unsigned any_nonzero =
+ stencil->ref | stencil->mask | stencil->func |
+ stencil->sfail | stencil->dpfail | stencil->dppass;
+
+ if (any_nonzero == 0)
return;
const char *func = pandecode_func(stencil->func);
const char *dppass = pandecode_stencil_op(stencil->dppass);
if (stencil->zero)
- pandecode_msg("Stencil zero tripped: %X\n", stencil->zero);
+ pandecode_msg("XXX: stencil zero tripped: %X\n", stencil->zero);
pandecode_log(".stencil_%s = {\n", name);
pandecode_indent++;
pandecode_blend_equation(const struct mali_blend_equation *blend)
{
if (blend->zero1)
- pandecode_msg("Blend zero tripped: %X\n", blend->zero1);
+ pandecode_msg("XXX: blend zero tripped: %X\n", blend->zero1);
pandecode_log(".equation = {\n");
pandecode_indent++;
static mali_ptr
pandecode_midgard_blend(union midgard_blend *blend, bool is_shader)
{
- if (all_zero((unsigned *) blend, sizeof(blend) / sizeof(unsigned)))
+ /* constant/equation is in a union */
+ if (!blend->shader)
return 0;
pandecode_log(".blend = {\n");
return shader;
}
+/* Attributes and varyings have descriptor records, which contain information
+ * about their format and ordering with the attribute/varying buffers. We'll
+ * want to validate that the combinations specified are self-consistent.
+ */
+
static int
pandecode_attribute_meta(int job_no, int count, const struct mali_vertex_tiler_postfix *v, bool varying, char *suffix)
{
unsigned max_index = 0;
snprintf(base, sizeof(base), "%s_meta", prefix);
- pandecode_log("struct mali_attr_meta %s_%d%s[] = {\n", base, job_no, suffix);
- pandecode_indent++;
-
struct mali_attr_meta *attr_meta;
- mali_ptr p = varying ? (v->varying_meta & ~0xF) : v->attribute_meta;
+ mali_ptr p = varying ? v->varying_meta : v->attribute_meta;
struct pandecode_mapped_memory *attr_mem = pandecode_find_mapped_gpu_mem_containing(p);
attr_meta = pandecode_fetch_gpu_mem(attr_mem, p,
sizeof(*attr_mem));
- pandecode_log("{\n");
- pandecode_indent++;
- pandecode_prop("index = %d", attr_meta->index);
+ /* If the record is discard, it should be zero for everything else */
- if (attr_meta->index > max_index)
- max_index = attr_meta->index;
- pandecode_swizzle(attr_meta->swizzle);
- pandecode_prop("format = %s", pandecode_format(attr_meta->format));
+ if (attr_meta->format == MALI_VARYING_DISCARD) {
+ uint64_t zero =
+ attr_meta->index |
+ attr_meta->unknown1 |
+ attr_meta->unknown3 |
+ attr_meta->src_offset;
- pandecode_prop("unknown1 = 0x%" PRIx64, (u64) attr_meta->unknown1);
- pandecode_prop("unknown3 = 0x%" PRIx64, (u64) attr_meta->unknown3);
- pandecode_prop("src_offset = %d", attr_meta->src_offset);
- pandecode_indent--;
- pandecode_log("},\n");
+ if (zero)
+ pandecode_msg("XXX: expected empty record for varying discard\n");
- }
+ /* We want to look for a literal 0000 swizzle -- this
+ * is not encoded with all zeroes, however */
- pandecode_indent--;
- pandecode_log("};\n");
+ enum mali_channel z = MALI_CHANNEL_ZERO;
+ unsigned zero_swizzle = z | (z << 3) | (z << 6) | (z << 9);
+ bool good_swizzle = attr_meta->swizzle == zero_swizzle;
- return count ? (max_index + 1) : 0;
-}
+ if (!good_swizzle)
+ pandecode_msg("XXX: expected zero swizzle for discard\n");
-static void
-pandecode_indices(uintptr_t pindices, uint32_t index_count, int job_no)
-{
- struct pandecode_mapped_memory *imem = pandecode_find_mapped_gpu_mem_containing(pindices);
+ if (!varying)
+ pandecode_msg("XXX: cannot discard attribute\n");
+
+ /* If we're all good, omit the record */
+ if (!zero && varying && good_swizzle) {
+ pandecode_log("/* discarded varying */\n");
+ continue;
+ }
+ }
- if (imem) {
- /* Indices are literally just a u32 array :) */
+ if (attr_meta->index > max_index)
+ max_index = attr_meta->index;
- uint32_t *PANDECODE_PTR_VAR(indices, imem, pindices);
+ if (attr_meta->unknown1 != 0x2) {
+ pandecode_msg("XXX: expected unknown1 = 0x2\n");
+ pandecode_prop("unknown1 = 0x%" PRIx64, (u64) attr_meta->unknown1);
+ }
- pandecode_log("uint32_t indices_%d[] = {\n", job_no);
- pandecode_indent++;
+ if (attr_meta->unknown3) {
+ pandecode_msg("XXX: unexpected unknown3 set\n");
+ pandecode_prop("unknown3 = 0x%" PRIx64, (u64) attr_meta->unknown3);
+ }
- for (unsigned i = 0; i < (index_count + 1); i += 3)
- pandecode_log("%d, %d, %d,\n",
- indices[i],
- indices[i + 1],
- indices[i + 2]);
+ pandecode_format_short(attr_meta->format, false);
+ pandecode_log_cont(" %s_%u", prefix, attr_meta->index);
- pandecode_indent--;
- pandecode_log("};\n");
+ if (attr_meta->src_offset)
+ pandecode_log_cont("[%u]", attr_meta->src_offset);
+
+ pandecode_swizzle(attr_meta->swizzle, attr_meta->format);
+
+ pandecode_log_cont(";\n");
}
+
+ pandecode_log("\n");
+
+ return count ? (max_index + 1) : 0;
}
/* return bits [lo, hi) of word */
}
static void
-pandecode_vertex_tiler_prefix(struct mali_vertex_tiler_prefix *p, int job_no)
+pandecode_vertex_tiler_prefix(struct mali_vertex_tiler_prefix *p, int job_no, bool noninstanced)
{
pandecode_log_cont("{\n");
pandecode_indent++;
- pandecode_prop("invocation_count = 0x%" PRIx32, p->invocation_count);
- pandecode_prop("size_y_shift = %d", p->size_y_shift);
- pandecode_prop("size_z_shift = %d", p->size_z_shift);
- pandecode_prop("workgroups_x_shift = %d", p->workgroups_x_shift);
- pandecode_prop("workgroups_y_shift = %d", p->workgroups_y_shift);
- pandecode_prop("workgroups_z_shift = %d", p->workgroups_z_shift);
- pandecode_prop("workgroups_x_shift_2 = 0x%" PRIx32, p->workgroups_x_shift_2);
-
/* Decode invocation_count. See the comment before the definition of
* invocation_count for an explanation.
*/
- pandecode_msg("size: (%d, %d, %d)\n",
- bits(p->invocation_count, 0, p->size_y_shift) + 1,
- bits(p->invocation_count, p->size_y_shift, p->size_z_shift) + 1,
- bits(p->invocation_count, p->size_z_shift,
- p->workgroups_x_shift) + 1);
- pandecode_msg("workgroups: (%d, %d, %d)\n",
- bits(p->invocation_count, p->workgroups_x_shift,
- p->workgroups_y_shift) + 1,
- bits(p->invocation_count, p->workgroups_y_shift,
- p->workgroups_z_shift) + 1,
- bits(p->invocation_count, p->workgroups_z_shift,
- 32) + 1);
+
+ unsigned size_x = bits(p->invocation_count, 0, p->size_y_shift) + 1;
+ unsigned size_y = bits(p->invocation_count, p->size_y_shift, p->size_z_shift) + 1;
+ unsigned size_z = bits(p->invocation_count, p->size_z_shift, p->workgroups_x_shift) + 1;
+
+ unsigned groups_x = bits(p->invocation_count, p->workgroups_x_shift, p->workgroups_y_shift) + 1;
+ unsigned groups_y = bits(p->invocation_count, p->workgroups_y_shift, p->workgroups_z_shift) + 1;
+ unsigned groups_z = bits(p->invocation_count, p->workgroups_z_shift, 32) + 1;
+
+ /* Even though we have this decoded, we want to ensure that the
+ * representation is "unique" so we don't lose anything by printing only
+ * the final result. More specifically, we need to check that we were
+ * passed something in canonical form, since the definition per the
+ * hardware is inherently not unique. How? Well, take the resulting
+ * decode and pack it ourselves! If it is bit exact with what we
+ * decoded, we're good to go. */
+
+ struct mali_vertex_tiler_prefix ref;
+ panfrost_pack_work_groups_compute(&ref, groups_x, groups_y, groups_z, size_x, size_y, size_z, noninstanced);
+
+ bool canonical =
+ (p->invocation_count == ref.invocation_count) &&
+ (p->size_y_shift == ref.size_y_shift) &&
+ (p->size_z_shift == ref.size_z_shift) &&
+ (p->workgroups_x_shift == ref.workgroups_x_shift) &&
+ (p->workgroups_y_shift == ref.workgroups_y_shift) &&
+ (p->workgroups_z_shift == ref.workgroups_z_shift) &&
+ (p->workgroups_x_shift_2 == ref.workgroups_x_shift_2);
+
+ if (!canonical) {
+ pandecode_msg("XXX: non-canonical workgroups packing\n");
+ pandecode_msg("expected: %X, %d, %d, %d, %d, %d, %d\n",
+ ref.invocation_count,
+ ref.size_y_shift,
+ ref.size_z_shift,
+ ref.workgroups_x_shift,
+ ref.workgroups_y_shift,
+ ref.workgroups_z_shift,
+ ref.workgroups_x_shift_2);
+
+ pandecode_prop("invocation_count = 0x%" PRIx32, p->invocation_count);
+ pandecode_prop("size_y_shift = %d", p->size_y_shift);
+ pandecode_prop("size_z_shift = %d", p->size_z_shift);
+ pandecode_prop("workgroups_x_shift = %d", p->workgroups_x_shift);
+ pandecode_prop("workgroups_y_shift = %d", p->workgroups_y_shift);
+ pandecode_prop("workgroups_z_shift = %d", p->workgroups_z_shift);
+ pandecode_prop("workgroups_x_shift_2 = %d", p->workgroups_x_shift_2);
+ }
+
+ /* Regardless, print the decode */
+ pandecode_msg("size (%d, %d, %d), count (%d, %d, %d)\n",
+ size_x, size_y, size_z,
+ groups_x, groups_y, groups_z);
/* TODO: Decode */
if (p->unknown_draw)
pandecode_prop("workgroups_x_shift_3 = 0x%" PRIx32, p->workgroups_x_shift_3);
- pandecode_prop("draw_mode = %s", pandecode_draw_mode(p->draw_mode));
+ if (p->draw_mode != MALI_DRAW_NONE)
+ pandecode_prop("draw_mode = %s", pandecode_draw_mode(p->draw_mode));
/* Index count only exists for tiler jobs anyway */
if (p->index_count)
pandecode_prop("index_count = MALI_POSITIVE(%" PRId32 ")", p->index_count + 1);
+
+ unsigned index_raw_size = (p->unknown_draw & MALI_DRAW_INDEXED_SIZE);
+ index_raw_size >>= MALI_DRAW_INDEXED_SHIFT;
+
+ /* Validate an index buffer is present if we need one. TODO: verify
+ * relationship between invocation_count and index_count */
+
+ if (p->indices) {
+ unsigned count = p->index_count;
+
+ /* Grab the size */
+ unsigned size = (index_raw_size == 0x3) ? 4 : index_raw_size;
+
+ /* Ensure we got a size, and if so, validate the index buffer
+ * is large enough to hold a full set of indices of the given
+ * size */
+
+ if (!index_raw_size)
+ pandecode_msg("XXX: index size missing\n");
+ else
+ pandecode_validate_buffer(p->indices, count * size);
+ } else if (index_raw_size)
+ pandecode_msg("XXX: unexpected index size %u\n", index_raw_size);
+
if (p->offset_bias_correction)
pandecode_prop("offset_bias_correction = %d", p->offset_bias_correction);
- DYN_MEMORY_PROP(p, job_no, indices);
-
- if (p->zero1) {
- pandecode_msg("Zero tripped\n");
- pandecode_prop("zero1 = 0x%" PRIx32, p->zero1);
- }
+ /* TODO: Figure out what this is. It's not zero */
+ pandecode_prop("zero1 = 0x%" PRIx32, p->zero1);
pandecode_indent--;
pandecode_log("},\n");
pandecode_uniform_buffers(mali_ptr pubufs, int ubufs_count, int job_no)
{
struct pandecode_mapped_memory *umem = pandecode_find_mapped_gpu_mem_containing(pubufs);
-
struct mali_uniform_buffer_meta *PANDECODE_PTR_VAR(ubufs, umem, pubufs);
for (int i = 0; i < ubufs_count; i++) {
- mali_ptr ptr = ubufs[i].ptr << 2;
- struct pandecode_mapped_memory *umem2 = pandecode_find_mapped_gpu_mem_containing(ptr);
- uint32_t *PANDECODE_PTR_VAR(ubuf, umem2, ptr);
- char name[50];
- snprintf(name, sizeof(name), "ubuf_%d", i);
- /* The blob uses ubuf 0 to upload internal stuff and
- * uniforms that won't fit/are accessed indirectly, so
- * it puts it in the batchbuffer.
- */
- pandecode_log("uint32_t %s_%d[] = {\n", name, job_no);
- pandecode_indent++;
+ unsigned size = (ubufs[i].size + 1) * 16;
+ mali_ptr addr = ubufs[i].ptr << 2;
- for (int j = 0; j <= ubufs[i].size; j++) {
- for (int k = 0; k < 4; k++) {
- if (k == 0)
- pandecode_log("0x%"PRIx32", ", ubuf[4 * j + k]);
- else
- pandecode_log_cont("0x%"PRIx32", ", ubuf[4 * j + k]);
+ pandecode_validate_buffer(addr, size);
- }
-
- pandecode_log_cont("\n");
- }
-
- pandecode_indent--;
- pandecode_log("};\n");
+ char *ptr = pointer_as_memory_reference(ubufs[i].ptr << 2);
+ pandecode_log("ubuf_%d[%u] = %s;\n", i, size, ptr);
+ free(ptr);
}
- pandecode_log("struct mali_uniform_buffer_meta uniform_buffers_%"PRIx64"_%d[] = {\n",
- pubufs, job_no);
- pandecode_indent++;
+ pandecode_log("\n");
+}
- for (int i = 0; i < ubufs_count; i++) {
- pandecode_log("{\n");
- pandecode_indent++;
- pandecode_prop("size = MALI_POSITIVE(%d)", ubufs[i].size + 1);
- pandecode_prop("ptr = ubuf_%d_%d_p >> 2", i, job_no);
- pandecode_indent--;
- pandecode_log("},\n");
- }
+/* Validates and dumps the uniform (push constant) array as a single memory
+ * reference. Each uniform is a vec4, i.e. 16 bytes. */
+static void
+pandecode_uniforms(mali_ptr uniforms, unsigned uniform_count)
+{
+        /* Ensure the claimed count actually fits in the mapped buffer */
+        pandecode_validate_buffer(uniforms, uniform_count * 16);
-        pandecode_indent--;
-        pandecode_log("};\n");
+        char *ptr = pointer_as_memory_reference(uniforms);
+        pandecode_log("vec4 uniforms[%u] = %s;\n", uniform_count, ptr);
+        free(ptr);
}
static void
struct bifrost_scratchpad *PANDECODE_PTR_VAR(scratchpad, mem, pscratchpad);
- if (scratchpad->zero)
- pandecode_msg("XXX scratchpad zero tripped");
+ if (scratchpad->zero) {
+ pandecode_msg("XXX: scratchpad zero tripped");
+ pandecode_prop("zero = 0x%x\n", scratchpad->zero);
+ }
pandecode_log("struct bifrost_scratchpad scratchpad_%"PRIx64"_%d%s = {\n", pscratchpad, job_no, suffix);
pandecode_indent++;
pandecode_log("};\n");
}
-static void
+static unsigned shader_id = 0;
+
+static struct midgard_disasm_stats
pandecode_shader_disassemble(mali_ptr shader_ptr, int shader_no, int type,
- bool is_bifrost)
+ bool is_bifrost, unsigned gpu_id)
{
struct pandecode_mapped_memory *mem = pandecode_find_mapped_gpu_mem_containing(shader_ptr);
uint8_t *PANDECODE_PTR_VAR(code, mem, shader_ptr);
printf("\n\n");
+ struct midgard_disasm_stats stats;
+
if (is_bifrost) {
disassemble_bifrost(code, sz, false);
+
+ /* TODO: Extend stats to Bifrost */
+ stats.texture_count = -128;
+ stats.sampler_count = -128;
+ stats.attribute_count = -128;
+ stats.varying_count = -128;
+ stats.uniform_count = -128;
+ stats.uniform_buffer_count = -128;
+ stats.work_count = -128;
+
+ stats.instruction_count = 0;
+ stats.bundle_count = 0;
+ stats.quadword_count = 0;
+ stats.helper_invocations = false;
} else {
- disassemble_midgard(code, sz);
+ stats = disassemble_midgard(code, sz, gpu_id);
}
- printf("\n\n");
+ /* Print shader-db stats */
+
+ unsigned nr_threads =
+ (stats.work_count <= 4) ? 4 :
+ (stats.work_count <= 8) ? 2 :
+ 1;
+
+ printf("shader%d - %s shader: "
+ "%u inst, %u bundles, %u quadwords, "
+ "%u registers, %u threads, 0 loops\n\n\n",
+ shader_id++,
+ (type == JOB_TYPE_TILER) ? "FRAGMENT" : "VERTEX",
+ stats.instruction_count, stats.bundle_count, stats.quadword_count,
+ stats.work_count, nr_threads);
+
+
+ return stats;
}
static void
-pandecode_vertex_tiler_postfix_pre(const struct mali_vertex_tiler_postfix *p,
+pandecode_texture(mali_ptr u,
+                  struct pandecode_mapped_memory *tmem,
+                  unsigned job_no, unsigned tex)
+{
+        struct mali_texture_descriptor *PANDECODE_PTR_VAR(t, tmem, u);
+
+        pandecode_log("struct mali_texture_descriptor texture_descriptor_%"PRIx64"_%d_%d = {\n", u, job_no, tex);
+        pandecode_indent++;
+
+        struct mali_texture_format f = t->format;
+
+        /* See the definition of enum mali_texture_type */
+
+        bool is_cube = f.type == MALI_TEX_CUBE;
+        unsigned dimension = is_cube ? 2 : f.type;
+
+        pandecode_make_indent();
+
+        /* TODO: Are there others? */
+        bool is_zs = f.format == MALI_Z32_UNORM;
+
+        /* Recall Z/S switched the meaning of linear/tiled .. */
+        if (is_zs && f.layout == MALI_TEXTURE_LINEAR)
+                pandecode_msg("XXX: depth/stencil cannot be tiled\n");
+
+        /* Print the layout. Default is linear; a modifier can denote AFBC or
+         * u-interleaved/tiled modes */
+
+        if (f.layout == MALI_TEXTURE_AFBC)
+                pandecode_log_cont("afbc");
+        else if (f.layout == MALI_TEXTURE_TILED)
+                pandecode_log_cont(is_zs ? "linear" : "tiled");
+        else if (f.layout == MALI_TEXTURE_LINEAR)
+                pandecode_log_cont("linear");
+        else
+                pandecode_msg("XXX: invalid texture layout 0x%X\n", f.layout);
+
+        pandecode_swizzle(t->swizzle, f.format);
+        pandecode_log_cont(" ");
+
+        /* Distinguish cube/2D with modifier */
+
+        if (is_cube)
+                pandecode_log_cont("cube ");
+
+        pandecode_format_short(f.format, f.srgb);
+        pandecode_swizzle(f.swizzle, f.format);
+
+        /* All four width/height/depth/array_size dimensions are present
+         * regardless of the type of texture, but it is an error to have
+         * non-zero dimensions for unused dimensions. Verify this. array_size
+         * can always be set, as can width. */
+
+        if (t->height && dimension < 2)
+                pandecode_msg("XXX: nonzero height for <2D texture\n");
+
+        if (t->depth && dimension < 3)
+                pandecode_msg("XXX: nonzero depth for <3D texture\n");
+
+        /* Print only the dimensions that are actually there */
+
+        pandecode_log_cont(": %u", t->width + 1);
+
+        if (dimension >= 2)
+                pandecode_log_cont("x%u", t->height + 1);
+
+        if (dimension >= 3)
+                pandecode_log_cont("x%u", t->depth + 1);
+
+        if (t->array_size)
+                pandecode_log_cont("[%u]", t->array_size + 1);
+
+        if (t->levels)
+                pandecode_log_cont(" mip %u", t->levels);
+
+        pandecode_log_cont("\n");
+
+        if (f.unknown1 | f.zero) {
+                pandecode_msg("XXX: texture format zero tripped\n");
+                pandecode_prop("unknown1 = %" PRId32, f.unknown1);
+                pandecode_prop("zero = %" PRId32, f.zero);
+        }
+
+        if (!f.unknown2) {
+                pandecode_msg("XXX: expected unknown texture bit set\n");
+                pandecode_prop("unknown2 = %" PRId32, f.unknown2);
+        }
+
+        if (t->swizzle_zero) {
+                pandecode_msg("XXX: swizzle zero tripped\n");
+                pandecode_prop("swizzle_zero = %d", t->swizzle_zero);
+        }
+
+        if (t->unknown3 | t->unknown3A | t->unknown5 | t->unknown6 | t->unknown7) {
+                pandecode_msg("XXX: texture zero tripped\n");
+                pandecode_prop("unknown3 = %" PRId16, t->unknown3);
+                pandecode_prop("unknown3A = %" PRId8, t->unknown3A);
+                pandecode_prop("unknown5 = 0x%" PRIx32, t->unknown5);
+                pandecode_prop("unknown6 = 0x%" PRIx32, t->unknown6);
+                pandecode_prop("unknown7 = 0x%" PRIx32, t->unknown7);
+        }
+
+        pandecode_log(".payload = {\n");
+        pandecode_indent++;
+
+        /* A bunch of bitmap pointers follow.
+         * We work out the correct number,
+         * based on the mipmap/cubemap
+         * properties, but dump extra
+         * possibilities to futureproof */
+
+        int bitmap_count = MALI_NEGATIVE(t->levels);
+
+        /* Miptree for each face */
+        if (f.type == MALI_TEX_CUBE)
+                bitmap_count *= 6;
+
+        /* Array of textures */
+        bitmap_count *= MALI_NEGATIVE(t->array_size);
+
+        /* Stride for each element */
+        if (f.manual_stride)
+                bitmap_count *= 2;
+
+        /* Sanity check the size */
+        int max_count = sizeof(t->payload) / sizeof(t->payload[0]);
+        assert (bitmap_count <= max_count);
+
+        for (int i = 0; i < bitmap_count; ++i) {
+                /* How we dump depends if this is a stride or a pointer */
+
+                if (f.manual_stride && (i & 1)) {
+                        /* signed 32-bit snuck in as a 64-bit pointer */
+                        uint64_t stride_set = t->payload[i];
+                        uint32_t clamped_stride = stride_set;
+                        int32_t stride = clamped_stride;
+                        assert(stride_set == clamped_stride);
+                        pandecode_log("(mali_ptr) %d /* stride */, \n", stride);
+                } else {
+                        char *a = pointer_as_memory_reference(t->payload[i]);
+                        pandecode_log("%s, \n", a);
+                        free(a);
+                }
+        }
+
+        pandecode_indent--;
+        pandecode_log("},\n");
+
+        pandecode_indent--;
+        pandecode_log("};\n");
+}
+
+/* For shader properties like texture_count, we have a claimed value in the
+ * shader_meta descriptor and the actual truth from static analysis of the
+ * disassembly (which may only be an upper limit). Validate the claim against
+ * the truth accordingly. */
+
+static void
+pandecode_shader_prop(const char *name, unsigned claim, signed truth, bool fuzzy)
+{
+        /* Nothing to do */
+        if (claim == truth)
+                return;
+
+        /* Fuzzy comparisons only make sense against a known lower bound */
+        if (fuzzy)
+                assert(truth >= 0);
+
+        if ((truth >= 0) && !fuzzy) {
+                /* Exact truth known: an over-claim is merely wasteful (warn),
+                 * an under-claim is a real inconsistency (XXX) */
+                pandecode_msg("%s: expected %s = %d, claimed %u\n",
+                              (truth < claim) ? "warn" : "XXX",
+                              name, truth, claim);
+        } else if ((claim > (unsigned) -truth) && !fuzzy) {
+                /* Negative truth encodes an upper limit of -truth. Cast so
+                 * the argument type matches the %u conversion. */
+                pandecode_msg("XXX: expected %s <= %u, claimed %u\n",
+                              name, (unsigned) -truth, claim);
+        } else if (fuzzy && (claim < (unsigned) truth))
+                pandecode_msg("XXX: expected %s >= %u, claimed %u\n",
+                              name, (unsigned) truth, claim);
+
+        pandecode_log(".%s = %" PRId16, name, claim);
+
+        if (fuzzy)
+                pandecode_log_cont(" /* %u used */", (unsigned) truth);
+
+        pandecode_log_cont(",\n");
+}
+
+static void
+pandecode_blend_shader_disassemble(mali_ptr shader, int job_no, int job_type,
+                                   bool is_bifrost, unsigned gpu_id)
+{
+        /* Disassemble for the static-analysis stats, then check that the
+         * blend shader does not reference resources it may not use */
+        struct midgard_disasm_stats stats =
+                pandecode_shader_disassemble(shader, job_no, job_type, is_bifrost, gpu_id);
+
+        if (stats.texture_count > 0 || stats.sampler_count > 0)
+                pandecode_msg("XXX: blend shader accessing textures\n");
+
+        if (stats.attribute_count > 0 || stats.varying_count > 0)
+                pandecode_msg("XXX: blend shader accessing interstage\n");
+
+        if (stats.uniform_count > 0 || stats.uniform_buffer_count > 0)
+                pandecode_msg("XXX: blend shader accessing uniforms\n");
+}
+
+static void
+pandecode_vertex_tiler_postfix_pre(
+ const struct mali_vertex_tiler_postfix *p,
int job_no, enum mali_job_type job_type,
- char *suffix, bool is_bifrost)
+ char *suffix, bool is_bifrost, unsigned gpu_id)
{
- mali_ptr shader_meta_ptr = (u64) (uintptr_t) (p->_shader_upper << 4);
struct pandecode_mapped_memory *attr_mem;
- unsigned rt_count = 1;
-
/* On Bifrost, since the tiler heap (for tiler jobs) and the scratchpad
* are the only things actually needed from the FBD, vertex/tiler jobs
* no longer reference the FBD -- instead, this field points to some
* info about the scratchpad.
*/
+
+ struct pandecode_fbd fbd_info = {
+ /* Default for Bifrost */
+ .rt_count = 1
+ };
+
if (is_bifrost)
pandecode_scratchpad(p->framebuffer & ~FBD_TYPE, job_no, suffix);
else if (p->framebuffer & MALI_MFBD)
- rt_count = pandecode_mfbd_bfr((u64) ((uintptr_t) p->framebuffer) & FBD_MASK, job_no, false);
+ fbd_info = pandecode_mfbd_bfr((u64) ((uintptr_t) p->framebuffer) & FBD_MASK, job_no, false);
else if (job_type == JOB_TYPE_COMPUTE)
pandecode_compute_fbd((u64) (uintptr_t) p->framebuffer, job_no);
else
- pandecode_sfbd((u64) (uintptr_t) p->framebuffer, job_no);
+ fbd_info = pandecode_sfbd((u64) (uintptr_t) p->framebuffer, job_no, false);
int varying_count = 0, attribute_count = 0, uniform_count = 0, uniform_buffer_count = 0;
int texture_count = 0, sampler_count = 0;
- if (shader_meta_ptr) {
- struct pandecode_mapped_memory *smem = pandecode_find_mapped_gpu_mem_containing(shader_meta_ptr);
- struct mali_shader_meta *PANDECODE_PTR_VAR(s, smem, shader_meta_ptr);
+ if (p->shader) {
+ struct pandecode_mapped_memory *smem = pandecode_find_mapped_gpu_mem_containing(p->shader);
+ struct mali_shader_meta *PANDECODE_PTR_VAR(s, smem, p->shader);
+
+ /* Disassemble ahead-of-time to get stats. Initialize with
+ * stats for the missing-shader case so we get validation
+ * there, too */
+
+ struct midgard_disasm_stats info = {
+ .texture_count = 0,
+ .sampler_count = 0,
+ .attribute_count = 0,
+ .varying_count = 0,
+ .work_count = 1,
+
+ .uniform_count = -128,
+ .uniform_buffer_count = 0
+ };
- pandecode_log("struct mali_shader_meta shader_meta_%"PRIx64"_%d%s = {\n", shader_meta_ptr, job_no, suffix);
+ if (s->shader & ~0xF)
+ info = pandecode_shader_disassemble(s->shader & ~0xF, job_no, job_type, is_bifrost, gpu_id);
+
+ pandecode_log("struct mali_shader_meta shader_meta_%"PRIx64"_%d%s = {\n", p->shader, job_no, suffix);
pandecode_indent++;
/* Save for dumps */
uniform_buffer_count = s->midgard1.uniform_buffer_count;
}
- mali_ptr shader_ptr = pandecode_shader_address("shader", s->shader);
+ pandecode_shader_address("shader", s->shader);
- pandecode_prop("texture_count = %" PRId16, s->texture_count);
- pandecode_prop("sampler_count = %" PRId16, s->sampler_count);
- pandecode_prop("attribute_count = %" PRId16, s->attribute_count);
- pandecode_prop("varying_count = %" PRId16, s->varying_count);
+ pandecode_shader_prop("texture_count", s->texture_count, info.texture_count, false);
+ pandecode_shader_prop("sampler_count", s->sampler_count, info.sampler_count, false);
+ pandecode_shader_prop("attribute_count", s->attribute_count, info.attribute_count, false);
+ pandecode_shader_prop("varying_count", s->varying_count, info.varying_count, false);
+ pandecode_shader_prop("uniform_buffer_count",
+ uniform_buffer_count,
+ info.uniform_buffer_count, true);
- if (is_bifrost) {
- pandecode_log(".bifrost1 = {\n");
- pandecode_indent++;
+ if (!is_bifrost) {
+ pandecode_shader_prop("uniform_count",
+ uniform_count,
+ info.uniform_count, false);
- pandecode_prop("uniform_buffer_count = %" PRId32, s->bifrost1.uniform_buffer_count);
- pandecode_prop("unk1 = 0x%" PRIx32, s->bifrost1.unk1);
+ pandecode_shader_prop("work_count",
+ s->midgard1.work_count, info.work_count, false);
+ }
- pandecode_indent--;
- pandecode_log("},\n");
+ if (is_bifrost) {
+ pandecode_prop("bifrost1.unk1 = 0x%" PRIx32, s->bifrost1.unk1);
} else {
- pandecode_log(".midgard1 = {\n");
- pandecode_indent++;
+ bool helpers = s->midgard1.flags & MALI_HELPER_INVOCATIONS;
+ s->midgard1.flags &= ~MALI_HELPER_INVOCATIONS;
- pandecode_prop("uniform_count = %" PRId16, s->midgard1.uniform_count);
- pandecode_prop("uniform_buffer_count = %" PRId16, s->midgard1.uniform_buffer_count);
- pandecode_prop("work_count = %" PRId16, s->midgard1.work_count);
+ if (helpers != info.helper_invocations) {
+ pandecode_msg("XXX: expected helpers %u but got %u\n",
+ info.helper_invocations, helpers);
+ }
- pandecode_log(".flags = ");
+ pandecode_log(".midgard1.flags = ");
pandecode_log_decoded_flags(shader_midgard1_flag_info, s->midgard1.flags);
pandecode_log_cont(",\n");
- pandecode_prop("unknown2 = 0x%" PRIx32, s->midgard1.unknown2);
-
- pandecode_indent--;
- pandecode_log("},\n");
+ pandecode_prop("midgard1.unknown2 = 0x%" PRIx32, s->midgard1.unknown2);
}
if (s->depth_units || s->depth_factor) {
/* We're not quite sure what these flags mean without the depth test, if anything */
- if (unknown2_3 & (MALI_DEPTH_TEST | MALI_DEPTH_FUNC_MASK)) {
+ if (unknown2_3 & (MALI_DEPTH_WRITEMASK | MALI_DEPTH_FUNC_MASK)) {
const char *func = pandecode_func(MALI_GET_DEPTH_FUNC(unknown2_3));
unknown2_3 &= ~MALI_DEPTH_FUNC_MASK;
if (!is_bifrost) {
/* TODO: Blend shaders routing/disasm */
-
union midgard_blend blend = s->blend;
- pandecode_midgard_blend(&blend, false);
+ mali_ptr shader = pandecode_midgard_blend(&blend, s->unknown2_3 & MALI_HAS_BLEND_SHADER);
+ if (shader & ~0xF)
+ pandecode_blend_shader_disassemble(shader, job_no, job_type, false, gpu_id);
}
pandecode_indent--;
if (job_type == JOB_TYPE_TILER) {
void* blend_base = (void *) (s + 1);
- for (unsigned i = 0; i < rt_count; i++) {
+ for (unsigned i = 0; i < fbd_info.rt_count; i++) {
mali_ptr shader = 0;
if (is_bifrost)
shader = pandecode_midgard_blend_mrt(blend_base, job_no, i);
if (shader & ~0xF)
- pandecode_shader_disassemble(shader, job_no, job_type, false);
+ pandecode_blend_shader_disassemble(shader, job_no, job_type, false, gpu_id);
+
}
}
-
- if (shader_ptr & ~0xF)
- pandecode_shader_disassemble(shader_ptr, job_no, job_type, is_bifrost);
} else
- pandecode_msg("<no shader>\n");
+ pandecode_msg("XXX: missing shader descriptor\n");
if (p->viewport) {
struct pandecode_mapped_memory *fmem = pandecode_find_mapped_gpu_mem_containing(p->viewport);
pandecode_log("};\n");
}
- if (p->attribute_meta) {
- unsigned max_attr_index = pandecode_attribute_meta(job_no, attribute_count, p, false, suffix);
+ unsigned max_attr_index = 0;
+
+ if (p->attribute_meta)
+ max_attr_index = pandecode_attribute_meta(job_no, attribute_count, p, false, suffix);
+ if (p->attributes) {
attr_mem = pandecode_find_mapped_gpu_mem_containing(p->attributes);
- pandecode_attributes(attr_mem, p->attributes, job_no, suffix, max_attr_index + 1, false);
+ pandecode_attributes(attr_mem, p->attributes, job_no, suffix, max_attr_index, false, job_type);
}
/* Varyings are encoded like attributes but not actually sent; we just
/* Number of descriptors depends on whether there are
* non-internal varyings */
- pandecode_attributes(attr_mem, p->varyings, job_no, suffix, varying_count, true);
- }
-
- bool is_compute = job_type == JOB_TYPE_COMPUTE;
-
- if (p->uniforms && !is_compute) {
- int rows = uniform_count, width = 4;
- size_t sz = rows * width * sizeof(float);
-
- struct pandecode_mapped_memory *uniform_mem = pandecode_find_mapped_gpu_mem_containing(p->uniforms);
- pandecode_fetch_gpu_mem(uniform_mem, p->uniforms, sz);
- u32 *PANDECODE_PTR_VAR(uniforms, uniform_mem, p->uniforms);
-
- pandecode_log("u32 uniforms_%d%s[] = {\n", job_no, suffix);
-
- pandecode_indent++;
-
- for (int row = 0; row < rows; row++) {
- for (int i = 0; i < width; i++) {
- u32 v = uniforms[i];
- float f;
- memcpy(&f, &v, sizeof(v));
- pandecode_log_cont("%X /* %f */, ", v, f);
- }
-
- pandecode_log_cont("\n");
-
- uniforms += width;
- }
-
- pandecode_indent--;
- pandecode_log("};\n");
- } else if (p->uniforms) {
- int rows = uniform_count * 2;
- size_t sz = rows * sizeof(mali_ptr);
-
- struct pandecode_mapped_memory *uniform_mem = pandecode_find_mapped_gpu_mem_containing(p->uniforms);
- pandecode_fetch_gpu_mem(uniform_mem, p->uniforms, sz);
- mali_ptr *PANDECODE_PTR_VAR(uniforms, uniform_mem, p->uniforms);
-
- pandecode_log("mali_ptr uniforms_%d%s[] = {\n", job_no, suffix);
-
- pandecode_indent++;
-
- for (int row = 0; row < rows; row++) {
- char *a = pointer_as_memory_reference(uniforms[row]);
- pandecode_log("%s,\n", a);
- free(a);
- }
-
- pandecode_indent--;
- pandecode_log("};\n");
-
+ pandecode_attributes(attr_mem, p->varyings, job_no, suffix, varying_count, true, job_type);
}
if (p->uniform_buffers) {
- pandecode_uniform_buffers(p->uniform_buffers, uniform_buffer_count, job_no);
- }
+ if (uniform_buffer_count)
+ pandecode_uniform_buffers(p->uniform_buffers, uniform_buffer_count, job_no);
+ else
+ pandecode_msg("warn: UBOs specified but not referenced\n");
+ } else if (uniform_buffer_count)
+ pandecode_msg("XXX: UBOs referenced but not specified\n");
+
+ /* We don't want to actually dump uniforms, but we do need to validate
+ * that the counts we were given are sane */
+
+ if (p->uniforms) {
+ if (uniform_count)
+ pandecode_uniforms(p->uniforms, uniform_count);
+ else
+ pandecode_msg("warn: Uniforms specified but not referenced\n");
+ } else if (uniform_count)
+ pandecode_msg("XXX: Uniforms referenced but not specified\n");
if (p->texture_trampoline) {
struct pandecode_mapped_memory *mmem = pandecode_find_mapped_gpu_mem_containing(p->texture_trampoline);
pandecode_log("};\n");
/* Now, finally, descend down into the texture descriptor */
- for (int tex = 0; tex < texture_count; ++tex) {
+ for (unsigned tex = 0; tex < texture_count; ++tex) {
mali_ptr *PANDECODE_PTR_VAR(u, mmem, p->texture_trampoline + tex * sizeof(mali_ptr));
struct pandecode_mapped_memory *tmem = pandecode_find_mapped_gpu_mem_containing(*u);
-
- if (tmem) {
- struct mali_texture_descriptor *PANDECODE_PTR_VAR(t, tmem, *u);
-
- pandecode_log("struct mali_texture_descriptor texture_descriptor_%"PRIx64"_%d_%d = {\n", *u, job_no, tex);
- pandecode_indent++;
-
- pandecode_prop("width = MALI_POSITIVE(%" PRId16 ")", t->width + 1);
- pandecode_prop("height = MALI_POSITIVE(%" PRId16 ")", t->height + 1);
- pandecode_prop("depth = MALI_POSITIVE(%" PRId16 ")", t->depth + 1);
- pandecode_prop("array_size = MALI_POSITIVE(%" PRId16 ")", t->array_size + 1);
- pandecode_prop("unknown3 = %" PRId16, t->unknown3);
- pandecode_prop("unknown3A = %" PRId8, t->unknown3A);
- pandecode_prop("nr_mipmap_levels = %" PRId8, t->nr_mipmap_levels);
-
- struct mali_texture_format f = t->format;
-
- pandecode_log(".format = {\n");
- pandecode_indent++;
-
- pandecode_swizzle(f.swizzle);
- pandecode_prop("format = %s", pandecode_format(f.format));
- pandecode_prop("type = %s", pandecode_texture_type(f.type));
- pandecode_prop("srgb = %" PRId32, f.srgb);
- pandecode_prop("unknown1 = %" PRId32, f.unknown1);
- pandecode_prop("usage2 = 0x%" PRIx32, f.usage2);
-
- pandecode_indent--;
- pandecode_log("},\n");
-
- pandecode_swizzle(t->swizzle);
-
- if (t->swizzle_zero) {
- /* Shouldn't happen */
- pandecode_msg("Swizzle zero tripped but replay will be fine anyway");
- pandecode_prop("swizzle_zero = %d", t->swizzle_zero);
- }
-
- pandecode_prop("unknown3 = 0x%" PRIx32, t->unknown3);
-
- pandecode_prop("unknown5 = 0x%" PRIx32, t->unknown5);
- pandecode_prop("unknown6 = 0x%" PRIx32, t->unknown6);
- pandecode_prop("unknown7 = 0x%" PRIx32, t->unknown7);
-
- pandecode_log(".payload = {\n");
- pandecode_indent++;
-
- /* A bunch of bitmap pointers follow.
- * We work out the correct number,
- * based on the mipmap/cubemap
- * properties, but dump extra
- * possibilities to futureproof */
-
- int bitmap_count = MALI_NEGATIVE(t->nr_mipmap_levels);
- bool manual_stride = f.usage2 & MALI_TEX_MANUAL_STRIDE;
-
- /* Miptree for each face */
- if (f.type == MALI_TEX_CUBE)
- bitmap_count *= 6;
-
- /* Array of textures */
- bitmap_count *= MALI_NEGATIVE(t->array_size);
-
- /* Stride for each element */
- if (manual_stride)
- bitmap_count *= 2;
-
- /* Sanity check the size */
- int max_count = sizeof(t->payload) / sizeof(t->payload[0]);
- assert (bitmap_count <= max_count);
-
- /* Dump more to be safe, but not _that_ much more */
- int safe_count = MIN2(bitmap_count * 2, max_count);
-
- for (int i = 0; i < safe_count; ++i) {
- char *prefix = (i >= bitmap_count) ? "// " : "";
-
- /* How we dump depends if this is a stride or a pointer */
-
- if ((f.usage2 & MALI_TEX_MANUAL_STRIDE) && (i & 1)) {
- /* signed 32-bit snuck in as a 64-bit pointer */
- uint64_t stride_set = t->payload[i];
- uint32_t clamped_stride = stride_set;
- int32_t stride = clamped_stride;
- assert(stride_set == clamped_stride);
- pandecode_log("%s(mali_ptr) %d /* stride */, \n", prefix, stride);
- } else {
- char *a = pointer_as_memory_reference(t->payload[i]);
- pandecode_log("%s%s, \n", prefix, a);
- free(a);
- }
- }
-
- pandecode_indent--;
- pandecode_log("},\n");
-
- pandecode_indent--;
- pandecode_log("};\n");
- }
+ if (tmem)
+ pandecode_texture(*u, tmem, job_no, tex);
}
}
}
pandecode_prop("compare_func = %s", pandecode_alt_func(s->compare_func));
if (s->zero || s->zero2) {
- pandecode_msg("Zero tripped\n");
+ pandecode_msg("XXX: sampler zero tripped\n");
pandecode_prop("zero = 0x%X, 0x%X\n", s->zero, s->zero2);
}
static void
pandecode_vertex_tiler_postfix(const struct mali_vertex_tiler_postfix *p, int job_no, bool is_bifrost)
{
- pandecode_log_cont("{\n");
- pandecode_indent++;
+ if (p->shader & 0xF)
+ pandecode_msg("warn: shader tagged %X\n", p->shader & 0xF);
- MEMORY_PROP(p, position_varying);
- DYN_MEMORY_PROP(p, job_no, uniform_buffers);
- DYN_MEMORY_PROP(p, job_no, texture_trampoline);
- DYN_MEMORY_PROP(p, job_no, sampler_descriptor);
- DYN_MEMORY_PROP(p, job_no, uniforms);
- DYN_MEMORY_PROP(p, job_no, attributes);
- DYN_MEMORY_PROP(p, job_no, attribute_meta);
- DYN_MEMORY_PROP(p, job_no, varyings);
- DYN_MEMORY_PROP(p, job_no, varying_meta);
- DYN_MEMORY_PROP(p, job_no, viewport);
- DYN_MEMORY_PROP(p, job_no, occlusion_counter);
+ if (!(p->position_varying || p->occlusion_counter))
+ return;
- if (is_bifrost)
- pandecode_prop("framebuffer = scratchpad_%d_p", job_no);
- else
- pandecode_prop("framebuffer = framebuffer_%d_p | %s", job_no, p->framebuffer & MALI_MFBD ? "MALI_MFBD" : "0");
+ pandecode_log(".postfix = {\n");
+ pandecode_indent++;
- pandecode_prop("_shader_upper = (shader_meta_%d_p) >> 4", job_no);
- pandecode_prop("flags = %d", p->flags);
+ MEMORY_PROP(p, position_varying);
+ MEMORY_PROP(p, occlusion_counter);
pandecode_indent--;
pandecode_log("},\n");
pandecode_prop("unk2 = 0x%x", v->unk2);
if (v->zero0 || v->zero1) {
- pandecode_msg("vertex only zero tripped");
+ pandecode_msg("XXX: vertex only zero tripped");
pandecode_prop("zero0 = 0x%" PRIx32, v->zero0);
pandecode_prop("zero1 = 0x%" PRIx64, v->zero1);
}
pandecode_indent++;
if (h->zero) {
- pandecode_msg("tiler heap zero tripped\n");
+ pandecode_msg("XXX: tiler heap zero tripped\n");
pandecode_prop("zero = 0x%x", h->zero);
}
for (int i = 0; i < 12; i++) {
if (h->zeros[i] != 0) {
- pandecode_msg("tiler heap zero %d tripped, value %x\n",
+ pandecode_msg("XXX: tiler heap zero %d tripped, value %x\n",
i, h->zeros[i]);
}
}
pandecode_indent++;
if (t->zero0 || t->zero1) {
- pandecode_msg("tiler meta zero tripped");
+ pandecode_msg("XXX: tiler meta zero tripped\n");
pandecode_prop("zero0 = 0x%" PRIx64, t->zero0);
pandecode_prop("zero1 = 0x%" PRIx64, t->zero1);
}
pandecode_prop("width = MALI_POSITIVE(%d)", t->width + 1);
pandecode_prop("height = MALI_POSITIVE(%d)", t->height + 1);
- DYN_MEMORY_PROP(t, job_no, tiler_heap_meta);
for (int i = 0; i < 12; i++) {
if (t->zeros[i] != 0) {
- pandecode_msg("tiler heap zero %d tripped, value %" PRIx64 "\n",
+ pandecode_msg("XXX: tiler meta zero %d tripped, value %" PRIx64 "\n",
i, t->zeros[i]);
}
}
/* TODO: gl_PointSize on Bifrost */
pandecode_primitive_size(t->primitive_size, true);
- DYN_MEMORY_PROP(t, job_no, tiler_meta);
pandecode_gl_enables(t->gl_enables, JOB_TYPE_TILER);
if (t->zero1 || t->zero2 || t->zero3 || t->zero4 || t->zero5
|| t->zero6 || t->zero7 || t->zero8) {
- pandecode_msg("tiler only zero tripped");
+ pandecode_msg("XXX: tiler only zero tripped\n");
pandecode_prop("zero1 = 0x%" PRIx64, t->zero1);
pandecode_prop("zero2 = 0x%" PRIx64, t->zero2);
pandecode_prop("zero3 = 0x%" PRIx64, t->zero3);
static int
pandecode_vertex_job_bfr(const struct mali_job_descriptor_header *h,
const struct pandecode_mapped_memory *mem,
- mali_ptr payload, int job_no)
+ mali_ptr payload, int job_no, unsigned gpu_id)
{
struct bifrost_payload_vertex *PANDECODE_PTR_VAR(v, mem, payload);
- pandecode_vertex_tiler_postfix_pre(&v->postfix, job_no, h->job_type, "", true);
+ pandecode_vertex_tiler_postfix_pre(&v->postfix, job_no, h->job_type, "", true, gpu_id);
pandecode_log("struct bifrost_payload_vertex payload_%d = {\n", job_no);
pandecode_indent++;
pandecode_log(".prefix = ");
- pandecode_vertex_tiler_prefix(&v->prefix, job_no);
+ pandecode_vertex_tiler_prefix(&v->prefix, job_no, false);
pandecode_log(".vertex = ");
pandecode_vertex_only_bfr(&v->vertex);
- pandecode_log(".postfix = ");
pandecode_vertex_tiler_postfix(&v->postfix, job_no, true);
pandecode_indent--;
static int
pandecode_tiler_job_bfr(const struct mali_job_descriptor_header *h,
const struct pandecode_mapped_memory *mem,
- mali_ptr payload, int job_no)
+ mali_ptr payload, int job_no, unsigned gpu_id)
{
struct bifrost_payload_tiler *PANDECODE_PTR_VAR(t, mem, payload);
- pandecode_vertex_tiler_postfix_pre(&t->postfix, job_no, h->job_type, "", true);
-
- pandecode_indices(t->prefix.indices, t->prefix.index_count, job_no);
+ pandecode_vertex_tiler_postfix_pre(&t->postfix, job_no, h->job_type, "", true, gpu_id);
pandecode_tiler_meta(t->tiler.tiler_meta, job_no);
pandecode_log("struct bifrost_payload_tiler payload_%d = {\n", job_no);
pandecode_indent++;
pandecode_log(".prefix = ");
- pandecode_vertex_tiler_prefix(&t->prefix, job_no);
+ pandecode_vertex_tiler_prefix(&t->prefix, job_no, false);
pandecode_log(".tiler = ");
pandecode_tiler_only_bfr(&t->tiler, job_no);
- pandecode_log(".postfix = ");
pandecode_vertex_tiler_postfix(&t->postfix, job_no, true);
pandecode_indent--;
static int
pandecode_vertex_or_tiler_job_mdg(const struct mali_job_descriptor_header *h,
const struct pandecode_mapped_memory *mem,
- mali_ptr payload, int job_no)
+ mali_ptr payload, int job_no, unsigned gpu_id)
{
struct midgard_payload_vertex_tiler *PANDECODE_PTR_VAR(v, mem, payload);
- pandecode_vertex_tiler_postfix_pre(&v->postfix, job_no, h->job_type, "", false);
-
- pandecode_indices(v->prefix.indices, v->prefix.index_count, job_no);
+ pandecode_vertex_tiler_postfix_pre(&v->postfix, job_no, h->job_type, "", false, gpu_id);
pandecode_log("struct midgard_payload_vertex_tiler payload_%d = {\n", job_no);
pandecode_indent++;
bool has_primitive_pointer = v->prefix.unknown_draw & MALI_DRAW_VARYING_SIZE;
pandecode_primitive_size(v->primitive_size, !has_primitive_pointer);
+ bool instanced = v->instance_shift || v->instance_odd;
+ bool is_graphics = (h->job_type == JOB_TYPE_VERTEX) || (h->job_type == JOB_TYPE_TILER);
+
pandecode_log(".prefix = ");
- pandecode_vertex_tiler_prefix(&v->prefix, job_no);
+ pandecode_vertex_tiler_prefix(&v->prefix, job_no, !instanced && is_graphics);
pandecode_gl_enables(v->gl_enables, h->job_type);
pandecode_prop("offset_start = %d", v->offset_start);
if (v->zero5) {
- pandecode_msg("Zero tripped\n");
+ pandecode_msg("XXX: midgard payload zero tripped\n");
pandecode_prop("zero5 = 0x%" PRIx64, v->zero5);
}
- pandecode_log(".postfix = ");
pandecode_vertex_tiler_postfix(&v->postfix, job_no, false);
pandecode_indent--;
{
const struct mali_payload_fragment *PANDECODE_PTR_VAR(s, mem, payload);
- bool fbd_dumped = false;
-
- if (!is_bifrost && (s->framebuffer & FBD_TYPE) == MALI_SFBD) {
- /* Only SFBDs are understood, not MFBDs. We're speculating,
- * based on the versioning, kernel code, etc, that the
- * difference is between Single FrameBuffer Descriptor and
- * Multiple FrmaeBuffer Descriptor; the change apparently lines
- * up with multi-framebuffer support being added (T7xx onwards,
- * including Gxx). In any event, there's some field shuffling
- * that we haven't looked into yet. */
-
- pandecode_sfbd(s->framebuffer & FBD_MASK, job_no);
- fbd_dumped = true;
- } else if ((s->framebuffer & FBD_TYPE) == MALI_MFBD) {
- /* We don't know if Bifrost supports SFBD's at all, since the
- * driver never uses them. And the format is different from
- * Midgard anyways, due to the tiler heap and scratchpad being
- * moved out into separate structures, so it's not clear what a
- * Bifrost SFBD would even look like without getting an actual
- * trace, which appears impossible.
- */
+ bool is_mfbd = (s->framebuffer & FBD_TYPE) == MALI_MFBD;
+
+ /* Bifrost theoretically may retain support for SFBD on compute jobs,
+ * but for graphics workloads with a FRAGMENT payload, use MFBD */
+
+ if (!is_mfbd && is_bifrost)
+ pandecode_msg("XXX: Bifrost fragment must use MFBD\n");
+
+ struct pandecode_fbd info;
+
+ if (is_mfbd)
+ info = pandecode_mfbd_bfr(s->framebuffer & FBD_MASK, job_no, true);
+ else
+ info = pandecode_sfbd(s->framebuffer & FBD_MASK, job_no, true);
+
+ /* Compute the tag for the tagged pointer. This contains the type of
+ * FBD (MFBD/SFBD), and in the case of an MFBD, information about which
+ * additional structures follow the MFBD header (an extra payload or
+ * not, as well as a count of render targets) */
+
+ unsigned expected_tag = is_mfbd ? MALI_MFBD : MALI_SFBD;
- pandecode_mfbd_bfr(s->framebuffer & FBD_MASK, job_no, true);
- fbd_dumped = true;
+ if (is_mfbd) {
+ if (info.has_extra)
+ expected_tag |= MALI_MFBD_TAG_EXTRA;
+
+ expected_tag |= (MALI_POSITIVE(info.rt_count) << 2);
}
- uintptr_t p = (uintptr_t) s->framebuffer & FBD_MASK;
- pandecode_log("struct mali_payload_fragment payload_%"PRIx64"_%d = {\n", payload, job_no);
- pandecode_indent++;
+ if ((s->min_tile_coord | s->max_tile_coord) & ~(MALI_X_COORD_MASK | MALI_Y_COORD_MASK)) {
+ pandecode_msg("XXX: unexpected tile coordinate bits\n");
+ pandecode_prop("min_tile_coord = 0x%X\n", s->min_tile_coord);
+ pandecode_prop("max_tile_coord = 0x%X\n", s->max_tile_coord);
+ }
- /* See the comments by the macro definitions for mathematical context
- * on why this is so weird */
+ /* Extract tile coordinates */
- if (MALI_TILE_COORD_FLAGS(s->max_tile_coord) || MALI_TILE_COORD_FLAGS(s->min_tile_coord))
- pandecode_msg("Tile coordinate flag missed, replay wrong\n");
+ unsigned min_x = MALI_TILE_COORD_X(s->min_tile_coord) << MALI_TILE_SHIFT;
+ unsigned min_y = MALI_TILE_COORD_Y(s->min_tile_coord) << MALI_TILE_SHIFT;
- pandecode_prop("min_tile_coord = MALI_COORDINATE_TO_TILE_MIN(%d, %d)",
- MALI_TILE_COORD_X(s->min_tile_coord) << MALI_TILE_SHIFT,
- MALI_TILE_COORD_Y(s->min_tile_coord) << MALI_TILE_SHIFT);
+ unsigned max_x = (MALI_TILE_COORD_X(s->max_tile_coord) + 1) << MALI_TILE_SHIFT;
+ unsigned max_y = (MALI_TILE_COORD_Y(s->max_tile_coord) + 1) << MALI_TILE_SHIFT;
- pandecode_prop("max_tile_coord = MALI_COORDINATE_TO_TILE_MAX(%d, %d)",
- (MALI_TILE_COORD_X(s->max_tile_coord) + 1) << MALI_TILE_SHIFT,
- (MALI_TILE_COORD_Y(s->max_tile_coord) + 1) << MALI_TILE_SHIFT);
+ /* For the max, we also want the floored (rather than ceiled) version for checking */
- /* If the FBD was just decoded, we can refer to it by pointer. If not,
- * we have to fallback on offsets. */
+ unsigned max_x_f = (MALI_TILE_COORD_X(s->max_tile_coord)) << MALI_TILE_SHIFT;
+ unsigned max_y_f = (MALI_TILE_COORD_Y(s->max_tile_coord)) << MALI_TILE_SHIFT;
- const char *fbd_type = s->framebuffer & MALI_MFBD ? "MALI_MFBD" : "MALI_SFBD";
+ /* Validate the coordinates are well-ordered */
- if (fbd_dumped)
- pandecode_prop("framebuffer = framebuffer_%d_p | %s", job_no, fbd_type);
- else
- pandecode_prop("framebuffer = %s | %s", pointer_as_memory_reference(p), fbd_type);
+ if (min_x == max_x)
+ pandecode_msg("XXX: empty X coordinates (%u = %u)\n", min_x, max_x);
+ else if (min_x > max_x)
+ pandecode_msg("XXX: misordered X coordinates (%u > %u)\n", min_x, max_x);
- pandecode_indent--;
- pandecode_log("};\n");
+ if (min_y == max_y)
+ pandecode_msg("XXX: empty Y coordinates (%u = %u)\n", min_y, max_y);
+ else if (min_y > max_y)
+ pandecode_msg("XXX: misordered Y coordinates (%u > %u)\n", min_y, max_y);
+
+ /* Validate the coordinates fit inside the framebuffer. We use floor,
+ * rather than ceil, for the max coordinates, since the tile
+ * coordinates for something like an 800x600 framebuffer will actually
+ * resolve to 800x608, which would otherwise trigger a Y-overflow */
+
+ if ((min_x > info.width) || (max_x_f > info.width))
+ pandecode_msg("XXX: tile coordinates overflow in X direction\n");
+
+ if ((min_y > info.height) || (max_y_f > info.height))
+ pandecode_msg("XXX: tile coordinates overflow in Y direction\n");
+
+ /* After validation, we print */
+
+ pandecode_log("fragment (%u, %u) ... (%u, %u)\n\n", min_x, min_y, max_x, max_y);
+
+ /* The FBD is a tagged pointer */
+
+ unsigned tag = (s->framebuffer & ~FBD_MASK);
+
+ if (tag != expected_tag)
+ pandecode_msg("XXX: expected FBD tag %X but got %X\n", expected_tag, tag);
return sizeof(*s);
}
static int job_descriptor_number = 0;
int
-pandecode_jc(mali_ptr jc_gpu_va, bool bifrost)
+pandecode_jc(mali_ptr jc_gpu_va, bool bifrost, unsigned gpu_id)
{
struct mali_job_descriptor_header *h;
h->job_type != JOB_TYPE_FRAGMENT ? 4 : 0;
mali_ptr payload_ptr = jc_gpu_va + sizeof(*h) - offset;
- payload = pandecode_fetch_gpu_mem(mem, payload_ptr,
- MALI_PAYLOAD_SIZE);
+ payload = pandecode_fetch_gpu_mem(mem, payload_ptr, 256);
int job_no = job_descriptor_number++;
if (h->job_descriptor_size)
pandecode_prop("job_descriptor_size = %d", h->job_descriptor_size);
- if (h->exception_status != 0x1)
- pandecode_prop("exception_status = %x (source ID: 0x%x access: 0x%x exception: 0x%x)",
+ if (h->exception_status && h->exception_status != 0x1)
+ pandecode_prop("exception_status = %x (source ID: 0x%x access: %s exception: 0x%x)",
h->exception_status,
(h->exception_status >> 16) & 0xFFFF,
- (h->exception_status >> 8) & 0x3,
+ pandecode_exception_access((h->exception_status >> 8) & 0x3),
h->exception_status & 0xFF);
if (h->first_incomplete_task)
case JOB_TYPE_COMPUTE:
if (bifrost) {
if (h->job_type == JOB_TYPE_TILER)
- pandecode_tiler_job_bfr(h, mem, payload_ptr, job_no);
+ pandecode_tiler_job_bfr(h, mem, payload_ptr, job_no, gpu_id);
else
- pandecode_vertex_job_bfr(h, mem, payload_ptr, job_no);
+ pandecode_vertex_job_bfr(h, mem, payload_ptr, job_no, gpu_id);
} else
- pandecode_vertex_or_tiler_job_mdg(h, mem, payload_ptr, job_no);
+ pandecode_vertex_or_tiler_job_mdg(h, mem, payload_ptr, job_no, gpu_id);
break;