#include <stdbool.h>
#include <stdarg.h>
#include "decode.h"
+#include "util/macros.h"
#include "util/u_math.h"
#include "pan_pretty_print.h"
#include "midgard/disassemble.h"
#include "bifrost/disassemble.h"
-int pandecode_replay_jc(mali_ptr jc_gpu_va, bool bifrost);
+#include "pan_encoder.h"
+
+int pandecode_jc(mali_ptr jc_gpu_va, bool bifrost);
#define MEMORY_PROP(obj, p) {\
if (obj->p) { \
} \
}
-#define DYN_MEMORY_PROP(obj, no, p) { \
- if (obj->p) \
- pandecode_prop("%s = %s_%d_p", #p, #p, no); \
+/* Variant of MEMORY_PROP for members accessed directly (obj.p) rather
+ * than through a pointer (obj->p): prints the field as a symbolic memory
+ * reference via pointer_as_memory_reference() and frees the temporary
+ * string it returns. Skipped entirely when the field is zero/NULL. */
+#define MEMORY_PROP_DIR(obj, p) {\
+ if (obj.p) { \
+ char *a = pointer_as_memory_reference(obj.p); \
+ pandecode_prop("%s = %s", #p, a); \
+ free(a); \
+ } \
}
/* Semantic logging type.
};
#undef FLAG_INFO
+/* Flag table for decoding sampler descriptor filter_mode bits; consumed
+ * by pandecode_log_decoded_flags(). Terminated by the empty {} entry. */
+#define FLAG_INFO(flag) { MALI_SAMP_##flag, "MALI_SAMP_" #flag }
+static const struct pandecode_flag_info sampler_flag_info [] = {
+ FLAG_INFO(MAG_NEAREST),
+ FLAG_INFO(MIN_NEAREST),
+ FLAG_INFO(MIP_LINEAR_1),
+ FLAG_INFO(MIP_LINEAR_2),
+ FLAG_INFO(NORM_COORDS),
+ {}
+};
+#undef FLAG_INFO
extern char *replace_fragment;
extern char *replace_vertex;
static char *
-pandecode_job_type_name(enum mali_job_type type)
+pandecode_job_type(enum mali_job_type type)
{
#define DEFINE_CASE(name) case JOB_TYPE_ ## name: return "JOB_TYPE_" #name
}
static char *
-pandecode_draw_mode_name(enum mali_draw_mode mode)
+pandecode_draw_mode(enum mali_draw_mode mode)
{
#define DEFINE_CASE(name) case MALI_ ## name: return "MALI_" #name
#define DEFINE_CASE(name) case MALI_FUNC_ ## name: return "MALI_FUNC_" #name
static char *
-pandecode_func_name(enum mali_func mode)
+pandecode_func(enum mali_func mode)
{
switch (mode) {
DEFINE_CASE(NEVER);
/* Why is this duplicated? Who knows... */
#define DEFINE_CASE(name) case MALI_ALT_FUNC_ ## name: return "MALI_ALT_FUNC_" #name
static char *
-pandecode_alt_func_name(enum mali_alt_func mode)
+pandecode_alt_func(enum mali_alt_func mode)
{
switch (mode) {
DEFINE_CASE(NEVER);
#define DEFINE_CASE(name) case MALI_STENCIL_ ## name: return "MALI_STENCIL_" #name
static char *
-pandecode_stencil_op_name(enum mali_stencil_op op)
+pandecode_stencil_op(enum mali_stencil_op op)
{
switch (op) {
DEFINE_CASE(KEEP);
#undef DEFINE_CASE
#define DEFINE_CASE(name) case MALI_ATTR_ ## name: return "MALI_ATTR_" #name
-static char *pandecode_attr_mode_name(enum mali_attr_mode mode)
+static char *pandecode_attr_mode(enum mali_attr_mode mode)
{
switch(mode) {
DEFINE_CASE(UNUSED);
DEFINE_CASE(POT_DIVIDE);
DEFINE_CASE(MODULO);
DEFINE_CASE(NPOT_DIVIDE);
+ DEFINE_CASE(IMAGE);
+ DEFINE_CASE(INTERNAL);
default:
return "MALI_ATTR_UNUSED /* XXX: Unknown stencil op, check dump */";
}
#define DEFINE_CASE(name) case MALI_CHANNEL_## name: return "MALI_CHANNEL_" #name
static char *
-pandecode_channel_name(enum mali_channel channel)
+pandecode_channel(enum mali_channel channel)
{
switch (channel) {
DEFINE_CASE(RED);
#define DEFINE_CASE(name) case MALI_WRAP_## name: return "MALI_WRAP_" #name
static char *
-pandecode_wrap_mode_name(enum mali_wrap_mode op)
+pandecode_wrap_mode(enum mali_wrap_mode op)
{
switch (op) {
DEFINE_CASE(REPEAT);
}
#undef DEFINE_CASE
+/* Maps a mali_exception_access enum value to its bare name (no
+ * "MALI_EXCEPTION_ACCESS_" prefix, unlike the other decoders above —
+ * note the ""#name stringization). Aborts via unreachable() on values
+ * outside the known set. */
+#define DEFINE_CASE(name) case MALI_EXCEPTION_ACCESS_## name: return ""#name
+static char *
+pandecode_exception_access(enum mali_exception_access fmt)
+{
+ switch (fmt) {
+ DEFINE_CASE(NONE);
+ DEFINE_CASE(EXECUTE);
+ DEFINE_CASE(READ);
+ DEFINE_CASE(WRITE);
+
+ default:
+ unreachable("Invalid case");
+ }
+}
+#undef DEFINE_CASE
+
/* Midgard's tiler descriptor is embedded within the
* larger FBD */
static void
-pandecode_midgard_tiler_descriptor(const struct midgard_tiler_descriptor *t)
+pandecode_midgard_tiler_descriptor(
+ const struct midgard_tiler_descriptor *t,
+ unsigned width,
+ unsigned height)
{
pandecode_log(".tiler = {\n");
pandecode_indent++;
pandecode_prop("hierarchy_mask = 0x%" PRIx16, t->hierarchy_mask);
pandecode_prop("flags = 0x%" PRIx16, t->flags);
- pandecode_prop("polygon_list_size = 0x%x", t->polygon_list_size);
MEMORY_PROP(t, polygon_list);
- MEMORY_PROP(t, polygon_list_body);
- MEMORY_PROP(t, heap_start);
+ /* The body is offset from the base of the polygon list */
+ assert(t->polygon_list_body > t->polygon_list);
+ unsigned body_offset = t->polygon_list_body - t->polygon_list;
- {
- /* Points to the end of a buffer */
- char *a = pointer_as_memory_reference(t->heap_end - 1);
- pandecode_prop("heap_end = %s + 1", a);
- free(a);
+ /* It needs to fit inside the reported size */
+ assert(t->polygon_list_size >= body_offset);
+
+ /* Check that we fit */
+ struct pandecode_mapped_memory *plist =
+ pandecode_find_mapped_gpu_mem_containing(t->polygon_list);
+
+ assert(t->polygon_list_size <= plist->length);
+
+ /* Now that we've sanity checked, we'll try to calculate the sizes
+ * ourselves for comparison */
+
+ unsigned ref_header = panfrost_tiler_header_size(width, height, t->hierarchy_mask);
+ unsigned ref_body = panfrost_tiler_body_size(width, height, t->hierarchy_mask);
+ unsigned ref_size = ref_header + ref_body;
+
+ if (!((ref_header == body_offset) && (ref_size == t->polygon_list_size))) {
+ pandecode_msg("XXX: bad polygon list size (expected %d / 0x%x)\n",
+ ref_header, ref_size);
+ pandecode_prop("polygon_list_size = 0x%x", t->polygon_list_size);
+ pandecode_msg("body offset %d\n", body_offset);
}
+ /* The tiler heap has a start and end specified, so check that
+ * everything fits in a contiguous BO (otherwise, we risk out-of-bounds
+ * reads) */
+
+ MEMORY_PROP(t, heap_start);
+ assert(t->heap_end >= t->heap_start);
+
+ struct pandecode_mapped_memory *heap =
+ pandecode_find_mapped_gpu_mem_containing(t->heap_start);
+
+ unsigned heap_size = t->heap_end - t->heap_start;
+ assert(heap_size <= heap->length);
+
+ pandecode_msg("heap size %d\n", heap_size);
+
bool nonzero_weights = false;
for (unsigned w = 0; w < ARRAY_SIZE(t->weights); ++w) {
}
static void
-pandecode_replay_sfbd(uint64_t gpu_va, int job_no)
+pandecode_sfbd(uint64_t gpu_va, int job_no)
{
struct pandecode_mapped_memory *mem = pandecode_find_mapped_gpu_mem_containing(gpu_va);
const struct mali_single_framebuffer *PANDECODE_PTR_VAR(s, mem, (mali_ptr) gpu_va);
MEMORY_PROP(s, unknown_address_0);
const struct midgard_tiler_descriptor t = s->tiler;
- pandecode_midgard_tiler_descriptor(&t);
+ pandecode_midgard_tiler_descriptor(&t, s->width + 1, s->height + 1);
pandecode_indent--;
pandecode_log("};\n");
}
static void
-pandecode_replay_swizzle(unsigned swizzle)
+pandecode_swizzle(unsigned swizzle)
{
pandecode_prop("swizzle = %s | (%s << 3) | (%s << 6) | (%s << 9)",
- pandecode_channel_name((swizzle >> 0) & 0x7),
- pandecode_channel_name((swizzle >> 3) & 0x7),
- pandecode_channel_name((swizzle >> 6) & 0x7),
- pandecode_channel_name((swizzle >> 9) & 0x7));
+ pandecode_channel((swizzle >> 0) & 0x7),
+ pandecode_channel((swizzle >> 3) & 0x7),
+ pandecode_channel((swizzle >> 6) & 0x7),
+ pandecode_channel((swizzle >> 9) & 0x7));
}
static void
pandecode_log_decoded_flags(mfbd_fmt_flag_info, format.flags);
pandecode_log_cont(",\n");
- pandecode_replay_swizzle(format.swizzle);
+ pandecode_swizzle(format.swizzle);
+
+ pandecode_prop("no_preload = 0x%" PRIx32, format.no_preload);
- pandecode_prop("unk4 = 0x%" PRIx32, format.unk4);
+ if (format.zero)
+ pandecode_prop("zero = 0x%" PRIx32, format.zero);
pandecode_indent--;
pandecode_log("},\n");
}
static unsigned
-pandecode_replay_mfbd_bfr(uint64_t gpu_va, int job_no, bool with_render_targets)
+pandecode_mfbd_bfr(uint64_t gpu_va, int job_no, bool with_render_targets)
{
struct pandecode_mapped_memory *mem = pandecode_find_mapped_gpu_mem_containing(gpu_va);
const struct bifrost_framebuffer *PANDECODE_PTR_VAR(fb, mem, (mali_ptr) gpu_va);
pandecode_prop("unknown2 = 0x%x", fb->unknown2);
MEMORY_PROP(fb, scratchpad);
const struct midgard_tiler_descriptor t = fb->tiler;
- pandecode_midgard_tiler_descriptor(&t);
+ pandecode_midgard_tiler_descriptor(&t, fb->width1 + 1, fb->height1 + 1);
if (fb->zero3 || fb->zero4) {
pandecode_msg("framebuffer zeros tripped\n");
pandecode_log(".ds_afbc = {\n");
pandecode_indent++;
- MEMORY_PROP((&fbx->ds_afbc), depth_stencil_afbc_metadata);
+ MEMORY_PROP_DIR(fbx->ds_afbc, depth_stencil_afbc_metadata);
pandecode_prop("depth_stencil_afbc_stride = %d",
fbx->ds_afbc.depth_stencil_afbc_stride);
- MEMORY_PROP((&fbx->ds_afbc), depth_stencil);
+ MEMORY_PROP_DIR(fbx->ds_afbc, depth_stencil);
if (fbx->ds_afbc.zero1 || fbx->ds_afbc.padding) {
pandecode_msg("Depth/stencil AFBC zeros tripped\n");
pandecode_indent++;
if (fbx->ds_linear.depth) {
- MEMORY_PROP((&fbx->ds_linear), depth);
+ MEMORY_PROP_DIR(fbx->ds_linear, depth);
pandecode_prop("depth_stride = %d",
fbx->ds_linear.depth_stride);
}
if (fbx->ds_linear.stencil) {
- MEMORY_PROP((&fbx->ds_linear), stencil);
+ MEMORY_PROP_DIR(fbx->ds_linear, stencil);
pandecode_prop("stencil_stride = %d",
fbx->ds_linear.stencil_stride);
}
static void
pandecode_magic_divisor(uint32_t magic, unsigned shift, unsigned orig_divisor, unsigned extra)
{
+#if 0
/* Compute the modular inverse of `magic` with respect to 2^(32 -
* shift) the most lame way possible... just repeatedly add.
* Asymptoptically slow but nobody cares in practice, unless you have
unsigned padded_num_vertices = inverse / orig_divisor;
pandecode_msg("padded_num_vertices = %d\n", padded_num_vertices);
+#endif
}
static void
-pandecode_replay_attributes(const struct pandecode_mapped_memory *mem,
+pandecode_attributes(const struct pandecode_mapped_memory *mem,
mali_ptr addr, int job_no, char *suffix,
int count, bool varying)
{
char *prefix = varying ? "varyings" : "attributes";
+ if (!addr) {
+ pandecode_msg("no %s\n", prefix);
+ return;
+ }
+
union mali_attr *attr = pandecode_fetch_gpu_mem(mem, addr, sizeof(union mali_attr) * count);
char base[128];
pandecode_indent++;
unsigned mode = attr[i].elements & 7;
- pandecode_prop("elements = (%s_%d_p) | %s", base, i, pandecode_attr_mode_name(mode));
+ pandecode_prop("elements = (%s_%d_p) | %s", base, i, pandecode_attr_mode(mode));
pandecode_prop("shift = %d", attr[i].shift);
pandecode_prop("extra_flags = %d", attr[i].extra_flags);
pandecode_prop("stride = 0x%" PRIx32, attr[i].stride);
}
static mali_ptr
-pandecode_replay_shader_address(const char *name, mali_ptr ptr)
+pandecode_shader_address(const char *name, mali_ptr ptr)
{
/* TODO: Decode flags */
mali_ptr shader_ptr = ptr & ~15;
}
static void
-pandecode_replay_stencil(const char *name, const struct mali_stencil_test *stencil)
+pandecode_stencil(const char *name, const struct mali_stencil_test *stencil)
{
if (all_zero((unsigned *) stencil, sizeof(stencil) / sizeof(unsigned)))
return;
- const char *func = pandecode_func_name(stencil->func);
- const char *sfail = pandecode_stencil_op_name(stencil->sfail);
- const char *dpfail = pandecode_stencil_op_name(stencil->dpfail);
- const char *dppass = pandecode_stencil_op_name(stencil->dppass);
+ const char *func = pandecode_func(stencil->func);
+ const char *sfail = pandecode_stencil_op(stencil->sfail);
+ const char *dpfail = pandecode_stencil_op(stencil->dpfail);
+ const char *dppass = pandecode_stencil_op(stencil->dppass);
if (stencil->zero)
pandecode_msg("Stencil zero tripped: %X\n", stencil->zero);
}
static void
-pandecode_replay_blend_equation(const struct mali_blend_equation *blend)
+pandecode_blend_equation(const struct mali_blend_equation *blend)
{
if (blend->zero1)
pandecode_msg("Blend zero tripped: %X\n", blend->zero1);
b->constant, decode_bifrost_constant(b->constant));
/* TODO figure out blend shader enable bit */
- pandecode_replay_blend_equation(&b->equation);
+ pandecode_blend_equation(&b->equation);
pandecode_prop("unk2 = 0x%" PRIx16, b->unk2);
pandecode_prop("index = 0x%" PRIx16, b->index);
pandecode_prop("shader = 0x%" PRIx32, b->shader);
pandecode_indent++;
if (is_shader) {
- pandecode_replay_shader_address("shader", blend->shader);
+ pandecode_shader_address("shader", blend->shader);
} else {
- pandecode_replay_blend_equation(&blend->equation);
+ pandecode_blend_equation(&blend->equation);
pandecode_prop("constant = %f", blend->constant);
}
}
static int
-pandecode_replay_attribute_meta(int job_no, int count, const struct mali_vertex_tiler_postfix *v, bool varying, char *suffix)
+pandecode_attribute_meta(int job_no, int count, const struct mali_vertex_tiler_postfix *v, bool varying, char *suffix)
{
char base[128];
char *prefix = varying ? "varying" : "attribute";
if (attr_meta->index > max_index)
max_index = attr_meta->index;
- pandecode_replay_swizzle(attr_meta->swizzle);
- pandecode_prop("format = %s", pandecode_format_name(attr_meta->format));
+ pandecode_swizzle(attr_meta->swizzle);
+ pandecode_prop("format = %s", pandecode_format(attr_meta->format));
pandecode_prop("unknown1 = 0x%" PRIx64, (u64) attr_meta->unknown1);
pandecode_prop("unknown3 = 0x%" PRIx64, (u64) attr_meta->unknown3);
pandecode_indent--;
pandecode_log("};\n");
- return max_index;
+ return count ? (max_index + 1) : 0;
}
static void
-pandecode_replay_indices(uintptr_t pindices, uint32_t index_count, int job_no)
+pandecode_indices(uintptr_t pindices, uint32_t index_count, int job_no)
{
struct pandecode_mapped_memory *imem = pandecode_find_mapped_gpu_mem_containing(pindices);
}
static void
-pandecode_replay_vertex_tiler_prefix(struct mali_vertex_tiler_prefix *p, int job_no)
+pandecode_vertex_tiler_prefix(struct mali_vertex_tiler_prefix *p, int job_no, bool noninstanced)
{
pandecode_log_cont("{\n");
pandecode_indent++;
- pandecode_prop("invocation_count = 0x%" PRIx32, p->invocation_count);
- pandecode_prop("size_y_shift = %d", p->size_y_shift);
- pandecode_prop("size_z_shift = %d", p->size_z_shift);
- pandecode_prop("workgroups_x_shift = %d", p->workgroups_x_shift);
- pandecode_prop("workgroups_y_shift = %d", p->workgroups_y_shift);
- pandecode_prop("workgroups_z_shift = %d", p->workgroups_z_shift);
- pandecode_prop("workgroups_x_shift_2 = 0x%" PRIx32, p->workgroups_x_shift_2);
-
/* Decode invocation_count. See the comment before the definition of
* invocation_count for an explanation.
*/
- pandecode_msg("size: (%d, %d, %d)\n",
- bits(p->invocation_count, 0, p->size_y_shift) + 1,
- bits(p->invocation_count, p->size_y_shift, p->size_z_shift) + 1,
- bits(p->invocation_count, p->size_z_shift,
- p->workgroups_x_shift) + 1);
- pandecode_msg("workgroups: (%d, %d, %d)\n",
- bits(p->invocation_count, p->workgroups_x_shift,
- p->workgroups_y_shift) + 1,
- bits(p->invocation_count, p->workgroups_y_shift,
- p->workgroups_z_shift) + 1,
- bits(p->invocation_count, p->workgroups_z_shift,
- 32) + 1);
+
+ unsigned size_x = bits(p->invocation_count, 0, p->size_y_shift) + 1;
+ unsigned size_y = bits(p->invocation_count, p->size_y_shift, p->size_z_shift) + 1;
+ unsigned size_z = bits(p->invocation_count, p->size_z_shift, p->workgroups_x_shift) + 1;
+
+ unsigned groups_x = bits(p->invocation_count, p->workgroups_x_shift, p->workgroups_y_shift) + 1;
+ unsigned groups_y = bits(p->invocation_count, p->workgroups_y_shift, p->workgroups_z_shift) + 1;
+ unsigned groups_z = bits(p->invocation_count, p->workgroups_z_shift, 32) + 1;
+
+ /* Even though we have this decoded, we want to ensure that the
+ * representation is "unique" so we don't lose anything by printing only
+ * the final result. More specifically, we need to check that we were
+ * passed something in canonical form, since the definition per the
+ * hardware is inherently not unique. How? Well, take the resulting
+ * decode and pack it ourselves! If it is bit exact with what we
+ * decoded, we're good to go. */
+
+ struct mali_vertex_tiler_prefix ref;
+ panfrost_pack_work_groups_compute(&ref, groups_x, groups_y, groups_z, size_x, size_y, size_z, noninstanced);
+
+ bool canonical =
+ (p->invocation_count == ref.invocation_count) &&
+ (p->size_y_shift == ref.size_y_shift) &&
+ (p->size_z_shift == ref.size_z_shift) &&
+ (p->workgroups_x_shift == ref.workgroups_x_shift) &&
+ (p->workgroups_y_shift == ref.workgroups_y_shift) &&
+ (p->workgroups_z_shift == ref.workgroups_z_shift) &&
+ (p->workgroups_x_shift_2 == ref.workgroups_x_shift_2);
+
+ if (!canonical) {
+ pandecode_msg("XXX: non-canonical workgroups packing\n");
+ pandecode_msg("expected: %X, %d, %d, %d, %d, %d\n",
+ ref.invocation_count,
+ ref.size_y_shift,
+ ref.size_z_shift,
+ ref.workgroups_x_shift,
+ ref.workgroups_y_shift,
+ ref.workgroups_z_shift,
+ ref.workgroups_x_shift_2);
+
+ pandecode_prop("invocation_count = 0x%" PRIx32, p->invocation_count);
+ pandecode_prop("size_y_shift = %d", p->size_y_shift);
+ pandecode_prop("size_z_shift = %d", p->size_z_shift);
+ pandecode_prop("workgroups_x_shift = %d", p->workgroups_x_shift);
+ pandecode_prop("workgroups_y_shift = %d", p->workgroups_y_shift);
+ pandecode_prop("workgroups_z_shift = %d", p->workgroups_z_shift);
+ pandecode_prop("workgroups_x_shift_2 = %d", p->workgroups_x_shift_2);
+ }
+
+ /* Regardless, print the decode */
+ pandecode_msg("size (%d, %d, %d), count (%d, %d, %d)\n",
+ size_x, size_y, size_z,
+ groups_x, groups_y, groups_z);
/* TODO: Decode */
if (p->unknown_draw)
pandecode_prop("workgroups_x_shift_3 = 0x%" PRIx32, p->workgroups_x_shift_3);
- pandecode_prop("draw_mode = %s", pandecode_draw_mode_name(p->draw_mode));
+ if (p->draw_mode != MALI_DRAW_NONE)
+ pandecode_prop("draw_mode = %s", pandecode_draw_mode(p->draw_mode));
/* Index count only exists for tiler jobs anyway */
if (p->index_count)
pandecode_prop("index_count = MALI_POSITIVE(%" PRId32 ")", p->index_count + 1);
- if (p->negative_start)
- pandecode_prop("negative_start = %d", p->negative_start);
-
- DYN_MEMORY_PROP(p, job_no, indices);
+ if (p->offset_bias_correction)
+ pandecode_prop("offset_bias_correction = %d", p->offset_bias_correction);
if (p->zero1) {
pandecode_msg("Zero tripped\n");
}
static void
-pandecode_replay_uniform_buffers(mali_ptr pubufs, int ubufs_count, int job_no)
+pandecode_uniform_buffers(mali_ptr pubufs, int ubufs_count, int job_no)
{
struct pandecode_mapped_memory *umem = pandecode_find_mapped_gpu_mem_containing(pubufs);
pandecode_log("};\n");
}
- pandecode_log("struct mali_uniform_buffer_meta uniform_buffers_%d[] = {\n",
- job_no);
+ pandecode_log("struct mali_uniform_buffer_meta uniform_buffers_%"PRIx64"_%d[] = {\n",
+ pubufs, job_no);
pandecode_indent++;
for (int i = 0; i < ubufs_count; i++) {
}
static void
-pandecode_replay_scratchpad(uintptr_t pscratchpad, int job_no, char *suffix)
+pandecode_scratchpad(uintptr_t pscratchpad, int job_no, char *suffix)
{
struct pandecode_mapped_memory *mem = pandecode_find_mapped_gpu_mem_containing(pscratchpad);
pandecode_log("};\n");
}
+static unsigned shader_id = 0;
+
static void
pandecode_shader_disassemble(mali_ptr shader_ptr, int shader_no, int type,
- bool is_bifrost)
+ bool is_bifrost, unsigned nr_regs)
{
struct pandecode_mapped_memory *mem = pandecode_find_mapped_gpu_mem_containing(shader_ptr);
uint8_t *PANDECODE_PTR_VAR(code, mem, shader_ptr);
printf("\n\n");
+ char prefix[512];
+
+ snprintf(prefix, sizeof(prefix) - 1, "shader%d - %s shader: ",
+ shader_id++,
+ (type == JOB_TYPE_TILER) ? "FRAGMENT" : "VERTEX");
+
if (is_bifrost) {
disassemble_bifrost(code, sz, false);
} else {
- disassemble_midgard(code, sz);
+ disassemble_midgard(code, sz, true, nr_regs, prefix);
}
printf("\n\n");
}
static void
-pandecode_replay_vertex_tiler_postfix_pre(const struct mali_vertex_tiler_postfix *p,
+pandecode_vertex_tiler_postfix_pre(const struct mali_vertex_tiler_postfix *p,
int job_no, enum mali_job_type job_type,
char *suffix, bool is_bifrost)
{
* info about the scratchpad.
*/
if (is_bifrost)
- pandecode_replay_scratchpad(p->framebuffer & ~FBD_TYPE, job_no, suffix);
+ pandecode_scratchpad(p->framebuffer & ~FBD_TYPE, job_no, suffix);
else if (p->framebuffer & MALI_MFBD)
- rt_count = pandecode_replay_mfbd_bfr((u64) ((uintptr_t) p->framebuffer) & FBD_MASK, job_no, false);
+ rt_count = pandecode_mfbd_bfr((u64) ((uintptr_t) p->framebuffer) & FBD_MASK, job_no, false);
else if (job_type == JOB_TYPE_COMPUTE)
pandecode_compute_fbd((u64) (uintptr_t) p->framebuffer, job_no);
else
- pandecode_replay_sfbd((u64) (uintptr_t) p->framebuffer, job_no);
+ pandecode_sfbd((u64) (uintptr_t) p->framebuffer, job_no);
int varying_count = 0, attribute_count = 0, uniform_count = 0, uniform_buffer_count = 0;
int texture_count = 0, sampler_count = 0;
uniform_buffer_count = s->midgard1.uniform_buffer_count;
}
- mali_ptr shader_ptr = pandecode_replay_shader_address("shader", s->shader);
+ mali_ptr shader_ptr = pandecode_shader_address("shader", s->shader);
pandecode_prop("texture_count = %" PRId16, s->texture_count);
pandecode_prop("sampler_count = %" PRId16, s->sampler_count);
pandecode_prop("attribute_count = %" PRId16, s->attribute_count);
pandecode_prop("varying_count = %" PRId16, s->varying_count);
+ unsigned nr_registers = 0;
+
if (is_bifrost) {
pandecode_log(".bifrost1 = {\n");
pandecode_indent++;
pandecode_prop("uniform_count = %" PRId16, s->midgard1.uniform_count);
pandecode_prop("uniform_buffer_count = %" PRId16, s->midgard1.uniform_buffer_count);
pandecode_prop("work_count = %" PRId16, s->midgard1.work_count);
+ nr_registers = s->midgard1.work_count;
pandecode_log(".flags = ");
pandecode_log_decoded_flags(shader_midgard1_flag_info, s->midgard1.flags);
/* We're not quite sure what these flags mean without the depth test, if anything */
if (unknown2_3 & (MALI_DEPTH_TEST | MALI_DEPTH_FUNC_MASK)) {
- const char *func = pandecode_func_name(MALI_GET_DEPTH_FUNC(unknown2_3));
+ const char *func = pandecode_func(MALI_GET_DEPTH_FUNC(unknown2_3));
unknown2_3 &= ~MALI_DEPTH_FUNC_MASK;
pandecode_log_cont("MALI_DEPTH_FUNC(%s) | ", func);
pandecode_prop("stencil_mask_back = 0x%02X", s->stencil_mask_back);
}
- pandecode_replay_stencil("front", &s->stencil_front);
- pandecode_replay_stencil("back", &s->stencil_back);
+ pandecode_stencil("front", &s->stencil_front);
+ pandecode_stencil("back", &s->stencil_back);
if (is_bifrost) {
pandecode_log(".bifrost2 = {\n");
else
shader = pandecode_midgard_blend_mrt(blend_base, job_no, i);
- if (shader)
- pandecode_shader_disassemble(shader, job_no, job_type, false);
+ if (shader & ~0xF)
+ pandecode_shader_disassemble(shader, job_no, job_type, false, 0);
}
}
- pandecode_shader_disassemble(shader_ptr, job_no, job_type, is_bifrost);
+ if (shader_ptr & ~0xF)
+ pandecode_shader_disassemble(shader_ptr, job_no, job_type, is_bifrost, nr_registers);
} else
pandecode_msg("<no shader>\n");
struct pandecode_mapped_memory *fmem = pandecode_find_mapped_gpu_mem_containing(p->viewport);
struct mali_viewport *PANDECODE_PTR_VAR(f, fmem, p->viewport);
- pandecode_log("struct mali_viewport viewport_%d%s = {\n", job_no, suffix);
+ pandecode_log("struct mali_viewport viewport_%"PRIx64"_%d%s = {\n", p->viewport, job_no, suffix);
pandecode_indent++;
pandecode_prop("clip_minx = %f", f->clip_minx);
}
if (p->attribute_meta) {
- unsigned max_attr_index = pandecode_replay_attribute_meta(job_no, attribute_count, p, false, suffix);
+ unsigned max_attr_index = pandecode_attribute_meta(job_no, attribute_count, p, false, suffix);
attr_mem = pandecode_find_mapped_gpu_mem_containing(p->attributes);
- pandecode_replay_attributes(attr_mem, p->attributes, job_no, suffix, max_attr_index + 1, false);
+ pandecode_attributes(attr_mem, p->attributes, job_no, suffix, max_attr_index + 1, false);
}
/* Varyings are encoded like attributes but not actually sent; we just
* pass a zero buffer with the right stride/size set, (or whatever)
* since the GPU will write to it itself */
+ if (p->varying_meta) {
+ varying_count = pandecode_attribute_meta(job_no, varying_count, p, true, suffix);
+ }
+
if (p->varyings) {
attr_mem = pandecode_find_mapped_gpu_mem_containing(p->varyings);
/* Number of descriptors depends on whether there are
* non-internal varyings */
- pandecode_replay_attributes(attr_mem, p->varyings, job_no, suffix, varying_count > 1 ? 4 : 1, true);
- }
-
- if (p->varying_meta) {
- pandecode_replay_attribute_meta(job_no, varying_count, p, true, suffix);
+ pandecode_attributes(attr_mem, p->varyings, job_no, suffix, varying_count, true);
}
bool is_compute = job_type == JOB_TYPE_COMPUTE;
}
if (p->uniform_buffers) {
- pandecode_replay_uniform_buffers(p->uniform_buffers, uniform_buffer_count, job_no);
+ pandecode_uniform_buffers(p->uniform_buffers, uniform_buffer_count, job_no);
}
if (p->texture_trampoline) {
if (mmem) {
mali_ptr *PANDECODE_PTR_VAR(u, mmem, p->texture_trampoline);
- pandecode_log("uint64_t texture_trampoline_%d[] = {\n", job_no);
+ pandecode_log("uint64_t texture_trampoline_%"PRIx64"_%d[] = {\n", p->texture_trampoline, job_no);
pandecode_indent++;
for (int tex = 0; tex < texture_count; ++tex) {
pandecode_log(".format = {\n");
pandecode_indent++;
- pandecode_replay_swizzle(f.swizzle);
- pandecode_prop("format = %s", pandecode_format_name(f.format));
+ pandecode_swizzle(f.swizzle);
+ pandecode_prop("format = %s", pandecode_format(f.format));
pandecode_prop("type = %s", pandecode_texture_type(f.type));
pandecode_prop("srgb = %" PRId32, f.srgb);
pandecode_prop("unknown1 = %" PRId32, f.unknown1);
pandecode_indent--;
pandecode_log("},\n");
- pandecode_replay_swizzle(t->swizzle);
+ pandecode_swizzle(t->swizzle);
if (t->swizzle_zero) {
/* Shouldn't happen */
for (int i = 0; i < sampler_count; ++i) {
s = pandecode_fetch_gpu_mem(smem, d + sizeof(*s) * i, sizeof(*s));
- pandecode_log("struct mali_sampler_descriptor sampler_descriptor_%d_%d = {\n", job_no, i);
+ pandecode_log("struct mali_sampler_descriptor sampler_descriptor_%"PRIx64"_%d_%d = {\n", d + sizeof(*s) * i, job_no, i);
pandecode_indent++;
- /* Only the lower two bits are understood right now; the rest we display as hex */
- pandecode_log(".filter_mode = MALI_TEX_MIN(%s) | MALI_TEX_MAG(%s) | 0x%" PRIx32",\n",
- MALI_FILTER_NAME(s->filter_mode & MALI_TEX_MIN_MASK),
- MALI_FILTER_NAME(s->filter_mode & MALI_TEX_MAG_MASK),
- s->filter_mode & ~3);
+ pandecode_log(".filter_mode = ");
+ pandecode_log_decoded_flags(sampler_flag_info, s->filter_mode);
+ pandecode_log_cont(",\n");
pandecode_prop("min_lod = FIXED_16(%f)", DECODE_FIXED_16(s->min_lod));
pandecode_prop("max_lod = FIXED_16(%f)", DECODE_FIXED_16(s->max_lod));
- pandecode_prop("wrap_s = %s", pandecode_wrap_mode_name(s->wrap_s));
- pandecode_prop("wrap_t = %s", pandecode_wrap_mode_name(s->wrap_t));
- pandecode_prop("wrap_r = %s", pandecode_wrap_mode_name(s->wrap_r));
+ pandecode_prop("wrap_s = %s", pandecode_wrap_mode(s->wrap_s));
+ pandecode_prop("wrap_t = %s", pandecode_wrap_mode(s->wrap_t));
+ pandecode_prop("wrap_r = %s", pandecode_wrap_mode(s->wrap_r));
- pandecode_prop("compare_func = %s", pandecode_alt_func_name(s->compare_func));
+ pandecode_prop("compare_func = %s", pandecode_alt_func(s->compare_func));
if (s->zero || s->zero2) {
pandecode_msg("Zero tripped\n");
}
static void
-pandecode_replay_vertex_tiler_postfix(const struct mali_vertex_tiler_postfix *p, int job_no, bool is_bifrost)
+pandecode_vertex_tiler_postfix(const struct mali_vertex_tiler_postfix *p, int job_no, bool is_bifrost)
{
- pandecode_log_cont("{\n");
+ if (!(p->position_varying || p->occlusion_counter || p->flags))
+ return;
+
+ pandecode_log(".postfix = {\n");
pandecode_indent++;
MEMORY_PROP(p, position_varying);
- DYN_MEMORY_PROP(p, job_no, uniform_buffers);
- DYN_MEMORY_PROP(p, job_no, texture_trampoline);
- DYN_MEMORY_PROP(p, job_no, sampler_descriptor);
- DYN_MEMORY_PROP(p, job_no, uniforms);
- DYN_MEMORY_PROP(p, job_no, attributes);
- DYN_MEMORY_PROP(p, job_no, attribute_meta);
- DYN_MEMORY_PROP(p, job_no, varyings);
- DYN_MEMORY_PROP(p, job_no, varying_meta);
- DYN_MEMORY_PROP(p, job_no, viewport);
- DYN_MEMORY_PROP(p, job_no, occlusion_counter);
+ MEMORY_PROP(p, occlusion_counter);
- if (is_bifrost)
- pandecode_prop("framebuffer = scratchpad_%d_p", job_no);
- else
- pandecode_prop("framebuffer = framebuffer_%d_p | %s", job_no, p->framebuffer & MALI_MFBD ? "MALI_MFBD" : "0");
-
- pandecode_prop("_shader_upper = (shader_meta_%d_p) >> 4", job_no);
- pandecode_prop("flags = %d", p->flags);
+ if (p->flags)
+ pandecode_prop("flags = %d", p->flags);
pandecode_indent--;
pandecode_log("},\n");
}
static void
-pandecode_replay_vertex_only_bfr(struct bifrost_vertex_only *v)
+pandecode_vertex_only_bfr(struct bifrost_vertex_only *v)
{
pandecode_log_cont("{\n");
pandecode_indent++;
}
static void
-pandecode_replay_tiler_heap_meta(mali_ptr gpu_va, int job_no)
+pandecode_tiler_heap_meta(mali_ptr gpu_va, int job_no)
{
struct pandecode_mapped_memory *mem = pandecode_find_mapped_gpu_mem_containing(gpu_va);
/* this might point to the beginning of another buffer, when it's
* really the end of the tiler heap buffer, so we have to be careful
- * here.
+ * here. but for zero length, we need the same pointer.
*/
- char *a = pointer_as_memory_reference(h->tiler_heap_end - 1);
- pandecode_prop("tiler_heap_end = %s + 1", a);
- free(a);
+
+ if (h->tiler_heap_end == h->tiler_heap_start) {
+ MEMORY_PROP(h, tiler_heap_start);
+ } else {
+ char *a = pointer_as_memory_reference(h->tiler_heap_end - 1);
+ pandecode_prop("tiler_heap_end = %s + 1", a);
+ free(a);
+ }
pandecode_indent--;
pandecode_log("};\n");
}
static void
-pandecode_replay_tiler_meta(mali_ptr gpu_va, int job_no)
+pandecode_tiler_meta(mali_ptr gpu_va, int job_no)
{
struct pandecode_mapped_memory *mem = pandecode_find_mapped_gpu_mem_containing(gpu_va);
const struct bifrost_tiler_meta *PANDECODE_PTR_VAR(t, mem, gpu_va);
- pandecode_replay_tiler_heap_meta(t->tiler_heap_meta, job_no);
+ pandecode_tiler_heap_meta(t->tiler_heap_meta, job_no);
pandecode_log("struct bifrost_tiler_meta tiler_meta_%d = {\n", job_no);
pandecode_indent++;
pandecode_prop("width = MALI_POSITIVE(%d)", t->width + 1);
pandecode_prop("height = MALI_POSITIVE(%d)", t->height + 1);
- DYN_MEMORY_PROP(t, job_no, tiler_heap_meta);
for (int i = 0; i < 12; i++) {
if (t->zeros[i] != 0) {
}
static void
-pandecode_replay_gl_enables(uint32_t gl_enables, int job_type)
+pandecode_gl_enables(uint32_t gl_enables, int job_type)
{
pandecode_log(".gl_enables = ");
}
static void
-pandecode_replay_primitive_size(union midgard_primitive_size u, bool constant)
+pandecode_primitive_size(union midgard_primitive_size u, bool constant)
{
if (u.pointer == 0x0)
return;
}
static void
-pandecode_replay_tiler_only_bfr(const struct bifrost_tiler_only *t, int job_no)
+pandecode_tiler_only_bfr(const struct bifrost_tiler_only *t, int job_no)
{
pandecode_log_cont("{\n");
pandecode_indent++;
/* TODO: gl_PointSize on Bifrost */
- pandecode_replay_primitive_size(t->primitive_size, true);
+ pandecode_primitive_size(t->primitive_size, true);
- DYN_MEMORY_PROP(t, job_no, tiler_meta);
- pandecode_replay_gl_enables(t->gl_enables, JOB_TYPE_TILER);
+ pandecode_gl_enables(t->gl_enables, JOB_TYPE_TILER);
if (t->zero1 || t->zero2 || t->zero3 || t->zero4 || t->zero5
|| t->zero6 || t->zero7 || t->zero8) {
}
static int
-pandecode_replay_vertex_job_bfr(const struct mali_job_descriptor_header *h,
+pandecode_vertex_job_bfr(const struct mali_job_descriptor_header *h,
const struct pandecode_mapped_memory *mem,
mali_ptr payload, int job_no)
{
struct bifrost_payload_vertex *PANDECODE_PTR_VAR(v, mem, payload);
- pandecode_replay_vertex_tiler_postfix_pre(&v->postfix, job_no, h->job_type, "", true);
+ pandecode_vertex_tiler_postfix_pre(&v->postfix, job_no, h->job_type, "", true);
pandecode_log("struct bifrost_payload_vertex payload_%d = {\n", job_no);
pandecode_indent++;
pandecode_log(".prefix = ");
- pandecode_replay_vertex_tiler_prefix(&v->prefix, job_no);
+ pandecode_vertex_tiler_prefix(&v->prefix, job_no, false);
pandecode_log(".vertex = ");
- pandecode_replay_vertex_only_bfr(&v->vertex);
+ pandecode_vertex_only_bfr(&v->vertex);
- pandecode_log(".postfix = ");
- pandecode_replay_vertex_tiler_postfix(&v->postfix, job_no, true);
+ pandecode_vertex_tiler_postfix(&v->postfix, job_no, true);
pandecode_indent--;
pandecode_log("};\n");
}
static int
-pandecode_replay_tiler_job_bfr(const struct mali_job_descriptor_header *h,
+pandecode_tiler_job_bfr(const struct mali_job_descriptor_header *h,
const struct pandecode_mapped_memory *mem,
mali_ptr payload, int job_no)
{
struct bifrost_payload_tiler *PANDECODE_PTR_VAR(t, mem, payload);
- pandecode_replay_vertex_tiler_postfix_pre(&t->postfix, job_no, h->job_type, "", true);
+ pandecode_vertex_tiler_postfix_pre(&t->postfix, job_no, h->job_type, "", true);
- pandecode_replay_indices(t->prefix.indices, t->prefix.index_count, job_no);
- pandecode_replay_tiler_meta(t->tiler.tiler_meta, job_no);
+ pandecode_indices(t->prefix.indices, t->prefix.index_count, job_no);
+ pandecode_tiler_meta(t->tiler.tiler_meta, job_no);
pandecode_log("struct bifrost_payload_tiler payload_%d = {\n", job_no);
pandecode_indent++;
pandecode_log(".prefix = ");
- pandecode_replay_vertex_tiler_prefix(&t->prefix, job_no);
+ pandecode_vertex_tiler_prefix(&t->prefix, job_no, false);
pandecode_log(".tiler = ");
- pandecode_replay_tiler_only_bfr(&t->tiler, job_no);
+ pandecode_tiler_only_bfr(&t->tiler, job_no);
- pandecode_log(".postfix = ");
- pandecode_replay_vertex_tiler_postfix(&t->postfix, job_no, true);
+ pandecode_vertex_tiler_postfix(&t->postfix, job_no, true);
pandecode_indent--;
pandecode_log("};\n");
}
static int
-pandecode_replay_vertex_or_tiler_job_mdg(const struct mali_job_descriptor_header *h,
+pandecode_vertex_or_tiler_job_mdg(const struct mali_job_descriptor_header *h,
const struct pandecode_mapped_memory *mem,
mali_ptr payload, int job_no)
{
struct midgard_payload_vertex_tiler *PANDECODE_PTR_VAR(v, mem, payload);
- pandecode_replay_vertex_tiler_postfix_pre(&v->postfix, job_no, h->job_type, "", false);
+ pandecode_vertex_tiler_postfix_pre(&v->postfix, job_no, h->job_type, "", false);
- pandecode_replay_indices(v->prefix.indices, v->prefix.index_count, job_no);
+ pandecode_indices(v->prefix.indices, v->prefix.index_count, job_no);
pandecode_log("struct midgard_payload_vertex_tiler payload_%d = {\n", job_no);
pandecode_indent++;
bool has_primitive_pointer = v->prefix.unknown_draw & MALI_DRAW_VARYING_SIZE;
- pandecode_replay_primitive_size(v->primitive_size, !has_primitive_pointer);
+ pandecode_primitive_size(v->primitive_size, !has_primitive_pointer);
+
+ bool instanced = v->instance_shift || v->instance_odd;
+ bool is_graphics = (h->job_type == JOB_TYPE_VERTEX) || (h->job_type == JOB_TYPE_TILER);
pandecode_log(".prefix = ");
- pandecode_replay_vertex_tiler_prefix(&v->prefix, job_no);
+ pandecode_vertex_tiler_prefix(&v->prefix, job_no, !instanced && is_graphics);
- pandecode_replay_gl_enables(v->gl_enables, h->job_type);
+ pandecode_gl_enables(v->gl_enables, h->job_type);
if (v->instance_shift || v->instance_odd) {
pandecode_prop("instance_shift = 0x%d /* %d */",
pandecode_padded_vertices(v->instance_shift, v->instance_odd);
}
- if (v->draw_start)
- pandecode_prop("draw_start = %d", v->draw_start);
-
-#ifndef __LP64__
-
- if (v->zero3) {
- pandecode_msg("Zero tripped\n");
- pandecode_prop("zero3 = 0x%" PRIx32, v->zero3);
- }
-
-#endif
+ if (v->offset_start)
+ pandecode_prop("offset_start = %d", v->offset_start);
if (v->zero5) {
pandecode_msg("Zero tripped\n");
pandecode_prop("zero5 = 0x%" PRIx64, v->zero5);
}
- pandecode_log(".postfix = ");
- pandecode_replay_vertex_tiler_postfix(&v->postfix, job_no, false);
+ pandecode_vertex_tiler_postfix(&v->postfix, job_no, false);
pandecode_indent--;
pandecode_log("};\n");
}
static int
-pandecode_replay_fragment_job(const struct pandecode_mapped_memory *mem,
+pandecode_fragment_job(const struct pandecode_mapped_memory *mem,
mali_ptr payload, int job_no,
bool is_bifrost)
{
* including Gxx). In any event, there's some field shuffling
* that we haven't looked into yet. */
- pandecode_replay_sfbd(s->framebuffer & FBD_MASK, job_no);
+ pandecode_sfbd(s->framebuffer & FBD_MASK, job_no);
fbd_dumped = true;
} else if ((s->framebuffer & FBD_TYPE) == MALI_MFBD) {
/* We don't know if Bifrost supports SFBD's at all, since the
* trace, which appears impossible.
*/
- pandecode_replay_mfbd_bfr(s->framebuffer & FBD_MASK, job_no, true);
+ pandecode_mfbd_bfr(s->framebuffer & FBD_MASK, job_no, true);
fbd_dumped = true;
}
const char *fbd_type = s->framebuffer & MALI_MFBD ? "MALI_MFBD" : "MALI_SFBD";
+ /* TODO: Decode */
+ unsigned extra_flags = (s->framebuffer & ~FBD_MASK) & ~MALI_MFBD;
+
if (fbd_dumped)
- pandecode_prop("framebuffer = framebuffer_%d_p | %s", job_no, fbd_type);
+ pandecode_prop("framebuffer = framebuffer_%d_p | %s | 0x%X", job_no,
+ fbd_type, extra_flags);
else
- pandecode_prop("framebuffer = %s | %s", pointer_as_memory_reference(p), fbd_type);
+ pandecode_prop("framebuffer = %s | %s | 0x%X", pointer_as_memory_reference(p),
+ fbd_type, extra_flags);
pandecode_indent--;
pandecode_log("};\n");
static int job_descriptor_number = 0;
int
-pandecode_replay_jc(mali_ptr jc_gpu_va, bool bifrost)
+pandecode_jc(mali_ptr jc_gpu_va, bool bifrost)
{
struct mali_job_descriptor_header *h;
pandecode_log("struct mali_job_descriptor_header job_%"PRIx64"_%d = {\n", jc_gpu_va, job_no);
pandecode_indent++;
- pandecode_prop("job_type = %s", pandecode_job_type_name(h->job_type));
+ pandecode_prop("job_type = %s", pandecode_job_type(h->job_type));
/* Save for next job fixing */
last_size = h->job_descriptor_size;
if (h->job_descriptor_size)
pandecode_prop("job_descriptor_size = %d", h->job_descriptor_size);
- if (h->exception_status != 0x1)
- pandecode_prop("exception_status = %x (source ID: 0x%x access: 0x%x exception: 0x%x)",
+ if (h->exception_status && h->exception_status != 0x1)
+ pandecode_prop("exception_status = %x (source ID: 0x%x access: %s exception: 0x%x)",
h->exception_status,
(h->exception_status >> 16) & 0xFFFF,
- (h->exception_status >> 8) & 0x3,
+ pandecode_exception_access((h->exception_status >> 8) & 0x3),
h->exception_status & 0xFF);
if (h->first_incomplete_task)
case JOB_TYPE_COMPUTE:
if (bifrost) {
if (h->job_type == JOB_TYPE_TILER)
- pandecode_replay_tiler_job_bfr(h, mem, payload_ptr, job_no);
+ pandecode_tiler_job_bfr(h, mem, payload_ptr, job_no);
else
- pandecode_replay_vertex_job_bfr(h, mem, payload_ptr, job_no);
+ pandecode_vertex_job_bfr(h, mem, payload_ptr, job_no);
} else
- pandecode_replay_vertex_or_tiler_job_mdg(h, mem, payload_ptr, job_no);
+ pandecode_vertex_or_tiler_job_mdg(h, mem, payload_ptr, job_no);
break;
case JOB_TYPE_FRAGMENT:
- pandecode_replay_fragment_job(mem, payload_ptr, job_no, bifrost);
+ pandecode_fragment_job(mem, payload_ptr, job_no, bifrost);
break;
default: