2 * © Copyright 2017-2018 Alyssa Rosenzweig
3 * © Copyright 2017-2018 Connor Abbott
4 * © Copyright 2017-2018 Lyude Paul
5 * © Copyright 2019 Collabora, Ltd.
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the "Software"),
9 * to deal in the Software without restriction, including without limitation
10 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
11 * and/or sell copies of the Software, and to permit persons to whom the
12 * Software is furnished to do so, subject to the following conditions:
14 * The above copyright notice and this permission notice (including the next
15 * paragraph) shall be included in all copies or substantial portions of the
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
21 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
23 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
28 #ifndef __PANFROST_JOB_H__
29 #define __PANFROST_JOB_H__
/* GPU virtual address. GPU pointers are 64-bit regardless of CPU word size. */
typedef uint64_t mali_ptr;
/* Applies to tiler_gl_enables */

#define MALI_OCCLUSION_QUERY (1 << 3)
#define MALI_OCCLUSION_PRECISE (1 << 4)

/* Set for a glFrontFace(GL_CCW) in a Y=0=TOP coordinate system (like Gallium).
 * In OpenGL, this would correspond to glFrontFace(GL_CW). Mesa and the blob
 * disagree about how to do viewport flipping, so the blob actually sets this
 * for GL_CW but then has a negative viewport stride */

#define MALI_FRONT_CCW_TOP (1 << 5)

#define MALI_CULL_FACE_FRONT (1 << 6)
#define MALI_CULL_FACE_BACK (1 << 7)
/* Treatment of the non-dominant operand in fixed-function blending
 * (see struct mali_blend_mode) */
enum mali_nondominant_mode {
        MALI_BLEND_NON_MIRROR = 0,
        MALI_BLEND_NON_ZERO = 1
};
/* Selects which of source/destination is the "dominant" blend operand */
enum mali_dominant_blend {
        MALI_BLEND_DOM_SOURCE = 0,
        MALI_BLEND_DOM_DESTINATION = 1
};
/* Blend factor applied to the dominant operand */
enum mali_dominant_factor {
        MALI_DOMINANT_UNK0 = 0,
        MALI_DOMINANT_ZERO = 1,
        MALI_DOMINANT_SRC_COLOR = 2,
        MALI_DOMINANT_DST_COLOR = 3,
        MALI_DOMINANT_UNK4 = 4,
        MALI_DOMINANT_SRC_ALPHA = 5,
        MALI_DOMINANT_DST_ALPHA = 6,
        MALI_DOMINANT_CONSTANT = 7
};
/* Modifier applied to the blend equation (see mali_blend_mode.clip_modifier) */
enum mali_blend_modifier {
        MALI_BLEND_MOD_UNK0 = 0,
        MALI_BLEND_MOD_NORMAL = 1,
        MALI_BLEND_MOD_SOURCE_ONE = 2,
        MALI_BLEND_MOD_DEST_ONE = 3
};
/* Fixed-function blend programming, expressed as packed bitfields in terms of
 * a dominant operand, a non-dominant operand, and per-operand negate /
 * complement controls. Hardware layout — do not reorder fields. */
struct mali_blend_mode {
        enum mali_blend_modifier clip_modifier : 2;
        unsigned unused_0 : 1;
        unsigned negate_source : 1;

        enum mali_dominant_blend dominant : 1;

        enum mali_nondominant_mode nondominant_mode : 1;

        unsigned unused_1 : 1;

        unsigned negate_dest : 1;

        enum mali_dominant_factor dominant_factor : 3;
        unsigned complement_dominant : 1;
} __attribute__((packed));
101 /* Compressed per-pixel formats. Each of these formats expands to one to four
102 * floating-point or integer numbers, as defined by the OpenGL specification.
103 * There are various places in OpenGL where the user can specify a compressed
104 * format in memory, which all use the same 8-bit enum in the various
105 * descriptors, although different hardware units support different formats.
/* The top 3 bits specify how the bits of each component are interpreted. */
#define MALI_FORMAT_COMPRESSED (0 << 5)

/* e.g. R11F_G11F_B10F */
#define MALI_FORMAT_SPECIAL (2 << 5)

/* signed normalized, e.g. RGBA8_SNORM */
#define MALI_FORMAT_SNORM (3 << 5)

#define MALI_FORMAT_UINT (4 << 5)

/* e.g. RGBA8 and RGBA32F */
#define MALI_FORMAT_UNORM (5 << 5)

/* e.g. RGBA8I and RGBA16F */
#define MALI_FORMAT_SINT (6 << 5)

/* These formats seem to largely duplicate the others. They're used at least
 * for Bifrost framebuffer output. */
#define MALI_FORMAT_SPECIAL2 (7 << 5)
#define MALI_EXTRACT_TYPE(fmt) ((fmt) & 0xe0)

/* If the high 3 bits are 3 to 6 these two bits say how many components
 * there are. Arguments are parenthesized so expression arguments (e.g.
 * `count + 1`) group correctly. */
#define MALI_NR_CHANNELS(n) (((n) - 1) << 3)
#define MALI_EXTRACT_CHANNELS(fmt) ((((fmt) >> 3) & 3) + 1)

/* If the high 3 bits are 3 to 6, then the low 3 bits say how big each
 * component is, except the special MALI_CHANNEL_FLOAT which overrides what the
 * low bits would otherwise mean. */
#define MALI_CHANNEL_4 2
#define MALI_CHANNEL_8 3
#define MALI_CHANNEL_16 4
#define MALI_CHANNEL_32 5

/* For MALI_FORMAT_SINT it means a half-float (e.g. RG16F). For
 * MALI_FORMAT_UNORM, it means a 32-bit float. */
#define MALI_CHANNEL_FLOAT 7
#define MALI_EXTRACT_BITS(fmt) ((fmt) & 0x7)
159 /* The raw Midgard blend payload can either be an equation or a shader
160 * address, depending on the context */
162 union midgard_blend
{
166 struct mali_blend_equation_packed equation
;
/* Per-render-target blend descriptor on Midgard */
struct midgard_blend_rt {
        /* Packed blend flags (see mali_blend_flags_packed) */
        struct mali_blend_flags_packed flags;

        /* Equation or shader address, selected by the flags */
        union midgard_blend blend;
} __attribute__((packed));
/* On Bifrost systems (all MRT), each render target gets one of these
 * descriptors. Type of the shader output variable for blending. */

enum bifrost_shader_type {
        BIFROST_BLEND_F16 = 0,
        BIFROST_BLEND_F32 = 1,
        BIFROST_BLEND_I32 = 2,
        BIFROST_BLEND_U32 = 3,
        BIFROST_BLEND_I16 = 4,
        BIFROST_BLEND_U16 = 5
};
/* Maximum number of render targets (MRT) per framebuffer on Bifrost */
#define BIFROST_MAX_RENDER_TARGET_COUNT 8
/* Per-render-target blend descriptor on Bifrost.
 * NOTE(review): the comments below reference fields (blend constant hi/lo
 * bytes, slot index, shader-type bits, low 32 bits of the blend shader
 * pointer) that are not present in this view of the struct — confirm the full
 * descriptor layout before relying on sizeof/offsets. */
struct bifrost_blend_rt {
        /* This is likely an analogue of the flags on
         * midgard_blend_rt */
        u16 flags; // = 0x200

        /* Single-channel blend constants are encoded in a sort of
         * fixed-point. Basically, the float is mapped to a byte, becoming
         * a high byte, and then the lower-byte is added for precision.
         * For the original float f:
         *
         * f = (constant_hi / 255) + (constant_lo / 65535)
         *
         * constant_hi = int(f * 255)
         * constant_lo = 65535*f - (65535/255) * constant_hi */

        struct mali_blend_equation_packed equation;

        /* Observed values:
         * - 0x3 when this slot is unused (everything else is 0 except the index)
         * - 0x11 when this is the fourth slot (and it's used)
         * - 0 when there is a blend shader */

        /* increments from 0 to 3 */

        /* Swizzles observed so far:
         * - R001 for 1-component formats
         * - RG01 for 2-component formats
         * - RGB1 for 3-component formats
         * - RGBA for 4-component formats */

        enum mali_format format : 8;

        /* Type of the shader output variable. Note, this can
         * be different from the format.
         * enum bifrost_shader_type */

        /* Only the low 32 bits of the blend shader are stored, the
         * high 32 bits are implicitly the same as the original shader.
         * According to the kernel driver, the program counter for
         * shaders is actually only 24 bits, so shaders cannot cross
         * the 2^24-byte boundary, and neither can the blend shader.
         * The blob handles this by allocating a 2^24 byte pool for
         * shaders, and making sure that any blend shaders are stored
         * in the same pool as the original shader. The kernel will
         * make sure this allocation is aligned to 2^24 bytes. */
} __attribute__((packed));
/* Possible values for job_descriptor_size */

#define MALI_JOB_32 0
#define MALI_JOB_64 1
/* Header at the start of every job descriptor submitted to the hardware. */
struct mali_job_descriptor_header {
        /* Presumably written back by hardware on a fault — TODO confirm */
        u32 exception_status;
        u32 first_incomplete_task;

        /* MALI_JOB_32 or MALI_JOB_64, selecting descriptor pointer width */
        u8 job_descriptor_size : 1;
        enum mali_job_type job_type : 7;
        u8 unknown_flags : 7;

        /* Presumably indices of jobs this job waits on; zero for no
         * dependency — TODO confirm semantics against the kernel driver */
        u16 job_dependency_index_1;
        u16 job_dependency_index_2;

        /* NOTE(review): gaps in this view suggest missing fields (e.g. a
         * next-job pointer) — confirm against the full header layout. */
} __attribute__((packed));
/* Details about write_value from panfrost igt tests which use it as a generic
 * dword write primitive */

#define MALI_WRITE_VALUE_ZERO 3
/* Payload for a write-value job.
 * NOTE(review): the target address and immediate-value fields are not visible
 * in this view — confirm the full payload layout. */
struct mali_payload_write_value {
        /* One of the MALI_WRITE_VALUE_* selectors (e.g. MALI_WRITE_VALUE_ZERO) */
        u32 value_descriptor;
} __attribute__((packed));
290 * This structure lets the attribute unit compute the address of an attribute
291 * given the vertex and instance ID. Unfortunately, the way this works is
292 * rather complicated when instancing is enabled.
294 * To explain this, first we need to explain how compute and vertex threads are
295 * dispatched. This is a guess (although a pretty firm guess!) since the
296 * details are mostly hidden from the driver, except for attribute instancing.
297 * When a quad is dispatched, it receives a single, linear index. However, we
298 * need to translate that index into a (vertex id, instance id) pair, or a
299 * (local id x, local id y, local id z) triple for compute shaders (although
300 * vertex shaders and compute shaders are handled almost identically).
301 * Focusing on vertex shaders, one option would be to do:
303 * vertex_id = linear_id % num_vertices
304 * instance_id = linear_id / num_vertices
306 * but this involves a costly division and modulus by an arbitrary number.
307 * Instead, we could pad num_vertices. We dispatch padded_num_vertices *
308 * num_instances threads instead of num_vertices * num_instances, which results
309 * in some "extra" threads with vertex_id >= num_vertices, which we have to
310 * discard. The more we pad num_vertices, the more "wasted" threads we
311 * dispatch, but the division is potentially easier.
313 * One straightforward choice is to pad num_vertices to the next power of two,
314 * which means that the division and modulus are just simple bit shifts and
315 * masking. But the actual algorithm is a bit more complicated. The thread
316 * dispatcher has special support for dividing by 3, 5, 7, and 9, in addition
317 * to dividing by a power of two. This is possibly using the technique
318 * described in patent US20170010862A1. As a result, padded_num_vertices can be
319 * 1, 3, 5, 7, or 9 times a power of two. This results in less wasted threads,
320 * since we need less padding.
322 * padded_num_vertices is picked by the hardware. The driver just specifies the
323 * actual number of vertices. At least for Mali G71, the first few cases are
326 * num_vertices | padded_num_vertices
333 * Note that padded_num_vertices is a multiple of four (presumably because
334 * threads are dispatched in groups of 4). Also, padded_num_vertices is always
335 * at least one more than num_vertices, which seems like a quirk of the
336 * hardware. For larger num_vertices, the hardware uses the following
337 * algorithm: using the binary representation of num_vertices, we look at the
338 * most significant set bit as well as the following 3 bits. Let n be the
339 * number of bits after those 4 bits. Then we set padded_num_vertices according
340 * to the following table:
342 * high bits | padded_num_vertices
349 * For example, if num_vertices = 70 is passed to glDraw(), its binary
350 * representation is 1000110, so n = 3 and the high bits are 1000, and
351 * therefore padded_num_vertices = 9 * 2^3 = 72.
353 * The attribute unit works in terms of the original linear_id. if
354 * num_instances = 1, then they are the same, and everything is simple.
355 * However, with instancing things get more complicated. There are four
356 * possible modes, two of them we can group together:
358 * 1. Use the linear_id directly. Only used when there is no instancing.
360 * 2. Use the linear_id modulo a constant. This is used for per-vertex
361 * attributes with instancing enabled by making the constant equal
362 * padded_num_vertices. Because the modulus is always padded_num_vertices, this
363 * mode only supports a modulus that is a power of 2 times 1, 3, 5, 7, or 9.
364 * The shift field specifies the power of two, while the extra_flags field
365 * specifies the odd number. If shift = n and extra_flags = m, then the modulus
366 * is (2m + 1) * 2^n. As an example, if num_vertices = 70, then as computed
367 * above, padded_num_vertices = 9 * 2^3, so we should set extra_flags = 4 and
368 * shift = 3. Note that we must exactly follow the hardware algorithm used to
369 * get padded_num_vertices in order to correctly implement per-vertex
372 * 3. Divide the linear_id by a constant. In order to correctly implement
373 * instance divisors, we have to divide linear_id by padded_num_vertices times
374 * the user-specified divisor. So first we compute padded_num_vertices, again
375 * following the exact same algorithm that the hardware uses, then multiply it
376 * by the GL-level divisor to get the hardware-level divisor. This case is
377 * further divided into two more cases. If the hardware-level divisor is a
378 * power of two, then we just need to shift. The shift amount is specified by
379 * the shift field, so that the hardware-level divisor is just 2^shift.
381 * If it isn't a power of two, then we have to divide by an arbitrary integer.
382 * For that, we use the well-known technique of multiplying by an approximation
383 * of the inverse. The driver must compute the magic multiplier and shift
384 * amount, and then the hardware does the multiplication and shift. The
385 * hardware and driver also use the "round-down" optimization as described in
386 * http://ridiculousfish.com/files/faster_unsigned_division_by_constants.pdf.
387 * The hardware further assumes the multiplier is between 2^31 and 2^32, so the
388 * high bit is implicitly set to 1 even though it is set to 0 by the driver --
389 * presumably this simplifies the hardware multiplier a little. The hardware
390 * first multiplies linear_id by the multiplier and takes the high 32 bits,
391 * then applies the round-down correction if extra_flags = 1, then finally
392 * shifts right by the shift field.
394 * There are some differences between ridiculousfish's algorithm and the Mali
395 * hardware algorithm, which means that the reference code from ridiculousfish
396 * doesn't always produce the right constants. Mali does not use the pre-shift
397 * optimization, since that would make a hardware implementation slower (it
398 * would have to always do the pre-shift, multiply, and post-shift operations).
399 * It also forces the multiplier to be at least 2^31, which means that the
400 * exponent is entirely fixed, so there is no trial-and-error. Altogether,
401 * given the divisor d, the algorithm the driver must follow is:
403 * 1. Set shift = floor(log2(d)).
404 * 2. Compute m = ceil(2^(shift + 32) / d) and e = 2^(shift + 32) % d.
405 * 3. If e <= 2^shift, then we need to use the round-down algorithm. Set
406 * magic_divisor = m - 1 and extra_flags = 1.
407 * 4. Otherwise, set magic_divisor = m and extra_flags = 0.
/* Framebuffer descriptor addresses carry tag bits in the low 6 bits; this
 * mask recovers the actual address */
#define FBD_MASK (~0x3f)

/* MFBD, rather than SFBD */
#define MALI_MFBD (0x1)

/* ORed into an MFBD address to specify the fbx section is included */
#define MALI_MFBD_TAG_EXTRA (0x2)
/* On Bifrost, these fields are the same between the vertex and tiler payloads.
 * They also seem to be the same between Bifrost and Midgard. They're shared in
 * this prefix struct. */

struct mali_vertex_tiler_prefix {
        /* Packed invocation (thread dispatch) configuration */
        struct mali_invocation_packed invocation;
        /* Packed primitive assembly configuration */
        struct mali_primitive_packed primitive;
} __attribute__((packed));
/* Point size / line width can either be specified as a 32-bit float (for
 * constant size) or as a [machine word size]-bit GPU pointer (for varying
 * size). If a pointer is selected, by setting the appropriate
 * MALI_DRAW_VARYING_SIZE bit in the tiler payload, the contents of
 * varying_pointer will be interpreted as an array of fp16 sizes, one for each
 * vertex. gl_PointSize is therefore implemented by creating a special
 * MALI_R16F varying writing to varying_pointer. */

union midgard_primitive_size {
        /* Constant point size / line width */
        float constant;

        /* GPU pointer to an array of per-vertex fp16 sizes
         * (NOTE(review): members reconstructed from the comment above — the
         * original member list was lost; confirm names against upstream) */
        uint64_t pointer;
};
/* Bifrost tiler heap descriptor.
 * NOTE(review): fields beyond the three heap pointers (the hierarchy-weight
 * words the comment below refers to) are not visible here — confirm. */
struct bifrost_tiler_heap_meta {
        /* note: these are just guesses! */
        mali_ptr tiler_heap_start;
        mali_ptr tiler_heap_free;
        mali_ptr tiler_heap_end;

        /* hierarchy weights? but they're still 0 after the job has run... */
} __attribute__((packed));
/* Bifrost tiler state, pointing at the heap descriptor above */
struct bifrost_tiler_meta {
        u32 tiler_heap_next_start; /* To be written by the GPU */
        u32 used_hierarchy_mask;   /* To be written by the GPU */

        u16 hierarchy_mask; /* Five values observed: 0xa, 0x14, 0x28, 0x50, 0xa0 */

        mali_ptr tiler_heap_meta;

        /* TODO what is this used for? */
        /* NOTE(review): additional fields appear to be missing from this view
         * (e.g. framebuffer dimensions, trailing zero words) — confirm. */
} __attribute__((packed));
/* Tiler-job-only section of the Bifrost tiler payload.
 * NOTE(review): a pointer to bifrost_tiler_meta appears to be missing from
 * this view — confirm against the full payload. */
struct bifrost_tiler_only {
        /* Constant point/line size or pointer to per-vertex sizes */
        union midgard_primitive_size primitive_size;

        /* Always-zero words, presumably padding — names from reverse
         * engineering */
        u64 zero1, zero2, zero3, zero4, zero5, zero6;
} __attribute__((packed));
/* Postfix shared by vertex and tiler payloads: per-draw state pointers.
 * NOTE(review): several fields referenced by the comments below (vertex
 * offset, uniform buffer pointer, texture descriptor pointer) are not present
 * in this view — confirm the full layout before relying on offsets. */
struct mali_vertex_tiler_postfix {
        u16 gl_enables; // 0x6 on Midgard, 0x2 on Bifrost

        /* Both zero for non-instanced draws. For instanced draws, a
         * decomposition of padded_num_vertices. See the comments about the
         * corresponding fields in mali_attr for context. */

        unsigned instance_shift : 5;
        unsigned instance_odd : 3;

        /* Offset for first vertex in buffer */

        /* Zero for vertex jobs. Pointer to the position (gl_Position) varying
         * output from the vertex shader for tiler jobs. */

        u64 position_varying;

        /* An array of mali_uniform_buffer_meta's. The size is given by the
         * shader descriptor. */

        /* On Bifrost, this is a pointer to an array of bifrost_texture_descriptor.
         * On Midgard, this is a pointer to an array of pointers to the texture
         * descriptors, number of pointers bounded by number of textures. The
         * indirection is needed to accommodate varying numbers and sizes of
         * texture descriptors */

        /* For OpenGL, from what I've seen, this is intimately connected to
         * texture_meta. cwabbott says this is not the case under Vulkan, hence
         * why this field is separate (Midgard is Vulkan capable). Pointer to
         * array of sampler descriptors (which are uniform in size) */
        u64 sampler_descriptor;

        u64 attributes; /* struct attribute_buffer[] */
        u64 attribute_meta; /* attribute_meta[] */
        u64 varyings; /* struct attr */
        u64 varying_meta; /* pointer */
        u64 occlusion_counter; /* A single bit as far as I can tell */

        /* On Bifrost, this points directly to a mali_shared_memory structure.
         * On Midgard, this points to a framebuffer (either SFBD or MFBD as
         * tagged), which embeds a mali_shared_memory structure */
        mali_ptr shared_memory;
} __attribute__((packed));
/* Full Midgard vertex/tiler job payload: shared prefix, per-draw postfix,
 * and the primitive size (constant or pointer) */
struct midgard_payload_vertex_tiler {
        struct mali_vertex_tiler_prefix prefix;
        struct mali_vertex_tiler_postfix postfix;

        union midgard_primitive_size primitive_size;
} __attribute__((packed));
/* Bifrost vertex job payload: prefix + postfix only (no tiler section) */
struct bifrost_payload_vertex {
        struct mali_vertex_tiler_prefix prefix;
        struct mali_vertex_tiler_postfix postfix;
} __attribute__((packed));
/* Bifrost tiler job payload: prefix, tiler-only section, then postfix */
struct bifrost_payload_tiler {
        struct mali_vertex_tiler_prefix prefix;
        struct bifrost_tiler_only tiler;
        struct mali_vertex_tiler_postfix postfix;
} __attribute__((packed));
/* Purposeful off-by-one in width, height fields. For example, a (64, 64)
 * texture is stored as (63, 63) in these fields. This adjusts for that.
 * There's an identical pattern in the framebuffer descriptor. Even vertex
 * count fields work this way, hence the generic name -- integral fields that
 * are strictly positive generally need this adjustment. Argument is
 * parenthesized so expression arguments group correctly. */

#define MALI_POSITIVE(dim) ((dim) - 1)

/* 13 levels covers a 4096x4096 base level */
#define MAX_MIP_LEVELS (13)

/* Cubemap bloats everything up */
#define MAX_CUBE_FACES (6)

/* For each pointer, there is an address and optionally also a stride */
#define MAX_ELEMENTS (2)

/* Used for lod encoding. Thanks @urjaman for pointing out these routines can
 * be cleaned up a lot. Decodes 8:8 fixed point to float; argument is
 * parenthesized so expression arguments group correctly. */

#define DECODE_FIXED_16(x) ((float) ((x) / 256.0))
/* Encode a float LOD as 8:8 signed fixed point (inverse of DECODE_FIXED_16),
 * clamping to the representable range. Negative LODs are only permitted when
 * allow_negative is set; otherwise the result is clamped to [0, 8191]. */
static inline int16_t
FIXED_16(float x, bool allow_negative)
{
        /* Clamp inputs, accounting for float error */
        float max_lod = (32.0 - (1.0 / 512.0));
        float min_lod = allow_negative ? -max_lod : 0.0;

        x = ((x > max_lod) ? max_lod : ((x < min_lod) ? min_lod : x));

        return (int) (x * 256.0);
}
/* From presentations, 16x16 tiles externally. Use shift for fast computation
 * of tile numbers. */

#define MALI_TILE_SHIFT 4
#define MALI_TILE_LENGTH (1 << MALI_TILE_SHIFT)

/* Tile coordinates are stored as a compact u32, as only 12 bits are needed to
 * each component. Notice that this provides a theoretical upper bound of (1 <<
 * 12) = 4096 tiles in each direction, addressing a maximum framebuffer of size
 * 65536x65536. Multiplying that together, times another four given that Mali
 * framebuffers are 32-bit ARGB8888, means that this upper bound would take 16
 * gigabytes of RAM just to store the uncompressed framebuffer itself, let
 * alone rendering in real-time to such a buffer. */

/* From mali_kbase_10969_workaround.c */
#define MALI_X_COORD_MASK 0x00000FFF
#define MALI_Y_COORD_MASK 0x0FFF0000

/* Extract parts of a tile coordinate */

#define MALI_TILE_COORD_X(coord) ((coord) & MALI_X_COORD_MASK)
#define MALI_TILE_COORD_Y(coord) (((coord) & MALI_Y_COORD_MASK) >> 16)

/* Helpers to generate tile coordinates based on the boundary coordinates in
 * screen space. So, with the bounds (0, 0) to (128, 128) for the screen, these
 * functions would convert it to the bounding tiles (0, 0) to (7, 7).
 * Intentional "off-by-one"; finding the tile number is a form of fencepost
 * problem. Arguments are parenthesized so expression arguments group
 * correctly. */

#define MALI_MAKE_TILE_COORDS(X, Y) ((X) | ((Y) << 16))
#define MALI_BOUND_TO_TILE(B, bias) (((B) - (bias)) >> MALI_TILE_SHIFT)
#define MALI_COORDINATE_TO_TILE(W, H, bias) MALI_MAKE_TILE_COORDS(MALI_BOUND_TO_TILE(W, bias), MALI_BOUND_TO_TILE(H, bias))
#define MALI_COORDINATE_TO_TILE_MIN(W, H) MALI_COORDINATE_TO_TILE(W, H, 0)
#define MALI_COORDINATE_TO_TILE_MAX(W, H) MALI_COORDINATE_TO_TILE(W, H, 1)
/* Fragment job payload.
 * NOTE(review): the min/max tile coordinate fields appear to be missing from
 * this view — confirm against the full payload layout. */
struct mali_payload_fragment {
        /* Tagged pointer to the framebuffer descriptor (SFBD or MFBD) */
        mali_ptr framebuffer;
} __attribute__((packed));
/* Single Framebuffer Descriptor */

/* Flags apply to format. With just MSAA_A and MSAA_B, the framebuffer is
 * configured for 4x. With MSAA_8, it is configured for 8x. */

#define MALI_SFBD_FORMAT_MSAA_8 (1 << 3)
#define MALI_SFBD_FORMAT_MSAA_A (1 << 4)
/* NOTE(review): MSAA_B has the same value as MSAA_A (1 << 4); the comment
 * above implies distinct bits — confirm against hardware documentation. */
#define MALI_SFBD_FORMAT_MSAA_B (1 << 4)
#define MALI_SFBD_FORMAT_SRGB (1 << 5)

/* Fast/slow based on whether all three buffers are cleared at once */

#define MALI_CLEAR_FAST (1 << 18)
#define MALI_CLEAR_SLOW (1 << 28)
/* Unsigned literal: (1 << 31) on a 32-bit int is undefined behavior in C */
#define MALI_CLEAR_SLOW_STENCIL (1u << 31)

/* Configures hierarchical tiling on Midgard for both SFBD/MFBD (embedded
 * within the larger framebuffer descriptor). Analogous to
 * bifrost_tiler_heap_meta and bifrost_tiler_meta */

/* See pan_tiler.c for derivation */
#define MALI_HIERARCHY_MASK ((1 << 9) - 1)

/* Flag disabling the tiler for clear-only jobs, with
 hierarchical tiling */
#define MALI_TILER_DISABLED (1 << 12)

/* Flag selecting userspace-generated polygon list, for clear-only jobs without
 * hierarchical tiling. */
#define MALI_TILER_USER 0xFFF

/* Absent any geometry, the minimum size of the polygon list header */
#define MALI_TILER_MINIMUM_HEADER_SIZE 0x200
661 struct midgard_tiler_descriptor
{
662 /* Size of the entire polygon list; see pan_tiler.c for the
663 * computation. It's based on hierarchical tiling */
665 u32 polygon_list_size
;
667 /* Name known from the replay workaround in the kernel. What exactly is
668 * flagged here is less known. We do that (tiler_hierarchy_mask & 0x1ff)
669 * specifies a mask of hierarchy weights, which explains some of the
670 * performance mysteries around setting it. We also see the bottom bit
671 * of tiler_flags set in the kernel, but no comment why.
673 * hierarchy_mask can have the TILER_DISABLED flag */
678 /* See mali_tiler.c for an explanation */
679 mali_ptr polygon_list
;
680 mali_ptr polygon_list_body
;
682 /* Names based on we see symmetry with replay jobs which name these
685 mali_ptr heap_start
; /* tiler heap_free_address */
688 /* Hierarchy weights. We know these are weights based on the kernel,
689 * but I've never seen them be anything other than zero */
693 struct mali_sfbd_format
{
697 /* mali_channel_swizzle */
698 unsigned swizzle
: 12;
701 unsigned nr_channels
: 2;
706 enum mali_block_format block
: 2;
/* Shared structure at the start of framebuffer descriptors, or used bare for
 * compute jobs, configuring stack and shared memory */

struct mali_shared_memory {
        /* Configuration for shared memory for compute shaders.
         * shared_workgroup_count is logarithmic and may be computed for a
         * compute shader using shared memory as:
         *
         * shared_workgroup_count = MAX2(ceil(log2(count_x)) + ... + ceil(log2(count_z)), 10)
         *
         * For compute shaders that don't use shared memory, or non-compute
         * shaders, this is set to ~0 */

        u32 shared_workgroup_count : 5;
        u32 shared_shift : 4;
        u32 shared_zero : 20;

        /* NOTE(review): the bitfields above sum to 29 of 32 bits, and the
         * stack/scratchpad fields appear to be missing from this view —
         * confirm against the full structure. */

        /* For compute shaders, the RAM backing of workgroup-shared memory. For
         * fragment shaders on Bifrost, apparently multisampling locations */

        mali_ptr shared_memory;
} __attribute__((packed));
/* Configures multisampling on Bifrost fragment jobs */

struct bifrost_multisampling {
        /* GPU pointer to the sample-location table */
        mali_ptr sample_locations;

        /* NOTE(review): surrounding zero words appear to be missing from this
         * view — confirm the full structure. */
} __attribute__((packed));
/* Single Framebuffer Descriptor (SFBD): one render target plus depth/stencil.
 * NOTE(review): several fields are not visible in this view (e.g. the
 * width/height words the off-by-one comment refers to, stride, and unknown
 * words) — confirm the full layout before relying on sizeof/offsets. */
struct mali_single_framebuffer {
        struct mali_shared_memory shared_memory;
        struct mali_sfbd_format format;

        /* Purposeful off-by-one in these fields should be accounted for by the
         * MALI_DIMENSION macro */

        /* By default, the framebuffer is upside down from OpenGL's
         * perspective. Set framebuffer to the end and negate the stride to
         * flip in the Y direction */

        mali_ptr framebuffer;

        /* Depth and stencil buffers are interleaved, it appears, as they are
         * set to the same address in captures. Both fields set to zero if the
         * buffer is not being cleared. Depending on GL_ENABLE magic, you might
         * get a zero enable despite the buffer being present; that still is
         * [comment truncated in this view] */

        mali_ptr depth_buffer; // not SAME_VA
        u32 depth_stride_zero : 4;
        u32 depth_stride : 28;

        mali_ptr stencil_buffer; // not SAME_VA
        u32 stencil_stride_zero : 4;
        u32 stencil_stride : 28;

        u32 clear_color_1; // RGBA8888 from glClear, actually used by hardware
        u32 clear_color_2; // always equal, but unclear function?
        u32 clear_color_3; // always equal, but unclear function?
        u32 clear_color_4; // always equal, but unclear function?

        /* Set to zero if not cleared */

        float clear_depth_1; // float32, ditto
        float clear_depth_2; // float32, ditto
        float clear_depth_3; // float32, ditto
        float clear_depth_4; // float32, ditto

        u32 clear_stencil; // Exactly as it appears in OpenGL

        struct midgard_tiler_descriptor tiler;

        /* More below this, maybe */
} __attribute__((packed));
/* Flag in the MFBD render-target format word enabling sRGB encoding */
#define MALI_MFBD_FORMAT_SRGB (1 << 0)
/* Format word of an MFBD render target.
 * NOTE(review): the visible bitfields sum to 19 bits; unknown/flag bits appear
 * to be missing from this view — confirm the full 32-bit layout. */
struct mali_rt_format {
        unsigned nr_channels : 2; /* MALI_POSITIVE */

        enum mali_block_format block : 2;
        enum mali_msaa msaa : 2;

        unsigned swizzle : 12;

        /* Disables MFBD preload. When this bit is set, the render target will
         * be cleared every frame. When this bit is clear, the hardware will
         * automatically wallpaper the render target back from main memory.
         * Unfortunately, MFBD preload is very broken on Midgard, so in
         * practice, this is a chicken bit that should always be set.
         * Discovered by accident, as all good chicken bits are. */

        unsigned no_preload : 1;
} __attribute__((packed));
/* Flags for afbc.flags and ds_afbc.flags */

#define MALI_AFBC_FLAGS 0x10009

/* Lossless RGB and RGBA colorspace transform */
#define MALI_AFBC_YTR (1 << 17)
/* One MFBD render target.
 * NOTE(review): upstream groups the AFBC fields in a nested struct and pairs
 * framebuffer_stride : 28 with a 4-bit sibling bitfield; those parts are not
 * visible here — confirm the full layout before relying on sizeof/offsets. */
struct mali_render_target {
        struct mali_rt_format format;

        /* Stuff related to ARM Framebuffer Compression. When AFBC is enabled,
         * there is an extra metadata buffer that contains 16 bytes per tile.
         * The framebuffer needs to be the same size as before, since we don't
         * know ahead of time how much space it will take up. The
         * framebuffer_stride is set to 0, since the data isn't stored linearly.
         *
         * When AFBC is disabled, these fields are zero. */

        u32 stride; // stride in units of tiles
        u32 flags; // = 0x20000

        mali_ptr framebuffer;

        u32 framebuffer_stride : 28; // in units of bytes, row to next
        u32 layer_stride; /* For multisample rendering */

        u32 clear_color_1; // RGBA8888 from glClear, actually used by hardware
        u32 clear_color_2; // always equal, but unclear function?
        u32 clear_color_3; // always equal, but unclear function?
        u32 clear_color_4; // always equal, but unclear function?
} __attribute__((packed));
/* An optional part of mali_framebuffer. It comes between the main structure
 * and the array of render targets. It must be included if any of these are
 * enabled:
 *
 * - Transaction Elimination
 * - TODO: Anything else?
 *
 * NOTE(review): the gap between 0x1 and 0x4 suggests a 0x2 flag exists but is
 * not visible in this view — confirm. */

#define MALI_EXTRA_PRESENT (0x1)

#define MALI_EXTRA_ZS (0x4)
/* Optional MFBD section configuring checksumming and the Z/S attachment.
 * NOTE(review): the checksum pointer/stride fields are not visible here, and
 * upstream wraps the AFBC vs. linear depth/stencil fields in a union which
 * appears flattened in this view — confirm the full layout. */
struct mali_framebuffer_extra {
        /* Each tile has an 8 byte checksum, so the stride is "width in tiles * 8" */

        unsigned flags_lo : 4;
        enum mali_block_format zs_block : 2;

        /* Number of samples in Z/S attachment, MALI_POSITIVE. So zero for
         * 1-sample (non-MSAA), 0x3 for MSAA 4x, etc */
        unsigned zs_samples : 4;
        unsigned flags_hi : 22;

        /* Note: AFBC is only allowed for 24/8 combined depth/stencil. */

        mali_ptr depth_stencil_afbc_metadata;
        u32 depth_stencil_afbc_stride; // in units of tiles

        mali_ptr depth_stencil;

        /* Depth becomes depth/stencil in case of combined D/S */

        u32 depth_stride_zero : 4;
        u32 depth_stride : 28;
        u32 depth_layer_stride;

        u32 stencil_stride_zero : 4;
        u32 stencil_stride : 28;
        u32 stencil_layer_stride;
} __attribute__((packed));
/* Flags for mfbd_flags */

/* Enables writing depth results back to main memory (rather than keeping them
 * on-chip in the tile buffer and then discarding) */

#define MALI_MFBD_DEPTH_WRITE (1 << 10)

/* The MFBD contains the extra mali_framebuffer_extra section */

#define MALI_MFBD_EXTRA (1 << 13)
/* Multiple Framebuffer Descriptor (MFBD): header followed by the optional
 * extra section and the render target array.
 * NOTE(review): the visible bitfields sum to 25 of 32 bits in the first word,
 * and the width/height words appear missing from this view — confirm the
 * full layout before relying on sizeof/offsets. */
struct mali_framebuffer {
        struct mali_shared_memory shared_memory;
        struct bifrost_multisampling msaa;

        u32 unk1 : 19; // = 0x01000
        u32 rt_count_1 : 3; // off-by-one (use MALI_POSITIVE)

        u32 rt_count_2 : 3; // no off-by-one

        u32 clear_stencil : 8;
        u32 mfbd_flags : 24; // = 0x100

        struct midgard_tiler_descriptor tiler;

        /* optional: struct mali_framebuffer_extra extra */
        /* struct mali_render_target rts[] */
} __attribute__((packed));
988 #endif /* __PANFROST_JOB_H__ */