/*
 * © Copyright 2017-2018 Alyssa Rosenzweig
 * © Copyright 2017-2018 Connor Abbott
 * © Copyright 2017-2018 Lyude Paul
 * © Copyright 2019 Collabora, Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
28 #ifndef __PANFROST_JOB_H__
29 #define __PANFROST_JOB_H__
/* A GPU virtual address. Always 64 bits wide, regardless of the CPU word
 * size, since descriptors store full 64-bit pointers. */
typedef uint64_t mali_ptr;
/* Applies to tiler_gl_enables */

#define MALI_OCCLUSION_QUERY    (1 << 3)
#define MALI_OCCLUSION_PRECISE  (1 << 4)

/* Set for a glFrontFace(GL_CCW) in a Y=0=TOP coordinate system (like Gallium).
 * In OpenGL, this would correspond to glFrontFace(GL_CW). Mesa and the blob
 * disagree about how to do viewport flipping, so the blob actually sets this
 * for GL_CW but then has a negative viewport stride */

#define MALI_FRONT_CCW_TOP      (1 << 5)

#define MALI_CULL_FACE_FRONT    (1 << 6)
#define MALI_CULL_FACE_BACK     (1 << 7)

/* Flags apply to unknown2_3? */

#define MALI_HAS_MSAA           (1 << 0)

/* Execute fragment shader per-sample if set (e.g. to implement gl_SampleID
 * reads) */
#define MALI_PER_SAMPLE         (1 << 2)
#define MALI_CAN_DISCARD        (1 << 5)

/* Applies on SFBD systems, specifying that programmable blending is in use */
#define MALI_HAS_BLEND_SHADER   (1 << 6)

/* func is mali_func. Arguments are fully parenthesized so these macros are
 * safe with compound expressions. */
#define MALI_DEPTH_FUNC(func)      ((func) << 8)
#define MALI_GET_DEPTH_FUNC(flags) (((flags) >> 8) & 0x7)
#define MALI_DEPTH_FUNC_MASK       MALI_DEPTH_FUNC(0x7)

#define MALI_DEPTH_WRITEMASK    (1 << 11)

#define MALI_DEPTH_CLIP_NEAR    (1 << 12)
#define MALI_DEPTH_CLIP_FAR     (1 << 13)
/* Next flags to unknown2_4 */
#define MALI_STENCIL_TEST      (1 << 0)

#define MALI_ALPHA_TO_COVERAGE (1 << 1)

#define MALI_NO_DITHER (1 << 9)
#define MALI_DEPTH_RANGE_A (1 << 12)
#define MALI_DEPTH_RANGE_B (1 << 13)
#define MALI_NO_MSAA (1 << 14)

/* Color channel write masks; correspond to the glColorMask arguments and to
 * the color_mask field of mali_blend_equation below */
#define MALI_MASK_R (1 << 0)
#define MALI_MASK_G (1 << 1)
#define MALI_MASK_B (1 << 2)
#define MALI_MASK_A (1 << 3)
/* How the non-dominant blend factor is derived; used for
 * mali_blend_mode.nondominant_mode below. (Closing brace restored; it was
 * lost in extraction.) */
enum mali_nondominant_mode {
        MALI_BLEND_NON_MIRROR = 0,
        MALI_BLEND_NON_ZERO = 1
};
/* Whether the source or the destination is the dominant blend input; used
 * for mali_blend_mode.dominant below. (Closing brace restored; it was lost
 * in extraction.) */
enum mali_dominant_blend {
        MALI_BLEND_DOM_SOURCE = 0,
        MALI_BLEND_DOM_DESTINATION = 1
};
/* Blend factor applied to the dominant input; used for
 * mali_blend_mode.dominant_factor below. (Closing brace restored; it was
 * lost in extraction.) */
enum mali_dominant_factor {
        MALI_DOMINANT_UNK0 = 0,
        MALI_DOMINANT_ZERO = 1,
        MALI_DOMINANT_SRC_COLOR = 2,
        MALI_DOMINANT_DST_COLOR = 3,
        MALI_DOMINANT_UNK4 = 4,
        MALI_DOMINANT_SRC_ALPHA = 5,
        MALI_DOMINANT_DST_ALPHA = 6,
        MALI_DOMINANT_CONSTANT = 7,
};
/* Modifier applied after clipping the blend result; used for
 * mali_blend_mode.clip_modifier below. (Closing brace restored; it was lost
 * in extraction.) */
enum mali_blend_modifier {
        MALI_BLEND_MOD_UNK0 = 0,
        MALI_BLEND_MOD_NORMAL = 1,
        MALI_BLEND_MOD_SOURCE_ONE = 2,
        MALI_BLEND_MOD_DEST_ONE = 3,
};
/* One half of a hardware blend equation: describes how one input (RGB or
 * alpha) is combined. Packed into 12 bits; stored in the rgb_mode/alpha_mode
 * fields of mali_blend_equation below. */
struct mali_blend_mode {
        enum mali_blend_modifier clip_modifier : 2;
        unsigned unused_0 : 1;
        /* Negate the source operand */
        unsigned negate_source : 1;

        /* Which input (source/destination) is dominant */
        enum mali_dominant_blend dominant : 1;

        /* How the non-dominant factor is derived */
        enum mali_nondominant_mode nondominant_mode : 1;

        unsigned unused_1 : 1;

        /* Negate the destination operand */
        unsigned negate_dest : 1;

        /* Factor applied to the dominant input */
        enum mali_dominant_factor dominant_factor : 3;
        /* Use (1 - factor) instead of factor */
        unsigned complement_dominant : 1;
} __attribute__((packed));
/* Fixed-function blend state for one render target: two packed
 * mali_blend_mode words plus the color write mask. */
struct mali_blend_equation {
        /* Of type mali_blend_mode */
        unsigned rgb_mode : 12;
        unsigned alpha_mode : 12;

        /* NOTE(review): only 28 bits are declared here; upstream pads with a
         * 4-bit zero field between alpha_mode and color_mask, which appears
         * to have been dropped from this extraction — verify. */

        /* Corresponds to MALI_MASK_* above and glColorMask arguments */

        unsigned color_mask : 4;
} __attribute__((packed));
/* Compressed per-pixel formats. Each of these formats expands to one to four
 * floating-point or integer numbers, as defined by the OpenGL specification.
 * There are various places in OpenGL where the user can specify a compressed
 * format in memory, which all use the same 8-bit enum in the various
 * descriptors, although different hardware units support different formats.
 */

/* The top 3 bits specify how the bits of each component are interpreted. */

#define MALI_FORMAT_COMPRESSED (0 << 5)

/* e.g. R11F_G11F_B10F */
#define MALI_FORMAT_SPECIAL (2 << 5)

/* signed normalized, e.g. RGBA8_SNORM */
#define MALI_FORMAT_SNORM (3 << 5)

#define MALI_FORMAT_UINT (4 << 5)

/* e.g. RGBA8 and RGBA32F */
#define MALI_FORMAT_UNORM (5 << 5)

/* e.g. RGBA8I and RGBA16F */
#define MALI_FORMAT_SINT (6 << 5)

/* These formats seem to largely duplicate the others. They're used at least
 * for Bifrost framebuffer output. */
#define MALI_FORMAT_SPECIAL2 (7 << 5)
#define MALI_EXTRACT_TYPE(fmt) ((fmt) & 0xe0)

/* If the high 3 bits are 3 to 6 these two bits say how many components
 * there are (stored minus one). Arguments are fully parenthesized for
 * macro hygiene. */
#define MALI_NR_CHANNELS(n) (((n) - 1) << 3)
#define MALI_EXTRACT_CHANNELS(fmt) ((((fmt) >> 3) & 3) + 1)

/* If the high 3 bits are 3 to 6, then the low 3 bits say how big each
 * component is, except the special MALI_CHANNEL_FLOAT which overrides what the
 * bits mean. */

#define MALI_CHANNEL_4 2

#define MALI_CHANNEL_8 3

#define MALI_CHANNEL_16 4

#define MALI_CHANNEL_32 5

/* For MALI_FORMAT_SINT it means a half-float (e.g. RG16F). For
 * MALI_FORMAT_UNORM, it means a 32-bit float. */
#define MALI_CHANNEL_FLOAT 7
#define MALI_EXTRACT_BITS(fmt) ((fmt) & 0x7)
/* Applies to midgard1.flags_lo */

/* Should be set when the fragment shader updates the depth value. */
#define MALI_WRITES_Z (1 << 4)

/* Should the hardware perform early-Z testing? Set if the shader does not use
 * discard, alpha-to-coverage, shader depth writes, and if the shader has no
 * side effects (writes to global memory or images) unless early-z testing is
 * forced in the shader.
 */

#define MALI_EARLY_Z (1 << 6)

/* Should the hardware calculate derivatives (via helper invocations)? Set in a
 * fragment shader that uses texturing or derivative functions */

#define MALI_HELPER_INVOCATIONS (1 << 7)

/* Flags denoting the fragment shader's use of tilebuffer readback. If the
 * shader might read any part of the tilebuffer, set MALI_READS_TILEBUFFER. If
 * it might read depth/stencil in particular, also set MALI_READS_ZS */

#define MALI_READS_ZS (1 << 8)

/* The shader might write to global memory (via OpenCL, SSBOs, or images).
 * Reading is okay, as are ordinary writes to the tilebuffer/varyings. Setting
 * incurs a performance penalty. On a fragment shader, this bit implies there
 * are side effects, hence it interacts with early-z. */
#define MALI_WRITES_GLOBAL (1 << 9)

#define MALI_READS_TILEBUFFER (1 << 10)

/* Applies to midgard1.flags_hi */

/* Should be set when the fragment shader updates the stencil value. */
#define MALI_WRITES_S (1 << 2)

/* Mode to suppress generation of Infinity and NaN values by clamping inf
 * (-inf) to MAX_FLOAT (-MIN_FLOAT) and flushing NaN to 0.0
 *
 * Compare suppress_inf/suppress_nan flags on the Bifrost clause header for the
 * same functionality.
 *
 * This is not conformant on GLES3 or OpenCL, but is optional on GLES2, where
 * it works around app bugs (e.g. in glmark2-es2 -bterrain with FP16).
 */
#define MALI_SUPPRESS_INF_NAN (1 << 3)

/* Flags for bifrost1.unk1 */

/* Shader uses less than 32 registers, partitioned as [R0, R15] U [R48, R63],
 * allowing for full thread count. If clear, the full [R0, R63] register set is
 * available at half thread count */
#define MALI_BIFROST_FULL_THREAD (1 << 9)

/* Enable early-z testing (presumably). This flag may not be set if the shader:
 *
 *  - Writes gl_FragDepth
 *
 * (NOTE(review): other list entries of this comment were lost in extraction.)
 *
 * This differs from Midgard which sets the MALI_EARLY_Z flag even with
 * blending, although I've begun to suspect that flag does not in fact enable
 * early-z testing. */
#define MALI_BIFROST_EARLY_Z (1 << 15)

/* First clause type is ATEST */
#define MALI_BIFROST_FIRST_ATEST (1 << 26)
277 /* The raw Midgard blend payload can either be an equation or a shader
278 * address, depending on the context */
280 union midgard_blend
{
284 struct mali_blend_equation equation
;
/* We need to load the tilebuffer to blend (i.e. the destination factor is not
 * ZERO) */
#define MALI_BLEND_LOAD_TIB (0x1)

/* A blend shader is used to blend this render target */
#define MALI_BLEND_MRT_SHADER (0x2)

/* On MRT Midgard systems (using an MFBD), each render target gets its own
 * blend descriptor */

#define MALI_BLEND_SRGB (0x400)

/* Dithering is specified here for MFBD, otherwise NO_DITHER for SFBD */
#define MALI_BLEND_NO_DITHER (0x800)
/* Per-render-target blend descriptor for MRT-capable Midgard (MFBD). */
struct midgard_blend_rt {
        /* Flags base value of 0x200 to enable the render target.
         * OR with 0x1 for blending (anything other than REPLACE).
         * OR with 0x2 for programmable blending
         * OR with MALI_BLEND_SRGB for implicit sRGB
         */

        /* NOTE(review): upstream carries the flags word (and padding)
         * before `blend`; those fields appear to have been dropped from
         * this extraction — verify against the full file. */

        union midgard_blend blend;
} __attribute__((packed));
/* On Bifrost systems (all MRT), each render target gets one of these
 * descriptors. (Closing brace restored; it was lost in extraction.) */

enum bifrost_shader_type {
        BIFROST_BLEND_F16 = 0,
        BIFROST_BLEND_F32 = 1,
        BIFROST_BLEND_I32 = 2,
        BIFROST_BLEND_U32 = 3,
        BIFROST_BLEND_I16 = 4,
        BIFROST_BLEND_U16 = 5,
};
#define BIFROST_MAX_RENDER_TARGET_COUNT 8

/* Per-render-target blend descriptor on Bifrost. */
struct bifrost_blend_rt {
        /* This is likely an analogue of the flags on
         * midgard_blend_rt */

        u16 flags; // = 0x200

        /* Single-channel blend constants are encoded in a sort of
         * fixed-point. Basically, the float is mapped to a byte, becoming
         * a high byte, and then the lower-byte is added for precision.
         * For the original float f:
         *
         * f = (constant_hi / 255) + (constant_lo / 65535)
         *
         * constant_hi = int(f / 255)
         * constant_lo = 65535*f - (65535/255) * constant_hi
         *
         * NOTE(review): the constant_hi/constant_lo fields themselves appear
         * to have been dropped from this extraction — verify. */

        struct mali_blend_equation equation;

        /* NOTE(review): the fields described by the three comments below
         * (an unknown word, a slot index, and the shader-type bits) appear
         * to have been dropped from this extraction — verify.
         *
         * - 0x3 when this slot is unused (everything else is 0 except the index)
         * - 0x11 when this is the fourth slot (and it's used)
         * - 0 when there is a blend shader
         */

        /* increments from 0 to 3 */

        /* So far, I've only seen:
         * - R001 for 1-component formats
         * - RG01 for 2-component formats
         * - RGB1 for 3-component formats
         * - RGBA for 4-component formats
         */
        enum mali_format format : 8;

        /* Type of the shader output variable. Note, this can
         * be different from the format.
         * enum bifrost_shader_type
         */

        /* Only the low 32 bits of the blend shader are stored, the
         * high 32 bits are implicitly the same as the original shader.
         * According to the kernel driver, the program counter for
         * shaders is actually only 24 bits, so shaders cannot cross
         * the 2^24-byte boundary, and neither can the blend shader.
         * The blob handles this by allocating a 2^24 byte pool for
         * shaders, and making sure that any blend shaders are stored
         * in the same pool as the original shader. The kernel will
         * make sure this allocation is aligned to 2^24 bytes.
         */
} __attribute__((packed));
/* Descriptor for the shader. Following this is at least one, up to four blend
 * descriptors for each active render target */

struct mali_shader_meta {
        /* NOTE(review): this extraction dropped many fields here, including
         * the union/struct wrappers upstream uses to separate the Bifrost
         * and Midgard variants — hence the duplicated uniform_buffer_count
         * below, which cannot be valid as written. Verify against the full
         * file before relying on this layout. */

        /* Bifrost variant: */
        u32 uniform_buffer_count : 4;
        u32 unk1 : 28; // = 0x800000 for vertex, 0x958020 for tiler

        /* Midgard variant: */
        unsigned uniform_buffer_count : 4;
        unsigned flags_lo : 12;

        /* Number of work registers used by the shader */
        unsigned work_count : 5;
        unsigned uniform_count : 5;
        unsigned flags_hi : 6;

        /* Same as glPolygonOffset() arguments */

        /* Generated from SAMPLE_COVERAGE_VALUE and SAMPLE_COVERAGE_INVERT. See
         * 13.8.3 ("Multisample Fragment Operations") in the OpenGL ES 3.2
         * specification. Only matters when multisampling is enabled. */

        u8 stencil_mask_front;
        u8 stencil_mask_back;

        struct mali_stencil_packed stencil_front;
        struct mali_stencil_packed stencil_back;

        /* On Bifrost, some system values are preloaded in
         * registers R55-R62 by the thread dispatcher prior to
         * the start of shader execution. This is a bitfield
         * with one entry for each register saying which
         * registers need to be preloaded. Right now, the known
         * values are (vertex/compute):
         *
         * - R55 : gl_LocalInvocationID.xy
         * - R56 : gl_LocalInvocationID.z + unknown in high 16 bits
         * - R57 : gl_WorkGroupID.x
         * - R58 : gl_WorkGroupID.y
         * - R59 : gl_WorkGroupID.z
         * - R60 : gl_GlobalInvocationID.x
         * - R61 : gl_GlobalInvocationID.y/gl_VertexID (without base)
         * - R62 : gl_GlobalInvocationID.z/gl_InstanceID (without base)
         *
         * And for fragment:
         *
         * - R55 : unknown, never seen (but the bit for this is
         *   sometimes set)
         * - R56 : unknown (bit always unset)
         * - R57 : gl_PrimitiveID
         * - R58 : gl_FrontFacing in low bit, potentially other stuff
         * - R59 : u16 fragment coordinates (used to compute
         *   gl_FragCoord.xy, together with sample positions)
         * - R60 : gl_SampleMask (used in epilog, so pretty
         *   much always used, but the bit is always 0 -- is
         *   this just always pushed?)
         * - R61 : gl_SampleMaskIn and gl_SampleID, used by
         *   varying interpolation.
         * - R62 : unknown (bit always unset).
         *
         * Later GPUs (starting with Mali-G52?) support
         * preloading float varyings into r0-r7. This is
         * indicated by setting 0x40. There is no distinction
         * here between 1 varying and 2.
         */
        u32 preload_regs : 8;

        /* In units of 8 bytes or 64 bits, since the
         * uniform/const port loads 64 bits at a time.
         */
        u32 uniform_count : 7;
        u32 unk4 : 10; // = 2

        /* Blending information for the older non-MRT Midgard HW. Check for
         * MALI_HAS_BLEND_SHADER to decide how to interpret.
         */
        union midgard_blend blend;
} __attribute__((packed));
/* This only concerns hardware jobs */

/* Possible values for job_descriptor_size */

#define MALI_JOB_32 0
#define MALI_JOB_64 1

/* Common header prefixed to every hardware job descriptor. */
struct mali_job_descriptor_header {
        u32 exception_status;
        u32 first_incomplete_task;

        /* NOTE(review): upstream has a 64-bit fault pointer here which this
         * extraction appears to have dropped — verify. */

        /* MALI_JOB_32 or MALI_JOB_64 */
        u8 job_descriptor_size : 1;
        enum mali_job_type job_type : 7;

        u8 unknown_flags : 7;

        /* NOTE(review): additional fields (e.g. a barrier bit and the job
         * index) appear to have been dropped here — verify. */

        u16 job_dependency_index_1;
        u16 job_dependency_index_2;

        /* NOTE(review): the next-job pointer appears to have been dropped
         * from the end of this struct — verify. */
} __attribute__((packed));
/* Details about write_value from panfrost igt tests which use it as a generic
 * dword write primitive */

#define MALI_WRITE_VALUE_ZERO 3

struct mali_payload_write_value {
        /* NOTE(review): upstream places the destination address before this
         * field and the immediate value after it; those fields appear to
         * have been dropped from this extraction — verify. */
        u32 value_descriptor;
} __attribute__((packed));
/* This structure lets the attribute unit compute the address of an attribute
539 * given the vertex and instance ID. Unfortunately, the way this works is
540 * rather complicated when instancing is enabled.
542 * To explain this, first we need to explain how compute and vertex threads are
543 * dispatched. This is a guess (although a pretty firm guess!) since the
544 * details are mostly hidden from the driver, except for attribute instancing.
545 * When a quad is dispatched, it receives a single, linear index. However, we
546 * need to translate that index into a (vertex id, instance id) pair, or a
547 * (local id x, local id y, local id z) triple for compute shaders (although
548 * vertex shaders and compute shaders are handled almost identically).
549 * Focusing on vertex shaders, one option would be to do:
551 * vertex_id = linear_id % num_vertices
552 * instance_id = linear_id / num_vertices
554 * but this involves a costly division and modulus by an arbitrary number.
555 * Instead, we could pad num_vertices. We dispatch padded_num_vertices *
556 * num_instances threads instead of num_vertices * num_instances, which results
557 * in some "extra" threads with vertex_id >= num_vertices, which we have to
558 * discard. The more we pad num_vertices, the more "wasted" threads we
559 * dispatch, but the division is potentially easier.
561 * One straightforward choice is to pad num_vertices to the next power of two,
562 * which means that the division and modulus are just simple bit shifts and
563 * masking. But the actual algorithm is a bit more complicated. The thread
564 * dispatcher has special support for dividing by 3, 5, 7, and 9, in addition
565 * to dividing by a power of two. This is possibly using the technique
566 * described in patent US20170010862A1. As a result, padded_num_vertices can be
567 * 1, 3, 5, 7, or 9 times a power of two. This results in less wasted threads,
568 * since we need less padding.
570 * padded_num_vertices is picked by the hardware. The driver just specifies the
571 * actual number of vertices. At least for Mali G71, the first few cases are
574 * num_vertices | padded_num_vertices
581 * Note that padded_num_vertices is a multiple of four (presumably because
582 * threads are dispatched in groups of 4). Also, padded_num_vertices is always
583 * at least one more than num_vertices, which seems like a quirk of the
584 * hardware. For larger num_vertices, the hardware uses the following
585 * algorithm: using the binary representation of num_vertices, we look at the
586 * most significant set bit as well as the following 3 bits. Let n be the
587 * number of bits after those 4 bits. Then we set padded_num_vertices according
588 * to the following table:
590 * high bits | padded_num_vertices
597 * For example, if num_vertices = 70 is passed to glDraw(), its binary
598 * representation is 1000110, so n = 3 and the high bits are 1000, and
599 * therefore padded_num_vertices = 9 * 2^3 = 72.
601 * The attribute unit works in terms of the original linear_id. if
602 * num_instances = 1, then they are the same, and everything is simple.
603 * However, with instancing things get more complicated. There are four
604 * possible modes, two of them we can group together:
606 * 1. Use the linear_id directly. Only used when there is no instancing.
608 * 2. Use the linear_id modulo a constant. This is used for per-vertex
609 * attributes with instancing enabled by making the constant equal
610 * padded_num_vertices. Because the modulus is always padded_num_vertices, this
611 * mode only supports a modulus that is a power of 2 times 1, 3, 5, 7, or 9.
612 * The shift field specifies the power of two, while the extra_flags field
613 * specifies the odd number. If shift = n and extra_flags = m, then the modulus
614 * is (2m + 1) * 2^n. As an example, if num_vertices = 70, then as computed
615 * above, padded_num_vertices = 9 * 2^3, so we should set extra_flags = 4 and
616 * shift = 3. Note that we must exactly follow the hardware algorithm used to
617 * get padded_num_vertices in order to correctly implement per-vertex
620 * 3. Divide the linear_id by a constant. In order to correctly implement
621 * instance divisors, we have to divide linear_id by padded_num_vertices times
 * the user-specified divisor. So first we compute padded_num_vertices, again
623 * following the exact same algorithm that the hardware uses, then multiply it
624 * by the GL-level divisor to get the hardware-level divisor. This case is
625 * further divided into two more cases. If the hardware-level divisor is a
626 * power of two, then we just need to shift. The shift amount is specified by
627 * the shift field, so that the hardware-level divisor is just 2^shift.
629 * If it isn't a power of two, then we have to divide by an arbitrary integer.
630 * For that, we use the well-known technique of multiplying by an approximation
631 * of the inverse. The driver must compute the magic multiplier and shift
632 * amount, and then the hardware does the multiplication and shift. The
633 * hardware and driver also use the "round-down" optimization as described in
634 * http://ridiculousfish.com/files/faster_unsigned_division_by_constants.pdf.
635 * The hardware further assumes the multiplier is between 2^31 and 2^32, so the
636 * high bit is implicitly set to 1 even though it is set to 0 by the driver --
637 * presumably this simplifies the hardware multiplier a little. The hardware
638 * first multiplies linear_id by the multiplier and takes the high 32 bits,
639 * then applies the round-down correction if extra_flags = 1, then finally
640 * shifts right by the shift field.
642 * There are some differences between ridiculousfish's algorithm and the Mali
643 * hardware algorithm, which means that the reference code from ridiculousfish
644 * doesn't always produce the right constants. Mali does not use the pre-shift
645 * optimization, since that would make a hardware implementation slower (it
646 * would have to always do the pre-shift, multiply, and post-shift operations).
 * It also forces the multiplier to be at least 2^31, which means that the
648 * exponent is entirely fixed, so there is no trial-and-error. Altogether,
649 * given the divisor d, the algorithm the driver must follow is:
651 * 1. Set shift = floor(log2(d)).
652 * 2. Compute m = ceil(2^(shift + 32) / d) and e = 2^(shift + 32) % d.
653 * 3. If e <= 2^shift, then we need to use the round-down algorithm. Set
654 * magic_divisor = m - 1 and extra_flags = 1.
655 * 4. Otherwise, set magic_divisor = m and extra_flags = 0.
657 * Unrelated to instancing/actual attributes, images (the OpenCL kind) are
658 * implemented as special attributes, denoted by MALI_ATTR_IMAGE. For images,
659 * let shift=extra_flags=0. Stride is set to the image format's bytes-per-pixel
660 * (*NOT the row stride*). Size is set to the size of the image itself.
 * Special internal attributes and varyings (gl_VertexID, gl_FrontFacing, etc)
 * use particular fixed addresses with modified structures.
 */
/* Addressing mode of an attribute/varying record, stored in the low bits of
 * the elements pointer (see the big comment above). Closing brace restored;
 * it was lost in extraction. */
enum mali_attr_mode {
        MALI_ATTR_UNUSED = 0,
        MALI_ATTR_LINEAR = 1,
        MALI_ATTR_POT_DIVIDE = 2,
        MALI_ATTR_MODULO = 3,
        MALI_ATTR_NPOT_DIVIDE = 4,
        /* NOTE(review): upstream defines an additional image mode
         * (MALI_ATTR_IMAGE, referenced in the comment above) which appears
         * to have been dropped from this extraction — verify. */
};
/* Pseudo-address for gl_VertexID, gl_FragCoord, gl_FrontFacing */

#define MALI_ATTR_VERTEXID (0x22)
#define MALI_ATTR_INSTANCEID (0x24)
#define MALI_VARYING_FRAG_COORD (0x25)
#define MALI_VARYING_FRONT_FACING (0x26)

/* This magic "pseudo-address" is used as `elements` to implement
 * gl_PointCoord. When read from a fragment shader, it generates a point
 * coordinate per the OpenGL ES 2.0 specification. Flipped coordinate spaces
 * require an affine transformation in the shader. */

#define MALI_VARYING_POINT_COORD (0x61)

/* Used for comparison to check if an address is special. Mostly a guess, but
 * it doesn't really matter. */

#define MALI_RECORD_SPECIAL (0x100)
695 /* This is used for actual attributes. */
697 /* The bottom 3 bits are the mode */
698 mali_ptr elements
: 64 - 8;
704 /* The entry after an NPOT_DIVIDE entry has this format. It stores
705 * extra information that wouldn't fit in a normal entry.
708 u32 unk
; /* = 0x20 */
711 /* This is the original, GL-level divisor. */
714 } __attribute__((packed
));
/* Per-attribute metadata record (format, source buffer, etc). */
struct mali_attr_meta {
        /* Vertex buffer index */
        /* NOTE(review): the index field itself appears to have been dropped
         * from this extraction — verify against the full file. */

        unsigned unknown1 : 2;
        unsigned format : 22;

        /* When packing multiple attributes in a buffer, offset addresses by
         * this value. Obscurely, this is signed. */
        /* NOTE(review): the src_offset field itself appears to have been
         * dropped — verify. */
} __attribute__((packed));
/* Mask clearing the low tag bits of a framebuffer-descriptor address */
#define FBD_MASK (~0x3f)

/* MFBD, rather than SFBD */
#define MALI_MFBD (0x1)

/* ORed into an MFBD address to specify the fbx section is included */
#define MALI_MFBD_TAG_EXTRA (0x2)

/* On Bifrost, these fields are the same between the vertex and tiler payloads.
 * They also seem to be the same between Bifrost and Midgard. They're shared in
 * a single structure. */

/* Applies to unknown_draw */

#define MALI_DRAW_INDEXED_UINT8 (0x10)
#define MALI_DRAW_INDEXED_UINT16 (0x20)
#define MALI_DRAW_INDEXED_UINT32 (0x30)
#define MALI_DRAW_INDEXED_SIZE (0x30)
#define MALI_DRAW_INDEXED_SHIFT (4)

#define MALI_DRAW_VARYING_SIZE (0x100)

/* Set to use first vertex as the provoking vertex for flatshading. Clear to
 * use the last vertex. This is the default in DX and VK, but not in GL. */

#define MALI_DRAW_FLATSHADE_FIRST (0x800)

#define MALI_DRAW_PRIMITIVE_RESTART_FIXED_INDEX (0x10000)
/* Shared prefix of vertex and tiler job payloads. */
struct mali_vertex_tiler_prefix {
        /* This is a dynamic bitfield containing the following things in this order:
         *
         * - gl_WorkGroupSize.x
         * - gl_WorkGroupSize.y
         * - gl_WorkGroupSize.z
         * - gl_NumWorkGroups.x
         * - gl_NumWorkGroups.y
         * - gl_NumWorkGroups.z
         *
         * The number of bits allocated for each number is based on the *_shift
         * fields below. For example, workgroups_y_shift gives the bit that
         * gl_NumWorkGroups.y starts at, and workgroups_z_shift gives the bit
         * that gl_NumWorkGroups.z starts at (and therefore one after the bit
         * that gl_NumWorkGroups.y ends at). The actual value for each gl_*
         * value is one more than the stored value, since if any of the values
         * are zero, then there would be no invocations (and hence no job). If
         * there were 0 bits allocated to a given field, then it must be zero,
         * and hence the real value is one.
         *
         * Vertex jobs reuse the same job dispatch mechanism as compute jobs,
         * effectively doing glDispatchCompute(1, vertex_count, instance_count)
         * where vertex count is the number of vertices.
         */
        u32 invocation_count;

        /* Bitfield for shifts:
         *
         * workgroups_x_shift : 6
         * workgroups_y_shift : 6
         * workgroups_z_shift : 6
         * workgroups_x_shift_2 : 4
         *
         * NOTE(review): the leading entries of this list appear to have been
         * lost in this extraction — verify against the full file. */
        u32 invocation_shifts;

        /* NOTE(review): a draw-mode field upstream places before
         * unknown_draw appears to have been dropped — verify. */
        u32 unknown_draw : 22;

        /* This is the same as workgroups_x_shift_2 in compute shaders, but
         * always 5 for vertex jobs and 6 for tiler jobs. I suspect this has
         * something to do with how many quads get put in the same execution
         * engine, which is a balance (you don't want to starve the engine, but
         * you also want to distribute work evenly).
         */
        u32 workgroups_x_shift_3 : 6;

        /* Negative of min_index. This is used to compute
         * the unbiased index in tiler/fragment shader runs.
         *
         * The hardware adds offset_bias_correction in each run,
         * so that absent an index bias, the first vertex processed is
         * genuinely the first vertex (0). But with an index bias,
         * the first vertex process is numbered the same as the bias.
         *
         * To represent this more conveniently:
         * unbiased_index = lower_bound_index +
         *                  offset_bias_correction
         *
         * This is done since the hardware doesn't accept a index_bias
         * and this allows it to recover the unbiased index.
         */
        int32_t offset_bias_correction;

        /* Like many other strictly nonzero quantities, index_count is
         * subtracted by one. For an indexed cube, this is equal to 35 = 6
         * faces * 2 triangles/per face * 3 vertices/per triangle - 1. That is,
         * for an indexed draw, index_count is the number of actual vertices
         * rendered whereas invocation_count is the number of unique vertices
         * rendered (the number of times the vertex shader must be invoked).
         * For non-indexed draws, this is just equal to invocation_count.
         *
         * NOTE(review): the index_count field itself appears to have been
         * dropped from this extraction — verify. */

        /* No hidden structure; literally just a pointer to an array of uint
         * indices (width depends on flags). Thanks, guys, for not making my
         * life insane for once! NULL for non-indexed draws.
         *
         * NOTE(review): the indices pointer field itself appears to have been
         * dropped from this extraction — verify. */
} __attribute__((packed));
843 /* Point size / line width can either be specified as a 32-bit float (for
844 * constant size) or as a [machine word size]-bit GPU pointer (for varying size). If a pointer
845 * is selected, by setting the appropriate MALI_DRAW_VARYING_SIZE bit in the tiler
846 * payload, the contents of varying_pointer will be intepreted as an array of
847 * fp16 sizes, one for each vertex. gl_PointSize is therefore implemented by
848 * creating a special MALI_R16F varying writing to varying_pointer. */
850 union midgard_primitive_size
{
/* Describes the GPU-managed tiler heap on Bifrost. */
struct bifrost_tiler_heap_meta {
        /* NOTE(review): leading fields upstream places before the heap
         * pointers appear to have been dropped from this extraction —
         * verify. */

        /* note: these are just guesses! */
        mali_ptr tiler_heap_start;
        mali_ptr tiler_heap_free;
        mali_ptr tiler_heap_end;

        /* hierarchy weights? but they're still 0 after the job has run... */
} __attribute__((packed));
/* Top-level tiler state for a Bifrost tiler job. */
struct bifrost_tiler_meta {
        u32 tiler_heap_next_start; /* To be written by the GPU */
        u32 used_hierarchy_mask; /* To be written by the GPU */
        u16 hierarchy_mask; /* Five values observed: 0xa, 0x14, 0x28, 0x50, 0xa0 */

        /* NOTE(review): fields between hierarchy_mask and tiler_heap_meta
         * (e.g. flags and framebuffer dimensions) appear to have been
         * dropped from this extraction — verify. */

        mali_ptr tiler_heap_meta;
        /* TODO what is this used for? */
} __attribute__((packed));
/* Tiler-specific portion of a Bifrost tiler payload. */
struct bifrost_tiler_only {
        union midgard_primitive_size primitive_size;

        /* NOTE(review): upstream places a pointer to bifrost_tiler_meta
         * here; it appears to have been dropped from this extraction —
         * verify. */

        u64 zero1, zero2, zero3, zero4, zero5, zero6;
} __attribute__((packed));
/* Shared postfix of vertex and tiler job payloads: descriptor pointers for
 * attributes, varyings, textures, samplers, etc. */
struct mali_vertex_tiler_postfix {
        u16 gl_enables; // 0x6 on Midgard, 0x2 on Bifrost

        /* Both zero for non-instanced draws. For instanced draws, a
         * decomposition of padded_num_vertices. See the comments about the
         * corresponding fields in mali_attr for context. */

        unsigned instance_shift : 5;
        unsigned instance_odd : 3;

        /* Offset for first vertex in buffer */
        /* NOTE(review): the offset_start field itself appears to have been
         * dropped from this extraction — verify. */

        /* Zero for vertex jobs. Pointer to the position (gl_Position) varying
         * output from the vertex shader for tiler jobs.
         */
        u64 position_varying;

        /* An array of mali_uniform_buffer_meta's. The size is given by the
         * shader descriptor.
         * NOTE(review): the pointer field itself appears to have been
         * dropped from this extraction — verify. */

        /* On Bifrost, this is a pointer to an array of bifrost_texture_descriptor.
         * On Midgard, this is a pointer to an array of pointers to the texture
         * descriptors, number of pointers bounded by number of textures. The
         * indirection is needed to accommodate varying numbers and sizes of
         * texture descriptors.
         * NOTE(review): the pointer field itself appears to have been
         * dropped from this extraction — verify. */

        /* For OpenGL, from what I've seen, this is intimately connected to
         * texture_meta. cwabbott says this is not the case under Vulkan, hence
         * why this field is separate (Midgard is Vulkan capable). Pointer to
         * array of sampler descriptors (which are uniform in size) */
        u64 sampler_descriptor;

        u64 attributes; /* struct attribute_buffer[] */
        u64 attribute_meta; /* attribute_meta[] */
        u64 varyings; /* struct attr */
        u64 varying_meta; /* pointer */

        u64 occlusion_counter; /* A single bit as far as I can tell */

        /* On Bifrost, this points directly to a mali_shared_memory structure.
         * On Midgard, this points to a framebuffer (either SFBD or MFBD as
         * tagged), which embeds a mali_shared_memory structure */
        mali_ptr shared_memory;
} __attribute__((packed));
/* Complete Midgard vertex/tiler job payload: prefix + postfix + primitive
 * size. */
struct midgard_payload_vertex_tiler {
        struct mali_vertex_tiler_prefix prefix;
        struct mali_vertex_tiler_postfix postfix;

        union midgard_primitive_size primitive_size;
} __attribute__((packed));
/* Bifrost vertex job payload (no tiler section). */
struct bifrost_payload_vertex {
        struct mali_vertex_tiler_prefix prefix;
        struct mali_vertex_tiler_postfix postfix;
} __attribute__((packed));
/* Bifrost tiler job payload: prefix, tiler-only section, then postfix. */
struct bifrost_payload_tiler {
        struct mali_vertex_tiler_prefix prefix;
        struct bifrost_tiler_only tiler;
        struct mali_vertex_tiler_postfix postfix;
} __attribute__((packed));
/* Fused Bifrost vertex+tiler payload: one prefix shared by both halves,
 * with separate postfixes for the tiler and vertex parts. */
struct bifrost_payload_fused {
        struct mali_vertex_tiler_prefix prefix;
        struct bifrost_tiler_only tiler;
        struct mali_vertex_tiler_postfix tiler_postfix;
        u64 padding; /* zero */
        struct mali_vertex_tiler_postfix vertex_postfix;
} __attribute__((packed));
/* Purposeful off-by-one in width, height fields. For example, a (64, 64)
 * texture is stored as (63, 63) in these fields. This adjusts for that.
 * There's an identical pattern in the framebuffer descriptor. Even vertex
 * count fields work this way, hence the generic name -- integral fields that
 * are strictly positive generally need this adjustment. The argument is
 * parenthesized so compound expressions bind correctly. */

#define MALI_POSITIVE(dim) ((dim) - 1)

#define MAX_MIP_LEVELS (13)

/* Cubemap bloats everything up */
#define MAX_CUBE_FACES (6)

/* For each pointer, there is an address and optionally also a stride */
#define MAX_ELEMENTS (2)
/* Used for lod encoding. Thanks @urjaman for pointing out these routines can
 * be cleaned up a lot. */

/* Decode an 8.8 fixed-point value to float. The argument is parenthesized so
 * compound expressions (ternaries, shifts) divide as a whole. */
#define DECODE_FIXED_16(x) ((float) ((x) / 256.0))
/* Encode a float LOD as signed 8.8 fixed point, clamping to the hardware
 * range. The upper bound backs off max (32.0) by 1/512 to absorb float
 * rounding error; the lower bound is -max when negative LODs are allowed
 * (LOD bias), otherwise 0. */
static inline int16_t
FIXED_16(float x, bool allow_negative)
{
        /* Clamp inputs, accounting for float error */
        float max_lod = (32.0 - (1.0 / 512.0));
        float min_lod = allow_negative ? -max_lod : 0.0;

        if (x > max_lod)
                x = max_lod;
        else if (x < min_lod)
                x = min_lod;

        /* Scale into 8.8 fixed point, truncating toward zero */
        return (int) (x * 256.0);
}
/* From presentations, 16x16 tiles externally. Use shift for fast computation
 * of tile numbers. */

#define MALI_TILE_SHIFT 4
#define MALI_TILE_LENGTH (1 << MALI_TILE_SHIFT)

/* Tile coordinates are stored as a compact u32, as only 12 bits are needed to
 * each component. Notice that this provides a theoretical upper bound of (1 <<
 * 12) = 4096 tiles in each direction, addressing a maximum framebuffer of size
 * 65536x65536. Multiplying that together, times another four given that Mali
 * framebuffers are 32-bit ARGB8888, means that this upper bound would take 16
 * gigabytes of RAM just to store the uncompressed framebuffer itself, let
 * alone rendering in real-time to such a buffer. */

/* From mali_kbase_10969_workaround.c */
#define MALI_X_COORD_MASK 0x00000FFF
#define MALI_Y_COORD_MASK 0x0FFF0000

/* Extract parts of a tile coordinate */
#define MALI_TILE_COORD_X(coord) ((coord) & MALI_X_COORD_MASK)
#define MALI_TILE_COORD_Y(coord) (((coord) & MALI_Y_COORD_MASK) >> 16)

/* Helpers to generate tile coordinates based on the boundary coordinates in
 * screen space. So, with the bounds (0, 0) to (128, 128) for the screen, these
 * functions would convert it to the bounding tiles (0, 0) to (7, 7).
 * Intentional "off-by-one"; finding the tile number is a form of fencepost
 * problem. */

#define MALI_MAKE_TILE_COORDS(X, Y) ((X) | ((Y) << 16))
/* Arguments parenthesized so shift/arithmetic expressions expand safely */
#define MALI_BOUND_TO_TILE(B, bias) (((B) - (bias)) >> MALI_TILE_SHIFT)
#define MALI_COORDINATE_TO_TILE(W, H, bias) MALI_MAKE_TILE_COORDS(MALI_BOUND_TO_TILE(W, bias), MALI_BOUND_TO_TILE(H, bias))
#define MALI_COORDINATE_TO_TILE_MIN(W, H) MALI_COORDINATE_TO_TILE(W, H, 0)
#define MALI_COORDINATE_TO_TILE_MAX(W, H) MALI_COORDINATE_TO_TILE(W, H, 1)
/* Fragment job payload.
 *
 * NOTE(review): the embedded upstream line numbering jumps 1044 -> 1047 here,
 * so fields (likely min/max tile coordinates) appear lost in extraction --
 * verify against upstream before relying on this layout. */
struct mali_payload_fragment {
        /* Pointer to the framebuffer descriptor -- presumably SFBD or MFBD as
         * tagged, matching the postfix's shared_memory convention; confirm. */
        mali_ptr framebuffer;
} __attribute__((packed));
/* Single Framebuffer Descriptor */

/* Flags apply to format. With just MSAA_A and MSAA_B, the framebuffer is
 * configured for 4x. With MSAA_8, it is configured for 8x. */

#define MALI_SFBD_FORMAT_MSAA_8 (1 << 3)
#define MALI_SFBD_FORMAT_MSAA_A (1 << 4)
/* NOTE(review): MSAA_B shares bit 4 with MSAA_A, which makes "MSAA_A and
 * MSAA_B" a single bit -- suspicious duplicate; verify the intended bit. */
#define MALI_SFBD_FORMAT_MSAA_B (1 << 4)
#define MALI_SFBD_FORMAT_SRGB (1 << 5)

/* Fast/slow based on whether all three buffers are cleared at once */

#define MALI_CLEAR_FAST (1 << 18)
#define MALI_CLEAR_SLOW (1 << 28)
/* NOTE(review): 1 << 31 overflows signed int (UB in C); consider 1u << 31 */
#define MALI_CLEAR_SLOW_STENCIL (1 << 31)

/* Configures hierarchical tiling on Midgard for both SFBD/MFBD (embedded
 * within the larger framebuffer descriptor). Analogous to
 * bifrost_tiler_heap_meta and bifrost_tiler_meta */

/* See pan_tiler.c for derivation */
#define MALI_HIERARCHY_MASK ((1 << 9) - 1)

/* Flag disabling the tiler for clear-only jobs, with
 * hierarchical tiling */
#define MALI_TILER_DISABLED (1 << 12)

/* Flag selecting userspace-generated polygon list, for clear-only jobs without
 * hierarchical tiling. */
#define MALI_TILER_USER 0xFFF

/* Absent any geometry, the minimum size of the polygon list header */
#define MALI_TILER_MINIMUM_HEADER_SIZE 0x200
/* NOTE(review): embedded upstream line numbers jump at several points in this
 * struct and no closing brace is visible, so fields (the hierarchy_mask/flags
 * pair the comment below describes, heap_end, and the weights array) plus the
 * struct terminator appear lost in extraction. Do not rely on these offsets;
 * compare with upstream. */
struct midgard_tiler_descriptor {
        /* Size of the entire polygon list; see pan_tiler.c for the
         * computation. It's based on hierarchical tiling */
        u32 polygon_list_size;

        /* Name known from the replay workaround in the kernel. What exactly is
         * flagged here is less known. We do know that (tiler_hierarchy_mask & 0x1ff)
         * specifies a mask of hierarchy weights, which explains some of the
         * performance mysteries around setting it. We also see the bottom bit
         * of tiler_flags set in the kernel, but no comment why.
         *
         * hierarchy_mask can have the TILER_DISABLED flag */

        /* See mali_tiler.c for an explanation */
        mali_ptr polygon_list;
        mali_ptr polygon_list_body;

        /* Names based on the symmetry we see with replay jobs which name these
         * explicitly */
        mali_ptr heap_start; /* tiler heap_free_address */

        /* Hierarchy weights. We know these are weights based on the kernel,
         * but I've never seen them be anything other than zero */
/* Format word of the Single Framebuffer Descriptor.
 *
 * NOTE(review): embedded upstream numbering jumps (1116 -> 1120 -> 1124 ->
 * 1129) and the missing closing brace indicate fields and the struct
 * terminator were lost in extraction; verify against upstream. */
struct mali_sfbd_format {
        /* mali_channel_swizzle */
        unsigned swizzle : 12;

        /* Presumably MALI_POSITIVE encoded like mali_rt_format's field of the
         * same name -- confirm. */
        unsigned nr_channels : 2;

        enum mali_block_format block : 2;
/* Shared structure at the start of framebuffer descriptors, or used bare for
 * compute jobs, configuring stack and shared memory */

/* NOTE(review): embedded upstream numbering jumps (1139 -> 1142, 1155 -> 1157)
 * suggest fields were lost in extraction; the visible bitfields only total 36
 * bits, which reinforces that. Verify offsets against upstream. */
struct mali_shared_memory {
        u32 stack_shift : 4;

        /* Configuration for shared memory for compute shaders.
         * shared_workgroup_count is logarithmic and may be computed for a
         * compute shader using shared memory as:
         *
         * shared_workgroup_count = MAX2(ceil(log2(count_x)) + ... + ceil(log2(count_z), 10)
         *
         * For compute shaders that don't use shared memory, or non-compute
         * shaders, this is set to ~0 */
        u32 shared_workgroup_count : 5;
        u32 shared_unk1 : 3;
        u32 shared_shift : 4;
        u32 shared_zero : 20;

        mali_ptr scratchpad;

        /* For compute shaders, the RAM backing of workgroup-shared memory. For
         * fragment shaders on Bifrost, apparently multisampling locations */
        mali_ptr shared_memory;
} __attribute__((packed));
/* Configures multisampling on Bifrost fragment jobs */

/* NOTE(review): embedded upstream numbering jumps (1168 -> 1171 -> 1173)
 * suggest surrounding fields were lost in extraction; verify. */
struct bifrost_multisampling {
        /* Pointer to the sample locations buffer */
        mali_ptr sample_locations;
} __attribute__((packed));
/* Single Framebuffer Descriptor (SFBD), the Midgard framebuffer layout that
 * predates the MFBD.
 *
 * NOTE(review): embedded upstream line numbers jump at several points in this
 * struct (e.g. 1177 -> 1182 -> 1190, 1190 -> 1197), so fields -- including the
 * width/height fields the off-by-one comment below refers to -- appear lost in
 * extraction. Offsets here will not match hardware; compare with upstream. */
struct mali_single_framebuffer {
        struct mali_shared_memory shared_memory;
        struct mali_sfbd_format format;

        /* Purposeful off-by-one in these fields should be accounted for by the
         * MALI_DIMENSION macro */

        /* Each tile has an 8-byte checksum, so presumably width-in-tiles * 8;
         * confirm against mali_framebuffer_extra's field of the same name. */
        u32 checksum_stride;

        /* By default, the framebuffer is upside down from OpenGL's
         * perspective. Set framebuffer to the end and negate the stride to
         * flip in the Y direction */
        mali_ptr framebuffer;

        /* Depth and stencil buffers are interleaved, it appears, as they are
         * set to the same address in captures. Both fields set to zero if the
         * buffer is not being cleared. Depending on GL_ENABLE magic, you might
         * get a zero enable despite the buffer being present; that still is
         * (comment truncated in extraction) */
        mali_ptr depth_buffer; // not SAME_VA
        u32 depth_stride_zero : 4;
        u32 depth_stride : 28;

        mali_ptr stencil_buffer; // not SAME_VA
        u32 stencil_stride_zero : 4;
        u32 stencil_stride : 28;

        u32 clear_color_1; // RGBA8888 from glClear, actually used by hardware
        u32 clear_color_2; // always equal, but unclear function?
        u32 clear_color_3; // always equal, but unclear function?
        u32 clear_color_4; // always equal, but unclear function?

        /* Set to zero if not cleared */
        float clear_depth_1; // float32, ditto
        float clear_depth_2; // float32, ditto
        float clear_depth_3; // float32, ditto
        float clear_depth_4; // float32, ditto

        u32 clear_stencil; // Exactly as it appears in OpenGL

        struct midgard_tiler_descriptor tiler;

        /* More below this, maybe */
} __attribute__((packed));
#define MALI_MFBD_FORMAT_SRGB (1 << 0)

/* Per-render-target format word of the MFBD.
 *
 * NOTE(review): embedded upstream numbering jumps (1242 -> 1246, 1246 -> 1250,
 * 1254 -> 1258) indicate fields were lost in extraction; verify bit layout
 * against upstream. */
struct mali_rt_format {
        unsigned nr_channels : 2; /* MALI_POSITIVE */
        enum mali_block_format block : 2;
        enum mali_msaa msaa : 2;
        unsigned swizzle : 12;

        /* Disables MFBD preload. When this bit is set, the render target will
         * be cleared every frame. When this bit is clear, the hardware will
         * automatically wallpaper the render target back from main memory.
         * Unfortunately, MFBD preload is very broken on Midgard, so in
         * practice, this is a chicken bit that should always be set.
         * Discovered by accident, as all good chicken bits are. */
        unsigned no_preload : 1;
} __attribute__((packed));
/* Flags for afbc.flags and ds_afbc.flags (ARM Framebuffer Compression) */

#define MALI_AFBC_FLAGS 0x10009

/* Lossless RGB and RGBA colorspace transform */
#define MALI_AFBC_YTR (1 << 17)
/* One render target of the MFBD; an array of these follows the framebuffer
 * descriptor (see mali_framebuffer below).
 *
 * NOTE(review): embedded upstream line numbers jump at several points (1276 ->
 * 1281, 1288 -> 1292, 1293 -> 1296, 1299 -> 1300), so fields and possibly a
 * nested-struct wrapper around the AFBC fields were lost in extraction;
 * offsets here will not match hardware -- compare with upstream. */
struct mali_render_target {
        struct mali_rt_format format;

        /* Stuff related to ARM Framebuffer Compression. When AFBC is enabled,
         * there is an extra metadata buffer that contains 16 bytes per tile.
         * The framebuffer needs to be the same size as before, since we don't
         * know ahead of time how much space it will take up. The
         * framebuffer_stride is set to 0, since the data isn't stored linearly.
         *
         * When AFBC is disabled, these fields are zero. */

        u32 stride; // stride in units of tiles
        u32 flags; // = 0x20000

        mali_ptr framebuffer;

        u32 framebuffer_stride : 28; // in units of bytes, row to next
        u32 layer_stride; /* For multisample rendering */

        u32 clear_color_1; // RGBA8888 from glClear, actually used by hardware
        u32 clear_color_2; // always equal, but unclear function?
        u32 clear_color_3; // always equal, but unclear function?
        u32 clear_color_4; // always equal, but unclear function?
} __attribute__((packed));
/* An optional part of mali_framebuffer. It comes between the main structure
 * and the array of render targets. It must be included if any of these are
 * used:
 *
 * - Transaction Elimination
 * - TODO: Anything else?
 * (list truncated in extraction -- upstream numbering jumps 1312 -> 1314) */

#define MALI_EXTRA_PRESENT (0x1)
/* NOTE(review): numbering jump 1318 -> 1321 suggests a flag with value 0x2
 * was lost in extraction. */
#define MALI_EXTRA_ZS (0x4)
/* Optional extra section of the MFBD (see MALI_EXTRA_* above), carrying
 * checksum (Transaction Elimination) and depth/stencil attachment state.
 *
 * NOTE(review): embedded upstream line numbers jump at several points (1326 ->
 * 1328, 1334 -> 1337, 1343 -> 1349, 1358 -> 1366), so fields -- and possibly a
 * union wrapper separating the AFBC and linear depth/stencil layouts -- were
 * lost in extraction; verify offsets against upstream. */
struct mali_framebuffer_extra {
        /* Each tile has an 8 byte checksum, so the stride is "width in tiles * 8" */
        u32 checksum_stride;

        unsigned flags_lo : 4;
        enum mali_block_format zs_block : 2;

        /* Number of samples in Z/S attachment, MALI_POSITIVE. So zero for
         * 1-sample (non-MSAA), 0x3 for MSAA 4x, etc */
        unsigned zs_samples : 4;
        unsigned flags_hi : 22;

        /* Note: AFBC is only allowed for 24/8 combined depth/stencil. */
        mali_ptr depth_stencil_afbc_metadata;
        u32 depth_stencil_afbc_stride; // in units of tiles

        mali_ptr depth_stencil;

        /* Depth becomes depth/stencil in case of combined D/S */
        u32 depth_stride_zero : 4;
        u32 depth_stride : 28;
        u32 depth_layer_stride;

        u32 stencil_stride_zero : 4;
        u32 stencil_stride : 28;
        u32 stencil_layer_stride;
} __attribute__((packed));
/* Flags for mfbd_flags (see mali_framebuffer below) */

/* Enables writing depth results back to main memory (rather than keeping them
 * on-chip in the tile buffer and then discarding) */
#define MALI_MFBD_DEPTH_WRITE (1 << 10)

/* The MFBD contains the extra mali_framebuffer_extra section */
#define MALI_MFBD_EXTRA (1 << 13)
/* Multiple Framebuffer Descriptor (MFBD), optionally followed by a
 * mali_framebuffer_extra section and an array of mali_render_target.
 *
 * NOTE(review): embedded upstream line numbers jump at several points (1382 ->
 * 1386, 1392 -> 1395, 1396 -> 1400, 1402 -> 1407), so fields were lost in
 * extraction; the visible bitfields don't fill their words. Offsets here will
 * not match hardware -- compare with upstream. */
struct mali_framebuffer {
        struct mali_shared_memory shared_memory;
        struct bifrost_multisampling msaa;

        /* Presumably width/height pairs; relation between the two pairs and
         * any off-by-one encoding not visible here -- confirm. */
        u16 width1, height1;
        u16 width2, height2;

        u32 unk1 : 19; // = 0x01000
        u32 rt_count_1 : 3; // off-by-one (use MALI_POSITIVE)
        u32 unk2 : 2; // = 0
        u32 rt_count_2 : 3; // no off-by-one

        u32 clear_stencil : 8;
        u32 mfbd_flags : 24; // = 0x100

        struct midgard_tiler_descriptor tiler;

        mali_ptr tiler_meta;

        /* optional: struct mali_framebuffer_extra extra */
        /* struct mali_render_target rts[] */
} __attribute__((packed));
1411 #endif /* __PANFROST_JOB_H__ */