2 * © Copyright 2017-2018 Alyssa Rosenzweig
3 * © Copyright 2017-2018 Connor Abbott
4 * © Copyright 2017-2018 Lyude Paul
* © Copyright 2019 Collabora, Ltd.
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the "Software"),
9 * to deal in the Software without restriction, including without limitation
10 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
11 * and/or sell copies of the Software, and to permit persons to whom the
12 * Software is furnished to do so, subject to the following conditions:
14 * The above copyright notice and this permission notice (including the next
15 * paragraph) shall be included in all copies or substantial portions of the
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
21 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
23 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
28 #ifndef __PANFROST_JOB_H__
29 #define __PANFROST_JOB_H__
33 #include <panfrost-misc.h>
38 JOB_TYPE_WRITE_VALUE
= 2,
39 JOB_TYPE_CACHE_FLUSH
= 3,
42 JOB_TYPE_GEOMETRY
= 6,
45 JOB_TYPE_FRAGMENT
= 9,
52 MALI_LINE_STRIP
= 0x4,
55 MALI_TRIANGLE_STRIP
= 0xA,
56 MALI_TRIANGLE_FAN
= 0xC,
59 MALI_QUAD_STRIP
= 0xF,
61 /* All other modes invalid */
64 /* Applies to tiler_gl_enables */
66 #define MALI_OCCLUSION_QUERY (1 << 3)
67 #define MALI_OCCLUSION_PRECISE (1 << 4)
69 /* Set for a glFrontFace(GL_CCW) in a Y=0=TOP coordinate system (like Gallium).
* In OpenGL, this would correspond to glFrontFace(GL_CW). Mesa and the blob
71 * disagree about how to do viewport flipping, so the blob actually sets this
72 * for GL_CW but then has a negative viewport stride */
74 #define MALI_FRONT_CCW_TOP (1 << 5)
76 #define MALI_CULL_FACE_FRONT (1 << 6)
77 #define MALI_CULL_FACE_BACK (1 << 7)
79 /* Used in stencil and depth tests */
86 MALI_FUNC_GREATER
= 4,
87 MALI_FUNC_NOTEQUAL
= 5,
92 /* Flags apply to unknown2_3? */
94 #define MALI_HAS_MSAA (1 << 0)
95 #define MALI_CAN_DISCARD (1 << 5)
97 /* Applies on SFBD systems, specifying that programmable blending is in use */
98 #define MALI_HAS_BLEND_SHADER (1 << 6)
/* func is mali_func */
/* Arguments are parenthesized so expression arguments (e.g. `a | b`)
 * expand correctly. */
#define MALI_DEPTH_FUNC(func) (((func)) << 8)
#define MALI_GET_DEPTH_FUNC(flags) (((flags) >> 8) & 0x7)
#define MALI_DEPTH_FUNC_MASK MALI_DEPTH_FUNC(0x7)

#define MALI_DEPTH_WRITEMASK (1 << 11)
107 /* Next flags to unknown2_4 */
108 #define MALI_STENCIL_TEST (1 << 0)
111 #define MALI_SAMPLE_ALPHA_TO_COVERAGE_NO_BLEND_SHADER (1 << 1)
113 #define MALI_NO_DITHER (1 << 9)
114 #define MALI_DEPTH_RANGE_A (1 << 12)
115 #define MALI_DEPTH_RANGE_B (1 << 13)
116 #define MALI_NO_MSAA (1 << 14)
/* Stencil test state is all encoded in a single u32, just with a lot of
 * fields packed in. */

/* Stencil operation applied on test pass/fail; values match the hardware
 * encoding used by struct mali_stencil_test. */
enum mali_stencil_op {
        MALI_STENCIL_KEEP = 0,
        MALI_STENCIL_REPLACE = 1,
        MALI_STENCIL_ZERO = 2,
        MALI_STENCIL_INVERT = 3,
        MALI_STENCIL_INCR_WRAP = 4,
        MALI_STENCIL_DECR_WRAP = 5,
        MALI_STENCIL_INCR = 6,
        MALI_STENCIL_DECR = 7
};
/* Per-face stencil test state, packed as bitfields to match the hardware's
 * in-memory layout -- field order must not change.
 * NOTE(review): only 12 bits of fields are visible in this chunk; the full
 * descriptor presumably also carries reference/mask bits -- confirm against
 * the complete header. */
struct mali_stencil_test {
        enum mali_func func : 3;         /* stencil comparison function */
        enum mali_stencil_op sfail : 3;  /* op when the stencil test fails */
        enum mali_stencil_op dpfail : 3; /* op when stencil passes, depth fails */
        enum mali_stencil_op dppass : 3; /* op when both stencil and depth pass */
} __attribute__((packed));
142 #define MALI_MASK_R (1 << 0)
143 #define MALI_MASK_G (1 << 1)
144 #define MALI_MASK_B (1 << 2)
145 #define MALI_MASK_A (1 << 3)
/* Behavior of the non-dominant side of a blend equation; see
 * struct mali_blend_mode below. */
enum mali_nondominant_mode {
        MALI_BLEND_NON_MIRROR = 0,
        MALI_BLEND_NON_ZERO = 1
};
/* Which side of the blend equation is the dominant one; see
 * struct mali_blend_mode below. */
enum mali_dominant_blend {
        MALI_BLEND_DOM_SOURCE = 0,
        MALI_BLEND_DOM_DESTINATION = 1
};
/* Blend factor applied to the dominant side of a blend equation; see
 * struct mali_blend_mode below. */
enum mali_dominant_factor {
        MALI_DOMINANT_UNK0 = 0,
        MALI_DOMINANT_ZERO = 1,
        MALI_DOMINANT_SRC_COLOR = 2,
        MALI_DOMINANT_DST_COLOR = 3,
        MALI_DOMINANT_UNK4 = 4,
        MALI_DOMINANT_SRC_ALPHA = 5,
        MALI_DOMINANT_DST_ALPHA = 6,
        MALI_DOMINANT_CONSTANT = 7,
};
/* Modifier/clip behavior for a blend mode; see struct mali_blend_mode
 * below. */
enum mali_blend_modifier {
        MALI_BLEND_MOD_UNK0 = 0,
        MALI_BLEND_MOD_NORMAL = 1,
        MALI_BLEND_MOD_SOURCE_ONE = 2,
        MALI_BLEND_MOD_DEST_ONE = 3,
};
/* One half (RGB or alpha) of a hardware blend equation, 12 bits; used as
 * mali_blend_equation::rgb_mode / alpha_mode. Bitfield order matches the
 * packed in-memory layout -- do not reorder. */
struct mali_blend_mode {
        enum mali_blend_modifier clip_modifier : 2;
        unsigned unused_0 : 1;
        /* presumably negates the source factor -- TODO confirm */
        unsigned negate_source : 1;

        /* selects which operand (source/destination) is dominant */
        enum mali_dominant_blend dominant : 1;

        enum mali_nondominant_mode nondominant_mode : 1;

        unsigned unused_1 : 1;

        /* presumably negates the destination factor -- TODO confirm */
        unsigned negate_dest : 1;

        enum mali_dominant_factor dominant_factor : 3;
        /* presumably uses (1 - factor) for the dominant side -- TODO confirm */
        unsigned complement_dominant : 1;
} __attribute__((packed));
/* Fixed-function blend equation descriptor: one 12-bit mode per RGB/alpha
 * plus the color write mask. Packed to match the hardware layout. */
struct mali_blend_equation {
        /* Of type mali_blend_mode */
        unsigned rgb_mode : 12;   /* blend mode for the RGB channels */
        unsigned alpha_mode : 12; /* blend mode for the alpha channel */

        /* Corresponds to MALI_MASK_* above and glColorMask arguments */
        unsigned color_mask : 4;
} __attribute__((packed));
/* Used with channel swizzling */
/* Source selector for one output component of a swizzle; see
 * struct mali_channel_swizzle below. */
enum mali_channel {
        MALI_CHANNEL_RED = 0,
        MALI_CHANNEL_GREEN = 1,
        MALI_CHANNEL_BLUE = 2,
        MALI_CHANNEL_ALPHA = 3,
        MALI_CHANNEL_ZERO = 4,   /* constant 0 */
        MALI_CHANNEL_ONE = 5,    /* constant 1 */
        MALI_CHANNEL_RESERVED_0 = 6,
        MALI_CHANNEL_RESERVED_1 = 7,
};
/* Channel swizzle: one mali_channel selector per output component,
 * 12 bits total, packed to match the hardware layout. */
struct mali_channel_swizzle {
        enum mali_channel r : 3; /* source for the red output */
        enum mali_channel g : 3; /* source for the green output */
        enum mali_channel b : 3; /* source for the blue output */
        enum mali_channel a : 3; /* source for the alpha output */
} __attribute__((packed));
223 /* Compressed per-pixel formats. Each of these formats expands to one to four
224 * floating-point or integer numbers, as defined by the OpenGL specification.
225 * There are various places in OpenGL where the user can specify a compressed
226 * format in memory, which all use the same 8-bit enum in the various
227 * descriptors, although different hardware units support different formats.
230 /* The top 3 bits specify how the bits of each component are interpreted. */
233 #define MALI_FORMAT_COMPRESSED (0 << 5)
235 /* e.g. R11F_G11F_B10F */
236 #define MALI_FORMAT_SPECIAL (2 << 5)
238 /* signed normalized, e.g. RGBA8_SNORM */
239 #define MALI_FORMAT_SNORM (3 << 5)
242 #define MALI_FORMAT_UINT (4 << 5)
244 /* e.g. RGBA8 and RGBA32F */
245 #define MALI_FORMAT_UNORM (5 << 5)
247 /* e.g. RGBA8I and RGBA16F */
248 #define MALI_FORMAT_SINT (6 << 5)
250 /* These formats seem to largely duplicate the others. They're used at least
251 * for Bifrost framebuffer output.
253 #define MALI_FORMAT_SPECIAL2 (7 << 5)
/* If the high 3 bits are 3 to 6 these two bits say how many components
 * there are. The argument is parenthesized so expression arguments
 * expand correctly. */
#define MALI_NR_CHANNELS(n) (((n) - 1) << 3)

/* If the high 3 bits are 3 to 6, then the low 3 bits say how big each
 * component is, except the special MALI_CHANNEL_FLOAT which overrides what the
 * size bits would otherwise mean. */
#define MALI_CHANNEL_4 2
#define MALI_CHANNEL_8 3
#define MALI_CHANNEL_16 4
#define MALI_CHANNEL_32 5

/* For MALI_FORMAT_SINT it means a half-float (e.g. RG16F). For
 * MALI_FORMAT_UNORM, it means a 32-bit float. */
#define MALI_CHANNEL_FLOAT 7
279 MALI_ETC2_RGB8
= MALI_FORMAT_COMPRESSED
| 0x1,
280 MALI_ETC2_R11_UNORM
= MALI_FORMAT_COMPRESSED
| 0x2,
281 MALI_ETC2_RGBA8
= MALI_FORMAT_COMPRESSED
| 0x3,
282 MALI_ETC2_RG11_UNORM
= MALI_FORMAT_COMPRESSED
| 0x4,
283 MALI_ETC2_R11_SNORM
= MALI_FORMAT_COMPRESSED
| 0x11,
284 MALI_ETC2_RG11_SNORM
= MALI_FORMAT_COMPRESSED
| 0x12,
285 MALI_ETC2_RGB8A1
= MALI_FORMAT_COMPRESSED
| 0x13,
286 MALI_ASTC_SRGB_SUPP
= MALI_FORMAT_COMPRESSED
| 0x16,
287 MALI_ASTC_HDR_SUPP
= MALI_FORMAT_COMPRESSED
| 0x17,
289 MALI_RGB565
= MALI_FORMAT_SPECIAL
| 0x0,
290 MALI_RGB5_A1_UNORM
= MALI_FORMAT_SPECIAL
| 0x2,
291 MALI_RGB10_A2_UNORM
= MALI_FORMAT_SPECIAL
| 0x3,
292 MALI_RGB10_A2_SNORM
= MALI_FORMAT_SPECIAL
| 0x5,
293 MALI_RGB10_A2UI
= MALI_FORMAT_SPECIAL
| 0x7,
294 MALI_RGB10_A2I
= MALI_FORMAT_SPECIAL
| 0x9,
297 MALI_NV12
= MALI_FORMAT_SPECIAL
| 0xc,
299 MALI_Z32_UNORM
= MALI_FORMAT_SPECIAL
| 0xD,
300 MALI_R32_FIXED
= MALI_FORMAT_SPECIAL
| 0x11,
301 MALI_RG32_FIXED
= MALI_FORMAT_SPECIAL
| 0x12,
302 MALI_RGB32_FIXED
= MALI_FORMAT_SPECIAL
| 0x13,
303 MALI_RGBA32_FIXED
= MALI_FORMAT_SPECIAL
| 0x14,
304 MALI_R11F_G11F_B10F
= MALI_FORMAT_SPECIAL
| 0x19,
305 MALI_R9F_G9F_B9F_E5F
= MALI_FORMAT_SPECIAL
| 0x1b,
306 /* Only used for varyings, to indicate the transformed gl_Position */
307 MALI_VARYING_POS
= MALI_FORMAT_SPECIAL
| 0x1e,
308 /* Only used for varyings, to indicate that the write should be
311 MALI_VARYING_DISCARD
= MALI_FORMAT_SPECIAL
| 0x1f,
313 MALI_R8_SNORM
= MALI_FORMAT_SNORM
| MALI_NR_CHANNELS(1) | MALI_CHANNEL_8
,
314 MALI_R16_SNORM
= MALI_FORMAT_SNORM
| MALI_NR_CHANNELS(1) | MALI_CHANNEL_16
,
315 MALI_R32_SNORM
= MALI_FORMAT_SNORM
| MALI_NR_CHANNELS(1) | MALI_CHANNEL_32
,
316 MALI_RG8_SNORM
= MALI_FORMAT_SNORM
| MALI_NR_CHANNELS(2) | MALI_CHANNEL_8
,
317 MALI_RG16_SNORM
= MALI_FORMAT_SNORM
| MALI_NR_CHANNELS(2) | MALI_CHANNEL_16
,
318 MALI_RG32_SNORM
= MALI_FORMAT_SNORM
| MALI_NR_CHANNELS(2) | MALI_CHANNEL_32
,
319 MALI_RGB8_SNORM
= MALI_FORMAT_SNORM
| MALI_NR_CHANNELS(3) | MALI_CHANNEL_8
,
320 MALI_RGB16_SNORM
= MALI_FORMAT_SNORM
| MALI_NR_CHANNELS(3) | MALI_CHANNEL_16
,
321 MALI_RGB32_SNORM
= MALI_FORMAT_SNORM
| MALI_NR_CHANNELS(3) | MALI_CHANNEL_32
,
322 MALI_RGBA8_SNORM
= MALI_FORMAT_SNORM
| MALI_NR_CHANNELS(4) | MALI_CHANNEL_8
,
323 MALI_RGBA16_SNORM
= MALI_FORMAT_SNORM
| MALI_NR_CHANNELS(4) | MALI_CHANNEL_16
,
324 MALI_RGBA32_SNORM
= MALI_FORMAT_SNORM
| MALI_NR_CHANNELS(4) | MALI_CHANNEL_32
,
326 MALI_R8UI
= MALI_FORMAT_UINT
| MALI_NR_CHANNELS(1) | MALI_CHANNEL_8
,
327 MALI_R16UI
= MALI_FORMAT_UINT
| MALI_NR_CHANNELS(1) | MALI_CHANNEL_16
,
328 MALI_R32UI
= MALI_FORMAT_UINT
| MALI_NR_CHANNELS(1) | MALI_CHANNEL_32
,
329 MALI_RG8UI
= MALI_FORMAT_UINT
| MALI_NR_CHANNELS(2) | MALI_CHANNEL_8
,
330 MALI_RG16UI
= MALI_FORMAT_UINT
| MALI_NR_CHANNELS(2) | MALI_CHANNEL_16
,
331 MALI_RG32UI
= MALI_FORMAT_UINT
| MALI_NR_CHANNELS(2) | MALI_CHANNEL_32
,
332 MALI_RGB8UI
= MALI_FORMAT_UINT
| MALI_NR_CHANNELS(3) | MALI_CHANNEL_8
,
333 MALI_RGB16UI
= MALI_FORMAT_UINT
| MALI_NR_CHANNELS(3) | MALI_CHANNEL_16
,
334 MALI_RGB32UI
= MALI_FORMAT_UINT
| MALI_NR_CHANNELS(3) | MALI_CHANNEL_32
,
335 MALI_RGBA8UI
= MALI_FORMAT_UINT
| MALI_NR_CHANNELS(4) | MALI_CHANNEL_8
,
336 MALI_RGBA16UI
= MALI_FORMAT_UINT
| MALI_NR_CHANNELS(4) | MALI_CHANNEL_16
,
337 MALI_RGBA32UI
= MALI_FORMAT_UINT
| MALI_NR_CHANNELS(4) | MALI_CHANNEL_32
,
339 MALI_R8_UNORM
= MALI_FORMAT_UNORM
| MALI_NR_CHANNELS(1) | MALI_CHANNEL_8
,
340 MALI_R16_UNORM
= MALI_FORMAT_UNORM
| MALI_NR_CHANNELS(1) | MALI_CHANNEL_16
,
341 MALI_R32_UNORM
= MALI_FORMAT_UNORM
| MALI_NR_CHANNELS(1) | MALI_CHANNEL_32
,
342 MALI_R32F
= MALI_FORMAT_UNORM
| MALI_NR_CHANNELS(1) | MALI_CHANNEL_FLOAT
,
343 MALI_RG8_UNORM
= MALI_FORMAT_UNORM
| MALI_NR_CHANNELS(2) | MALI_CHANNEL_8
,
344 MALI_RG16_UNORM
= MALI_FORMAT_UNORM
| MALI_NR_CHANNELS(2) | MALI_CHANNEL_16
,
345 MALI_RG32_UNORM
= MALI_FORMAT_UNORM
| MALI_NR_CHANNELS(2) | MALI_CHANNEL_32
,
346 MALI_RG32F
= MALI_FORMAT_UNORM
| MALI_NR_CHANNELS(2) | MALI_CHANNEL_FLOAT
,
347 MALI_RGB8_UNORM
= MALI_FORMAT_UNORM
| MALI_NR_CHANNELS(3) | MALI_CHANNEL_8
,
348 MALI_RGB16_UNORM
= MALI_FORMAT_UNORM
| MALI_NR_CHANNELS(3) | MALI_CHANNEL_16
,
349 MALI_RGB32_UNORM
= MALI_FORMAT_UNORM
| MALI_NR_CHANNELS(3) | MALI_CHANNEL_32
,
350 MALI_RGB32F
= MALI_FORMAT_UNORM
| MALI_NR_CHANNELS(3) | MALI_CHANNEL_FLOAT
,
351 MALI_RGBA4_UNORM
= MALI_FORMAT_UNORM
| MALI_NR_CHANNELS(4) | MALI_CHANNEL_4
,
352 MALI_RGBA8_UNORM
= MALI_FORMAT_UNORM
| MALI_NR_CHANNELS(4) | MALI_CHANNEL_8
,
353 MALI_RGBA16_UNORM
= MALI_FORMAT_UNORM
| MALI_NR_CHANNELS(4) | MALI_CHANNEL_16
,
354 MALI_RGBA32_UNORM
= MALI_FORMAT_UNORM
| MALI_NR_CHANNELS(4) | MALI_CHANNEL_32
,
355 MALI_RGBA32F
= MALI_FORMAT_UNORM
| MALI_NR_CHANNELS(4) | MALI_CHANNEL_FLOAT
,
357 MALI_R8I
= MALI_FORMAT_SINT
| MALI_NR_CHANNELS(1) | MALI_CHANNEL_8
,
358 MALI_R16I
= MALI_FORMAT_SINT
| MALI_NR_CHANNELS(1) | MALI_CHANNEL_16
,
359 MALI_R32I
= MALI_FORMAT_SINT
| MALI_NR_CHANNELS(1) | MALI_CHANNEL_32
,
360 MALI_R16F
= MALI_FORMAT_SINT
| MALI_NR_CHANNELS(1) | MALI_CHANNEL_FLOAT
,
361 MALI_RG8I
= MALI_FORMAT_SINT
| MALI_NR_CHANNELS(2) | MALI_CHANNEL_8
,
362 MALI_RG16I
= MALI_FORMAT_SINT
| MALI_NR_CHANNELS(2) | MALI_CHANNEL_16
,
363 MALI_RG32I
= MALI_FORMAT_SINT
| MALI_NR_CHANNELS(2) | MALI_CHANNEL_32
,
364 MALI_RG16F
= MALI_FORMAT_SINT
| MALI_NR_CHANNELS(2) | MALI_CHANNEL_FLOAT
,
365 MALI_RGB8I
= MALI_FORMAT_SINT
| MALI_NR_CHANNELS(3) | MALI_CHANNEL_8
,
366 MALI_RGB16I
= MALI_FORMAT_SINT
| MALI_NR_CHANNELS(3) | MALI_CHANNEL_16
,
367 MALI_RGB32I
= MALI_FORMAT_SINT
| MALI_NR_CHANNELS(3) | MALI_CHANNEL_32
,
368 MALI_RGB16F
= MALI_FORMAT_SINT
| MALI_NR_CHANNELS(3) | MALI_CHANNEL_FLOAT
,
369 MALI_RGBA8I
= MALI_FORMAT_SINT
| MALI_NR_CHANNELS(4) | MALI_CHANNEL_8
,
370 MALI_RGBA16I
= MALI_FORMAT_SINT
| MALI_NR_CHANNELS(4) | MALI_CHANNEL_16
,
371 MALI_RGBA32I
= MALI_FORMAT_SINT
| MALI_NR_CHANNELS(4) | MALI_CHANNEL_32
,
372 MALI_RGBA16F
= MALI_FORMAT_SINT
| MALI_NR_CHANNELS(4) | MALI_CHANNEL_FLOAT
,
374 MALI_RGBA4
= MALI_FORMAT_SPECIAL2
| 0x8,
375 MALI_RGBA8_2
= MALI_FORMAT_SPECIAL2
| 0xd,
376 MALI_RGB10_A2_2
= MALI_FORMAT_SPECIAL2
| 0xe,
/* Alpha coverage is encoded as 4-bits (from a clampf), with inversion
 * literally performing a bitwise invert. This function produces slightly wrong
 * results and I'm not sure why; some rounding issue I suppose... */
/* Arguments are parenthesized so expression arguments (e.g. `a + b`)
 * expand correctly. */
#define MALI_ALPHA_COVERAGE(clampf) ((uint16_t) (int) ((clampf) * 15.0f))
#define MALI_GET_ALPHA_COVERAGE(nibble) ((float) (nibble) / 15.0f)
387 /* Applies to midgard1.flags */
389 /* Should the hardware perform early-Z testing? Normally should be set
390 * for performance reasons. Clear if you use: discard,
391 * alpha-to-coverage... * It's also possible this disables
392 * forward-pixel kill; we're not quite sure which bit is which yet.
393 * TODO: How does this interact with blending?*/
395 #define MALI_EARLY_Z (1 << 6)
397 /* Should the hardware calculate derivatives (via helper invocations)? Set in a
398 * fragment shader that uses texturing or derivative functions */
400 #define MALI_HELPER_INVOCATIONS (1 << 7)
402 /* Flags denoting the fragment shader's use of tilebuffer readback. If the
403 * shader might read any part of the tilebuffer, set MALI_READS_TILEBUFFER. If
404 * it might read depth/stencil in particular, also set MALI_READS_ZS */
406 #define MALI_READS_ZS (1 << 8)
407 #define MALI_READS_TILEBUFFER (1 << 12)
409 /* The raw Midgard blend payload can either be an equation or a shader
410 * address, depending on the context */
412 union midgard_blend
{
416 struct mali_blend_equation equation
;
421 /* We need to load the tilebuffer to blend (i.e. the destination factor is not
424 #define MALI_BLEND_LOAD_TIB (0x1)
426 /* A blend shader is used to blend this render target */
427 #define MALI_BLEND_MRT_SHADER (0x2)
429 /* On MRT Midgard systems (using an MFBD), each render target gets its own
430 * blend descriptor */
432 #define MALI_BLEND_SRGB (0x400)
434 /* Dithering is specified here for MFBD, otherwise NO_DITHER for SFBD */
435 #define MALI_BLEND_NO_DITHER (0x800)
/* Per-render-target blend descriptor for MRT Midgard systems (MFBD). */
struct midgard_blend_rt {
        /* Flags base value of 0x200 to enable the render target.
         * OR with 0x1 for blending (anything other than REPLACE).
         * OR with 0x2 for programmable blending
         * OR with MALI_BLEND_SRGB for implicit sRGB
         * NOTE(review): the flags field this comment describes is not visible
         * in this chunk -- confirm against the complete header. */

        union midgard_blend blend;
} __attribute__((packed));
448 /* On Bifrost systems (all MRT), each render target gets one of these
/* Per-render-target blend descriptor on Bifrost (all Bifrost parts are MRT).
 * NOTE(review): several fields and comment fragments appear to have been lost
 * from this chunk -- confirm against the complete header. */
struct bifrost_blend_rt {
        /* This is likely an analogue of the flags on
         * midgard_blend_rt */

        u16 flags; // = 0x200

        /* Single-channel blend constants are encoded in a sort of
         * fixed-point. Basically, the float is mapped to a byte, becoming
         * a high byte, and then the lower-byte is added for precision.
         * For the original float f:
         *
         * f = (constant_hi / 255) + (constant_lo / 65535)
         *
         * constant_hi = int(f / 255)
         * constant_lo = 65535*f - (65535/255) * constant_hi
         */

        struct mali_blend_equation equation;

        /* Known values (describes a field not visible in this chunk):
         * - 0x3 when this slot is unused (everything else is 0 except the index)
         * - 0x11 when this is the fourth slot (and it's used)
         * - 0 when there is a blend shader
         */

        /* increments from 0 to 3 */

        /* So far, I've only seen:
         * - R001 for 1-component formats
         * - RG01 for 2-component formats
         * - RGB1 for 3-component formats
         * - RGBA for 4-component formats
         */
        enum mali_format format : 8;

        /* Type of the shader output variable. Note, this can
         * be different from the format.
         *
         * 0: f16 (mediump float)
         * 1: f32 (highp float)
         * 3: u32 (highp uint)
         * 4: i16 (mediump int)
         * 5: u16 (mediump uint)
         */

        /* Only the low 32 bits of the blend shader are stored, the
         * high 32 bits are implicitly the same as the original shader.
         * According to the kernel driver, the program counter for
         * shaders is actually only 24 bits, so shaders cannot cross
         * the 2^24-byte boundary, and neither can the blend shader.
         * The blob handles this by allocating a 2^24 byte pool for
         * shaders, and making sure that any blend shaders are stored
         * in the same pool as the original shader. The kernel will
         * make sure this allocation is aligned to 2^24 bytes.
         */
} __attribute__((packed));
520 /* Descriptor for the shader. Following this is at least one, up to four blend
521 * descriptors for each active render target */
/* Descriptor for the shader. Following this is at least one, up to four blend
 * descriptors for each active render target.
 * NOTE(review): this chunk appears to have lost the union/struct wrappers
 * (likely separate Bifrost and Midgard variants) that originally separated
 * the fields below -- member names are duplicated as shown, which is not
 * valid C as-is. Confirm against the complete header. */
struct mali_shader_meta {
        u32 uniform_buffer_count : 4;
        u32 unk1 : 28; // = 0x800000 for vertex, 0x958020 for tiler

        /* NOTE(review): duplicate of the field above; presumably from the
         * other (Midgard) variant of this descriptor. */
        unsigned uniform_buffer_count : 4;
        unsigned work_count : 5;    /* number of work registers used */
        unsigned uniform_count : 5;
        unsigned unknown2 : 6;

        /* Same as glPolygonOffset() arguments */

        u8 stencil_mask_front;
        u8 stencil_mask_back;

        struct mali_stencil_test stencil_front;
        struct mali_stencil_test stencil_back;

        /* On Bifrost, some system values are preloaded in
         * registers R55-R62 by the thread dispatcher prior to
         * the start of shader execution. This is a bitfield
         * with one entry for each register saying which
         * registers need to be preloaded. Right now, the known
         * assignments are (compute/vertex):
         *
         * - R55 : gl_LocalInvocationID.xy
         * - R56 : gl_LocalInvocationID.z + unknown in high 16 bits
         * - R57 : gl_WorkGroupID.x
         * - R58 : gl_WorkGroupID.y
         * - R59 : gl_WorkGroupID.z
         * - R60 : gl_GlobalInvocationID.x
         * - R61 : gl_GlobalInvocationID.y/gl_VertexID (without base)
         * - R62 : gl_GlobalInvocationID.z/gl_InstanceID (without base)
         *
         * And for fragment shaders (NOTE(review): connective text was lost
         * in extraction -- confirm against the complete header):
         *
         * - R55 : unknown, never seen (but the bit for this is
         *   sometimes set?)
         * - R56 : unknown (bit always unset)
         * - R57 : gl_PrimitiveID
         * - R58 : gl_FrontFacing in low bit, potentially other stuff
         * - R59 : u16 fragment coordinates (used to compute
         *   gl_FragCoord.xy, together with sample positions)
         * - R60 : gl_SampleMask (used in epilog, so pretty
         *   much always used, but the bit is always 0 -- is
         *   this just always pushed?)
         * - R61 : gl_SampleMaskIn and gl_SampleID, used by
         *   varying interpolation.
         * - R62 : unknown (bit always unset).
         */
        u32 preload_regs : 8;

        /* In units of 8 bytes or 64 bits, since the
         * uniform/const port loads 64 bits at a time.
         */
        u32 uniform_count : 7;
        u32 unk4 : 10; // = 2

        /* zero on bifrost */

        /* Blending information for the older non-MRT Midgard HW. Check for
         * MALI_HAS_BLEND_SHADER to decide how to interpret.
         */
        union midgard_blend blend;
} __attribute__((packed));
619 /* This only concerns hardware jobs */
621 /* Possible values for job_descriptor_size */
623 #define MALI_JOB_32 0
624 #define MALI_JOB_64 1
/* Header at the start of every hardware job descriptor.
 * NOTE(review): fields appear to be missing from this chunk (e.g. a pointer
 * to the next job) -- confirm against the complete header. */
struct mali_job_descriptor_header {
        u32 exception_status;       /* see the exception_status notes below */
        u32 first_incomplete_task;

        /* MALI_JOB_32 or MALI_JOB_64: selects the descriptor word size */
        u8 job_descriptor_size : 1;
        enum mali_job_type job_type : 7;

        u8 unknown_flags : 7;

        /* Indices of jobs this job depends on */
        u16 job_dependency_index_1;
        u16 job_dependency_index_2;

} __attribute__((packed));
640 /* These concern exception_status */
/* Access type causing a fault, paralleling AS_FAULTSTATUS_* entries in the
 * kernel. */

enum mali_exception_access {
        /* Atomic in the kernel for MMU, but that doesn't make sense for a job
         * fault so it's just unused */
        MALI_EXCEPTION_ACCESS_NONE = 0,

        MALI_EXCEPTION_ACCESS_EXECUTE = 1,
        MALI_EXCEPTION_ACCESS_READ = 2,
        MALI_EXCEPTION_ACCESS_WRITE = 3
};
655 /* Details about write_value from panfrost igt tests which use it as a generic
656 * dword write primitive */
658 #define MALI_WRITE_VALUE_ZERO 3
/* Payload for a write-value job; per the comment above, used by the panfrost
 * igt tests as a generic dword write primitive.
 * NOTE(review): a destination address field is not visible in this chunk --
 * confirm against the complete header. */
struct mali_payload_write_value {
        /* Selects what gets written; MALI_WRITE_VALUE_ZERO (= 3) is one
         * known value. */
        u32 value_descriptor;
} __attribute__((packed));
670 * This structure lets the attribute unit compute the address of an attribute
671 * given the vertex and instance ID. Unfortunately, the way this works is
672 * rather complicated when instancing is enabled.
674 * To explain this, first we need to explain how compute and vertex threads are
675 * dispatched. This is a guess (although a pretty firm guess!) since the
676 * details are mostly hidden from the driver, except for attribute instancing.
677 * When a quad is dispatched, it receives a single, linear index. However, we
678 * need to translate that index into a (vertex id, instance id) pair, or a
679 * (local id x, local id y, local id z) triple for compute shaders (although
680 * vertex shaders and compute shaders are handled almost identically).
681 * Focusing on vertex shaders, one option would be to do:
683 * vertex_id = linear_id % num_vertices
684 * instance_id = linear_id / num_vertices
686 * but this involves a costly division and modulus by an arbitrary number.
687 * Instead, we could pad num_vertices. We dispatch padded_num_vertices *
688 * num_instances threads instead of num_vertices * num_instances, which results
689 * in some "extra" threads with vertex_id >= num_vertices, which we have to
690 * discard. The more we pad num_vertices, the more "wasted" threads we
691 * dispatch, but the division is potentially easier.
693 * One straightforward choice is to pad num_vertices to the next power of two,
694 * which means that the division and modulus are just simple bit shifts and
695 * masking. But the actual algorithm is a bit more complicated. The thread
696 * dispatcher has special support for dividing by 3, 5, 7, and 9, in addition
697 * to dividing by a power of two. This is possibly using the technique
698 * described in patent US20170010862A1. As a result, padded_num_vertices can be
699 * 1, 3, 5, 7, or 9 times a power of two. This results in less wasted threads,
700 * since we need less padding.
702 * padded_num_vertices is picked by the hardware. The driver just specifies the
703 * actual number of vertices. At least for Mali G71, the first few cases are
706 * num_vertices | padded_num_vertices
713 * Note that padded_num_vertices is a multiple of four (presumably because
714 * threads are dispatched in groups of 4). Also, padded_num_vertices is always
715 * at least one more than num_vertices, which seems like a quirk of the
716 * hardware. For larger num_vertices, the hardware uses the following
717 * algorithm: using the binary representation of num_vertices, we look at the
718 * most significant set bit as well as the following 3 bits. Let n be the
719 * number of bits after those 4 bits. Then we set padded_num_vertices according
720 * to the following table:
722 * high bits | padded_num_vertices
729 * For example, if num_vertices = 70 is passed to glDraw(), its binary
730 * representation is 1000110, so n = 3 and the high bits are 1000, and
731 * therefore padded_num_vertices = 9 * 2^3 = 72.
733 * The attribute unit works in terms of the original linear_id. if
734 * num_instances = 1, then they are the same, and everything is simple.
735 * However, with instancing things get more complicated. There are four
736 * possible modes, two of them we can group together:
738 * 1. Use the linear_id directly. Only used when there is no instancing.
740 * 2. Use the linear_id modulo a constant. This is used for per-vertex
741 * attributes with instancing enabled by making the constant equal
742 * padded_num_vertices. Because the modulus is always padded_num_vertices, this
743 * mode only supports a modulus that is a power of 2 times 1, 3, 5, 7, or 9.
744 * The shift field specifies the power of two, while the extra_flags field
745 * specifies the odd number. If shift = n and extra_flags = m, then the modulus
746 * is (2m + 1) * 2^n. As an example, if num_vertices = 70, then as computed
747 * above, padded_num_vertices = 9 * 2^3, so we should set extra_flags = 4 and
748 * shift = 3. Note that we must exactly follow the hardware algorithm used to
749 * get padded_num_vertices in order to correctly implement per-vertex
752 * 3. Divide the linear_id by a constant. In order to correctly implement
753 * instance divisors, we have to divide linear_id by padded_num_vertices times
754 * to user-specified divisor. So first we compute padded_num_vertices, again
755 * following the exact same algorithm that the hardware uses, then multiply it
756 * by the GL-level divisor to get the hardware-level divisor. This case is
757 * further divided into two more cases. If the hardware-level divisor is a
758 * power of two, then we just need to shift. The shift amount is specified by
759 * the shift field, so that the hardware-level divisor is just 2^shift.
761 * If it isn't a power of two, then we have to divide by an arbitrary integer.
762 * For that, we use the well-known technique of multiplying by an approximation
763 * of the inverse. The driver must compute the magic multiplier and shift
764 * amount, and then the hardware does the multiplication and shift. The
765 * hardware and driver also use the "round-down" optimization as described in
766 * http://ridiculousfish.com/files/faster_unsigned_division_by_constants.pdf.
767 * The hardware further assumes the multiplier is between 2^31 and 2^32, so the
768 * high bit is implicitly set to 1 even though it is set to 0 by the driver --
769 * presumably this simplifies the hardware multiplier a little. The hardware
770 * first multiplies linear_id by the multiplier and takes the high 32 bits,
771 * then applies the round-down correction if extra_flags = 1, then finally
772 * shifts right by the shift field.
774 * There are some differences between ridiculousfish's algorithm and the Mali
775 * hardware algorithm, which means that the reference code from ridiculousfish
776 * doesn't always produce the right constants. Mali does not use the pre-shift
777 * optimization, since that would make a hardware implementation slower (it
778 * would have to always do the pre-shift, multiply, and post-shift operations).
* It also forces the multiplier to be at least 2^31, which means that the
780 * exponent is entirely fixed, so there is no trial-and-error. Altogether,
781 * given the divisor d, the algorithm the driver must follow is:
783 * 1. Set shift = floor(log2(d)).
784 * 2. Compute m = ceil(2^(shift + 32) / d) and e = 2^(shift + 32) % d.
785 * 3. If e <= 2^shift, then we need to use the round-down algorithm. Set
786 * magic_divisor = m - 1 and extra_flags = 1.
787 * 4. Otherwise, set magic_divisor = m and extra_flags = 0.
789 * Unrelated to instancing/actual attributes, images (the OpenCL kind) are
790 * implemented as special attributes, denoted by MALI_ATTR_IMAGE. For images,
791 * let shift=extra_flags=0. Stride is set to the image format's bytes-per-pixel
792 * (*NOT the row stride*). Size is set to the size of the image itself.
* Special internal attributes and varyings (gl_VertexID, gl_FrontFacing, etc)
795 * use particular fixed addresses with modified structures.
/* How the attribute unit maps the linear thread id to an address; see the
 * long explanation above. Lives in the bottom 3 bits of an attribute
 * record. */
enum mali_attr_mode {
        MALI_ATTR_UNUSED = 0,
        MALI_ATTR_LINEAR = 1,      /* use linear_id directly */
        MALI_ATTR_POT_DIVIDE = 2,  /* divide by a power of two */
        MALI_ATTR_MODULO = 3,      /* linear_id modulo a constant */
        MALI_ATTR_NPOT_DIVIDE = 4, /* divide by an arbitrary constant */
};
807 /* Pseudo-address for gl_VertexID, gl_FragCoord, gl_FrontFacing */
809 #define MALI_ATTR_VERTEXID (0x22)
810 #define MALI_ATTR_INSTANCEID (0x24)
811 #define MALI_VARYING_FRAG_COORD (0x25)
812 #define MALI_VARYING_FRONT_FACING (0x26)
814 /* This magic "pseudo-address" is used as `elements` to implement
815 * gl_PointCoord. When read from a fragment shader, it generates a point
816 * coordinate per the OpenGL ES 2.0 specification. Flipped coordinate spaces
817 * require an affine transformation in the shader. */
819 #define MALI_VARYING_POINT_COORD (0x61)
821 /* Used for comparison to check if an address is special. Mostly a guess, but
822 * it doesn't really matter. */
824 #define MALI_RECORD_SPECIAL (0x100)
827 /* This is used for actual attributes. */
829 /* The bottom 3 bits are the mode */
830 mali_ptr elements
: 64 - 8;
836 /* The entry after an NPOT_DIVIDE entry has this format. It stores
837 * extra information that wouldn't fit in a normal entry.
840 u32 unk
; /* = 0x20 */
843 /* This is the original, GL-level divisor. */
846 } __attribute__((packed
));
/* Per-attribute metadata: format, swizzle, and buffer binding info.
 * Packed to match the hardware layout. */
struct mali_attr_meta {
        /* Vertex buffer index */
        /* NOTE(review): the index field this comment refers to is not
         * visible in this chunk -- confirm against the complete header. */

        unsigned unknown1 : 2;
        /* 12 bits; presumably a packed mali_channel_swizzle (4 x 3-bit
         * selectors) -- TODO confirm */
        unsigned swizzle : 12;
        enum mali_format format : 8;

        /* Always observed to be zero at the moment */
        unsigned unknown3 : 2;

        /* When packing multiple attributes in a buffer, offset addresses by
         * this value. Obscurely, this is signed. */
        /* NOTE(review): the offset field this comment refers to is not
         * visible in this chunk -- confirm against the complete header. */
} __attribute__((packed));
864 #define FBD_MASK (~0x3f)
866 /* MFBD, rather than SFBD */
867 #define MALI_MFBD (0x1)
869 /* ORed into an MFBD address to specify the fbx section is included */
870 #define MALI_MFBD_TAG_EXTRA (0x2)
872 struct mali_uniform_buffer_meta
{
873 /* This is actually the size minus 1 (MALI_POSITIVE), in units of 16
874 * bytes. This gives a maximum of 2^14 bytes, which just so happens to
875 * be the GL minimum-maximum for GL_MAX_UNIFORM_BLOCK_SIZE.
879 /* This is missing the bottom 2 bits and top 8 bits. The top 8 bits
880 * should be 0 for userspace pointers, according to
881 * https://lwn.net/Articles/718895/. By reusing these bits, we can make
882 * each entry in the table only 64 bits.
884 mali_ptr ptr
: 64 - 10;
887 /* On Bifrost, these fields are the same between the vertex and tiler payloads.
888 * They also seem to be the same between Bifrost and Midgard. They're shared in
892 /* Applies to unknown_draw */
894 #define MALI_DRAW_INDEXED_UINT8 (0x10)
895 #define MALI_DRAW_INDEXED_UINT16 (0x20)
896 #define MALI_DRAW_INDEXED_UINT32 (0x30)
897 #define MALI_DRAW_INDEXED_SIZE (0x30)
898 #define MALI_DRAW_INDEXED_SHIFT (4)
900 #define MALI_DRAW_VARYING_SIZE (0x100)
902 /* Set to use first vertex as the provoking vertex for flatshading. Clear to
903 * use the last vertex. This is the default in DX and VK, but not in GL. */
905 #define MALI_DRAW_FLATSHADE_FIRST (0x800)
907 #define MALI_DRAW_PRIMITIVE_RESTART_FIXED_INDEX (0x10000)
/* Shared prefix of vertex and tiler job payloads; see the comment above.
 * NOTE(review): several fields documented by the trailing comments are not
 * visible in this chunk -- confirm against the complete header. */
struct mali_vertex_tiler_prefix {
        /* This is a dynamic bitfield containing the following things in this order:
         *
         * - gl_WorkGroupSize.x
         * - gl_WorkGroupSize.y
         * - gl_WorkGroupSize.z
         * - gl_NumWorkGroups.x
         * - gl_NumWorkGroups.y
         * - gl_NumWorkGroups.z
         *
         * The number of bits allocated for each number is based on the *_shift
         * fields below. For example, workgroups_y_shift gives the bit that
         * gl_NumWorkGroups.y starts at, and workgroups_z_shift gives the bit
         * that gl_NumWorkGroups.z starts at (and therefore one after the bit
         * that gl_NumWorkGroups.y ends at). The actual value for each gl_*
         * value is one more than the stored value, since if any of the values
         * are zero, then there would be no invocations (and hence no job). If
         * there were 0 bits allocated to a given field, then it must be zero,
         * and hence the real value is one.
         *
         * Vertex jobs reuse the same job dispatch mechanism as compute jobs,
         * effectively doing glDispatchCompute(1, vertex_count, instance_count)
         * where vertex count is the number of vertices.
         */
        u32 invocation_count;

        /* Bitfield for shifts:
         *
         * workgroups_x_shift : 6
         * workgroups_y_shift : 6
         * workgroups_z_shift : 6
         * workgroups_x_shift_2 : 4
         */
        u32 invocation_shifts;

        /* See the MALI_DRAW_* flags above */
        u32 unknown_draw : 22;

        /* This is the same as workgroups_x_shift_2 in compute shaders, but
         * always 5 for vertex jobs and 6 for tiler jobs. I suspect this has
         * something to do with how many quads get put in the same execution
         * engine, which is a balance (you don't want to starve the engine, but
         * you also want to distribute work evenly).
         */
        u32 workgroups_x_shift_3 : 6;

        /* Negative of min_index. This is used to compute
         * the unbiased index in tiler/fragment shader runs.
         *
         * The hardware adds offset_bias_correction in each run,
         * so that absent an index bias, the first vertex processed is
         * genuinely the first vertex (0). But with an index bias,
         * the first vertex processed is numbered the same as the bias.
         *
         * To represent this more conveniently:
         * unbiased_index = lower_bound_index +
         *                  offset_bias_correction
         *
         * This is done since the hardware doesn't accept a index_bias
         * and this allows it to recover the unbiased index.
         */
        int32_t offset_bias_correction;

        /* Like many other strictly nonzero quantities, index_count is
         * subtracted by one. For an indexed cube, this is equal to 35 = 6
         * faces * 2 triangles/per face * 3 vertices/per triangle - 1. That is,
         * for an indexed draw, index_count is the number of actual vertices
         * rendered whereas invocation_count is the number of unique vertices
         * rendered (the number of times the vertex shader must be invoked).
         * For non-indexed draws, this is just equal to invocation_count. */

        /* No hidden structure; literally just a pointer to an array of uint
         * indices (width depends on flags). Thanks, guys, for not making my
         * life insane for once! NULL for non-indexed draws. */
} __attribute__((packed));
994 /* Point size / line width can either be specified as a 32-bit float (for
995 * constant size) or as a [machine word size]-bit GPU pointer (for varying size). If a pointer
996 * is selected, by setting the appropriate MALI_DRAW_VARYING_SIZE bit in the tiler
 * payload, the contents of varying_pointer will be interpreted as an array of
998 * fp16 sizes, one for each vertex. gl_PointSize is therefore implemented by
999 * creating a special MALI_R16F varying writing to varying_pointer. */
1001 union midgard_primitive_size
{
/* Bifrost-only portion of a vertex job payload. */
struct bifrost_vertex_only {
        u32 unk2; /* =0x2 */

        /* NOTE(review): a gap in the original source numbering indicates
         * further members were lost from this copy; layout incomplete. */
} __attribute__((packed));
/* Describes the tiler heap used by Bifrost tiler jobs.
 *
 * NOTE(review): gaps in the original source numbering indicate members were
 * lost from this copy (before tiler_heap_start and after tiler_heap_end);
 * layout incomplete. */
struct bifrost_tiler_heap_meta {
        /* note: these are just guesses! */
        mali_ptr tiler_heap_start;
        mali_ptr tiler_heap_free;
        mali_ptr tiler_heap_end;

        /* hierarchy weights? but they're still 0 after the job has run... */
        /* NOTE(review): the weight members themselves are missing here. */
} __attribute__((packed));
/* Top-level tiler state for Bifrost, referenced from bifrost_tiler_only.
 *
 * NOTE(review): gaps in the original source numbering indicate members were
 * lost from this copy (before and after tiler_heap_meta); layout
 * incomplete. */
struct bifrost_tiler_meta {
        mali_ptr tiler_heap_meta;

        /* TODO what is this used for? */
        /* NOTE(review): the member that TODO refers to is missing here. */
} __attribute__((packed));
/* Tiler-specific portion of a Bifrost tiler payload (see
 * bifrost_payload_tiler / bifrost_payload_fused). */
struct bifrost_tiler_only {
        /* Constant or varying point size / line width; see the union's
         * definition above */
        union midgard_primitive_size primitive_size;

        mali_ptr tiler_meta;

        /* Always observed zero */
        u64 zero1, zero2, zero3, zero4, zero5, zero6;
} __attribute__((packed));
/* Pointed to by the framebuffer field of Bifrost vertex/tiler postfixes (see
 * mali_vertex_tiler_postfix below).
 *
 * NOTE(review): a gap in the original source numbering indicates members
 * were lost from this copy before flags; layout incomplete. */
struct bifrost_scratchpad {
        u32 flags; // = 0x1f

        /* This is a pointer to a CPU-inaccessible buffer, 16 pages, allocated
         * during startup. It seems to serve the same purpose as the
         * gpu_scratchpad in the SFBD for Midgard, although it's slightly
         * (NOTE(review): tail of the original comment was lost here) */
        mali_ptr gpu_scratchpad;
} __attribute__((packed));
/* Common suffix of vertex/tiler job payloads: pointers to the shader's
 * resources (attributes, varyings, uniforms, textures, samplers) and to the
 * framebuffer/scratchpad.
 *
 * NOTE(review): gaps in the original source numbering indicate members were
 * lost from this copy (at least a uniforms pointer and the shader/flags
 * words); layout incomplete. */
struct mali_vertex_tiler_postfix {
        /* Zero for vertex jobs. Pointer to the position (gl_Position) varying
         * output from the vertex shader for tiler jobs. */
        u64 position_varying;

        /* An array of mali_uniform_buffer_meta's. The size is given by the
         * (NOTE(review): rest of the original comment, and likely a
         * preceding uniforms pointer member, were lost here). */
        u64 uniform_buffers;

        /* This is a pointer to an array of pointers to the texture
         * descriptors, number of pointers bounded by number of textures. The
         * indirection is needed to accommodate varying numbers and sizes of
         * texture descriptors */
        u64 texture_trampoline;

        /* For OpenGL, from what I've seen, this is intimately connected to
         * texture_meta. cwabbott says this is not the case under Vulkan, hence
         * why this field is separate (Midgard is Vulkan capable). Pointer to
         * array of sampler descriptors (which are uniform in size) */
        u64 sampler_descriptor;

        u64 attributes; /* struct attribute_buffer[] */
        u64 attribute_meta; /* attribute_meta[] */
        u64 varyings; /* struct attr */
        u64 varying_meta; /* pointer */
        u64 occlusion_counter; /* A single bit as far as I can tell */

        /* Note: on Bifrost, this isn't actually the FBD. It points to
         * bifrost_scratchpad instead. However, it does point to the same thing
         * in vertex and tiler jobs. */
        mali_ptr framebuffer;
} __attribute__((packed));
/* Full payload for a Midgard vertex or tiler job.
 *
 * NOTE(review): a gap in the original source numbering indicates members
 * were lost from this copy (at least the "offset for first vertex" field the
 * comment below refers to); layout incomplete. */
struct midgard_payload_vertex_tiler {
        struct mali_vertex_tiler_prefix prefix;

        u16 gl_enables; // 0x5

        /* Both zero for non-instanced draws. For instanced draws, a
         * decomposition of padded_num_vertices. See the comments about the
         * corresponding fields in mali_attr for context. */
        unsigned instance_shift : 5;
        unsigned instance_odd : 3;

        /* Offset for first vertex in buffer */
        /* NOTE(review): the offset member itself is missing here. */

        struct mali_vertex_tiler_postfix postfix;

        union midgard_primitive_size primitive_size;
} __attribute__((packed));
/* Full payload for a Bifrost vertex job. */
struct bifrost_payload_vertex {
        struct mali_vertex_tiler_prefix prefix;
        struct bifrost_vertex_only vertex;
        struct mali_vertex_tiler_postfix postfix;
} __attribute__((packed));
/* Full payload for a Bifrost tiler job. */
struct bifrost_payload_tiler {
        struct mali_vertex_tiler_prefix prefix;
        struct bifrost_tiler_only tiler;
        struct mali_vertex_tiler_postfix postfix;
} __attribute__((packed));
/* Payload for a fused Bifrost vertex+tiler job: the tiler half first, then
 * padding, then the vertex half. */
struct bifrost_payload_fused {
        struct mali_vertex_tiler_prefix prefix;
        struct bifrost_tiler_only tiler;
        struct mali_vertex_tiler_postfix tiler_postfix;
        u64 padding; /* zero */
        struct bifrost_vertex_only vertex;
        struct mali_vertex_tiler_postfix vertex_postfix;
} __attribute__((packed));
/* Purposeful off-by-one in width, height fields. For example, a (64, 64)
 * texture is stored as (63, 63) in these fields. This adjusts for that.
 * There's an identical pattern in the framebuffer descriptor. Even vertex
 * count fields work this way, hence the generic name -- integral fields that
 * are strictly positive generally need this adjustment.
 *
 * The argument is parenthesized so that expression arguments whose operators
 * bind looser than `-` (e.g. bitwise ops) expand correctly. */
#define MALI_POSITIVE(dim) ((dim) - 1)
/* Used with wrapping. Unclear what top bit conveys.
 *
 * NOTE(review): the enum's closing brace was lost in this copy of the
 * header; restored below. Bit 0x4 selects the mirrored variants. */
enum mali_wrap_mode {
        MALI_WRAP_REPEAT = 0x8 | 0x0,
        MALI_WRAP_CLAMP_TO_EDGE = 0x8 | 0x1,
        MALI_WRAP_CLAMP = 0x8 | 0x2,
        MALI_WRAP_CLAMP_TO_BORDER = 0x8 | 0x3,
        MALI_WRAP_MIRRORED_REPEAT = 0x8 | 0x4 | 0x0,
        MALI_WRAP_MIRRORED_CLAMP_TO_EDGE = 0x8 | 0x4 | 0x1,
        MALI_WRAP_MIRRORED_CLAMP = 0x8 | 0x4 | 0x2,
        MALI_WRAP_MIRRORED_CLAMP_TO_BORDER = 0x8 | 0x4 | 0x3,
};
/* Shared across both command stream and Midgard, and even with Bifrost */

/* NOTE(review): this enum was truncated in this copy of the header -- only
 * the cube member survives, and members for the other texture dimensions
 * appear to have been lost. The terminator is restored below so the header
 * parses; restore the missing members from the upstream file. */
enum mali_texture_type {
        MALI_TEX_CUBE = 0x0,
};
/* 13 mip levels covers a 4096x4096 base level (log2(4096) + 1) */
#define MAX_MIP_LEVELS (13)

/* Cubemap bloats everything up */
#define MAX_CUBE_FACES (6)

/* For each pointer, there is an address and optionally also a stride */
#define MAX_ELEMENTS (2)
/* It's not known why there are 4-bits allocated -- this enum is almost
 * certainly incomplete.
 *
 * NOTE(review): the enum's closing brace was lost in this copy of the
 * header; restored below. */
enum mali_texture_layout {
        /* For a Z/S texture, this is linear */
        MALI_TEXTURE_TILED = 0x1,

        /* Z/S textures cannot be tiled */
        MALI_TEXTURE_LINEAR = 0x2,

        MALI_TEXTURE_AFBC = 0xC
};
/* Corresponds to the type passed to glTexImage2D and so forth */

/* NOTE(review): gaps in the original source numbering indicate bitfield
 * members were lost from this copy (between format/unknown1, after layout,
 * and after manual_stride), so the bit positions shown are incomplete --
 * restore from the upstream header before relying on the layout. */
struct mali_texture_format {
        /* Internal pixel-level channel swizzle */
        unsigned swizzle : 12;
        enum mali_format format : 8;

        unsigned unknown1 : 1;

        enum mali_texture_type type : 2;
        enum mali_texture_layout layout : 4;

        unsigned unknown2 : 1;

        /* Set to allow packing an explicit stride */
        unsigned manual_stride : 1;
} __attribute__((packed));
/* Texture descriptor, pointed to via the texture_trampoline in the job
 * postfix.
 *
 * NOTE(review): gaps in the original source numbering indicate members were
 * lost from this copy (dimensions, the mip-level fields described by the
 * comments below, and the trailing payload pointers); layout incomplete. */
struct mali_texture_descriptor {
        uint16_t array_size;

        struct mali_texture_format format;

        /* One for non-mipmapped, zero for mipmapped */
        /* Zero for non-mipmapped, (number of levels - 1) for mipmapped */
        /* NOTE(review): the two members those comments describe are missing
         * here. */

        /* Swizzling is a single 32-bit word, broken up here for convenience.
         * Here, swizzling refers to the ES 3.0 texture parameters for channel
         * level swizzling, not the internal pixel-level swizzling which is
         * below OpenGL's reach */
        unsigned swizzle : 12;
        unsigned swizzle_zero : 20;
} __attribute__((packed));
/* Flags for mali_sampler_descriptor::filter_mode. Clear bits select the
 * linear variants. */
#define MALI_SAMP_MAG_NEAREST (1 << 0)
#define MALI_SAMP_MIN_NEAREST (1 << 1)

/* TODO: What do these bits mean individually? Only seen set together */

#define MALI_SAMP_MIP_LINEAR_1 (1 << 3)
#define MALI_SAMP_MIP_LINEAR_2 (1 << 4)

/* Flag in filter_mode, corresponding to OpenCL's NORMALIZED_COORDS_TRUE
 * sampler_t flag. For typical OpenGL textures, this is always set. */

#define MALI_SAMP_NORM_COORDS (1 << 5)
/* Used for lod encoding. Thanks @urjaman for pointing out these routines can
 * be cleaned up a lot. */

/* Decode a signed 8.8 fixed-point value to float. The argument is
 * parenthesized so that expression arguments are divided as a whole. */
#define DECODE_FIXED_16(x) ((float) ((x) / 256.0))
1270 static inline int16_t
1271 FIXED_16(float x
, bool allow_negative
)
1273 /* Clamp inputs, accounting for float error */
1274 float max_lod
= (32.0 - (1.0 / 512.0));
1275 float min_lod
= allow_negative
? -max_lod
: 0.0;
1277 x
= ((x
> max_lod
) ? max_lod
: ((x
< min_lod
) ? min_lod
: x
));
1279 return (int) (x
* 256.0);
/* Sampler descriptor, pointed to (via an array) from the job postfix's
 * sampler_descriptor field.
 *
 * NOTE(review): gaps in the original source numbering indicate members were
 * lost from this copy (the LOD fields the comment below describes, bits
 * around the wrap/compare word, and fields before border_color); layout
 * incomplete. */
struct mali_sampler_descriptor {
        /* See the MALI_SAMP_* flags above */
        uint16_t filter_mode;

        /* Fixed point, signed.
         * Upper 7 bits before the decimal point, although it caps [0-31].
         * Lower 8 bits after the decimal point: int(round(x * 256)) */
        /* NOTE(review): the LOD members themselves are missing here. */

        /* All one word in reality, but packed a bit. Comparisons are flipped
         * (NOTE(review): tail of the original comment was lost here). */

        enum mali_wrap_mode wrap_s : 4;
        enum mali_wrap_mode wrap_t : 4;
        enum mali_wrap_mode wrap_r : 4;
        enum mali_func compare_func : 3;

        /* No effect on 2D textures. For cubemaps, set for ES3 and clear for
         * ES2, controlling seamless cubemapping */
        unsigned seamless_cube_map : 1;

        float border_color[4];
} __attribute__((packed));
/* viewport0/viewport1 form the arguments to glViewport. viewport1 is
 * modified by MALI_POSITIVE; viewport0 is as-is.
 */
struct mali_viewport {
        /* XY clipping planes */
        /* Depth clipping planes */

        /* NOTE(review): the members this struct should contain (the clipping
         * plane fields the comments above describe) were lost from this copy
         * -- only the comments survive. Restore from the upstream header; as
         * written this is an empty struct (a GNU extension) with the wrong
         * size. */
} __attribute__((packed));
/* From presentations, 16x16 tiles externally. Use shift for fast computation
 * of tile numbers. */

#define MALI_TILE_SHIFT 4
#define MALI_TILE_LENGTH (1 << MALI_TILE_SHIFT)

/* Tile coordinates are stored as a compact u32, as only 12 bits are needed
 * for each component. Notice that this provides a theoretical upper bound of
 * (1 << 12) = 4096 tiles in each direction, addressing a maximum framebuffer
 * of size 65536x65536. Multiplying that together, times another four given
 * that Mali framebuffers are 32-bit ARGB8888, means that this upper bound
 * would take 16 gigabytes of RAM just to store the uncompressed framebuffer
 * itself, let alone rendering in real-time to such a buffer. */

/* From mali_kbase_10969_workaround.c */
#define MALI_X_COORD_MASK 0x00000FFF
#define MALI_Y_COORD_MASK 0x0FFF0000

/* Extract parts of a tile coordinate */

#define MALI_TILE_COORD_X(coord) ((coord) & MALI_X_COORD_MASK)
#define MALI_TILE_COORD_Y(coord) (((coord) & MALI_Y_COORD_MASK) >> 16)

/* Helpers to generate tile coordinates based on the boundary coordinates in
 * screen space. So, with the bounds (0, 0) to (128, 128) for the screen, these
 * functions would convert it to the bounding tiles (0, 0) to (7, 7).
 * Intentional "off-by-one"; finding the tile number is a form of fencepost
 * problem. */

#define MALI_MAKE_TILE_COORDS(X, Y) ((X) | ((Y) << 16))
/* B and bias are parenthesized so that expression arguments group correctly
 * before the subtraction and shift */
#define MALI_BOUND_TO_TILE(B, bias) (((B) - (bias)) >> MALI_TILE_SHIFT)
#define MALI_COORDINATE_TO_TILE(W, H, bias) MALI_MAKE_TILE_COORDS(MALI_BOUND_TO_TILE(W, bias), MALI_BOUND_TO_TILE(H, bias))
#define MALI_COORDINATE_TO_TILE_MIN(W, H) MALI_COORDINATE_TO_TILE(W, H, 0)
#define MALI_COORDINATE_TO_TILE_MAX(W, H) MALI_COORDINATE_TO_TILE(W, H, 1)
/* Payload for a fragment job.
 *
 * NOTE(review): a gap in the original source numbering indicates the leading
 * members (presumably the min/max tile coordinate words used with the
 * MALI_TILE_* macros above) were lost from this copy; layout incomplete. */
struct mali_payload_fragment {
        mali_ptr framebuffer;
} __attribute__((packed));
/* Single Framebuffer Descriptor */

/* Flags apply to format. With just MSAA_A and MSAA_B, the framebuffer is
 * configured for 4x. With MSAA_8, it is configured for 8x. */

#define MALI_SFBD_FORMAT_MSAA_8 (1 << 3)
#define MALI_SFBD_FORMAT_MSAA_A (1 << 4)
/* NOTE(review): MSAA_B defines the same bit as MSAA_A in this copy of the
 * header, which contradicts the comment above treating them as distinct --
 * likely a transcription error; verify against the upstream file. */
#define MALI_SFBD_FORMAT_MSAA_B (1 << 4)
#define MALI_SFBD_FORMAT_SRGB (1 << 5)

/* Fast/slow based on whether all three buffers are cleared at once */

#define MALI_CLEAR_FAST (1 << 18)
#define MALI_CLEAR_SLOW (1 << 28)
/* 1u: left-shifting a signed 1 into the sign bit is undefined behavior */
#define MALI_CLEAR_SLOW_STENCIL (1u << 31)
/* Configures hierarchical tiling on Midgard for both SFBD/MFBD (embedded
 * within the larger framebuffer descriptor). Analogous to
 * bifrost_tiler_heap_meta and bifrost_tiler_meta */

/* See pan_tiler.c for derivation */
#define MALI_HIERARCHY_MASK ((1 << 9) - 1)

/* Flag disabling the tiler for clear-only jobs, with
   hierarchical tiling */
#define MALI_TILER_DISABLED (1 << 12)

/* Flag selecting userspace-generated polygon list, for clear-only jobs without
 * hierarchical tiling. */
#define MALI_TILER_USER 0xFFF

/* Absent any geometry, the minimum size of the polygon list header */
#define MALI_TILER_MINIMUM_HEADER_SIZE 0x200
1407 struct midgard_tiler_descriptor
{
1408 /* Size of the entire polygon list; see pan_tiler.c for the
1409 * computation. It's based on hierarchical tiling */
1411 u32 polygon_list_size
;
1413 /* Name known from the replay workaround in the kernel. What exactly is
1414 * flagged here is less known. We do that (tiler_hierarchy_mask & 0x1ff)
1415 * specifies a mask of hierarchy weights, which explains some of the
1416 * performance mysteries around setting it. We also see the bottom bit
1417 * of tiler_flags set in the kernel, but no comment why.
1419 * hierarchy_mask can have the TILER_DISABLED flag */
1424 /* See mali_tiler.c for an explanation */
1425 mali_ptr polygon_list
;
1426 mali_ptr polygon_list_body
;
1428 /* Names based on we see symmetry with replay jobs which name these
1431 mali_ptr heap_start
; /* tiler heap_free_address */
1434 /* Hierarchy weights. We know these are weights based on the kernel,
1435 * but I've never seen them be anything other than zero */
/* Framebuffer block layout, used in the SFBD/MFBD format words below.
 *
 * NOTE(review): the enum's closing brace was lost in this copy of the
 * header; restored below. */
enum mali_block_format {
        MALI_BLOCK_TILED = 0x0,
        MALI_BLOCK_UNKNOWN = 0x1,
        MALI_BLOCK_LINEAR = 0x2,
        MALI_BLOCK_AFBC = 0x3,
};
1446 struct mali_sfbd_format
{
1450 /* mali_channel_swizzle */
1451 unsigned swizzle
: 12;
1454 unsigned nr_channels
: 2;
1459 enum mali_block_format block
: 2;
/* Single Framebuffer Descriptor (SFBD), used on older Midgard parts.
 *
 * NOTE(review): gaps in the original source numbering indicate members were
 * lost from this copy (at least the dimension fields the MALI_DIMENSION
 * comment refers to, strides, and several unknown words); layout
 * incomplete. */
struct mali_single_framebuffer {
        mali_ptr scratchpad;

        struct mali_sfbd_format format;

        /* Purposeful off-by-one in these fields should be accounted for by the
         * MALI_DIMENSION macro */
        /* NOTE(review): the dimension members themselves are missing here. */

        u32 checksum_stride;

        /* By default, the framebuffer is upside down from OpenGL's
         * perspective. Set framebuffer to the end and negate the stride to
         * flip in the Y direction */
        mali_ptr framebuffer;

        /* Depth and stencil buffers are interleaved, it appears, as they are
         * set to the same address in captures. Both fields set to zero if the
         * buffer is not being cleared. Depending on GL_ENABLE magic, you might
         * get a zero enable despite the buffer being present; that still is
         * (NOTE(review): tail of the original comment was lost here) */

        mali_ptr depth_buffer; // not SAME_VA
        u32 depth_stride_zero : 4;
        u32 depth_stride : 28;

        mali_ptr stencil_buffer; // not SAME_VA
        u32 stencil_stride_zero : 4;
        u32 stencil_stride : 28;

        u32 clear_color_1; // RGBA8888 from glClear, actually used by hardware
        u32 clear_color_2; // always equal, but unclear function?
        u32 clear_color_3; // always equal, but unclear function?
        u32 clear_color_4; // always equal, but unclear function?

        /* Set to zero if not cleared */

        float clear_depth_1; // float32, ditto
        float clear_depth_2; // float32, ditto
        float clear_depth_3; // float32, ditto
        float clear_depth_4; // float32, ditto

        u32 clear_stencil; // Exactly as it appears in OpenGL

        struct midgard_tiler_descriptor tiler;

        /* More below this, maybe */
} __attribute__((packed));
/* On Midgard, this "framebuffer descriptor" is used for the framebuffer field
 * of compute jobs. Superficially resembles a single framebuffer descriptor */

struct mali_compute_fbd {
        /* NOTE(review): the members of this struct were lost from this copy
         * of the header -- as written it is an empty struct (a GNU
         * extension) with the wrong size; restore from the upstream file. */
} __attribute__((packed));
/* Format bits for the render target flags (see mali_rt_format below) */

#define MALI_MFBD_FORMAT_MSAA (1 << 1)
#define MALI_MFBD_FORMAT_SRGB (1 << 2)
/* Format word of an MFBD render target.
 *
 * NOTE(review): gaps in the original source numbering indicate bitfield
 * members were lost from this copy (before nr_channels, around block, and
 * between swizzle and no_preload), so the bit positions shown are
 * incomplete -- restore from the upstream header before relying on the
 * layout. */
struct mali_rt_format {
        unsigned nr_channels : 2; /* MALI_POSITIVE */

        enum mali_block_format block : 2;

        unsigned swizzle : 12;

        /* Disables MFBD preload. When this bit is set, the render target will
         * be cleared every frame. When this bit is clear, the hardware will
         * automatically wallpaper the render target back from main memory.
         * Unfortunately, MFBD preload is very broken on Midgard, so in
         * practice, this is a chicken bit that should always be set.
         * Discovered by accident, as all good chicken bits are. */
        unsigned no_preload : 1;
} __attribute__((packed));
/* Per-render-target section of the MFBD (appended after the main structure
 * and any bifrost_fb_extra).
 *
 * NOTE(review): gaps in the original source numbering indicate members were
 * lost from this copy (between format and the AFBC fields, around the
 * framebuffer pointer, and around framebuffer_stride); layout incomplete. */
struct bifrost_render_target {
        struct mali_rt_format format;

        /* Stuff related to ARM Framebuffer Compression. When AFBC is enabled,
         * there is an extra metadata buffer that contains 16 bytes per tile.
         * The framebuffer needs to be the same size as before, since we don't
         * know ahead of time how much space it will take up. The
         * framebuffer_stride is set to 0, since the data isn't stored linearly
         * (NOTE(review): part of the original comment was lost here).
         *
         * When AFBC is disabled, these fields are zero. */

        u32 stride; // stride in units of tiles
        u32 unk; // = 0x20000

        mali_ptr framebuffer;

        u32 framebuffer_stride : 28; // in units of bytes

        u32 clear_color_1; // RGBA8888 from glClear, actually used by hardware
        u32 clear_color_2; // always equal, but unclear function?
        u32 clear_color_3; // always equal, but unclear function?
        u32 clear_color_4; // always equal, but unclear function?
} __attribute__((packed));
/* An optional part of bifrost_framebuffer. It comes between the main structure
 * and the array of render targets. It must be included if any of these are
 * used (NOTE(review): part of the original list was lost here):
 *
 * - Transaction Elimination
 * - TODO: Anything else?
 */

#define MALI_EXTRA_PRESENT (0x10)

#define MALI_EXTRA_ZS (0x4)
/* Optional extra section of the MFBD; see the MALI_EXTRA_* flags above.
 *
 * NOTE(review): gaps in the original source numbering indicate members were
 * lost from this copy (at least a checksum pointer before checksum_stride
 * and fields between the stride pairs); layout incomplete. */
struct bifrost_fb_extra {
        /* Each tile has an 8 byte checksum, so the stride is "width in tiles * 8" */
        u32 checksum_stride;

        unsigned flags_lo : 4;
        enum mali_block_format zs_block : 2;
        unsigned flags_hi : 26;

        /* Note: AFBC is only allowed for 24/8 combined depth/stencil. */

        mali_ptr depth_stencil_afbc_metadata;
        u32 depth_stencil_afbc_stride; // in units of tiles

        mali_ptr depth_stencil;

        /* Depth becomes depth/stencil in case of combined D/S */

        u32 depth_stride_zero : 4;
        u32 depth_stride : 28;

        u32 stencil_stride_zero : 4;
        u32 stencil_stride : 28;
} __attribute__((packed));
/* Flags for bifrost_framebuffer::mfbd_flags */

/* Enables writing depth results back to main memory (rather than keeping them
 * on-chip in the tile buffer and then discarding) */

#define MALI_MFBD_DEPTH_WRITE (1 << 10)

/* The MFBD contains the extra bifrost_fb_extra section */

#define MALI_MFBD_EXTRA (1 << 13)
/* Multiple Framebuffer Descriptor (MFBD), used on Bifrost and newer Midgard.
 * Followed in memory by an optional bifrost_fb_extra and then the
 * bifrost_render_target array (see trailing comments).
 *
 * NOTE(review): gaps in the original source numbering indicate members were
 * lost from this copy (around stack_shift, between the pointers, and around
 * the bitfield words); layout incomplete. */
struct bifrost_framebuffer {
        u32 stack_shift : 4;

        u32 unknown2; // = 0x1f, same as SFBD
        mali_ptr scratchpad;

        mali_ptr sample_locations;

        /* NOTE(review): presumably framebuffer dimensions, stored twice;
         * verify against the upstream header. */
        u16 width1, height1;
        u16 width2, height2;

        u32 unk1 : 19; // = 0x01000
        u32 rt_count_1 : 2; // off-by-one (use MALI_POSITIVE)
        u32 unk2 : 3; // = 0
        u32 rt_count_2 : 3; // no off-by-one

        u32 clear_stencil : 8;
        u32 mfbd_flags : 24; // = 0x100

        struct midgard_tiler_descriptor tiler;

        /* optional: struct bifrost_fb_extra extra */
        /* struct bifrost_render_target rts[] */
} __attribute__((packed));
1699 #endif /* __PANFROST_JOB_H__ */