panfrost: Add support for R3G3B2
[mesa.git] / src / panfrost / include / panfrost-job.h
1 /*
2 * © Copyright 2017-2018 Alyssa Rosenzweig
3 * © Copyright 2017-2018 Connor Abbott
4 * © Copyright 2017-2018 Lyude Paul
5 * © Copyright 2019 Collabora, Ltd.
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the "Software"),
9 * to deal in the Software without restriction, including without limitation
10 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
11 * and/or sell copies of the Software, and to permit persons to whom the
12 * Software is furnished to do so, subject to the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the next
15 * paragraph) shall be included in all copies or substantial portions of the
16 * Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
21 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
23 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
24 * SOFTWARE.
25 *
26 */
27
28 #ifndef __PANFROST_JOB_H__
29 #define __PANFROST_JOB_H__
30
31 #include <stdint.h>
32 #include <stdbool.h>
33 #include <panfrost-misc.h>
34
35 enum mali_job_type {
36 JOB_NOT_STARTED = 0,
37 JOB_TYPE_NULL = 1,
38 JOB_TYPE_WRITE_VALUE = 2,
39 JOB_TYPE_CACHE_FLUSH = 3,
40 JOB_TYPE_COMPUTE = 4,
41 JOB_TYPE_VERTEX = 5,
42 JOB_TYPE_GEOMETRY = 6,
43 JOB_TYPE_TILER = 7,
44 JOB_TYPE_FUSED = 8,
45 JOB_TYPE_FRAGMENT = 9,
46 };
47
48 enum mali_draw_mode {
49 MALI_DRAW_NONE = 0x0,
50 MALI_POINTS = 0x1,
51 MALI_LINES = 0x2,
52 MALI_LINE_STRIP = 0x4,
53 MALI_LINE_LOOP = 0x6,
54 MALI_TRIANGLES = 0x8,
55 MALI_TRIANGLE_STRIP = 0xA,
56 MALI_TRIANGLE_FAN = 0xC,
57 MALI_POLYGON = 0xD,
58 MALI_QUADS = 0xE,
59 MALI_QUAD_STRIP = 0xF,
60
61 /* All other modes invalid */
62 };
63
64 /* Applies to tiler_gl_enables */
65
66 #define MALI_OCCLUSION_QUERY (1 << 3)
67 #define MALI_OCCLUSION_PRECISE (1 << 4)
68
69 /* Set for a glFrontFace(GL_CCW) in a Y=0=TOP coordinate system (like Gallium).
70 * In OpenGL, this corresponds to glFrontFace(GL_CW). Mesa and the blob
71 * disagree about how to do viewport flipping, so the blob actually sets this
72 * for GL_CW but then has a negative viewport stride */
73
74 #define MALI_FRONT_CCW_TOP (1 << 5)
75
76 #define MALI_CULL_FACE_FRONT (1 << 6)
77 #define MALI_CULL_FACE_BACK (1 << 7)
78
79 /* Used in stencil and depth tests */
80
81 enum mali_func {
82 MALI_FUNC_NEVER = 0,
83 MALI_FUNC_LESS = 1,
84 MALI_FUNC_EQUAL = 2,
85 MALI_FUNC_LEQUAL = 3,
86 MALI_FUNC_GREATER = 4,
87 MALI_FUNC_NOTEQUAL = 5,
88 MALI_FUNC_GEQUAL = 6,
89 MALI_FUNC_ALWAYS = 7
90 };
91
92 /* Flags apply to unknown2_3? */
93
94 #define MALI_HAS_MSAA (1 << 0)
95 #define MALI_CAN_DISCARD (1 << 5)
96
97 /* Applies on SFBD systems, specifying that programmable blending is in use */
98 #define MALI_HAS_BLEND_SHADER (1 << 6)
99
100 /* func is mali_func */
101 #define MALI_DEPTH_FUNC(func) (func << 8)
102 #define MALI_GET_DEPTH_FUNC(flags) ((flags >> 8) & 0x7)
103 #define MALI_DEPTH_FUNC_MASK MALI_DEPTH_FUNC(0x7)
104
105 #define MALI_DEPTH_WRITEMASK (1 << 11)
106
107 /* Next flags to unknown2_4 */
108 #define MALI_STENCIL_TEST (1 << 0)
109
110 /* What?! */
111 #define MALI_SAMPLE_ALPHA_TO_COVERAGE_NO_BLEND_SHADER (1 << 1)
112
113 #define MALI_NO_DITHER (1 << 9)
114 #define MALI_DEPTH_RANGE_A (1 << 12)
115 #define MALI_DEPTH_RANGE_B (1 << 13)
116 #define MALI_NO_MSAA (1 << 14)
117
118 /* Stencil test state is all encoded in a single u32, just with a lot of
119 * enums... */
120
121 enum mali_stencil_op {
122 MALI_STENCIL_KEEP = 0,
123 MALI_STENCIL_REPLACE = 1,
124 MALI_STENCIL_ZERO = 2,
125 MALI_STENCIL_INVERT = 3,
126 MALI_STENCIL_INCR_WRAP = 4,
127 MALI_STENCIL_DECR_WRAP = 5,
128 MALI_STENCIL_INCR = 6,
129 MALI_STENCIL_DECR = 7
130 };
131
132 struct mali_stencil_test {
133 unsigned ref : 8;
134 unsigned mask : 8;
135 enum mali_func func : 3;
136 enum mali_stencil_op sfail : 3;
137 enum mali_stencil_op dpfail : 3;
138 enum mali_stencil_op dppass : 3;
139 unsigned zero : 4;
140 } __attribute__((packed));
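
/* As an illustration of the packed stencil word above, here is a sketch of
 * encoding a common GL configuration -- glStencilFunc(GL_ALWAYS, 0, 0xFF)
 * with glStencilOp(GL_KEEP, GL_KEEP, GL_KEEP). The helper name is
 * hypothetical and not part of the driver. */

static inline struct mali_stencil_test
pan_example_default_stencil(void)
{
        struct mali_stencil_test state = {
                .ref = 0,
                .mask = 0xFF,
                .func = MALI_FUNC_ALWAYS,
                .sfail = MALI_STENCIL_KEEP,
                .dpfail = MALI_STENCIL_KEEP,
                .dppass = MALI_STENCIL_KEEP,
        };

        return state;
}
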
141
142 #define MALI_MASK_R (1 << 0)
143 #define MALI_MASK_G (1 << 1)
144 #define MALI_MASK_B (1 << 2)
145 #define MALI_MASK_A (1 << 3)
146
147 enum mali_nondominant_mode {
148 MALI_BLEND_NON_MIRROR = 0,
149 MALI_BLEND_NON_ZERO = 1
150 };
151
152 enum mali_dominant_blend {
153 MALI_BLEND_DOM_SOURCE = 0,
154 MALI_BLEND_DOM_DESTINATION = 1
155 };
156
157 enum mali_dominant_factor {
158 MALI_DOMINANT_UNK0 = 0,
159 MALI_DOMINANT_ZERO = 1,
160 MALI_DOMINANT_SRC_COLOR = 2,
161 MALI_DOMINANT_DST_COLOR = 3,
162 MALI_DOMINANT_UNK4 = 4,
163 MALI_DOMINANT_SRC_ALPHA = 5,
164 MALI_DOMINANT_DST_ALPHA = 6,
165 MALI_DOMINANT_CONSTANT = 7,
166 };
167
168 enum mali_blend_modifier {
169 MALI_BLEND_MOD_UNK0 = 0,
170 MALI_BLEND_MOD_NORMAL = 1,
171 MALI_BLEND_MOD_SOURCE_ONE = 2,
172 MALI_BLEND_MOD_DEST_ONE = 3,
173 };
174
175 struct mali_blend_mode {
176 enum mali_blend_modifier clip_modifier : 2;
177 unsigned unused_0 : 1;
178 unsigned negate_source : 1;
179
180 enum mali_dominant_blend dominant : 1;
181
182 enum mali_nondominant_mode nondominant_mode : 1;
183
184 unsigned unused_1 : 1;
185
186 unsigned negate_dest : 1;
187
188 enum mali_dominant_factor dominant_factor : 3;
189 unsigned complement_dominant : 1;
190 } __attribute__((packed));
191
192 struct mali_blend_equation {
193 /* Of type mali_blend_mode */
194 unsigned rgb_mode : 12;
195 unsigned alpha_mode : 12;
196
197 unsigned zero1 : 4;
198
199 /* Corresponds to MALI_MASK_* above and glColorMask arguments */
200
201 unsigned color_mask : 4;
202 } __attribute__((packed));
203
204 /* Used with channel swizzling */
205 enum mali_channel {
206 MALI_CHANNEL_RED = 0,
207 MALI_CHANNEL_GREEN = 1,
208 MALI_CHANNEL_BLUE = 2,
209 MALI_CHANNEL_ALPHA = 3,
210 MALI_CHANNEL_ZERO = 4,
211 MALI_CHANNEL_ONE = 5,
212 MALI_CHANNEL_RESERVED_0 = 6,
213 MALI_CHANNEL_RESERVED_1 = 7,
214 };
215
216 struct mali_channel_swizzle {
217 enum mali_channel r : 3;
218 enum mali_channel g : 3;
219 enum mali_channel b : 3;
220 enum mali_channel a : 3;
221 } __attribute__((packed));
222
223 /* Compressed per-pixel formats. Each of these formats expands to one to four
224 * floating-point or integer numbers, as defined by the OpenGL specification.
225 * There are various places in OpenGL where the user can specify a compressed
226 * format in memory, which all use the same 8-bit enum in the various
227 * descriptors, although different hardware units support different formats.
228 */
229
230 /* The top 3 bits specify how the bits of each component are interpreted. */
231
232 /* e.g. ETC2_RGB8 */
233 #define MALI_FORMAT_COMPRESSED (0 << 5)
234
235 /* e.g. R11F_G11F_B10F */
236 #define MALI_FORMAT_SPECIAL (2 << 5)
237
238 /* signed normalized, e.g. RGBA8_SNORM */
239 #define MALI_FORMAT_SNORM (3 << 5)
240
241 /* e.g. RGBA8UI */
242 #define MALI_FORMAT_UINT (4 << 5)
243
244 /* e.g. RGBA8 and RGBA32F */
245 #define MALI_FORMAT_UNORM (5 << 5)
246
247 /* e.g. RGBA8I and RGBA16F */
248 #define MALI_FORMAT_SINT (6 << 5)
249
250 /* These formats seem to largely duplicate the others. They're used at least
251 * for Bifrost framebuffer output.
252 */
253 #define MALI_FORMAT_SPECIAL2 (7 << 5)
254
255 /* If the high 3 bits are 3 to 6, these two bits say how many components
256 * there are.
257 */
258 #define MALI_NR_CHANNELS(n) ((n - 1) << 3)
259
260 /* If the high 3 bits are 3 to 6, then the low 3 bits say how big each
261 * component is, except the special MALI_CHANNEL_FLOAT which overrides what the
262 * bits mean.
263 */
264
265 #define MALI_CHANNEL_4 2
266
267 #define MALI_CHANNEL_8 3
268
269 #define MALI_CHANNEL_16 4
270
271 #define MALI_CHANNEL_32 5
272
273 /* For MALI_FORMAT_SINT it means a half-float (e.g. RG16F). For
274 * MALI_FORMAT_UNORM, it means a 32-bit float.
275 */
276 #define MALI_CHANNEL_FLOAT 7
277
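
/* Purely illustrative helpers (the names are not part of the driver) showing
 * how the bit layout described above decomposes for the SNORM/UINT/UNORM/SINT
 * families, i.e. when the top 3 bits are 3 to 6. */

#define MALI_EXAMPLE_FORMAT_FAMILY(fmt)       (((fmt) >> 5) & 0x7)
#define MALI_EXAMPLE_FORMAT_NR_CHANNELS(fmt)  ((((fmt) >> 3) & 0x3) + 1)
#define MALI_EXAMPLE_FORMAT_CHANNEL_SIZE(fmt) ((fmt) & 0x7)

/* For example, MALI_RGBA8_UNORM below is (5 << 5) | (3 << 3) | 3: the UNORM
 * family, four channels, 8 bits per channel. */
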
278 enum mali_format {
279 MALI_ETC2_RGB8 = MALI_FORMAT_COMPRESSED | 0x1,
280 MALI_ETC2_R11_UNORM = MALI_FORMAT_COMPRESSED | 0x2,
281 MALI_ETC2_RGBA8 = MALI_FORMAT_COMPRESSED | 0x3,
282 MALI_ETC2_RG11_UNORM = MALI_FORMAT_COMPRESSED | 0x4,
283 MALI_ETC2_R11_SNORM = MALI_FORMAT_COMPRESSED | 0x11,
284 MALI_ETC2_RG11_SNORM = MALI_FORMAT_COMPRESSED | 0x12,
285 MALI_ETC2_RGB8A1 = MALI_FORMAT_COMPRESSED | 0x13,
286 MALI_ASTC_SRGB_SUPP = MALI_FORMAT_COMPRESSED | 0x16,
287 MALI_ASTC_HDR_SUPP = MALI_FORMAT_COMPRESSED | 0x17,
288
289 MALI_RGB565 = MALI_FORMAT_SPECIAL | 0x0,
290 MALI_RGB5_X1_UNORM = MALI_FORMAT_SPECIAL | 0x1,
291 MALI_RGB5_A1_UNORM = MALI_FORMAT_SPECIAL | 0x2,
292 MALI_RGB10_A2_UNORM = MALI_FORMAT_SPECIAL | 0x3,
293 MALI_RGB10_A2_SNORM = MALI_FORMAT_SPECIAL | 0x5,
294 MALI_RGB10_A2UI = MALI_FORMAT_SPECIAL | 0x7,
295 MALI_RGB10_A2I = MALI_FORMAT_SPECIAL | 0x9,
296
297 MALI_RGB332_UNORM = MALI_FORMAT_SPECIAL | 0xb,
298
299 /* YUV formats */
300 MALI_NV12 = MALI_FORMAT_SPECIAL | 0xc,
301
302 MALI_Z32_UNORM = MALI_FORMAT_SPECIAL | 0xD,
303 MALI_R32_FIXED = MALI_FORMAT_SPECIAL | 0x11,
304 MALI_RG32_FIXED = MALI_FORMAT_SPECIAL | 0x12,
305 MALI_RGB32_FIXED = MALI_FORMAT_SPECIAL | 0x13,
306 MALI_RGBA32_FIXED = MALI_FORMAT_SPECIAL | 0x14,
307 MALI_R11F_G11F_B10F = MALI_FORMAT_SPECIAL | 0x19,
308 MALI_R9F_G9F_B9F_E5F = MALI_FORMAT_SPECIAL | 0x1b,
309 /* Only used for varyings, to indicate the transformed gl_Position */
310 MALI_VARYING_POS = MALI_FORMAT_SPECIAL | 0x1e,
311 /* Only used for varyings, to indicate that the write should be
312 * discarded.
313 */
314 MALI_VARYING_DISCARD = MALI_FORMAT_SPECIAL | 0x1f,
315
316 MALI_R8_SNORM = MALI_FORMAT_SNORM | MALI_NR_CHANNELS(1) | MALI_CHANNEL_8,
317 MALI_R16_SNORM = MALI_FORMAT_SNORM | MALI_NR_CHANNELS(1) | MALI_CHANNEL_16,
318 MALI_R32_SNORM = MALI_FORMAT_SNORM | MALI_NR_CHANNELS(1) | MALI_CHANNEL_32,
319 MALI_RG8_SNORM = MALI_FORMAT_SNORM | MALI_NR_CHANNELS(2) | MALI_CHANNEL_8,
320 MALI_RG16_SNORM = MALI_FORMAT_SNORM | MALI_NR_CHANNELS(2) | MALI_CHANNEL_16,
321 MALI_RG32_SNORM = MALI_FORMAT_SNORM | MALI_NR_CHANNELS(2) | MALI_CHANNEL_32,
322 MALI_RGB8_SNORM = MALI_FORMAT_SNORM | MALI_NR_CHANNELS(3) | MALI_CHANNEL_8,
323 MALI_RGB16_SNORM = MALI_FORMAT_SNORM | MALI_NR_CHANNELS(3) | MALI_CHANNEL_16,
324 MALI_RGB32_SNORM = MALI_FORMAT_SNORM | MALI_NR_CHANNELS(3) | MALI_CHANNEL_32,
325 MALI_RGBA8_SNORM = MALI_FORMAT_SNORM | MALI_NR_CHANNELS(4) | MALI_CHANNEL_8,
326 MALI_RGBA16_SNORM = MALI_FORMAT_SNORM | MALI_NR_CHANNELS(4) | MALI_CHANNEL_16,
327 MALI_RGBA32_SNORM = MALI_FORMAT_SNORM | MALI_NR_CHANNELS(4) | MALI_CHANNEL_32,
328
329 MALI_R8UI = MALI_FORMAT_UINT | MALI_NR_CHANNELS(1) | MALI_CHANNEL_8,
330 MALI_R16UI = MALI_FORMAT_UINT | MALI_NR_CHANNELS(1) | MALI_CHANNEL_16,
331 MALI_R32UI = MALI_FORMAT_UINT | MALI_NR_CHANNELS(1) | MALI_CHANNEL_32,
332 MALI_RG8UI = MALI_FORMAT_UINT | MALI_NR_CHANNELS(2) | MALI_CHANNEL_8,
333 MALI_RG16UI = MALI_FORMAT_UINT | MALI_NR_CHANNELS(2) | MALI_CHANNEL_16,
334 MALI_RG32UI = MALI_FORMAT_UINT | MALI_NR_CHANNELS(2) | MALI_CHANNEL_32,
335 MALI_RGB8UI = MALI_FORMAT_UINT | MALI_NR_CHANNELS(3) | MALI_CHANNEL_8,
336 MALI_RGB16UI = MALI_FORMAT_UINT | MALI_NR_CHANNELS(3) | MALI_CHANNEL_16,
337 MALI_RGB32UI = MALI_FORMAT_UINT | MALI_NR_CHANNELS(3) | MALI_CHANNEL_32,
338 MALI_RGBA8UI = MALI_FORMAT_UINT | MALI_NR_CHANNELS(4) | MALI_CHANNEL_8,
339 MALI_RGBA16UI = MALI_FORMAT_UINT | MALI_NR_CHANNELS(4) | MALI_CHANNEL_16,
340 MALI_RGBA32UI = MALI_FORMAT_UINT | MALI_NR_CHANNELS(4) | MALI_CHANNEL_32,
341
342 MALI_R8_UNORM = MALI_FORMAT_UNORM | MALI_NR_CHANNELS(1) | MALI_CHANNEL_8,
343 MALI_R16_UNORM = MALI_FORMAT_UNORM | MALI_NR_CHANNELS(1) | MALI_CHANNEL_16,
344 MALI_R32_UNORM = MALI_FORMAT_UNORM | MALI_NR_CHANNELS(1) | MALI_CHANNEL_32,
345 MALI_R32F = MALI_FORMAT_UNORM | MALI_NR_CHANNELS(1) | MALI_CHANNEL_FLOAT,
346 MALI_RG8_UNORM = MALI_FORMAT_UNORM | MALI_NR_CHANNELS(2) | MALI_CHANNEL_8,
347 MALI_RG16_UNORM = MALI_FORMAT_UNORM | MALI_NR_CHANNELS(2) | MALI_CHANNEL_16,
348 MALI_RG32_UNORM = MALI_FORMAT_UNORM | MALI_NR_CHANNELS(2) | MALI_CHANNEL_32,
349 MALI_RG32F = MALI_FORMAT_UNORM | MALI_NR_CHANNELS(2) | MALI_CHANNEL_FLOAT,
350 MALI_RGB8_UNORM = MALI_FORMAT_UNORM | MALI_NR_CHANNELS(3) | MALI_CHANNEL_8,
351 MALI_RGB16_UNORM = MALI_FORMAT_UNORM | MALI_NR_CHANNELS(3) | MALI_CHANNEL_16,
352 MALI_RGB32_UNORM = MALI_FORMAT_UNORM | MALI_NR_CHANNELS(3) | MALI_CHANNEL_32,
353 MALI_RGB32F = MALI_FORMAT_UNORM | MALI_NR_CHANNELS(3) | MALI_CHANNEL_FLOAT,
354 MALI_RGBA4_UNORM = MALI_FORMAT_UNORM | MALI_NR_CHANNELS(4) | MALI_CHANNEL_4,
355 MALI_RGBA8_UNORM = MALI_FORMAT_UNORM | MALI_NR_CHANNELS(4) | MALI_CHANNEL_8,
356 MALI_RGBA16_UNORM = MALI_FORMAT_UNORM | MALI_NR_CHANNELS(4) | MALI_CHANNEL_16,
357 MALI_RGBA32_UNORM = MALI_FORMAT_UNORM | MALI_NR_CHANNELS(4) | MALI_CHANNEL_32,
358 MALI_RGBA32F = MALI_FORMAT_UNORM | MALI_NR_CHANNELS(4) | MALI_CHANNEL_FLOAT,
359
360 MALI_R8I = MALI_FORMAT_SINT | MALI_NR_CHANNELS(1) | MALI_CHANNEL_8,
361 MALI_R16I = MALI_FORMAT_SINT | MALI_NR_CHANNELS(1) | MALI_CHANNEL_16,
362 MALI_R32I = MALI_FORMAT_SINT | MALI_NR_CHANNELS(1) | MALI_CHANNEL_32,
363 MALI_R16F = MALI_FORMAT_SINT | MALI_NR_CHANNELS(1) | MALI_CHANNEL_FLOAT,
364 MALI_RG8I = MALI_FORMAT_SINT | MALI_NR_CHANNELS(2) | MALI_CHANNEL_8,
365 MALI_RG16I = MALI_FORMAT_SINT | MALI_NR_CHANNELS(2) | MALI_CHANNEL_16,
366 MALI_RG32I = MALI_FORMAT_SINT | MALI_NR_CHANNELS(2) | MALI_CHANNEL_32,
367 MALI_RG16F = MALI_FORMAT_SINT | MALI_NR_CHANNELS(2) | MALI_CHANNEL_FLOAT,
368 MALI_RGB8I = MALI_FORMAT_SINT | MALI_NR_CHANNELS(3) | MALI_CHANNEL_8,
369 MALI_RGB16I = MALI_FORMAT_SINT | MALI_NR_CHANNELS(3) | MALI_CHANNEL_16,
370 MALI_RGB32I = MALI_FORMAT_SINT | MALI_NR_CHANNELS(3) | MALI_CHANNEL_32,
371 MALI_RGB16F = MALI_FORMAT_SINT | MALI_NR_CHANNELS(3) | MALI_CHANNEL_FLOAT,
372 MALI_RGBA8I = MALI_FORMAT_SINT | MALI_NR_CHANNELS(4) | MALI_CHANNEL_8,
373 MALI_RGBA16I = MALI_FORMAT_SINT | MALI_NR_CHANNELS(4) | MALI_CHANNEL_16,
374 MALI_RGBA32I = MALI_FORMAT_SINT | MALI_NR_CHANNELS(4) | MALI_CHANNEL_32,
375 MALI_RGBA16F = MALI_FORMAT_SINT | MALI_NR_CHANNELS(4) | MALI_CHANNEL_FLOAT,
376
377 MALI_RGBA4 = MALI_FORMAT_SPECIAL2 | 0x8,
378 MALI_RGBA8_2 = MALI_FORMAT_SPECIAL2 | 0xd,
379 MALI_RGB10_A2_2 = MALI_FORMAT_SPECIAL2 | 0xe,
380 };
381
382
383 /* Alpha coverage is encoded as 4 bits (from a clampf), with inversion
384 * literally performing a bitwise invert. This macro produces slightly wrong
385 * results and I'm not sure why; some rounding issue I suppose... */
386
387 #define MALI_ALPHA_COVERAGE(clampf) ((uint16_t) (int) (clampf * 15.0f))
388 #define MALI_GET_ALPHA_COVERAGE(nibble) ((float) nibble / 15.0f)
389
390 /* Applies to midgard1.flags_lo */
391
392 /* Should be set when the fragment shader updates the depth value. */
393 #define MALI_WRITES_Z (1 << 4)
394
395 /* Should the hardware perform early-Z testing? Normally this should be set
396 * for performance reasons. Clear it if you use discard,
397 * alpha-to-coverage, etc. It's also possible this disables
398 * forward-pixel kill; we're not quite sure which bit is which yet.
399 * TODO: How does this interact with blending? */
400
401 #define MALI_EARLY_Z (1 << 6)
402
403 /* Should the hardware calculate derivatives (via helper invocations)? Set in a
404 * fragment shader that uses texturing or derivative functions */
405
406 #define MALI_HELPER_INVOCATIONS (1 << 7)
407
408 /* Flags denoting the fragment shader's use of tilebuffer readback. If the
409 * shader might read any part of the tilebuffer, set MALI_READS_TILEBUFFER. If
410 * it might read depth/stencil in particular, also set MALI_READS_ZS */
411
412 #define MALI_READS_ZS (1 << 8)
413 #define MALI_READS_TILEBUFFER (1 << 12)
414
415 /* Applies to midgard1.flags_hi */
416
417 /* Should be set when the fragment shader updates the stencil value. */
418 #define MALI_WRITES_S (1 << 2)
419
420 /* The raw Midgard blend payload can either be an equation or a shader
421 * address, depending on the context */
422
423 union midgard_blend {
424 mali_ptr shader;
425
426 struct {
427 struct mali_blend_equation equation;
428 float constant;
429 };
430 };
431
432 /* We need to load the tilebuffer to blend (i.e. the destination factor is not
433 * ZERO) */
434
435 #define MALI_BLEND_LOAD_TIB (0x1)
436
437 /* A blend shader is used to blend this render target */
438 #define MALI_BLEND_MRT_SHADER (0x2)
439
440 /* On MRT Midgard systems (using an MFBD), each render target gets its own
441 * blend descriptor */
442
443 #define MALI_BLEND_SRGB (0x400)
444
445 /* Dithering is specified here for MFBD, otherwise NO_DITHER for SFBD */
446 #define MALI_BLEND_NO_DITHER (0x800)
447
448 struct midgard_blend_rt {
449 /* Flags take a base value of 0x200 to enable the render target.
450 * OR with 0x1 for blending (anything other than REPLACE).
451 * OR with 0x2 for programmable blending.
452 * OR with MALI_BLEND_SRGB for implicit sRGB.
453 */
454
455 u64 flags;
456 union midgard_blend blend;
457 } __attribute__((packed));
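
/* Illustrative only (the macro name is hypothetical): the flag composition
 * described in the comment above, for an enabled render target using
 * fixed-function (non-REPLACE) blending into an sRGB target. */

#define MALI_EXAMPLE_BLEND_RT_FLAGS \
        (0x200 | MALI_BLEND_LOAD_TIB | MALI_BLEND_SRGB)
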
458
459 /* On Bifrost systems (all MRT), each render target gets one of these
460 * descriptors */
461
462 struct bifrost_blend_rt {
463 /* This is likely an analogue of the flags on
464 * midgard_blend_rt */
465
466 u16 flags; // = 0x200
467
468 /* Single-channel blend constants are encoded in a sort of
469 * fixed-point. Basically, the float is mapped to a byte, becoming
470 * a high byte, and then the lower-byte is added for precision.
471 * For the original float f:
472 *
473 * f = (constant_hi / 255) + (constant_lo / 65535)
474 *
475 * constant_hi = int(f * 255)
476 * constant_lo = 65535*f - (65535/255) * constant_hi
477 * (see the encoding sketch following this struct) */
478
479 u16 constant;
480
481 struct mali_blend_equation equation;
482 /*
483 * - 0x19 normally
484 * - 0x3 when this slot is unused (everything else is 0 except the index)
485 * - 0x11 when this is the fourth slot (and it's used)
486 * - 0 when there is a blend shader
487 */
488 u16 unk2;
489 /* increments from 0 to 3 */
490 u16 index;
491
492 union {
493 struct {
494 /* So far, I've only seen:
495 * - R001 for 1-component formats
496 * - RG01 for 2-component formats
497 * - RGB1 for 3-component formats
498 * - RGBA for 4-component formats
499 */
500 u32 swizzle : 12;
501 enum mali_format format : 8;
502
503 /* Type of the shader output variable. Note, this can
504 * be different from the format.
505 *
506 * 0: f16 (mediump float)
507 * 1: f32 (highp float)
508 * 2: i32 (highp int)
509 * 3: u32 (highp uint)
510 * 4: i16 (mediump int)
511 * 5: u16 (mediump uint)
512 */
513 u32 shader_type : 3;
514 u32 zero : 9;
515 };
516
517 /* Only the low 32 bits of the blend shader are stored, the
518 * high 32 bits are implicitly the same as the original shader.
519 * According to the kernel driver, the program counter for
520 * shaders is actually only 24 bits, so shaders cannot cross
521 * the 2^24-byte boundary, and neither can the blend shader.
522 * The blob handles this by allocating a 2^24 byte pool for
523 * shaders, and making sure that any blend shaders are stored
524 * in the same pool as the original shader. The kernel will
525 * make sure this allocation is aligned to 2^24 bytes.
526 */
527 u32 shader;
528 };
529 } __attribute__((packed));
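
/* A sketch of the blend-constant encoding described inside bifrost_blend_rt
 * above: the float maps to a high byte, and a low byte adds precision. The
 * helper name and the (hi << 8) | lo packing are assumptions inferred from
 * that comment, not a confirmed driver routine. */

static inline u16
pan_example_encode_blend_constant(float f)
{
        u16 constant_hi = (u16) (f * 255.0f);
        u16 constant_lo = (u16) (65535.0f * f - (65535.0f / 255.0f) * constant_hi);

        return (constant_hi << 8) | constant_lo;
}
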
530
531 /* Descriptor for the shader. Following this are between one and four blend
532 * descriptors, one for each active render target */
533
534 struct mali_shader_meta {
535 mali_ptr shader;
536 u16 sampler_count;
537 u16 texture_count;
538 u16 attribute_count;
539 u16 varying_count;
540
541 union {
542 struct {
543 u32 uniform_buffer_count : 4;
544 u32 unk1 : 28; // = 0x800000 for vertex, 0x958020 for tiler
545 } bifrost1;
546 struct {
547 unsigned uniform_buffer_count : 4;
548 unsigned flags_lo : 12;
549
550 /* vec4 units */
551 unsigned work_count : 5;
552 unsigned uniform_count : 5;
553 unsigned flags_hi : 6;
554 } midgard1;
555 };
556
557 /* Same as glPolygonOffset() arguments */
558 float depth_units;
559 float depth_factor;
560
561 u32 unknown2_2;
562
563 u16 alpha_coverage;
564 u16 unknown2_3;
565
566 u8 stencil_mask_front;
567 u8 stencil_mask_back;
568 u16 unknown2_4;
569
570 struct mali_stencil_test stencil_front;
571 struct mali_stencil_test stencil_back;
572
573 union {
574 struct {
575 u32 unk3 : 7;
576 /* On Bifrost, some system values are preloaded in
577 * registers R55-R62 by the thread dispatcher prior to
578 * the start of shader execution. This is a bitfield
579 * with one entry for each register saying which
580 * registers need to be preloaded. Right now, the known
581 * values are:
582 *
583 * Vertex/compute:
584 * - R55 : gl_LocalInvocationID.xy
585 * - R56 : gl_LocalInvocationID.z + unknown in high 16 bits
586 * - R57 : gl_WorkGroupID.x
587 * - R58 : gl_WorkGroupID.y
588 * - R59 : gl_WorkGroupID.z
589 * - R60 : gl_GlobalInvocationID.x
590 * - R61 : gl_GlobalInvocationID.y/gl_VertexID (without base)
591 * - R62 : gl_GlobalInvocationID.z/gl_InstanceID (without base)
592 *
593 * Fragment:
594 * - R55 : unknown, never seen (but the bit for this is
595 * always set?)
596 * - R56 : unknown (bit always unset)
597 * - R57 : gl_PrimitiveID
598 * - R58 : gl_FrontFacing in low bit, potentially other stuff
599 * - R59 : u16 fragment coordinates (used to compute
600 * gl_FragCoord.xy, together with sample positions)
601 * - R60 : gl_SampleMask (used in epilog, so pretty
602 * much always used, but the bit is always 0 -- is
603 * this just always pushed?)
604 * - R61 : gl_SampleMaskIn and gl_SampleID, used by
605 * varying interpolation.
606 * - R62 : unknown (bit always unset).
607 *
608 * Later GPUs (starting with Mali-G52?) support
609 * preloading float varyings into r0-r7. This is
610 * indicated by setting 0x40. There is no distinction
611 * here between 1 varying and 2.
612 */
613 u32 preload_regs : 8;
614 /* In units of 8 bytes or 64 bits, since the
615 * uniform/const port loads 64 bits at a time.
616 */
617 u32 uniform_count : 7;
618 u32 unk4 : 10; // = 2
619 } bifrost2;
620 struct {
621 u32 unknown2_7;
622 } midgard2;
623 };
624
625 u32 padding;
626
627 /* Blending information for the older non-MRT Midgard HW. Check for
628 * MALI_HAS_BLEND_SHADER to decide how to interpret.
629 */
630
631 union midgard_blend blend;
632 } __attribute__((packed));
633
634 /* This only concerns hardware jobs */
635
636 /* Possible values for job_descriptor_size */
637
638 #define MALI_JOB_32 0
639 #define MALI_JOB_64 1
640
641 struct mali_job_descriptor_header {
642 u32 exception_status;
643 u32 first_incomplete_task;
644 u64 fault_pointer;
645 u8 job_descriptor_size : 1;
646 enum mali_job_type job_type : 7;
647 u8 job_barrier : 1;
648 u8 unknown_flags : 7;
649 u16 job_index;
650 u16 job_dependency_index_1;
651 u16 job_dependency_index_2;
652 u64 next_job;
653 } __attribute__((packed));
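
/* A hedged sketch of chaining two hardware jobs with the header above: a
 * tiler job that depends on a vertex job. Index numbering starting at 1
 * (with 0 meaning "no dependency") and the use of next_job to link the
 * headers in GPU memory are assumptions made for illustration; the helper
 * name is hypothetical. */

static inline void
pan_example_chain_vertex_tiler(struct mali_job_descriptor_header *vertex,
                               struct mali_job_descriptor_header *tiler,
                               u64 tiler_job_gpu_va)
{
        vertex->job_type = JOB_TYPE_VERTEX;
        vertex->job_index = 1;
        vertex->next_job = tiler_job_gpu_va;

        tiler->job_type = JOB_TYPE_TILER;
        tiler->job_index = 2;
        tiler->job_dependency_index_1 = 1;  /* wait for the vertex job */
}
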
654
655 /* These concern exception_status */
656
657 /* Access type causing a fault, paralleling AS_FAULTSTATUS_* entries in the
658 * kernel */
659
660 enum mali_exception_access {
661 /* Atomic in the kernel for MMU, but that doesn't make sense for a job
662 * fault so it's just unused */
663 MALI_EXCEPTION_ACCESS_NONE = 0,
664
665 MALI_EXCEPTION_ACCESS_EXECUTE = 1,
666 MALI_EXCEPTION_ACCESS_READ = 2,
667 MALI_EXCEPTION_ACCESS_WRITE = 3
668 };
669
670 /* Details about write_value are from the panfrost IGT tests, which use it as
671 * a generic dword write primitive */
672
673 #define MALI_WRITE_VALUE_ZERO 3
674
675 struct mali_payload_write_value {
676 u64 address;
677 u32 value_descriptor;
678 u32 reserved;
679 u64 immediate;
680 } __attribute__((packed));
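
/* Illustrative example (helper name hypothetical, and the "write a zero"
 * reading of MALI_WRITE_VALUE_ZERO is an assumption from its name): a
 * write-value payload targeting a dword at `address`. */

static inline struct mali_payload_write_value
pan_example_write_zero(u64 address)
{
        struct mali_payload_write_value payload = {
                .address = address,
                .value_descriptor = MALI_WRITE_VALUE_ZERO,
        };

        return payload;
}
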
681
682 /*
683 * Mali Attributes
684 *
685 * This structure lets the attribute unit compute the address of an attribute
686 * given the vertex and instance ID. Unfortunately, the way this works is
687 * rather complicated when instancing is enabled.
688 *
689 * To explain this, first we need to explain how compute and vertex threads are
690 * dispatched. This is a guess (although a pretty firm guess!) since the
691 * details are mostly hidden from the driver, except for attribute instancing.
692 * When a quad is dispatched, it receives a single, linear index. However, we
693 * need to translate that index into a (vertex id, instance id) pair, or a
694 * (local id x, local id y, local id z) triple for compute shaders (although
695 * vertex shaders and compute shaders are handled almost identically).
696 * Focusing on vertex shaders, one option would be to do:
697 *
698 * vertex_id = linear_id % num_vertices
699 * instance_id = linear_id / num_vertices
700 *
701 * but this involves a costly division and modulus by an arbitrary number.
702 * Instead, we could pad num_vertices. We dispatch padded_num_vertices *
703 * num_instances threads instead of num_vertices * num_instances, which results
704 * in some "extra" threads with vertex_id >= num_vertices, which we have to
705 * discard. The more we pad num_vertices, the more "wasted" threads we
706 * dispatch, but the division is potentially easier.
707 *
708 * One straightforward choice is to pad num_vertices to the next power of two,
709 * which means that the division and modulus are just simple bit shifts and
710 * masking. But the actual algorithm is a bit more complicated. The thread
711 * dispatcher has special support for dividing by 3, 5, 7, and 9, in addition
712 * to dividing by a power of two. This is possibly using the technique
713 * described in patent US20170010862A1. As a result, padded_num_vertices can be
714 * 1, 3, 5, 7, or 9 times a power of two. This results in less wasted threads,
715 * since we need less padding.
716 *
717 * padded_num_vertices is picked by the hardware. The driver just specifies the
718 * actual number of vertices. At least for Mali G71, the first few cases are
719 * given by:
720 *
721 * num_vertices | padded_num_vertices
722 * 3 | 4
723 * 4-7 | 8
724 * 8-11 | 12 (3 * 4)
725 * 12-15 | 16
726 * 16-19 | 20 (5 * 4)
727 *
728 * Note that padded_num_vertices is a multiple of four (presumably because
729 * threads are dispatched in groups of 4). Also, padded_num_vertices is always
730 * at least one more than num_vertices, which seems like a quirk of the
731 * hardware. For larger num_vertices, the hardware uses the following
732 * algorithm: using the binary representation of num_vertices, we look at the
733 * most significant set bit as well as the following 3 bits. Let n be the
734 * number of bits after those 4 bits. Then we set padded_num_vertices according
735 * to the following table:
736 *
737 * high bits | padded_num_vertices
738 * 1000 | 9 * 2^n
739 * 1001 | 5 * 2^(n+1)
740 * 101x | 3 * 2^(n+2)
741 * 110x | 7 * 2^(n+1)
742 * 111x | 2^(n+4)
743 *
744 * For example, if num_vertices = 70 is passed to glDraw(), its binary
745 * representation is 1000110, so n = 3 and the high bits are 1000, and
746 * therefore padded_num_vertices = 9 * 2^3 = 72.
747 *
748 * The attribute unit works in terms of the original linear_id. If
749 * num_instances = 1, then they are the same, and everything is simple.
750 * However, with instancing things get more complicated. There are four
751 * possible modes, two of them we can group together:
752 *
753 * 1. Use the linear_id directly. Only used when there is no instancing.
754 *
755 * 2. Use the linear_id modulo a constant. This is used for per-vertex
756 * attributes with instancing enabled by making the constant equal
757 * padded_num_vertices. Because the modulus is always padded_num_vertices, this
758 * mode only supports a modulus that is a power of 2 times 1, 3, 5, 7, or 9.
759 * The shift field specifies the power of two, while the extra_flags field
760 * specifies the odd number. If shift = n and extra_flags = m, then the modulus
761 * is (2m + 1) * 2^n. As an example, if num_vertices = 70, then as computed
762 * above, padded_num_vertices = 9 * 2^3, so we should set extra_flags = 4 and
763 * shift = 3. Note that we must exactly follow the hardware algorithm used to
764 * get padded_num_vertices in order to correctly implement per-vertex
765 * attributes.
766 *
767 * 3. Divide the linear_id by a constant. In order to correctly implement
768 * instance divisors, we have to divide linear_id by padded_num_vertices times
769 * the user-specified divisor. So first we compute padded_num_vertices, again
770 * following the exact same algorithm that the hardware uses, then multiply it
771 * by the GL-level divisor to get the hardware-level divisor. This case is
772 * further divided into two more cases. If the hardware-level divisor is a
773 * power of two, then we just need to shift. The shift amount is specified by
774 * the shift field, so that the hardware-level divisor is just 2^shift.
775 *
776 * If it isn't a power of two, then we have to divide by an arbitrary integer.
777 * For that, we use the well-known technique of multiplying by an approximation
778 * of the inverse. The driver must compute the magic multiplier and shift
779 * amount, and then the hardware does the multiplication and shift. The
780 * hardware and driver also use the "round-down" optimization as described in
781 * http://ridiculousfish.com/files/faster_unsigned_division_by_constants.pdf.
782 * The hardware further assumes the multiplier is between 2^31 and 2^32, so the
783 * high bit is implicitly set to 1 even though it is set to 0 by the driver --
784 * presumably this simplifies the hardware multiplier a little. The hardware
785 * first multiplies linear_id by the multiplier and takes the high 32 bits,
786 * then applies the round-down correction if extra_flags = 1, then finally
787 * shifts right by the shift field.
788 *
789 * There are some differences between ridiculousfish's algorithm and the Mali
790 * hardware algorithm, which means that the reference code from ridiculousfish
791 * doesn't always produce the right constants. Mali does not use the pre-shift
792 * optimization, since that would make a hardware implementation slower (it
793 * would have to always do the pre-shift, multiply, and post-shift operations).
794 * It also forces the multiplier to be at least 2^31, which means that the
795 * exponent is entirely fixed, so there is no trial-and-error. Altogether,
796 * given the divisor d, the algorithm the driver must follow is:
797 *
798 * 1. Set shift = floor(log2(d)).
799 * 2. Compute m = ceil(2^(shift + 32) / d) and e = 2^(shift + 32) % d.
800 * 3. If e <= 2^shift, then we need to use the round-down algorithm. Set
801 * magic_divisor = m - 1 and extra_flags = 1.
802 * 4. Otherwise, set magic_divisor = m and extra_flags = 0.
803 *
804 * Unrelated to instancing/actual attributes, images (the OpenCL kind) are
805 * implemented as special attributes, denoted by MALI_ATTR_IMAGE. For images,
806 * let shift=extra_flags=0. Stride is set to the image format's bytes-per-pixel
807 * (*NOT the row stride*). Size is set to the size of the image itself.
808 *
809 * Special internal attributes and varyings (gl_VertexID, gl_FrontFacing, etc)
810 * use particular fixed addresses with modified structures.
811 */
812
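/* The two calculations described above lend themselves to short sketches.
 * Both helpers below are illustrative (names hypothetical) and simply follow
 * the text: one reproduces the padded_num_vertices rule, the other the
 * four-step NPOT divisor recipe. They are not the driver's canonical
 * routines. */

static inline unsigned
pan_example_padded_vertex_count(unsigned num_vertices)
{
        /* First few cases from the table: round down to a multiple of 4,
         * then add 4 (padding is always at least one vertex) */
        if (num_vertices < 20)
                return (num_vertices & ~3u) + 4;

        /* General rule: take the top 4 bits of the value, with n bits
         * remaining below them */
        unsigned msb = 31 - __builtin_clz(num_vertices);
        unsigned n = msb - 3;

        switch (num_vertices >> n) {
        case 0x8: return 9 << n;                /* 1000: 9 * 2^n */
        case 0x9: return 5 << (n + 1);          /* 1001: 5 * 2^(n+1) */
        case 0xA:
        case 0xB: return 3 << (n + 2);          /* 101x: 3 * 2^(n+2) */
        case 0xC:
        case 0xD: return 7 << (n + 1);          /* 110x: 7 * 2^(n+1) */
        default:  return 1 << (n + 4);          /* 111x: 2^(n+4) */
        }
}

static inline void
pan_example_npot_divide_magic(u32 hw_divisor, u32 *magic_divisor,
                              u32 *shift, u32 *extra_flags)
{
        /* 1. shift = floor(log2(d)) */
        u32 s = 31 - __builtin_clz(hw_divisor);

        /* 2. m = ceil(2^(shift + 32) / d), e = 2^(shift + 32) % d */
        u64 pow2 = 1ull << (s + 32);
        u64 m = (pow2 + hw_divisor - 1) / hw_divisor;
        u64 e = pow2 % hw_divisor;

        *shift = s;

        if (e <= (1ull << s)) {
                /* 3. Round-down variant */
                *magic_divisor = (u32) (m - 1);
                *extra_flags = 1;
        } else {
                /* 4. Plain variant; bit 31 of the multiplier is implicitly
                 * set as far as the hardware is concerned */
                *magic_divisor = (u32) m;
                *extra_flags = 0;
        }
}
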
813 enum mali_attr_mode {
814 MALI_ATTR_UNUSED = 0,
815 MALI_ATTR_LINEAR = 1,
816 MALI_ATTR_POT_DIVIDE = 2,
817 MALI_ATTR_MODULO = 3,
818 MALI_ATTR_NPOT_DIVIDE = 4,
819 MALI_ATTR_IMAGE = 5,
820 };
821
822 /* Pseudo-address for gl_VertexID, gl_FragCoord, gl_FrontFacing */
823
824 #define MALI_ATTR_VERTEXID (0x22)
825 #define MALI_ATTR_INSTANCEID (0x24)
826 #define MALI_VARYING_FRAG_COORD (0x25)
827 #define MALI_VARYING_FRONT_FACING (0x26)
828
829 /* This magic "pseudo-address" is used as `elements` to implement
830 * gl_PointCoord. When read from a fragment shader, it generates a point
831 * coordinate per the OpenGL ES 2.0 specification. Flipped coordinate spaces
832 * require an affine transformation in the shader. */
833
834 #define MALI_VARYING_POINT_COORD (0x61)
835
836 /* Used for comparison to check if an address is special. Mostly a guess, but
837 * it doesn't really matter. */
838
839 #define MALI_RECORD_SPECIAL (0x100)
840
841 union mali_attr {
842 /* This is used for actual attributes. */
843 struct {
844 /* The bottom 3 bits are the mode */
845 mali_ptr elements : 64 - 8;
846 u32 shift : 5;
847 u32 extra_flags : 3;
848 u32 stride;
849 u32 size;
850 };
851 /* The entry after an NPOT_DIVIDE entry has this format. It stores
852 * extra information that wouldn't fit in a normal entry.
853 */
854 struct {
855 u32 unk; /* = 0x20 */
856 u32 magic_divisor;
857 u32 zero;
858 /* This is the original, GL-level divisor. */
859 u32 divisor;
860 };
861 } __attribute__((packed));
862
863 struct mali_attr_meta {
864 /* Vertex buffer index */
865 u8 index;
866
867 unsigned unknown1 : 2;
868 unsigned swizzle : 12;
869 enum mali_format format : 8;
870
871 /* Always observed to be zero at the moment */
872 unsigned unknown3 : 2;
873
874 /* When packing multiple attributes in a buffer, offset addresses by
875 * this value. Obscurely, this is signed. */
876 int32_t src_offset;
877 } __attribute__((packed));
878
879 #define FBD_MASK (~0x3f)
880
881 /* MFBD, rather than SFBD */
882 #define MALI_MFBD (0x1)
883
884 /* ORed into an MFBD address to specify the fbx section is included */
885 #define MALI_MFBD_TAG_EXTRA (0x2)
886
887 /* Uniform buffer objects are 64-bit fields divided as:
888 *
889 * u64 size : 10;
890 * mali_ptr ptr : 64 - 10;
891 *
892 * The size is actually the size minus 1 (MALI_POSITIVE), in units of 16 bytes.
893 * This gives a maximum of 2^14 bytes, which just so happens to be the GL
894 * minimum-maximum for GL_MAX_UNIFORM_BLOCK_SIZE.
895 *
896 * The pointer is missing the bottom 2 bits and top 8 bits. The top 8 bits
897 * should be 0 for userspace pointers, according to
898 * https://lwn.net/Articles/718895/. By reusing these bits, we can make each
899 * entry in the table only 64 bits.
900 */
901
902 #define MALI_MAKE_UBO(elements, ptr) \
903 (MALI_POSITIVE((elements)) | (((ptr) >> 2) << 10))
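
/* Worked example (illustrative; the helper name is not part of the driver):
 * packing one UBO entry from a nonzero size in bytes and a GPU address. The
 * math mirrors MALI_MAKE_UBO above, written out directly since MALI_POSITIVE
 * is only defined later in this header. */

static inline u64
pan_example_pack_ubo(u64 size_bytes, u64 gpu_va)
{
        /* Size in 16-byte units, rounded up, then stored off-by-one */
        u64 size_field = ((size_bytes + 15) / 16) - 1;

        /* The pointer drops its bottom 2 bits and overlaps the size field */
        return size_field | ((gpu_va >> 2) << 10);
}
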
904
905 /* On Bifrost, these fields are the same between the vertex and tiler payloads.
906 * They also seem to be the same between Bifrost and Midgard. They're shared in
907 * fused payloads.
908 */
909
910 /* Applies to unknown_draw */
911
912 #define MALI_DRAW_INDEXED_UINT8 (0x10)
913 #define MALI_DRAW_INDEXED_UINT16 (0x20)
914 #define MALI_DRAW_INDEXED_UINT32 (0x30)
915 #define MALI_DRAW_INDEXED_SIZE (0x30)
916 #define MALI_DRAW_INDEXED_SHIFT (4)
917
918 #define MALI_DRAW_VARYING_SIZE (0x100)
919
920 /* Set to use first vertex as the provoking vertex for flatshading. Clear to
921 * use the last vertex. This is the default in DX and VK, but not in GL. */
922
923 #define MALI_DRAW_FLATSHADE_FIRST (0x800)
924
925 #define MALI_DRAW_PRIMITIVE_RESTART_FIXED_INDEX (0x10000)
926
927 struct mali_vertex_tiler_prefix {
928 /* This is a dynamic bitfield containing the following things in this order:
929 *
930 * - gl_WorkGroupSize.x
931 * - gl_WorkGroupSize.y
932 * - gl_WorkGroupSize.z
933 * - gl_NumWorkGroups.x
934 * - gl_NumWorkGroups.y
935 * - gl_NumWorkGroups.z
936 *
937 * The number of bits allocated for each number is based on the *_shift
938 * fields below. For example, workgroups_y_shift gives the bit that
939 * gl_NumWorkGroups.y starts at, and workgroups_z_shift gives the bit
940 * that gl_NumWorkGroups.z starts at (and therefore one after the bit
941 * that gl_NumWorkGroups.y ends at). The actual value for each gl_*
942 * value is one more than the stored value, since if any of the values
943 * are zero, then there would be no invocations (and hence no job). If
944 * there were 0 bits allocated to a given field, then it must be zero,
945 * and hence the real value is one.
946 *
947 * Vertex jobs reuse the same job dispatch mechanism as compute jobs,
948 * effectively doing glDispatchCompute(1, vertex_count, instance_count)
949 * where vertex count is the number of vertices.
950 */
951 u32 invocation_count;
952
953 /* Bitfield for shifts:
954 *
955 * size_y_shift : 5
956 * size_z_shift : 5
957 * workgroups_x_shift : 6
958 * workgroups_y_shift : 6
959 * workgroups_z_shift : 6
960 * workgroups_x_shift_2 : 4
961 */
962 u32 invocation_shifts;
963
964 u32 draw_mode : 4;
965 u32 unknown_draw : 22;
966
967 /* This is the same as workgroups_x_shift_2 in compute shaders, but
968 * always 5 for vertex jobs and 6 for tiler jobs. I suspect this has
969 * something to do with how many quads get put in the same execution
970 * engine, which is a balance (you don't want to starve the engine, but
971 * you also want to distribute work evenly).
972 */
973 u32 workgroups_x_shift_3 : 6;
974
975
976 /* Negative of min_index. This is used to compute
977 * the unbiased index in tiler/fragment shader runs.
978 *
979 * The hardware adds offset_bias_correction in each run,
980 * so that absent an index bias, the first vertex processed is
981 * genuinely the first vertex (0). But with an index bias,
982 * the first vertex processed is numbered the same as the bias.
983 *
984 * To represent this more conveniently:
985 * unbiased_index = lower_bound_index +
986 * index_bias +
987 * offset_bias_correction
988 *
989 * This is done since the hardware doesn't accept an index_bias
990 * and this allows it to recover the unbiased index.
991 */
992 int32_t offset_bias_correction;
993 u32 zero1;
994
995 /* Like many other strictly nonzero quantities, index_count is stored
996 * minus one. For an indexed cube, this is equal to 35 = 6
997 * faces * 2 triangles/per face * 3 vertices/per triangle - 1. That is,
998 * for an indexed draw, index_count is the number of actual vertices
999 * rendered whereas invocation_count is the number of unique vertices
1000 * rendered (the number of times the vertex shader must be invoked).
1001 * For non-indexed draws, this is just equal to invocation_count. */
1002
1003 u32 index_count;
1004
1005 /* No hidden structure; literally just a pointer to an array of uint
1006 * indices (width depends on flags). Thanks, guys, for not making my
1007 * life insane for once! NULL for non-indexed draws. */
1008
1009 u64 indices;
1010 } __attribute__((packed));
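
/* A sketch of packing the invocation fields documented in
 * mali_vertex_tiler_prefix above, for the simple vertex-job case of
 * glDispatchCompute(1, vertex_count, instance_count). Helper names are
 * hypothetical, and workgroups_x_shift_2 is left at zero since its exact
 * meaning is not described above. */

static inline unsigned
pan_example_bits_for(u32 stored)
{
        /* Bits needed to hold an already off-by-one value */
        return stored ? (32 - __builtin_clz(stored)) : 0;
}

static inline void
pan_example_pack_vertex_invocations(struct mali_vertex_tiler_prefix *prefix,
                                    u32 vertex_count, u32 instance_count)
{
        /* gl_WorkGroupSize = (1, 1, 1) and gl_NumWorkGroups.x = 1 all pack
         * as zero-width fields, so every shift up to workgroups_y is 0 */
        unsigned workgroups_y_shift = 0;

        /* gl_NumWorkGroups.y = vertex_count, stored off-by-one */
        u32 packed = (vertex_count - 1) << workgroups_y_shift;
        unsigned workgroups_z_shift =
                workgroups_y_shift + pan_example_bits_for(vertex_count - 1);

        /* gl_NumWorkGroups.z = instance_count, stored off-by-one */
        packed |= (instance_count - 1) << workgroups_z_shift;

        prefix->invocation_count = packed;

        /* size_y/size_z/workgroups_x shifts are all zero here; the layout
         * follows the bitfield documented above */
        prefix->invocation_shifts = (workgroups_y_shift << 16) |
                                    (workgroups_z_shift << 22);
}
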
1011
1012 /* Point size / line width can either be specified as a 32-bit float (for
1013 * constant size) or as a [machine word size]-bit GPU pointer (for varying size). If a pointer
1014 * is selected, by setting the appropriate MALI_DRAW_VARYING_SIZE bit in the tiler
1015 * payload, the contents of varying_pointer will be interpreted as an array of
1016 * fp16 sizes, one for each vertex. gl_PointSize is therefore implemented by
1017 * creating a special MALI_R16F varying writing to varying_pointer. */
1018
1019 union midgard_primitive_size {
1020 float constant;
1021 u64 pointer;
1022 };
1023
1024 struct bifrost_vertex_only {
1025 u32 unk2; /* =0x2 */
1026
1027 u32 zero0;
1028
1029 u64 zero1;
1030 } __attribute__((packed));
1031
1032 struct bifrost_tiler_heap_meta {
1033 u32 zero;
1034 u32 heap_size;
1035 /* note: these are just guesses! */
1036 mali_ptr tiler_heap_start;
1037 mali_ptr tiler_heap_free;
1038 mali_ptr tiler_heap_end;
1039
1040 /* hierarchy weights? but they're still 0 after the job has run... */
1041 u32 zeros[12];
1042 } __attribute__((packed));
1043
1044 struct bifrost_tiler_meta {
1045 u64 zero0;
1046 u16 hierarchy_mask;
1047 u16 flags;
1048 u16 width;
1049 u16 height;
1050 u64 zero1;
1051 mali_ptr tiler_heap_meta;
1052 /* TODO what is this used for? */
1053 u64 zeros[20];
1054 } __attribute__((packed));
1055
1056 struct bifrost_tiler_only {
1057 /* 0x20 */
1058 union midgard_primitive_size primitive_size;
1059
1060 mali_ptr tiler_meta;
1061
1062 u64 zero1, zero2, zero3, zero4, zero5, zero6;
1063
1064 u32 gl_enables;
1065 u32 zero7;
1066 u64 zero8;
1067 } __attribute__((packed));
1068
1069 struct mali_vertex_tiler_postfix {
1070 /* Zero for vertex jobs. Pointer to the position (gl_Position) varying
1071 * output from the vertex shader for tiler jobs.
1072 */
1073
1074 u64 position_varying;
1075
1076 /* An array of mali_uniform_buffer_meta's. The size is given by the
1077 * shader_meta.
1078 */
1079 u64 uniform_buffers;
1080
1081 /* This is a pointer to an array of pointers to the texture
1082 * descriptors, number of pointers bounded by number of textures. The
1083 * indirection is needed to accommodate varying numbers and sizes of
1084 * texture descriptors */
1085 u64 texture_trampoline;
1086
1087 /* For OpenGL, from what I've seen, this is intimately connected to
1088 * texture_meta. cwabbott says this is not the case under Vulkan, hence
1089 * why this field is separate (Midgard is Vulkan capable). Pointer to
1090 * array of sampler descriptors (which are uniform in size) */
1091 u64 sampler_descriptor;
1092
1093 u64 uniforms;
1094 u64 shader;
1095 u64 attributes; /* struct attribute_buffer[] */
1096 u64 attribute_meta; /* attribute_meta[] */
1097 u64 varyings; /* struct attr */
1098 u64 varying_meta; /* pointer */
1099 u64 viewport;
1100 u64 occlusion_counter; /* A single bit as far as I can tell */
1101
1102 /* On Bifrost, this points directly to a mali_shared_memory structure.
1103 * On Midgard, this points to a framebuffer (either SFBD or MFBD as
1104 * tagged), which embeds a mali_shared_memory structure */
1105 mali_ptr shared_memory;
1106 } __attribute__((packed));
1107
1108 struct midgard_payload_vertex_tiler {
1109 struct mali_vertex_tiler_prefix prefix;
1110
1111 u16 gl_enables; // 0x5
1112
1113 /* Both zero for non-instanced draws. For instanced draws, a
1114 * decomposition of padded_num_vertices. See the comments about the
1115 * corresponding fields in mali_attr for context. */
1116
1117 unsigned instance_shift : 5;
1118 unsigned instance_odd : 3;
1119
1120 u8 zero4;
1121
1122 /* Offset for first vertex in buffer */
1123 u32 offset_start;
1124
1125 u64 zero5;
1126
1127 struct mali_vertex_tiler_postfix postfix;
1128
1129 union midgard_primitive_size primitive_size;
1130 } __attribute__((packed));
1131
1132 struct bifrost_payload_vertex {
1133 struct mali_vertex_tiler_prefix prefix;
1134 struct bifrost_vertex_only vertex;
1135 struct mali_vertex_tiler_postfix postfix;
1136 } __attribute__((packed));
1137
1138 struct bifrost_payload_tiler {
1139 struct mali_vertex_tiler_prefix prefix;
1140 struct bifrost_tiler_only tiler;
1141 struct mali_vertex_tiler_postfix postfix;
1142 } __attribute__((packed));
1143
1144 struct bifrost_payload_fused {
1145 struct mali_vertex_tiler_prefix prefix;
1146 struct bifrost_tiler_only tiler;
1147 struct mali_vertex_tiler_postfix tiler_postfix;
1148 u64 padding; /* zero */
1149 struct bifrost_vertex_only vertex;
1150 struct mali_vertex_tiler_postfix vertex_postfix;
1151 } __attribute__((packed));
1152
1153 /* Purposeful off-by-one in width, height fields. For example, a (64, 64)
1154 * texture is stored as (63, 63) in these fields. This adjusts for that.
1155 * There's an identical pattern in the framebuffer descriptor. Even vertex
1156 * count fields work this way, hence the generic name -- integral fields that
1157 * are strictly positive generally need this adjustment. */
1158
1159 #define MALI_POSITIVE(dim) (dim - 1)
1160
1161 /* Used with wrapping. Unclear what top bit conveys */
1162
1163 enum mali_wrap_mode {
1164 MALI_WRAP_REPEAT = 0x8 | 0x0,
1165 MALI_WRAP_CLAMP_TO_EDGE = 0x8 | 0x1,
1166 MALI_WRAP_CLAMP = 0x8 | 0x2,
1167 MALI_WRAP_CLAMP_TO_BORDER = 0x8 | 0x3,
1168 MALI_WRAP_MIRRORED_REPEAT = 0x8 | 0x4 | 0x0,
1169 MALI_WRAP_MIRRORED_CLAMP_TO_EDGE = 0x8 | 0x4 | 0x1,
1170 MALI_WRAP_MIRRORED_CLAMP = 0x8 | 0x4 | 0x2,
1171 MALI_WRAP_MIRRORED_CLAMP_TO_BORDER = 0x8 | 0x4 | 0x3,
1172 };
1173
1174 /* Shared across both command stream and Midgard, and even with Bifrost */
1175
1176 enum mali_texture_type {
1177 MALI_TEX_CUBE = 0x0,
1178 MALI_TEX_1D = 0x1,
1179 MALI_TEX_2D = 0x2,
1180 MALI_TEX_3D = 0x3
1181 };
1182
1183 /* 8192x8192 */
1184 #define MAX_MIP_LEVELS (13)
1185
1186 /* Cubemap bloats everything up */
1187 #define MAX_CUBE_FACES (6)
1188
1189 /* For each pointer, there is an address and optionally also a stride */
1190 #define MAX_ELEMENTS (2)
1191
1192 /* It's not known why there are 4 bits allocated -- this enum is almost
1193 * certainly incomplete */
1194
1195 enum mali_texture_layout {
1196 /* For a Z/S texture, this is linear */
1197 MALI_TEXTURE_TILED = 0x1,
1198
1199 /* Z/S textures cannot be tiled */
1200 MALI_TEXTURE_LINEAR = 0x2,
1201
1202 /* 16x16 sparse */
1203 MALI_TEXTURE_AFBC = 0xC
1204 };
1205
1206 /* Corresponds to the type passed to glTexImage2D and so forth */
1207
1208 struct mali_texture_format {
1209 unsigned swizzle : 12;
1210 enum mali_format format : 8;
1211
1212 unsigned srgb : 1;
1213 unsigned unknown1 : 1;
1214
1215 enum mali_texture_type type : 2;
1216 enum mali_texture_layout layout : 4;
1217
1218 /* Always set */
1219 unsigned unknown2 : 1;
1220
1221 /* Set to allow packing an explicit stride */
1222 unsigned manual_stride : 1;
1223
1224 unsigned zero : 2;
1225 } __attribute__((packed));
1226
1227 struct mali_texture_descriptor {
1228 uint16_t width;
1229 uint16_t height;
1230 uint16_t depth;
1231 uint16_t array_size;
1232
1233 struct mali_texture_format format;
1234
1235 uint16_t unknown3;
1236
1237 /* One for non-mipmapped, zero for mipmapped */
1238 uint8_t unknown3A;
1239
1240 /* Zero for non-mipmapped, (number of levels - 1) for mipmapped */
1241 uint8_t levels;
1242
1243 /* Swizzling is a single 32-bit word, broken up here for convenience.
1244 * Here, swizzling refers to the ES 3.0 texture parameters for channel
1245 * level swizzling, not the internal pixel-level swizzling which is
1246 * below OpenGL's reach */
1247
1248 unsigned swizzle : 12;
1249 unsigned swizzle_zero : 20;
1250
1251 uint32_t unknown5;
1252 uint32_t unknown6;
1253 uint32_t unknown7;
1254 } __attribute__((packed));
1255
1256 /* filter_mode */
1257
1258 #define MALI_SAMP_MAG_NEAREST (1 << 0)
1259 #define MALI_SAMP_MIN_NEAREST (1 << 1)
1260
1261 /* TODO: What do these bits mean individually? Only seen set together */
1262
1263 #define MALI_SAMP_MIP_LINEAR_1 (1 << 3)
1264 #define MALI_SAMP_MIP_LINEAR_2 (1 << 4)
1265
1266 /* Flag in filter_mode, corresponding to OpenCL's NORMALIZED_COORDS_TRUE
1267 * sampler_t flag. For typical OpenGL textures, this is always set. */
1268
1269 #define MALI_SAMP_NORM_COORDS (1 << 5)
1270
1271 /* Used for lod encoding. Thanks @urjaman for pointing out these routines can
1272 * be cleaned up a lot. */
1273
1274 #define DECODE_FIXED_16(x) ((float) (x / 256.0))
1275
1276 static inline int16_t
1277 FIXED_16(float x, bool allow_negative)
1278 {
1279 /* Clamp inputs, accounting for float error */
1280 float max_lod = (32.0 - (1.0 / 512.0));
1281 float min_lod = allow_negative ? -max_lod : 0.0;
1282
1283 x = ((x > max_lod) ? max_lod : ((x < min_lod) ? min_lod : x));
1284
1285 return (int) (x * 256.0);
1286 }
1287
1288 struct mali_sampler_descriptor {
1289 uint16_t filter_mode;
1290
1291 /* Fixed point, signed.
1292 * Upper 7 bits before the decimal point, although it caps [0-31].
1293 * Lower 8 bits after the decimal point: int(round(x * 256)) */
1294
1295 int16_t lod_bias;
1296 int16_t min_lod;
1297 int16_t max_lod;
1298
1299 /* All one word in reality, but packed a bit. Comparisons are flipped
1300 * from OpenGL. */
1301
1302 enum mali_wrap_mode wrap_s : 4;
1303 enum mali_wrap_mode wrap_t : 4;
1304 enum mali_wrap_mode wrap_r : 4;
1305 enum mali_func compare_func : 3;
1306
1307 /* No effect on 2D textures. For cubemaps, set for ES3 and clear for
1308 * ES2, controlling seamless cubemapping */
1309 unsigned seamless_cube_map : 1;
1310
1311 unsigned zero : 16;
1312
1313 uint32_t zero2;
1314 float border_color[4];
1315 } __attribute__((packed));
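
/* Example use of FIXED_16 above to fill the sampler LOD fields, assuming
 * GL-style float inputs; only lod_bias is allowed to be negative. The helper
 * name is illustrative. */

static inline void
pan_example_set_lod(struct mali_sampler_descriptor *sampler,
                    float bias, float min_lod, float max_lod)
{
        sampler->lod_bias = FIXED_16(bias, true);
        sampler->min_lod = FIXED_16(min_lod, false);
        sampler->max_lod = FIXED_16(max_lod, false);
}
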
1316
1317 /* viewport0/viewport1 form the arguments to glViewport. viewport1 is
1318 * modified by MALI_POSITIVE; viewport0 is as-is.
1319 */
1320
1321 struct mali_viewport {
1322 /* XY clipping planes */
1323 float clip_minx;
1324 float clip_miny;
1325 float clip_maxx;
1326 float clip_maxy;
1327
1328 /* Depth clipping planes */
1329 float clip_minz;
1330 float clip_maxz;
1331
1332 u16 viewport0[2];
1333 u16 viewport1[2];
1334 } __attribute__((packed));
1335
1336 /* From presentations, 16x16 tiles externally. Use shift for fast computation
1337 * of tile numbers. */
1338
1339 #define MALI_TILE_SHIFT 4
1340 #define MALI_TILE_LENGTH (1 << MALI_TILE_SHIFT)
1341
1342 /* Tile coordinates are stored as a compact u32, as only 12 bits are needed for
1343 * each component. Notice that this provides a theoretical upper bound of (1 <<
1344 * 12) = 4096 tiles in each direction, addressing a maximum framebuffer of size
1345 * 65536x65536. Multiplying that together, times another four given that Mali
1346 * framebuffers are 32-bit ARGB8888, means that this upper bound would take 16
1347 * gigabytes of RAM just to store the uncompressed framebuffer itself, let
1348 * alone rendering in real-time to such a buffer.
1349 *
1350 * Nice job, guys.*/
1351
1352 /* From mali_kbase_10969_workaround.c */
1353 #define MALI_X_COORD_MASK 0x00000FFF
1354 #define MALI_Y_COORD_MASK 0x0FFF0000
1355
1356 /* Extract parts of a tile coordinate */
1357
1358 #define MALI_TILE_COORD_X(coord) ((coord) & MALI_X_COORD_MASK)
1359 #define MALI_TILE_COORD_Y(coord) (((coord) & MALI_Y_COORD_MASK) >> 16)
1360
1361 /* Helpers to generate tile coordinates based on the boundary coordinates in
1362 * screen space. So, with the bounds (0, 0) to (128, 128) for the screen, these
1363 * functions would convert it to the bounding tiles (0, 0) to (7, 7).
1364 * Intentional "off-by-one"; finding the tile number is a form of fencepost
1365 * problem. */
1366
1367 #define MALI_MAKE_TILE_COORDS(X, Y) ((X) | ((Y) << 16))
1368 #define MALI_BOUND_TO_TILE(B, bias) ((B - bias) >> MALI_TILE_SHIFT)
1369 #define MALI_COORDINATE_TO_TILE(W, H, bias) MALI_MAKE_TILE_COORDS(MALI_BOUND_TO_TILE(W, bias), MALI_BOUND_TO_TILE(H, bias))
1370 #define MALI_COORDINATE_TO_TILE_MIN(W, H) MALI_COORDINATE_TO_TILE(W, H, 0)
1371 #define MALI_COORDINATE_TO_TILE_MAX(W, H) MALI_COORDINATE_TO_TILE(W, H, 1)
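
/* Illustrative helper (name hypothetical) producing the fragment-job tile
 * bounds for a width x height framebuffer, matching the (0, 0)-(128, 128) ->
 * (0, 0)-(7, 7) example above. */

static inline void
pan_example_tile_bounds(unsigned width, unsigned height,
                        u32 *min_tile, u32 *max_tile)
{
        *min_tile = MALI_COORDINATE_TO_TILE_MIN(0, 0);
        *max_tile = MALI_COORDINATE_TO_TILE_MAX(width, height);
}
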
1372
1373 struct mali_payload_fragment {
1374 u32 min_tile_coord;
1375 u32 max_tile_coord;
1376 mali_ptr framebuffer;
1377 } __attribute__((packed));
1378
1379 /* Single Framebuffer Descriptor */
1380
1381 /* Flags apply to format. With just MSAA_A and MSAA_B, the framebuffer is
1382 * configured for 4x. With MSAA_8, it is configured for 8x. */
1383
1384 #define MALI_SFBD_FORMAT_MSAA_8 (1 << 3)
1385 #define MALI_SFBD_FORMAT_MSAA_A (1 << 4)
1386 #define MALI_SFBD_FORMAT_MSAA_B (1 << 4)
1387 #define MALI_SFBD_FORMAT_SRGB (1 << 5)
1388
1389 /* Fast/slow based on whether all three buffers are cleared at once */
1390
1391 #define MALI_CLEAR_FAST (1 << 18)
1392 #define MALI_CLEAR_SLOW (1 << 28)
1393 #define MALI_CLEAR_SLOW_STENCIL (1 << 31)
1394
1395 /* Configures hierarchical tiling on Midgard for both SFBD/MFBD (embedded
1396 * within the larger framebuffer descriptor). Analogous to
1397 * bifrost_tiler_heap_meta and bifrost_tiler_meta */
1398
1399 /* See pan_tiler.c for derivation */
1400 #define MALI_HIERARCHY_MASK ((1 << 9) - 1)
1401
1402 /* Flag disabling the tiler for clear-only jobs, with
1403 hierarchical tiling */
1404 #define MALI_TILER_DISABLED (1 << 12)
1405
1406 /* Flag selecting userspace-generated polygon list, for clear-only jobs without
1407 * hierarchical tiling. */
1408 #define MALI_TILER_USER 0xFFF
1409
1410 /* Absent any geometry, the minimum size of the polygon list header */
1411 #define MALI_TILER_MINIMUM_HEADER_SIZE 0x200
1412
1413 struct midgard_tiler_descriptor {
1414 /* Size of the entire polygon list; see pan_tiler.c for the
1415 * computation. It's based on hierarchical tiling */
1416
1417 u32 polygon_list_size;
1418
1419 /* Name known from the replay workaround in the kernel. What exactly is
1420 * flagged here is less known. We do know that (tiler_hierarchy_mask & 0x1ff)
1421 * specifies a mask of hierarchy weights, which explains some of the
1422 * performance mysteries around setting it. We also see the bottom bit
1423 * of tiler_flags set in the kernel, but no comment why.
1424 *
1425 * hierarchy_mask can have the TILER_DISABLED flag */
1426
1427 u16 hierarchy_mask;
1428 u16 flags;
1429
1430 /* See mali_tiler.c for an explanation */
1431 mali_ptr polygon_list;
1432 mali_ptr polygon_list_body;
1433
1434 /* Names based on the symmetry we see with replay jobs, which name these
1435 * explicitly */
1436
1437 mali_ptr heap_start; /* tiler heap_free_address */
1438 mali_ptr heap_end;
1439
1440 /* Hierarchy weights. We know these are weights based on the kernel,
1441 * but I've never seen them be anything other than zero */
1442 u32 weights[8];
1443 };
1444
1445 enum mali_block_format {
1446 MALI_BLOCK_TILED = 0x0,
1447 MALI_BLOCK_UNKNOWN = 0x1,
1448 MALI_BLOCK_LINEAR = 0x2,
1449 MALI_BLOCK_AFBC = 0x3,
1450 };
1451
1452 struct mali_sfbd_format {
1453 /* 0x1 */
1454 unsigned unk1 : 6;
1455
1456 /* mali_channel_swizzle */
1457 unsigned swizzle : 12;
1458
1459 /* MALI_POSITIVE */
1460 unsigned nr_channels : 2;
1461
1462 /* 0x4 */
1463 unsigned unk2 : 6;
1464
1465 enum mali_block_format block : 2;
1466
1467 /* 0xb */
1468 unsigned unk3 : 4;
1469 };
1470
1471 /* Shared structure at the start of framebuffer descriptors, or used bare for
1472 * compute jobs, configuring stack and shared memory */
1473
1474 struct mali_shared_memory {
1475 u32 stack_shift : 4;
1476 u32 unk0 : 28;
1477
1478 /* Configuration for shared memory for compute shaders.
1479 * shared_workgroup_count is logarithmic and may be computed for a
1480 * compute shader using shared memory as:
1481 *
1482 * shared_workgroup_count = MAX2(ceil(log2(count_x)) + ... + ceil(log2(count_z)), 10)
1483 *
1484 * For compute shaders that don't use shared memory, or non-compute
1485 * shaders, this is set to ~0
1486 */
1487
1488 u32 shared_workgroup_count : 5;
1489 u32 shared_unk1 : 3;
1490 u32 shared_shift : 4;
1491 u32 shared_zero : 20;
1492
1493 mali_ptr scratchpad;
1494
1495 /* For compute shaders, the RAM backing of workgroup-shared memory. For
1496 * fragment shaders on Bifrost, apparently multisampling locations */
1497
1498 mali_ptr shared_memory;
1499 mali_ptr unknown1;
1500 } __attribute__((packed));
1501
1502 /* Configures multisampling on Bifrost fragment jobs */
1503
1504 struct bifrost_multisampling {
1505 u64 zero1;
1506 u64 zero2;
1507 mali_ptr sample_locations;
1508 u64 zero4;
1509 } __attribute__((packed));
1510
1511 struct mali_single_framebuffer {
1512 struct mali_shared_memory shared_memory;
1513 struct mali_sfbd_format format;
1514
1515 u32 clear_flags;
1516 u32 zero2;
1517
1518 /* Purposeful off-by-one in these fields should be accounted for by the
1519 * MALI_POSITIVE macro */
1520
1521 u16 width;
1522 u16 height;
1523
1524 u32 zero3[4];
1525 mali_ptr checksum;
1526 u32 checksum_stride;
1527 u32 zero5;
1528
1529 /* By default, the framebuffer is upside down from OpenGL's
1530 * perspective. Set framebuffer to the end and negate the stride to
1531 * flip in the Y direction */
1532
1533 mali_ptr framebuffer;
1534 int32_t stride;
1535
1536 u32 zero4;
1537
1538 /* Depth and stencil buffers are interleaved, it appears, as they are
1539 * set to the same address in captures. Both fields set to zero if the
1540 * buffer is not being cleared. Depending on GL_ENABLE magic, you might
1541 * get a zero enable despite the buffer being present; that still is
1542 * disabled. */
1543
1544 mali_ptr depth_buffer; // not SAME_VA
1545 u32 depth_stride_zero : 4;
1546 u32 depth_stride : 28;
1547 u32 zero7;
1548
1549 mali_ptr stencil_buffer; // not SAME_VA
1550 u32 stencil_stride_zero : 4;
1551 u32 stencil_stride : 28;
1552 u32 zero8;
1553
1554 u32 clear_color_1; // RGBA8888 from glClear, actually used by hardware
1555 u32 clear_color_2; // always equal, but unclear function?
1556 u32 clear_color_3; // always equal, but unclear function?
1557 u32 clear_color_4; // always equal, but unclear function?
1558
1559 /* Set to zero if not cleared */
1560
1561 float clear_depth_1; // float32, ditto
1562 float clear_depth_2; // float32, ditto
1563 float clear_depth_3; // float32, ditto
1564 float clear_depth_4; // float32, ditto
1565
1566 u32 clear_stencil; // Exactly as it appears in OpenGL
1567
1568 u32 zero6[7];
1569
1570 struct midgard_tiler_descriptor tiler;
1571
1572 /* More below this, maybe */
1573 } __attribute__((packed));
1574
1575 /* Format bits for the render target flags */
1576
1577 #define MALI_MFBD_FORMAT_MSAA (1 << 1)
1578 #define MALI_MFBD_FORMAT_SRGB (1 << 2)
1579
1580 struct mali_rt_format {
1581 unsigned unk1 : 32;
1582 unsigned unk2 : 3;
1583
1584 unsigned nr_channels : 2; /* MALI_POSITIVE */
1585
1586 unsigned unk3 : 5;
1587 enum mali_block_format block : 2;
1588 unsigned flags : 4;
1589
1590 unsigned swizzle : 12;
1591
1592 unsigned zero : 3;
1593
1594 /* Disables MFBD preload. When this bit is set, the render target will
1595 * be cleared every frame. When this bit is clear, the hardware will
1596 * automatically wallpaper the render target back from main memory.
1597 * Unfortunately, MFBD preload is very broken on Midgard, so in
1598 * practice, this is a chicken bit that should always be set.
1599 * Discovered by accident, as all good chicken bits are. */
1600
1601 unsigned no_preload : 1;
1602 } __attribute__((packed));
1603
1604 struct mali_render_target {
1605 struct mali_rt_format format;
1606
1607 u64 zero1;
1608
1609 struct {
1610 /* Stuff related to ARM Framebuffer Compression. When AFBC is enabled,
1611 * there is an extra metadata buffer that contains 16 bytes per tile.
1612 * The framebuffer needs to be the same size as before, since we don't
1613 * know ahead of time how much space it will take up. The
1614 * framebuffer_stride is set to 0, since the data isn't stored linearly
1615 * anymore.
1616 *
1617 * When AFBC is disabled, these fields are zero.
1618 */
1619
1620 mali_ptr metadata;
1621 u32 stride; // stride in units of tiles
1622 u32 unk; // = 0x20000
1623 } afbc;
1624
1625 mali_ptr framebuffer;
1626
1627 u32 zero2 : 4;
1628 u32 framebuffer_stride : 28; // in units of bytes
1629 u32 zero3;
1630
1631 u32 clear_color_1; // RGBA8888 from glClear, actually used by hardware
1632 u32 clear_color_2; // always equal, but unclear function?
1633 u32 clear_color_3; // always equal, but unclear function?
1634 u32 clear_color_4; // always equal, but unclear function?
1635 } __attribute__((packed));
1636
1637 /* An optional part of mali_framebuffer. It comes between the main structure
1638 * and the array of render targets. It must be included if any of these are
1639 * enabled:
1640 *
1641 * - Transaction Elimination
1642 * - Depth/stencil
1643 * - TODO: Anything else?
1644 */
1645
1646 /* flags_hi */
1647 #define MALI_EXTRA_PRESENT (0x10)
1648
1649 /* flags_lo */
1650 #define MALI_EXTRA_ZS (0x4)
1651
1652 struct mali_framebuffer_extra {
1653 mali_ptr checksum;
1654 /* Each tile has an 8 byte checksum, so the stride is "width in tiles * 8" */
1655 u32 checksum_stride;
1656
1657 unsigned flags_lo : 4;
1658 enum mali_block_format zs_block : 2;
1659 unsigned flags_hi : 26;
1660
1661 union {
1662 /* Note: AFBC is only allowed for 24/8 combined depth/stencil. */
1663 struct {
1664 mali_ptr depth_stencil_afbc_metadata;
1665 u32 depth_stencil_afbc_stride; // in units of tiles
1666 u32 zero1;
1667
1668 mali_ptr depth_stencil;
1669
1670 u64 padding;
1671 } ds_afbc;
1672
1673 struct {
1674 /* Depth becomes depth/stencil in case of combined D/S */
1675 mali_ptr depth;
1676 u32 depth_stride_zero : 4;
1677 u32 depth_stride : 28;
1678 u32 zero1;
1679
1680 mali_ptr stencil;
1681 u32 stencil_stride_zero : 4;
1682 u32 stencil_stride : 28;
1683 u32 zero2;
1684 } ds_linear;
1685 };
1686
1687
1688 u64 zero3, zero4;
1689 } __attribute__((packed));
1690
1691 /* Flags for mfbd_flags */
1692
1693 /* Enables writing depth results back to main memory (rather than keeping them
1694 * on-chip in the tile buffer and then discarding) */
1695
1696 #define MALI_MFBD_DEPTH_WRITE (1 << 10)
1697
1698 /* The MFBD contains the extra mali_framebuffer_extra section */
1699
1700 #define MALI_MFBD_EXTRA (1 << 13)
1701
1702 struct mali_framebuffer {
1703 union {
1704 struct mali_shared_memory shared_memory;
1705 struct bifrost_multisampling msaa;
1706 };
1707
1708 /* 0x20 */
1709 u16 width1, height1;
1710 u32 zero3;
1711 u16 width2, height2;
1712 u32 unk1 : 19; // = 0x01000
1713 u32 rt_count_1 : 2; // off-by-one (use MALI_POSITIVE)
1714 u32 unk2 : 3; // = 0
1715 u32 rt_count_2 : 3; // no off-by-one
1716 u32 zero4 : 5;
1717 /* 0x30 */
1718 u32 clear_stencil : 8;
1719 u32 mfbd_flags : 24; // = 0x100
1720 float clear_depth;
1721
1722 struct midgard_tiler_descriptor tiler;
1723
1724 /* optional: struct mali_framebuffer_extra extra */
1725 /* struct mali_render_target rts[] */
1726 } __attribute__((packed));
1727
1728 #endif /* __PANFROST_JOB_H__ */