3f227fdc8b7434ac4c1886a711f119f07f62a5cb
[mesa.git] / src / panfrost / include / panfrost-job.h
1 /*
2 * © Copyright 2017-2018 Alyssa Rosenzweig
3 * © Copyright 2017-2018 Connor Abbott
4 * © Copyright 2017-2018 Lyude Paul
 * © Copyright 2019 Collabora, Ltd.
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the "Software"),
9 * to deal in the Software without restriction, including without limitation
10 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
11 * and/or sell copies of the Software, and to permit persons to whom the
12 * Software is furnished to do so, subject to the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the next
15 * paragraph) shall be included in all copies or substantial portions of the
16 * Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
21 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
23 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
24 * SOFTWARE.
25 *
26 */
27
#ifndef __PANFROST_JOB_H__
#define __PANFROST_JOB_H__

#include <stdint.h>
#include <stdbool.h>
#include <inttypes.h>

/* Kernel-style shorthands for the fixed-width integer types, used
 * throughout the hardware descriptor definitions below. */
typedef uint8_t u8;
typedef uint16_t u16;
typedef uint32_t u32;
typedef uint64_t u64;

/* A GPU virtual address. The GPU address space is 64-bit. */
typedef uint64_t mali_ptr;
40
/* Applies to tiler_gl_enables */

#define MALI_OCCLUSION_QUERY    (1 << 3)
#define MALI_OCCLUSION_PRECISE  (1 << 4)

/* Set for a glFrontFace(GL_CCW) in a Y=0=TOP coordinate system (like Gallium).
 * In OpenGL, this would correspond to glFrontFace(GL_CW). Mesa and the blob
 * disagree about how to do viewport flipping, so the blob actually sets this
 * for GL_CW but then has a negative viewport stride */

#define MALI_FRONT_CCW_TOP      (1 << 5)

#define MALI_CULL_FACE_FRONT    (1 << 6)
#define MALI_CULL_FACE_BACK     (1 << 7)
55
/* Flags apply to unknown2_3? */

#define MALI_HAS_MSAA           (1 << 0)

/* Execute fragment shader per-sample if set (e.g. to implement gl_SampleID
 * reads) */
#define MALI_PER_SAMPLE         (1 << 2)
#define MALI_CAN_DISCARD        (1 << 5)

/* Applies on SFBD systems, specifying that programmable blending is in use */
#define MALI_HAS_BLEND_SHADER   (1 << 6)

/* func is mali_func. Arguments are fully parenthesized so the macros expand
 * safely when passed compound expressions. */
#define MALI_DEPTH_FUNC(func)      ((func) << 8)
#define MALI_GET_DEPTH_FUNC(flags) (((flags) >> 8) & 0x7)
#define MALI_DEPTH_FUNC_MASK       MALI_DEPTH_FUNC(0x7)

#define MALI_DEPTH_WRITEMASK    (1 << 11)

#define MALI_DEPTH_CLIP_NEAR    (1 << 12)
#define MALI_DEPTH_CLIP_FAR     (1 << 13)

/* Next flags to unknown2_4 */
#define MALI_STENCIL_TEST       (1 << 0)

#define MALI_ALPHA_TO_COVERAGE  (1 << 1)

#define MALI_NO_DITHER          (1 << 9)
#define MALI_DEPTH_RANGE_A      (1 << 12)
#define MALI_DEPTH_RANGE_B      (1 << 13)
#define MALI_NO_MSAA            (1 << 14)

/* Write-mask bits, matching glColorMask argument order */
#define MALI_MASK_R (1 << 0)
#define MALI_MASK_G (1 << 1)
#define MALI_MASK_B (1 << 2)
#define MALI_MASK_A (1 << 3)
92
/* Factor applied to the non-dominant blend operand: either mirror the
 * dominant factor or force zero (see struct mali_blend_mode). */
enum mali_nondominant_mode {
        MALI_BLEND_NON_MIRROR = 0,
        MALI_BLEND_NON_ZERO = 1
};
97
/* Selects which operand (source or destination) is the "dominant" one in
 * the fixed-function blend equation (see struct mali_blend_mode). */
enum mali_dominant_blend {
        MALI_BLEND_DOM_SOURCE = 0,
        MALI_BLEND_DOM_DESTINATION = 1
};
102
/* Blend factor applied to the dominant operand (see struct mali_blend_mode).
 * Names follow the GL blend factors; UNK values have not been observed
 * decoded. */
enum mali_dominant_factor {
        MALI_DOMINANT_UNK0 = 0,
        MALI_DOMINANT_ZERO = 1,
        MALI_DOMINANT_SRC_COLOR = 2,
        MALI_DOMINANT_DST_COLOR = 3,
        MALI_DOMINANT_UNK4 = 4,
        MALI_DOMINANT_SRC_ALPHA = 5,
        MALI_DOMINANT_DST_ALPHA = 6,
        MALI_DOMINANT_CONSTANT = 7,
};
113
/* Modifier for the blend "clip" behaviour (see struct mali_blend_mode
 * clip_modifier field). */
enum mali_blend_modifier {
        MALI_BLEND_MOD_UNK0 = 0,
        MALI_BLEND_MOD_NORMAL = 1,
        MALI_BLEND_MOD_SOURCE_ONE = 2,
        MALI_BLEND_MOD_DEST_ONE = 3,
};
120
/* One half (RGB or alpha) of the fixed-function blend equation, packed into
 * 12 bits. Stored in mali_blend_equation's rgb_mode/alpha_mode fields.
 * Field widths/order are hardware layout -- do not reorder. */
struct mali_blend_mode {
        enum mali_blend_modifier clip_modifier : 2;
        unsigned unused_0 : 1;
        unsigned negate_source : 1;

        /* Which operand carries the full factor below */
        enum mali_dominant_blend dominant : 1;

        /* Restricted factor for the non-dominant operand */
        enum mali_nondominant_mode nondominant_mode : 1;

        unsigned unused_1 : 1;

        unsigned negate_dest : 1;

        /* Factor for the dominant operand, optionally complemented (1 - x) */
        enum mali_dominant_factor dominant_factor : 3;
        unsigned complement_dominant : 1;
} __attribute__((packed));
137
/* Complete fixed-function blend state for one render target: a 12-bit
 * mali_blend_mode for each of RGB and alpha, plus the colour write mask. */
struct mali_blend_equation {
        /* Of type mali_blend_mode */
        unsigned rgb_mode : 12;
        unsigned alpha_mode : 12;

        unsigned zero1 : 4;

        /* Corresponds to MALI_MASK_* above and glColorMask arguments */

        unsigned color_mask : 4;
} __attribute__((packed));
149
/* Compressed per-pixel formats. Each of these formats expands to one to four
 * floating-point or integer numbers, as defined by the OpenGL specification.
 * There are various places in OpenGL where the user can specify a compressed
 * format in memory, which all use the same 8-bit enum in the various
 * descriptors, although different hardware units support different formats.
 */

/* The top 3 bits specify how the bits of each component are interpreted. */

/* e.g. ETC2_RGB8 */
#define MALI_FORMAT_COMPRESSED (0 << 5)

/* e.g. R11F_G11F_B10F */
#define MALI_FORMAT_SPECIAL (2 << 5)

/* signed normalized, e.g. RGBA8_SNORM */
#define MALI_FORMAT_SNORM (3 << 5)

/* e.g. RGBA8UI */
#define MALI_FORMAT_UINT (4 << 5)

/* e.g. RGBA8 and RGBA32F */
#define MALI_FORMAT_UNORM (5 << 5)

/* e.g. RGBA8I and RGBA16F */
#define MALI_FORMAT_SINT (6 << 5)

/* These formats seem to largely duplicate the others. They're used at least
 * for Bifrost framebuffer output.
 */
#define MALI_FORMAT_SPECIAL2 (7 << 5)
#define MALI_EXTRACT_TYPE(fmt) ((fmt) & 0xe0)

/* If the high 3 bits are 3 to 6 these two bits say how many components
 * there are. Arguments parenthesized for safe expansion of compound
 * expressions.
 */
#define MALI_NR_CHANNELS(n) (((n) - 1) << 3)
#define MALI_EXTRACT_CHANNELS(fmt) ((((fmt) >> 3) & 3) + 1)

/* If the high 3 bits are 3 to 6, then the low 3 bits say how big each
 * component is, except the special MALI_CHANNEL_FLOAT which overrides what the
 * bits mean.
 */

#define MALI_CHANNEL_4 2

#define MALI_CHANNEL_8 3

#define MALI_CHANNEL_16 4

#define MALI_CHANNEL_32 5

/* For MALI_FORMAT_SINT it means a half-float (e.g. RG16F). For
 * MALI_FORMAT_UNORM, it means a 32-bit float.
 */
#define MALI_CHANNEL_FLOAT 7
#define MALI_EXTRACT_BITS(fmt) ((fmt) & 0x7)
207
/* Applies to midgard1.flags_lo */

/* Should be set when the fragment shader updates the depth value. */
#define MALI_WRITES_Z (1 << 4)

/* Should the hardware perform early-Z testing? Set if the shader does not use
 * discard, alpha-to-coverage, shader depth writes, and if the shader has no
 * side effects (writes to global memory or images) unless early-z testing is
 * forced in the shader.
 */

#define MALI_EARLY_Z (1 << 6)

/* Should the hardware calculate derivatives (via helper invocations)? Set in a
 * fragment shader that uses texturing or derivative functions */

#define MALI_HELPER_INVOCATIONS (1 << 7)

/* Flags denoting the fragment shader's use of tilebuffer readback. If the
 * shader might read any part of the tilebuffer, set MALI_READS_TILEBUFFER. If
 * it might read depth/stencil in particular, also set MALI_READS_ZS */

#define MALI_READS_ZS (1 << 8)

/* The shader might write to global memory (via OpenCL, SSBOs, or images).
 * Reading is okay, as are ordinary writes to the tilebuffer/varyings. Setting
 * incurs a performance penalty. On a fragment shader, this bit implies there
 * are side effects, hence it interacts with early-z. */
#define MALI_WRITES_GLOBAL (1 << 9)

#define MALI_READS_TILEBUFFER (1 << 10)

/* Applies to midgard1.flags_hi */

/* Should be set when the fragment shader updates the stencil value. */
#define MALI_WRITES_S (1 << 2)

/* Mode to suppress generation of Infinity and NaN values by clamping inf
 * (-inf) to MAX_FLOAT (-MIN_FLOAT) and flushing NaN to 0.0
 *
 * Compare suppress_inf/suppress_nan flags on the Bifrost clause header for the
 * same functionality.
 *
 * This is not conformant on GLES3 or OpenCL, but is optional on GLES2, where
 * it works around app bugs (e.g. in glmark2-es2 -bterrain with FP16).
 */
#define MALI_SUPPRESS_INF_NAN (1 << 3)

/* Flags for bifrost1.unk1 */

/* Shader uses less than 32 registers, partitioned as [R0, R15] U [R48, R63],
 * allowing for full thread count. If clear, the full [R0, R63] register set is
 * available at half thread count */
#define MALI_BIFROST_FULL_THREAD (1 << 9)

/* Enable early-z testing (presumably). This flag may not be set if the shader:
 *
 *  - Uses blending
 *  - Uses discard
 *  - Writes gl_FragDepth
 *
 * This differs from Midgard which sets the MALI_EARLY_Z flag even with
 * blending, although I've begun to suspect that flag does not in fact enable
 * EARLY_Z alone. */
#define MALI_BIFROST_EARLY_Z (1 << 15)

/* First clause type is ATEST */
#define MALI_BIFROST_FIRST_ATEST (1 << 26)
276
/* The raw Midgard blend payload can either be an equation or a shader
 * address, depending on the context (MALI_HAS_BLEND_SHADER selects which
 * interpretation applies on SFBD systems) */

union midgard_blend {
        /* GPU address of a blend shader (programmable blending) */
        mali_ptr shader;

        /* Fixed-function blend state plus the GL blend constant */
        struct {
                struct mali_blend_equation equation;
                float constant;
        };
};
288
/* Per-render-target blend descriptor for MRT-capable Midgard hardware */
struct midgard_blend_rt {
        struct mali_blend_flags_packed flags;
        u32 zero;
        union midgard_blend blend;
} __attribute__((packed));
294
/* On Bifrost systems (all MRT), each render target gets one of these
 * descriptors */

/* Type of the shader output variable for a render target, as stored in the
 * shader_type bitfield of bifrost_blend_rt */
enum bifrost_shader_type {
        BIFROST_BLEND_F16 = 0,
        BIFROST_BLEND_F32 = 1,
        BIFROST_BLEND_I32 = 2,
        BIFROST_BLEND_U32 = 3,
        BIFROST_BLEND_I16 = 4,
        BIFROST_BLEND_U16 = 5,
};

#define BIFROST_MAX_RENDER_TARGET_COUNT 8
308
/* Per-render-target blend descriptor on Bifrost. Field widths/order are
 * hardware layout -- do not reorder. */
struct bifrost_blend_rt {
        /* This is likely an analogue of the flags on
         * midgard_blend_rt */

        u16 flags; // = 0x200

        /* Single-channel blend constants are encoded in a sort of
         * fixed-point. Basically, the float is mapped to a byte, becoming
         * a high byte, and then the lower-byte is added for precision.
         * For the original float f:
         *
         * f = (constant_hi / 255) + (constant_lo / 65535)
         *
         * constant_hi = int(f * 255)
         * constant_lo = 65535*f - (65535/255) * constant_hi
         */
        u16 constant;

        struct mali_blend_equation equation;

        /*
         * - 0x19 normally
         * - 0x3 when this slot is unused (everything else is 0 except the index)
         * - 0x11 when this is the fourth slot (and it's used)
         * - 0 when there is a blend shader
         */
        u16 unk2;

        /* increments from 0 to 3 */
        u16 index;

        union {
                struct {
                        /* So far, I've only seen:
                         * - R001 for 1-component formats
                         * - RG01 for 2-component formats
                         * - RGB1 for 3-component formats
                         * - RGBA for 4-component formats
                         */
                        u32 swizzle : 12;
                        enum mali_format format : 8;

                        /* Type of the shader output variable. Note, this can
                         * be different from the format.
                         * enum bifrost_shader_type
                         */
                        u32 zero1 : 4;
                        u32 shader_type : 3;
                        u32 zero2 : 5;
                };

                /* Only the low 32 bits of the blend shader are stored, the
                 * high 32 bits are implicitly the same as the original shader.
                 * According to the kernel driver, the program counter for
                 * shaders is actually only 24 bits, so shaders cannot cross
                 * the 2^24-byte boundary, and neither can the blend shader.
                 * The blob handles this by allocating a 2^24 byte pool for
                 * shaders, and making sure that any blend shaders are stored
                 * in the same pool as the original shader. The kernel will
                 * make sure this allocation is aligned to 2^24 bytes.
                 */
                u32 shader;
        };
} __attribute__((packed));
373
/* Descriptor for the shader. Following this is at least one, up to four blend
 * descriptors for each active render target */

struct mali_shader_meta {
        /* GPU address of the shader program */
        mali_ptr shader;
        u16 sampler_count;
        u16 texture_count;
        u16 attribute_count;
        u16 varying_count;

        /* Architecture-specific first control word */
        union {
                struct {
                        u32 uniform_buffer_count : 4;
                        u32 unk1 : 28; // = 0x800000 for vertex, 0x958020 for tiler
                } bifrost1;
                struct {
                        unsigned uniform_buffer_count : 4;
                        unsigned flags_lo : 12;

                        /* vec4 units */
                        unsigned work_count : 5;
                        unsigned uniform_count : 5;
                        unsigned flags_hi : 6;
                } midgard1;
        };

        /* Same as glPolygonOffset() arguments */
        float depth_units;
        float depth_factor;

        u32 unknown2_2;

        /* Generated from SAMPLE_COVERAGE_VALUE and SAMPLE_COVERAGE_INVERT. See
         * 13.8.3 ("Multisample Fragment Operations") in the OpenGL ES 3.2
         * specification. Only matters when multisampling is enabled. */
        u16 coverage_mask;

        u16 unknown2_3;

        u8 stencil_mask_front;
        u8 stencil_mask_back;
        u16 unknown2_4;

        struct mali_stencil_packed stencil_front;
        struct mali_stencil_packed stencil_back;

        /* Architecture-specific second control word */
        union {
                struct {
                        u32 unk3 : 7;
                        /* On Bifrost, some system values are preloaded in
                         * registers R55-R62 by the thread dispatcher prior to
                         * the start of shader execution. This is a bitfield
                         * with one entry for each register saying which
                         * registers need to be preloaded. Right now, the known
                         * values are:
                         *
                         * Vertex/compute:
                         * - R55 : gl_LocalInvocationID.xy
                         * - R56 : gl_LocalInvocationID.z + unknown in high 16 bits
                         * - R57 : gl_WorkGroupID.x
                         * - R58 : gl_WorkGroupID.y
                         * - R59 : gl_WorkGroupID.z
                         * - R60 : gl_GlobalInvocationID.x
                         * - R61 : gl_GlobalInvocationID.y/gl_VertexID (without base)
                         * - R62 : gl_GlobalInvocationID.z/gl_InstanceID (without base)
                         *
                         * Fragment:
                         * - R55 : unknown, never seen (but the bit for this is
                         *   always set?)
                         * - R56 : unknown (bit always unset)
                         * - R57 : gl_PrimitiveID
                         * - R58 : gl_FrontFacing in low bit, potentially other stuff
                         * - R59 : u16 fragment coordinates (used to compute
                         *   gl_FragCoord.xy, together with sample positions)
                         * - R60 : gl_SampleMask (used in epilog, so pretty
                         *   much always used, but the bit is always 0 -- is
                         *   this just always pushed?)
                         * - R61 : gl_SampleMaskIn and gl_SampleID, used by
                         *   varying interpolation.
                         * - R62 : unknown (bit always unset).
                         *
                         * Later GPUs (starting with Mali-G52?) support
                         * preloading float varyings into r0-r7. This is
                         * indicated by setting 0x40. There is no distinction
                         * here between 1 varying and 2.
                         */
                        u32 preload_regs : 8;
                        /* In units of 8 bytes or 64 bits, since the
                         * uniform/const port loads 64 bits at a time.
                         */
                        u32 uniform_count : 7;
                        u32 unk4 : 10; // = 2
                } bifrost2;
                struct {
                        u32 unknown2_7;
                } midgard2;
        };

        u32 padding;

        /* Blending information for the older non-MRT Midgard HW. Check for
         * MALI_HAS_BLEND_SHADER to decide how to interpret.
         */

        union midgard_blend blend;
} __attribute__((packed));
480
/* This only concerns hardware jobs */

/* Possible values for job_descriptor_size */

#define MALI_JOB_32 0
#define MALI_JOB_64 1

/* Header shared by all hardware job descriptors. Jobs form a chain via
 * next_job; job_index and the two dependency indices express ordering
 * between jobs in the same chain. */
struct mali_job_descriptor_header {
        u32 exception_status;
        u32 first_incomplete_task;
        u64 fault_pointer;
        u8 job_descriptor_size : 1;   /* MALI_JOB_32 or MALI_JOB_64 */
        enum mali_job_type job_type : 7;
        u8 job_barrier : 1;
        u8 unknown_flags : 7;
        u16 job_index;
        u16 job_dependency_index_1;
        u16 job_dependency_index_2;
        u64 next_job;                 /* GPU address of the next job descriptor */
} __attribute__((packed));
501
/* Details about write_value from panfrost igt tests which use it as a generic
 * dword write primitive */

#define MALI_WRITE_VALUE_ZERO 3

/* Payload of a WRITE_VALUE job: writes a value to a GPU address */
struct mali_payload_write_value {
        u64 address;
        u32 value_descriptor;   /* e.g. MALI_WRITE_VALUE_ZERO */
        u32 reserved;
        u64 immediate;
} __attribute__((packed));
513
514 /*
515 * Mali Attributes
516 *
517 * This structure lets the attribute unit compute the address of an attribute
518 * given the vertex and instance ID. Unfortunately, the way this works is
519 * rather complicated when instancing is enabled.
520 *
521 * To explain this, first we need to explain how compute and vertex threads are
522 * dispatched. This is a guess (although a pretty firm guess!) since the
523 * details are mostly hidden from the driver, except for attribute instancing.
524 * When a quad is dispatched, it receives a single, linear index. However, we
525 * need to translate that index into a (vertex id, instance id) pair, or a
526 * (local id x, local id y, local id z) triple for compute shaders (although
527 * vertex shaders and compute shaders are handled almost identically).
528 * Focusing on vertex shaders, one option would be to do:
529 *
530 * vertex_id = linear_id % num_vertices
531 * instance_id = linear_id / num_vertices
532 *
533 * but this involves a costly division and modulus by an arbitrary number.
534 * Instead, we could pad num_vertices. We dispatch padded_num_vertices *
535 * num_instances threads instead of num_vertices * num_instances, which results
536 * in some "extra" threads with vertex_id >= num_vertices, which we have to
537 * discard. The more we pad num_vertices, the more "wasted" threads we
538 * dispatch, but the division is potentially easier.
539 *
540 * One straightforward choice is to pad num_vertices to the next power of two,
541 * which means that the division and modulus are just simple bit shifts and
542 * masking. But the actual algorithm is a bit more complicated. The thread
543 * dispatcher has special support for dividing by 3, 5, 7, and 9, in addition
544 * to dividing by a power of two. This is possibly using the technique
545 * described in patent US20170010862A1. As a result, padded_num_vertices can be
546 * 1, 3, 5, 7, or 9 times a power of two. This results in less wasted threads,
547 * since we need less padding.
548 *
549 * padded_num_vertices is picked by the hardware. The driver just specifies the
550 * actual number of vertices. At least for Mali G71, the first few cases are
551 * given by:
552 *
553 * num_vertices | padded_num_vertices
554 * 3 | 4
555 * 4-7 | 8
556 * 8-11 | 12 (3 * 4)
557 * 12-15 | 16
558 * 16-19 | 20 (5 * 4)
559 *
560 * Note that padded_num_vertices is a multiple of four (presumably because
561 * threads are dispatched in groups of 4). Also, padded_num_vertices is always
562 * at least one more than num_vertices, which seems like a quirk of the
563 * hardware. For larger num_vertices, the hardware uses the following
564 * algorithm: using the binary representation of num_vertices, we look at the
565 * most significant set bit as well as the following 3 bits. Let n be the
566 * number of bits after those 4 bits. Then we set padded_num_vertices according
567 * to the following table:
568 *
569 * high bits | padded_num_vertices
570 * 1000 | 9 * 2^n
571 * 1001 | 5 * 2^(n+1)
572 * 101x | 3 * 2^(n+2)
573 * 110x | 7 * 2^(n+1)
574 * 111x | 2^(n+4)
575 *
576 * For example, if num_vertices = 70 is passed to glDraw(), its binary
577 * representation is 1000110, so n = 3 and the high bits are 1000, and
578 * therefore padded_num_vertices = 9 * 2^3 = 72.
579 *
 * The attribute unit works in terms of the original linear_id. If
 * num_instances = 1, then they are the same, and everything is simple.
582 * However, with instancing things get more complicated. There are four
583 * possible modes, two of them we can group together:
584 *
585 * 1. Use the linear_id directly. Only used when there is no instancing.
586 *
587 * 2. Use the linear_id modulo a constant. This is used for per-vertex
588 * attributes with instancing enabled by making the constant equal
589 * padded_num_vertices. Because the modulus is always padded_num_vertices, this
590 * mode only supports a modulus that is a power of 2 times 1, 3, 5, 7, or 9.
591 * The shift field specifies the power of two, while the extra_flags field
592 * specifies the odd number. If shift = n and extra_flags = m, then the modulus
593 * is (2m + 1) * 2^n. As an example, if num_vertices = 70, then as computed
594 * above, padded_num_vertices = 9 * 2^3, so we should set extra_flags = 4 and
595 * shift = 3. Note that we must exactly follow the hardware algorithm used to
596 * get padded_num_vertices in order to correctly implement per-vertex
597 * attributes.
598 *
 * 3. Divide the linear_id by a constant. In order to correctly implement
 * instance divisors, we have to divide linear_id by padded_num_vertices times
 * the user-specified divisor. So first we compute padded_num_vertices, again
602 * following the exact same algorithm that the hardware uses, then multiply it
603 * by the GL-level divisor to get the hardware-level divisor. This case is
604 * further divided into two more cases. If the hardware-level divisor is a
605 * power of two, then we just need to shift. The shift amount is specified by
606 * the shift field, so that the hardware-level divisor is just 2^shift.
607 *
608 * If it isn't a power of two, then we have to divide by an arbitrary integer.
609 * For that, we use the well-known technique of multiplying by an approximation
610 * of the inverse. The driver must compute the magic multiplier and shift
611 * amount, and then the hardware does the multiplication and shift. The
612 * hardware and driver also use the "round-down" optimization as described in
613 * http://ridiculousfish.com/files/faster_unsigned_division_by_constants.pdf.
614 * The hardware further assumes the multiplier is between 2^31 and 2^32, so the
615 * high bit is implicitly set to 1 even though it is set to 0 by the driver --
616 * presumably this simplifies the hardware multiplier a little. The hardware
617 * first multiplies linear_id by the multiplier and takes the high 32 bits,
618 * then applies the round-down correction if extra_flags = 1, then finally
619 * shifts right by the shift field.
620 *
621 * There are some differences between ridiculousfish's algorithm and the Mali
622 * hardware algorithm, which means that the reference code from ridiculousfish
623 * doesn't always produce the right constants. Mali does not use the pre-shift
624 * optimization, since that would make a hardware implementation slower (it
625 * would have to always do the pre-shift, multiply, and post-shift operations).
 * It also forces the multiplier to be at least 2^31, which means that the
627 * exponent is entirely fixed, so there is no trial-and-error. Altogether,
628 * given the divisor d, the algorithm the driver must follow is:
629 *
630 * 1. Set shift = floor(log2(d)).
631 * 2. Compute m = ceil(2^(shift + 32) / d) and e = 2^(shift + 32) % d.
632 * 3. If e <= 2^shift, then we need to use the round-down algorithm. Set
633 * magic_divisor = m - 1 and extra_flags = 1.
634 * 4. Otherwise, set magic_divisor = m and extra_flags = 0.
635 */
636
/* Framebuffer descriptor pointers are tagged in their low 6 bits; this mask
 * recovers the untagged address */
#define FBD_MASK (~0x3f)

/* MFBD, rather than SFBD */
#define MALI_MFBD (0x1)

/* ORed into an MFBD address to specify the fbx section is included */
#define MALI_MFBD_TAG_EXTRA (0x2)

/* On Bifrost, these fields are the same between the vertex and tiler payloads.
 * They also seem to be the same between Bifrost and Midgard. They're shared in
 * fused payloads.
 */

/* Applies to unknown_draw */

#define MALI_DRAW_INDEXED_UINT8  (0x10)
#define MALI_DRAW_INDEXED_UINT16 (0x20)
#define MALI_DRAW_INDEXED_UINT32 (0x30)
#define MALI_DRAW_INDEXED_SIZE   (0x30)
#define MALI_DRAW_INDEXED_SHIFT  (4)

#define MALI_DRAW_VARYING_SIZE   (0x100)

/* Set to use first vertex as the provoking vertex for flatshading. Clear to
 * use the last vertex. This is the default in DX and VK, but not in GL. */

#define MALI_DRAW_FLATSHADE_FIRST (0x800)

#define MALI_DRAW_PRIMITIVE_RESTART_FIXED_INDEX (0x10000)
666
/* Leading section shared by vertex, tiler and compute payloads */
struct mali_vertex_tiler_prefix {
        /* This is a dynamic bitfield containing the following things in this order:
         *
         * - gl_WorkGroupSize.x
         * - gl_WorkGroupSize.y
         * - gl_WorkGroupSize.z
         * - gl_NumWorkGroups.x
         * - gl_NumWorkGroups.y
         * - gl_NumWorkGroups.z
         *
         * The number of bits allocated for each number is based on the *_shift
         * fields below. For example, workgroups_y_shift gives the bit that
         * gl_NumWorkGroups.y starts at, and workgroups_z_shift gives the bit
         * that gl_NumWorkGroups.z starts at (and therefore one after the bit
         * that gl_NumWorkGroups.y ends at). The actual value for each gl_*
         * value is one more than the stored value, since if any of the values
         * are zero, then there would be no invocations (and hence no job). If
         * there were 0 bits allocated to a given field, then it must be zero,
         * and hence the real value is one.
         *
         * Vertex jobs reuse the same job dispatch mechanism as compute jobs,
         * effectively doing glDispatchCompute(1, vertex_count, instance_count)
         * where vertex count is the number of vertices.
         */
        u32 invocation_count;

        /* Bitfield for shifts:
         *
         * size_y_shift : 5
         * size_z_shift : 5
         * workgroups_x_shift : 6
         * workgroups_y_shift : 6
         * workgroups_z_shift : 6
         * workgroups_x_shift_2 : 4
         */
        u32 invocation_shifts;

        u32 draw_mode : 4;
        u32 unknown_draw : 22;

        /* This is the same as workgroups_x_shift_2 in compute shaders, but
         * always 5 for vertex jobs and 6 for tiler jobs. I suspect this has
         * something to do with how many quads get put in the same execution
         * engine, which is a balance (you don't want to starve the engine, but
         * you also want to distribute work evenly).
         */
        u32 workgroups_x_shift_3 : 6;


        /* Negative of min_index. This is used to compute
         * the unbiased index in tiler/fragment shader runs.
         *
         * The hardware adds offset_bias_correction in each run,
         * so that absent an index bias, the first vertex processed is
         * genuinely the first vertex (0). But with an index bias,
         * the first vertex processed is numbered the same as the bias.
         *
         * To represent this more conveniently:
         * unbiased_index = lower_bound_index +
         *                  index_bias +
         *                  offset_bias_correction
         *
         * This is done since the hardware doesn't accept an index_bias
         * and this allows it to recover the unbiased index.
         */
        int32_t offset_bias_correction;
        u32 zero1;

        /* Like many other strictly nonzero quantities, index_count is
         * subtracted by one. For an indexed cube, this is equal to 35 = 6
         * faces * 2 triangles/per face * 3 vertices/per triangle - 1. That is,
         * for an indexed draw, index_count is the number of actual vertices
         * rendered whereas invocation_count is the number of unique vertices
         * rendered (the number of times the vertex shader must be invoked).
         * For non-indexed draws, this is just equal to invocation_count. */

        u32 index_count;

        /* No hidden structure; literally just a pointer to an array of uint
         * indices (width depends on flags). Thanks, guys, for not making my
         * life insane for once! NULL for non-indexed draws. */

        u64 indices;
} __attribute__((packed));
751
/* Point size / line width can either be specified as a 32-bit float (for
 * constant size) or as a [machine word size]-bit GPU pointer (for varying size). If a pointer
 * is selected, by setting the appropriate MALI_DRAW_VARYING_SIZE bit in the tiler
 * payload, the contents of varying_pointer will be interpreted as an array of
 * fp16 sizes, one for each vertex. gl_PointSize is therefore implemented by
 * creating a special MALI_R16F varying writing to varying_pointer. */

union midgard_primitive_size {
        float constant;   /* constant size */
        u64 pointer;      /* GPU pointer to per-vertex fp16 sizes */
};
763
/* Describes the tiler heap memory region on Bifrost */
struct bifrost_tiler_heap_meta {
        u32 zero;
        u32 heap_size;
        /* note: these are just guesses! */
        mali_ptr tiler_heap_start;
        mali_ptr tiler_heap_free;
        mali_ptr tiler_heap_end;

        /* hierarchy weights? but they're still 0 after the job has run... */
        u32 zeros[10];
        u32 unk1;
        u32 unk7e007e;
} __attribute__((packed));
777
/* Bifrost tiler state: framebuffer dimensions, hierarchy mask and a pointer
 * to the heap descriptor above */
struct bifrost_tiler_meta {
        u32 tiler_heap_next_start;  /* To be written by the GPU */
        u32 used_hierarchy_mask;  /* To be written by the GPU */
        u16 hierarchy_mask; /* Five values observed: 0xa, 0x14, 0x28, 0x50, 0xa0 */
        u16 flags;
        u16 width;    /* MALI_POSITIVE(framebuffer width) */
        u16 height;   /* MALI_POSITIVE(framebuffer height) */
        u64 zero0;
        mali_ptr tiler_heap_meta;
        /* TODO what is this used for? */
        u64 zeros[20];
} __attribute__((packed));
790
/* Tiler-specific section of a Bifrost tiler/fused payload */
struct bifrost_tiler_only {
        /* 0x20 */
        union midgard_primitive_size primitive_size;

        mali_ptr tiler_meta;

        u64 zero1, zero2, zero3, zero4, zero5, zero6;
} __attribute__((packed));
799
/* Trailing section of vertex/tiler payloads: pointers to the descriptor
 * arrays (attributes, varyings, uniforms, textures, samplers, ...) */
struct mali_vertex_tiler_postfix {
        u16 gl_enables; // 0x6 on Midgard, 0x2 on Bifrost

        /* Both zero for non-instanced draws. For instanced draws, a
         * decomposition of padded_num_vertices. See the comments about the
         * corresponding fields in mali_attr for context. */

        unsigned instance_shift : 5;
        unsigned instance_odd : 3;

        u8 zero4;

        /* Offset for first vertex in buffer */
        u32 offset_start;

        u64 zero5;

        /* Zero for vertex jobs. Pointer to the position (gl_Position) varying
         * output from the vertex shader for tiler jobs.
         */

        u64 position_varying;

        /* An array of mali_uniform_buffer_meta's. The size is given by the
         * shader_meta.
         */
        u64 uniform_buffers;

        /* On Bifrost, this is a pointer to an array of bifrost_texture_descriptor.
         * On Midgard, this is a pointer to an array of pointers to the texture
         * descriptors, number of pointers bounded by number of textures. The
         * indirection is needed to accommodate varying numbers and sizes of
         * texture descriptors */
        u64 textures;

        /* For OpenGL, from what I've seen, this is intimately connected to
         * texture_meta. cwabbott says this is not the case under Vulkan, hence
         * why this field is separate (Midgard is Vulkan capable). Pointer to
         * array of sampler descriptors (which are uniform in size) */
        u64 sampler_descriptor;

        u64 uniforms;
        u64 shader;
        u64 attributes; /* struct attribute_buffer[] */
        u64 attribute_meta; /* attribute_meta[] */
        u64 varyings; /* struct attr */
        u64 varying_meta; /* pointer */
        u64 viewport;
        u64 occlusion_counter; /* A single bit as far as I can tell */

        /* On Bifrost, this points directly to a mali_shared_memory structure.
         * On Midgard, this points to a framebuffer (either SFBD or MFBD as
         * tagged), which embeds a mali_shared_memory structure */
        mali_ptr shared_memory;
} __attribute__((packed));
855
/* Full payload of a Midgard vertex or tiler job */
struct midgard_payload_vertex_tiler {
        struct mali_vertex_tiler_prefix prefix;
        struct mali_vertex_tiler_postfix postfix;

        union midgard_primitive_size primitive_size;
} __attribute__((packed));
862
/* Full payload of a Bifrost vertex job */
struct bifrost_payload_vertex {
        struct mali_vertex_tiler_prefix prefix;
        struct mali_vertex_tiler_postfix postfix;
} __attribute__((packed));
867
/* Full payload of a Bifrost tiler job */
struct bifrost_payload_tiler {
        struct mali_vertex_tiler_prefix prefix;
        struct bifrost_tiler_only tiler;
        struct mali_vertex_tiler_postfix postfix;
} __attribute__((packed));
873
/* Fused vertex+tiler payload on Bifrost: one prefix shared by both stages,
 * with separate postfixes for the tiler and vertex halves */
struct bifrost_payload_fused {
        struct mali_vertex_tiler_prefix prefix;
        struct bifrost_tiler_only tiler;
        struct mali_vertex_tiler_postfix tiler_postfix;
        u64 padding; /* zero */
        struct mali_vertex_tiler_postfix vertex_postfix;
} __attribute__((packed));
881
/* Purposeful off-by-one in width, height fields. For example, a (64, 64)
 * texture is stored as (63, 63) in these fields. This adjusts for that.
 * There's an identical pattern in the framebuffer descriptor. Even vertex
 * count fields work this way, hence the generic name -- integral fields that
 * are strictly positive generally need this adjustment. */

/* Argument is parenthesized so expressions with lower-precedence operators
 * (e.g. conditionals) expand correctly */
#define MALI_POSITIVE(dim) ((dim) - 1)
889
/* Sizing limits used when laying out texture descriptors */

/* 8192x8192 */
/* NOTE(review): 13 levels corresponds to a 4096x4096 base (log2(4096) + 1);
 * the 8192x8192 comment above may be off by one -- confirm */
#define MAX_MIP_LEVELS (13)

/* Cubemap bloats everything up */
#define MAX_CUBE_FACES (6)

/* For each pointer, there is an address and optionally also a stride */
#define MAX_ELEMENTS (2)
898
/* Used for lod encoding. Thanks @urjaman for pointing out these routines can
 * be cleaned up a lot. */

/* Decodes signed 8.8 fixed point back to float. Argument is parenthesized so
 * expressions with lower-precedence operators expand correctly. */
#define DECODE_FIXED_16(x) ((float) ((x) / 256.0))
903
/* Encodes a float LOD as signed 8.8 fixed point, first clamping it to the
 * representable range. The upper bound backs off slightly from 32.0 to
 * account for float error; the lower bound is 0 unless negative LODs are
 * permitted. Truncates toward zero, matching the original cast. */
static inline int16_t
FIXED_16(float x, bool allow_negative)
{
        /* Clamp inputs, accounting for float error */
        float max_lod = (32.0 - (1.0 / 512.0));
        float min_lod = allow_negative ? -max_lod : 0.0;

        if (x > max_lod)
                x = max_lod;
        else if (x < min_lod)
                x = min_lod;

        return (int) (x * 256.0);
}
915
/* From presentations, 16x16 tiles externally. Use shift for fast computation
 * of tile numbers. */

#define MALI_TILE_SHIFT 4
#define MALI_TILE_LENGTH (1 << MALI_TILE_SHIFT)

/* Tile coordinates are stored as a compact u32, as only 12 bits are needed
 * for each component. Notice that this provides a theoretical upper bound of
 * (1 << 12) = 4096 tiles in each direction, addressing a maximum framebuffer
 * of size 65536x65536. Multiplying that together, times another four given
 * that Mali framebuffers are 32-bit ARGB8888, means that this upper bound
 * would take 16 gigabytes of RAM just to store the uncompressed framebuffer
 * itself, let alone rendering in real-time to such a buffer.
 *
 * Nice job, guys.*/

/* From mali_kbase_10969_workaround.c */
#define MALI_X_COORD_MASK 0x00000FFF
#define MALI_Y_COORD_MASK 0x0FFF0000

/* Extract parts of a tile coordinate */

#define MALI_TILE_COORD_X(coord) ((coord) & MALI_X_COORD_MASK)
#define MALI_TILE_COORD_Y(coord) (((coord) & MALI_Y_COORD_MASK) >> 16)

/* Helpers to generate tile coordinates based on the boundary coordinates in
 * screen space. So, with the bounds (0, 0) to (128, 128) for the screen, these
 * functions would convert it to the bounding tiles (0, 0) to (7, 7).
 * Intentional "off-by-one"; finding the tile number is a form of fencepost
 * problem. Arguments are parenthesized so compound expressions expand
 * correctly. */

#define MALI_MAKE_TILE_COORDS(X, Y) ((X) | ((Y) << 16))
#define MALI_BOUND_TO_TILE(B, bias) (((B) - (bias)) >> MALI_TILE_SHIFT)
#define MALI_COORDINATE_TO_TILE(W, H, bias) MALI_MAKE_TILE_COORDS(MALI_BOUND_TO_TILE(W, bias), MALI_BOUND_TO_TILE(H, bias))
#define MALI_COORDINATE_TO_TILE_MIN(W, H) MALI_COORDINATE_TO_TILE(W, H, 0)
#define MALI_COORDINATE_TO_TILE_MAX(W, H) MALI_COORDINATE_TO_TILE(W, H, 1)
952
/* Payload for a fragment job: the inclusive tile bounding box to shade
 * (packed with MALI_MAKE_TILE_COORDS) plus a pointer to the framebuffer
 * descriptor */

struct mali_payload_fragment {
        u32 min_tile_coord;
        u32 max_tile_coord;
        mali_ptr framebuffer;
} __attribute__((packed));
958
/* Single Framebuffer Descriptor */

/* Flags apply to format. With just MSAA_A and MSAA_B, the framebuffer is
 * configured for 4x. With MSAA_8, it is configured for 8x. */

#define MALI_SFBD_FORMAT_MSAA_8 (1 << 3)
#define MALI_SFBD_FORMAT_MSAA_A (1 << 4)
/* NOTE(review): MSAA_B has the same value as MSAA_A above, which makes the
 * "A and B" wording above meaningless -- possibly a typo for a distinct bit;
 * confirm against captures */
#define MALI_SFBD_FORMAT_MSAA_B (1 << 4)
#define MALI_SFBD_FORMAT_SRGB (1 << 5)

/* Fast/slow based on whether all three buffers are cleared at once */

#define MALI_CLEAR_FAST (1 << 18)
#define MALI_CLEAR_SLOW (1 << 28)
#define MALI_CLEAR_SLOW_STENCIL (1 << 31)
974
/* Configures hierarchical tiling on Midgard for both SFBD/MFBD (embedded
 * within the larger framebuffer descriptor). Analogous to
 * bifrost_tiler_heap_meta and bifrost_tiler_meta*/

/* See pan_tiler.c for derivation */
#define MALI_HIERARCHY_MASK ((1 << 9) - 1)

/* Flag disabling the tiler for clear-only jobs, with
 hierarchical tiling */
#define MALI_TILER_DISABLED (1 << 12)

/* Flag selecting userspace-generated polygon list, for clear-only jobs without
 * hierarchical tiling. */
#define MALI_TILER_USER 0xFFF

/* Absent any geometry, the minimum size of the polygon list header */
#define MALI_TILER_MINIMUM_HEADER_SIZE 0x200
992
/* Midgard tiler state, embedded within the framebuffer descriptor (both
 * SFBD and MFBD): polygon list allocation, hierarchy configuration, and
 * tiler heap bounds */

struct midgard_tiler_descriptor {
        /* Size of the entire polygon list; see pan_tiler.c for the
         * computation. It's based on hierarchical tiling */

        u32 polygon_list_size;

        /* Name known from the replay workaround in the kernel. What exactly is
         * flagged here is less known. We know that (tiler_hierarchy_mask & 0x1ff)
         * specifies a mask of hierarchy weights, which explains some of the
         * performance mysteries around setting it. We also see the bottom bit
         * of tiler_flags set in the kernel, but no comment why.
         *
         * hierarchy_mask can have the TILER_DISABLED flag */

        u16 hierarchy_mask;
        u16 flags;

        /* See mali_tiler.c for an explanation */
        mali_ptr polygon_list;
        mali_ptr polygon_list_body;

        /* Names based on we see symmetry with replay jobs which name these
         * explicitly */

        mali_ptr heap_start; /* tiler heap_free_address */
        mali_ptr heap_end;

        /* Hierarchy weights. We know these are weights based on the kernel,
         * but I've never seen them be anything other than zero */
        u32 weights[8];
};
1024
/* Format word of the Single Framebuffer Descriptor; the bitfields below
 * total 32 bits. Hex comments give the values observed in captures. */

struct mali_sfbd_format {
        /* 0x1 */
        unsigned unk1 : 6;

        /* mali_channel_swizzle */
        unsigned swizzle : 12;

        /* MALI_POSITIVE */
        unsigned nr_channels : 2;

        /* 0x4 */
        unsigned unk2 : 6;

        enum mali_block_format block : 2;

        /* 0xb */
        unsigned unk3 : 4;
};
1043
/* Shared structure at the start of framebuffer descriptors, or used bare for
 * compute jobs, configuring stack and shared memory */

struct mali_shared_memory {
        u32 stack_shift : 4;
        u32 unk0 : 28;

        /* Configuration for shared memory for compute shaders.
         * shared_workgroup_count is logarithmic and may be computed for a
         * compute shader using shared memory as:
         *
         * shared_workgroup_count = MAX2(ceil(log2(count_x)) + ... + ceil(log2(count_z)), 10)
         *
         * For compute shaders that don't use shared memory, or non-compute
         * shaders, this is set to ~0
         */

        u32 shared_workgroup_count : 5;
        u32 shared_unk1 : 3;
        u32 shared_shift : 4;
        u32 shared_zero : 20;

        mali_ptr scratchpad;

        /* For compute shaders, the RAM backing of workgroup-shared memory. For
         * fragment shaders on Bifrost, apparently multisampling locations */

        mali_ptr shared_memory;
        mali_ptr unknown1;
} __attribute__((packed));
1074
/* Configures multisampling on Bifrost fragment jobs */

/* NOTE(review): field naming jumps from zero2 to zero4; only
 * sample_locations has known semantics here */
struct bifrost_multisampling {
        u64 zero1;
        u64 zero2;
        mali_ptr sample_locations;
        u64 zero4;
} __attribute__((packed));
1083
/* Single Framebuffer Descriptor (SFBD): the complete render state for a
 * single colour attachment on older Midgard, including dimensions, clear
 * values, depth/stencil buffers, and the embedded tiler descriptor */

struct mali_single_framebuffer {
        struct mali_shared_memory shared_memory;
        struct mali_sfbd_format format;

        u32 clear_flags;
        u32 zero2;

        /* Purposeful off-by-one in these fields should be accounted for by the
         * MALI_DIMENSION macro */
        /* NOTE(review): no MALI_DIMENSION is defined in this header; this
         * presumably means MALI_POSITIVE -- confirm */

        u16 width;
        u16 height;

        u32 zero3[4];
        mali_ptr checksum;
        u32 checksum_stride;
        u32 zero5;

        /* By default, the framebuffer is upside down from OpenGL's
         * perspective. Set framebuffer to the end and negate the stride to
         * flip in the Y direction */

        mali_ptr framebuffer;
        int32_t stride;

        u32 zero4;

        /* Depth and stencil buffers are interleaved, it appears, as they are
         * set to the same address in captures. Both fields set to zero if the
         * buffer is not being cleared. Depending on GL_ENABLE magic, you might
         * get a zero enable despite the buffer being present; that still is
         * disabled. */

        mali_ptr depth_buffer; // not SAME_VA
        u32 depth_stride_zero : 4;
        u32 depth_stride : 28;
        u32 zero7;

        mali_ptr stencil_buffer; // not SAME_VA
        u32 stencil_stride_zero : 4;
        u32 stencil_stride : 28;
        u32 zero8;

        u32 clear_color_1; // RGBA8888 from glClear, actually used by hardware
        u32 clear_color_2; // always equal, but unclear function?
        u32 clear_color_3; // always equal, but unclear function?
        u32 clear_color_4; // always equal, but unclear function?

        /* Set to zero if not cleared */

        float clear_depth_1; // float32, ditto
        float clear_depth_2; // float32, ditto
        float clear_depth_3; // float32, ditto
        float clear_depth_4; // float32, ditto

        u32 clear_stencil; // Exactly as it appears in OpenGL

        u32 zero6[7];

        struct midgard_tiler_descriptor tiler;

        /* More below this, maybe */
} __attribute__((packed));
1147
1148
#define MALI_MFBD_FORMAT_SRGB (1 << 0)

/* Per-render-target format word of the Multiple Framebuffer Descriptor;
 * the bitfields below total 64 bits */

struct mali_rt_format {
        unsigned unk1 : 32;
        unsigned unk2 : 3;

        unsigned nr_channels : 2; /* MALI_POSITIVE */

        unsigned unk3 : 4;
        unsigned unk4 : 1;
        enum mali_block_format block : 2;
        enum mali_msaa msaa : 2;
        unsigned flags : 2;

        unsigned swizzle : 12;

        unsigned zero : 3;

        /* Disables MFBD preload. When this bit is set, the render target will
         * be cleared every frame. When this bit is clear, the hardware will
         * automatically wallpaper the render target back from main memory.
         * Unfortunately, MFBD preload is very broken on Midgard, so in
         * practice, this is a chicken bit that should always be set.
         * Discovered by accident, as all good chicken bits are. */

        unsigned no_preload : 1;
} __attribute__((packed));
1176
/* Flags for afbc.flags and ds_afbc.flags */

#define MALI_AFBC_FLAGS 0x10009

/* Lossless RGB and RGBA colorspace transform */
#define MALI_AFBC_YTR (1 << 17)

/* One colour attachment of the MFBD: format, optional AFBC compression
 * metadata, the framebuffer pointer/stride, and per-target clear colour.
 * An array of these follows struct mali_framebuffer. */

struct mali_render_target {
        struct mali_rt_format format;

        u64 zero1;

        struct {
                /* Stuff related to ARM Framebuffer Compression. When AFBC is enabled,
                 * there is an extra metadata buffer that contains 16 bytes per tile.
                 * The framebuffer needs to be the same size as before, since we don't
                 * know ahead of time how much space it will take up. The
                 * framebuffer_stride is set to 0, since the data isn't stored linearly
                 * anymore.
                 *
                 * When AFBC is disabled, these fields are zero.
                 */

                mali_ptr metadata;
                u32 stride; // stride in units of tiles
                u32 flags; // = 0x20000
        } afbc;

        mali_ptr framebuffer;

        u32 zero2 : 4;
        u32 framebuffer_stride : 28; // in units of bytes, row to next
        u32 layer_stride; /* For multisample rendering */

        u32 clear_color_1; // RGBA8888 from glClear, actually used by hardware
        u32 clear_color_2; // always equal, but unclear function?
        u32 clear_color_3; // always equal, but unclear function?
        u32 clear_color_4; // always equal, but unclear function?
} __attribute__((packed));
1216
/* An optional part of mali_framebuffer. It comes between the main structure
 * and the array of render targets. It must be included if any of these are
 * enabled:
 *
 * - Transaction Elimination
 * - Depth/stencil
 * - TODO: Anything else?
 */

/* flags_hi (see mali_framebuffer_extra.flags_hi) */
#define MALI_EXTRA_PRESENT (0x1)

/* flags_lo (see mali_framebuffer_extra.flags_lo) */
#define MALI_EXTRA_ZS (0x4)
1231
/* Optional MFBD section (see comment above): checksum state for transaction
 * elimination plus the depth/stencil attachment, which is either AFBC
 * compressed or linear depending on which union member is active */

struct mali_framebuffer_extra {
        mali_ptr checksum;
        /* Each tile has an 8 byte checksum, so the stride is "width in tiles * 8" */
        u32 checksum_stride;

        unsigned flags_lo : 4;
        enum mali_block_format zs_block : 2;

        /* Number of samples in Z/S attachment, MALI_POSITIVE. So zero for
         * 1-sample (non-MSAA), 0x3 for MSAA 4x, etc */
        unsigned zs_samples : 4;
        unsigned flags_hi : 22;

        union {
                /* Note: AFBC is only allowed for 24/8 combined depth/stencil. */
                struct {
                        mali_ptr depth_stencil_afbc_metadata;
                        u32 depth_stencil_afbc_stride; // in units of tiles
                        u32 flags;

                        mali_ptr depth_stencil;

                        u64 padding;
                } ds_afbc;

                struct {
                        /* Depth becomes depth/stencil in case of combined D/S */
                        mali_ptr depth;
                        u32 depth_stride_zero : 4;
                        u32 depth_stride : 28;
                        u32 depth_layer_stride;

                        mali_ptr stencil;
                        u32 stencil_stride_zero : 4;
                        u32 stencil_stride : 28;
                        u32 stencil_layer_stride;
                } ds_linear;
        };


        u32 clear_color_1;
        u32 clear_color_2;
        u64 zero3;
} __attribute__((packed));
1276
/* Flags for mfbd_flags (see struct mali_framebuffer) */

/* Enables writing depth results back to main memory (rather than keeping them
 * on-chip in the tile buffer and then discarding) */

#define MALI_MFBD_DEPTH_WRITE (1 << 10)

/* The MFBD contains the extra mali_framebuffer_extra section */

#define MALI_MFBD_EXTRA (1 << 13)
1287
/* Multiple Framebuffer Descriptor (MFBD): the main framebuffer structure,
 * followed in memory by the optional mali_framebuffer_extra section and the
 * array of render targets */

struct mali_framebuffer {
        union {
                struct mali_shared_memory shared_memory;
                struct bifrost_multisampling msaa;
        };

        /* 0x20 */
        /* NOTE(review): two width/height pairs are always observed; the
         * relationship between (width1, height1) and (width2, height2) is
         * not established here */
        u16 width1, height1;
        u32 zero3;
        u16 width2, height2;
        u32 unk1 : 19; // = 0x01000
        u32 rt_count_1 : 3; // off-by-one (use MALI_POSITIVE)
        u32 unk2 : 2; // = 0
        u32 rt_count_2 : 3; // no off-by-one
        u32 zero4 : 5;
        /* 0x30 */
        u32 clear_stencil : 8;
        u32 mfbd_flags : 24; // = 0x100
        float clear_depth;

        union {
                struct midgard_tiler_descriptor tiler;
                struct {
                        mali_ptr tiler_meta;
                        u32 zeros[16];
                };
        };

        /* optional: struct mali_framebuffer_extra extra */
        /* struct mali_render_target rts[] */
} __attribute__((packed));
1319
1320 #endif /* __PANFROST_JOB_H__ */