1 /*
2 * © Copyright 2017-2018 Alyssa Rosenzweig
3 * © Copyright 2017-2018 Connor Abbott
4 * © Copyright 2017-2018 Lyude Paul
5 * © Copyright 2019 Collabora, Ltd.
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the "Software"),
9 * to deal in the Software without restriction, including without limitation
10 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
11 * and/or sell copies of the Software, and to permit persons to whom the
12 * Software is furnished to do so, subject to the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the next
15 * paragraph) shall be included in all copies or substantial portions of the
16 * Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
21 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
23 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
24 * SOFTWARE.
25 *
26 */
27
28 #ifndef __PANFROST_JOB_H__
29 #define __PANFROST_JOB_H__
30
31 #include <stdint.h>
32 #include <stdbool.h>
33 #include <inttypes.h>
34
35 typedef uint8_t u8;
36 typedef uint16_t u16;
37 typedef uint32_t u32;
38 typedef uint64_t u64;
39 typedef uint64_t mali_ptr;
40
41 /* Applies to tiler_gl_enables */
42
43 #define MALI_OCCLUSION_QUERY (1 << 3)
44 #define MALI_OCCLUSION_PRECISE (1 << 4)
45
46 /* Set for a glFrontFace(GL_CCW) in a Y=0=TOP coordinate system (like Gallium).
47 * In OpenGL, this would correspond to glFrontFace(GL_CW). Mesa and the blob
48 * disagree about how to do viewport flipping, so the blob actually sets this
49 * for GL_CW but then has a negative viewport stride */
50
51 #define MALI_FRONT_CCW_TOP (1 << 5)
52
53 #define MALI_CULL_FACE_FRONT (1 << 6)
54 #define MALI_CULL_FACE_BACK (1 << 7)
55
56 /* Flags apply to unknown2_3? */
57
58 #define MALI_HAS_MSAA (1 << 0)
59
60 /* Execute fragment shader per-sample if set (e.g. to implement gl_SampleID
61 * reads) */
62 #define MALI_PER_SAMPLE (1 << 2)
63 #define MALI_CAN_DISCARD (1 << 5)
64
65 /* Applies on SFBD systems, specifying that programmable blending is in use */
66 #define MALI_HAS_BLEND_SHADER (1 << 6)
67
68 /* func is mali_func */
69 #define MALI_DEPTH_FUNC(func) (func << 8)
70 #define MALI_GET_DEPTH_FUNC(flags) ((flags >> 8) & 0x7)
71 #define MALI_DEPTH_FUNC_MASK MALI_DEPTH_FUNC(0x7)
72
73 #define MALI_DEPTH_WRITEMASK (1 << 11)
74
75 #define MALI_DEPTH_CLIP_NEAR (1 << 12)
76 #define MALI_DEPTH_CLIP_FAR (1 << 13)
77
78 /* Next flags to unknown2_4 */
79 #define MALI_STENCIL_TEST (1 << 0)
80
81 #define MALI_ALPHA_TO_COVERAGE (1 << 1)
82
83 #define MALI_NO_DITHER (1 << 9)
84 #define MALI_DEPTH_RANGE_A (1 << 12)
85 #define MALI_DEPTH_RANGE_B (1 << 13)
86 #define MALI_NO_MSAA (1 << 14)
87
88 #define MALI_MASK_R (1 << 0)
89 #define MALI_MASK_G (1 << 1)
90 #define MALI_MASK_B (1 << 2)
91 #define MALI_MASK_A (1 << 3)
92
93 enum mali_nondominant_mode {
94 MALI_BLEND_NON_MIRROR = 0,
95 MALI_BLEND_NON_ZERO = 1
96 };
97
98 enum mali_dominant_blend {
99 MALI_BLEND_DOM_SOURCE = 0,
100 MALI_BLEND_DOM_DESTINATION = 1
101 };
102
103 enum mali_dominant_factor {
104 MALI_DOMINANT_UNK0 = 0,
105 MALI_DOMINANT_ZERO = 1,
106 MALI_DOMINANT_SRC_COLOR = 2,
107 MALI_DOMINANT_DST_COLOR = 3,
108 MALI_DOMINANT_UNK4 = 4,
109 MALI_DOMINANT_SRC_ALPHA = 5,
110 MALI_DOMINANT_DST_ALPHA = 6,
111 MALI_DOMINANT_CONSTANT = 7,
112 };
113
114 enum mali_blend_modifier {
115 MALI_BLEND_MOD_UNK0 = 0,
116 MALI_BLEND_MOD_NORMAL = 1,
117 MALI_BLEND_MOD_SOURCE_ONE = 2,
118 MALI_BLEND_MOD_DEST_ONE = 3,
119 };
120
121 struct mali_blend_mode {
122 enum mali_blend_modifier clip_modifier : 2;
123 unsigned unused_0 : 1;
124 unsigned negate_source : 1;
125
126 enum mali_dominant_blend dominant : 1;
127
128 enum mali_nondominant_mode nondominant_mode : 1;
129
130 unsigned unused_1 : 1;
131
132 unsigned negate_dest : 1;
133
134 enum mali_dominant_factor dominant_factor : 3;
135 unsigned complement_dominant : 1;
136 } __attribute__((packed));
137
138 struct mali_blend_equation {
139 /* Of type mali_blend_mode */
140 unsigned rgb_mode : 12;
141 unsigned alpha_mode : 12;
142
143 unsigned zero1 : 4;
144
145 /* Corresponds to MALI_MASK_* above and glColorMask arguments */
146
147 unsigned color_mask : 4;
148 } __attribute__((packed));
149
150 /* Used with channel swizzling */
151 enum mali_channel {
152 MALI_CHANNEL_RED = 0,
153 MALI_CHANNEL_GREEN = 1,
154 MALI_CHANNEL_BLUE = 2,
155 MALI_CHANNEL_ALPHA = 3,
156 MALI_CHANNEL_ZERO = 4,
157 MALI_CHANNEL_ONE = 5,
158 MALI_CHANNEL_RESERVED_0 = 6,
159 MALI_CHANNEL_RESERVED_1 = 7,
160 };
161
162 /* Compressed per-pixel formats. Each of these formats expands to one to four
163 * floating-point or integer numbers, as defined by the OpenGL specification.
164 * There are various places in OpenGL where the user can specify a compressed
165 * format in memory, which all use the same 8-bit enum in the various
166 * descriptors, although different hardware units support different formats.
167 */
168
169 /* The top 3 bits specify how the bits of each component are interpreted. */
170
171 /* e.g. ETC2_RGB8 */
172 #define MALI_FORMAT_COMPRESSED (0 << 5)
173
174 /* e.g. R11F_G11F_B10F */
175 #define MALI_FORMAT_SPECIAL (2 << 5)
176
177 /* signed normalized, e.g. RGBA8_SNORM */
178 #define MALI_FORMAT_SNORM (3 << 5)
179
180 /* e.g. RGBA8UI */
181 #define MALI_FORMAT_UINT (4 << 5)
182
183 /* e.g. RGBA8 and RGBA32F */
184 #define MALI_FORMAT_UNORM (5 << 5)
185
186 /* e.g. RGBA8I and RGBA16F */
187 #define MALI_FORMAT_SINT (6 << 5)
188
189 /* These formats seem to largely duplicate the others. They're used at least
190 * for Bifrost framebuffer output.
191 */
192 #define MALI_FORMAT_SPECIAL2 (7 << 5)
193 #define MALI_EXTRACT_TYPE(fmt) ((fmt) & 0xe0)
194
195 /* If the high 3 bits are 3 to 6 these two bits say how many components
196 * there are.
197 */
198 #define MALI_NR_CHANNELS(n) ((n - 1) << 3)
199 #define MALI_EXTRACT_CHANNELS(fmt) ((((fmt) >> 3) & 3) + 1)
200
201 /* If the high 3 bits are 3 to 6, then the low 3 bits say how big each
202 * component is, except the special MALI_CHANNEL_FLOAT which overrides what the
203 * bits mean.
204 */
205
206 #define MALI_CHANNEL_4 2
207
208 #define MALI_CHANNEL_8 3
209
210 #define MALI_CHANNEL_16 4
211
212 #define MALI_CHANNEL_32 5
213
214 /* For MALI_FORMAT_SINT it means a half-float (e.g. RG16F). For
215 * MALI_FORMAT_UNORM, it means a 32-bit float.
216 */
217 #define MALI_CHANNEL_FLOAT 7
218 #define MALI_EXTRACT_BITS(fmt) (fmt & 0x7)
219
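/* A quick illustration (a sketch, not a value lifted from the hardware's named
 * format enum): composing the format byte for a 4-channel, 8-bit-per-channel,
 * unsigned normalized format from the fields above, and decoding it back. */

static inline void
pan_format_bits_example(void)
{
        u8 fmt = MALI_FORMAT_UNORM | MALI_NR_CHANNELS(4) | MALI_CHANNEL_8;

        /* The extraction macros round-trip the individual fields:
         *   MALI_EXTRACT_TYPE(fmt)     == MALI_FORMAT_UNORM
         *   MALI_EXTRACT_CHANNELS(fmt) == 4
         *   MALI_EXTRACT_BITS(fmt)     == MALI_CHANNEL_8
         */
        (void) fmt;
}
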
220 /* Applies to midgard1.flags_lo */
221
222 /* Should be set when the fragment shader updates the depth value. */
223 #define MALI_WRITES_Z (1 << 4)
224
225 /* Should the hardware perform early-Z testing? Set if the shader does not use
226 * discard, alpha-to-coverage, or shader depth writes, and has no side effects
227 * (writes to global memory or images), unless early-z testing is forced in the
228 * shader.
229 */
230
231 #define MALI_EARLY_Z (1 << 6)
232
233 /* Should the hardware calculate derivatives (via helper invocations)? Set in a
234 * fragment shader that uses texturing or derivative functions */
235
236 #define MALI_HELPER_INVOCATIONS (1 << 7)
237
238 /* Flags denoting the fragment shader's use of tilebuffer readback. If the
239 * shader might read any part of the tilebuffer, set MALI_READS_TILEBUFFER. If
240 * it might read depth/stencil in particular, also set MALI_READS_ZS */
241
242 #define MALI_READS_ZS (1 << 8)
243
244 /* The shader might write to global memory (via OpenCL, SSBOs, or images).
245 * Reading is okay, as are ordinary writes to the tilebuffer/varyings. Setting
246 * incurs a performance penalty. On a fragment shader, this bit implies there
247 * are side effects, hence it interacts with early-z. */
248 #define MALI_WRITES_GLOBAL (1 << 9)
249
250 #define MALI_READS_TILEBUFFER (1 << 10)
251
252 /* Applies to midgard1.flags_hi */
253
254 /* Should be set when the fragment shader updates the stencil value. */
255 #define MALI_WRITES_S (1 << 2)
256
257 /* Mode to suppress generation of Infinity and NaN values by clamping inf
258 * (-inf) to MAX_FLOAT (-MAX_FLOAT) and flushing NaN to 0.0
259 *
260 * Compare suppress_inf/suppress_nan flags on the Bifrost clause header for the
261 * same functionality.
262 *
263 * This is not conformant on GLES3 or OpenCL, but is optional on GLES2, where
264 * it works around app bugs (e.g. in glmark2-es2 -bterrain with FP16).
265 */
266 #define MALI_SUPPRESS_INF_NAN (1 << 3)
267
268 /* Flags for bifrost1.unk1 */
269
270 /* Shader uses less than 32 registers, partitioned as [R0, R15] U [R48, R63],
271 * allowing for full thread count. If clear, the full [R0, R63] register set is
272 * available at half thread count */
273 #define MALI_BIFROST_FULL_THREAD (1 << 9)
274
275 /* Enable early-z testing (presumably). This flag may not be set if the shader:
276 *
277 * - Uses blending
278 * - Uses discard
279 * - Writes gl_FragDepth
280 *
281 * This differs from Midgard which sets the MALI_EARLY_Z flag even with
282 * blending, although I've begun to suspect that flag does not in fact enable
283 * EARLY_Z alone. */
284 #define MALI_BIFROST_EARLY_Z (1 << 15)
285
286 /* First clause type is ATEST */
287 #define MALI_BIFROST_FIRST_ATEST (1 << 26)
288
289 /* The raw Midgard blend payload can either be an equation or a shader
290 * address, depending on the context */
291
292 union midgard_blend {
293 mali_ptr shader;
294
295 struct {
296 struct mali_blend_equation equation;
297 float constant;
298 };
299 };
300
301 /* We need to load the tilebuffer to blend (i.e. the destination factor is not
302 * ZERO) */
303
304 #define MALI_BLEND_LOAD_TIB (0x1)
305
306 /* A blend shader is used to blend this render target */
307 #define MALI_BLEND_MRT_SHADER (0x2)
308
309 /* On MRT Midgard systems (using an MFBD), each render target gets its own
310 * blend descriptor */
311
312 #define MALI_BLEND_SRGB (0x400)
313
314 /* Dithering is specified here for MFBD, otherwise NO_DITHER for SFBD */
315 #define MALI_BLEND_NO_DITHER (0x800)
316
317 struct midgard_blend_rt {
318 /* Flags base value of 0x200 to enable the render target.
319 * OR with 0x1 for blending (anything other than REPLACE).
320 * OR with 0x2 for programmable blending
321 * OR with MALI_BLEND_SRGB for implicit sRGB
322 */
323
324 u64 flags;
325 union midgard_blend blend;
326 } __attribute__((packed));
327
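/* A sketch of filling the descriptor per the flag layout described above:
 * render target enabled (0x200 base), fixed-function blending that reads the
 * tilebuffer, and implicit sRGB. The equation and constant are assumed to be
 * supplied by the caller; the helper name is illustrative only. */

static inline void
pan_example_midgard_blend_rt(struct midgard_blend_rt *rt,
                             struct mali_blend_equation equation,
                             float constant)
{
        rt->flags = 0x200 | MALI_BLEND_LOAD_TIB | MALI_BLEND_SRGB;
        rt->blend.equation = equation;
        rt->blend.constant = constant;
}
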
328 /* On Bifrost systems (all MRT), each render target gets one of these
329 * descriptors */
330
331 enum bifrost_shader_type {
332 BIFROST_BLEND_F16 = 0,
333 BIFROST_BLEND_F32 = 1,
334 BIFROST_BLEND_I32 = 2,
335 BIFROST_BLEND_U32 = 3,
336 BIFROST_BLEND_I16 = 4,
337 BIFROST_BLEND_U16 = 5,
338 };
339
340 #define BIFROST_MAX_RENDER_TARGET_COUNT 8
341
342 struct bifrost_blend_rt {
343 /* This is likely an analogue of the flags on
344 * midgard_blend_rt */
345
346 u16 flags; // = 0x200
347
348 /* Single-channel blend constants are encoded in a sort of
349 * fixed-point. Basically, the float is mapped to a byte, becoming
350 * a high byte, and then the lower-byte is added for precision.
351 * For the original float f:
352 *
353 * f = (constant_hi / 255) + (constant_lo / 65535)
354 *
355 * constant_hi = int(f * 255)
356 * constant_lo = 65535*f - (65535/255) * constant_hi
357 */
358 u16 constant;
359
360 struct mali_blend_equation equation;
361
362 /*
363 * - 0x19 normally
364 * - 0x3 when this slot is unused (everything else is 0 except the index)
365 * - 0x11 when this is the fourth slot (and it's used)
366 * - 0 when there is a blend shader
367 */
368 u16 unk2;
369
370 /* increments from 0 to 3 */
371 u16 index;
372
373 union {
374 struct {
375 /* So far, I've only seen:
376 * - R001 for 1-component formats
377 * - RG01 for 2-component formats
378 * - RGB1 for 3-component formats
379 * - RGBA for 4-component formats
380 */
381 u32 swizzle : 12;
382 enum mali_format format : 8;
383
384 /* Type of the shader output variable. Note, this can
385 * be different from the format.
386 * enum bifrost_shader_type
387 */
388 u32 zero1 : 4;
389 u32 shader_type : 3;
390 u32 zero2 : 5;
391 };
392
393 /* Only the low 32 bits of the blend shader are stored, the
394 * high 32 bits are implicitly the same as the original shader.
395 * According to the kernel driver, the program counter for
396 * shaders is actually only 24 bits, so shaders cannot cross
397 * the 2^24-byte boundary, and neither can the blend shader.
398 * The blob handles this by allocating a 2^24 byte pool for
399 * shaders, and making sure that any blend shaders are stored
400 * in the same pool as the original shader. The kernel will
401 * make sure this allocation is aligned to 2^24 bytes.
402 */
403 u32 shader;
404 };
405 } __attribute__((packed));
406
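/* A minimal sketch of the fixed-point blend-constant encoding documented for
 * bifrost_blend_rt::constant above, assuming f has already been clamped to
 * [0, 1]. The low byte is clamped to guard against rounding pushing it past
 * 0xff. */

static inline u16
pan_pack_bifrost_blend_constant(float f)
{
        u32 hi = (u32) (f * 255.0f);
        u32 lo = (u32) (f * 65535.0f - (65535.0f / 255.0f) * hi);

        if (lo > 0xff)
                lo = 0xff;

        return (u16) ((hi << 8) | lo);
}
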
407 /* Descriptor for the shader. Following this is at least one, up to four blend
408 * descriptors for each active render target */
409
410 struct mali_shader_meta {
411 mali_ptr shader;
412 u16 sampler_count;
413 u16 texture_count;
414 u16 attribute_count;
415 u16 varying_count;
416
417 union {
418 struct {
419 u32 uniform_buffer_count : 4;
420 u32 unk1 : 28; // = 0x800000 for vertex, 0x958020 for tiler
421 } bifrost1;
422 struct {
423 unsigned uniform_buffer_count : 4;
424 unsigned flags_lo : 12;
425
426 /* vec4 units */
427 unsigned work_count : 5;
428 unsigned uniform_count : 5;
429 unsigned flags_hi : 6;
430 } midgard1;
431 };
432
433 /* Same as glPolygonOffset() arguments */
434 float depth_units;
435 float depth_factor;
436
437 u32 unknown2_2;
438
439 /* Generated from SAMPLE_COVERAGE_VALUE and SAMPLE_COVERAGE_INVERT. See
440 * 13.8.3 ("Multisample Fragment Operations") in the OpenGL ES 3.2
441 * specification. Only matters when multisampling is enabled. */
442 u16 coverage_mask;
443
444 u16 unknown2_3;
445
446 u8 stencil_mask_front;
447 u8 stencil_mask_back;
448 u16 unknown2_4;
449
450 struct mali_stencil_packed stencil_front;
451 struct mali_stencil_packed stencil_back;
452
453 union {
454 struct {
455 u32 unk3 : 7;
456 /* On Bifrost, some system values are preloaded in
457 * registers R55-R62 by the thread dispatcher prior to
458 * the start of shader execution. This is a bitfield
459 * with one entry for each register saying which
460 * registers need to be preloaded. Right now, the known
461 * values are:
462 *
463 * Vertex/compute:
464 * - R55 : gl_LocalInvocationID.xy
465 * - R56 : gl_LocalInvocationID.z + unknown in high 16 bits
466 * - R57 : gl_WorkGroupID.x
467 * - R58 : gl_WorkGroupID.y
468 * - R59 : gl_WorkGroupID.z
469 * - R60 : gl_GlobalInvocationID.x
470 * - R61 : gl_GlobalInvocationID.y/gl_VertexID (without base)
471 * - R62 : gl_GlobalInvocationID.z/gl_InstanceID (without base)
472 *
473 * Fragment:
474 * - R55 : unknown, never seen (but the bit for this is
475 * always set?)
476 * - R56 : unknown (bit always unset)
477 * - R57 : gl_PrimitiveID
478 * - R58 : gl_FrontFacing in low bit, potentially other stuff
479 * - R59 : u16 fragment coordinates (used to compute
480 * gl_FragCoord.xy, together with sample positions)
481 * - R60 : gl_SampleMask (used in epilog, so pretty
482 * much always used, but the bit is always 0 -- is
483 * this just always pushed?)
484 * - R61 : gl_SampleMaskIn and gl_SampleID, used by
485 * varying interpolation.
486 * - R62 : unknown (bit always unset).
487 *
488 * Later GPUs (starting with Mali-G52?) support
489 * preloading float varyings into r0-r7. This is
490 * indicated by setting 0x40. There is no distinction
491 * here between 1 varying and 2.
492 */
493 u32 preload_regs : 8;
494 /* In units of 8 bytes or 64 bits, since the
495 * uniform/const port loads 64 bits at a time.
496 */
497 u32 uniform_count : 7;
498 u32 unk4 : 10; // = 2
499 } bifrost2;
500 struct {
501 u32 unknown2_7;
502 } midgard2;
503 };
504
505 u32 padding;
506
507 /* Blending information for the older non-MRT Midgard HW. Check for
508 * MALI_HAS_BLEND_SHADER to decide how to interpret.
509 */
510
511 union midgard_blend blend;
512 } __attribute__((packed));
513
514 /* This only concerns hardware jobs */
515
516 /* Possible values for job_descriptor_size */
517
518 #define MALI_JOB_32 0
519 #define MALI_JOB_64 1
520
521 struct mali_job_descriptor_header {
522 u32 exception_status;
523 u32 first_incomplete_task;
524 u64 fault_pointer;
525 u8 job_descriptor_size : 1;
526 enum mali_job_type job_type : 7;
527 u8 job_barrier : 1;
528 u8 unknown_flags : 7;
529 u16 job_index;
530 u16 job_dependency_index_1;
531 u16 job_dependency_index_2;
532 u64 next_job;
533 } __attribute__((packed));
534
535 /* Details about write_value from panfrost igt tests which use it as a generic
536 * dword write primitive */
537
538 #define MALI_WRITE_VALUE_ZERO 3
539
540 struct mali_payload_write_value {
541 u64 address;
542 u32 value_descriptor;
543 u32 reserved;
544 u64 immediate;
545 } __attribute__((packed));
546
547 /*
548 * Mali Attributes
549 *
550 * This structure lets the attribute unit compute the address of an attribute
551 * given the vertex and instance ID. Unfortunately, the way this works is
552 * rather complicated when instancing is enabled.
553 *
554 * To explain this, first we need to explain how compute and vertex threads are
555 * dispatched. This is a guess (although a pretty firm guess!) since the
556 * details are mostly hidden from the driver, except for attribute instancing.
557 * When a quad is dispatched, it receives a single, linear index. However, we
558 * need to translate that index into a (vertex id, instance id) pair, or a
559 * (local id x, local id y, local id z) triple for compute shaders (although
560 * vertex shaders and compute shaders are handled almost identically).
561 * Focusing on vertex shaders, one option would be to do:
562 *
563 * vertex_id = linear_id % num_vertices
564 * instance_id = linear_id / num_vertices
565 *
566 * but this involves a costly division and modulus by an arbitrary number.
567 * Instead, we could pad num_vertices. We dispatch padded_num_vertices *
568 * num_instances threads instead of num_vertices * num_instances, which results
569 * in some "extra" threads with vertex_id >= num_vertices, which we have to
570 * discard. The more we pad num_vertices, the more "wasted" threads we
571 * dispatch, but the division is potentially easier.
572 *
573 * One straightforward choice is to pad num_vertices to the next power of two,
574 * which means that the division and modulus are just simple bit shifts and
575 * masking. But the actual algorithm is a bit more complicated. The thread
576 * dispatcher has special support for dividing by 3, 5, 7, and 9, in addition
577 * to dividing by a power of two. This is possibly using the technique
578 * described in patent US20170010862A1. As a result, padded_num_vertices can be
579 * 1, 3, 5, 7, or 9 times a power of two. This results in fewer wasted threads,
580 * since we need less padding.
581 *
582 * padded_num_vertices is picked by the hardware. The driver just specifies the
583 * actual number of vertices. At least for Mali G71, the first few cases are
584 * given by:
585 *
586 * num_vertices | padded_num_vertices
587 * 3 | 4
588 * 4-7 | 8
589 * 8-11 | 12 (3 * 4)
590 * 12-15 | 16
591 * 16-19 | 20 (5 * 4)
592 *
593 * Note that padded_num_vertices is a multiple of four (presumably because
594 * threads are dispatched in groups of 4). Also, padded_num_vertices is always
595 * at least one more than num_vertices, which seems like a quirk of the
596 * hardware. For larger num_vertices, the hardware uses the following
597 * algorithm: using the binary representation of num_vertices, we look at the
598 * most significant set bit as well as the following 3 bits. Let n be the
599 * number of bits after those 4 bits. Then we set padded_num_vertices according
600 * to the following table:
601 *
602 * high bits | padded_num_vertices
603 * 1000 | 9 * 2^n
604 * 1001 | 5 * 2^(n+1)
605 * 101x | 3 * 2^(n+2)
606 * 110x | 7 * 2^(n+1)
607 * 111x | 2^(n+4)
608 *
609 * For example, if num_vertices = 70 is passed to glDraw(), its binary
610 * representation is 1000110, so n = 3 and the high bits are 1000, and
611 * therefore padded_num_vertices = 9 * 2^3 = 72.
612 *
613 * The attribute unit works in terms of the original linear_id. If
614 * num_instances = 1, then they are the same, and everything is simple.
615 * However, with instancing things get more complicated. There are four
616 * possible modes, two of them we can group together:
617 *
618 * 1. Use the linear_id directly. Only used when there is no instancing.
619 *
620 * 2. Use the linear_id modulo a constant. This is used for per-vertex
621 * attributes with instancing enabled by making the constant equal
622 * padded_num_vertices. Because the modulus is always padded_num_vertices, this
623 * mode only supports a modulus that is a power of 2 times 1, 3, 5, 7, or 9.
624 * The shift field specifies the power of two, while the extra_flags field
625 * specifies the odd number. If shift = n and extra_flags = m, then the modulus
626 * is (2m + 1) * 2^n. As an example, if num_vertices = 70, then as computed
627 * above, padded_num_vertices = 9 * 2^3, so we should set extra_flags = 4 and
628 * shift = 3. Note that we must exactly follow the hardware algorithm used to
629 * get padded_num_vertices in order to correctly implement per-vertex
630 * attributes.
631 *
632 * 3. Divide the linear_id by a constant. In order to correctly implement
633 * instance divisors, we have to divide linear_id by padded_num_vertices times
634 * the user-specified divisor. So first we compute padded_num_vertices, again
635 * following the exact same algorithm that the hardware uses, then multiply it
636 * by the GL-level divisor to get the hardware-level divisor. This case is
637 * further divided into two more cases. If the hardware-level divisor is a
638 * power of two, then we just need to shift. The shift amount is specified by
639 * the shift field, so that the hardware-level divisor is just 2^shift.
640 *
641 * If it isn't a power of two, then we have to divide by an arbitrary integer.
642 * For that, we use the well-known technique of multiplying by an approximation
643 * of the inverse. The driver must compute the magic multiplier and shift
644 * amount, and then the hardware does the multiplication and shift. The
645 * hardware and driver also use the "round-down" optimization as described in
646 * http://ridiculousfish.com/files/faster_unsigned_division_by_constants.pdf.
647 * The hardware further assumes the multiplier is between 2^31 and 2^32, so the
648 * high bit is implicitly set to 1 even though it is set to 0 by the driver --
649 * presumably this simplifies the hardware multiplier a little. The hardware
650 * first multiplies linear_id by the multiplier and takes the high 32 bits,
651 * then applies the round-down correction if extra_flags = 1, then finally
652 * shifts right by the shift field.
653 *
654 * There are some differences between ridiculousfish's algorithm and the Mali
655 * hardware algorithm, which means that the reference code from ridiculousfish
656 * doesn't always produce the right constants. Mali does not use the pre-shift
657 * optimization, since that would make a hardware implementation slower (it
658 * would have to always do the pre-shift, multiply, and post-shift operations).
659 * It also forces the multiplier to be at least 2^31, which means that the
660 * exponent is entirely fixed, so there is no trial-and-error. Altogether,
661 * given the divisor d, the algorithm the driver must follow is:
662 *
663 * 1. Set shift = floor(log2(d)).
664 * 2. Compute m = ceil(2^(shift + 32) / d) and e = 2^(shift + 32) % d.
665 * 3. If e <= 2^shift, then we need to use the round-down algorithm. Set
666 * magic_divisor = m - 1 and extra_flags = 1.
667 * 4. Otherwise, set magic_divisor = m and extra_flags = 0.
668 *
669 * Unrelated to instancing/actual attributes, images (the OpenCL kind) are
670 * implemented as special attributes, denoted by MALI_ATTR_IMAGE. For images,
671 * let shift=extra_flags=0. Stride is set to the image format's bytes-per-pixel
672 * (*NOT the row stride*). Size is set to the size of the image itself.
673 *
674 * Special internal attributes and varyings (gl_VertexID, gl_FrontFacing, etc)
675 * use particular fixed addresses with modified structures.
676 */
677
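/* A sketch of steps 1-4 above for the NPOT divide case: given the
 * hardware-level divisor d (padded_num_vertices times the GL-level divisor,
 * and not a power of two -- that case uses a plain shift), derive
 * magic_divisor, shift and extra_flags. This is illustrative, not the
 * driver's actual helper. */

static inline void
pan_example_npot_divide(u32 d, u32 *magic_divisor, u32 *shift, u32 *extra_flags)
{
        /* 1. shift = floor(log2(d)) */
        u32 s = 31 - __builtin_clz(d);

        /* 2. m = ceil(2^(shift + 32) / d), e = 2^(shift + 32) % d */
        u64 num = 1ull << (s + 32);
        u64 m = (num + d - 1) / d;
        u64 e = num % d;

        if (e <= (1ull << s)) {
                /* 3. Round-down variant */
                *magic_divisor = (u32) (m - 1);
                *extra_flags = 1;
        } else {
                /* 4. Plain variant */
                *magic_divisor = (u32) m;
                *extra_flags = 0;
        }

        *shift = s;
}
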
678 enum mali_attr_mode {
679 MALI_ATTR_UNUSED = 0,
680 MALI_ATTR_LINEAR = 1,
681 MALI_ATTR_POT_DIVIDE = 2,
682 MALI_ATTR_MODULO = 3,
683 MALI_ATTR_NPOT_DIVIDE = 4,
684 MALI_ATTR_IMAGE = 5,
685 };
686
687 /* Pseudo-addresses for gl_VertexID, gl_InstanceID, gl_FragCoord, gl_FrontFacing */
688
689 #define MALI_ATTR_VERTEXID (0x22)
690 #define MALI_ATTR_INSTANCEID (0x24)
691 #define MALI_VARYING_FRAG_COORD (0x25)
692 #define MALI_VARYING_FRONT_FACING (0x26)
693
694 /* This magic "pseudo-address" is used as `elements` to implement
695 * gl_PointCoord. When read from a fragment shader, it generates a point
696 * coordinate per the OpenGL ES 2.0 specification. Flipped coordinate spaces
697 * require an affine transformation in the shader. */
698
699 #define MALI_VARYING_POINT_COORD (0x61)
700
701 /* Used for comparison to check if an address is special. Mostly a guess, but
702 * it doesn't really matter. */
703
704 #define MALI_RECORD_SPECIAL (0x100)
705
706 union mali_attr {
707 /* This is used for actual attributes. */
708 struct {
709 /* The bottom 3 bits are the mode */
710 mali_ptr elements : 64 - 8;
711 u32 shift : 5;
712 u32 extra_flags : 3;
713 u32 stride;
714 u32 size;
715 };
716 /* The entry after an NPOT_DIVIDE entry has this format. It stores
717 * extra information that wouldn't fit in a normal entry.
718 */
719 struct {
720 u32 unk; /* = 0x20 */
721 u32 magic_divisor;
722 u32 zero;
723 /* This is the original, GL-level divisor. */
724 u32 divisor;
725 };
726 } __attribute__((packed));
727
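/* A sketch of an instanced per-vertex attribute record using
 * MALI_ATTR_MODULO, following the num_vertices = 70 example worked through
 * above (padded_num_vertices = 9 * 2^3, so shift = 3 and extra_flags = 4).
 * buffer_va, stride and size are assumed to come from the caller. */

static inline union mali_attr
pan_example_modulo_attr(mali_ptr buffer_va, u32 stride, u32 size)
{
        union mali_attr attr;

        /* The low 3 bits of the address field carry the mode */
        attr.elements = buffer_va | MALI_ATTR_MODULO;
        attr.shift = 3;        /* power-of-two part of the modulus: 2^3 */
        attr.extra_flags = 4;  /* odd part: 2 * 4 + 1 = 9 */
        attr.stride = stride;
        attr.size = size;

        return attr;
}
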
728 struct mali_attr_meta {
729 /* Vertex buffer index */
730 u8 index;
731
732 unsigned unknown1 : 2;
733 unsigned format : 22;
734
735 /* When packing multiple attributes in a buffer, offset addresses by
736 * this value. Obscurely, this is signed. */
737 int32_t src_offset;
738 } __attribute__((packed));
739
740 #define FBD_MASK (~0x3f)
741
742 /* MFBD, rather than SFBD */
743 #define MALI_MFBD (0x1)
744
745 /* ORed into an MFBD address to specify the fbx section is included */
746 #define MALI_MFBD_TAG_EXTRA (0x2)
747
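/* A sketch of tagging a framebuffer descriptor address with the bits above
 * before it is written into a payload, and of recovering the untagged
 * address. Whether the fbx/extra section is present decides TAG_EXTRA. */

static inline mali_ptr
pan_example_tag_mfbd(mali_ptr mfbd, bool has_extra)
{
        return mfbd | MALI_MFBD | (has_extra ? MALI_MFBD_TAG_EXTRA : 0);
}

/* The untagged descriptor address is recovered by masking off the tag bits:
 *
 *    mali_ptr fbd = tagged & FBD_MASK;
 */
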
748 /* On Bifrost, these fields are the same between the vertex and tiler payloads.
749 * They also seem to be the same between Bifrost and Midgard. They're shared in
750 * fused payloads.
751 */
752
753 /* Applies to unknown_draw */
754
755 #define MALI_DRAW_INDEXED_UINT8 (0x10)
756 #define MALI_DRAW_INDEXED_UINT16 (0x20)
757 #define MALI_DRAW_INDEXED_UINT32 (0x30)
758 #define MALI_DRAW_INDEXED_SIZE (0x30)
759 #define MALI_DRAW_INDEXED_SHIFT (4)
760
761 #define MALI_DRAW_VARYING_SIZE (0x100)
762
763 /* Set to use first vertex as the provoking vertex for flatshading. Clear to
764 * use the last vertex. This is the default in DX and VK, but not in GL. */
765
766 #define MALI_DRAW_FLATSHADE_FIRST (0x800)
767
768 #define MALI_DRAW_PRIMITIVE_RESTART_FIXED_INDEX (0x10000)
769
770 struct mali_vertex_tiler_prefix {
771 /* This is a dynamic bitfield containing the following things in this order:
772 *
773 * - gl_WorkGroupSize.x
774 * - gl_WorkGroupSize.y
775 * - gl_WorkGroupSize.z
776 * - gl_NumWorkGroups.x
777 * - gl_NumWorkGroups.y
778 * - gl_NumWorkGroups.z
779 *
780 * The number of bits allocated for each number is based on the *_shift
781 * fields below. For example, workgroups_y_shift gives the bit that
782 * gl_NumWorkGroups.y starts at, and workgroups_z_shift gives the bit
783 * that gl_NumWorkGroups.z starts at (and therefore one after the bit
784 * that gl_NumWorkGroups.y ends at). The actual value for each gl_*
785 * value is one more than the stored value, since if any of the values
786 * are zero, then there would be no invocations (and hence no job). If
787 * there were 0 bits allocated to a given field, then it must be zero,
788 * and hence the real value is one.
789 *
790 * Vertex jobs reuse the same job dispatch mechanism as compute jobs,
791 * effectively doing glDispatchCompute(1, vertex_count, instance_count)
792 * where vertex count is the number of vertices.
793 */
794 u32 invocation_count;
795
796 /* Bitfield for shifts:
797 *
798 * size_y_shift : 5
799 * size_z_shift : 5
800 * workgroups_x_shift : 6
801 * workgroups_y_shift : 6
802 * workgroups_z_shift : 6
803 * workgroups_x_shift_2 : 4
804 */
805 u32 invocation_shifts;
806
807 u32 draw_mode : 4;
808 u32 unknown_draw : 22;
809
810 /* This is the same as workgroups_x_shift_2 in compute shaders, but
811 * always 5 for vertex jobs and 6 for tiler jobs. I suspect this has
812 * something to do with how many quads get put in the same execution
813 * engine, which is a balance (you don't want to starve the engine, but
814 * you also want to distribute work evenly).
815 */
816 u32 workgroups_x_shift_3 : 6;
817
818
819 /* Negative of min_index. This is used to compute
820 * the unbiased index in tiler/fragment shader runs.
821 *
822 * The hardware adds offset_bias_correction in each run,
823 * so that absent an index bias, the first vertex processed is
824 * genuinely the first vertex (0). But with an index bias,
825 * the first vertex processed is numbered the same as the bias.
826 *
827 * To represent this more conveniently:
828 * unbiased_index = lower_bound_index +
829 * index_bias +
830 * offset_bias_correction
831 *
832 * This is done since the hardware doesn't accept an index_bias
833 * and this allows it to recover the unbiased index.
834 */
835 int32_t offset_bias_correction;
836 u32 zero1;
837
838 /* Like many other strictly nonzero quantities, index_count is stored with
839 * one subtracted. For an indexed cube, this is equal to 35 = 6
840 * faces * 2 triangles/per face * 3 vertices/per triangle - 1. That is,
841 * for an indexed draw, index_count is the number of actual vertices
842 * rendered whereas invocation_count is the number of unique vertices
843 * rendered (the number of times the vertex shader must be invoked).
844 * For non-indexed draws, this is just equal to invocation_count. */
845
846 u32 index_count;
847
848 /* No hidden structure; literally just a pointer to an array of uint
849 * indices (width depends on flags). Thanks, guys, for not making my
850 * life insane for once! NULL for non-indexed draws. */
851
852 u64 indices;
853 } __attribute__((packed));
854
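/* A sketch (not the driver's actual helper) of packing the six biased values
 * into invocation_count/invocation_shifts following the layout documented in
 * mali_vertex_tiler_prefix above. The total bit count is assumed to fit in 32
 * bits, and the workgroups_x_shift_2 field is left at zero here since its
 * exact meaning is not documented in this header. Per the comment above, a
 * vertex job would pass sizes (1, 1, 1) and groups (1, vertex_count,
 * instance_count). */

static inline void
pan_example_pack_invocations(struct mali_vertex_tiler_prefix *prefix,
                             u32 size_x, u32 size_y, u32 size_z,
                             u32 groups_x, u32 groups_y, u32 groups_z)
{
        u32 values[6] = { size_x, size_y, size_z, groups_x, groups_y, groups_z };
        u32 shifts[6];
        u32 packed = 0, cursor = 0;

        for (unsigned i = 0; i < 6; ++i) {
                /* Each value is stored minus one; a zero-width field means 1 */
                u32 biased = values[i] - 1;
                u32 bits = biased ? (32 - __builtin_clz(biased)) : 0;

                shifts[i] = cursor;

                if (biased)
                        packed |= biased << cursor;

                cursor += bits;
        }

        prefix->invocation_count = packed;
        prefix->invocation_shifts =
                (shifts[1] << 0)  |   /* size_y_shift : 5 */
                (shifts[2] << 5)  |   /* size_z_shift : 5 */
                (shifts[3] << 10) |   /* workgroups_x_shift : 6 */
                (shifts[4] << 16) |   /* workgroups_y_shift : 6 */
                (shifts[5] << 22);    /* workgroups_z_shift : 6 */
}
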
855 /* Point size / line width can either be specified as a 32-bit float (for
856 * constant size) or as a 64-bit GPU pointer (for varying size). If a pointer
857 * is selected, by setting the appropriate MALI_DRAW_VARYING_SIZE bit in the tiler
858 * payload, the contents of varying_pointer will be interpreted as an array of
859 * fp16 sizes, one for each vertex. gl_PointSize is therefore implemented by
860 * creating a special MALI_R16F varying writing to varying_pointer. */
861
862 union midgard_primitive_size {
863 float constant;
864 u64 pointer;
865 };
866
867 struct bifrost_tiler_heap_meta {
868 u32 zero;
869 u32 heap_size;
870 /* note: these are just guesses! */
871 mali_ptr tiler_heap_start;
872 mali_ptr tiler_heap_free;
873 mali_ptr tiler_heap_end;
874
875 /* hierarchy weights? but they're still 0 after the job has run... */
876 u32 zeros[10];
877 u32 unk1;
878 u32 unk7e007e;
879 } __attribute__((packed));
880
881 struct bifrost_tiler_meta {
882 u32 tiler_heap_next_start; /* To be written by the GPU */
883 u32 used_hierarchy_mask; /* To be written by the GPU */
884 u16 hierarchy_mask; /* Five values observed: 0xa, 0x14, 0x28, 0x50, 0xa0 */
885 u16 flags;
886 u16 width;
887 u16 height;
888 u64 zero0;
889 mali_ptr tiler_heap_meta;
890 /* TODO what is this used for? */
891 u64 zeros[20];
892 } __attribute__((packed));
893
894 struct bifrost_tiler_only {
895 /* 0x20 */
896 union midgard_primitive_size primitive_size;
897
898 mali_ptr tiler_meta;
899
900 u64 zero1, zero2, zero3, zero4, zero5, zero6;
901 } __attribute__((packed));
902
903 struct mali_vertex_tiler_postfix {
904 u16 gl_enables; // 0x6 on Midgard, 0x2 on Bifrost
905
906 /* Both zero for non-instanced draws. For instanced draws, a
907 * decomposition of padded_num_vertices. See the comments about the
908 * corresponding fields in mali_attr for context. */
909
910 unsigned instance_shift : 5;
911 unsigned instance_odd : 3;
912
913 u8 zero4;
914
915 /* Offset for first vertex in buffer */
916 u32 offset_start;
917
918 u64 zero5;
919
920 /* Zero for vertex jobs. Pointer to the position (gl_Position) varying
921 * output from the vertex shader for tiler jobs.
922 */
923
924 u64 position_varying;
925
926 /* An array of mali_uniform_buffer_meta's. The size is given by the
927 * shader_meta.
928 */
929 u64 uniform_buffers;
930
931 /* On Bifrost, this is a pointer to an array of bifrost_texture_descriptor.
932 * On Midgard, this is a pointer to an array of pointers to the texture
933 * descriptors, number of pointers bounded by number of textures. The
934 * indirection is needed to accommodate varying numbers and sizes of
935 * texture descriptors */
936 u64 textures;
937
938 /* For OpenGL, from what I've seen, this is intimately connected to
939 * texture_meta. cwabbott says this is not the case under Vulkan, hence
940 * why this field is separate (Midgard is Vulkan capable). Pointer to
941 * array of sampler descriptors (which are uniform in size) */
942 u64 sampler_descriptor;
943
944 u64 uniforms;
945 u64 shader;
946 u64 attributes; /* struct attribute_buffer[] */
947 u64 attribute_meta; /* attribute_meta[] */
948 u64 varyings; /* struct attr */
949 u64 varying_meta; /* pointer */
950 u64 viewport;
951 u64 occlusion_counter; /* A single bit as far as I can tell */
952
953 /* On Bifrost, this points directly to a mali_shared_memory structure.
954 * On Midgard, this points to a framebuffer (either SFBD or MFBD as
955 * tagged), which embeds a mali_shared_memory structure */
956 mali_ptr shared_memory;
957 } __attribute__((packed));
958
959 struct midgard_payload_vertex_tiler {
960 struct mali_vertex_tiler_prefix prefix;
961 struct mali_vertex_tiler_postfix postfix;
962
963 union midgard_primitive_size primitive_size;
964 } __attribute__((packed));
965
966 struct bifrost_payload_vertex {
967 struct mali_vertex_tiler_prefix prefix;
968 struct mali_vertex_tiler_postfix postfix;
969 } __attribute__((packed));
970
971 struct bifrost_payload_tiler {
972 struct mali_vertex_tiler_prefix prefix;
973 struct bifrost_tiler_only tiler;
974 struct mali_vertex_tiler_postfix postfix;
975 } __attribute__((packed));
976
977 struct bifrost_payload_fused {
978 struct mali_vertex_tiler_prefix prefix;
979 struct bifrost_tiler_only tiler;
980 struct mali_vertex_tiler_postfix tiler_postfix;
981 u64 padding; /* zero */
982 struct mali_vertex_tiler_postfix vertex_postfix;
983 } __attribute__((packed));
984
985 /* Purposeful off-by-one in width, height fields. For example, a (64, 64)
986 * texture is stored as (63, 63) in these fields. This adjusts for that.
987 * There's an identical pattern in the framebuffer descriptor. Even vertex
988 * count fields work this way, hence the generic name -- integral fields that
989 * are strictly positive generally need this adjustment. */
990
991 #define MALI_POSITIVE(dim) (dim - 1)
992
993 /* 8192x8192 */
994 #define MAX_MIP_LEVELS (13)
995
996 /* Cubemap bloats everything up */
997 #define MAX_CUBE_FACES (6)
998
999 /* For each pointer, there is an address and optionally also a stride */
1000 #define MAX_ELEMENTS (2)
1001
1002 /* Used for lod encoding. Thanks @urjaman for pointing out these routines can
1003 * be cleaned up a lot. */
1004
1005 #define DECODE_FIXED_16(x) ((float) (x / 256.0))
1006
1007 static inline int16_t
1008 FIXED_16(float x, bool allow_negative)
1009 {
1010 /* Clamp inputs, accounting for float error */
1011 float max_lod = (32.0 - (1.0 / 512.0));
1012 float min_lod = allow_negative ? -max_lod : 0.0;
1013
1014 x = ((x > max_lod) ? max_lod : ((x < min_lod) ? min_lod : x));
1015
1016 return (int) (x * 256.0);
1017 }
1018
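/* A quick worked example of the encoding above: an LOD of 2.5 maps to
 * FIXED_16(2.5, false) == 640 (2.5 * 256), and DECODE_FIXED_16(640)
 * recovers 2.5f. */
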
1019 /* From presentations, 16x16 tiles externally. Use shift for fast computation
1020 * of tile numbers. */
1021
1022 #define MALI_TILE_SHIFT 4
1023 #define MALI_TILE_LENGTH (1 << MALI_TILE_SHIFT)
1024
1025 /* Tile coordinates are stored as a compact u32, as only 12 bits are needed for
1026 * each component. Notice that this provides a theoretical upper bound of (1 <<
1027 * 12) = 4096 tiles in each direction, addressing a maximum framebuffer of size
1028 * 65536x65536. Multiplying that together, times another four given that Mali
1029 * framebuffers are 32-bit ARGB8888, means that this upper bound would take 16
1030 * gigabytes of RAM just to store the uncompressed framebuffer itself, let
1031 * alone rendering in real-time to such a buffer.
1032 *
1033 * Nice job, guys.*/
1034
1035 /* From mali_kbase_10969_workaround.c */
1036 #define MALI_X_COORD_MASK 0x00000FFF
1037 #define MALI_Y_COORD_MASK 0x0FFF0000
1038
1039 /* Extract parts of a tile coordinate */
1040
1041 #define MALI_TILE_COORD_X(coord) ((coord) & MALI_X_COORD_MASK)
1042 #define MALI_TILE_COORD_Y(coord) (((coord) & MALI_Y_COORD_MASK) >> 16)
1043
1044 /* Helpers to generate tile coordinates based on the boundary coordinates in
1045 * screen space. So, with the bounds (0, 0) to (128, 128) for the screen, these
1046 * functions would convert it to the bounding tiles (0, 0) to (7, 7).
1047 * Intentional "off-by-one"; finding the tile number is a form of fencepost
1048 * problem. */
1049
1050 #define MALI_MAKE_TILE_COORDS(X, Y) ((X) | ((Y) << 16))
1051 #define MALI_BOUND_TO_TILE(B, bias) ((B - bias) >> MALI_TILE_SHIFT)
1052 #define MALI_COORDINATE_TO_TILE(W, H, bias) MALI_MAKE_TILE_COORDS(MALI_BOUND_TO_TILE(W, bias), MALI_BOUND_TO_TILE(H, bias))
1053 #define MALI_COORDINATE_TO_TILE_MIN(W, H) MALI_COORDINATE_TO_TILE(W, H, 0)
1054 #define MALI_COORDINATE_TO_TILE_MAX(W, H) MALI_COORDINATE_TO_TILE(W, H, 1)
1055
1056 struct mali_payload_fragment {
1057 u32 min_tile_coord;
1058 u32 max_tile_coord;
1059 mali_ptr framebuffer;
1060 } __attribute__((packed));
1061
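/* A sketch of filling the fragment payload's tile bounds for the
 * (0, 0)..(128, 128) screen-space example given above: the minimum becomes
 * tile (0, 0) and the maximum becomes tile (7, 7). The framebuffer pointer is
 * assumed to be supplied (and tagged) by the caller. */

static inline void
pan_example_fragment_tiles(struct mali_payload_fragment *payload,
                           mali_ptr framebuffer)
{
        payload->min_tile_coord = MALI_COORDINATE_TO_TILE_MIN(0, 0);
        payload->max_tile_coord = MALI_COORDINATE_TO_TILE_MAX(128, 128);
        payload->framebuffer = framebuffer;
}
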
1062 /* Single Framebuffer Descriptor */
1063
1064 /* Flags apply to format. With just MSAA_A and MSAA_B, the framebuffer is
1065 * configured for 4x. With MSAA_8, it is configured for 8x. */
1066
1067 #define MALI_SFBD_FORMAT_MSAA_8 (1 << 3)
1068 #define MALI_SFBD_FORMAT_MSAA_A (1 << 4)
1069 #define MALI_SFBD_FORMAT_MSAA_B (1 << 4)
1070 #define MALI_SFBD_FORMAT_SRGB (1 << 5)
1071
1072 /* Fast/slow based on whether all three buffers are cleared at once */
1073
1074 #define MALI_CLEAR_FAST (1 << 18)
1075 #define MALI_CLEAR_SLOW (1 << 28)
1076 #define MALI_CLEAR_SLOW_STENCIL (1 << 31)
1077
1078 /* Configures hierarchical tiling on Midgard for both SFBD/MFBD (embedded
1079 * within the larger framebuffer descriptor). Analogous to
1080 * bifrost_tiler_heap_meta and bifrost_tiler_meta*/
1081
1082 /* See pan_tiler.c for derivation */
1083 #define MALI_HIERARCHY_MASK ((1 << 9) - 1)
1084
1085 /* Flag disabling the tiler for clear-only jobs, with
1086 hierarchical tiling */
1087 #define MALI_TILER_DISABLED (1 << 12)
1088
1089 /* Flag selecting userspace-generated polygon list, for clear-only jobs without
1090 * hierarchical tiling. */
1091 #define MALI_TILER_USER 0xFFF
1092
1093 /* Absent any geometry, the minimum size of the polygon list header */
1094 #define MALI_TILER_MINIMUM_HEADER_SIZE 0x200
1095
1096 struct midgard_tiler_descriptor {
1097 /* Size of the entire polygon list; see pan_tiler.c for the
1098 * computation. It's based on hierarchical tiling */
1099
1100 u32 polygon_list_size;
1101
1102 /* Name known from the replay workaround in the kernel. What exactly is
1103 * flagged here is less known. We do know that (tiler_hierarchy_mask & 0x1ff)
1104 * specifies a mask of hierarchy weights, which explains some of the
1105 * performance mysteries around setting it. We also see the bottom bit
1106 * of tiler_flags set in the kernel, but no comment why.
1107 *
1108 * hierarchy_mask can have the TILER_DISABLED flag */
1109
1110 u16 hierarchy_mask;
1111 u16 flags;
1112
1113 /* See pan_tiler.c for an explanation */
1114 mali_ptr polygon_list;
1115 mali_ptr polygon_list_body;
1116
1117 /* Names based on the symmetry we see with replay jobs, which name these
1118 * explicitly */
1119
1120 mali_ptr heap_start; /* tiler heap_free_address */
1121 mali_ptr heap_end;
1122
1123 /* Hierarchy weights. We know these are weights based on the kernel,
1124 * but I've never seen them be anything other than zero */
1125 u32 weights[8];
1126 };
1127
1128 struct mali_sfbd_format {
1129 /* 0x1 */
1130 unsigned unk1 : 6;
1131
1132 /* mali_channel_swizzle */
1133 unsigned swizzle : 12;
1134
1135 /* MALI_POSITIVE */
1136 unsigned nr_channels : 2;
1137
1138 /* 0x4 */
1139 unsigned unk2 : 6;
1140
1141 enum mali_block_format block : 2;
1142
1143 /* 0xb */
1144 unsigned unk3 : 4;
1145 };
1146
1147 /* Shared structure at the start of framebuffer descriptors, or used bare for
1148 * compute jobs, configuring stack and shared memory */
1149
1150 struct mali_shared_memory {
1151 u32 stack_shift : 4;
1152 u32 unk0 : 28;
1153
1154 /* Configuration for shared memory for compute shaders.
1155 * shared_workgroup_count is logarithmic and may be computed for a
1156 * compute shader using shared memory as:
1157 *
1158 * shared_workgroup_count = MAX2(ceil(log2(count_x)) + ... + ceil(log2(count_z)), 10)
1159 *
1160 * For compute shaders that don't use shared memory, or non-compute
1161 * shaders, this is set to ~0
1162 */
1163
1164 u32 shared_workgroup_count : 5;
1165 u32 shared_unk1 : 3;
1166 u32 shared_shift : 4;
1167 u32 shared_zero : 20;
1168
1169 mali_ptr scratchpad;
1170
1171 /* For compute shaders, the RAM backing of workgroup-shared memory. For
1172 * fragment shaders on Bifrost, apparently multisampling locations */
1173
1174 mali_ptr shared_memory;
1175 mali_ptr unknown1;
1176 } __attribute__((packed));
1177
1178 /* Configures multisampling on Bifrost fragment jobs */
1179
1180 struct bifrost_multisampling {
1181 u64 zero1;
1182 u64 zero2;
1183 mali_ptr sample_locations;
1184 u64 zero4;
1185 } __attribute__((packed));
1186
1187 struct mali_single_framebuffer {
1188 struct mali_shared_memory shared_memory;
1189 struct mali_sfbd_format format;
1190
1191 u32 clear_flags;
1192 u32 zero2;
1193
1194 /* Purposeful off-by-one in these fields should be accounted for by the
1195 * MALI_POSITIVE macro */
1196
1197 u16 width;
1198 u16 height;
1199
1200 u32 zero3[4];
1201 mali_ptr checksum;
1202 u32 checksum_stride;
1203 u32 zero5;
1204
1205 /* By default, the framebuffer is upside down from OpenGL's
1206 * perspective. Set framebuffer to the end and negate the stride to
1207 * flip in the Y direction */
1208
1209 mali_ptr framebuffer;
1210 int32_t stride;
1211
1212 u32 zero4;
1213
1214 /* Depth and stencil buffers are interleaved, it appears, as they are
1215 * set to the same address in captures. Both fields set to zero if the
1216 * buffer is not being cleared. Depending on GL_ENABLE magic, you might
1217 * get a zero enable despite the buffer being present; that still is
1218 * disabled. */
1219
1220 mali_ptr depth_buffer; // not SAME_VA
1221 u32 depth_stride_zero : 4;
1222 u32 depth_stride : 28;
1223 u32 zero7;
1224
1225 mali_ptr stencil_buffer; // not SAME_VA
1226 u32 stencil_stride_zero : 4;
1227 u32 stencil_stride : 28;
1228 u32 zero8;
1229
1230 u32 clear_color_1; // RGBA8888 from glClear, actually used by hardware
1231 u32 clear_color_2; // always equal, but unclear function?
1232 u32 clear_color_3; // always equal, but unclear function?
1233 u32 clear_color_4; // always equal, but unclear function?
1234
1235 /* Set to zero if not cleared */
1236
1237 float clear_depth_1; // float32, ditto
1238 float clear_depth_2; // float32, ditto
1239 float clear_depth_3; // float32, ditto
1240 float clear_depth_4; // float32, ditto
1241
1242 u32 clear_stencil; // Exactly as it appears in OpenGL
1243
1244 u32 zero6[7];
1245
1246 struct midgard_tiler_descriptor tiler;
1247
1248 /* More below this, maybe */
1249 } __attribute__((packed));
1250
1251
1252 #define MALI_MFBD_FORMAT_SRGB (1 << 0)
1253
1254 struct mali_rt_format {
1255 unsigned unk1 : 32;
1256 unsigned unk2 : 3;
1257
1258 unsigned nr_channels : 2; /* MALI_POSITIVE */
1259
1260 unsigned unk3 : 4;
1261 unsigned unk4 : 1;
1262 enum mali_block_format block : 2;
1263 enum mali_msaa msaa : 2;
1264 unsigned flags : 2;
1265
1266 unsigned swizzle : 12;
1267
1268 unsigned zero : 3;
1269
1270 /* Disables MFBD preload. When this bit is set, the render target will
1271 * be cleared every frame. When this bit is clear, the hardware will
1272 * automatically wallpaper the render target back from main memory.
1273 * Unfortunately, MFBD preload is very broken on Midgard, so in
1274 * practice, this is a chicken bit that should always be set.
1275 * Discovered by accident, as all good chicken bits are. */
1276
1277 unsigned no_preload : 1;
1278 } __attribute__((packed));
1279
1280 /* Flags for afbc.flags and ds_afbc.flags */
1281
1282 #define MALI_AFBC_FLAGS 0x10009
1283
1284 /* Lossless RGB and RGBA colorspace transform */
1285 #define MALI_AFBC_YTR (1 << 17)
1286
1287 struct mali_render_target {
1288 struct mali_rt_format format;
1289
1290 u64 zero1;
1291
1292 struct {
1293 /* Stuff related to ARM Framebuffer Compression. When AFBC is enabled,
1294 * there is an extra metadata buffer that contains 16 bytes per tile.
1295 * The framebuffer needs to be the same size as before, since we don't
1296 * know ahead of time how much space it will take up. The
1297 * framebuffer_stride is set to 0, since the data isn't stored linearly
1298 * anymore.
1299 *
1300 * When AFBC is disabled, these fields are zero.
1301 */
1302
1303 mali_ptr metadata;
1304 u32 stride; // stride in units of tiles
1305 u32 flags; // = 0x20000
1306 } afbc;
1307
1308 mali_ptr framebuffer;
1309
1310 u32 zero2 : 4;
1311 u32 framebuffer_stride : 28; // in units of bytes, row to next
1312 u32 layer_stride; /* For multisample rendering */
1313
1314 u32 clear_color_1; // RGBA8888 from glClear, actually used by hardware
1315 u32 clear_color_2; // always equal, but unclear function?
1316 u32 clear_color_3; // always equal, but unclear function?
1317 u32 clear_color_4; // always equal, but unclear function?
1318 } __attribute__((packed));
1319
1320 /* An optional part of mali_framebuffer. It comes between the main structure
1321 * and the array of render targets. It must be included if any of these are
1322 * enabled:
1323 *
1324 * - Transaction Elimination
1325 * - Depth/stencil
1326 * - TODO: Anything else?
1327 */
1328
1329 /* flags_hi */
1330 #define MALI_EXTRA_PRESENT (0x1)
1331
1332 /* flags_lo */
1333 #define MALI_EXTRA_ZS (0x4)
1334
1335 struct mali_framebuffer_extra {
1336 mali_ptr checksum;
1337 /* Each tile has an 8 byte checksum, so the stride is "width in tiles * 8" */
1338 u32 checksum_stride;
1339
1340 unsigned flags_lo : 4;
1341 enum mali_block_format zs_block : 2;
1342
1343 /* Number of samples in Z/S attachment, MALI_POSITIVE. So zero for
1344 * 1-sample (non-MSAA), 0x3 for MSAA 4x, etc */
1345 unsigned zs_samples : 4;
1346 unsigned flags_hi : 22;
1347
1348 union {
1349 /* Note: AFBC is only allowed for 24/8 combined depth/stencil. */
1350 struct {
1351 mali_ptr depth_stencil_afbc_metadata;
1352 u32 depth_stencil_afbc_stride; // in units of tiles
1353 u32 flags;
1354
1355 mali_ptr depth_stencil;
1356
1357 u64 padding;
1358 } ds_afbc;
1359
1360 struct {
1361 /* Depth becomes depth/stencil in case of combined D/S */
1362 mali_ptr depth;
1363 u32 depth_stride_zero : 4;
1364 u32 depth_stride : 28;
1365 u32 depth_layer_stride;
1366
1367 mali_ptr stencil;
1368 u32 stencil_stride_zero : 4;
1369 u32 stencil_stride : 28;
1370 u32 stencil_layer_stride;
1371 } ds_linear;
1372 };
1373
1374
1375 u32 clear_color_1;
1376 u32 clear_color_2;
1377 u64 zero3;
1378 } __attribute__((packed));
1379
1380 /* Flags for mfbd_flags */
1381
1382 /* Enables writing depth results back to main memory (rather than keeping them
1383 * on-chip in the tile buffer and then discarding) */
1384
1385 #define MALI_MFBD_DEPTH_WRITE (1 << 10)
1386
1387 /* The MFBD contains the extra mali_framebuffer_extra section */
1388
1389 #define MALI_MFBD_EXTRA (1 << 13)
1390
1391 struct mali_framebuffer {
1392 union {
1393 struct mali_shared_memory shared_memory;
1394 struct bifrost_multisampling msaa;
1395 };
1396
1397 /* 0x20 */
1398 u16 width1, height1;
1399 u32 zero3;
1400 u16 width2, height2;
1401 u32 unk1 : 19; // = 0x01000
1402 u32 rt_count_1 : 3; // off-by-one (use MALI_POSITIVE)
1403 u32 unk2 : 2; // = 0
1404 u32 rt_count_2 : 3; // no off-by-one
1405 u32 zero4 : 5;
1406 /* 0x30 */
1407 u32 clear_stencil : 8;
1408 u32 mfbd_flags : 24; // = 0x100
1409 float clear_depth;
1410
1411 union {
1412 struct midgard_tiler_descriptor tiler;
1413 struct {
1414 mali_ptr tiler_meta;
1415 u32 zeros[16];
1416 };
1417 };
1418
1419 /* optional: struct mali_framebuffer_extra extra */
1420 /* struct mali_render_target rts[] */
1421 } __attribute__((packed));
1422
1423 #endif /* __PANFROST_JOB_H__ */