pan/mdg: Fix discard encoding
[mesa.git] / src / panfrost / include / panfrost-job.h
1 /*
2 * © Copyright 2017-2018 Alyssa Rosenzweig
3 * © Copyright 2017-2018 Connor Abbott
4 * © Copyright 2017-2018 Lyude Paul
 * © Copyright 2019 Collabora, Ltd.
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the "Software"),
9 * to deal in the Software without restriction, including without limitation
10 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
11 * and/or sell copies of the Software, and to permit persons to whom the
12 * Software is furnished to do so, subject to the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the next
15 * paragraph) shall be included in all copies or substantial portions of the
16 * Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
21 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
23 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
24 * SOFTWARE.
25 *
26 */
27
28 #ifndef __PANFROST_JOB_H__
29 #define __PANFROST_JOB_H__
30
31 #include <stdint.h>
32 #include <stdbool.h>
33 #include <inttypes.h>
34
/* Linux-kernel-style integer shorthands used throughout the descriptors.
 * mali_ptr holds a GPU-visible address; always 64 bits wide. */
typedef uint8_t u8;
typedef uint16_t u16;
typedef uint32_t u32;
typedef uint64_t u64;
typedef uint64_t mali_ptr;
40
/* How the non-dominant blend operand is derived from the dominant one
 * (see struct mali_blend_mode). Semantics reverse-engineered; names
 * suggest a mirror of the dominant factor vs. a constant zero. */
enum mali_nondominant_mode {
        MALI_BLEND_NON_MIRROR = 0,
        MALI_BLEND_NON_ZERO = 1
};
45
/* Which operand (source or destination) the dominant blend factor applies
 * to (see struct mali_blend_mode). */
enum mali_dominant_blend {
        MALI_BLEND_DOM_SOURCE = 0,
        MALI_BLEND_DOM_DESTINATION = 1
};
50
/* Blend factor applied to the dominant operand. Can be complemented via
 * mali_blend_mode.complement_dominant (presumably giving the ONE_MINUS_*
 * variants — TODO confirm). UNK values are not reverse-engineered. */
enum mali_dominant_factor {
        MALI_DOMINANT_UNK0 = 0,
        MALI_DOMINANT_ZERO = 1,
        MALI_DOMINANT_SRC_COLOR = 2,
        MALI_DOMINANT_DST_COLOR = 3,
        MALI_DOMINANT_UNK4 = 4,
        MALI_DOMINANT_SRC_ALPHA = 5,
        MALI_DOMINANT_DST_ALPHA = 6,
        MALI_DOMINANT_CONSTANT = 7,
};
61
/* Modifier field of mali_blend_mode (clip_modifier). Exact semantics
 * beyond the names are not known; UNK0 is unobserved/unidentified. */
enum mali_blend_modifier {
        MALI_BLEND_MOD_UNK0 = 0,
        MALI_BLEND_MOD_NORMAL = 1,
        MALI_BLEND_MOD_SOURCE_ONE = 2,
        MALI_BLEND_MOD_DEST_ONE = 3,
};
68
/* Packed fixed-function blend mode (12 bits total). The hardware encodes a
 * blend function in terms of a "dominant" factor applied to one operand
 * (source or destination), with the other, "non-dominant", operand derived
 * from it. Layout reverse-engineered; do not reorder fields or change
 * widths — this is a hardware bit layout. */
struct mali_blend_mode {
        enum mali_blend_modifier clip_modifier : 2;
        unsigned unused_0 : 1;
        unsigned negate_source : 1;

        /* Which operand the dominant factor applies to */
        enum mali_dominant_blend dominant : 1;

        /* How the other operand's factor is derived */
        enum mali_nondominant_mode nondominant_mode : 1;

        unsigned unused_1 : 1;

        unsigned negate_dest : 1;

        /* Dominant factor, optionally complemented (1 - factor) */
        enum mali_dominant_factor dominant_factor : 3;
        unsigned complement_dominant : 1;
} __attribute__((packed));
85
/* Compressed per-pixel formats. Each of these formats expands to one to four
 * floating-point or integer numbers, as defined by the OpenGL specification.
 * There are various places in OpenGL where the user can specify a compressed
 * format in memory, which all use the same 8-bit enum in the various
 * descriptors, although different hardware units support different formats.
 */

/* The top 3 bits specify how the bits of each component are interpreted. */

/* e.g. ETC2_RGB8 */
#define MALI_FORMAT_COMPRESSED (0 << 5)

/* e.g. R11F_G11F_B10F */
#define MALI_FORMAT_SPECIAL (2 << 5)

/* signed normalized, e.g. RGBA8_SNORM */
#define MALI_FORMAT_SNORM (3 << 5)

/* e.g. RGBA8UI */
#define MALI_FORMAT_UINT (4 << 5)

/* e.g. RGBA8 and RGBA32F */
#define MALI_FORMAT_UNORM (5 << 5)

/* e.g. RGBA8I and RGBA16F */
#define MALI_FORMAT_SINT (6 << 5)

/* These formats seem to largely duplicate the others. They're used at least
 * for Bifrost framebuffer output.
 */
#define MALI_FORMAT_SPECIAL2 (7 << 5)
#define MALI_EXTRACT_TYPE(fmt) ((fmt) & 0xe0)

/* If the high 3 bits are 3 to 6 these two bits say how many components
 * there are. Argument fully parenthesized so expression arguments
 * (e.g. `a | b`) expand correctly. */
#define MALI_NR_CHANNELS(n) (((n) - 1) << 3)
#define MALI_EXTRACT_CHANNELS(fmt) ((((fmt) >> 3) & 3) + 1)

/* If the high 3 bits are 3 to 6, then the low 3 bits say how big each
 * component is, except the special MALI_CHANNEL_FLOAT which overrides what the
 * bits mean.
 */

#define MALI_CHANNEL_4 2

#define MALI_CHANNEL_8 3

#define MALI_CHANNEL_16 4

#define MALI_CHANNEL_32 5

/* For MALI_FORMAT_SINT it means a half-float (e.g. RG16F). For
 * MALI_FORMAT_UNORM, it means a 32-bit float.
 */
#define MALI_CHANNEL_FLOAT 7
/* Argument parenthesized; was unhygienic for expressions like `a | b`. */
#define MALI_EXTRACT_BITS(fmt) ((fmt) & 0x7)
143
/* The raw Midgard blend payload can either be an equation or a shader
 * address, depending on the context */

union midgard_blend {
        /* GPU address of a blend shader, when one is used */
        mali_ptr shader;

        /* Otherwise, a fixed-function equation plus the blend constant */
        struct {
                struct mali_blend_equation_packed equation;
                float constant;
        };
};
155
/* Per-render-target Midgard blend descriptor: flags, padding, and either a
 * fixed-function equation or a blend shader pointer (see midgard_blend). */
struct midgard_blend_rt {
        struct mali_blend_flags_packed flags;
        u32 zero;
        union midgard_blend blend;
} __attribute__((packed));
161
/* On Bifrost systems (all MRT), each render target gets one of these
 * descriptors */

/* Type of the fragment shader's output for a render target, as consumed by
 * the blend unit (see bifrost_blend_rt.shader_type). */
enum bifrost_shader_type {
        BIFROST_BLEND_F16 = 0,
        BIFROST_BLEND_F32 = 1,
        BIFROST_BLEND_I32 = 2,
        BIFROST_BLEND_U32 = 3,
        BIFROST_BLEND_I16 = 4,
        BIFROST_BLEND_U16 = 5,
};

#define BIFROST_MAX_RENDER_TARGET_COUNT 8
175
/* Per-render-target blend descriptor on Bifrost. */
struct bifrost_blend_rt {
        /* This is likely an analogue of the flags on
         * midgard_blend_rt */

        u16 flags; // = 0x200

        /* Single-channel blend constants are encoded in a sort of
         * fixed-point. Basically, the float is mapped to a byte, becoming
         * a high byte, and then the lower-byte is added for precision.
         * For the original float f:
         *
         * f = (constant_hi / 255) + (constant_lo / 65535)
         *
         * constant_hi = int(f * 255)
         *   (NOTE(review): previously documented as int(f / 255), which
         *    contradicts the formula for f above — verify)
         * constant_lo = 65535*f - (65535/255) * constant_hi
         */
        u16 constant;

        struct mali_blend_equation_packed equation;

        /*
         * - 0x19 normally
         * - 0x3 when this slot is unused (everything else is 0 except the index)
         * - 0x11 when this is the fourth slot (and it's used)
         * - 0 when there is a blend shader
         */
        u16 unk2;

        /* increments from 0 to 3 */
        u16 index;

        union {
                struct {
                        /* So far, I've only seen:
                         * - R001 for 1-component formats
                         * - RG01 for 2-component formats
                         * - RGB1 for 3-component formats
                         * - RGBA for 4-component formats
                         */
                        u32 swizzle : 12;
                        enum mali_format format : 8;

                        /* Type of the shader output variable. Note, this can
                         * be different from the format.
                         * enum bifrost_shader_type
                         */
                        u32 zero1 : 4;
                        u32 shader_type : 3;
                        u32 zero2 : 5;
                };

                /* Only the low 32 bits of the blend shader are stored, the
                 * high 32 bits are implicitly the same as the original shader.
                 * According to the kernel driver, the program counter for
                 * shaders is actually only 24 bits, so shaders cannot cross
                 * the 2^24-byte boundary, and neither can the blend shader.
                 * The blob handles this by allocating a 2^24 byte pool for
                 * shaders, and making sure that any blend shaders are stored
                 * in the same pool as the original shader. The kernel will
                 * make sure this allocation is aligned to 2^24 bytes.
                 */
                u32 shader;
        };
} __attribute__((packed));
240
/* Possible values for job_descriptor_size */

#define MALI_JOB_32 0
#define MALI_JOB_64 1

/* Header shared by every job descriptor. Jobs are chained via next_job;
 * dependency indices refer to the job_index of other jobs in the chain. */
struct mali_job_descriptor_header {
        u32 exception_status;
        u32 first_incomplete_task;
        u64 fault_pointer;
        u8 job_descriptor_size : 1;     /* MALI_JOB_32 or MALI_JOB_64 */
        enum mali_job_type job_type : 7;
        u8 job_barrier : 1;
        u8 unknown_flags : 7;
        u16 job_index;
        u16 job_dependency_index_1;
        u16 job_dependency_index_2;
        u64 next_job;                   /* GPU address of the next job, or 0 */
} __attribute__((packed));
259
/* Details about write_value from panfrost igt tests which use it as a generic
 * dword write primitive */

#define MALI_WRITE_VALUE_ZERO 3

/* Payload of a WRITE_VALUE job: writes to the given GPU address. With
 * value_descriptor = MALI_WRITE_VALUE_ZERO the destination is zeroed;
 * other descriptor values are not captured here. */
struct mali_payload_write_value {
        u64 address;
        u32 value_descriptor;
        u32 reserved;
        u64 immediate;
} __attribute__((packed));
271
272 /*
273 * Mali Attributes
274 *
275 * This structure lets the attribute unit compute the address of an attribute
276 * given the vertex and instance ID. Unfortunately, the way this works is
277 * rather complicated when instancing is enabled.
278 *
279 * To explain this, first we need to explain how compute and vertex threads are
280 * dispatched. This is a guess (although a pretty firm guess!) since the
281 * details are mostly hidden from the driver, except for attribute instancing.
282 * When a quad is dispatched, it receives a single, linear index. However, we
283 * need to translate that index into a (vertex id, instance id) pair, or a
284 * (local id x, local id y, local id z) triple for compute shaders (although
285 * vertex shaders and compute shaders are handled almost identically).
286 * Focusing on vertex shaders, one option would be to do:
287 *
288 * vertex_id = linear_id % num_vertices
289 * instance_id = linear_id / num_vertices
290 *
291 * but this involves a costly division and modulus by an arbitrary number.
292 * Instead, we could pad num_vertices. We dispatch padded_num_vertices *
293 * num_instances threads instead of num_vertices * num_instances, which results
294 * in some "extra" threads with vertex_id >= num_vertices, which we have to
295 * discard. The more we pad num_vertices, the more "wasted" threads we
296 * dispatch, but the division is potentially easier.
297 *
298 * One straightforward choice is to pad num_vertices to the next power of two,
299 * which means that the division and modulus are just simple bit shifts and
300 * masking. But the actual algorithm is a bit more complicated. The thread
301 * dispatcher has special support for dividing by 3, 5, 7, and 9, in addition
302 * to dividing by a power of two. This is possibly using the technique
303 * described in patent US20170010862A1. As a result, padded_num_vertices can be
304 * 1, 3, 5, 7, or 9 times a power of two. This results in less wasted threads,
305 * since we need less padding.
306 *
307 * padded_num_vertices is picked by the hardware. The driver just specifies the
308 * actual number of vertices. At least for Mali G71, the first few cases are
309 * given by:
310 *
311 * num_vertices | padded_num_vertices
312 * 3 | 4
313 * 4-7 | 8
314 * 8-11 | 12 (3 * 4)
315 * 12-15 | 16
316 * 16-19 | 20 (5 * 4)
317 *
318 * Note that padded_num_vertices is a multiple of four (presumably because
319 * threads are dispatched in groups of 4). Also, padded_num_vertices is always
320 * at least one more than num_vertices, which seems like a quirk of the
321 * hardware. For larger num_vertices, the hardware uses the following
322 * algorithm: using the binary representation of num_vertices, we look at the
323 * most significant set bit as well as the following 3 bits. Let n be the
324 * number of bits after those 4 bits. Then we set padded_num_vertices according
325 * to the following table:
326 *
327 * high bits | padded_num_vertices
328 * 1000 | 9 * 2^n
329 * 1001 | 5 * 2^(n+1)
330 * 101x | 3 * 2^(n+2)
331 * 110x | 7 * 2^(n+1)
332 * 111x | 2^(n+4)
333 *
334 * For example, if num_vertices = 70 is passed to glDraw(), its binary
335 * representation is 1000110, so n = 3 and the high bits are 1000, and
336 * therefore padded_num_vertices = 9 * 2^3 = 72.
337 *
338 * The attribute unit works in terms of the original linear_id. if
339 * num_instances = 1, then they are the same, and everything is simple.
340 * However, with instancing things get more complicated. There are four
341 * possible modes, two of them we can group together:
342 *
343 * 1. Use the linear_id directly. Only used when there is no instancing.
344 *
345 * 2. Use the linear_id modulo a constant. This is used for per-vertex
346 * attributes with instancing enabled by making the constant equal
347 * padded_num_vertices. Because the modulus is always padded_num_vertices, this
348 * mode only supports a modulus that is a power of 2 times 1, 3, 5, 7, or 9.
349 * The shift field specifies the power of two, while the extra_flags field
350 * specifies the odd number. If shift = n and extra_flags = m, then the modulus
351 * is (2m + 1) * 2^n. As an example, if num_vertices = 70, then as computed
352 * above, padded_num_vertices = 9 * 2^3, so we should set extra_flags = 4 and
353 * shift = 3. Note that we must exactly follow the hardware algorithm used to
354 * get padded_num_vertices in order to correctly implement per-vertex
355 * attributes.
356 *
357 * 3. Divide the linear_id by a constant. In order to correctly implement
358 * instance divisors, we have to divide linear_id by padded_num_vertices times
 * the user-specified divisor. So first we compute padded_num_vertices, again
360 * following the exact same algorithm that the hardware uses, then multiply it
361 * by the GL-level divisor to get the hardware-level divisor. This case is
362 * further divided into two more cases. If the hardware-level divisor is a
363 * power of two, then we just need to shift. The shift amount is specified by
364 * the shift field, so that the hardware-level divisor is just 2^shift.
365 *
366 * If it isn't a power of two, then we have to divide by an arbitrary integer.
367 * For that, we use the well-known technique of multiplying by an approximation
368 * of the inverse. The driver must compute the magic multiplier and shift
369 * amount, and then the hardware does the multiplication and shift. The
370 * hardware and driver also use the "round-down" optimization as described in
371 * http://ridiculousfish.com/files/faster_unsigned_division_by_constants.pdf.
372 * The hardware further assumes the multiplier is between 2^31 and 2^32, so the
373 * high bit is implicitly set to 1 even though it is set to 0 by the driver --
374 * presumably this simplifies the hardware multiplier a little. The hardware
375 * first multiplies linear_id by the multiplier and takes the high 32 bits,
376 * then applies the round-down correction if extra_flags = 1, then finally
377 * shifts right by the shift field.
378 *
379 * There are some differences between ridiculousfish's algorithm and the Mali
380 * hardware algorithm, which means that the reference code from ridiculousfish
381 * doesn't always produce the right constants. Mali does not use the pre-shift
382 * optimization, since that would make a hardware implementation slower (it
383 * would have to always do the pre-shift, multiply, and post-shift operations).
 * It also forces the multiplier to be at least 2^31, which means that the
385 * exponent is entirely fixed, so there is no trial-and-error. Altogether,
386 * given the divisor d, the algorithm the driver must follow is:
387 *
388 * 1. Set shift = floor(log2(d)).
389 * 2. Compute m = ceil(2^(shift + 32) / d) and e = 2^(shift + 32) % d.
390 * 3. If e <= 2^shift, then we need to use the round-down algorithm. Set
391 * magic_divisor = m - 1 and extra_flags = 1.
392 * 4. Otherwise, set magic_divisor = m and extra_flags = 0.
393 */
394
/* Framebuffer descriptor pointers carry tag bits in their low bits (see
 * MALI_MFBD / MALI_MFBD_TAG_EXTRA below); FBD_MASK recovers the aligned
 * address. */
#define FBD_MASK (~0x3f)

/* MFBD, rather than SFBD */
#define MALI_MFBD (0x1)

/* ORed into an MFBD address to specify the fbx section is included */
#define MALI_MFBD_TAG_EXTRA (0x2)
402
/* On Bifrost, these fields are the same between the vertex and tiler payloads.
 * They also seem to be the same between Bifrost and Midgard. They're shared in
 * fused payloads.
 */

/* Common prefix of vertex/tiler job payloads. */
struct mali_vertex_tiler_prefix {
        struct mali_invocation_packed invocation;
        struct mali_primitive_packed primitive;
} __attribute__((packed));
412
413 /* Point size / line width can either be specified as a 32-bit float (for
414 * constant size) or as a [machine word size]-bit GPU pointer (for varying size). If a pointer
415 * is selected, by setting the appropriate MALI_DRAW_VARYING_SIZE bit in the tiler
 * payload, the contents of varying_pointer will be interpreted as an array of
417 * fp16 sizes, one for each vertex. gl_PointSize is therefore implemented by
418 * creating a special MALI_R16F varying writing to varying_pointer. */
419
/* Point size / line width: either a constant float, or (when the varying-
 * size bit is set in the tiler payload — see comment above) a GPU pointer
 * to per-vertex fp16 sizes. */
union midgard_primitive_size {
        float constant;
        u64 pointer;
};
424
/* Bifrost tiler heap descriptor; pointed to by bifrost_tiler_meta. Field
 * names past heap_size are guesses (see inline note). */
struct bifrost_tiler_heap_meta {
        u32 zero;
        u32 heap_size;
        /* note: these are just guesses! */
        mali_ptr tiler_heap_start;
        mali_ptr tiler_heap_free;
        mali_ptr tiler_heap_end;

        /* hierarchy weights? but they're still 0 after the job has run... */
        u32 zeros[10];
        u32 unk1;
        u32 unk7e007e;  /* presumably always 0x7e007e, per the name — unknown */
} __attribute__((packed));
438
/* Bifrost tiler configuration, referenced from the tiler job payload.
 * Roughly analogous to midgard_tiler_descriptor. */
struct bifrost_tiler_meta {
        u32 tiler_heap_next_start;  /* To be written by the GPU */
        u32 used_hierarchy_mask;  /* To be written by the GPU */
        u16 hierarchy_mask; /* Five values observed: 0xa, 0x14, 0x28, 0x50, 0xa0 */
        u16 flags;
        u16 width;   /* framebuffer dimensions — MALI_POSITIVE encoding not
                      * confirmed here; verify against callers */
        u16 height;
        u64 zero0;
        mali_ptr tiler_heap_meta;
        /* TODO what is this used for? */
        u64 zeros[20];
} __attribute__((packed));
451
/* Midgard vertex/tiler job payload: shared prefix, draw descriptor, then
 * the point/line primitive size. */
struct midgard_payload_vertex_tiler {
        struct mali_vertex_tiler_prefix prefix;
        struct mali_draw_packed postfix;
        union midgard_primitive_size primitive_size;
} __attribute__((packed));
457
/* Bifrost vertex job payload: no primitive size, unlike the tiler job. */
struct bifrost_payload_vertex {
        struct mali_vertex_tiler_prefix prefix;
        struct mali_draw_packed postfix;
} __attribute__((packed));
462
/* Bifrost tiler job payload: carries the primitive size and a pointer to
 * the bifrost_tiler_meta descriptor, with the draw descriptor at the end. */
struct bifrost_payload_tiler {
        struct mali_vertex_tiler_prefix prefix;
        union midgard_primitive_size primitive_size;
        mali_ptr tiler_meta;
        u64 zero1, zero2, zero3, zero4, zero5, zero6;
        struct mali_draw_packed postfix;
} __attribute__((packed));
470
/* Purposeful off-by-one in width, height fields. For example, a (64, 64)
 * texture is stored as (63, 63) in these fields. This adjusts for that.
 * There's an identical pattern in the framebuffer descriptor. Even vertex
 * count fields work this way, hence the generic name -- integral fields that
 * are strictly positive generally need this adjustment.
 *
 * Argument parenthesized so expression arguments expand correctly. */

#define MALI_POSITIVE(dim) ((dim) - 1)

/* 8192x8192 */
#define MAX_MIP_LEVELS (13)

/* Cubemap bloats everything up */
#define MAX_CUBE_FACES (6)

/* For each pointer, there is an address and optionally also a stride */
#define MAX_ELEMENTS (2)
487
/* Used for lod encoding. Thanks @urjaman for pointing out these routines can
 * be cleaned up a lot.
 *
 * LODs are stored as 8.8 fixed point: low byte is the fraction, high byte
 * the integer part. Macro argument parenthesized so expression arguments
 * expand correctly. */

#define DECODE_FIXED_16(x) ((float) ((x) / 256.0))

/* Encode a float LOD as signed 8.8 fixed point, clamping to the
 * representable range. allow_negative permits negative LODs (e.g. for a
 * LOD bias); otherwise the result is clamped to be non-negative. */
static inline int16_t
FIXED_16(float x, bool allow_negative)
{
        /* Clamp inputs, accounting for float error */
        float max_lod = (32.0 - (1.0 / 512.0));
        float min_lod = allow_negative ? -max_lod : 0.0;

        x = ((x > max_lod) ? max_lod : ((x < min_lod) ? min_lod : x));

        /* Cast matches the return type; the clamped range always fits */
        return (int16_t) (x * 256.0);
}
504
/* From presentations, 16x16 tiles externally. Use shift for fast computation
 * of tile numbers. */

#define MALI_TILE_SHIFT 4
#define MALI_TILE_LENGTH (1 << MALI_TILE_SHIFT)

/* Tile coordinates are stored as a compact u32, as only 12 bits are needed to
 * each component. Notice that this provides a theoretical upper bound of (1 <<
 * 12) = 4096 tiles in each direction, addressing a maximum framebuffer of size
 * 65536x65536. Multiplying that together, times another four given that Mali
 * framebuffers are 32-bit ARGB8888, means that this upper bound would take 16
 * gigabytes of RAM just to store the uncompressed framebuffer itself, let
 * alone rendering in real-time to such a buffer.
 *
 * Nice job, guys.*/

/* From mali_kbase_10969_workaround.c */
#define MALI_X_COORD_MASK 0x00000FFF
#define MALI_Y_COORD_MASK 0x0FFF0000

/* Extract parts of a tile coordinate */

#define MALI_TILE_COORD_X(coord) ((coord) & MALI_X_COORD_MASK)
#define MALI_TILE_COORD_Y(coord) (((coord) & MALI_Y_COORD_MASK) >> 16)

/* Helpers to generate tile coordinates based on the boundary coordinates in
 * screen space. So, with the bounds (0, 0) to (128, 128) for the screen, these
 * functions would convert it to the bounding tiles (0, 0) to (7, 7).
 * Intentional "off-by-one"; finding the tile number is a form of fencepost
 * problem. Arguments of MALI_BOUND_TO_TILE are parenthesized so expression
 * arguments expand correctly. */

#define MALI_MAKE_TILE_COORDS(X, Y) ((X) | ((Y) << 16))
#define MALI_BOUND_TO_TILE(B, bias) (((B) - (bias)) >> MALI_TILE_SHIFT)
#define MALI_COORDINATE_TO_TILE(W, H, bias) MALI_MAKE_TILE_COORDS(MALI_BOUND_TO_TILE(W, bias), MALI_BOUND_TO_TILE(H, bias))
#define MALI_COORDINATE_TO_TILE_MIN(W, H) MALI_COORDINATE_TO_TILE(W, H, 0)
#define MALI_COORDINATE_TO_TILE_MAX(W, H) MALI_COORDINATE_TO_TILE(W, H, 1)
541
/* Payload of a fragment job: the inclusive tile bounds to render, and a
 * pointer to the framebuffer descriptor (SFBD or MFBD; the pointer may
 * carry tag bits — see FBD_MASK). */
struct mali_payload_fragment {
        u32 min_tile_coord;
        u32 max_tile_coord;
        mali_ptr framebuffer;
} __attribute__((packed));
547
/* Single Framebuffer Descriptor */

/* Flags apply to format. With just MSAA_A and MSAA_B, the framebuffer is
 * configured for 4x. With MSAA_8, it is configured for 8x. */

#define MALI_SFBD_FORMAT_MSAA_8 (1 << 3)
#define MALI_SFBD_FORMAT_MSAA_A (1 << 4)

/* NOTE(review): identical value to MSAA_A — verify whether MSAA_B should
 * be a distinct bit or whether the aliasing is intentional. */
#define MALI_SFBD_FORMAT_MSAA_B (1 << 4)
#define MALI_SFBD_FORMAT_SRGB (1 << 5)

/* Fast/slow based on whether all three buffers are cleared at once */

#define MALI_CLEAR_FAST (1 << 18)
#define MALI_CLEAR_SLOW (1 << 28)
#define MALI_CLEAR_SLOW_STENCIL (1 << 31)

/* Configures hierarchical tiling on Midgard for both SFBD/MFBD (embedded
 * within the larger framebuffer descriptor). Analogous to
 * bifrost_tiler_heap_meta and bifrost_tiler_meta*/

/* See pan_tiler.c for derivation */
#define MALI_HIERARCHY_MASK ((1 << 9) - 1)

/* Flag disabling the tiler for clear-only jobs, with
 hierarchical tiling */
#define MALI_TILER_DISABLED (1 << 12)

/* Flag selecting userspace-generated polygon list, for clear-only jobs without
 * hierarchical tiling. */
#define MALI_TILER_USER 0xFFF

/* Absent any geometry, the minimum size of the polygon list header */
#define MALI_TILER_MINIMUM_HEADER_SIZE 0x200
581
/* Midgard hierarchical-tiling configuration, embedded within the
 * framebuffer descriptor (both SFBD and MFBD). */
struct midgard_tiler_descriptor {
        /* Size of the entire polygon list; see pan_tiler.c for the
         * computation. It's based on hierarchical tiling */

        u32 polygon_list_size;

        /* Name known from the replay workaround in the kernel. What exactly is
         * flagged here is less known. We do know that (hierarchy_mask & 0x1ff)
         * specifies a mask of hierarchy weights, which explains some of the
         * performance mysteries around setting it. We also see the bottom bit
         * of tiler_flags set in the kernel, but no comment why.
         *
         * hierarchy_mask can have the TILER_DISABLED flag */

        u16 hierarchy_mask;
        u16 flags;

        /* See pan_tiler.c for an explanation */
        mali_ptr polygon_list;
        mali_ptr polygon_list_body;

        /* Names based on the symmetry we see with replay jobs, which name
         * these explicitly */

        mali_ptr heap_start; /* tiler heap_free_address */
        mali_ptr heap_end;

        /* Hierarchy weights. We know these are weights based on the kernel,
         * but I've never seen them be anything other than zero */
        u32 weights[8];
};
613
/* Color buffer format field of the SFBD; rough analogue of mali_rt_format
 * on the MFBD. Inline comments give the values typically observed. */
struct mali_sfbd_format {
        /* 0x1 */
        unsigned unk1 : 6;

        /* mali_channel_swizzle */
        unsigned swizzle : 12;

        /* MALI_POSITIVE */
        unsigned nr_channels : 2;

        /* 0x4 */
        unsigned unk2 : 6;

        enum mali_block_format block : 2;

        /* 0xb */
        unsigned unk3 : 4;
};
632
/* Shared structure at the start of framebuffer descriptors, or used bare for
 * compute jobs, configuring stack and shared memory */

struct mali_shared_memory {
        /* Per-thread stack size encoding — TODO confirm exact formula */
        u32 stack_shift : 4;
        u32 unk0 : 28;

        /* Configuration for shared memory for compute shaders.
         * shared_workgroup_count is logarithmic and may be computed for a
         * compute shader using shared memory as:
         *
         * shared_workgroup_count = MAX2(ceil(log2(count_x)) + ... + ceil(log2(count_z), 10)
         *
         * For compute shaders that don't use shared memory, or non-compute
         * shaders, this is set to ~0
         */

        u32 shared_workgroup_count : 5;
        u32 shared_unk1 : 3;
        u32 shared_shift : 4;
        u32 shared_zero : 20;

        mali_ptr scratchpad;

        /* For compute shaders, the RAM backing of workgroup-shared memory. For
         * fragment shaders on Bifrost, apparently multisampling locations */

        mali_ptr shared_memory;
        mali_ptr unknown1;
} __attribute__((packed));
663
/* Configures multisampling on Bifrost fragment jobs */

struct bifrost_multisampling {
        u64 zero1;
        u64 zero2;
        /* GPU pointer to the sample location table */
        mali_ptr sample_locations;
        u64 zero4;
} __attribute__((packed));
672
/* Single Framebuffer Descriptor (SFBD): single render target, used on older
 * Midgard. Contrast mali_framebuffer (MFBD) below. */
struct mali_single_framebuffer {
        struct mali_shared_memory shared_memory;
        struct mali_sfbd_format format;

        u32 clear_flags;
        u32 zero2;

        /* Purposeful off-by-one in these fields should be accounted for by the
         * MALI_POSITIVE macro */

        u16 width;
        u16 height;

        u32 zero3[4];
        mali_ptr checksum;
        u32 checksum_stride;
        u32 zero5;

        /* By default, the framebuffer is upside down from OpenGL's
         * perspective. Set framebuffer to the end and negate the stride to
         * flip in the Y direction */

        mali_ptr framebuffer;
        int32_t stride;

        u32 zero4;

        /* Depth and stencil buffers are interleaved, it appears, as they are
         * set to the same address in captures. Both fields set to zero if the
         * buffer is not being cleared. Depending on GL_ENABLE magic, you might
         * get a zero enable despite the buffer being present; that still is
         * disabled. */

        mali_ptr depth_buffer; // not SAME_VA
        u32 depth_stride_zero : 4;
        u32 depth_stride : 28;
        u32 zero7;

        mali_ptr stencil_buffer; // not SAME_VA
        u32 stencil_stride_zero : 4;
        u32 stencil_stride : 28;
        u32 zero8;

        u32 clear_color_1; // RGBA8888 from glClear, actually used by hardware
        u32 clear_color_2; // always equal, but unclear function?
        u32 clear_color_3; // always equal, but unclear function?
        u32 clear_color_4; // always equal, but unclear function?

        /* Set to zero if not cleared */

        float clear_depth_1; // float32, ditto
        float clear_depth_2; // float32, ditto
        float clear_depth_3; // float32, ditto
        float clear_depth_4; // float32, ditto

        u32 clear_stencil; // Exactly as it appears in OpenGL

        u32 zero6[7];

        struct midgard_tiler_descriptor tiler;

        /* More below this, maybe */
} __attribute__((packed));
736
737
#define MALI_MFBD_FORMAT_SRGB (1 << 0)

/* Format descriptor of one MFBD render target (64 bits total). */
struct mali_rt_format {
        unsigned unk1 : 32;
        unsigned unk2 : 3;

        unsigned nr_channels : 2; /* MALI_POSITIVE */

        unsigned unk3 : 4;
        unsigned unk4 : 1;
        enum mali_block_format block : 2;
        enum mali_msaa msaa : 2;
        unsigned flags : 2;       /* MALI_MFBD_FORMAT_* */

        unsigned swizzle : 12;

        unsigned zero : 3;

        /* Disables MFBD preload. When this bit is set, the render target will
         * be cleared every frame. When this bit is clear, the hardware will
         * automatically wallpaper the render target back from main memory.
         * Unfortunately, MFBD preload is very broken on Midgard, so in
         * practice, this is a chicken bit that should always be set.
         * Discovered by accident, as all good chicken bits are. */

        unsigned no_preload : 1;
} __attribute__((packed));
765
/* Flags for afbc.flags and ds_afbc.flags */

#define MALI_AFBC_FLAGS 0x10009

/* Lossless RGB and RGBA colorspace transform */
#define MALI_AFBC_YTR (1 << 17)

/* One MFBD render target descriptor; an array of these follows the main
 * mali_framebuffer structure (and the optional extra section). */
struct mali_render_target {
        struct mali_rt_format format;

        u64 zero1;

        struct {
                /* Stuff related to ARM Framebuffer Compression. When AFBC is enabled,
                 * there is an extra metadata buffer that contains 16 bytes per tile.
                 * The framebuffer needs to be the same size as before, since we don't
                 * know ahead of time how much space it will take up. The
                 * framebuffer_stride is set to 0, since the data isn't stored linearly
                 * anymore.
                 *
                 * When AFBC is disabled, these fields are zero.
                 */

                mali_ptr metadata;
                u32 stride; // stride in units of tiles
                u32 flags; // = 0x20000
        } afbc;

        mali_ptr framebuffer;

        u32 zero2 : 4;
        u32 framebuffer_stride : 28; // in units of bytes, row to next
        u32 layer_stride; /* For multisample rendering */

        u32 clear_color_1; // RGBA8888 from glClear, actually used by hardware
        u32 clear_color_2; // always equal, but unclear function?
        u32 clear_color_3; // always equal, but unclear function?
        u32 clear_color_4; // always equal, but unclear function?
} __attribute__((packed));
805
/* An optional part of mali_framebuffer. It comes between the main structure
 * and the array of render targets. It must be included if any of these are
 * enabled:
 *
 * - Transaction Elimination
 * - Depth/stencil
 * - TODO: Anything else?
 */

/* flags_hi */
#define MALI_EXTRA_PRESENT (0x1)

/* flags_lo */
#define MALI_EXTRA_ZS (0x4)

struct mali_framebuffer_extra {
        mali_ptr checksum;
        /* Each tile has an 8 byte checksum, so the stride is "width in tiles * 8" */
        u32 checksum_stride;

        unsigned flags_lo : 4;
        enum mali_block_format zs_block : 2;

        /* Number of samples in Z/S attachment, MALI_POSITIVE. So zero for
         * 1-sample (non-MSAA), 0x3 for MSAA 4x, etc */
        unsigned zs_samples : 4;
        unsigned flags_hi : 22;

        union {
                /* Note: AFBC is only allowed for 24/8 combined depth/stencil. */
                struct {
                        mali_ptr depth_stencil_afbc_metadata;
                        u32 depth_stencil_afbc_stride; // in units of tiles
                        u32 flags;

                        mali_ptr depth_stencil;

                        u64 padding;
                } ds_afbc;

                struct {
                        /* Depth becomes depth/stencil in case of combined D/S */
                        mali_ptr depth;
                        u32 depth_stride_zero : 4;
                        u32 depth_stride : 28;
                        u32 depth_layer_stride;

                        mali_ptr stencil;
                        u32 stencil_stride_zero : 4;
                        u32 stencil_stride : 28;
                        u32 stencil_layer_stride;
                } ds_linear;
        };


        u32 clear_color_1;
        u32 clear_color_2;
        u64 zero3;
} __attribute__((packed));
865
/* Flags for mfbd_flags */

/* Enables writing depth results back to main memory (rather than keeping them
 * on-chip in the tile buffer and then discarding) */

#define MALI_MFBD_DEPTH_WRITE (1 << 10)

/* The MFBD contains the extra mali_framebuffer_extra section */

#define MALI_MFBD_EXTRA (1 << 13)

/* Multiple Framebuffer Descriptor (MFBD): the main framebuffer descriptor
 * on MRT-capable hardware; followed in memory by the optional extra section
 * and the render target array (see trailing comments). */
struct mali_framebuffer {
        union {
                struct mali_shared_memory shared_memory;
                struct bifrost_multisampling msaa;
        };

        /* 0x20 */
        u16 width1, height1;
        u32 zero3;
        u16 width2, height2;
        u32 unk1 : 19; // = 0x01000
        u32 rt_count_1 : 3; // off-by-one (use MALI_POSITIVE)
        u32 unk2 : 2; // = 0
        u32 rt_count_2 : 3; // no off-by-one
        u32 zero4 : 5;
        /* 0x30 */
        u32 clear_stencil : 8;
        u32 mfbd_flags : 24; // = 0x100
        float clear_depth;

        union {
                struct midgard_tiler_descriptor tiler;
                struct {
                        mali_ptr tiler_meta;
                        u32 zeros[16];
                };
        };

        /* optional: struct mali_framebuffer_extra extra */
        /* struct mali_render_target rts[] */
} __attribute__((packed));
908
909 #endif /* __PANFROST_JOB_H__ */