panfrost: XMLify beginning of shader descriptor
[mesa.git] / src / panfrost / include / panfrost-job.h
1 /*
2 * © Copyright 2017-2018 Alyssa Rosenzweig
3 * © Copyright 2017-2018 Connor Abbott
4 * © Copyright 2017-2018 Lyude Paul
 5 * © Copyright 2019 Collabora, Ltd.
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the "Software"),
9 * to deal in the Software without restriction, including without limitation
10 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
11 * and/or sell copies of the Software, and to permit persons to whom the
12 * Software is furnished to do so, subject to the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the next
15 * paragraph) shall be included in all copies or substantial portions of the
16 * Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
21 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
23 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
24 * SOFTWARE.
25 *
26 */
27
28 #ifndef __PANFROST_JOB_H__
29 #define __PANFROST_JOB_H__
30
31 #include <stdint.h>
32 #include <stdbool.h>
33 #include <inttypes.h>
34
35 typedef uint8_t u8;
36 typedef uint16_t u16;
37 typedef uint32_t u32;
38 typedef uint64_t u64;
39 typedef uint64_t mali_ptr;
40
41 /* Applies to tiler_gl_enables */
42
43 #define MALI_OCCLUSION_QUERY (1 << 3)
44 #define MALI_OCCLUSION_PRECISE (1 << 4)
45
46 /* Set for a glFrontFace(GL_CCW) in a Y=0=TOP coordinate system (like Gallium).
 47 * In OpenGL, this would correspond to glFrontFace(GL_CW). Mesa and the blob
48 * disagree about how to do viewport flipping, so the blob actually sets this
49 * for GL_CW but then has a negative viewport stride */
50
51 #define MALI_FRONT_CCW_TOP (1 << 5)
52
53 #define MALI_CULL_FACE_FRONT (1 << 6)
54 #define MALI_CULL_FACE_BACK (1 << 7)
55
56 /* Flags apply to unknown2_3? */
57
58 #define MALI_HAS_MSAA (1 << 0)
59
60 /* Execute fragment shader per-sample if set (e.g. to implement gl_SampleID
61 * reads) */
62 #define MALI_PER_SAMPLE (1 << 2)
63 #define MALI_CAN_DISCARD (1 << 5)
64
65 /* Applies on SFBD systems, specifying that programmable blending is in use */
66 #define MALI_HAS_BLEND_SHADER (1 << 6)
67
/* func is a mali_func comparison function, packed into bits 8..10 of
 * unknown2_3. Arguments are fully parenthesized so compound expressions
 * (e.g. a bitwise-OR of flags) expand correctly. */
#define MALI_DEPTH_FUNC(func) ((func) << 8)
#define MALI_GET_DEPTH_FUNC(flags) (((flags) >> 8) & 0x7)
#define MALI_DEPTH_FUNC_MASK MALI_DEPTH_FUNC(0x7)
72
73 #define MALI_DEPTH_WRITEMASK (1 << 11)
74
75 #define MALI_DEPTH_CLIP_NEAR (1 << 12)
76 #define MALI_DEPTH_CLIP_FAR (1 << 13)
77
78 /* Next flags to unknown2_4 */
79 #define MALI_STENCIL_TEST (1 << 0)
80
81 #define MALI_ALPHA_TO_COVERAGE (1 << 1)
82
83 #define MALI_SFBD_ENABLE (1 << 4)
84 #define MALI_SFBD_SRGB (1 << 8)
85 #define MALI_NO_DITHER (1 << 9)
86 #define MALI_DEPTH_RANGE_A (1 << 12)
87 #define MALI_DEPTH_RANGE_B (1 << 13)
88 #define MALI_NO_MSAA (1 << 14)
89
90 #define MALI_MASK_R (1 << 0)
91 #define MALI_MASK_G (1 << 1)
92 #define MALI_MASK_B (1 << 2)
93 #define MALI_MASK_A (1 << 3)
94
95 enum mali_nondominant_mode {
96 MALI_BLEND_NON_MIRROR = 0,
97 MALI_BLEND_NON_ZERO = 1
98 };
99
100 enum mali_dominant_blend {
101 MALI_BLEND_DOM_SOURCE = 0,
102 MALI_BLEND_DOM_DESTINATION = 1
103 };
104
105 enum mali_dominant_factor {
106 MALI_DOMINANT_UNK0 = 0,
107 MALI_DOMINANT_ZERO = 1,
108 MALI_DOMINANT_SRC_COLOR = 2,
109 MALI_DOMINANT_DST_COLOR = 3,
110 MALI_DOMINANT_UNK4 = 4,
111 MALI_DOMINANT_SRC_ALPHA = 5,
112 MALI_DOMINANT_DST_ALPHA = 6,
113 MALI_DOMINANT_CONSTANT = 7,
114 };
115
116 enum mali_blend_modifier {
117 MALI_BLEND_MOD_UNK0 = 0,
118 MALI_BLEND_MOD_NORMAL = 1,
119 MALI_BLEND_MOD_SOURCE_ONE = 2,
120 MALI_BLEND_MOD_DEST_ONE = 3,
121 };
122
 123 struct mali_blend_mode {
        /* NOTE(review): reverse-engineered fixed-function blend descriptor
         * (12 bits, packed). Field semantics below are inferred from names
         * and the enums above -- confirm against hardware behaviour. */
 124         enum mali_blend_modifier clip_modifier : 2;
 125         unsigned unused_0 : 1;
 126         unsigned negate_source : 1;
 127 
        /* Whether source or destination supplies the dominant factor
         * (see enum mali_dominant_blend) */
 128         enum mali_dominant_blend dominant : 1;
 129 
 130         enum mali_nondominant_mode nondominant_mode : 1;
 131 
 132         unsigned unused_1 : 1;
 133 
 134         unsigned negate_dest : 1;
 135 
        /* Factor for the dominant side (see enum mali_dominant_factor);
         * complement_dominant presumably selects the one-minus form --
         * TODO confirm */
 136         enum mali_dominant_factor dominant_factor : 3;
 137         unsigned complement_dominant : 1;
 138 } __attribute__((packed));
139
140 /* Compressed per-pixel formats. Each of these formats expands to one to four
141 * floating-point or integer numbers, as defined by the OpenGL specification.
142 * There are various places in OpenGL where the user can specify a compressed
143 * format in memory, which all use the same 8-bit enum in the various
144 * descriptors, although different hardware units support different formats.
145 */
146
147 /* The top 3 bits specify how the bits of each component are interpreted. */
148
149 /* e.g. ETC2_RGB8 */
150 #define MALI_FORMAT_COMPRESSED (0 << 5)
151
152 /* e.g. R11F_G11F_B10F */
153 #define MALI_FORMAT_SPECIAL (2 << 5)
154
155 /* signed normalized, e.g. RGBA8_SNORM */
156 #define MALI_FORMAT_SNORM (3 << 5)
157
158 /* e.g. RGBA8UI */
159 #define MALI_FORMAT_UINT (4 << 5)
160
161 /* e.g. RGBA8 and RGBA32F */
162 #define MALI_FORMAT_UNORM (5 << 5)
163
164 /* e.g. RGBA8I and RGBA16F */
165 #define MALI_FORMAT_SINT (6 << 5)
166
167 /* These formats seem to largely duplicate the others. They're used at least
168 * for Bifrost framebuffer output.
169 */
170 #define MALI_FORMAT_SPECIAL2 (7 << 5)
171 #define MALI_EXTRACT_TYPE(fmt) ((fmt) & 0xe0)
172
173 /* If the high 3 bits are 3 to 6 these two bits say how many components
174 * there are.
175 */
/* Encodes channel count (MALI_POSITIVE-style off-by-one) into bits 3..4.
 * The argument is parenthesized so compound expressions expand correctly. */
#define MALI_NR_CHANNELS(n) (((n) - 1) << 3)
177 #define MALI_EXTRACT_CHANNELS(fmt) ((((fmt) >> 3) & 3) + 1)
178
179 /* If the high 3 bits are 3 to 6, then the low 3 bits say how big each
180 * component is, except the special MALI_CHANNEL_FLOAT which overrides what the
181 * bits mean.
182 */
183
184 #define MALI_CHANNEL_4 2
185
186 #define MALI_CHANNEL_8 3
187
188 #define MALI_CHANNEL_16 4
189
190 #define MALI_CHANNEL_32 5
191
192 /* For MALI_FORMAT_SINT it means a half-float (e.g. RG16F). For
193 * MALI_FORMAT_UNORM, it means a 32-bit float.
194 */
195 #define MALI_CHANNEL_FLOAT 7
/* Extracts the low 3 bits (per-channel size code). The argument is
 * parenthesized so compound expressions expand correctly. */
#define MALI_EXTRACT_BITS(fmt) ((fmt) & 0x7)
197
198 /* The raw Midgard blend payload can either be an equation or a shader
199 * address, depending on the context */
200
201 union midgard_blend {
202 mali_ptr shader;
203
204 struct {
205 struct mali_blend_equation_packed equation;
206 float constant;
207 };
208 };
209
210 struct midgard_blend_rt {
211 struct mali_blend_flags_packed flags;
212 u32 zero;
213 union midgard_blend blend;
214 } __attribute__((packed));
215
216 /* On Bifrost systems (all MRT), each render target gets one of these
217 * descriptors */
218
219 enum bifrost_shader_type {
220 BIFROST_BLEND_F16 = 0,
221 BIFROST_BLEND_F32 = 1,
222 BIFROST_BLEND_I32 = 2,
223 BIFROST_BLEND_U32 = 3,
224 BIFROST_BLEND_I16 = 4,
225 BIFROST_BLEND_U16 = 5,
226 };
227
228 #define BIFROST_MAX_RENDER_TARGET_COUNT 8
229
230 struct bifrost_blend_rt {
231 /* This is likely an analogue of the flags on
232 * midgard_blend_rt */
233
234 u16 flags; // = 0x200
235
236 /* Single-channel blend constants are encoded in a sort of
237 * fixed-point. Basically, the float is mapped to a byte, becoming
238 * a high byte, and then the lower-byte is added for precision.
239 * For the original float f:
240 *
241 * f = (constant_hi / 255) + (constant_lo / 65535)
242 *
243 * constant_hi = int(f / 255)
244 * constant_lo = 65535*f - (65535/255) * constant_hi
245 */
246 u16 constant;
247
248 struct mali_blend_equation_packed equation;
249
250 /*
251 * - 0x19 normally
252 * - 0x3 when this slot is unused (everything else is 0 except the index)
253 * - 0x11 when this is the fourth slot (and it's used)
254 * - 0 when there is a blend shader
255 */
256 u16 unk2;
257
258 /* increments from 0 to 3 */
259 u16 index;
260
261 union {
262 struct {
263 /* So far, I've only seen:
264 * - R001 for 1-component formats
265 * - RG01 for 2-component formats
266 * - RGB1 for 3-component formats
267 * - RGBA for 4-component formats
268 */
269 u32 swizzle : 12;
270 enum mali_format format : 8;
271
272 /* Type of the shader output variable. Note, this can
273 * be different from the format.
274 * enum bifrost_shader_type
275 */
276 u32 zero1 : 4;
277 u32 shader_type : 3;
278 u32 zero2 : 5;
279 };
280
281 /* Only the low 32 bits of the blend shader are stored, the
282 * high 32 bits are implicitly the same as the original shader.
283 * According to the kernel driver, the program counter for
284 * shaders is actually only 24 bits, so shaders cannot cross
285 * the 2^24-byte boundary, and neither can the blend shader.
286 * The blob handles this by allocating a 2^24 byte pool for
287 * shaders, and making sure that any blend shaders are stored
288 * in the same pool as the original shader. The kernel will
289 * make sure this allocation is aligned to 2^24 bytes.
290 */
291 u32 shader;
292 };
293 } __attribute__((packed));
294
295 /* Descriptor for the shader. Following this is at least one, up to four blend
296 * descriptors for each active render target */
297
 298 struct mali_shader_meta {
 299         struct mali_shader_packed shader;
 300 
 301         union {
 302                 struct mali_bifrost_properties_packed bifrost_props;
 303                 struct mali_midgard_properties_packed midgard_props;
 304         };
 305 
 306         /* Same as glPolygonOffset() arguments; GL passes (factor, units),
 307          * matching depth_factor/depth_units here */
 307         float depth_units;
 308         float depth_factor;
 309 
 310         u32 unknown2_2;
 311 
 312         /* Generated from SAMPLE_COVERAGE_VALUE and SAMPLE_COVERAGE_INVERT. See
 313          * 13.8.3 ("Multisample Fragment Operations") in the OpenGL ES 3.2
 314          * specification. Only matters when multisampling is enabled. */
 315         u16 coverage_mask;
 316 
 317         u16 unknown2_3;
 318 
 319         u8 stencil_mask_front;
 320         u8 stencil_mask_back;
 321         u16 unknown2_4;
 322 
 323         struct mali_stencil_packed stencil_front;
 324         struct mali_stencil_packed stencil_back;
 325 
 326         union {
 327                 struct mali_preload_packed bifrost_preload;
 328                 struct {
 329                         u32 unknown2_7;
 330                 } midgard2;
 331         };
 332 
 333         u32 padding;
 334 
 335         /* Blending information for the older non-MRT Midgard HW. Check for
 336          * MALI_HAS_BLEND_SHADER to decide how to interpret.
 337          */
 338 
 339         union midgard_blend blend;
 340 } __attribute__((packed));
341
342 /* This only concerns hardware jobs */
343
344 /* Possible values for job_descriptor_size */
345
346 #define MALI_JOB_32 0
347 #define MALI_JOB_64 1
348
 349 struct mali_job_descriptor_header {
        /* NOTE(review): fault fields are presumably written back by the
         * hardware on job failure -- inferred from names, confirm against
         * the kbase kernel driver */
 350         u32 exception_status;
 351         u32 first_incomplete_task;
 352         u64 fault_pointer;
        /* MALI_JOB_32 or MALI_JOB_64, selecting descriptor pointer width */
 353         u8 job_descriptor_size : 1;
 354         enum mali_job_type job_type : 7;
 355         u8 job_barrier : 1;
 356         u8 unknown_flags : 7;
        /* Jobs in a chain are identified by index; a job may wait on up to
         * two other jobs, named by their indices */
 357         u16 job_index;
 358         u16 job_dependency_index_1;
 359         u16 job_dependency_index_2;
        /* GPU pointer to the next job header in the chain; zero terminates
         * the chain (per the name -- TODO confirm) */
 360         u64 next_job;
 361 } __attribute__((packed));
362
363 /* Details about write_value from panfrost igt tests which use it as a generic
364 * dword write primitive */
365
366 #define MALI_WRITE_VALUE_ZERO 3
367
368 struct mali_payload_write_value {
369 u64 address;
370 u32 value_descriptor;
371 u32 reserved;
372 u64 immediate;
373 } __attribute__((packed));
374
375 /*
376 * Mali Attributes
377 *
378 * This structure lets the attribute unit compute the address of an attribute
379 * given the vertex and instance ID. Unfortunately, the way this works is
380 * rather complicated when instancing is enabled.
381 *
382 * To explain this, first we need to explain how compute and vertex threads are
383 * dispatched. This is a guess (although a pretty firm guess!) since the
384 * details are mostly hidden from the driver, except for attribute instancing.
385 * When a quad is dispatched, it receives a single, linear index. However, we
386 * need to translate that index into a (vertex id, instance id) pair, or a
387 * (local id x, local id y, local id z) triple for compute shaders (although
388 * vertex shaders and compute shaders are handled almost identically).
389 * Focusing on vertex shaders, one option would be to do:
390 *
391 * vertex_id = linear_id % num_vertices
392 * instance_id = linear_id / num_vertices
393 *
394 * but this involves a costly division and modulus by an arbitrary number.
395 * Instead, we could pad num_vertices. We dispatch padded_num_vertices *
396 * num_instances threads instead of num_vertices * num_instances, which results
397 * in some "extra" threads with vertex_id >= num_vertices, which we have to
398 * discard. The more we pad num_vertices, the more "wasted" threads we
399 * dispatch, but the division is potentially easier.
400 *
401 * One straightforward choice is to pad num_vertices to the next power of two,
402 * which means that the division and modulus are just simple bit shifts and
403 * masking. But the actual algorithm is a bit more complicated. The thread
404 * dispatcher has special support for dividing by 3, 5, 7, and 9, in addition
405 * to dividing by a power of two. This is possibly using the technique
406 * described in patent US20170010862A1. As a result, padded_num_vertices can be
407 * 1, 3, 5, 7, or 9 times a power of two. This results in less wasted threads,
408 * since we need less padding.
409 *
410 * padded_num_vertices is picked by the hardware. The driver just specifies the
411 * actual number of vertices. At least for Mali G71, the first few cases are
412 * given by:
413 *
414 * num_vertices | padded_num_vertices
415 * 3 | 4
416 * 4-7 | 8
417 * 8-11 | 12 (3 * 4)
418 * 12-15 | 16
419 * 16-19 | 20 (5 * 4)
420 *
421 * Note that padded_num_vertices is a multiple of four (presumably because
422 * threads are dispatched in groups of 4). Also, padded_num_vertices is always
423 * at least one more than num_vertices, which seems like a quirk of the
424 * hardware. For larger num_vertices, the hardware uses the following
425 * algorithm: using the binary representation of num_vertices, we look at the
426 * most significant set bit as well as the following 3 bits. Let n be the
427 * number of bits after those 4 bits. Then we set padded_num_vertices according
428 * to the following table:
429 *
430 * high bits | padded_num_vertices
431 * 1000 | 9 * 2^n
432 * 1001 | 5 * 2^(n+1)
433 * 101x | 3 * 2^(n+2)
434 * 110x | 7 * 2^(n+1)
435 * 111x | 2^(n+4)
436 *
437 * For example, if num_vertices = 70 is passed to glDraw(), its binary
438 * representation is 1000110, so n = 3 and the high bits are 1000, and
439 * therefore padded_num_vertices = 9 * 2^3 = 72.
440 *
441 * The attribute unit works in terms of the original linear_id. if
442 * num_instances = 1, then they are the same, and everything is simple.
443 * However, with instancing things get more complicated. There are four
444 * possible modes, two of them we can group together:
445 *
446 * 1. Use the linear_id directly. Only used when there is no instancing.
447 *
448 * 2. Use the linear_id modulo a constant. This is used for per-vertex
449 * attributes with instancing enabled by making the constant equal
450 * padded_num_vertices. Because the modulus is always padded_num_vertices, this
451 * mode only supports a modulus that is a power of 2 times 1, 3, 5, 7, or 9.
452 * The shift field specifies the power of two, while the extra_flags field
453 * specifies the odd number. If shift = n and extra_flags = m, then the modulus
454 * is (2m + 1) * 2^n. As an example, if num_vertices = 70, then as computed
455 * above, padded_num_vertices = 9 * 2^3, so we should set extra_flags = 4 and
456 * shift = 3. Note that we must exactly follow the hardware algorithm used to
457 * get padded_num_vertices in order to correctly implement per-vertex
458 * attributes.
459 *
460 * 3. Divide the linear_id by a constant. In order to correctly implement
461 * instance divisors, we have to divide linear_id by padded_num_vertices times
462 * to user-specified divisor. So first we compute padded_num_vertices, again
463 * following the exact same algorithm that the hardware uses, then multiply it
464 * by the GL-level divisor to get the hardware-level divisor. This case is
465 * further divided into two more cases. If the hardware-level divisor is a
466 * power of two, then we just need to shift. The shift amount is specified by
467 * the shift field, so that the hardware-level divisor is just 2^shift.
468 *
469 * If it isn't a power of two, then we have to divide by an arbitrary integer.
470 * For that, we use the well-known technique of multiplying by an approximation
471 * of the inverse. The driver must compute the magic multiplier and shift
472 * amount, and then the hardware does the multiplication and shift. The
473 * hardware and driver also use the "round-down" optimization as described in
474 * http://ridiculousfish.com/files/faster_unsigned_division_by_constants.pdf.
475 * The hardware further assumes the multiplier is between 2^31 and 2^32, so the
476 * high bit is implicitly set to 1 even though it is set to 0 by the driver --
477 * presumably this simplifies the hardware multiplier a little. The hardware
478 * first multiplies linear_id by the multiplier and takes the high 32 bits,
479 * then applies the round-down correction if extra_flags = 1, then finally
480 * shifts right by the shift field.
481 *
482 * There are some differences between ridiculousfish's algorithm and the Mali
483 * hardware algorithm, which means that the reference code from ridiculousfish
484 * doesn't always produce the right constants. Mali does not use the pre-shift
485 * optimization, since that would make a hardware implementation slower (it
486 * would have to always do the pre-shift, multiply, and post-shift operations).
 487 * It also forces the multiplier to be at least 2^31, which means that the
488 * exponent is entirely fixed, so there is no trial-and-error. Altogether,
489 * given the divisor d, the algorithm the driver must follow is:
490 *
491 * 1. Set shift = floor(log2(d)).
492 * 2. Compute m = ceil(2^(shift + 32) / d) and e = 2^(shift + 32) % d.
493 * 3. If e <= 2^shift, then we need to use the round-down algorithm. Set
494 * magic_divisor = m - 1 and extra_flags = 1.
495 * 4. Otherwise, set magic_divisor = m and extra_flags = 0.
496 */
497
498 #define FBD_MASK (~0x3f)
499
500 /* MFBD, rather than SFBD */
501 #define MALI_MFBD (0x1)
502
503 /* ORed into an MFBD address to specify the fbx section is included */
504 #define MALI_MFBD_TAG_EXTRA (0x2)
505
506 /* On Bifrost, these fields are the same between the vertex and tiler payloads.
507 * They also seem to be the same between Bifrost and Midgard. They're shared in
508 * fused payloads.
509 */
510
511 /* Applies to unknown_draw */
512
513 #define MALI_DRAW_INDEXED_UINT8 (0x10)
514 #define MALI_DRAW_INDEXED_UINT16 (0x20)
515 #define MALI_DRAW_INDEXED_UINT32 (0x30)
516 #define MALI_DRAW_INDEXED_SIZE (0x30)
517 #define MALI_DRAW_INDEXED_SHIFT (4)
518
519 #define MALI_DRAW_VARYING_SIZE (0x100)
520
521 /* Set to use first vertex as the provoking vertex for flatshading. Clear to
522 * use the last vertex. This is the default in DX and VK, but not in GL. */
523
524 #define MALI_DRAW_FLATSHADE_FIRST (0x800)
525
526 #define MALI_DRAW_PRIMITIVE_RESTART_FIXED_INDEX (0x10000)
527
528 struct mali_vertex_tiler_prefix {
529 /* This is a dynamic bitfield containing the following things in this order:
530 *
531 * - gl_WorkGroupSize.x
532 * - gl_WorkGroupSize.y
533 * - gl_WorkGroupSize.z
534 * - gl_NumWorkGroups.x
535 * - gl_NumWorkGroups.y
536 * - gl_NumWorkGroups.z
537 *
538 * The number of bits allocated for each number is based on the *_shift
539 * fields below. For example, workgroups_y_shift gives the bit that
540 * gl_NumWorkGroups.y starts at, and workgroups_z_shift gives the bit
541 * that gl_NumWorkGroups.z starts at (and therefore one after the bit
542 * that gl_NumWorkGroups.y ends at). The actual value for each gl_*
543 * value is one more than the stored value, since if any of the values
544 * are zero, then there would be no invocations (and hence no job). If
545 * there were 0 bits allocated to a given field, then it must be zero,
546 * and hence the real value is one.
547 *
548 * Vertex jobs reuse the same job dispatch mechanism as compute jobs,
549 * effectively doing glDispatchCompute(1, vertex_count, instance_count)
550 * where vertex count is the number of vertices.
551 */
552 u32 invocation_count;
553
554 /* Bitfield for shifts:
555 *
556 * size_y_shift : 5
557 * size_z_shift : 5
558 * workgroups_x_shift : 6
559 * workgroups_y_shift : 6
560 * workgroups_z_shift : 6
561 * workgroups_x_shift_2 : 4
562 */
563 u32 invocation_shifts;
564
565 u32 draw_mode : 4;
566 u32 unknown_draw : 22;
567
568 /* This is the the same as workgroups_x_shift_2 in compute shaders, but
569 * always 5 for vertex jobs and 6 for tiler jobs. I suspect this has
570 * something to do with how many quads get put in the same execution
571 * engine, which is a balance (you don't want to starve the engine, but
572 * you also want to distribute work evenly).
573 */
574 u32 workgroups_x_shift_3 : 6;
575
576
577 /* Negative of min_index. This is used to compute
578 * the unbiased index in tiler/fragment shader runs.
579 *
580 * The hardware adds offset_bias_correction in each run,
581 * so that absent an index bias, the first vertex processed is
582 * genuinely the first vertex (0). But with an index bias,
583 * the first vertex process is numbered the same as the bias.
584 *
585 * To represent this more conviniently:
586 * unbiased_index = lower_bound_index +
587 * index_bias +
588 * offset_bias_correction
589 *
590 * This is done since the hardware doesn't accept a index_bias
591 * and this allows it to recover the unbiased index.
592 */
593 int32_t offset_bias_correction;
594 u32 zero1;
595
596 /* Like many other strictly nonzero quantities, index_count is
597 * subtracted by one. For an indexed cube, this is equal to 35 = 6
598 * faces * 2 triangles/per face * 3 vertices/per triangle - 1. That is,
599 * for an indexed draw, index_count is the number of actual vertices
600 * rendered whereas invocation_count is the number of unique vertices
601 * rendered (the number of times the vertex shader must be invoked).
602 * For non-indexed draws, this is just equal to invocation_count. */
603
604 u32 index_count;
605
606 /* No hidden structure; literally just a pointer to an array of uint
607 * indices (width depends on flags). Thanks, guys, for not making my
608 * life insane for once! NULL for non-indexed draws. */
609
610 u64 indices;
611 } __attribute__((packed));
612
613 /* Point size / line width can either be specified as a 32-bit float (for
614 * constant size) or as a [machine word size]-bit GPU pointer (for varying size). If a pointer
615 * is selected, by setting the appropriate MALI_DRAW_VARYING_SIZE bit in the tiler
 616 * payload, the contents of varying_pointer will be interpreted as an array of
617 * fp16 sizes, one for each vertex. gl_PointSize is therefore implemented by
618 * creating a special MALI_R16F varying writing to varying_pointer. */
619
620 union midgard_primitive_size {
621 float constant;
622 u64 pointer;
623 };
624
625 struct bifrost_tiler_heap_meta {
626 u32 zero;
627 u32 heap_size;
628 /* note: these are just guesses! */
629 mali_ptr tiler_heap_start;
630 mali_ptr tiler_heap_free;
631 mali_ptr tiler_heap_end;
632
633 /* hierarchy weights? but they're still 0 after the job has run... */
634 u32 zeros[10];
635 u32 unk1;
636 u32 unk7e007e;
637 } __attribute__((packed));
638
639 struct bifrost_tiler_meta {
640 u32 tiler_heap_next_start; /* To be written by the GPU */
641 u32 used_hierarchy_mask; /* To be written by the GPU */
642 u16 hierarchy_mask; /* Five values observed: 0xa, 0x14, 0x28, 0x50, 0xa0 */
643 u16 flags;
644 u16 width;
645 u16 height;
646 u64 zero0;
647 mali_ptr tiler_heap_meta;
648 /* TODO what is this used for? */
649 u64 zeros[20];
650 } __attribute__((packed));
651
652 struct bifrost_tiler_only {
653 /* 0x20 */
654 union midgard_primitive_size primitive_size;
655
656 mali_ptr tiler_meta;
657
658 u64 zero1, zero2, zero3, zero4, zero5, zero6;
659 } __attribute__((packed));
660
 661 struct mali_vertex_tiler_postfix {
 662         u16 gl_enables; // 0x6 on Midgard, 0x2 on Bifrost
 663 
 664         /* Both zero for non-instanced draws. For instanced draws, a
 665          * decomposition of padded_num_vertices. See the comments about the
 666          * corresponding fields in mali_attr for context. */
 667 
 668         unsigned instance_shift : 5;
 669         unsigned instance_odd : 3;
 670 
 671         u8 zero4;
 672 
 673         /* Offset for first vertex in buffer */
 674         u32 offset_start;
 675 
 676         u64 zero5;
 677 
 678         /* Zero for vertex jobs. Pointer to the position (gl_Position) varying
 679          * output from the vertex shader for tiler jobs.
 680          */
 681 
 682         u64 position_varying;
 683 
 684         /* An array of mali_uniform_buffer_meta's. The size is given by the
 685          * shader_meta.
 686          */
 687         u64 uniform_buffers;
 688 
 689         /* On Bifrost, this is a pointer to an array of bifrost_texture_descriptor.
 690          * On Midgard, this is a pointer to an array of pointers to the texture
 691          * descriptors, number of pointers bounded by number of textures. The
 692          * indirection is needed to accommodate varying numbers and sizes of
 693          * texture descriptors */
 694         u64 textures;
 695 
 696         /* For OpenGL, from what I've seen, this is intimately connected to
 697          * texture_meta. cwabbott says this is not the case under Vulkan, hence
 698          * why this field is separate (Midgard is Vulkan capable). Pointer to
 699          * array of sampler descriptors (which are uniform in size) */
 700         u64 sampler_descriptor;
 701 
 702         u64 uniforms;
 703         u64 shader;
 704         u64 attributes; /* struct attribute_buffer[] */
 705         u64 attribute_meta; /* attribute_meta[] */
 706         u64 varyings; /* struct attr */
 707         u64 varying_meta; /* pointer */
 708         u64 viewport;
 709         u64 occlusion_counter; /* A single bit as far as I can tell */
 710 
 711         /* On Bifrost, this points directly to a mali_shared_memory structure.
 712          * On Midgard, this points to a framebuffer (either SFBD or MFBD as
 713          * tagged), which embeds a mali_shared_memory structure */
 714         mali_ptr shared_memory;
 715 } __attribute__((packed));
716
717 struct midgard_payload_vertex_tiler {
718 struct mali_vertex_tiler_prefix prefix;
719 struct mali_vertex_tiler_postfix postfix;
720
721 union midgard_primitive_size primitive_size;
722 } __attribute__((packed));
723
724 struct bifrost_payload_vertex {
725 struct mali_vertex_tiler_prefix prefix;
726 struct mali_vertex_tiler_postfix postfix;
727 } __attribute__((packed));
728
729 struct bifrost_payload_tiler {
730 struct mali_vertex_tiler_prefix prefix;
731 struct bifrost_tiler_only tiler;
732 struct mali_vertex_tiler_postfix postfix;
733 } __attribute__((packed));
734
735 struct bifrost_payload_fused {
736 struct mali_vertex_tiler_prefix prefix;
737 struct bifrost_tiler_only tiler;
738 struct mali_vertex_tiler_postfix tiler_postfix;
739 u64 padding; /* zero */
740 struct mali_vertex_tiler_postfix vertex_postfix;
741 } __attribute__((packed));
742
/* Purposeful off-by-one in width, height fields. For example, a (64, 64)
 * texture is stored as (63, 63) in these fields. This adjusts for that.
 * There's an identical pattern in the framebuffer descriptor. Even vertex
 * count fields work this way, hence the generic name -- integral fields that
 * are strictly positive generally need this adjustment.
 *
 * The argument is parenthesized so expressions like MALI_POSITIVE(1 << n)
 * expand correctly; the unparenthesized form bound the `- 1` to the shift
 * amount instead. */

#define MALI_POSITIVE(dim) ((dim) - 1)
750
751 /* 8192x8192 */
752 #define MAX_MIP_LEVELS (13)
753
754 /* Cubemap bloats everything up */
755 #define MAX_CUBE_FACES (6)
756
757 /* For each pointer, there is an address and optionally also a stride */
758 #define MAX_ELEMENTS (2)
759
/* Used for lod encoding. Thanks @urjaman for pointing out these routines can
 * be cleaned up a lot.
 *
 * Decodes an unsigned 8.8 fixed-point value to a float. The argument is
 * parenthesized so a compound expression is divided as a whole. */

#define DECODE_FIXED_16(x) ((float) ((x) / 256.0))
764
/* Encodes a float LOD as signed 8.8 fixed point, clamping to the
 * representable range: up to 32 minus one half-step, and down to the
 * negation of that bound (or to zero when negatives are disallowed). */
static inline int16_t
FIXED_16(float x, bool allow_negative)
{
        /* Clamp inputs, accounting for float error */
        const float bound = (32.0 - (1.0 / 512.0));
        const float floor_lod = allow_negative ? -bound : 0.0;

        if (x > bound)
                x = bound;
        else if (x < floor_lod)
                x = floor_lod;

        return (int) (x * 256.0);
}
776
/* From presentations, 16x16 tiles externally. Use shift for fast computation
 * of tile numbers. */

#define MALI_TILE_SHIFT 4
#define MALI_TILE_LENGTH (1 << MALI_TILE_SHIFT)

/* Tile coordinates are stored as a compact u32, as only 12 bits are needed to
 * each component. Notice that this provides a theoretical upper bound of (1 <<
 * 12) = 4096 tiles in each direction, addressing a maximum framebuffer of size
 * 65536x65536. Multiplying that together, times another four given that Mali
 * framebuffers are 32-bit ARGB8888, means that this upper bound would take 16
 * gigabytes of RAM just to store the uncompressed framebuffer itself, let
 * alone rendering in real-time to such a buffer.
 *
 * Nice job, guys.*/

/* From mali_kbase_10969_workaround.c */
#define MALI_X_COORD_MASK 0x00000FFF
#define MALI_Y_COORD_MASK 0x0FFF0000

/* Extract parts of a tile coordinate */

#define MALI_TILE_COORD_X(coord) ((coord) & MALI_X_COORD_MASK)
#define MALI_TILE_COORD_Y(coord) (((coord) & MALI_Y_COORD_MASK) >> 16)

/* Helpers to generate tile coordinates based on the boundary coordinates in
 * screen space. So, with the bounds (0, 0) to (128, 128) for the screen, these
 * functions would convert it to the bounding tiles (0, 0) to (7, 7).
 * Intentional "off-by-one"; finding the tile number is a form of fencepost
 * problem. Arguments are parenthesized so compound expressions expand
 * correctly. */

#define MALI_MAKE_TILE_COORDS(X, Y) ((X) | ((Y) << 16))
#define MALI_BOUND_TO_TILE(B, bias) (((B) - (bias)) >> MALI_TILE_SHIFT)
#define MALI_COORDINATE_TO_TILE(W, H, bias) MALI_MAKE_TILE_COORDS(MALI_BOUND_TO_TILE(W, bias), MALI_BOUND_TO_TILE(H, bias))
#define MALI_COORDINATE_TO_TILE_MIN(W, H) MALI_COORDINATE_TO_TILE(W, H, 0)
#define MALI_COORDINATE_TO_TILE_MAX(W, H) MALI_COORDINATE_TO_TILE(W, H, 1)
813
814 struct mali_payload_fragment {
815 u32 min_tile_coord;
816 u32 max_tile_coord;
817 mali_ptr framebuffer;
818 } __attribute__((packed));
819
820 /* Single Framebuffer Descriptor */
821
822 /* Flags apply to format. With just MSAA_A and MSAA_B, the framebuffer is
823 * configured for 4x. With MSAA_8, it is configured for 8x. */
824
825 #define MALI_SFBD_FORMAT_MSAA_8 (1 << 3)
826 #define MALI_SFBD_FORMAT_MSAA_A (1 << 4)
827 #define MALI_SFBD_FORMAT_MSAA_B (1 << 4)
828 #define MALI_SFBD_FORMAT_SRGB (1 << 5)
829
830 /* Fast/slow based on whether all three buffers are cleared at once */
831
832 #define MALI_CLEAR_FAST (1 << 18)
833 #define MALI_CLEAR_SLOW (1 << 28)
834 #define MALI_CLEAR_SLOW_STENCIL (1 << 31)
835
836 /* Configures hierarchical tiling on Midgard for both SFBD/MFBD (embedded
 837 * within the larger framebuffer descriptor). Analogous to
838 * bifrost_tiler_heap_meta and bifrost_tiler_meta*/
839
840 /* See pan_tiler.c for derivation */
841 #define MALI_HIERARCHY_MASK ((1 << 9) - 1)
842
843 /* Flag disabling the tiler for clear-only jobs, with
844 hierarchical tiling */
845 #define MALI_TILER_DISABLED (1 << 12)
846
847 /* Flag selecting userspace-generated polygon list, for clear-only jobs without
 848 * hierarchical tiling. */
849 #define MALI_TILER_USER 0xFFF
850
851 /* Absent any geometry, the minimum size of the polygon list header */
852 #define MALI_TILER_MINIMUM_HEADER_SIZE 0x200
853
struct midgard_tiler_descriptor {
        /* Size of the entire polygon list; see pan_tiler.c for the
         * computation. It's based on hierarchical tiling */

        u32 polygon_list_size;

        /* Name known from the replay workaround in the kernel. What exactly is
         * flagged here is less known. We do know that (hierarchy_mask & 0x1ff)
         * specifies a mask of hierarchy weights, which explains some of the
         * performance mysteries around setting it. We also see the bottom bit
         * of tiler_flags set in the kernel, but no comment why.
         *
         * hierarchy_mask can have the MALI_TILER_DISABLED flag */

        u16 hierarchy_mask;
        u16 flags;

        /* See pan_tiler.c for an explanation */
        mali_ptr polygon_list;
        mali_ptr polygon_list_body;

        /* Names based on the symmetry we see with replay jobs, which name
         * these explicitly */

        mali_ptr heap_start; /* tiler heap_free_address */
        mali_ptr heap_end;

        /* Hierarchy weights. We know these are weights based on the kernel,
         * but I've never seen them be anything other than zero */
        u32 weights[8];
};
885
/* Colour buffer format bits for the Single Framebuffer Descriptor. The
 * unk* fields record the constant values observed in traces. */

struct mali_sfbd_format {
        /* Observed as 0x1 */
        unsigned unk1 : 6;

        /* mali_channel_swizzle */
        unsigned swizzle : 12;

        /* Channel count, encoded off-by-one (MALI_POSITIVE) */
        unsigned nr_channels : 2;

        /* Observed as 0x4 */
        unsigned unk2 : 6;

        enum mali_block_format block : 2;

        /* Observed as 0xb */
        unsigned unk3 : 4;
};
904
/* Shared structure at the start of framebuffer descriptors, or used bare for
 * compute jobs, configuring stack and shared memory */

struct mali_shared_memory {
        /* Encodes the per-thread scratchpad (stack) size backed by the
         * scratchpad pointer below; presumably a log2-style shift --
         * TODO(review): confirm the encoding against callers */
        u32 stack_shift : 4;
        u32 unk0 : 28;

        /* Configuration for shared memory for compute shaders.
         * shared_workgroup_count is logarithmic and may be computed for a
         * compute shader using shared memory as:
         *
         * shared_workgroup_count = MAX2(ceil(log2(count_x)) + ... + ceil(log2(count_z)), 10)
         *
         * For compute shaders that don't use shared memory, or non-compute
         * shaders, this is set to ~0
         */

        u32 shared_workgroup_count : 5;
        u32 shared_unk1 : 3;
        u32 shared_shift : 4;
        u32 shared_zero : 20;

        /* Backing memory for the per-thread stack (see stack_shift) */
        mali_ptr scratchpad;

        /* For compute shaders, the RAM backing of workgroup-shared memory. For
         * fragment shaders on Bifrost, apparently multisampling locations */

        mali_ptr shared_memory;
        mali_ptr unknown1;
} __attribute__((packed));
935
/* Configures multisampling on Bifrost fragment jobs. Occupies the same slot
 * as mali_shared_memory in the framebuffer descriptor (see the union in
 * mali_framebuffer). The zero* fields have only been observed as zero. */

struct bifrost_multisampling {
        u64 zero1;
        u64 zero2;
        /* Pointer to the table of sample positions used for multisampling */
        mali_ptr sample_locations;
        u64 zero4;
} __attribute__((packed));
944
/* The Single Framebuffer Descriptor (SFBD): configures one colour render
 * target plus optional interleaved depth/stencil, clear values, and the
 * embedded tiler descriptor. Compare mali_framebuffer (the MFBD) below. */

struct mali_single_framebuffer {
        struct mali_shared_memory shared_memory;
        struct mali_sfbd_format format;

        /* Presumably takes the MALI_CLEAR_* flags defined above -- confirm */
        u32 clear_flags;
        u32 zero2;

        /* Purposeful off-by-one in these fields should be accounted for by the
         * MALI_DIMENSION macro */

        u16 width;
        u16 height;

        u32 zero3[4];

        /* Checksum buffer and its stride; cf. the checksum fields in
         * mali_framebuffer_extra (transaction elimination) */
        mali_ptr checksum;
        u32 checksum_stride;
        u32 zero5;

        /* By default, the framebuffer is upside down from OpenGL's
         * perspective. Set framebuffer to the end and negate the stride to
         * flip in the Y direction */

        mali_ptr framebuffer;
        int32_t stride;

        u32 zero4;

        /* Depth and stencil buffers are interleaved, it appears, as they are
         * set to the same address in captures. Both fields set to zero if the
         * buffer is not being cleared. Depending on GL_ENABLE magic, you might
         * get a zero enable despite the buffer being present; that still is
         * disabled. */

        mali_ptr depth_buffer; // not SAME_VA
        u32 depth_stride_zero : 4;
        u32 depth_stride : 28;
        u32 zero7;

        mali_ptr stencil_buffer; // not SAME_VA
        u32 stencil_stride_zero : 4;
        u32 stencil_stride : 28;
        u32 zero8;

        u32 clear_color_1; // RGBA8888 from glClear, actually used by hardware
        u32 clear_color_2; // always equal, but unclear function?
        u32 clear_color_3; // always equal, but unclear function?
        u32 clear_color_4; // always equal, but unclear function?

        /* Set to zero if not cleared */

        float clear_depth_1; // float32, ditto
        float clear_depth_2; // float32, ditto
        float clear_depth_3; // float32, ditto
        float clear_depth_4; // float32, ditto

        u32 clear_stencil; // Exactly as it appears in OpenGL

        u32 zero6[7];

        struct midgard_tiler_descriptor tiler;

        /* More below this, maybe */
} __attribute__((packed));
1008
1009
#define MALI_MFBD_FORMAT_SRGB (1 << 0)

/* Render target format in the MFBD; the MFBD analogue of mali_sfbd_format.
 * The unk* fields record the constant values observed in traces. */

struct mali_rt_format {
        unsigned unk1 : 32;
        unsigned unk2 : 3;

        unsigned nr_channels : 2; /* MALI_POSITIVE */

        unsigned unk3 : 4;
        unsigned unk4 : 1;
        enum mali_block_format block : 2;
        enum mali_msaa msaa : 2;
        /* Presumably MALI_MFBD_FORMAT_* (e.g. SRGB above) -- confirm */
        unsigned flags : 2;

        /* mali_channel_swizzle */
        unsigned swizzle : 12;

        unsigned zero : 3;

        /* Disables MFBD preload. When this bit is set, the render target will
         * be cleared every frame. When this bit is clear, the hardware will
         * automatically wallpaper the render target back from main memory.
         * Unfortunately, MFBD preload is very broken on Midgard, so in
         * practice, this is a chicken bit that should always be set.
         * Discovered by accident, as all good chicken bits are. */

        unsigned no_preload : 1;
} __attribute__((packed));
1037
/* Flags for afbc.flags and ds_afbc.flags */

#define MALI_AFBC_FLAGS 0x10009

/* Lossless RGB and RGBA colorspace transform */
#define MALI_AFBC_YTR (1 << 17)

/* Per-render-target section of the MFBD. An array of these follows the main
 * mali_framebuffer (and the optional mali_framebuffer_extra) in memory. */

struct mali_render_target {
        struct mali_rt_format format;

        u64 zero1;

        struct {
                /* Stuff related to ARM Framebuffer Compression. When AFBC is enabled,
                 * there is an extra metadata buffer that contains 16 bytes per tile.
                 * The framebuffer needs to be the same size as before, since we don't
                 * know ahead of time how much space it will take up. The
                 * framebuffer_stride is set to 0, since the data isn't stored linearly
                 * anymore.
                 *
                 * When AFBC is disabled, these fields are zero.
                 */

                mali_ptr metadata;
                u32 stride; // stride in units of tiles
                u32 flags; // = 0x20000
        } afbc;

        mali_ptr framebuffer;

        u32 zero2 : 4;
        u32 framebuffer_stride : 28; // in units of bytes, row to next
        u32 layer_stride; /* For multisample rendering */

        u32 clear_color_1; // RGBA8888 from glClear, actually used by hardware
        u32 clear_color_2; // always equal, but unclear function?
        u32 clear_color_3; // always equal, but unclear function?
        u32 clear_color_4; // always equal, but unclear function?
} __attribute__((packed));
1077
/* An optional part of mali_framebuffer. It comes between the main structure
 * and the array of render targets. It must be included if any of these are
 * enabled:
 *
 * - Transaction Elimination
 * - Depth/stencil
 * - TODO: Anything else?
 */

/* flags_hi */
#define MALI_EXTRA_PRESENT (0x1)

/* flags_lo */
#define MALI_EXTRA_ZS (0x4)

struct mali_framebuffer_extra {
        /* Checksum buffer for transaction elimination */
        mali_ptr checksum;
        /* Each tile has an 8 byte checksum, so the stride is "width in tiles * 8" */
        u32 checksum_stride;

        unsigned flags_lo : 4;  /* MALI_EXTRA_ZS */
        enum mali_block_format zs_block : 2;

        /* Number of samples in Z/S attachment, MALI_POSITIVE. So zero for
         * 1-sample (non-MSAA), 0x3 for MSAA 4x, etc */
        unsigned zs_samples : 4;
        unsigned flags_hi : 22; /* MALI_EXTRA_PRESENT */

        /* AFBC-compressed versus linear depth/stencil storage; presumably
         * selected by zs_block above -- confirm against callers */
        union {
                /* Note: AFBC is only allowed for 24/8 combined depth/stencil. */
                struct {
                        mali_ptr depth_stencil_afbc_metadata;
                        u32 depth_stencil_afbc_stride; // in units of tiles
                        u32 flags; /* MALI_AFBC_* */

                        mali_ptr depth_stencil;

                        u64 padding;
                } ds_afbc;

                struct {
                        /* Depth becomes depth/stencil in case of combined D/S */
                        mali_ptr depth;
                        u32 depth_stride_zero : 4;
                        u32 depth_stride : 28;
                        u32 depth_layer_stride;

                        mali_ptr stencil;
                        u32 stencil_stride_zero : 4;
                        u32 stencil_stride : 28;
                        u32 stencil_layer_stride;
                } ds_linear;
        };

        u32 clear_color_1;
        u32 clear_color_2;
        u64 zero3;
} __attribute__((packed));
1137
/* Flags for mfbd_flags */

/* Enables writing depth results back to main memory (rather than keeping them
 * on-chip in the tile buffer and then discarding) */

#define MALI_MFBD_DEPTH_WRITE (1 << 10)

/* The MFBD contains the extra mali_framebuffer_extra section */

#define MALI_MFBD_EXTRA (1 << 13)

/* The Multiple Framebuffer Descriptor (MFBD). The unions select between the
 * Midgard layout (shared_memory + embedded midgard_tiler_descriptor) and the
 * Bifrost layout (multisampling + tiler_meta pointer). Followed in memory by
 * an optional mali_framebuffer_extra and an array of mali_render_target. */

struct mali_framebuffer {
        union {
                struct mali_shared_memory shared_memory;
                struct bifrost_multisampling msaa;
        };

        /* 0x20 */
        u16 width1, height1;
        u32 zero3;
        u16 width2, height2;
        u32 unk1 : 19; // = 0x01000
        u32 rt_count_1 : 3; // off-by-one (use MALI_POSITIVE)
        u32 unk2 : 2; // = 0
        u32 rt_count_2 : 3; // no off-by-one
        u32 zero4 : 5;
        /* 0x30 */
        u32 clear_stencil : 8;
        u32 mfbd_flags : 24; // = 0x100, plus MALI_MFBD_* flags above
        float clear_depth;

        union {
                struct midgard_tiler_descriptor tiler;
                struct {
                        mali_ptr tiler_meta;
                        u32 zeros[16];
                };
        };

        /* optional: struct mali_framebuffer_extra extra */
        /* struct mali_render_target rts[] */
} __attribute__((packed));
1180
1181 #endif /* __PANFROST_JOB_H__ */