panfrost: Remove 32-bit next_job path
[mesa.git] / src / panfrost / include / panfrost-job.h
1 /*
2 * © Copyright 2017-2018 Alyssa Rosenzweig
3 * © Copyright 2017-2018 Connor Abbott
4 * © Copyright 2017-2018 Lyude Paul
5 * © Copyright 2019 Collabora, Ltd.
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the "Software"),
9 * to deal in the Software without restriction, including without limitation
10 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
11 * and/or sell copies of the Software, and to permit persons to whom the
12 * Software is furnished to do so, subject to the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the next
15 * paragraph) shall be included in all copies or substantial portions of the
16 * Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
21 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
23 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
24 * SOFTWARE.
25 *
26 */
27
28 #ifndef __PANFROST_JOB_H__
29 #define __PANFROST_JOB_H__
30
31 #include <stdint.h>
32 #include <panfrost-misc.h>
33
34 enum mali_job_type {
35 JOB_NOT_STARTED = 0,
36 JOB_TYPE_NULL = 1,
37 JOB_TYPE_WRITE_VALUE = 2,
38 JOB_TYPE_CACHE_FLUSH = 3,
39 JOB_TYPE_COMPUTE = 4,
40 JOB_TYPE_VERTEX = 5,
41 JOB_TYPE_GEOMETRY = 6,
42 JOB_TYPE_TILER = 7,
43 JOB_TYPE_FUSED = 8,
44 JOB_TYPE_FRAGMENT = 9,
45 };
46
47 enum mali_draw_mode {
48 MALI_DRAW_NONE = 0x0,
49 MALI_POINTS = 0x1,
50 MALI_LINES = 0x2,
51 MALI_LINE_STRIP = 0x4,
52 MALI_LINE_LOOP = 0x6,
53 MALI_TRIANGLES = 0x8,
54 MALI_TRIANGLE_STRIP = 0xA,
55 MALI_TRIANGLE_FAN = 0xC,
56 MALI_POLYGON = 0xD,
57 MALI_QUADS = 0xE,
58 MALI_QUAD_STRIP = 0xF,
59
60 /* All other modes invalid */
61 };
62
63 /* Applies to tiler_gl_enables */
64
65 #define MALI_OCCLUSION_QUERY (1 << 3)
66 #define MALI_OCCLUSION_PRECISE (1 << 4)
67
68 /* Set for a glFrontFace(GL_CCW) in a Y=0=TOP coordinate system (like Gallium).
69 * In OpenGL, this would correspond to glFrontFace(GL_CW). Mesa and the blob
70 * disagree about how to do viewport flipping, so the blob actually sets this
71 * for GL_CW but then has a negative viewport stride */
72
73 #define MALI_FRONT_CCW_TOP (1 << 5)
74
75 #define MALI_CULL_FACE_FRONT (1 << 6)
76 #define MALI_CULL_FACE_BACK (1 << 7)
77
78 /* Used in stencil and depth tests */
79
80 enum mali_func {
81 MALI_FUNC_NEVER = 0,
82 MALI_FUNC_LESS = 1,
83 MALI_FUNC_EQUAL = 2,
84 MALI_FUNC_LEQUAL = 3,
85 MALI_FUNC_GREATER = 4,
86 MALI_FUNC_NOTEQUAL = 5,
87 MALI_FUNC_GEQUAL = 6,
88 MALI_FUNC_ALWAYS = 7
89 };
90
91 /* Flags apply to unknown2_3? */
92
93 #define MALI_HAS_MSAA (1 << 0)
94 #define MALI_CAN_DISCARD (1 << 5)
95
96 /* Applies on SFBD systems, specifying that programmable blending is in use */
97 #define MALI_HAS_BLEND_SHADER (1 << 6)
98
99 /* func is mali_func */
100 #define MALI_DEPTH_FUNC(func) (func << 8)
101 #define MALI_GET_DEPTH_FUNC(flags) ((flags >> 8) & 0x7)
102 #define MALI_DEPTH_FUNC_MASK MALI_DEPTH_FUNC(0x7)
103
104 #define MALI_DEPTH_WRITEMASK (1 << 11)
105
106 /* Next flags to unknown2_4 */
107 #define MALI_STENCIL_TEST (1 << 0)
108
109 /* What?! */
110 #define MALI_SAMPLE_ALPHA_TO_COVERAGE_NO_BLEND_SHADER (1 << 1)
111
112 #define MALI_NO_DITHER (1 << 9)
113 #define MALI_DEPTH_RANGE_A (1 << 12)
114 #define MALI_DEPTH_RANGE_B (1 << 13)
115 #define MALI_NO_MSAA (1 << 14)
116
117 /* Stencil test state is all encoded in a single u32, just with a lot of
118 * enums... */
119
120 enum mali_stencil_op {
121 MALI_STENCIL_KEEP = 0,
122 MALI_STENCIL_REPLACE = 1,
123 MALI_STENCIL_ZERO = 2,
124 MALI_STENCIL_INVERT = 3,
125 MALI_STENCIL_INCR_WRAP = 4,
126 MALI_STENCIL_DECR_WRAP = 5,
127 MALI_STENCIL_INCR = 6,
128 MALI_STENCIL_DECR = 7
129 };
130
131 struct mali_stencil_test {
132 unsigned ref : 8;
133 unsigned mask : 8;
134 enum mali_func func : 3;
135 enum mali_stencil_op sfail : 3;
136 enum mali_stencil_op dpfail : 3;
137 enum mali_stencil_op dppass : 3;
138 unsigned zero : 4;
139 } __attribute__((packed));
140
141 #define MALI_MASK_R (1 << 0)
142 #define MALI_MASK_G (1 << 1)
143 #define MALI_MASK_B (1 << 2)
144 #define MALI_MASK_A (1 << 3)
145
146 enum mali_nondominant_mode {
147 MALI_BLEND_NON_MIRROR = 0,
148 MALI_BLEND_NON_ZERO = 1
149 };
150
151 enum mali_dominant_blend {
152 MALI_BLEND_DOM_SOURCE = 0,
153 MALI_BLEND_DOM_DESTINATION = 1
154 };
155
156 enum mali_dominant_factor {
157 MALI_DOMINANT_UNK0 = 0,
158 MALI_DOMINANT_ZERO = 1,
159 MALI_DOMINANT_SRC_COLOR = 2,
160 MALI_DOMINANT_DST_COLOR = 3,
161 MALI_DOMINANT_UNK4 = 4,
162 MALI_DOMINANT_SRC_ALPHA = 5,
163 MALI_DOMINANT_DST_ALPHA = 6,
164 MALI_DOMINANT_CONSTANT = 7,
165 };
166
167 enum mali_blend_modifier {
168 MALI_BLEND_MOD_UNK0 = 0,
169 MALI_BLEND_MOD_NORMAL = 1,
170 MALI_BLEND_MOD_SOURCE_ONE = 2,
171 MALI_BLEND_MOD_DEST_ONE = 3,
172 };
173
174 struct mali_blend_mode {
175 enum mali_blend_modifier clip_modifier : 2;
176 unsigned unused_0 : 1;
177 unsigned negate_source : 1;
178
179 enum mali_dominant_blend dominant : 1;
180
181 enum mali_nondominant_mode nondominant_mode : 1;
182
183 unsigned unused_1 : 1;
184
185 unsigned negate_dest : 1;
186
187 enum mali_dominant_factor dominant_factor : 3;
188 unsigned complement_dominant : 1;
189 } __attribute__((packed));
190
191 struct mali_blend_equation {
192 /* Of type mali_blend_mode */
193 unsigned rgb_mode : 12;
194 unsigned alpha_mode : 12;
195
196 unsigned zero1 : 4;
197
198 /* Corresponds to MALI_MASK_* above and glColorMask arguments */
199
200 unsigned color_mask : 4;
201 } __attribute__((packed));
202
203 /* Used with channel swizzling */
204 enum mali_channel {
205 MALI_CHANNEL_RED = 0,
206 MALI_CHANNEL_GREEN = 1,
207 MALI_CHANNEL_BLUE = 2,
208 MALI_CHANNEL_ALPHA = 3,
209 MALI_CHANNEL_ZERO = 4,
210 MALI_CHANNEL_ONE = 5,
211 MALI_CHANNEL_RESERVED_0 = 6,
212 MALI_CHANNEL_RESERVED_1 = 7,
213 };
214
215 struct mali_channel_swizzle {
216 enum mali_channel r : 3;
217 enum mali_channel g : 3;
218 enum mali_channel b : 3;
219 enum mali_channel a : 3;
220 } __attribute__((packed));
221
222 /* Compressed per-pixel formats. Each of these formats expands to one to four
223 * floating-point or integer numbers, as defined by the OpenGL specification.
224 * There are various places in OpenGL where the user can specify a compressed
225 * format in memory, which all use the same 8-bit enum in the various
226 * descriptors, although different hardware units support different formats.
227 */
228
229 /* The top 3 bits specify how the bits of each component are interpreted. */
230
231 /* e.g. R11F_G11F_B10F */
232 #define MALI_FORMAT_SPECIAL (2 << 5)
233
234 /* signed normalized, e.g. RGBA8_SNORM */
235 #define MALI_FORMAT_SNORM (3 << 5)
236
237 /* e.g. RGBA8UI */
238 #define MALI_FORMAT_UINT (4 << 5)
239
240 /* e.g. RGBA8 and RGBA32F */
241 #define MALI_FORMAT_UNORM (5 << 5)
242
243 /* e.g. RGBA8I and RGBA16F */
244 #define MALI_FORMAT_SINT (6 << 5)
245
246 /* These formats seem to largely duplicate the others. They're used at least
247 * for Bifrost framebuffer output.
248 */
249 #define MALI_FORMAT_SPECIAL2 (7 << 5)
250
251 /* If the high 3 bits are 3 to 6, these two bits say how many components
252 * there are.
253 */
254 #define MALI_NR_CHANNELS(n) ((n - 1) << 3)
255
256 /* If the high 3 bits are 3 to 6, then the low 3 bits say how big each
257 * component is, except the special MALI_CHANNEL_FLOAT which overrides what the
258 * bits mean.
259 */
260
261 #define MALI_CHANNEL_4 2
262
263 #define MALI_CHANNEL_8 3
264
265 #define MALI_CHANNEL_16 4
266
267 #define MALI_CHANNEL_32 5
268
269 /* For MALI_FORMAT_SINT it means a half-float (e.g. RG16F). For
270 * MALI_FORMAT_UNORM, it means a 32-bit float.
271 */
272 #define MALI_CHANNEL_FLOAT 7
273
274 enum mali_format {
275 MALI_RGB565 = MALI_FORMAT_SPECIAL | 0x0,
276 MALI_RGB5_A1_UNORM = MALI_FORMAT_SPECIAL | 0x2,
277 MALI_RGB10_A2_UNORM = MALI_FORMAT_SPECIAL | 0x3,
278 MALI_RGB10_A2_SNORM = MALI_FORMAT_SPECIAL | 0x5,
279 MALI_RGB10_A2UI = MALI_FORMAT_SPECIAL | 0x7,
280 MALI_RGB10_A2I = MALI_FORMAT_SPECIAL | 0x9,
281
282 /* YUV formats */
283 MALI_NV12 = MALI_FORMAT_SPECIAL | 0xc,
284
285 MALI_Z32_UNORM = MALI_FORMAT_SPECIAL | 0xD,
286 MALI_R32_FIXED = MALI_FORMAT_SPECIAL | 0x11,
287 MALI_RG32_FIXED = MALI_FORMAT_SPECIAL | 0x12,
288 MALI_RGB32_FIXED = MALI_FORMAT_SPECIAL | 0x13,
289 MALI_RGBA32_FIXED = MALI_FORMAT_SPECIAL | 0x14,
290 MALI_R11F_G11F_B10F = MALI_FORMAT_SPECIAL | 0x19,
291 MALI_R9F_G9F_B9F_E5F = MALI_FORMAT_SPECIAL | 0x1b,
292 /* Only used for varyings, to indicate the transformed gl_Position */
293 MALI_VARYING_POS = MALI_FORMAT_SPECIAL | 0x1e,
294 /* Only used for varyings, to indicate that the write should be
295 * discarded.
296 */
297 MALI_VARYING_DISCARD = MALI_FORMAT_SPECIAL | 0x1f,
298
299 MALI_R8_SNORM = MALI_FORMAT_SNORM | MALI_NR_CHANNELS(1) | MALI_CHANNEL_8,
300 MALI_R16_SNORM = MALI_FORMAT_SNORM | MALI_NR_CHANNELS(1) | MALI_CHANNEL_16,
301 MALI_R32_SNORM = MALI_FORMAT_SNORM | MALI_NR_CHANNELS(1) | MALI_CHANNEL_32,
302 MALI_RG8_SNORM = MALI_FORMAT_SNORM | MALI_NR_CHANNELS(2) | MALI_CHANNEL_8,
303 MALI_RG16_SNORM = MALI_FORMAT_SNORM | MALI_NR_CHANNELS(2) | MALI_CHANNEL_16,
304 MALI_RG32_SNORM = MALI_FORMAT_SNORM | MALI_NR_CHANNELS(2) | MALI_CHANNEL_32,
305 MALI_RGB8_SNORM = MALI_FORMAT_SNORM | MALI_NR_CHANNELS(3) | MALI_CHANNEL_8,
306 MALI_RGB16_SNORM = MALI_FORMAT_SNORM | MALI_NR_CHANNELS(3) | MALI_CHANNEL_16,
307 MALI_RGB32_SNORM = MALI_FORMAT_SNORM | MALI_NR_CHANNELS(3) | MALI_CHANNEL_32,
308 MALI_RGBA8_SNORM = MALI_FORMAT_SNORM | MALI_NR_CHANNELS(4) | MALI_CHANNEL_8,
309 MALI_RGBA16_SNORM = MALI_FORMAT_SNORM | MALI_NR_CHANNELS(4) | MALI_CHANNEL_16,
310 MALI_RGBA32_SNORM = MALI_FORMAT_SNORM | MALI_NR_CHANNELS(4) | MALI_CHANNEL_32,
311
312 MALI_R8UI = MALI_FORMAT_UINT | MALI_NR_CHANNELS(1) | MALI_CHANNEL_8,
313 MALI_R16UI = MALI_FORMAT_UINT | MALI_NR_CHANNELS(1) | MALI_CHANNEL_16,
314 MALI_R32UI = MALI_FORMAT_UINT | MALI_NR_CHANNELS(1) | MALI_CHANNEL_32,
315 MALI_RG8UI = MALI_FORMAT_UINT | MALI_NR_CHANNELS(2) | MALI_CHANNEL_8,
316 MALI_RG16UI = MALI_FORMAT_UINT | MALI_NR_CHANNELS(2) | MALI_CHANNEL_16,
317 MALI_RG32UI = MALI_FORMAT_UINT | MALI_NR_CHANNELS(2) | MALI_CHANNEL_32,
318 MALI_RGB8UI = MALI_FORMAT_UINT | MALI_NR_CHANNELS(3) | MALI_CHANNEL_8,
319 MALI_RGB16UI = MALI_FORMAT_UINT | MALI_NR_CHANNELS(3) | MALI_CHANNEL_16,
320 MALI_RGB32UI = MALI_FORMAT_UINT | MALI_NR_CHANNELS(3) | MALI_CHANNEL_32,
321 MALI_RGBA8UI = MALI_FORMAT_UINT | MALI_NR_CHANNELS(4) | MALI_CHANNEL_8,
322 MALI_RGBA16UI = MALI_FORMAT_UINT | MALI_NR_CHANNELS(4) | MALI_CHANNEL_16,
323 MALI_RGBA32UI = MALI_FORMAT_UINT | MALI_NR_CHANNELS(4) | MALI_CHANNEL_32,
324
325 MALI_R8_UNORM = MALI_FORMAT_UNORM | MALI_NR_CHANNELS(1) | MALI_CHANNEL_8,
326 MALI_R16_UNORM = MALI_FORMAT_UNORM | MALI_NR_CHANNELS(1) | MALI_CHANNEL_16,
327 MALI_R32_UNORM = MALI_FORMAT_UNORM | MALI_NR_CHANNELS(1) | MALI_CHANNEL_32,
328 MALI_R32F = MALI_FORMAT_UNORM | MALI_NR_CHANNELS(1) | MALI_CHANNEL_FLOAT,
329 MALI_RG8_UNORM = MALI_FORMAT_UNORM | MALI_NR_CHANNELS(2) | MALI_CHANNEL_8,
330 MALI_RG16_UNORM = MALI_FORMAT_UNORM | MALI_NR_CHANNELS(2) | MALI_CHANNEL_16,
331 MALI_RG32_UNORM = MALI_FORMAT_UNORM | MALI_NR_CHANNELS(2) | MALI_CHANNEL_32,
332 MALI_RG32F = MALI_FORMAT_UNORM | MALI_NR_CHANNELS(2) | MALI_CHANNEL_FLOAT,
333 MALI_RGB8_UNORM = MALI_FORMAT_UNORM | MALI_NR_CHANNELS(3) | MALI_CHANNEL_8,
334 MALI_RGB16_UNORM = MALI_FORMAT_UNORM | MALI_NR_CHANNELS(3) | MALI_CHANNEL_16,
335 MALI_RGB32_UNORM = MALI_FORMAT_UNORM | MALI_NR_CHANNELS(3) | MALI_CHANNEL_32,
336 MALI_RGB32F = MALI_FORMAT_UNORM | MALI_NR_CHANNELS(3) | MALI_CHANNEL_FLOAT,
337 MALI_RGBA4_UNORM = MALI_FORMAT_UNORM | MALI_NR_CHANNELS(4) | MALI_CHANNEL_4,
338 MALI_RGBA8_UNORM = MALI_FORMAT_UNORM | MALI_NR_CHANNELS(4) | MALI_CHANNEL_8,
339 MALI_RGBA16_UNORM = MALI_FORMAT_UNORM | MALI_NR_CHANNELS(4) | MALI_CHANNEL_16,
340 MALI_RGBA32_UNORM = MALI_FORMAT_UNORM | MALI_NR_CHANNELS(4) | MALI_CHANNEL_32,
341 MALI_RGBA32F = MALI_FORMAT_UNORM | MALI_NR_CHANNELS(4) | MALI_CHANNEL_FLOAT,
342
343 MALI_R8I = MALI_FORMAT_SINT | MALI_NR_CHANNELS(1) | MALI_CHANNEL_8,
344 MALI_R16I = MALI_FORMAT_SINT | MALI_NR_CHANNELS(1) | MALI_CHANNEL_16,
345 MALI_R32I = MALI_FORMAT_SINT | MALI_NR_CHANNELS(1) | MALI_CHANNEL_32,
346 MALI_R16F = MALI_FORMAT_SINT | MALI_NR_CHANNELS(1) | MALI_CHANNEL_FLOAT,
347 MALI_RG8I = MALI_FORMAT_SINT | MALI_NR_CHANNELS(2) | MALI_CHANNEL_8,
348 MALI_RG16I = MALI_FORMAT_SINT | MALI_NR_CHANNELS(2) | MALI_CHANNEL_16,
349 MALI_RG32I = MALI_FORMAT_SINT | MALI_NR_CHANNELS(2) | MALI_CHANNEL_32,
350 MALI_RG16F = MALI_FORMAT_SINT | MALI_NR_CHANNELS(2) | MALI_CHANNEL_FLOAT,
351 MALI_RGB8I = MALI_FORMAT_SINT | MALI_NR_CHANNELS(3) | MALI_CHANNEL_8,
352 MALI_RGB16I = MALI_FORMAT_SINT | MALI_NR_CHANNELS(3) | MALI_CHANNEL_16,
353 MALI_RGB32I = MALI_FORMAT_SINT | MALI_NR_CHANNELS(3) | MALI_CHANNEL_32,
354 MALI_RGB16F = MALI_FORMAT_SINT | MALI_NR_CHANNELS(3) | MALI_CHANNEL_FLOAT,
355 MALI_RGBA8I = MALI_FORMAT_SINT | MALI_NR_CHANNELS(4) | MALI_CHANNEL_8,
356 MALI_RGBA16I = MALI_FORMAT_SINT | MALI_NR_CHANNELS(4) | MALI_CHANNEL_16,
357 MALI_RGBA32I = MALI_FORMAT_SINT | MALI_NR_CHANNELS(4) | MALI_CHANNEL_32,
358 MALI_RGBA16F = MALI_FORMAT_SINT | MALI_NR_CHANNELS(4) | MALI_CHANNEL_FLOAT,
359
360 MALI_RGBA4 = MALI_FORMAT_SPECIAL2 | 0x8,
361 MALI_RGBA8_2 = MALI_FORMAT_SPECIAL2 | 0xd,
362 MALI_RGB10_A2_2 = MALI_FORMAT_SPECIAL2 | 0xe,
363 };
364
365
366 /* Alpha coverage is encoded as 4-bits (from a clampf), with inversion
367 * literally performing a bitwise invert. This function produces slightly wrong
368 * results and I'm not sure why; some rounding issue I suppose... */
369
370 #define MALI_ALPHA_COVERAGE(clampf) ((uint16_t) (int) (clampf * 15.0f))
371 #define MALI_GET_ALPHA_COVERAGE(nibble) ((float) nibble / 15.0f)
372
373 /* Applies to midgard1.flags */
374
375 /* Should the hardware perform early-Z testing? Normally this should be set
376 * for performance reasons. Clear it if you use discard,
377 * alpha-to-coverage, etc. It's also possible this disables
378 * forward-pixel kill; we're not quite sure which bit is which yet.
379 * TODO: How does this interact with blending? */
380
381 #define MALI_EARLY_Z (1 << 6)
382
383 /* Should the hardware calculate derivatives (via helper invocations)? Set in a
384 * fragment shader that uses texturing or derivative functions */
385
386 #define MALI_HELPER_INVOCATIONS (1 << 7)
387
388 /* Flags denoting the fragment shader's use of tilebuffer readback. If the
389 * shader might read any part of the tilebuffer, set MALI_READS_TILEBUFFER. If
390 * it might read depth/stencil in particular, also set MALI_READS_ZS */
391
392 #define MALI_READS_ZS (1 << 8)
393 #define MALI_READS_TILEBUFFER (1 << 12)
394
395 /* The raw Midgard blend payload can either be an equation or a shader
396 * address, depending on the context */
397
398 union midgard_blend {
399 mali_ptr shader;
400
401 struct {
402 struct mali_blend_equation equation;
403 float constant;
404 };
405 };
406
407 /* We need to load the tilebuffer to blend (i.e. the destination factor is not
408 * ZERO) */
409
410 #define MALI_BLEND_LOAD_TIB (0x1)
411
412 /* A blend shader is used to blend this render target */
413 #define MALI_BLEND_MRT_SHADER (0x2)
414
415 /* On MRT Midgard systems (using an MFBD), each render target gets its own
416 * blend descriptor */
417
418 #define MALI_BLEND_SRGB (0x400)
419
420 /* Dithering is specified here for MFBD, otherwise NO_DITHER for SFBD */
421 #define MALI_BLEND_NO_DITHER (0x800)
422
423 struct midgard_blend_rt {
424 /* Flags base value of 0x200 to enable the render target.
425 * OR with 0x1 for blending (anything other than REPLACE).
426 * OR with 0x2 for programmable blending
427 * OR with MALI_BLEND_SRGB for implicit sRGB
428 */
429
430 u64 flags;
431 union midgard_blend blend;
432 } __attribute__((packed));
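
/* A hedged sketch (not the driver's code) showing how the flag bits above and
 * union midgard_blend combine for a fixed-function blend on one render
 * target. The helper name and parameters are illustrative only. */

static inline struct midgard_blend_rt
pan_example_blend_rt(struct mali_blend_equation eq, float constant,
                     int reads_dest, int srgb)
{
        struct midgard_blend_rt rt = { 0 };

        /* 0x200 enables the render target; OR in extras per the comment above */
        rt.flags = 0x200;

        if (reads_dest)
                rt.flags |= MALI_BLEND_LOAD_TIB;

        if (srgb)
                rt.flags |= MALI_BLEND_SRGB;

        rt.blend.equation = eq;
        rt.blend.constant = constant;

        return rt;
}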
433
434 /* On Bifrost systems (all MRT), each render target gets one of these
435 * descriptors */
436
437 struct bifrost_blend_rt {
438 /* This is likely an analogue of the flags on
439 * midgard_blend_rt */
440
441 u16 flags; // = 0x200
442
443 /* Single-channel blend constants are encoded in a sort of
444 * fixed-point. Basically, the float is mapped to a byte, becoming
445 * a high byte, and then the lower-byte is added for precision.
446 * For the original float f:
447 *
448 * f = (constant_hi / 255) + (constant_lo / 65535)
449 *
450 * constant_hi = int(f * 255)
451 * constant_lo = 65535*f - (65535/255) * constant_hi
452 */
453
454 u16 constant;
455
456 struct mali_blend_equation equation;
457 /*
458 * - 0x19 normally
459 * - 0x3 when this slot is unused (everything else is 0 except the index)
460 * - 0x11 when this is the fourth slot (and it's used)
461 * - 0 when there is a blend shader
462 */
463 u16 unk2;
464 /* increments from 0 to 3 */
465 u16 index;
466
467 union {
468 struct {
469 /* So far, I've only seen:
470 * - R001 for 1-component formats
471 * - RG01 for 2-component formats
472 * - RGB1 for 3-component formats
473 * - RGBA for 4-component formats
474 */
475 u32 swizzle : 12;
476 enum mali_format format : 8;
477
478 /* Type of the shader output variable. Note, this can
479 * be different from the format.
480 *
481 * 0: f16 (mediump float)
482 * 1: f32 (highp float)
483 * 2: i32 (highp int)
484 * 3: u32 (highp uint)
485 * 4: i16 (mediump int)
486 * 5: u16 (mediump uint)
487 */
488 u32 shader_type : 3;
489 u32 zero : 9;
490 };
491
492 /* Only the low 32 bits of the blend shader are stored, the
493 * high 32 bits are implicitly the same as the original shader.
494 * According to the kernel driver, the program counter for
495 * shaders is actually only 24 bits, so shaders cannot cross
496 * the 2^24-byte boundary, and neither can the blend shader.
497 * The blob handles this by allocating a 2^24 byte pool for
498 * shaders, and making sure that any blend shaders are stored
499 * in the same pool as the original shader. The kernel will
500 * make sure this allocation is aligned to 2^24 bytes.
501 */
502 u32 shader;
503 };
504 } __attribute__((packed));
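
/* A minimal sketch of packing the single-channel blend constant per the
 * fixed-point scheme described above, assuming constant_hi occupies the high
 * byte and constant_lo the low byte of the 16-bit field. Illustrative only. */

static inline u16
pan_example_pack_blend_constant(float f)
{
        /* Clamp to the representable [0, 1] range */
        f = (f < 0.0f) ? 0.0f : ((f > 1.0f) ? 1.0f : f);

        unsigned constant_hi = (unsigned) (f * 255.0f);
        int lo = (int) (65535.0f * f - (65535.0f / 255.0f) * constant_hi);

        /* Guard against float error pushing the low byte out of range */
        unsigned constant_lo = (lo < 0) ? 0 : ((lo > 255) ? 255 : lo);

        return (u16) ((constant_hi << 8) | constant_lo);
}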
505
506 /* Descriptor for the shader. Following this is at least one, up to four blend
507 * descriptors for each active render target */
508
509 struct mali_shader_meta {
510 mali_ptr shader;
511 u16 sampler_count;
512 u16 texture_count;
513 u16 attribute_count;
514 u16 varying_count;
515
516 union {
517 struct {
518 u32 uniform_buffer_count : 4;
519 u32 unk1 : 28; // = 0x800000 for vertex, 0x958020 for tiler
520 } bifrost1;
521 struct {
522 unsigned uniform_buffer_count : 4;
523 unsigned flags : 12;
524
525 /* vec4 units */
526 unsigned work_count : 5;
527 unsigned uniform_count : 5;
528 unsigned unknown2 : 6;
529 } midgard1;
530 };
531
532 /* Same as glPolygonOffset() arguments */
533 float depth_units;
534 float depth_factor;
535
536 u32 unknown2_2;
537
538 u16 alpha_coverage;
539 u16 unknown2_3;
540
541 u8 stencil_mask_front;
542 u8 stencil_mask_back;
543 u16 unknown2_4;
544
545 struct mali_stencil_test stencil_front;
546 struct mali_stencil_test stencil_back;
547
548 union {
549 struct {
550 u32 unk3 : 7;
551 /* On Bifrost, some system values are preloaded in
552 * registers R55-R62 by the thread dispatcher prior to
553 * the start of shader execution. This is a bitfield
554 * with one entry for each register saying which
555 * registers need to be preloaded. Right now, the known
556 * values are:
557 *
558 * Vertex/compute:
559 * - R55 : gl_LocalInvocationID.xy
560 * - R56 : gl_LocalInvocationID.z + unknown in high 16 bits
561 * - R57 : gl_WorkGroupID.x
562 * - R58 : gl_WorkGroupID.y
563 * - R59 : gl_WorkGroupID.z
564 * - R60 : gl_GlobalInvocationID.x
565 * - R61 : gl_GlobalInvocationID.y/gl_VertexID (without base)
566 * - R62 : gl_GlobalInvocationID.z/gl_InstanceID (without base)
567 *
568 * Fragment:
569 * - R55 : unknown, never seen (but the bit for this is
570 * always set?)
571 * - R56 : unknown (bit always unset)
572 * - R57 : gl_PrimitiveID
573 * - R58 : gl_FrontFacing in low bit, potentially other stuff
574 * - R59 : u16 fragment coordinates (used to compute
575 * gl_FragCoord.xy, together with sample positions)
576 * - R60 : gl_SampleMask (used in epilog, so pretty
577 * much always used, but the bit is always 0 -- is
578 * this just always pushed?)
579 * - R61 : gl_SampleMaskIn and gl_SampleID, used by
580 * varying interpolation.
581 * - R62 : unknown (bit always unset).
582 */
583 u32 preload_regs : 8;
584 /* In units of 8 bytes or 64 bits, since the
585 * uniform/const port loads 64 bits at a time.
586 */
587 u32 uniform_count : 7;
588 u32 unk4 : 10; // = 2
589 } bifrost2;
590 struct {
591 u32 unknown2_7;
592 } midgard2;
593 };
594
595 /* zero on bifrost */
596 u32 unknown2_8;
597
598 /* Blending information for the older non-MRT Midgard HW. Check for
599 * MALI_HAS_BLEND_SHADER to decide how to interpret.
600 */
601
602 union midgard_blend blend;
603 } __attribute__((packed));
604
605 /* This only concerns hardware jobs */
606
607 /* Possible values for job_descriptor_size */
608
609 #define MALI_JOB_32 0
610 #define MALI_JOB_64 1
611
612 struct mali_job_descriptor_header {
613 u32 exception_status;
614 u32 first_incomplete_task;
615 u64 fault_pointer;
616 u8 job_descriptor_size : 1;
617 enum mali_job_type job_type : 7;
618 u8 job_barrier : 1;
619 u8 unknown_flags : 7;
620 u16 job_index;
621 u16 job_dependency_index_1;
622 u16 job_dependency_index_2;
623 u64 next_job;
624 } __attribute__((packed));
625
626 /* These concern exception_status */
627
628 /* Access type causing a fault, paralleling AS_FAULTSTATUS_* entries in the
629 * kernel */
630
631 enum mali_exception_access {
632 /* Atomic in the kernel for MMU, but that doesn't make sense for a job
633 * fault so it's just unused */
634 MALI_EXCEPTION_ACCESS_NONE = 0,
635
636 MALI_EXCEPTION_ACCESS_EXECUTE = 1,
637 MALI_EXCEPTION_ACCESS_READ = 2,
638 MALI_EXCEPTION_ACCESS_WRITE = 3
639 };
640
641 /* Details about write_value from panfrost igt tests which use it as a generic
642 * dword write primitive */
643
644 #define MALI_WRITE_VALUE_ZERO 3
645
646 struct mali_payload_write_value {
647 u64 address;
648 u32 value_descriptor;
649 u32 reserved;
650 u64 immediate;
651 } __attribute__((packed));
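
/* A sketch of a write_value payload that zeroes a GPU dword, using the
 * MALI_WRITE_VALUE_ZERO descriptor above. The payload would sit after a
 * JOB_TYPE_WRITE_VALUE job header; the helper name is ours, not the igt tests'. */

static inline struct mali_payload_write_value
pan_example_write_zero(u64 gpu_address)
{
        struct mali_payload_write_value payload = {
                .address = gpu_address,
                .value_descriptor = MALI_WRITE_VALUE_ZERO,
        };

        return payload;
}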
652
653 /*
654 * Mali Attributes
655 *
656 * This structure lets the attribute unit compute the address of an attribute
657 * given the vertex and instance ID. Unfortunately, the way this works is
658 * rather complicated when instancing is enabled.
659 *
660 * To explain this, first we need to explain how compute and vertex threads are
661 * dispatched. This is a guess (although a pretty firm guess!) since the
662 * details are mostly hidden from the driver, except for attribute instancing.
663 * When a quad is dispatched, it receives a single, linear index. However, we
664 * need to translate that index into a (vertex id, instance id) pair, or a
665 * (local id x, local id y, local id z) triple for compute shaders (although
666 * vertex shaders and compute shaders are handled almost identically).
667 * Focusing on vertex shaders, one option would be to do:
668 *
669 * vertex_id = linear_id % num_vertices
670 * instance_id = linear_id / num_vertices
671 *
672 * but this involves a costly division and modulus by an arbitrary number.
673 * Instead, we could pad num_vertices. We dispatch padded_num_vertices *
674 * num_instances threads instead of num_vertices * num_instances, which results
675 * in some "extra" threads with vertex_id >= num_vertices, which we have to
676 * discard. The more we pad num_vertices, the more "wasted" threads we
677 * dispatch, but the division is potentially easier.
678 *
679 * One straightforward choice is to pad num_vertices to the next power of two,
680 * which means that the division and modulus are just simple bit shifts and
681 * masking. But the actual algorithm is a bit more complicated. The thread
682 * dispatcher has special support for dividing by 3, 5, 7, and 9, in addition
683 * to dividing by a power of two. This is possibly using the technique
684 * described in patent US20170010862A1. As a result, padded_num_vertices can be
685 * 1, 3, 5, 7, or 9 times a power of two. This results in fewer wasted threads,
686 * since we need less padding.
687 *
688 * padded_num_vertices is picked by the hardware. The driver just specifies the
689 * actual number of vertices. At least for Mali G71, the first few cases are
690 * given by:
691 *
692 * num_vertices | padded_num_vertices
693 * 3 | 4
694 * 4-7 | 8
695 * 8-11 | 12 (3 * 4)
696 * 12-15 | 16
697 * 16-19 | 20 (5 * 4)
698 *
699 * Note that padded_num_vertices is a multiple of four (presumably because
700 * threads are dispatched in groups of 4). Also, padded_num_vertices is always
701 * at least one more than num_vertices, which seems like a quirk of the
702 * hardware. For larger num_vertices, the hardware uses the following
703 * algorithm: using the binary representation of num_vertices, we look at the
704 * most significant set bit as well as the following 3 bits. Let n be the
705 * number of bits after those 4 bits. Then we set padded_num_vertices according
706 * to the following table:
707 *
708 * high bits | padded_num_vertices
709 * 1000 | 9 * 2^n
710 * 1001 | 5 * 2^(n+1)
711 * 101x | 3 * 2^(n+2)
712 * 110x | 7 * 2^(n+1)
713 * 111x | 2^(n+4)
714 *
715 * For example, if num_vertices = 70 is passed to glDraw(), its binary
716 * representation is 1000110, so n = 3 and the high bits are 1000, and
717 * therefore padded_num_vertices = 9 * 2^3 = 72.
718 *
719 * The attribute unit works in terms of the original linear_id. If
720 * num_instances = 1, then they are the same, and everything is simple.
721 * However, with instancing things get more complicated. There are four
722 * possible modes, two of them we can group together:
723 *
724 * 1. Use the linear_id directly. Only used when there is no instancing.
725 *
726 * 2. Use the linear_id modulo a constant. This is used for per-vertex
727 * attributes with instancing enabled by making the constant equal
728 * padded_num_vertices. Because the modulus is always padded_num_vertices, this
729 * mode only supports a modulus that is a power of 2 times 1, 3, 5, 7, or 9.
730 * The shift field specifies the power of two, while the extra_flags field
731 * specifies the odd number. If shift = n and extra_flags = m, then the modulus
732 * is (2m + 1) * 2^n. As an example, if num_vertices = 70, then as computed
733 * above, padded_num_vertices = 9 * 2^3, so we should set extra_flags = 4 and
734 * shift = 3. Note that we must exactly follow the hardware algorithm used to
735 * get padded_num_vertices in order to correctly implement per-vertex
736 * attributes.
737 *
738 * 3. Divide the linear_id by a constant. In order to correctly implement
739 * instance divisors, we have to divide linear_id by padded_num_vertices times
740 * the user-specified divisor. So first we compute padded_num_vertices, again
741 * following the exact same algorithm that the hardware uses, then multiply it
742 * by the GL-level divisor to get the hardware-level divisor. This case is
743 * further divided into two more cases. If the hardware-level divisor is a
744 * power of two, then we just need to shift. The shift amount is specified by
745 * the shift field, so that the hardware-level divisor is just 2^shift.
746 *
747 * If it isn't a power of two, then we have to divide by an arbitrary integer.
748 * For that, we use the well-known technique of multiplying by an approximation
749 * of the inverse. The driver must compute the magic multiplier and shift
750 * amount, and then the hardware does the multiplication and shift. The
751 * hardware and driver also use the "round-down" optimization as described in
752 * http://ridiculousfish.com/files/faster_unsigned_division_by_constants.pdf.
753 * The hardware further assumes the multiplier is between 2^31 and 2^32, so the
754 * high bit is implicitly set to 1 even though it is set to 0 by the driver --
755 * presumably this simplifies the hardware multiplier a little. The hardware
756 * first multiplies linear_id by the multiplier and takes the high 32 bits,
757 * then applies the round-down correction if extra_flags = 1, then finally
758 * shifts right by the shift field.
759 *
760 * There are some differences between ridiculousfish's algorithm and the Mali
761 * hardware algorithm, which means that the reference code from ridiculousfish
762 * doesn't always produce the right constants. Mali does not use the pre-shift
763 * optimization, since that would make a hardware implementation slower (it
764 * would have to always do the pre-shift, multiply, and post-shift operations).
765 * It also forces the multiplier to be at least 2^31, which means that the
766 * exponent is entirely fixed, so there is no trial-and-error. Altogether,
767 * given the divisor d, the algorithm the driver must follow is:
768 *
769 * 1. Set shift = floor(log2(d)).
770 * 2. Compute m = ceil(2^(shift + 32) / d) and e = 2^(shift + 32) % d.
771 * 3. If e <= 2^shift, then we need to use the round-down algorithm. Set
772 * magic_divisor = m - 1 and extra_flags = 1.
773 * 4. Otherwise, set magic_divisor = m and extra_flags = 0.
774 *
775 * Unrelated to instancing/actual attributes, images (the OpenCL kind) are
776 * implemented as special attributes, denoted by MALI_ATTR_IMAGE. For images,
777 * let shift=extra_flags=0. Stride is set to the image format's bytes-per-pixel
778 * (*NOT the row stride*). Size is set to the size of the image itself.
779 *
780 * Special internal attributes and varyings (gl_VertexID, gl_FrontFacing, etc)
781 * use particular fixed addresses with modified structures.
782 */
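
/* Illustrative sketches (our names, not the driver's) of the algorithms
 * described above. pan_example_padded_vertex_count() applies the "top four
 * bits" rule for the large-count case; the explicit small-count table above
 * applies below 20 vertices. pan_example_modulo_params() decomposes the padded
 * count into the shift/extra_flags pair used by MALI_ATTR_MODULO, and
 * pan_example_npot_divisor() follows steps 1-4 above for MALI_ATTR_NPOT_DIVIDE. */

static inline u32
pan_example_padded_vertex_count(u32 num_vertices)
{
        /* Position of the most significant set bit (num_vertices > 0) */
        u32 msb = 31 - __builtin_clz(num_vertices);

        /* n = number of bits below the top four */
        u32 n = (msb >= 3) ? (msb - 3) : 0;

        /* The top four bits themselves */
        u32 high = num_vertices >> n;

        if (high >= 14)        /* 111x -> 2^(n+4) */
                return 1u << (n + 4);
        else if (high >= 12)   /* 110x -> 7 * 2^(n+1) */
                return 7u << (n + 1);
        else if (high >= 10)   /* 101x -> 3 * 2^(n+2) */
                return 3u << (n + 2);
        else if (high == 9)    /* 1001 -> 5 * 2^(n+1) */
                return 5u << (n + 1);
        else                   /* 1000 -> 9 * 2^n */
                return 9u << n;
}

/* padded_num_vertices = (2 * extra_flags + 1) << shift, so e.g. 72 = 9 * 2^3
 * decomposes to extra_flags = 4 and shift = 3, matching the example above. */
static inline void
pan_example_modulo_params(u32 padded, u32 *shift, u32 *extra_flags)
{
        *shift = __builtin_ctz(padded);
        *extra_flags = ((padded >> *shift) - 1) / 2;
}

/* d is the hardware-level divisor (padded count times the GL divisor) and is
 * assumed not to be a power of two, else MALI_ATTR_POT_DIVIDE applies. */
static inline void
pan_example_npot_divisor(u32 d, u32 *magic_divisor, u32 *shift,
                         u32 *extra_flags)
{
        /* 1. shift = floor(log2(d)) */
        u32 s = 31 - __builtin_clz(d);

        /* 2. m = ceil(2^(shift + 32) / d) and e = 2^(shift + 32) mod d */
        u64 num = 1ull << (s + 32);
        u64 m = (num + d - 1) / d;
        u64 e = num % d;

        if (e <= (1ull << s)) {
                /* 3. Round-down case */
                *magic_divisor = (u32) (m - 1);
                *extra_flags = 1;
        } else {
                /* 4. Plain case */
                *magic_divisor = (u32) m;
                *extra_flags = 0;
        }

        /* Per the notes above, the hardware treats the multiplier's top bit as
         * implicitly set regardless of what is stored here. */
        *shift = s;
}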
783
784 enum mali_attr_mode {
785 MALI_ATTR_UNUSED = 0,
786 MALI_ATTR_LINEAR = 1,
787 MALI_ATTR_POT_DIVIDE = 2,
788 MALI_ATTR_MODULO = 3,
789 MALI_ATTR_NPOT_DIVIDE = 4,
790 MALI_ATTR_IMAGE = 5,
791 };
792
793 /* Pseudo-address for gl_VertexID, gl_FragCoord, gl_FrontFacing */
794
795 #define MALI_ATTR_VERTEXID (0x22)
796 #define MALI_ATTR_INSTANCEID (0x24)
797 #define MALI_VARYING_FRAG_COORD (0x25)
798 #define MALI_VARYING_FRONT_FACING (0x26)
799
800 /* This magic "pseudo-address" is used as `elements` to implement
801 * gl_PointCoord. When read from a fragment shader, it generates a point
802 * coordinate per the OpenGL ES 2.0 specification. Flipped coordinate spaces
803 * require an affine transformation in the shader. */
804
805 #define MALI_VARYING_POINT_COORD (0x61)
806
807 /* Used for comparison to check if an address is special. Mostly a guess, but
808 * it doesn't really matter. */
809
810 #define MALI_RECORD_SPECIAL (0x100)
811
812 union mali_attr {
813 /* This is used for actual attributes. */
814 struct {
815 /* The bottom 3 bits are the mode */
816 mali_ptr elements : 64 - 8;
817 u32 shift : 5;
818 u32 extra_flags : 3;
819 u32 stride;
820 u32 size;
821 };
822 /* The entry after an NPOT_DIVIDE entry has this format. It stores
823 * extra information that wouldn't fit in a normal entry.
824 */
825 struct {
826 u32 unk; /* = 0x20 */
827 u32 magic_divisor;
828 u32 zero;
829 /* This is the original, GL-level divisor. */
830 u32 divisor;
831 };
832 } __attribute__((packed));
833
834 struct mali_attr_meta {
835 /* Vertex buffer index */
836 u8 index;
837
838 unsigned unknown1 : 2;
839 unsigned swizzle : 12;
840 enum mali_format format : 8;
841
842 /* Always observed to be zero at the moment */
843 unsigned unknown3 : 2;
844
845 /* When packing multiple attributes in a buffer, offset addresses by
846 * this value. Obscurely, this is signed. */
847 int32_t src_offset;
848 } __attribute__((packed));
849
850 #define FBD_MASK (~0x3f)
851
852 /* MFBD, rather than SFBD */
853 #define MALI_MFBD (0x1)
854
855 /* ORed into an MFBD address to specify the fbx section is included */
856 #define MALI_MFBD_TAG_EXTRA (0x2)
857
858 struct mali_uniform_buffer_meta {
859 /* This is actually the size minus 1 (MALI_POSITIVE), in units of 16
860 * bytes. This gives a maximum of 2^14 bytes, which just so happens to
861 * be the GL minimum-maximum for GL_MAX_UNIFORM_BLOCK_SIZE.
862 */
863 u64 size : 10;
864
865 /* This is missing the bottom 2 bits and top 8 bits. The top 8 bits
866 * should be 0 for userspace pointers, according to
867 * https://lwn.net/Articles/718895/. By reusing these bits, we can make
868 * each entry in the table only 64 bits.
869 */
870 mali_ptr ptr : 64 - 10;
871 };
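
/* A sketch of filling one table entry, following the comments above: the size
 * is stored minus one in 16-byte units, and the pointer is stored shifted
 * right by its two missing low bits. The helper name and rounding are ours. */

static inline struct mali_uniform_buffer_meta
pan_example_ubo_entry(mali_ptr gpu_address, u32 size_bytes)
{
        /* Round up to a whole number of 16-byte units */
        u32 units = (size_bytes + 15) / 16;

        struct mali_uniform_buffer_meta meta = {
                .size = units - 1,
                .ptr = gpu_address >> 2,
        };

        return meta;
}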
872
873 /* On Bifrost, these fields are the same between the vertex and tiler payloads.
874 * They also seem to be the same between Bifrost and Midgard. They're shared in
875 * fused payloads.
876 */
877
878 /* Applies to unknown_draw */
879
880 #define MALI_DRAW_INDEXED_UINT8 (0x10)
881 #define MALI_DRAW_INDEXED_UINT16 (0x20)
882 #define MALI_DRAW_INDEXED_UINT32 (0x30)
883 #define MALI_DRAW_INDEXED_SIZE (0x30)
884 #define MALI_DRAW_INDEXED_SHIFT (4)
885
886 #define MALI_DRAW_VARYING_SIZE (0x100)
887 #define MALI_DRAW_PRIMITIVE_RESTART_FIXED_INDEX (0x10000)
888
889 struct mali_vertex_tiler_prefix {
890 /* This is a dynamic bitfield containing the following things in this order:
891 *
892 * - gl_WorkGroupSize.x
893 * - gl_WorkGroupSize.y
894 * - gl_WorkGroupSize.z
895 * - gl_NumWorkGroups.x
896 * - gl_NumWorkGroups.y
897 * - gl_NumWorkGroups.z
898 *
899 * The number of bits allocated for each number is based on the *_shift
900 * fields below. For example, workgroups_y_shift gives the bit that
901 * gl_NumWorkGroups.y starts at, and workgroups_z_shift gives the bit
902 * that gl_NumWorkGroups.z starts at (and therefore one after the bit
903 * that gl_NumWorkGroups.y ends at). The actual value for each gl_*
904 * value is one more than the stored value, since if any of the values
905 * are zero, then there would be no invocations (and hence no job). If
906 * there were 0 bits allocated to a given field, then it must be zero,
907 * and hence the real value is one.
908 *
909 * Vertex jobs reuse the same job dispatch mechanism as compute jobs,
910 * effectively doing glDispatchCompute(1, vertex_count, instance_count)
911 * where vertex count is the number of vertices.
912 */
913 u32 invocation_count;
914
915 /* Bitfield for shifts:
916 *
917 * size_y_shift : 5
918 * size_z_shift : 5
919 * workgroups_x_shift : 6
920 * workgroups_y_shift : 6
921 * workgroups_z_shift : 6
922 * workgroups_x_shift_2 : 4
923 */
924 u32 invocation_shifts;
925
926 u32 draw_mode : 4;
927 u32 unknown_draw : 22;
928
929 /* This is the same as workgroups_x_shift_2 in compute shaders, but
930 * always 5 for vertex jobs and 6 for tiler jobs. I suspect this has
931 * something to do with how many quads get put in the same execution
932 * engine, which is a balance (you don't want to starve the engine, but
933 * you also want to distribute work evenly).
934 */
935 u32 workgroups_x_shift_3 : 6;
936
937
938 /* Negative of min_index. This is used to compute
939 * the unbiased index in tiler/fragment shader runs.
940 *
941 * The hardware adds offset_bias_correction in each run,
942 * so that absent an index bias, the first vertex processed is
943 * genuinely the first vertex (0). But with an index bias,
944 * the first vertex process is numbered the same as the bias.
945 *
946 * To represent this more conveniently:
947 * unbiased_index = lower_bound_index +
948 * index_bias +
949 * offset_bias_correction
950 *
951 * This is done since the hardware doesn't accept an index_bias
952 * and this allows it to recover the unbiased index.
953 */
954 int32_t offset_bias_correction;
955 u32 zero1;
956
957 /* Like many other strictly nonzero quantities, index_count is
958 * subtracted by one. For an indexed cube, this is equal to 35 = 6
959 * faces * 2 triangles/per face * 3 vertices/per triangle - 1. That is,
960 * for an indexed draw, index_count is the number of actual vertices
961 * rendered whereas invocation_count is the number of unique vertices
962 * rendered (the number of times the vertex shader must be invoked).
963 * For non-indexed draws, this is just equal to invocation_count. */
964
965 u32 index_count;
966
967 /* No hidden structure; literally just a pointer to an array of uint
968 * indices (width depends on flags). Thanks, guys, for not making my
969 * life insane for once! NULL for non-indexed draws. */
970
971 u64 indices;
972 } __attribute__((packed));
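
/* A sketch (our helper, not the driver's) of packing the invocation bitfields
 * described above: each strictly-positive value is stored minus one, using
 * just enough bits to hold it, and the starting bit of each field is recorded
 * in invocation_shifts. A vertex job would pass size (1, 1, 1) and counts
 * (1, vertex_count, instance_count) per the glDispatchCompute analogy above.
 * The caller must ensure the six packed values fit in 32 bits; how
 * workgroups_x_shift_2 is chosen is not covered by these notes, so the top
 * four bits are simply left zero here. */

static inline void
pan_example_pack_invocations(struct mali_vertex_tiler_prefix *prefix,
                             u32 sz_x, u32 sz_y, u32 sz_z,
                             u32 num_x, u32 num_y, u32 num_z)
{
        u32 values[6] = { sz_x, sz_y, sz_z, num_x, num_y, num_z };
        u32 shifts[6];
        u32 packed = 0;
        u32 offset = 0;

        for (unsigned i = 0; i < 6; ++i) {
                u32 stored = values[i] - 1;

                shifts[i] = offset;

                /* A field storing zero takes zero bits (the real value is one) */
                if (stored) {
                        packed |= stored << offset;
                        offset += 32 - __builtin_clz(stored);
                }
        }

        prefix->invocation_count = packed;

        /* Layout from the comment above: size_y_shift:5 | size_z_shift:5 |
         * workgroups_x_shift:6 | workgroups_y_shift:6 | workgroups_z_shift:6 |
         * workgroups_x_shift_2:4 */
        prefix->invocation_shifts =
                (shifts[1] << 0)  | (shifts[2] << 5)  |
                (shifts[3] << 10) | (shifts[4] << 16) |
                (shifts[5] << 22);
}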
973
974 /* Point size / line width can either be specified as a 32-bit float (for
975 * constant size) or as a 64-bit GPU pointer (for varying size). If a pointer
976 * is selected, by setting the appropriate MALI_DRAW_VARYING_SIZE bit in the tiler
977 * payload, the contents of varying_pointer will be interpreted as an array of
978 * fp16 sizes, one for each vertex. gl_PointSize is therefore implemented by
979 * creating a special MALI_R16F varying writing to varying_pointer. */
980
981 union midgard_primitive_size {
982 float constant;
983 u64 pointer;
984 };
985
986 struct bifrost_vertex_only {
987 u32 unk2; /* =0x2 */
988
989 u32 zero0;
990
991 u64 zero1;
992 } __attribute__((packed));
993
994 struct bifrost_tiler_heap_meta {
995 u32 zero;
996 u32 heap_size;
997 /* note: these are just guesses! */
998 mali_ptr tiler_heap_start;
999 mali_ptr tiler_heap_free;
1000 mali_ptr tiler_heap_end;
1001
1002 /* hierarchy weights? but they're still 0 after the job has run... */
1003 u32 zeros[12];
1004 } __attribute__((packed));
1005
1006 struct bifrost_tiler_meta {
1007 u64 zero0;
1008 u16 hierarchy_mask;
1009 u16 flags;
1010 u16 width;
1011 u16 height;
1012 u64 zero1;
1013 mali_ptr tiler_heap_meta;
1014 /* TODO what is this used for? */
1015 u64 zeros[20];
1016 } __attribute__((packed));
1017
1018 struct bifrost_tiler_only {
1019 /* 0x20 */
1020 union midgard_primitive_size primitive_size;
1021
1022 mali_ptr tiler_meta;
1023
1024 u64 zero1, zero2, zero3, zero4, zero5, zero6;
1025
1026 u32 gl_enables;
1027 u32 zero7;
1028 u64 zero8;
1029 } __attribute__((packed));
1030
1031 struct bifrost_scratchpad {
1032 u32 zero;
1033 u32 flags; // = 0x1f
1034 /* This is a pointer to a CPU-inaccessible buffer, 16 pages, allocated
1035 * during startup. It seems to serve the same purpose as the
1036 * gpu_scratchpad in the SFBD for Midgard, although it's slightly
1037 * larger.
1038 */
1039 mali_ptr gpu_scratchpad;
1040 } __attribute__((packed));
1041
1042 struct mali_vertex_tiler_postfix {
1043 /* Zero for vertex jobs. Pointer to the position (gl_Position) varying
1044 * output from the vertex shader for tiler jobs.
1045 */
1046
1047 u64 position_varying;
1048
1049 /* An array of mali_uniform_buffer_meta's. The size is given by the
1050 * shader_meta.
1051 */
1052 u64 uniform_buffers;
1053
1054 /* This is a pointer to an array of pointers to the texture
1055 * descriptors, number of pointers bounded by number of textures. The
1056 * indirection is needed to accommodate varying numbers and sizes of
1057 * texture descriptors */
1058 u64 texture_trampoline;
1059
1060 /* For OpenGL, from what I've seen, this is intimately connected to
1061 * texture_meta. cwabbott says this is not the case under Vulkan, hence
1062 * why this field is separate (Midgard is Vulkan capable). Pointer to
1063 * array of sampler descriptors (which are uniform in size) */
1064 u64 sampler_descriptor;
1065
1066 u64 uniforms;
1067 u64 shader;
1068 u64 attributes; /* struct attribute_buffer[] */
1069 u64 attribute_meta; /* attribute_meta[] */
1070 u64 varyings; /* struct attr */
1071 u64 varying_meta; /* pointer */
1072 u64 viewport;
1073 u64 occlusion_counter; /* A single bit as far as I can tell */
1074
1075 /* Note: on Bifrost, this isn't actually the FBD. It points to
1076 * bifrost_scratchpad instead. However, it does point to the same thing
1077 * in vertex and tiler jobs.
1078 */
1079 mali_ptr framebuffer;
1080 } __attribute__((packed));
1081
1082 struct midgard_payload_vertex_tiler {
1083 struct mali_vertex_tiler_prefix prefix;
1084
1085 u16 gl_enables; // 0x5
1086
1087 /* Both zero for non-instanced draws. For instanced draws, a
1088 * decomposition of padded_num_vertices. See the comments about the
1089 * corresponding fields in mali_attr for context. */
1090
1091 unsigned instance_shift : 5;
1092 unsigned instance_odd : 3;
1093
1094 u8 zero4;
1095
1096 /* Offset for first vertex in buffer */
1097 u32 offset_start;
1098
1099 u64 zero5;
1100
1101 struct mali_vertex_tiler_postfix postfix;
1102
1103 union midgard_primitive_size primitive_size;
1104 } __attribute__((packed));
1105
1106 struct bifrost_payload_vertex {
1107 struct mali_vertex_tiler_prefix prefix;
1108 struct bifrost_vertex_only vertex;
1109 struct mali_vertex_tiler_postfix postfix;
1110 } __attribute__((packed));
1111
1112 struct bifrost_payload_tiler {
1113 struct mali_vertex_tiler_prefix prefix;
1114 struct bifrost_tiler_only tiler;
1115 struct mali_vertex_tiler_postfix postfix;
1116 } __attribute__((packed));
1117
1118 struct bifrost_payload_fused {
1119 struct mali_vertex_tiler_prefix prefix;
1120 struct bifrost_tiler_only tiler;
1121 struct mali_vertex_tiler_postfix tiler_postfix;
1122 u64 padding; /* zero */
1123 struct bifrost_vertex_only vertex;
1124 struct mali_vertex_tiler_postfix vertex_postfix;
1125 } __attribute__((packed));
1126
1127 /* Purposeful off-by-one in width, height fields. For example, a (64, 64)
1128 * texture is stored as (63, 63) in these fields. This adjusts for that.
1129 * There's an identical pattern in the framebuffer descriptor. Even vertex
1130 * count fields work this way, hence the generic name -- integral fields that
1131 * are strictly positive generally need this adjustment. */
1132
1133 #define MALI_POSITIVE(dim) (dim - 1)
1134
1135 /* Used with wrapping. Unclear what top bit conveys */
1136
1137 enum mali_wrap_mode {
1138 MALI_WRAP_REPEAT = 0x8 | 0x0,
1139 MALI_WRAP_CLAMP_TO_EDGE = 0x8 | 0x1,
1140 MALI_WRAP_CLAMP = 0x8 | 0x2,
1141 MALI_WRAP_CLAMP_TO_BORDER = 0x8 | 0x3,
1142 MALI_WRAP_MIRRORED_REPEAT = 0x8 | 0x4 | 0x0,
1143 MALI_WRAP_MIRRORED_CLAMP_TO_EDGE = 0x8 | 0x4 | 0x1,
1144 MALI_WRAP_MIRRORED_CLAMP = 0x8 | 0x4 | 0x2,
1145 MALI_WRAP_MIRRORED_CLAMP_TO_BORDER = 0x8 | 0x4 | 0x3,
1146 };
1147
1148 /* Shared across both command stream and Midgard, and even with Bifrost */
1149
1150 enum mali_texture_type {
1151 MALI_TEX_CUBE = 0x0,
1152 MALI_TEX_1D = 0x1,
1153 MALI_TEX_2D = 0x2,
1154 MALI_TEX_3D = 0x3
1155 };
1156
1157 /* 8192x8192 */
1158 #define MAX_MIP_LEVELS (13)
1159
1160 /* Cubemap bloats everything up */
1161 #define MAX_CUBE_FACES (6)
1162
1163 /* For each pointer, there is an address and optionally also a stride */
1164 #define MAX_ELEMENTS (2)
1165
1166 /* It's not known why there are 4-bits allocated -- this enum is almost
1167 * certainly incomplete */
1168
1169 enum mali_texture_layout {
1170 /* For a Z/S texture, this is linear */
1171 MALI_TEXTURE_TILED = 0x1,
1172
1173 /* Z/S textures cannot be tiled */
1174 MALI_TEXTURE_LINEAR = 0x2,
1175
1176 /* 16x16 sparse */
1177 MALI_TEXTURE_AFBC = 0xC
1178 };
1179
1180 /* Corresponds to the type passed to glTexImage2D and so forth */
1181
1182 struct mali_texture_format {
1183 unsigned swizzle : 12;
1184 enum mali_format format : 8;
1185
1186 unsigned srgb : 1;
1187 unsigned unknown1 : 1;
1188
1189 enum mali_texture_type type : 2;
1190 enum mali_texture_layout layout : 4;
1191
1192 /* Always set */
1193 unsigned unknown2 : 1;
1194
1195 /* Set to allow packing an explicit stride */
1196 unsigned manual_stride : 1;
1197
1198 unsigned zero : 2;
1199 } __attribute__((packed));
1200
1201 struct mali_texture_descriptor {
1202 uint16_t width;
1203 uint16_t height;
1204 uint16_t depth;
1205 uint16_t array_size;
1206
1207 struct mali_texture_format format;
1208
1209 uint16_t unknown3;
1210
1211 /* One for non-mipmapped, zero for mipmapped */
1212 uint8_t unknown3A;
1213
1214 /* Zero for non-mipmapped, (number of levels - 1) for mipmapped */
1215 uint8_t levels;
1216
1217 /* Swizzling is a single 32-bit word, broken up here for convenience.
1218 * Here, swizzling refers to the ES 3.0 texture parameters for channel
1219 * level swizzling, not the internal pixel-level swizzling which is
1220 * below OpenGL's reach */
1221
1222 unsigned swizzle : 12;
1223 unsigned swizzle_zero : 20;
1224
1225 uint32_t unknown5;
1226 uint32_t unknown6;
1227 uint32_t unknown7;
1228
1229 mali_ptr payload[MAX_MIP_LEVELS * MAX_CUBE_FACES * MAX_ELEMENTS];
1230 } __attribute__((packed));
1231
1232 /* filter_mode */
1233
1234 #define MALI_SAMP_MAG_NEAREST (1 << 0)
1235 #define MALI_SAMP_MIN_NEAREST (1 << 1)
1236
1237 /* TODO: What do these bits mean individually? Only seen set together */
1238
1239 #define MALI_SAMP_MIP_LINEAR_1 (1 << 3)
1240 #define MALI_SAMP_MIP_LINEAR_2 (1 << 4)
1241
1242 /* Flag in filter_mode, corresponding to OpenCL's NORMALIZED_COORDS_TRUE
1243 * sampler_t flag. For typical OpenGL textures, this is always set. */
1244
1245 #define MALI_SAMP_NORM_COORDS (1 << 5)
1246
1247 /* Used for lod encoding. Thanks @urjaman for pointing out these routines can
1248 * be cleaned up a lot. */
1249
1250 #define DECODE_FIXED_16(x) ((float) (x / 256.0))
1251
1252 static inline uint16_t
1253 FIXED_16(float x)
1254 {
1255 /* Clamp inputs, accounting for float error */
1256 float max_lod = (32.0 - (1.0 / 512.0));
1257
1258 x = ((x > max_lod) ? max_lod : ((x < 0.0) ? 0.0 : x));
1259
1260 return (int) (x * 256.0);
1261 }
1262
1263 struct mali_sampler_descriptor {
1264 uint16_t filter_mode;
1265
1266 /* Fixed point. Upper 8 bits are before the decimal point, although it
1267 * caps at [0-31]. Lower 8 bits are after the decimal point: int(round(x *
1268 * 256)) */
1269
1270 uint16_t lod_bias;
1271 uint16_t min_lod;
1272 uint16_t max_lod;
1273
1274 /* All one word in reality, but packed a bit. Comparisons are flipped
1275 * from OpenGL. */
1276
1277 enum mali_wrap_mode wrap_s : 4;
1278 enum mali_wrap_mode wrap_t : 4;
1279 enum mali_wrap_mode wrap_r : 4;
1280 enum mali_func compare_func : 3;
1281
1282 /* No effect on 2D textures. For cubemaps, set for ES3 and clear for
1283 * ES2, controlling seamless cubemapping */
1284 unsigned seamless_cube_map : 1;
1285
1286 unsigned zero : 16;
1287
1288 uint32_t zero2;
1289 float border_color[4];
1290 } __attribute__((packed));
1291
1292 /* viewport0/viewport1 form the arguments to glViewport. viewport1 is
1293 * modified by MALI_POSITIVE; viewport0 is as-is.
1294 */
1295
1296 struct mali_viewport {
1297 /* XY clipping planes */
1298 float clip_minx;
1299 float clip_miny;
1300 float clip_maxx;
1301 float clip_maxy;
1302
1303 /* Depth clipping planes */
1304 float clip_minz;
1305 float clip_maxz;
1306
1307 u16 viewport0[2];
1308 u16 viewport1[2];
1309 } __attribute__((packed));
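
/* A hedged sketch of filling this from glViewport(x, y, w, h) plus a depth
 * range, assuming viewport0 holds the lower corner as-is and viewport1 the
 * MALI_POSITIVE-encoded upper corner, with the float clip box matching. The
 * helper and that corner assumption are ours, not confirmed by the notes above. */

static inline void
pan_example_viewport(struct mali_viewport *vp,
                     u16 x, u16 y, u16 w, u16 h,
                     float min_depth, float max_depth)
{
        vp->clip_minx = (float) x;
        vp->clip_miny = (float) y;
        vp->clip_maxx = (float) (x + w);
        vp->clip_maxy = (float) (y + h);

        vp->clip_minz = min_depth;
        vp->clip_maxz = max_depth;

        vp->viewport0[0] = x;
        vp->viewport0[1] = y;
        vp->viewport1[0] = MALI_POSITIVE(x + w);
        vp->viewport1[1] = MALI_POSITIVE(y + h);
}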
1310
1311 /* From presentations, 16x16 tiles externally. Use shift for fast computation
1312 * of tile numbers. */
1313
1314 #define MALI_TILE_SHIFT 4
1315 #define MALI_TILE_LENGTH (1 << MALI_TILE_SHIFT)
1316
1317 /* Tile coordinates are stored as a compact u32, as only 12 bits are needed to
1318 * each component. Notice that this provides a theoretical upper bound of (1 <<
1319 * 12) = 4096 tiles in each direction, addressing a maximum framebuffer of size
1320 * 65536x65536. Multiplying that together, times another four given that Mali
1321 * framebuffers are 32-bit ARGB8888, means that this upper bound would take 16
1322 * gigabytes of RAM just to store the uncompressed framebuffer itself, let
1323 * alone rendering in real-time to such a buffer.
1324 *
1325 * Nice job, guys.*/
1326
1327 /* From mali_kbase_10969_workaround.c */
1328 #define MALI_X_COORD_MASK 0x00000FFF
1329 #define MALI_Y_COORD_MASK 0x0FFF0000
1330
1331 /* Extract parts of a tile coordinate */
1332
1333 #define MALI_TILE_COORD_X(coord) ((coord) & MALI_X_COORD_MASK)
1334 #define MALI_TILE_COORD_Y(coord) (((coord) & MALI_Y_COORD_MASK) >> 16)
1335
1336 /* Helpers to generate tile coordinates based on the boundary coordinates in
1337 * screen space. So, with the bounds (0, 0) to (128, 128) for the screen, these
1338 * functions would convert it to the bounding tiles (0, 0) to (7, 7).
1339 * Intentional "off-by-one"; finding the tile number is a form of fencepost
1340 * problem. */
1341
1342 #define MALI_MAKE_TILE_COORDS(X, Y) ((X) | ((Y) << 16))
1343 #define MALI_BOUND_TO_TILE(B, bias) ((B - bias) >> MALI_TILE_SHIFT)
1344 #define MALI_COORDINATE_TO_TILE(W, H, bias) MALI_MAKE_TILE_COORDS(MALI_BOUND_TO_TILE(W, bias), MALI_BOUND_TO_TILE(H, bias))
1345 #define MALI_COORDINATE_TO_TILE_MIN(W, H) MALI_COORDINATE_TO_TILE(W, H, 0)
1346 #define MALI_COORDINATE_TO_TILE_MAX(W, H) MALI_COORDINATE_TO_TILE(W, H, 1)
1347
1348 struct mali_payload_fragment {
1349 u32 min_tile_coord;
1350 u32 max_tile_coord;
1351 mali_ptr framebuffer;
1352 } __attribute__((packed));
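
/* A sketch of filling the fragment payload for a width x height framebuffer
 * using the helpers above, mirroring the (0, 0)-(128, 128) example: the FBD
 * pointer would normally also carry its MALI_MFBD/tag bits, omitted here. */

static inline struct mali_payload_fragment
pan_example_fragment_payload(u16 width, u16 height, mali_ptr fbd)
{
        struct mali_payload_fragment payload = {
                .min_tile_coord = MALI_COORDINATE_TO_TILE_MIN(0, 0),
                .max_tile_coord = MALI_COORDINATE_TO_TILE_MAX(width, height),
                .framebuffer = fbd,
        };

        return payload;
}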
1353
1354 /* Single Framebuffer Descriptor */
1355
1356 /* Flags apply to format. With just MSAA_A and MSAA_B, the framebuffer is
1357 * configured for 4x. With MSAA_8, it is configured for 8x. */
1358
1359 #define MALI_SFBD_FORMAT_MSAA_8 (1 << 3)
1360 #define MALI_SFBD_FORMAT_MSAA_A (1 << 4)
1361 #define MALI_SFBD_FORMAT_MSAA_B (1 << 4)
1362 #define MALI_SFBD_FORMAT_SRGB (1 << 5)
1363
1364 /* Fast/slow based on whether all three buffers are cleared at once */
1365
1366 #define MALI_CLEAR_FAST (1 << 18)
1367 #define MALI_CLEAR_SLOW (1 << 28)
1368 #define MALI_CLEAR_SLOW_STENCIL (1 << 31)
1369
1370 /* Configures hierarchical tiling on Midgard for both SFBD/MFBD (embedded
1371 * within the larger framebuffer descriptor). Analogous to
1372 * bifrost_tiler_heap_meta and bifrost_tiler_meta*/
1373
1374 /* See pan_tiler.c for derivation */
1375 #define MALI_HIERARCHY_MASK ((1 << 9) - 1)
1376
1377 /* Flag disabling the tiler for clear-only jobs, with
1378 hierarchical tiling */
1379 #define MALI_TILER_DISABLED (1 << 12)
1380
1381 /* Flag selecting userspace-generated polygon list, for clear-only jobs without
1382 * hierarchical tiling. */
1383 #define MALI_TILER_USER 0xFFF
1384
1385 /* Absent any geometry, the minimum size of the polygon list header */
1386 #define MALI_TILER_MINIMUM_HEADER_SIZE 0x200
1387
1388 struct midgard_tiler_descriptor {
1389 /* Size of the entire polygon list; see pan_tiler.c for the
1390 * computation. It's based on hierarchical tiling */
1391
1392 u32 polygon_list_size;
1393
1394 /* Name known from the replay workaround in the kernel. What exactly is
1395 * flagged here is less known. We do know that (tiler_hierarchy_mask & 0x1ff)
1396 * specifies a mask of hierarchy weights, which explains some of the
1397 * performance mysteries around setting it. We also see the bottom bit
1398 * of tiler_flags set in the kernel, but no comment why.
1399 *
1400 * hierarchy_mask can have the TILER_DISABLED flag */
1401
1402 u16 hierarchy_mask;
1403 u16 flags;
1404
1405 /* See mali_tiler.c for an explanation */
1406 mali_ptr polygon_list;
1407 mali_ptr polygon_list_body;
1408
1409 /* Names based on the symmetry we see with replay jobs, which name these
1410 * explicitly */
1411
1412 mali_ptr heap_start; /* tiler heap_free_address */
1413 mali_ptr heap_end;
1414
1415 /* Hierarchy weights. We know these are weights based on the kernel,
1416 * but I've never seen them be anything other than zero */
1417 u32 weights[8];
1418 };
1419
1420 enum mali_block_format {
1421 MALI_BLOCK_TILED = 0x0,
1422 MALI_BLOCK_UNKNOWN = 0x1,
1423 MALI_BLOCK_LINEAR = 0x2,
1424 MALI_BLOCK_AFBC = 0x3,
1425 };
1426
1427 struct mali_sfbd_format {
1428 /* 0x1 */
1429 unsigned unk1 : 6;
1430
1431 /* mali_channel_swizzle */
1432 unsigned swizzle : 12;
1433
1434 /* MALI_POSITIVE */
1435 unsigned nr_channels : 2;
1436
1437 /* 0x4 */
1438 unsigned unk2 : 6;
1439
1440 enum mali_block_format block : 2;
1441
1442 /* 0xb */
1443 unsigned unk3 : 4;
1444 };
1445
1446 struct mali_single_framebuffer {
1447 u32 unknown1;
1448 u32 unknown2;
1449 mali_ptr scratchpad;
1450
1451 u64 zero1;
1452 u64 zero0;
1453
1454 struct mali_sfbd_format format;
1455
1456 u32 clear_flags;
1457 u32 zero2;
1458
1459 /* Purposeful off-by-one in these fields should be accounted for by the
1460 * MALI_POSITIVE macro */
1461
1462 u16 width;
1463 u16 height;
1464
1465 u32 zero3[4];
1466 mali_ptr checksum;
1467 u32 checksum_stride;
1468 u32 zero5;
1469
1470 /* By default, the framebuffer is upside down from OpenGL's
1471 * perspective. Set framebuffer to the end and negate the stride to
1472 * flip in the Y direction */
1473
1474 mali_ptr framebuffer;
1475 int32_t stride;
1476
1477 u32 zero4;
1478
1479 /* Depth and stencil buffers are interleaved, it appears, as they are
1480 * set to the same address in captures. Both fields set to zero if the
1481 * buffer is not being cleared. Depending on GL_ENABLE magic, you might
1482 * get a zero enable despite the buffer being present; that still is
1483 * disabled. */
1484
1485 mali_ptr depth_buffer; // not SAME_VA
1486 u32 depth_stride_zero : 4;
1487 u32 depth_stride : 28;
1488 u32 zero7;
1489
1490 mali_ptr stencil_buffer; // not SAME_VA
1491 u32 stencil_stride_zero : 4;
1492 u32 stencil_stride : 28;
1493 u32 zero8;
1494
1495 u32 clear_color_1; // RGBA8888 from glClear, actually used by hardware
1496 u32 clear_color_2; // always equal, but unclear function?
1497 u32 clear_color_3; // always equal, but unclear function?
1498 u32 clear_color_4; // always equal, but unclear function?
1499
1500 /* Set to zero if not cleared */
1501
1502 float clear_depth_1; // float32, ditto
1503 float clear_depth_2; // float32, ditto
1504 float clear_depth_3; // float32, ditto
1505 float clear_depth_4; // float32, ditto
1506
1507 u32 clear_stencil; // Exactly as it appears in OpenGL
1508
1509 u32 zero6[7];
1510
1511 struct midgard_tiler_descriptor tiler;
1512
1513 /* More below this, maybe */
1514 } __attribute__((packed));
1515
1516 /* On Midgard, this "framebuffer descriptor" is used for the framebuffer field
1517 * of compute jobs. Superficially resembles a single framebuffer descriptor */
1518
1519 struct mali_compute_fbd {
1520 u32 unknown1[8];
1521 } __attribute__((packed));
1522
1523 /* Format bits for the render target flags */
1524
1525 #define MALI_MFBD_FORMAT_MSAA (1 << 1)
1526 #define MALI_MFBD_FORMAT_SRGB (1 << 2)
1527
1528 struct mali_rt_format {
1529 unsigned unk1 : 32;
1530 unsigned unk2 : 3;
1531
1532 unsigned nr_channels : 2; /* MALI_POSITIVE */
1533
1534 unsigned unk3 : 5;
1535 enum mali_block_format block : 2;
1536 unsigned flags : 4;
1537
1538 unsigned swizzle : 12;
1539
1540 unsigned zero : 3;
1541
1542 /* Disables MFBD preload. When this bit is set, the render target will
1543 * be cleared every frame. When this bit is clear, the hardware will
1544 * automatically wallpaper the render target back from main memory.
1545 * Unfortunately, MFBD preload is very broken on Midgard, so in
1546 * practice, this is a chicken bit that should always be set.
1547 * Discovered by accident, as all good chicken bits are. */
1548
1549 unsigned no_preload : 1;
1550 } __attribute__((packed));
1551
1552 struct bifrost_render_target {
1553 struct mali_rt_format format;
1554
1555 u64 zero1;
1556
1557 struct {
1558 /* Stuff related to ARM Framebuffer Compression. When AFBC is enabled,
1559 * there is an extra metadata buffer that contains 16 bytes per tile.
1560 * The framebuffer needs to be the same size as before, since we don't
1561 * know ahead of time how much space it will take up. The
1562 * framebuffer_stride is set to 0, since the data isn't stored linearly
1563 * anymore.
1564 *
1565 * When AFBC is disabled, these fields are zero.
1566 */
1567
1568 mali_ptr metadata;
1569 u32 stride; // stride in units of tiles
1570 u32 unk; // = 0x20000
1571 } afbc;
1572
1573 mali_ptr framebuffer;
1574
1575 u32 zero2 : 4;
1576 u32 framebuffer_stride : 28; // in units of bytes
1577 u32 zero3;
1578
1579 u32 clear_color_1; // RGBA8888 from glClear, actually used by hardware
1580 u32 clear_color_2; // always equal, but unclear function?
1581 u32 clear_color_3; // always equal, but unclear function?
1582 u32 clear_color_4; // always equal, but unclear function?
1583 } __attribute__((packed));
1584
1585 /* An optional part of bifrost_framebuffer. It comes between the main structure
1586 * and the array of render targets. It must be included if any of these are
1587 * enabled:
1588 *
1589 * - Transaction Elimination
1590 * - Depth/stencil
1591 * - TODO: Anything else?
1592 */
1593
1594 /* Flags field: note, these are guesses */
1595
1596 #define MALI_EXTRA_PRESENT (0x400)
1597 #define MALI_EXTRA_AFBC (0x20)
1598 #define MALI_EXTRA_AFBC_ZS (0x10)
1599 #define MALI_EXTRA_ZS (0x4)
1600
1601 struct bifrost_fb_extra {
1602 mali_ptr checksum;
1603 /* Each tile has an 8 byte checksum, so the stride is "width in tiles * 8" */
1604 u32 checksum_stride;
1605
1606 u32 flags;
1607
1608 union {
1609 /* Note: AFBC is only allowed for 24/8 combined depth/stencil. */
1610 struct {
1611 mali_ptr depth_stencil_afbc_metadata;
1612 u32 depth_stencil_afbc_stride; // in units of tiles
1613 u32 zero1;
1614
1615 mali_ptr depth_stencil;
1616
1617 u64 padding;
1618 } ds_afbc;
1619
1620 struct {
1621 /* Depth becomes depth/stencil in case of combined D/S */
1622 mali_ptr depth;
1623 u32 depth_stride_zero : 4;
1624 u32 depth_stride : 28;
1625 u32 zero1;
1626
1627 mali_ptr stencil;
1628 u32 stencil_stride_zero : 4;
1629 u32 stencil_stride : 28;
1630 u32 zero2;
1631 } ds_linear;
1632 };
1633
1634
1635 u64 zero3, zero4;
1636 } __attribute__((packed));
1637
1638 /* Flags for mfbd_flags */
1639
1640 /* Enables writing depth results back to main memory (rather than keeping them
1641 * on-chip in the tile buffer and then discarding) */
1642
1643 #define MALI_MFBD_DEPTH_WRITE (1 << 10)
1644
1645 /* The MFBD contains the extra bifrost_fb_extra section */
1646
1647 #define MALI_MFBD_EXTRA (1 << 13)
1648
1649 struct bifrost_framebuffer {
1650 u32 stack_shift : 4;
1651 u32 unk0 : 28;
1652
1653 u32 unknown2; // = 0x1f, same as SFBD
1654 mali_ptr scratchpad;
1655
1656 /* 0x10 */
1657 mali_ptr sample_locations;
1658 mali_ptr unknown1;
1659 /* 0x20 */
1660 u16 width1, height1;
1661 u32 zero3;
1662 u16 width2, height2;
1663 u32 unk1 : 19; // = 0x01000
1664 u32 rt_count_1 : 2; // off-by-one (use MALI_POSITIVE)
1665 u32 unk2 : 3; // = 0
1666 u32 rt_count_2 : 3; // no off-by-one
1667 u32 zero4 : 5;
1668 /* 0x30 */
1669 u32 clear_stencil : 8;
1670 u32 mfbd_flags : 24; // = 0x100
1671 float clear_depth;
1672
1673 struct midgard_tiler_descriptor tiler;
1674
1675 /* optional: struct bifrost_fb_extra extra */
1676 /* struct bifrost_render_target rts[] */
1677 } __attribute__((packed));
1678
1679 #endif /* __PANFROST_JOB_H__ */