/*
 * Copyright (c) 2013 Connor Abbott (connor@abbott.cx)
 * Copyright (c) 2018 Alyssa Rosenzweig (alyssa@rosenzweig.io)
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#ifndef __midgard_h__
#define __midgard_h__

#include <stdint.h>
#include <stdbool.h>
#include "panfrost-job.h"

#define MIDGARD_DBG_MSGS     0x0001
#define MIDGARD_DBG_SHADERS  0x0002
#define MIDGARD_DBG_SHADERDB 0x0004

extern int midgard_debug;
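
/* These flags are tested against the midgard_debug bitmask, e.g. (a usage
 * sketch; the compiler proper wraps this pattern in its own logging macro,
 * and print_shader_disassembly here is a hypothetical stand-in):
 *
 *     if (midgard_debug & MIDGARD_DBG_SHADERS)
 *             print_shader_disassembly();
 */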
typedef enum {
        midgard_word_type_alu,
        midgard_word_type_load_store,
        midgard_word_type_texture,
        midgard_word_type_unknown
} midgard_word_type;
typedef enum {
        midgard_alu_op_fadd       = 0x10,
        midgard_alu_op_fmul       = 0x14,

        midgard_alu_op_fmin       = 0x28,
        midgard_alu_op_fmax       = 0x2C,

        midgard_alu_op_fmov       = 0x30, /* fmov_rte */
        midgard_alu_op_fmov_rtz   = 0x31,
        midgard_alu_op_fmov_rtn   = 0x32,
        midgard_alu_op_fmov_rtp   = 0x33,
        midgard_alu_op_froundeven = 0x34,
        midgard_alu_op_ftrunc     = 0x35,
        midgard_alu_op_ffloor     = 0x36,
        midgard_alu_op_fceil      = 0x37,
        midgard_alu_op_ffma       = 0x38,
        midgard_alu_op_fdot3      = 0x3C,
        midgard_alu_op_fdot3r     = 0x3D,
        midgard_alu_op_fdot4      = 0x3E,
        midgard_alu_op_freduce    = 0x3F,

        midgard_alu_op_iadd       = 0x40,
        midgard_alu_op_ishladd    = 0x41, /* a + (b<<1) */
        midgard_alu_op_isub       = 0x46,
        midgard_alu_op_iaddsat    = 0x48,
        midgard_alu_op_uaddsat    = 0x49,
        midgard_alu_op_isubsat    = 0x4E,
        midgard_alu_op_usubsat    = 0x4F,

        midgard_alu_op_imul       = 0x58,

        midgard_alu_op_imin       = 0x60,
        midgard_alu_op_umin       = 0x61,
        midgard_alu_op_imax       = 0x62,
        midgard_alu_op_umax       = 0x63,
        midgard_alu_op_ihadd      = 0x64,
        midgard_alu_op_uhadd      = 0x65,
        midgard_alu_op_irhadd     = 0x66,
        midgard_alu_op_urhadd     = 0x67,
        midgard_alu_op_iasr       = 0x68,
        midgard_alu_op_ilsr       = 0x69,
        midgard_alu_op_ishl       = 0x6E,

        midgard_alu_op_iand       = 0x70,
        midgard_alu_op_ior        = 0x71,
        midgard_alu_op_inand      = 0x72, /* ~(a & b), for inot let a = b */
        midgard_alu_op_inor       = 0x73, /* ~(a | b) */
        midgard_alu_op_iandnot    = 0x74, /* (a & ~b), used for not/b2f */
        midgard_alu_op_iornot     = 0x75, /* (a | ~b) */
        midgard_alu_op_ixor       = 0x76,
        midgard_alu_op_inxor      = 0x77, /* ~(a ^ b) */
        midgard_alu_op_iclz       = 0x78, /* Number of zeroes on left */
        midgard_alu_op_ibitcount8 = 0x7A, /* Counts bits in 8-bit increments */
        midgard_alu_op_imov       = 0x7B,
        midgard_alu_op_iabsdiff   = 0x7C,
        midgard_alu_op_uabsdiff   = 0x7D,
        midgard_alu_op_ichoose    = 0x7E, /* vector, component number - dupe for shuffle() */

        midgard_alu_op_feq        = 0x80,
        midgard_alu_op_fne        = 0x81,
        midgard_alu_op_flt        = 0x82,
        midgard_alu_op_fle        = 0x83,
        midgard_alu_op_fball_eq   = 0x88,
        midgard_alu_op_fball_neq  = 0x89,
        midgard_alu_op_fball_lt   = 0x8A, /* all(lessThan(.., ..)) */
        midgard_alu_op_fball_lte  = 0x8B, /* all(lessThanEqual(.., ..)) */

        midgard_alu_op_fbany_eq   = 0x90,
        midgard_alu_op_fbany_neq  = 0x91,
        midgard_alu_op_fbany_lt   = 0x92, /* any(lessThan(.., ..)) */
        midgard_alu_op_fbany_lte  = 0x93, /* any(lessThanEqual(.., ..)) */

        midgard_alu_op_f2i_rte    = 0x98,
        midgard_alu_op_f2i_rtz    = 0x99,
        midgard_alu_op_f2i_rtn    = 0x9A,
        midgard_alu_op_f2i_rtp    = 0x9B,
        midgard_alu_op_f2u_rte    = 0x9C,
        midgard_alu_op_f2u_rtz    = 0x9D,
        midgard_alu_op_f2u_rtn    = 0x9E,
        midgard_alu_op_f2u_rtp    = 0x9F,

        midgard_alu_op_ieq        = 0xA0,
        midgard_alu_op_ine        = 0xA1,
        midgard_alu_op_ult        = 0xA2,
        midgard_alu_op_ule        = 0xA3,
        midgard_alu_op_ilt        = 0xA4,
        midgard_alu_op_ile        = 0xA5,
        midgard_alu_op_iball_eq   = 0xA8,
        midgard_alu_op_iball_neq  = 0xA9,
        midgard_alu_op_uball_lt   = 0xAA,
        midgard_alu_op_uball_lte  = 0xAB,
        midgard_alu_op_iball_lt   = 0xAC,
        midgard_alu_op_iball_lte  = 0xAD,

        midgard_alu_op_ibany_eq   = 0xB0,
        midgard_alu_op_ibany_neq  = 0xB1,
        midgard_alu_op_ubany_lt   = 0xB2,
        midgard_alu_op_ubany_lte  = 0xB3,
        midgard_alu_op_ibany_lt   = 0xB4, /* any(lessThan(.., ..)) */
        midgard_alu_op_ibany_lte  = 0xB5, /* any(lessThanEqual(.., ..)) */
        midgard_alu_op_i2f_rte    = 0xB8,
        midgard_alu_op_i2f_rtz    = 0xB9,
        midgard_alu_op_i2f_rtn    = 0xBA,
        midgard_alu_op_i2f_rtp    = 0xBB,
        midgard_alu_op_u2f_rte    = 0xBC,
        midgard_alu_op_u2f_rtz    = 0xBD,
        midgard_alu_op_u2f_rtn    = 0xBE,
        midgard_alu_op_u2f_rtp    = 0xBF,

        midgard_alu_op_icsel_v    = 0xC0, /* condition code r31 */
        midgard_alu_op_icsel      = 0xC1, /* condition code r31.w */
        midgard_alu_op_fcsel_v    = 0xC4,
        midgard_alu_op_fcsel      = 0xC5,
        midgard_alu_op_fround     = 0xC6,

        midgard_alu_op_fatan_pt2  = 0xE8,
        midgard_alu_op_fpow_pt1   = 0xEC,
        midgard_alu_op_fpown_pt1  = 0xED,
        midgard_alu_op_fpowr_pt1  = 0xEE,

        midgard_alu_op_frcp       = 0xF0,
        midgard_alu_op_frsqrt     = 0xF2,
        midgard_alu_op_fsqrt      = 0xF3,
        midgard_alu_op_fexp2      = 0xF4,
        midgard_alu_op_flog2      = 0xF5,
        midgard_alu_op_fsin       = 0xF6,
        midgard_alu_op_fcos       = 0xF7,
        midgard_alu_op_fatan2_pt1 = 0xF9,
} midgard_alu_op;
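
/* An illustrative reduction for the fball/fbany comparisons above, mirroring
 * the GLSL all()/any() comments (a scalar C stand-in; NaN behaviour and the
 * actual vector encoding are not modelled): */

static inline bool
midgard_fball_lt_sketch(const float *a, const float *b, unsigned components)
{
        for (unsigned c = 0; c < components; ++c) {
                if (!(a[c] < b[c]))
                        return false;
        }

        return true;
}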
typedef enum {
        midgard_outmod_none = 0,
        midgard_outmod_pos  = 1,
        /* 0x2 unknown */
        midgard_outmod_sat  = 3
} midgard_outmod_float;
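
/* An illustrative reading of the float output modifiers, assuming "pos"
 * clamps negative results to zero and "sat" clamps to [0, 1] (NaN handling
 * is not modelled): */

static inline float
midgard_apply_outmod_float_sketch(float x, midgard_outmod_float mod)
{
        switch (mod) {
        case midgard_outmod_pos:
                return x < 0.0f ? 0.0f : x;
        case midgard_outmod_sat:
                return x < 0.0f ? 0.0f : (x > 1.0f ? 1.0f : x);
        default:
                return x;
        }
}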
typedef enum {
        midgard_outmod_int_saturate  = 0,
        midgard_outmod_uint_saturate = 1,
        midgard_outmod_int_wrap      = 2,
        midgard_outmod_int_high      = 3, /* Overflowed portion */
} midgard_outmod_int;
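
/* An illustrative reading of the integer output modifiers for an 8-bit add,
 * taking "high" to mean the upper half of the double-width result per the
 * comment above (an interpretation, not verified against hardware; signed
 * saturation is omitted for brevity): */

static inline unsigned char
midgard_iadd8_outmod_sketch(unsigned char a, unsigned char b,
                            midgard_outmod_int mod)
{
        unsigned full = (unsigned) a + (unsigned) b; /* double-width result */

        switch (mod) {
        case midgard_outmod_uint_saturate:
                return full > 0xFF ? 0xFF : full;
        case midgard_outmod_int_high:
                return full >> 8;
        default: /* wrap */
                return full & 0xFF;
        }
}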
typedef enum {
        midgard_reg_mode_8  = 0,
        midgard_reg_mode_16 = 1,
        midgard_reg_mode_32 = 2,
        midgard_reg_mode_64 = 3
} midgard_reg_mode;
typedef enum {
        midgard_dest_override_lower = 0,
        midgard_dest_override_upper = 1,
        midgard_dest_override_none  = 2
} midgard_dest_override;
typedef enum {
        midgard_int_sign_extend = 0,
        midgard_int_zero_extend = 1,
        midgard_int_normal      = 2,
        midgard_int_shift       = 3
} midgard_int_mod;
#define MIDGARD_FLOAT_MOD_ABS (1 << 0)
#define MIDGARD_FLOAT_MOD_NEG (1 << 1)
typedef struct
__attribute__((__packed__))
{
        /* Either midgard_int_mod or from midgard_float_mod_*, depending on the
         * type of op */
        unsigned mod : 2;

        /* replicate lower half if dest = half, or low/high half selection if
         * dest = full */
        bool rep_low : 1;
        bool rep_high : 1; /* unused if dest = full */
        bool half : 1; /* only matters if dest = full */
        unsigned swizzle : 8;
} midgard_vector_alu_src;
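
/* The 8-bit swizzle is believed to pack one 2-bit source component index per
 * destination component, lowest bits first (so 0xE4 = 3210 = identity). A
 * minimal packing helper under that assumption: */

static inline unsigned
midgard_swizzle_pack_sketch(unsigned x, unsigned y, unsigned z, unsigned w)
{
        return (x << 0) | (y << 2) | (z << 4) | (w << 6);
}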
typedef struct
__attribute__((__packed__))
{
        midgard_alu_op op                   : 8;
        midgard_reg_mode reg_mode           : 2;
        unsigned src1                       : 13;
        unsigned src2                       : 13;
        midgard_dest_override dest_override : 2;
        midgard_outmod_float outmod         : 2;
        unsigned mask                       : 8;
} midgard_vector_alu;
typedef struct
__attribute__((__packed__))
{
        unsigned mod : 2;

        bool full : 1; /* 0 = half, 1 = full */
        unsigned component : 3;
} midgard_scalar_alu_src;
typedef struct
__attribute__((__packed__))
{
        midgard_alu_op op         : 8;
        unsigned src1             : 6;
        unsigned src2             : 11;
        unsigned unknown          : 1;
        unsigned outmod           : 2;
        bool output_full          : 1;
        unsigned output_component : 3;
} midgard_scalar_alu;
typedef struct
__attribute__((__packed__))
{
        unsigned src1_reg : 5;
        unsigned src2_reg : 5;
        unsigned out_reg  : 5;
        bool src2_imm     : 1;
} midgard_reg_info;
/* In addition to conditional branches and jumps (unconditional branches),
 * Midgard implements a bit of fixed-function functionality used in fragment
 * shaders via specially crafted branches. These have special branch opcodes,
 * which perform a fixed-function operation and/or use the results of a
 * fixed-function operation as the branch condition. */
typedef enum {
        /* Regular branches */
        midgard_jmp_writeout_op_branch_uncond = 1,
        midgard_jmp_writeout_op_branch_cond = 2,

        /* In a fragment shader, execute a discard_if instruction, with the
         * corresponding condition code. Terminates the shader, so generally
         * set the branch target to out of the shader */
        midgard_jmp_writeout_op_discard = 4,

        /* Branch if the tilebuffer is not yet ready. At the beginning of a
         * fragment shader that reads from the tile buffer, for instance via
         * ARM_shader_framebuffer_fetch or EXT_pixel_local_storage, this branch
         * operation should be used as a loop. An instruction like
         * "br.tilebuffer.always -1" does the trick, corresponding to
         * "while (!is_tilebuffer_ready);" */
        midgard_jmp_writeout_op_tilebuffer_pending = 6,

        /* In a fragment shader, try to write out the value pushed to r0 to the
         * tilebuffer, subject to unknown state in r1.z and r1.w. If this
         * succeeds, the shader terminates. If it fails, it branches to the
         * specified branch target. Generally, this should be used in a loop to
         * itself, acting as "do { write(r0); } while (!write_successful);" */
        midgard_jmp_writeout_op_writeout = 7,
} midgard_jmp_writeout_op;
typedef enum {
        midgard_condition_write0 = 0,

        /* These condition codes denote a conditional branch on FALSE and on
         * TRUE respectively */
        midgard_condition_false = 1,
        midgard_condition_true = 2,

        /* This condition code always branches. For a pure branch, the
         * unconditional branch coding should be used instead, but for
         * fixed-function branch opcodes, this is still useful */
        midgard_condition_always = 3,
} midgard_condition;
typedef struct
__attribute__((__packed__))
{
        midgard_jmp_writeout_op op : 3; /* == branch_uncond */
        unsigned dest_tag : 4; /* tag of branch destination */
        unsigned unknown : 2;
        int offset : 7;
} midgard_branch_uncond;
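
/* A minimal construction sketch for the word above, mirroring how a compiler
 * might fill it in (setting the unknown field to 1 is an assumption here, not
 * a verified hardware requirement): */

static inline midgard_branch_uncond
midgard_make_branch_uncond_sketch(unsigned dest_tag, int offset)
{
        midgard_branch_uncond br = {
                .op = midgard_jmp_writeout_op_branch_uncond,
                .dest_tag = dest_tag,
                .unknown = 1, /* assumption */
                .offset = offset,
        };

        return br;
}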
typedef struct
__attribute__((__packed__))
{
        midgard_jmp_writeout_op op : 3; /* == branch_cond */
        unsigned dest_tag : 4; /* tag of branch destination */
        int offset : 7;
        midgard_condition cond : 2;
} midgard_branch_cond;
typedef struct
__attribute__((__packed__))
{
        midgard_jmp_writeout_op op : 3; /* == branch_cond */
        unsigned dest_tag : 4; /* tag of branch destination */
        unsigned unknown : 2;
        signed offset : 23;

        /* Extended branches permit inputting up to 4 conditions loaded into
         * r31 (two in r31.w and two in r31.x). In the most general case, we
         * specify a function f(A, B, C, D) mapping 4 1-bit conditions to a
         * single 1-bit branch criterion. Note that the domain of f has
         * 2^4 = 16 elements, each mapping to 1 bit of output, so we can
         * trivially construct a Godel numbering of f as a 16-bit integer
         * (there are 2^(2^4) such functions). This 16-bit integer serves as
         * a lookup table to compute f, subject to some swaps for ordering.
         *
         * Interestingly, the standard 2-bit condition codes are also a LUT
         * with the same format (2^1 bits), but it's usually easier to use
         * enums. */
        unsigned cond : 16;
} midgard_branch_extended;
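
/* A minimal sketch of evaluating such a 16-bit condition LUT, assuming the
 * four condition bits simply form the LUT index with A as the least
 * significant bit (the "swaps for ordering" mentioned above are not
 * modelled): */

static inline bool
midgard_eval_extended_cond_sketch(unsigned lut, bool a, bool b, bool c, bool d)
{
        unsigned index = (a ? 1 : 0) | (b ? 2 : 0) | (c ? 4 : 0) | (d ? 8 : 0);
        return (lut >> index) & 1;
}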
typedef struct
__attribute__((__packed__))
{
        midgard_jmp_writeout_op op : 3; /* == writeout */
        unsigned unknown : 13;
} midgard_writeout;
typedef enum {
        midgard_op_ld_st_noop = 0x03,

        /* Unclear why this is on the L/S unit, but (with an address of 0,
         * appropriate swizzle, magic constant 0x24, and xy mask?) moves fp32
         * cube map coordinates in r27 to its cube map texture coordinate
         * destination (e.g. r29). 0x4 magic for loading from fp16 instead */
        midgard_op_st_cubemap_coords = 0x0E,

        /* Used in OpenCL. Probably can load other things as well */
        midgard_op_ld_global_id = 0x10,
        /* The L/S unit can do perspective division a clock faster than the ALU
         * if you're lucky. Put the vec4 in r27, and call with 0x24 as the
         * unknown state; the output will be <x/w, y/w, z/w, 1>. Replace w with
         * z for the z version. See the sketch below. */
        midgard_op_ldst_perspective_division_z = 0x12,
        midgard_op_ldst_perspective_division_w = 0x13,
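
        /* Sketch of the presumed result in scalar C (illustrative only;
         * v[0..3] stand for the x/y/z/w lanes of the vec4 placed in r27):
         *
         *     void perspective_division_w(const float v[4], float out[4])
         *     {
         *             out[0] = v[0] / v[3];
         *             out[1] = v[1] / v[3];
         *             out[2] = v[2] / v[3];
         *             out[3] = 1.0f;
         *     }
         */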
        /* val in r27.y, address embedded, outputs result to argument. Invert
         * val for sub. Let val = ±1 for inc/dec. */
        midgard_op_atomic_add = 0x40,
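
        /* For example (an illustrative sketch of the convention above, not a
         * driver API; "@addr" stands for the embedded address):
         *
         *     atomic_sub(addr, val)  =>  r27.y = -val; atomic_add @addr
         *     atomic_inc(addr)       =>  r27.y = +1;   atomic_add @addr
         *     atomic_dec(addr)       =>  r27.y = -1;   atomic_add @addr
         */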
        midgard_op_atomic_and = 0x44,
        midgard_op_atomic_or  = 0x48,
        midgard_op_atomic_xor = 0x4C,

        midgard_op_atomic_imin = 0x50,
        midgard_op_atomic_umin = 0x54,
        midgard_op_atomic_imax = 0x58,
        midgard_op_atomic_umax = 0x5C,

        midgard_op_atomic_xchg = 0x60,
        /* Used for compute shader's __global arguments, __local variables (or
         * for register spilling) */

        midgard_op_ld_char   = 0x81,
        midgard_op_ld_char2  = 0x84,
        midgard_op_ld_short  = 0x85,
        midgard_op_ld_char4  = 0x88, /* short2, int, float */
        midgard_op_ld_short4 = 0x8C, /* int2, float2, long */
        midgard_op_ld_int4   = 0x90, /* float4, long2 */

        midgard_op_ld_attr_32  = 0x94,
        midgard_op_ld_attr_16  = 0x95,
        midgard_op_ld_attr_32u = 0x96,
        midgard_op_ld_attr_32i = 0x97,
        midgard_op_ld_vary_32  = 0x98,
        midgard_op_ld_vary_16  = 0x99,
        midgard_op_ld_vary_32u = 0x9A,
        midgard_op_ld_vary_32i = 0x9B,
        midgard_op_ld_color_buffer_16 = 0x9D,

        midgard_op_ld_uniform_16  = 0xAC,
        midgard_op_ld_uniform_32i = 0xA8,

        midgard_op_ld_uniform_32 = 0xB0,
        midgard_op_ld_color_buffer_8 = 0xBA,

        midgard_op_st_char   = 0xC0,
        midgard_op_st_char2  = 0xC4, /* short */
        midgard_op_st_char4  = 0xC8, /* short2, int, float */
        midgard_op_st_short4 = 0xCC, /* int2, float2, long */
        midgard_op_st_int4   = 0xD0, /* float4, long2 */

        midgard_op_st_vary_32  = 0xD4,
        midgard_op_st_vary_16  = 0xD5,
        midgard_op_st_vary_32u = 0xD6,
        midgard_op_st_vary_32i = 0xD7,

        /* Value to store in r27, location r26.w as short2 */
        midgard_op_st_image_f  = 0xD8,
        midgard_op_st_image_ui = 0xDA,
        midgard_op_st_image_i  = 0xDB,
} midgard_load_store_op;
typedef enum {
        midgard_interp_centroid = 1,
        midgard_interp_default = 2
} midgard_interpolation;
typedef enum {
        midgard_varying_mod_none = 0,

        /* Other values unknown */

        /* Take the would-be result and divide all components by its z/w
         * (perspective division baked in with the load) */
        midgard_varying_mod_perspective_z = 2,
        midgard_varying_mod_perspective_w = 3,
} midgard_varying_modifier;
typedef struct
__attribute__((__packed__))
{
        unsigned zero0 : 1; /* Always zero */

        midgard_varying_modifier modifier : 2;

        unsigned zero1 : 1; /* Always zero */

        /* Varying qualifiers, zero if not a varying */

        unsigned flat : 1;
        unsigned is_varying : 1; /* Always one for varying, but maybe something else? */
        midgard_interpolation interpolation : 2;

        unsigned zero2 : 2; /* Always zero */
} midgard_varying_parameter;
typedef struct
__attribute__((__packed__))
{
        midgard_load_store_op op : 8;
        unsigned reg     : 5;
        unsigned mask    : 4;
        unsigned swizzle : 8;
        unsigned unknown : 16;

        unsigned varying_parameters : 10;

        unsigned address : 9;
} midgard_load_store_word;
typedef struct
__attribute__((__packed__))
{
        unsigned type      : 4;
        unsigned next_type : 4;

        uint64_t word1 : 60;
        uint64_t word2 : 60;
} midgard_load_store;
/* 8-bit register selector used in texture ops to select a bias/LOD/gradient
 * register, shoved into the `bias` field */

typedef struct
__attribute__((__packed__))
{
        /* 32-bit register, clear for half-register */
        unsigned full : 1;

        /* Register select between r28/r29 */
        unsigned select : 1;

        /* For a half-register, selects the upper half */
        unsigned upper : 1;

        /* Indexes into the register */
        unsigned component : 2;

        /* Padding to make this 8-bit */
        unsigned zero : 3;
} midgard_tex_register_select;
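
/* A sketch of shoving the selector into an 8-bit immediate by hand, assuming
 * a little-endian bitfield layout with `full` in bit 0 (a guess; a production
 * packer would more likely memcpy the one-byte struct): */

static inline unsigned
midgard_pack_tex_register_select_sketch(midgard_tex_register_select sel)
{
        return (sel.full << 0) | (sel.select << 1) | (sel.upper << 2) |
               (sel.component << 3);
}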
/* Texture pipeline results are in r28-r29 */
#define REG_TEX_BASE 28

/* Texture opcodes... maybe? */
#define TEXTURE_OP_NORMAL      0x11 /* texture */
#define TEXTURE_OP_LOD         0x12 /* textureLod */
#define TEXTURE_OP_TEXEL_FETCH 0x14 /* texelFetch */

/* Computes horizontal and vertical derivatives respectively. Use with a float
 * sampler and a "2D" texture. Leave texture/sampler IDs as zero; they ought
 * to be ignored. Only works on 64 bits (two fp32 values) at a time, so
 * derivatives of a vec4 require 2 texture ops. For some reason, the blob
 * computes both X and Y derivatives at the same time and just throws out
 * whichever is unused; it's not known if this is a quirk of the hardware or
 * just of the blob. */

#define TEXTURE_OP_DFDX 0x0D
#define TEXTURE_OP_DFDY 0x1D
enum mali_sampler_type {
        MALI_SAMPLER_UNK      = 0x0,
        MALI_SAMPLER_FLOAT    = 0x1, /* sampler */
        MALI_SAMPLER_UNSIGNED = 0x2, /* usampler */
        MALI_SAMPLER_SIGNED   = 0x3, /* isampler */
};
typedef struct
__attribute__((__packed__))
{
        unsigned type      : 4;
        unsigned next_type : 4;

        unsigned op        : 6;
        unsigned shadow    : 1;
        unsigned is_gather : 1;

        /* A little obscure, but last is set for the last texture operation in
         * a shader. cont appears to just be last's opposite (?). Yeah, I know,
         * kind of funky.. BiOpen thinks it could do with memory hinting, or
         * tile locking? */
        unsigned cont : 1;
        unsigned last : 1;

        enum mali_texture_type format : 2;
        unsigned zero : 2;

        /* Is a register used to specify the
         * LOD/bias/offset? If set, use the `bias` field as
         * a register index. If clear, use the `bias` field
         * as an immediate. */
        unsigned lod_register : 1;

        /* Is a register used to specify an offset? If set, use the
         * offset_reg_* fields to encode this, duplicated for each of the
         * components. If clear, there is implicitly always an immediate offset
         * specified in offset_imm_* */
        unsigned offset_register : 1;

        unsigned in_reg_full    : 1;
        unsigned in_reg_select  : 1;
        unsigned in_reg_upper   : 1;
        unsigned in_reg_swizzle : 8;

        unsigned unknown8 : 2;

        unsigned out_full : 1;

        enum mali_sampler_type sampler_type : 2;

        unsigned out_reg_select : 1;
        unsigned out_upper      : 1;

        unsigned mask : 4;

        unsigned unknown2 : 2;

        unsigned swizzle  : 8;
        unsigned unknown4 : 8;

        unsigned unknownA : 4;

        /* In immediate mode, each offset field is an immediate range [0, 7].
         *
         * In register mode, offset_x becomes a register full / select / upper
         * triplet and a vec3 swizzle is splattered across offset_y/offset_z in
         * a genuinely bizarre way.
         *
         * For texel fetches in immediate mode, the range is the full [-8, 7],
         * but for normal texturing the top bit must be zero and a register
         * used instead. It's not clear where this limitation is from. */
        signed offset_x : 4;
        signed offset_y : 4;
        signed offset_z : 4;

        /* In immediate bias mode, for a normal texture op, this is
         * texture bias, computed as int(2^8 * frac(biasf)), with
         * bias_int = floor(biasf). For a textureLod, it's that, but
         * s/bias/lod. For a texel fetch, this is the LOD as-is.
         *
         * In register mode, this is a midgard_tex_register_select
         * structure and bias_int is zero */
        unsigned bias   : 8;
        signed bias_int : 8;

        unsigned texture_handle : 16;
        unsigned sampler_handle : 16;
} midgard_texture_word;
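
/* A minimal sketch of the immediate bias encoding described above, splitting
 * the float bias into its floor and an 8-bit fraction (illustrative only, not
 * validated against hardware; floor is open-coded to avoid <math.h>): */

static inline void
midgard_pack_bias_sketch(float biasf, signed char *bias_int, unsigned char *bias)
{
        int flr = (int) biasf;
        if ((float) flr > biasf)
                flr--; /* round toward negative infinity */

        *bias_int = (signed char) flr;                            /* floor(biasf) */
        *bias = (unsigned char) (256.0f * (biasf - (float) flr)); /* int(2^8 * frac(biasf)) */
}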