/*
 * Copyright (c) 2013 Connor Abbott (connor@abbott.cx)
 * Copyright (c) 2018 Alyssa Rosenzweig (alyssa@rosenzweig.io)
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
32 #include "panfrost-job.h"
/* Debug flag bits tested against the midgard_debug mask. */
#define MIDGARD_DBG_MSGS    0x0001
#define MIDGARD_DBG_SHADERS 0x0002

/* Global debug mask; defined once in the compiler proper. */
extern int midgard_debug;
/* Coarse classification of Midgard instruction words by functional unit.
 * NOTE(review): the `typedef enum { ... } midgard_word_type;` framing was
 * reconstructed (lines lost in extraction); the enumerators and their order
 * are from the source. */
typedef enum {
        midgard_word_type_alu,
        midgard_word_type_load_store,
        midgard_word_type_texture,
        midgard_word_type_unknown
} midgard_word_type;
/* ALU opcodes. Grouped by value range: float arithmetic, integer/bitwise,
 * comparisons, conversions, conditional selects, and transcendentals.
 * NOTE(review): the `typedef enum {` opener and `} midgard_alu_op;` closer
 * were reconstructed (the type name is grounded by its use as a bit-field
 * type later in this header); every enumerator and value is from the
 * source. */
typedef enum {
        midgard_alu_op_fadd       = 0x10,
        midgard_alu_op_fmul       = 0x14,

        midgard_alu_op_fmin       = 0x28,
        midgard_alu_op_fmax       = 0x2C,

        midgard_alu_op_fmov       = 0x30, /* fmov_rte */
        midgard_alu_op_fmov_rtz   = 0x31,
        midgard_alu_op_fmov_rtn   = 0x32,
        midgard_alu_op_fmov_rtp   = 0x33,
        midgard_alu_op_froundeven = 0x34,
        midgard_alu_op_ftrunc     = 0x35,
        midgard_alu_op_ffloor     = 0x36,
        midgard_alu_op_fceil      = 0x37,
        midgard_alu_op_ffma       = 0x38,
        midgard_alu_op_fdot3      = 0x3C,
        midgard_alu_op_fdot3r     = 0x3D,
        midgard_alu_op_fdot4      = 0x3E,
        midgard_alu_op_freduce    = 0x3F,

        midgard_alu_op_iadd       = 0x40,
        midgard_alu_op_ishladd    = 0x41,
        midgard_alu_op_isub       = 0x46,
        midgard_alu_op_iaddsat    = 0x48,
        midgard_alu_op_uaddsat    = 0x49,
        midgard_alu_op_isubsat    = 0x4E,
        midgard_alu_op_usubsat    = 0x4F,

        midgard_alu_op_imul       = 0x58,

        midgard_alu_op_imin       = 0x60,
        midgard_alu_op_umin       = 0x61,
        midgard_alu_op_imax       = 0x62,
        midgard_alu_op_umax       = 0x63,
        midgard_alu_op_ihadd      = 0x64,
        midgard_alu_op_uhadd      = 0x65,
        midgard_alu_op_irhadd     = 0x66,
        midgard_alu_op_urhadd     = 0x67,
        midgard_alu_op_iasr       = 0x68,
        midgard_alu_op_ilsr       = 0x69,
        midgard_alu_op_ishl       = 0x6E,

        midgard_alu_op_iand       = 0x70,
        midgard_alu_op_ior        = 0x71,
        midgard_alu_op_inand      = 0x72, /* ~(a & b), for inot let a = b */
        midgard_alu_op_inor       = 0x73, /* ~(a | b) */
        midgard_alu_op_iandnot    = 0x74, /* (a & ~b), used for not/b2f */
        midgard_alu_op_iornot     = 0x75, /* (a | ~b) */
        midgard_alu_op_ixor       = 0x76,
        midgard_alu_op_inxor      = 0x77, /* ~(a ^ b) */
        midgard_alu_op_iclz       = 0x78, /* Number of zeroes on left */
        midgard_alu_op_ibitcount8 = 0x7A, /* Counts bits in 8-bit increments */
        midgard_alu_op_imov       = 0x7B,
        midgard_alu_op_iabsdiff   = 0x7C,
        midgard_alu_op_uabsdiff   = 0x7D,
        midgard_alu_op_ichoose    = 0x7E, /* vector, component number - dupe for shuffle() */

        /* Float comparisons, writing a boolean result */
        midgard_alu_op_feq        = 0x80,
        midgard_alu_op_fne        = 0x81,
        midgard_alu_op_flt        = 0x82,
        midgard_alu_op_fle        = 0x83,
        midgard_alu_op_fball_eq   = 0x88,
        midgard_alu_op_bball_eq   = 0x89,
        midgard_alu_op_fball_lt   = 0x8A, /* all(lessThan(.., ..)) */
        midgard_alu_op_fball_lte  = 0x8B, /* all(lessThanEqual(.., ..)) */

        midgard_alu_op_bbany_neq  = 0x90, /* used for bvec4(1) */
        midgard_alu_op_fbany_neq  = 0x91, /* bvec4(0) also */
        midgard_alu_op_fbany_lt   = 0x92, /* any(lessThan(.., ..)) */
        midgard_alu_op_fbany_lte  = 0x93, /* any(lessThanEqual(.., ..)) */

        /* Float-to-integer conversions with explicit rounding mode */
        midgard_alu_op_f2i_rte    = 0x98,
        midgard_alu_op_f2i_rtz    = 0x99,
        midgard_alu_op_f2i_rtn    = 0x9A,
        midgard_alu_op_f2i_rtp    = 0x9B,
        midgard_alu_op_f2u_rte    = 0x9C,
        midgard_alu_op_f2u_rtz    = 0x9D,
        midgard_alu_op_f2u_rtn    = 0x9E,
        midgard_alu_op_f2u_rtp    = 0x9F,

        /* Integer comparisons */
        midgard_alu_op_ieq        = 0xA0,
        midgard_alu_op_ine        = 0xA1,
        midgard_alu_op_ult        = 0xA2,
        midgard_alu_op_ule        = 0xA3,
        midgard_alu_op_ilt        = 0xA4,
        midgard_alu_op_ile        = 0xA5,
        midgard_alu_op_iball_eq   = 0xA8,
        midgard_alu_op_iball_neq  = 0xA9,
        midgard_alu_op_uball_lt   = 0xAA,
        midgard_alu_op_uball_lte  = 0xAB,
        midgard_alu_op_iball_lt   = 0xAC,
        midgard_alu_op_iball_lte  = 0xAD,

        midgard_alu_op_ibany_eq   = 0xB0,
        midgard_alu_op_ibany_neq  = 0xB1,
        midgard_alu_op_ubany_lt   = 0xB2,
        midgard_alu_op_ubany_lte  = 0xB3,
        midgard_alu_op_ibany_lt   = 0xB4, /* any(lessThan(.., ..)) */
        midgard_alu_op_ibany_lte  = 0xB5, /* any(lessThanEqual(.., ..)) */

        /* Integer-to-float conversions with explicit rounding mode */
        midgard_alu_op_i2f_rte    = 0xB8,
        midgard_alu_op_i2f_rtz    = 0xB9,
        midgard_alu_op_i2f_rtn    = 0xBA,
        midgard_alu_op_i2f_rtp    = 0xBB,
        midgard_alu_op_u2f_rte    = 0xBC,
        midgard_alu_op_u2f_rtz    = 0xBD,
        midgard_alu_op_u2f_rtn    = 0xBE,
        midgard_alu_op_u2f_rtp    = 0xBF,

        /* Conditional selects */
        midgard_alu_op_icsel_v    = 0xC0, /* condition code r31 */
        midgard_alu_op_icsel      = 0xC1, /* condition code r31.w */
        midgard_alu_op_fcsel_v    = 0xC4,
        midgard_alu_op_fcsel      = 0xC5,
        midgard_alu_op_fround     = 0xC6,

        midgard_alu_op_fatan_pt2  = 0xE8,
        midgard_alu_op_fpow_pt1   = 0xEC,
        midgard_alu_op_fpown_pt1  = 0xED,
        midgard_alu_op_fpowr_pt1  = 0xEE,

        /* Transcendentals */
        midgard_alu_op_frcp       = 0xF0,
        midgard_alu_op_frsqrt     = 0xF2,
        midgard_alu_op_fsqrt      = 0xF3,
        midgard_alu_op_fexp2      = 0xF4,
        midgard_alu_op_flog2      = 0xF5,
        midgard_alu_op_fsin       = 0xF6,
        midgard_alu_op_fcos       = 0xF7,
        midgard_alu_op_fatan2_pt1 = 0xF9,
} midgard_alu_op;
/* Output modifiers applied to floating-point ALU results. */
typedef enum {
        midgard_outmod_none = 0,
        midgard_outmod_pos  = 1,
        /* 0x2 not observed in this header -- possibly unused */
        midgard_outmod_sat  = 3
} midgard_outmod_float;
/* Output modifiers applied to integer ALU results. */
typedef enum {
        midgard_outmod_int_saturate  = 0,
        midgard_outmod_uint_saturate = 1,
        midgard_outmod_int_wrap      = 2,
        midgard_outmod_int_high      = 3, /* Overflowed portion */
} midgard_outmod_int;
/* Operand size for an ALU word, in bits per component.
 * NOTE(review): typedef framing reconstructed; the type name is grounded by
 * its use as a bit-field type later in this header. */
typedef enum {
        midgard_reg_mode_8  = 0,
        midgard_reg_mode_16 = 1,
        midgard_reg_mode_32 = 2,
        midgard_reg_mode_64 = 3
} midgard_reg_mode;
/* Selects which half of the destination register a narrowed result lands in. */
typedef enum {
        midgard_dest_override_lower = 0,
        midgard_dest_override_upper = 1,
        midgard_dest_override_none  = 2
} midgard_dest_override;
/* Source modifiers for integer-typed ops.
 * NOTE(review): typedef framing reconstructed; the name `midgard_int_mod` is
 * taken from the comment elsewhere in this header that refers to it. */
typedef enum {
        midgard_int_sign_extend = 0,
        midgard_int_zero_extend = 1,
        midgard_int_normal      = 2,
        midgard_int_shift       = 3
} midgard_int_mod;
/* Source modifier bits for float-typed ops (cf. the `mod` field comment:
 * "from midgard_float_mod_*" when the op is float-typed). */
#define MIDGARD_FLOAT_MOD_ABS (1 << 0)
#define MIDGARD_FLOAT_MOD_NEG (1 << 1)
/* Source descriptor for one operand of a vector ALU op.
 * NOTE(review): the `mod` and `rep_low` fields were lost in the mangled
 * source and are reconstructed from the surviving comments -- confirm
 * widths and order against the upstream header. */
typedef struct
__attribute__((__packed__))
{
        /* Either midgard_int_mod or from midgard_float_mod_*, depending on the
         * type of op */
        unsigned mod : 2;

        /* replicate lower half if dest = half, or low/high half selection if
         * dest = full */
        bool rep_low  : 1;
        bool rep_high : 1; /* unused if dest = full */
        bool half     : 1; /* only matters if dest = full */
        unsigned swizzle : 8;
} midgard_vector_alu_src;
243 __attribute__((__packed__
))
245 midgard_alu_op op
: 8;
246 midgard_reg_mode reg_mode
: 2;
249 midgard_dest_override dest_override
: 2;
250 midgard_outmod_float outmod
: 2;
/* Source descriptor for one operand of a scalar ALU op.
 * NOTE(review): the leading `mod` field was lost in the mangled source and
 * is reconstructed by analogy with midgard_vector_alu_src -- confirm. */
typedef struct
__attribute__((__packed__))
{
        /* Either midgard_int_mod or from midgard_float_mod_*, depending on the
         * type of op */
        unsigned mod : 2;
        bool full : 1; /* 0 = half, 1 = full */
        unsigned component : 3;
} midgard_scalar_alu_src;
266 __attribute__((__packed__
))
268 midgard_alu_op op
: 8;
271 unsigned unknown
: 1;
273 bool output_full
: 1;
274 unsigned output_component
: 3;
279 __attribute__((__packed__
))
281 unsigned src1_reg
: 5;
282 unsigned src2_reg
: 5;
283 unsigned out_reg
: 5;
/* In addition to conditional branches and jumps (unconditional branches),
 * Midgard implements a bit of fixed function functionality used in fragment
 * shaders via specially crafted branches. These have special branch opcodes,
 * which perform a fixed-function operation and/or use the results of a
 * fixed-function operation as the branch condition. */

typedef enum {
        /* Regular branches */
        midgard_jmp_writeout_op_branch_uncond = 1,
        midgard_jmp_writeout_op_branch_cond = 2,

        /* In a fragment shader, execute a discard_if instruction, with the
         * corresponding condition code. Terminates the shader, so generally
         * set the branch target to out of the shader */
        midgard_jmp_writeout_op_discard = 4,

        /* Branch if the tilebuffer is not yet ready. At the beginning of a
         * fragment shader that reads from the tile buffer, for instance via
         * ARM_shader_framebuffer_fetch or EXT_pixel_local_storage, this branch
         * operation should be used as a loop. An instruction like
         * "br.tilebuffer.always -1" does the trick, corresponding to
         * "while(!is_tilebuffer_ready) */
        midgard_jmp_writeout_op_tilebuffer_pending = 6,

        /* In a fragment shader, try to write out the value pushed to r0 to the
         * tilebuffer, subject to unknown state in r1.z and r1.w. If this
         * succeeds, the shader terminates. If it fails, it branches to the
         * specified branch target. Generally, this should be used in a loop to
         * itself, acting as "do { write(r0); } while(!write_successful);" */
        midgard_jmp_writeout_op_writeout = 7,
} midgard_jmp_writeout_op;
/* Condition codes for conditional (and fixed-function) branches.
 * NOTE(review): typedef framing reconstructed; the type name is grounded by
 * its use as a bit-field type in the branch structures below. */
typedef enum {
        midgard_condition_write0 = 0,

        /* These condition codes denote a conditional branch on FALSE and on
         * TRUE respectively */
        midgard_condition_false = 1,
        midgard_condition_true = 2,

        /* This condition code always branches. For a pure branch, the
         * unconditional branch coding should be used instead, but for
         * fixed-function branch opcodes, this is still useful */
        midgard_condition_always = 3,
} midgard_condition;
335 __attribute__((__packed__
))
337 midgard_jmp_writeout_op op
: 3; /* == branch_uncond */
338 unsigned dest_tag
: 4; /* tag of branch destination */
339 unsigned unknown
: 2;
342 midgard_branch_uncond
;
345 __attribute__((__packed__
))
347 midgard_jmp_writeout_op op
: 3; /* == branch_cond */
348 unsigned dest_tag
: 4; /* tag of branch destination */
350 midgard_condition cond
: 2;
355 __attribute__((__packed__
))
357 midgard_jmp_writeout_op op
: 3; /* == branch_cond */
358 unsigned dest_tag
: 4; /* tag of branch destination */
359 unsigned unknown
: 2;
363 midgard_branch_extended
;
366 __attribute__((__packed__
))
368 midgard_jmp_writeout_op op
: 3; /* == writeout */
369 unsigned unknown
: 13;
/* Opcodes executed by the load/store unit.
 * NOTE(review): the `typedef enum {` opener was reconstructed; enumerators,
 * values and comments are from the source. */
typedef enum {
        midgard_op_ld_st_noop = 0x03,

        /* Unclear why this is on the L/S unit, but (with an address of 0,
         * appropriate swizzle, magic constant 0x24, and xy mask?) moves fp32 cube
         * map coordinates in r27 to its cube map texture coordinate
         * destination (e.g r29). 0x4 magic for lding from fp16 instead */
        midgard_op_st_cubemap_coords = 0x0E,

        /* Used in OpenCL. Probably can ld other things as well */
        midgard_op_ld_global_id = 0x10,

        /* The L/S unit can do perspective division a clock faster than the ALU
         * if you're lucky. Put the vec4 in r27, and call with 0x24 as the
         * unknown state; the output will be <x/w, y/w, z/w, 1>. Replace w with
         * z for the z version */
        midgard_op_ldst_perspective_division_z = 0x12,
        midgard_op_ldst_perspective_division_w = 0x13,

        /* val in r27.y, address embedded, outputs result to argument. Invert val for sub. Let val = +-1 for inc/dec. */
        midgard_op_atomic_add = 0x40,
        midgard_op_atomic_and = 0x44,
        midgard_op_atomic_or  = 0x48,
        midgard_op_atomic_xor = 0x4C,

        midgard_op_atomic_imin = 0x50,
        midgard_op_atomic_umin = 0x54,
        midgard_op_atomic_imax = 0x58,
        midgard_op_atomic_umax = 0x5C,

        midgard_op_atomic_xchg = 0x60,

        /* Used for compute shader's __global arguments, __local variables (or
         * for register spilling) */
        midgard_op_ld_char   = 0x81,
        midgard_op_ld_char2  = 0x84,
        midgard_op_ld_short  = 0x85,
        midgard_op_ld_char4  = 0x88, /* short2, int, float */
        midgard_op_ld_short4 = 0x8C, /* int2, float2, long */
        midgard_op_ld_int4   = 0x90, /* float4, long2 */

        midgard_op_ld_attr_32  = 0x94,
        midgard_op_ld_attr_16  = 0x95,
        midgard_op_ld_attr_32i = 0x97,
        midgard_op_ld_vary_32  = 0x98,
        midgard_op_ld_vary_16  = 0x99,
        midgard_op_ld_vary_32i = 0x9B,
        midgard_op_ld_color_buffer_16 = 0x9D,

        midgard_op_ld_uniform_16 = 0xAC,

        midgard_op_ld_uniform_32 = 0xB0,
        midgard_op_ld_color_buffer_8 = 0xBA,

        midgard_op_st_char   = 0xC0,
        midgard_op_st_char2  = 0xC4, /* short */
        midgard_op_st_char4  = 0xC8, /* short2, int, float */
        midgard_op_st_short4 = 0xCC, /* int2, float2, long */
        midgard_op_st_int4   = 0xD0, /* float4, long2 */

        midgard_op_st_vary_32  = 0xD4,
        midgard_op_st_vary_16  = 0xD5,
        midgard_op_st_vary_32i = 0xD7,

        /* Value to st in r27, location r26.w as short2 */
        midgard_op_st_image_f  = 0xD8,
        midgard_op_st_image_ui = 0xDA,
        midgard_op_st_image_i  = 0xDB,
} midgard_load_store_op;
/* Interpolation qualifier for varying loads. */
typedef enum {
        midgard_interp_centroid = 1,
        midgard_interp_default = 2
} midgard_interpolation;
/* Modifier applied to a varying load. */
typedef enum {
        midgard_varying_mod_none = 0,

        /* Other values unknown */

        /* Take the would-be result and divide all components by its z/w
         * (perspective division baked in with the load) */
        midgard_varying_mod_perspective_z = 2,
        midgard_varying_mod_perspective_w = 3,
} midgard_varying_modifier;
466 __attribute__((__packed__
))
468 unsigned zero0
: 1; /* Always zero */
470 midgard_varying_modifier modifier
: 2;
472 unsigned zero1
: 1; /* Always zero */
474 /* Varying qualifiers, zero if not a varying */
476 unsigned is_varying
: 1; /* Always one for varying, but maybe something else? */
477 midgard_interpolation interpolation
: 2;
479 unsigned zero2
: 2; /* Always zero */
481 midgard_varying_parameter
;
484 __attribute__((__packed__
))
486 midgard_load_store_op op
: 8;
489 unsigned swizzle
: 8;
490 unsigned unknown
: 16;
492 unsigned varying_parameters
: 10;
494 unsigned address
: 9;
496 midgard_load_store_word
;
499 __attribute__((__packed__
))
502 unsigned next_type
: 4;
/* 8-bit register selector used in texture ops to select a bias/LOD/gradient
 * register, shoved into the `bias` field.
 * NOTE(review): the `select`, `upper`, `full` and `padding` fields were lost
 * in the mangled source and are reconstructed from the surviving comments
 * (the struct self-describes as 8 bits) -- confirm against upstream. */
typedef struct
__attribute__((__packed__))
{
        /* Combines with component_hi to form 2-bit component select out of
         * xyzw, as the component for bias/LOD and the starting component of a
         * gradient */
        unsigned component_lo : 1;

        /* Register select between r28/r29 */
        unsigned select : 1;

        /* For a half-register, selects the upper half */
        unsigned upper : 1;

        /* Specifies a full-register, clear for a half-register. Mutually
         * exclusive with upper. */
        unsigned full : 1;

        /* Higher half of component_lo. Always seen to be set for LOD/bias
         * and clear for processed gradients, but I'm not sure if that's a
         * hardware requirement. */
        unsigned component_hi : 1;

        /* Padding to make this 8-bit */
        unsigned padding : 3;
} midgard_tex_register_select;
/* Texture pipeline results are in r28-r29 */
#define REG_TEX_BASE 28

/* Texture opcodes... maybe? */
#define TEXTURE_OP_NORMAL 0x11      /* texture */
#define TEXTURE_OP_LOD 0x12         /* textureLod */
#define TEXTURE_OP_TEXEL_FETCH 0x14 /* texelFetch */
548 __attribute__((__packed__
))
551 unsigned next_type
: 4;
555 unsigned is_gather
: 1;
557 /* A little obscure, but last is set for the last texture operation in
558 * a shader. cont appears to just be last's opposite (?). Yeah, I know,
559 * kind of funky.. BiOpen thinks it could do with memory hinting, or
565 enum mali_texture_type format
: 2;
568 /* Is a register used to specify the
569 * LOD/bias/offset? If set, use the `bias` field as
570 * a register index. If clear, use the `bias` field
571 * as an immediate. */
572 unsigned lod_register
: 1;
574 /* Is a register used to specify an offset? If set, use the
575 * offset_reg_* fields to encode this, duplicated for each of the
576 * components. If clear, there is implcitly always an immediate offst
577 * specificed in offset_imm_* */
578 unsigned offset_register
: 1;
580 unsigned in_reg_full
: 1;
581 unsigned in_reg_select
: 1;
582 unsigned in_reg_upper
: 1;
583 unsigned in_reg_swizzle
: 8;
585 unsigned unknown8
: 2;
587 unsigned out_full
: 1;
589 /* Always 1 afaict... */
590 unsigned unknown7
: 2;
592 unsigned out_reg_select
: 1;
593 unsigned out_upper
: 1;
597 unsigned unknown2
: 2;
599 unsigned swizzle
: 8;
600 unsigned unknown4
: 8;
602 unsigned unknownA
: 4;
604 /* In immediate mode, each offset field is an immediate range [0, 7].
606 * In register mode, offset_x becomes a register full / select / upper
607 * triplet and a vec3 swizzle is splattered across offset_y/offset_z in
608 * a genuinely bizarre way.
610 * For texel fetches in immediate mode, the range is the full [-8, 7],
611 * but for normal texturing the top bit must be zero and a register
612 * used instead. It's not clear where this limitation is from. */
618 /* In immediate bias mode, for a normal texture op, this is
619 * texture bias, computed as int(2^8 * frac(biasf)), with
620 * bias_int = floor(bias). For a textureLod, it's that, but
621 * s/bias/lod. For a texel fetch, this is the LOD as-is.
623 * In register mode, this is a midgard_tex_register_select
624 * structure and bias_int is zero */
629 unsigned texture_handle
: 16;
630 unsigned sampler_handle
: 16;
632 midgard_texture_word
;