5 * Copyright (c) 2013 Connor Abbott (connor@abbott.cx)
6 * Copyright (c) 2018 Alyssa Rosenzweig (alyssa@rosenzweig.io)
8 * Permission is hereby granted, free of charge, to any person obtaining a copy
9 * of this software and associated documentation files (the "Software"), to deal
10 * in the Software without restriction, including without limitation the rights
11 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
12 * copies of the Software, and to permit persons to whom the Software is
13 * furnished to do so, subject to the following conditions:
15 * The above copyright notice and this permission notice shall be included in
16 * all copies or substantial portions of the Software.
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
21 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
23 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
/* Debug flags OR'd together into the global `midgard_debug` bitmask
 * (declared below). NOTE(review): the code that sets/reads these flags is
 * not visible in this chunk — presumably driven by a debug environment
 * variable; confirm against the compiler proper. */
#define MIDGARD_DBG_MSGS 0x0001     /* verbose compiler messages */
#define MIDGARD_DBG_SHADERS 0x0002  /* dump shaders being compiled */
#define MIDGARD_DBG_SHADERDB 0x0004 /* emit shader-db style statistics */
/* Global bitmask of MIDGARD_DBG_* flags; defined once in the compiler. */
extern int midgard_debug;
/* Categories of instruction words encountered when walking a compiled
 * Midgard shader binary.
 * NOTE(review): the enum's `typedef enum {` opener and `} name;` closer
 * were lost in this chunk; enumerators take default values 0..3. */
midgard_word_type_alu,
midgard_word_type_load_store,
midgard_word_type_texture,
midgard_word_type_unknown
/* Bundle tags identifying the kind (and, for ALU, the size in words) of
 * each instruction bundle.
 * NOTE(review): the enum opener/closer and several tag values (e.g. 0x3,
 * 0x6-0xB) are not visible in this chunk — confirm against the full
 * header. */
TAG_TEXTURE_4_VTX = 0x2,     /* texture bundle executed on the vertex unit */
TAG_TEXTURE_4_BARRIER = 0x4, /* texture bundle acting as a barrier */
TAG_LOAD_STORE_4 = 0x5,      /* load/store bundle */
TAG_ALU_4_WRITEOUT = 0xC,    /* ALU bundles that also perform writeout, by size */
TAG_ALU_8_WRITEOUT = 0xD,
TAG_ALU_12_WRITEOUT = 0xE,
TAG_ALU_16_WRITEOUT = 0xF
/* Hardware opcodes for the Midgard ALU pipelines. Values are the raw
 * 8-bit encodings; gaps in the numbering correspond to opcodes not listed
 * in this chunk.
 * NOTE(review): the `typedef enum {` / `} midgard_alu_op;` wrapper was
 * reconstructed — the type name is grounded by its use in the ALU word
 * bitfields below. */
typedef enum {
        midgard_alu_op_fadd = 0x10,
        midgard_alu_op_fmul = 0x14,

        midgard_alu_op_fmin = 0x28,
        midgard_alu_op_fmax = 0x2C,

        midgard_alu_op_fmov = 0x30, /* fmov_rte */
        midgard_alu_op_fmov_rtz = 0x31,
        midgard_alu_op_fmov_rtn = 0x32,
        midgard_alu_op_fmov_rtp = 0x33,
        midgard_alu_op_froundeven = 0x34,
        midgard_alu_op_ftrunc = 0x35,
        midgard_alu_op_ffloor = 0x36,
        midgard_alu_op_fceil = 0x37,
        midgard_alu_op_ffma = 0x38,
        midgard_alu_op_fdot3 = 0x3C,
        midgard_alu_op_fdot3r = 0x3D,
        midgard_alu_op_fdot4 = 0x3E,
        midgard_alu_op_freduce = 0x3F,

        midgard_alu_op_iadd = 0x40,
        midgard_alu_op_ishladd = 0x41, /* a + (b<<1) */
        midgard_alu_op_isub = 0x46,
        midgard_alu_op_iaddsat = 0x48,
        midgard_alu_op_uaddsat = 0x49,
        midgard_alu_op_isubsat = 0x4E,
        midgard_alu_op_usubsat = 0x4F,

        midgard_alu_op_imul = 0x58,

        midgard_alu_op_imin = 0x60,
        midgard_alu_op_umin = 0x61,
        midgard_alu_op_imax = 0x62,
        midgard_alu_op_umax = 0x63,
        midgard_alu_op_ihadd = 0x64,
        midgard_alu_op_uhadd = 0x65,
        midgard_alu_op_irhadd = 0x66,
        midgard_alu_op_urhadd = 0x67,
        midgard_alu_op_iasr = 0x68,
        midgard_alu_op_ilsr = 0x69,
        midgard_alu_op_ishl = 0x6E,

        midgard_alu_op_iand = 0x70,
        midgard_alu_op_ior = 0x71,
        midgard_alu_op_inand = 0x72, /* ~(a & b), for inot let a = b */
        midgard_alu_op_inor = 0x73, /* ~(a | b) */
        midgard_alu_op_iandnot = 0x74, /* (a & ~b), used for not/b2f */
        midgard_alu_op_iornot = 0x75, /* (a | ~b) */
        midgard_alu_op_ixor = 0x76,
        midgard_alu_op_inxor = 0x77, /* ~(a ^ b) — name says not-xor; source comment said ~(a & b), which duplicates inand */
        midgard_alu_op_iclz = 0x78, /* Number of zeroes on left */
        midgard_alu_op_ibitcount8 = 0x7A, /* Counts bits in 8-bit increments */
        midgard_alu_op_imov = 0x7B,
        midgard_alu_op_iabsdiff = 0x7C,
        midgard_alu_op_uabsdiff = 0x7D,
        midgard_alu_op_ichoose = 0x7E, /* vector, component number - dupe for shuffle() */

        midgard_alu_op_feq = 0x80,
        midgard_alu_op_fne = 0x81,
        midgard_alu_op_flt = 0x82,
        midgard_alu_op_fle = 0x83,
        midgard_alu_op_fball_eq = 0x88,
        midgard_alu_op_fball_neq = 0x89,
        midgard_alu_op_fball_lt = 0x8A, /* all(lessThan(.., ..)) */
        midgard_alu_op_fball_lte = 0x8B, /* all(lessThanEqual(.., ..)) */

        midgard_alu_op_fbany_eq = 0x90,
        midgard_alu_op_fbany_neq = 0x91,
        midgard_alu_op_fbany_lt = 0x92, /* any(lessThan(.., ..)) */
        midgard_alu_op_fbany_lte = 0x93, /* any(lessThanEqual(.., ..)) */

        /* Float-to-integer conversions, by rounding mode */
        midgard_alu_op_f2i_rte = 0x98,
        midgard_alu_op_f2i_rtz = 0x99,
        midgard_alu_op_f2i_rtn = 0x9A,
        midgard_alu_op_f2i_rtp = 0x9B,
        midgard_alu_op_f2u_rte = 0x9C,
        midgard_alu_op_f2u_rtz = 0x9D,
        midgard_alu_op_f2u_rtn = 0x9E,
        midgard_alu_op_f2u_rtp = 0x9F,

        midgard_alu_op_ieq = 0xA0,
        midgard_alu_op_ine = 0xA1,
        midgard_alu_op_ult = 0xA2,
        midgard_alu_op_ule = 0xA3,
        midgard_alu_op_ilt = 0xA4,
        midgard_alu_op_ile = 0xA5,
        midgard_alu_op_iball_eq = 0xA8,
        midgard_alu_op_iball_neq = 0xA9,
        midgard_alu_op_uball_lt = 0xAA,
        midgard_alu_op_uball_lte = 0xAB,
        midgard_alu_op_iball_lt = 0xAC,
        midgard_alu_op_iball_lte = 0xAD,

        midgard_alu_op_ibany_eq = 0xB0,
        midgard_alu_op_ibany_neq = 0xB1,
        midgard_alu_op_ubany_lt = 0xB2,
        midgard_alu_op_ubany_lte = 0xB3,
        midgard_alu_op_ibany_lt = 0xB4, /* any(lessThan(.., ..)) */
        midgard_alu_op_ibany_lte = 0xB5, /* any(lessThanEqual(.., ..)) */

        /* Integer-to-float conversions, by rounding mode */
        midgard_alu_op_i2f_rte = 0xB8,
        midgard_alu_op_i2f_rtz = 0xB9,
        midgard_alu_op_i2f_rtn = 0xBA,
        midgard_alu_op_i2f_rtp = 0xBB,
        midgard_alu_op_u2f_rte = 0xBC,
        midgard_alu_op_u2f_rtz = 0xBD,
        midgard_alu_op_u2f_rtn = 0xBE,
        midgard_alu_op_u2f_rtp = 0xBF,

        midgard_alu_op_icsel_v = 0xC0, /* condition code r31 */
        midgard_alu_op_icsel = 0xC1, /* condition code r31.w */
        midgard_alu_op_fcsel_v = 0xC4,
        midgard_alu_op_fcsel = 0xC5,
        midgard_alu_op_fround = 0xC6,

        midgard_alu_op_fatan_pt2 = 0xE8,
        midgard_alu_op_fpow_pt1 = 0xEC,
        midgard_alu_op_fpown_pt1 = 0xED,
        midgard_alu_op_fpowr_pt1 = 0xEE,

        midgard_alu_op_frcp = 0xF0,
        midgard_alu_op_frsqrt = 0xF2,
        midgard_alu_op_fsqrt = 0xF3,
        midgard_alu_op_fexp2 = 0xF4,
        midgard_alu_op_flog2 = 0xF5,
        midgard_alu_op_fsin = 0xF6,
        midgard_alu_op_fcos = 0xF7,
        midgard_alu_op_fatan2_pt1 = 0xF9,
} midgard_alu_op;
/* Output modifiers applied to the result of float ALU ops.
 * NOTE(review): the `typedef enum {` opener was lost in mangling and has
 * been restored; the closer (and thus the name) is original. */
typedef enum {
        midgard_outmod_none = 0,
        midgard_outmod_pos = 1, /* max(x, 0.0) */
        midgard_outmod_sat_signed = 2, /* clamp(x, -1.0, 1.0) */
        midgard_outmod_sat = 3 /* clamp(x, 0.0, 1.0) */
} midgard_outmod_float;
/* Output modifiers applied to the result of integer ALU ops.
 * NOTE(review): the `typedef enum {` opener was lost in mangling and has
 * been restored; the closer (and thus the name) is original. */
typedef enum {
        midgard_outmod_int_saturate = 0,
        midgard_outmod_uint_saturate = 1,
        midgard_outmod_int_wrap = 2,
        midgard_outmod_int_high = 3, /* Overflowed portion */
} midgard_outmod_int;
/* Operand lane width for ALU ops (8/16/32/64-bit).
 * NOTE(review): the `typedef enum { ... } midgard_reg_mode;` wrapper was
 * reconstructed — the name is grounded by its use in the vector ALU word
 * (`midgard_reg_mode reg_mode : 2`). */
typedef enum {
        midgard_reg_mode_8 = 0,
        midgard_reg_mode_16 = 1,
        midgard_reg_mode_32 = 2,
        midgard_reg_mode_64 = 3
} midgard_reg_mode;
/* Selects which half of a destination register a narrowed result lands
 * in, or no override.
 * NOTE(review): the `typedef enum {` opener was lost in mangling and has
 * been restored; the closer (and thus the name) is original. */
typedef enum {
        midgard_dest_override_lower = 0,
        midgard_dest_override_upper = 1,
        midgard_dest_override_none = 2
} midgard_dest_override;
/* Source modifiers for integer ALU operands.
 * NOTE(review): the `typedef enum { ... } midgard_int_mod;` wrapper was
 * reconstructed — the name is taken from the source-modifier comment in
 * midgard_vector_alu_src below; confirm against the full header. */
typedef enum {
        midgard_int_sign_extend = 0,
        midgard_int_zero_extend = 1,
        midgard_int_normal = 2,
        midgard_int_shift = 3
} midgard_int_mod;
/* Source modifier bits for float ALU operands (see the `mod` field of the
 * ALU source descriptors). */
#define MIDGARD_FLOAT_MOD_ABS (1 << 0) /* take absolute value of the source */
#define MIDGARD_FLOAT_MOD_NEG (1 << 1) /* negate the source */
/* Source operand descriptor for a vector ALU instruction.
 * NOTE(review): this chunk is missing the `typedef struct` opener, the
 * leading `}` of the closer, and several leading bitfields (including the
 * modifier field the first comment refers to) — reconstruct against the
 * full header before relying on the bit layout. */
__attribute__((__packed__))
/* Either midgard_int_mod or from midgard_float_mod_*, depending on the
 * operand type (int vs float) — field itself not visible in this chunk */
/* replicate lower half if dest = half, or low/high half selection if
 * dest = full */
bool rep_high : 1; /* unused if dest = full */
bool half : 1; /* only matters if dest = full */
unsigned swizzle : 8; /* 2 bits per component */
midgard_vector_alu_src;
/* Leading fields of the packed vector ALU instruction word.
 * NOTE(review): the `typedef struct` opener, the remaining bitfields
 * (source modifiers, mask, sources) and the closer are not visible in
 * this chunk. */
__attribute__((__packed__))
midgard_alu_op op : 8;        /* opcode, from midgard_alu_op */
midgard_reg_mode reg_mode : 2; /* operand lane width */
midgard_dest_override dest_override : 2; /* half-register destination select */
/* Source operand descriptor for a scalar ALU instruction.
 * NOTE(review): the `typedef struct` opener, the leading `}` of the
 * closer, and any preceding modifier bitfields are not visible in this
 * chunk. */
__attribute__((__packed__))
bool full : 1; /* 0 = half, 1 = full */
unsigned component : 3; /* which component of the source register to read */
midgard_scalar_alu_src;
/* Leading fields of the packed scalar ALU instruction word.
 * NOTE(review): the `typedef struct` opener, the source descriptors, and
 * the closer are not visible in this chunk. */
__attribute__((__packed__))
midgard_alu_op op : 8; /* opcode, from midgard_alu_op */
unsigned unknown : 1;
bool output_full : 1; /* write a full (32-bit) vs half destination */
unsigned output_component : 3; /* destination component index */
/* Register-selection fields shared by ALU words: two 5-bit sources and a
 * 5-bit destination (32 addressable registers).
 * NOTE(review): the `typedef struct` opener, any remaining fields, and
 * the closer are not visible in this chunk. */
__attribute__((__packed__))
unsigned src1_reg : 5;
unsigned src2_reg : 5;
unsigned out_reg : 5;
306 /* In addition to conditional branches and jumps (unconditional branches),
307 * Midgard implements a bit of fixed function functionality used in fragment
308 * shaders via specially crafted branches. These have special branch opcodes,
309 * which perform a fixed-function operation and/or use the results of a
310 * fixed-function operation as the branch condition. */
/* Branch/writeout opcodes (see the fixed-function branch comment above).
 * NOTE(review): the `typedef enum {` opener was lost in mangling and has
 * been restored; the closer (and thus the name) is original. Values 0, 3
 * and 5 are not listed in this chunk. */
typedef enum {
        /* Regular branches */
        midgard_jmp_writeout_op_branch_uncond = 1,
        midgard_jmp_writeout_op_branch_cond = 2,

        /* In a fragment shader, execute a discard_if instruction, with the
         * corresponding condition code. Terminates the shader, so generally
         * set the branch target to out of the shader */
        midgard_jmp_writeout_op_discard = 4,

        /* Branch if the tilebuffer is not yet ready. At the beginning of a
         * fragment shader that reads from the tile buffer, for instance via
         * ARM_shader_framebuffer_fetch or EXT_pixel_local_storage, this branch
         * operation should be used as a loop. An instruction like
         * "br.tilebuffer.always -1" does the trick, corresponding to
         * "while(!is_tilebuffer_ready) */
        midgard_jmp_writeout_op_tilebuffer_pending = 6,

        /* In a fragment shader, try to write out the value pushed to r0 to the
         * tilebuffer, subject to unknown state in r1.z and r1.w. If this
         * succeeds, the shader terminates. If it fails, it branches to the
         * specified branch target. Generally, this should be used in a loop to
         * itself, acting as "do { write(r0); } while(!write_successful);" */
        midgard_jmp_writeout_op_writeout = 7,
} midgard_jmp_writeout_op;
/* 2-bit condition codes used by conditional branches.
 * NOTE(review): the `typedef enum { ... } midgard_condition;` wrapper was
 * reconstructed — the name is grounded by its use in the conditional
 * branch word (`midgard_condition cond : 2`). */
typedef enum {
        midgard_condition_write0 = 0,

        /* These condition codes denote a conditional branch on FALSE and on
         * TRUE respectively */
        midgard_condition_false = 1,
        midgard_condition_true = 2,

        /* This condition code always branches. For a pure branch, the
         * unconditional branch coding should be used instead, but for
         * fixed-function branch opcodes, this is still useful */
        midgard_condition_always = 3,
} midgard_condition;
/* Packed encoding of an unconditional branch.
 * NOTE(review): the `typedef struct` opener, the leading `}` of the
 * closer, and the branch-offset field are not visible in this chunk. */
__attribute__((__packed__))
midgard_jmp_writeout_op op : 3; /* == branch_uncond */
unsigned dest_tag : 4; /* tag of branch destination */
unsigned unknown : 2;
midgard_branch_uncond;
/* Packed encoding of a conditional branch.
 * NOTE(review): the `typedef struct` opener, the branch-offset field, and
 * the closer (with the struct name) are not visible in this chunk. */
__attribute__((__packed__))
midgard_jmp_writeout_op op : 3; /* == branch_cond */
unsigned dest_tag : 4; /* tag of branch destination */
midgard_condition cond : 2; /* condition code selecting taken/not-taken */
/* Packed encoding of an extended (multi-condition) branch.
 * NOTE(review): the `typedef struct` opener, the leading `}` of the
 * closer, the offset field, and the 16-bit condition LUT field described
 * below are not visible in this chunk. */
__attribute__((__packed__))
midgard_jmp_writeout_op op : 3; /* == branch_cond */
unsigned dest_tag : 4; /* tag of branch destination */
unsigned unknown : 2;

/* Extended branches permit inputting up to 4 conditions loaded into
 * r31 (two in r31.w and two in r31.x). In the most general case, we
 * specify a function f(A, B, C, D) mapping 4 1-bit conditions to a
 * single 1-bit branch criteria. Note that the domain of f has 2^(2^4)
 * elements, each mapping to 1-bit of output, so we can trivially
 * construct a Godel numbering of f as a (2^4)=16-bit integer. This
 * 16-bit integer serves as a lookup table to compute f, subject to
 * some swaps for ordering.
 *
 * Interestingly, the standard 2-bit condition codes are also a LUT with
 * the same format (2^1-bit), but it's usually easier to use enums. */
midgard_branch_extended;
/* Packed encoding of a writeout branch.
 * NOTE(review): the `typedef struct` opener, any remaining fields, and
 * the closer (with the struct name) are not visible in this chunk. */
__attribute__((__packed__))
midgard_jmp_writeout_op op : 3; /* == writeout */
unsigned unknown : 13;
/* Opcodes for the load/store unit. Values are the raw 8-bit encodings;
 * gaps correspond to opcodes not listed in this chunk.
 * NOTE(review): the `typedef enum {` opener was lost in mangling and has
 * been restored; the closer (and thus the name) is original. */
typedef enum {
        midgard_op_ld_st_noop = 0x03,

        /* Unpack a colour from a native format to fp16 */
        midgard_op_unpack_colour = 0x05,

        /* Packs a colour from fp16 to a native format */
        midgard_op_pack_colour = 0x09,

        /* Likewise packs from fp32 */
        midgard_op_pack_colour_32 = 0x0A,

        /* Unclear why this is on the L/S unit, but moves fp32 cube map
         * coordinates in r27 to its cube map texture coordinate destination */
        midgard_op_ld_cubemap_coords = 0x0E,

        /* Loads a global/local/group ID, depending on arguments */
        midgard_op_ld_compute_id = 0x10,

        /* The L/S unit can do perspective division a clock faster than the ALU
         * if you're lucky. Put the vec4 in r27, and call with 0x24 as the
         * unknown state; the output will be <x/w, y/w, z/w, 1>. Replace w with
         * z for the z version */
        midgard_op_ldst_perspective_division_z = 0x12,
        midgard_op_ldst_perspective_division_w = 0x13,

        /* val in r27.y, address embedded, outputs result to argument. Invert val for sub. Let val = +-1 for inc/dec. */
        midgard_op_atomic_add = 0x40,
        midgard_op_atomic_add64 = 0x41,

        midgard_op_atomic_and = 0x44,
        midgard_op_atomic_and64 = 0x45,
        midgard_op_atomic_or = 0x48,
        midgard_op_atomic_or64 = 0x49,
        midgard_op_atomic_xor = 0x4C,
        midgard_op_atomic_xor64 = 0x4D,

        midgard_op_atomic_imin = 0x50,
        midgard_op_atomic_imin64 = 0x51,
        midgard_op_atomic_umin = 0x54,
        midgard_op_atomic_umin64 = 0x55,
        midgard_op_atomic_imax = 0x58,
        midgard_op_atomic_imax64 = 0x59,
        midgard_op_atomic_umax = 0x5C,
        midgard_op_atomic_umax64 = 0x5D,

        midgard_op_atomic_xchg = 0x60,
        midgard_op_atomic_xchg64 = 0x61,

        midgard_op_atomic_cmpxchg = 0x64,
        midgard_op_atomic_cmpxchg64 = 0x65,

        /* Used for compute shader's __global arguments, __local variables (or
         * for register spilling) */
        midgard_op_ld_uchar = 0x80, /* zero extends */
        midgard_op_ld_char = 0x81, /* sign extends */
        midgard_op_ld_ushort = 0x84, /* zero extends */
        midgard_op_ld_short = 0x85, /* sign extends */
        midgard_op_ld_char4 = 0x88, /* short2, int, float */
        midgard_op_ld_short4 = 0x8C, /* int2, float2, long */
        midgard_op_ld_int4 = 0x90, /* float4, long2 */

        midgard_op_ld_attr_32 = 0x94,
        midgard_op_ld_attr_16 = 0x95,
        midgard_op_ld_attr_32u = 0x96,
        midgard_op_ld_attr_32i = 0x97,
        midgard_op_ld_vary_32 = 0x98,
        midgard_op_ld_vary_16 = 0x99,
        midgard_op_ld_vary_32u = 0x9A,
        midgard_op_ld_vary_32i = 0x9B,

        /* Old version of midgard_op_ld_color_buffer_as_fp16, for T720 */
        midgard_op_ld_color_buffer_as_fp32_old = 0x9C,
        midgard_op_ld_color_buffer_as_fp16_old = 0x9D,
        midgard_op_ld_color_buffer_32u_old = 0x9E,

        /* The distinction between these ops is the alignment requirement /
         * accompanying shift. Thus, the offset to ld_ubo_int4 is in 16-byte
         * units and can load 128-bit. The offset to ld_ubo_short4 is in 8-byte
         * units; ld_ubo_char4 in 4-byte units. ld_ubo_char/ld_ubo_char2 are
         * purely theoretical (never seen in the wild) since int8/int16/fp16
         * UBOs don't really exist. The ops are still listed to maintain
         * symmetry with generic I/O ops. */
        midgard_op_ld_ubo_char = 0xA0, /* theoretical */
        midgard_op_ld_ubo_char2 = 0xA4, /* theoretical */
        midgard_op_ld_ubo_char4 = 0xA8,
        midgard_op_ld_ubo_short4 = 0xAC,
        midgard_op_ld_ubo_int4 = 0xB0,

        /* New-style blending ops. Works on T760/T860 */
        midgard_op_ld_color_buffer_as_fp32 = 0xB8,
        midgard_op_ld_color_buffer_as_fp16 = 0xB9,
        midgard_op_ld_color_buffer_32u = 0xBA,

        midgard_op_st_char = 0xC0,
        midgard_op_st_char2 = 0xC4, /* short */
        midgard_op_st_char4 = 0xC8, /* short2, int, float */
        midgard_op_st_short4 = 0xCC, /* int2, float2, long */
        midgard_op_st_int4 = 0xD0, /* float4, long2 */

        midgard_op_st_vary_32 = 0xD4,
        midgard_op_st_vary_16 = 0xD5,
        midgard_op_st_vary_32u = 0xD6,
        midgard_op_st_vary_32i = 0xD7,

        /* Value to st in r27, location r26.w as short2 */
        midgard_op_st_image_f = 0xD8,
        midgard_op_st_image_ui = 0xDA,
        midgard_op_st_image_i = 0xDB,
} midgard_load_store_op;
/* Varying interpolation qualifiers.
 * NOTE(review): the `typedef enum {` opener was lost in mangling and has
 * been restored; the closer (and thus the name) is original. */
typedef enum {
        midgard_interp_sample = 0,
        midgard_interp_centroid = 1,
        midgard_interp_default = 2
} midgard_interpolation;
/* Modifier applied when loading a varying.
 * NOTE(review): the `typedef enum {` opener was lost in mangling and has
 * been restored; the closer (and thus the name) is original. */
typedef enum {
        midgard_varying_mod_none = 0,

        /* Other values unknown */

        /* Take the would-be result and divide all components by its z/w
         * (perspective division baked in with the load) */
        midgard_varying_mod_perspective_z = 2,
        midgard_varying_mod_perspective_w = 3,
} midgard_varying_modifier;
/* Packed descriptor stuffed into the varying_parameters field of a
 * load/store word when loading a varying.
 * NOTE(review): the `typedef struct` opener, the leading `}` of the
 * closer, and possibly one or more bitfields are not visible in this
 * chunk — verify the bit layout against the full header. */
__attribute__((__packed__))
unsigned zero0 : 1; /* Always zero */

midgard_varying_modifier modifier : 2;

unsigned zero1 : 1; /* Always zero */

/* Varying qualifiers, zero if not a varying */

unsigned is_varying : 1; /* Always one for varying, but maybe something else? */
midgard_interpolation interpolation : 2;

unsigned zero2 : 2; /* Always zero */
midgard_varying_parameter;
/* 8-bit register/etc selector for load/store ops */
/* NOTE(review): the `typedef struct` opener, the leading `}` of the
 * closer, the r26/r27 select bit, and the shift field described below are
 * not visible in this chunk. */
__attribute__((__packed__))
/* Indexes into the register */
unsigned component : 2;

/* Register select between r26/r27 — the select field itself is not
 * visible in this chunk */
unsigned unknown : 2;

/* Like any good Arm instruction set, load/store arguments can be
 * implicitly left-shifted... but only the second argument. Zero for no
 * shifting, up to <<7 possible though. This is useful for indexing.
 *
 * For the first argument, it's unknown what these bits mean */
midgard_ldst_register_select;
/* Packed encoding of a single load/store instruction.
 * NOTE(review): the `typedef struct` opener, the leading `}` of the
 * closer, and several fields (destination register, write mask, and the
 * two argument-register bytes described below) are not visible in this
 * chunk. */
__attribute__((__packed__))
midgard_load_store_op op : 8; /* opcode, from midgard_load_store_op */

unsigned swizzle : 8; /* 2 bits per component */

/* Load/store ops can take two additional registers as arguments, but
 * these are limited to load/store registers with only a few supported
 * mask/swizzle combinations. The tradeoff is these are much more
 * compact, requiring 8-bits each rather than 17-bits for a full
 * reg/mask/swizzle. Usually (?) encoded as
 * midgard_ldst_register_select. */

unsigned varying_parameters : 10; /* midgard_varying_parameter when loading a varying */

unsigned address : 9;
midgard_load_store_word;
/* Start of the bundle word that pairs load/store instructions.
 * NOTE(review): the `typedef struct` opener, the instruction payload
 * fields, and the closer (with the struct name) are not visible in this
 * chunk. */
__attribute__((__packed__))
unsigned next_type : 4; /* tag of the following bundle */
/* 8-bit register selector used in texture ops to select a bias/LOD/gradient
 * register, shoved into the `bias` field */
/* NOTE(review): the `typedef struct` opener, the leading `}` of the
 * closer, and the full/select/upper/padding bitfields the comments below
 * refer to are not visible in this chunk. */
__attribute__((__packed__))
/* 32-bit register, clear for half-register */

/* Register select between r28/r29 */

/* For a half-register, selects the upper half */

/* Indexes into the register */
unsigned component : 2;

/* Padding to make this 8-bit */
midgard_tex_register_select;
/* Texture pipeline results are in r28-r29 */
#define REG_TEX_BASE 28
/* Texture-unit opcodes.
 * NOTE(review): the closing `};` was lost in mangling and has been
 * restored; the enumerator list is original. */
enum mali_texture_op {
        TEXTURE_OP_NORMAL = 1, /* texture */
        TEXTURE_OP_LOD = 2, /* textureLod */
        TEXTURE_OP_TEXEL_FETCH = 4,
        TEXTURE_OP_BARRIER = 11,
        TEXTURE_OP_DERIVATIVE = 13
};
/* Sampler data type, matching the GLSL sampler flavour.
 * NOTE(review): the closing `};` was lost in mangling and has been
 * restored; the enumerator list is original. */
enum mali_sampler_type {
        MALI_SAMPLER_UNK = 0x0,
        MALI_SAMPLER_FLOAT = 0x1, /* sampler */
        MALI_SAMPLER_UNSIGNED = 0x2, /* usampler */
        MALI_SAMPLER_SIGNED = 0x3, /* isampler */
};
/* Texture access modes, including textureGather component selection.
 * NOTE(review): the closing `};` was lost in mangling and has been
 * restored; enumerators with values below 6 are not visible in this
 * chunk — confirm against the full header. */
enum mali_texture_mode {
        TEXTURE_GATHER_SHADOW = 6,
        TEXTURE_GATHER_X = 8,
        TEXTURE_GATHER_Y = 9,
        TEXTURE_GATHER_Z = 10,
        TEXTURE_GATHER_W = 11,
};
/* Derivative selection for TEXTURE_OP_DERIVATIVE.
 * NOTE(review): the enumerators and closing `};` are not visible in this
 * chunk — reconstruct against the full header. */
enum mali_derivative_mode {
/* Packed encoding of a texture instruction word.
 * NOTE(review): the `typedef struct` opener, the leading `}` of the
 * closer, and several fields (mode, cont/last bits, output swizzle
 * select, bias fields, among others) are not visible in this chunk —
 * verify the bit layout against the full header. */
__attribute__((__packed__))
unsigned next_type : 4; /* tag of the following bundle */

enum mali_texture_op op : 4;

/* A little obscure, but last is set for the last texture operation in
 * a shader. cont appears to just be last's opposite (?). Yeah, I know,
 * kind of funky.. (the cont/last fields themselves are not visible in
 * this chunk) */

/* Are sampler_handle/texture_handle respectively set by registers? If
 * true, the lower 8-bits of the respective field is a register word.
 * If false, they are an immediate */

unsigned sampler_register : 1;
unsigned texture_register : 1;

/* Is a register used to specify the
 * LOD/bias/offset? If set, use the `bias` field as
 * a register index. If clear, use the `bias` field
 * as an immediate. */
unsigned lod_register : 1;

/* Is a register used to specify an offset? If set, use the
 * offset_reg_* fields to encode this, duplicated for each of the
 * components. If clear, there is implicitly always an immediate offset
 * specified in offset_imm_* */
unsigned offset_register : 1;

unsigned in_reg_full : 1;
unsigned in_reg_select : 1;
unsigned in_reg_upper : 1;
unsigned in_reg_swizzle : 8;

unsigned unknown8 : 2;

unsigned out_full : 1;

enum mali_sampler_type sampler_type : 2;

unsigned out_reg_select : 1;
unsigned out_upper : 1;

/* Intriguingly, textures can take an outmod just like alu ops. Int
 * outmods are not supported as far as I can tell, so this is only
 * meaningful for float samplers */
midgard_outmod_float outmod : 2;

unsigned swizzle : 8;

/* These indicate how many bundles after this texture op may be
 * executed in parallel with this op. We may execute only ALU and
 * ld/st in parallel (not other textures), and obviously there cannot
 * be any dependency (the blob appears to forbid even accessing other
 * channels of a given texture register). */

unsigned out_of_order : 2;
unsigned unknown4 : 10;

/* In immediate mode, each offset field is an immediate range [0, 7].
 *
 * In register mode, offset_x becomes a register (full, select, upper)
 * triplet followed by a vec3 swizzle is splattered across
 * offset_y/offset_z in a genuinely bizarre way.
 *
 * For texel fetches in immediate mode, the range is the full [-8, 7],
 * but for normal texturing the top bit must be zero and a register
 * used instead. It's not clear where this limitation is from.
 *
 * Conceptually laid out as:
 * signed offset_x : 4;
 * signed offset_y : 4;
 * signed offset_z : 4;
 * or, in register mode:
 * unsigned swizzle : 8;
 */
unsigned offset : 12;

/* In immediate bias mode, for a normal texture op, this is
 * texture bias, computed as int(2^8 * frac(biasf)), with
 * bias_int = floor(bias). For a textureLod, it's that, but
 * s/bias/lod. For a texel fetch, this is the LOD as-is.
 *
 * In register mode, this is a midgard_tex_register_select
 * structure and bias_int is zero (the bias fields themselves are not
 * visible in this chunk) */

/* If sampler/texture_register is set, the bottom 8-bits are
 * midgard_tex_register_select and the top 8-bits are zero. If they are
 * clear, they are immediate texture indices */

unsigned sampler_handle : 16;
unsigned texture_handle : 16;
midgard_texture_word;
/* Technically barriers are texture instructions but it's less work to add them
 * as an explicitly zeroed special case, since most fields are forced to go to
 * zero anyway.
 * NOTE(review): the `typedef struct` opener and several zeroed/forced
 * fields (including the op field, == TEXTURE_OP_BARRIER) are not visible
 * in this chunk; the closer is original. */
__attribute__((__packed__))
unsigned next_type : 4; /* tag of the following bundle */

/* op = TEXTURE_OP_BARRIER */

/* Since helper invocations don't make any sense, these are forced to one */

unsigned out_of_order : 4; /* how many following bundles may run in parallel */
} midgard_texture_barrier_word;
/* The embedded constants block of a bundle, viewable at every lane width.
 * NOTE(review): the union members and closing `} midgard_constants;` are
 * not visible in this chunk — reconstruct against the full header. */
typedef union midgard_constants {
/* IEEE rounding modes used by the rounding-variant ALU ops (fmov_rt*,
 * f2i_rt*, i2f_rt*).
 * NOTE(review): the closing `};` falls past the end of this chunk. */
enum midgard_roundmode {
        MIDGARD_RTE = 0x0, /* round to even */
        MIDGARD_RTZ = 0x1, /* round to zero */
        MIDGARD_RTN = 0x2, /* round to negative */
        MIDGARD_RTP = 0x3, /* round to positive */