/*
 * Copyright (C) 2019 Collabora, Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors (Collabora):
 *    Alyssa Rosenzweig <alyssa.rosenzweig@collabora.com>
 */

#include "compiler.h"

/* Derivatives in Midgard are implemented on the texture pipe, rather than the
 * ALU pipe as suggested by NIR. The rationale is that normal texture
 * instructions require (implicit) derivatives to be calculated anyway, so it
 * makes sense to reuse the derivative logic. Thus, in addition to the usual
 * texturing ops that calculate derivatives, there are two explicit texture ops
 * dFdx/dFdy that perform differencing across helper invocations in either the
 * horizontal or vertical direction.
 *
 * One major caveat is that derivatives can only be calculated on up to a vec2
 * at a time. This restriction presumably saves some silicon, as the vast
 * majority of derivatives are vec2 (autocalculated mip levels of 2D texture
 * coordinates). Admittedly it's unclear how 3D textures are supposed to get
 * their levels calculated automatically under this scheme, but we press on.
 *
 * This caveat is handled in two steps. During the first pass (code
 * generation), we generate texture ops 1:1 with the incoming NIR derivatives.
 * This works for float/vec2 but not for vec3/vec4. A later lowering pass
 * scans for vec3/vec4 derivatives and lowers (splits) them to multiple
 * instructions. This pass is kept separate since we'll have to rewrite the
 * destination into a register (rather than SSA), and we'd rather do so once
 * we have the whole IR in front of us.
 */
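
/* For example: dFdx on a vec4 is first emitted as a single
 * TEXTURE_OP_DERIVATIVE with a full 4-component mask. The lowering pass
 * below then splits it in two: the original keeps the x/y half (mask
 * 0b0011) and a duplicate computes the z/w half (mask 0b1100), with the
 * duplicate's swizzles rerouted so the upper components pass through the
 * two-channel derivative hardware. */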

static unsigned
mir_derivative_mode(nir_op op)
{
        switch (op) {
        case nir_op_fddx:
        case nir_op_fddx_fine:
        case nir_op_fddx_coarse:
                return TEXTURE_DFDX;

        case nir_op_fddy:
        case nir_op_fddy_fine:
        case nir_op_fddy_coarse:
                return TEXTURE_DFDY;

        default:
                unreachable("Invalid derivative op");
        }
}

/* Returns true if a texturing op computes derivatives either explicitly or
 * implicitly */

bool
mir_op_computes_derivatives(gl_shader_stage stage, unsigned op)
{
        /* Only fragment shaders may compute derivatives, but the sense of
         * "normal" changes in vertex shaders on certain GPUs */

        if (op == TEXTURE_OP_NORMAL && stage != MESA_SHADER_FRAGMENT)
                return false;

        switch (op) {
        case TEXTURE_OP_NORMAL:
        case TEXTURE_OP_DERIVATIVE:
                assert(stage == MESA_SHADER_FRAGMENT);
                return true;

        default:
                return false;
        }
}
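
/* Note the stage only affects TEXTURE_OP_NORMAL: an ordinary texturing op
 * implies derivatives in fragment shaders alone, while an explicit
 * derivative op is only ever emitted there to begin with */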

void
midgard_emit_derivatives(compiler_context *ctx, nir_alu_instr *instr)
{
        /* Create texture instructions */

        unsigned nr_components = nir_dest_num_components(instr->dest.dest);

        midgard_instruction ins = {
                .type = TAG_TEXTURE_4,
                .mask = mask_of(nr_components),
                .dest = nir_dest_index(&instr->dest.dest),
                .dest_type = nir_type_float32,
                .src = { ~0, nir_src_index(ctx, &instr->src[0].src), ~0, ~0 },
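                /* ~0 marks an unused source slot; the value being
                 * differenced travels in the texture coordinate slot */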
                .swizzle = SWIZZLE_IDENTITY_4,
                .src_types = { nir_type_float32, nir_type_float32 },
                .op = TEXTURE_OP_DERIVATIVE,
                .texture = {
                        .mode = mir_derivative_mode(instr->op),
                        .in_reg_full = 1,
                        .out_full = 1,
                        .sampler_type = MALI_SAMPLER_FLOAT,
                }
        };
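
        /* Register (non-SSA) destinations may be partially written; respect
         * the write mask rather than clobbering the whole vector */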
        if (!instr->dest.dest.is_ssa)
                ins.mask &= instr->dest.write_mask;

        emit_mir_instruction(ctx, ins);
}

void
midgard_lower_derivatives(compiler_context *ctx, midgard_block *block)
{
        mir_foreach_instr_in_block_safe(block, ins) {
                if (ins->type != TAG_TEXTURE_4) continue;
                if (ins->op != TEXTURE_OP_DERIVATIVE) continue;

                /* Check if we need to split */

                bool upper = ins->mask & 0b1100;
                bool lower = ins->mask & 0b0011;
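
                /* 0b1100 covers the z/w half, 0b0011 the x/y half; writing
                 * both halves exceeds the vec2-per-op limit described above */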

                if (!(upper && lower)) continue;

                /* Duplicate for dedicated upper instruction */

                midgard_instruction dup;
                memcpy(&dup, ins, sizeof(dup));

                /* Fixup masks. Make original just lower and dupe just upper */

                ins->mask &= 0b0011;
                dup.mask &= 0b1100;

                /* Fixup swizzles */

                dup.swizzle[0][0] = dup.swizzle[0][1] = dup.swizzle[0][2] = COMPONENT_X;
                dup.swizzle[0][3] = COMPONENT_Y;

                dup.swizzle[1][0] = COMPONENT_Z;
                dup.swizzle[1][1] = dup.swizzle[1][2] = dup.swizzle[1][3] = COMPONENT_W;
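
                /* The dupe now produces the upper half: its swizzles feed the
                 * source's z/w through the two-channel derivative and steer
                 * the results into the z/w lanes selected by its mask */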

                /* Insert the new instruction */
                mir_insert_instruction_before(ctx, mir_next_op(ins), dup);
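
                /* (inserting before mir_next_op(ins) places the dupe
                 * immediately after the original instruction) */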

                /* We'll need both instructions to write to the same index, so
                 * rewrite to use a register. mir_rewrite_index rewrites defs
                 * and uses alike, so both copies (and every reader) end up on
                 * the shared temp. */

                unsigned new = make_compiler_temp_reg(ctx);
                mir_rewrite_index(ctx, ins->dest, new);
        }
}