/*
 * Copyright © 2015 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
/*
 * This lowering pass supports (as configured via nir_lower_tex_options)
 * various texture related conversions:
 *   + texture projector lowering: converts the coordinate division for
 *     texture projection to be done in ALU instructions instead of
 *     asking the texture operation to do so.
 *   + lowering RECT: converts the un-normalized RECT texture coordinates
 *     to normalized coordinates with txs plus ALU instructions
 *   + saturate s/t/r coords: to emulate certain texture clamp/wrap modes,
 *     inserts instructions to clamp specified coordinates to [0.0, 1.0].
 *     Note that this automatically triggers texture projector lowering if
 *     needed, since clamping must happen after projector lowering.
 */
#include "nir.h"
#include "nir_builder.h"
#include "nir_format_convert.h"
43 project_src(nir_builder
*b
, nir_tex_instr
*tex
)
45 /* Find the projector in the srcs list, if present. */
46 int proj_index
= nir_tex_instr_src_index(tex
, nir_tex_src_projector
);
50 b
->cursor
= nir_before_instr(&tex
->instr
);
52 nir_ssa_def
*inv_proj
=
53 nir_frcp(b
, nir_ssa_for_src(b
, tex
->src
[proj_index
].src
, 1));
55 /* Walk through the sources projecting the arguments. */
56 for (unsigned i
= 0; i
< tex
->num_srcs
; i
++) {
57 switch (tex
->src
[i
].src_type
) {
58 case nir_tex_src_coord
:
59 case nir_tex_src_comparator
:
64 nir_ssa_def
*unprojected
=
65 nir_ssa_for_src(b
, tex
->src
[i
].src
, nir_tex_instr_src_size(tex
, i
));
66 nir_ssa_def
*projected
= nir_fmul(b
, unprojected
, inv_proj
);
68 /* Array indices don't get projected, so make an new vector with the
69 * coordinate's array index untouched.
71 if (tex
->is_array
&& tex
->src
[i
].src_type
== nir_tex_src_coord
) {
72 switch (tex
->coord_components
) {
74 projected
= nir_vec4(b
,
75 nir_channel(b
, projected
, 0),
76 nir_channel(b
, projected
, 1),
77 nir_channel(b
, projected
, 2),
78 nir_channel(b
, unprojected
, 3));
81 projected
= nir_vec3(b
,
82 nir_channel(b
, projected
, 0),
83 nir_channel(b
, projected
, 1),
84 nir_channel(b
, unprojected
, 2));
87 projected
= nir_vec2(b
,
88 nir_channel(b
, projected
, 0),
89 nir_channel(b
, unprojected
, 1));
92 unreachable("bad texture coord count for array");
97 nir_instr_rewrite_src(&tex
->instr
,
99 nir_src_for_ssa(projected
));
102 nir_tex_instr_remove_src(tex
, proj_index
);
106 get_texture_size(nir_builder
*b
, nir_tex_instr
*tex
)
108 b
->cursor
= nir_before_instr(&tex
->instr
);
112 unsigned num_srcs
= 1; /* One for the LOD */
113 for (unsigned i
= 0; i
< tex
->num_srcs
; i
++) {
114 if (tex
->src
[i
].src_type
== nir_tex_src_texture_deref
||
115 tex
->src
[i
].src_type
== nir_tex_src_sampler_deref
||
116 tex
->src
[i
].src_type
== nir_tex_src_texture_offset
||
117 tex
->src
[i
].src_type
== nir_tex_src_sampler_offset
)
121 txs
= nir_tex_instr_create(b
->shader
, num_srcs
);
122 txs
->op
= nir_texop_txs
;
123 txs
->sampler_dim
= tex
->sampler_dim
;
124 txs
->is_array
= tex
->is_array
;
125 txs
->is_shadow
= tex
->is_shadow
;
126 txs
->is_new_style_shadow
= tex
->is_new_style_shadow
;
127 txs
->texture_index
= tex
->texture_index
;
128 txs
->sampler_index
= tex
->sampler_index
;
129 txs
->dest_type
= nir_type_int
;
132 for (unsigned i
= 0; i
< tex
->num_srcs
; i
++) {
133 if (tex
->src
[i
].src_type
== nir_tex_src_texture_deref
||
134 tex
->src
[i
].src_type
== nir_tex_src_sampler_deref
||
135 tex
->src
[i
].src_type
== nir_tex_src_texture_offset
||
136 tex
->src
[i
].src_type
== nir_tex_src_sampler_offset
) {
137 nir_src_copy(&txs
->src
[idx
].src
, &tex
->src
[i
].src
, txs
);
138 txs
->src
[idx
].src_type
= tex
->src
[i
].src_type
;
142 /* Add in an LOD because some back-ends require it */
143 txs
->src
[idx
].src
= nir_src_for_ssa(nir_imm_int(b
, 0));
144 txs
->src
[idx
].src_type
= nir_tex_src_lod
;
146 nir_ssa_dest_init(&txs
->instr
, &txs
->dest
,
147 nir_tex_instr_dest_size(txs
), 32, NULL
);
148 nir_builder_instr_insert(b
, &txs
->instr
);
150 return nir_i2f32(b
, &txs
->dest
.ssa
);
154 lower_offset(nir_builder
*b
, nir_tex_instr
*tex
)
156 int offset_index
= nir_tex_instr_src_index(tex
, nir_tex_src_offset
);
157 if (offset_index
< 0)
160 int coord_index
= nir_tex_instr_src_index(tex
, nir_tex_src_coord
);
161 assert(coord_index
>= 0);
163 assert(tex
->src
[offset_index
].src
.is_ssa
);
164 assert(tex
->src
[coord_index
].src
.is_ssa
);
165 nir_ssa_def
*offset
= tex
->src
[offset_index
].src
.ssa
;
166 nir_ssa_def
*coord
= tex
->src
[coord_index
].src
.ssa
;
168 b
->cursor
= nir_before_instr(&tex
->instr
);
170 nir_ssa_def
*offset_coord
;
171 if (nir_tex_instr_src_type(tex
, coord_index
) == nir_type_float
) {
172 if (tex
->sampler_dim
== GLSL_SAMPLER_DIM_RECT
) {
173 offset_coord
= nir_fadd(b
, coord
, nir_i2f32(b
, offset
));
175 nir_ssa_def
*txs
= get_texture_size(b
, tex
);
176 nir_ssa_def
*scale
= nir_frcp(b
, txs
);
178 offset_coord
= nir_fadd(b
, coord
,
180 nir_i2f32(b
, offset
),
184 offset_coord
= nir_iadd(b
, coord
, offset
);
188 /* The offset is not applied to the array index */
189 if (tex
->coord_components
== 2) {
190 offset_coord
= nir_vec2(b
, nir_channel(b
, offset_coord
, 0),
191 nir_channel(b
, coord
, 1));
192 } else if (tex
->coord_components
== 3) {
193 offset_coord
= nir_vec3(b
, nir_channel(b
, offset_coord
, 0),
194 nir_channel(b
, offset_coord
, 1),
195 nir_channel(b
, coord
, 2));
197 unreachable("Invalid number of components");
201 nir_instr_rewrite_src(&tex
->instr
, &tex
->src
[coord_index
].src
,
202 nir_src_for_ssa(offset_coord
));
204 nir_tex_instr_remove_src(tex
, offset_index
);
210 lower_rect(nir_builder
*b
, nir_tex_instr
*tex
)
212 nir_ssa_def
*txs
= get_texture_size(b
, tex
);
213 nir_ssa_def
*scale
= nir_frcp(b
, txs
);
215 /* Walk through the sources normalizing the requested arguments. */
216 for (unsigned i
= 0; i
< tex
->num_srcs
; i
++) {
217 if (tex
->src
[i
].src_type
!= nir_tex_src_coord
)
220 nir_ssa_def
*coords
=
221 nir_ssa_for_src(b
, tex
->src
[i
].src
, tex
->coord_components
);
222 nir_instr_rewrite_src(&tex
->instr
,
224 nir_src_for_ssa(nir_fmul(b
, coords
, scale
)));
227 tex
->sampler_dim
= GLSL_SAMPLER_DIM_2D
;
231 sample_plane(nir_builder
*b
, nir_tex_instr
*tex
, int plane
)
233 assert(tex
->dest
.is_ssa
);
234 assert(nir_tex_instr_dest_size(tex
) == 4);
235 assert(nir_alu_type_get_base_type(tex
->dest_type
) == nir_type_float
);
236 assert(tex
->op
== nir_texop_tex
);
237 assert(tex
->coord_components
== 2);
239 nir_tex_instr
*plane_tex
=
240 nir_tex_instr_create(b
->shader
, tex
->num_srcs
+ 1);
241 for (unsigned i
= 0; i
< tex
->num_srcs
; i
++) {
242 nir_src_copy(&plane_tex
->src
[i
].src
, &tex
->src
[i
].src
, plane_tex
);
243 plane_tex
->src
[i
].src_type
= tex
->src
[i
].src_type
;
245 plane_tex
->src
[tex
->num_srcs
].src
= nir_src_for_ssa(nir_imm_int(b
, plane
));
246 plane_tex
->src
[tex
->num_srcs
].src_type
= nir_tex_src_plane
;
247 plane_tex
->op
= nir_texop_tex
;
248 plane_tex
->sampler_dim
= GLSL_SAMPLER_DIM_2D
;
249 plane_tex
->dest_type
= nir_type_float
;
250 plane_tex
->coord_components
= 2;
252 plane_tex
->texture_index
= tex
->texture_index
;
253 plane_tex
->sampler_index
= tex
->sampler_index
;
255 nir_ssa_dest_init(&plane_tex
->instr
, &plane_tex
->dest
, 4, 32, NULL
);
257 nir_builder_instr_insert(b
, &plane_tex
->instr
);
259 return &plane_tex
->dest
.ssa
;
263 convert_yuv_to_rgb(nir_builder
*b
, nir_tex_instr
*tex
,
264 nir_ssa_def
*y
, nir_ssa_def
*u
, nir_ssa_def
*v
,
267 nir_const_value m
[3] = {
268 { .f32
= { 1.0f
, 0.0f
, 1.59602678f
, 0.0f
} },
269 { .f32
= { 1.0f
, -0.39176229f
, -0.81296764f
, 0.0f
} },
270 { .f32
= { 1.0f
, 2.01723214f
, 0.0f
, 0.0f
} }
275 nir_fmul(b
, nir_imm_float(b
, 1.16438356f
),
276 nir_fadd(b
, y
, nir_imm_float(b
, -16.0f
/ 255.0f
))),
277 nir_channel(b
, nir_fadd(b
, u
, nir_imm_float(b
, -128.0f
/ 255.0f
)), 0),
278 nir_channel(b
, nir_fadd(b
, v
, nir_imm_float(b
, -128.0f
/ 255.0f
)), 0),
279 nir_imm_float(b
, 0.0));
281 nir_ssa_def
*red
= nir_fdot4(b
, yuv
, nir_build_imm(b
, 4, 32, m
[0]));
282 nir_ssa_def
*green
= nir_fdot4(b
, yuv
, nir_build_imm(b
, 4, 32, m
[1]));
283 nir_ssa_def
*blue
= nir_fdot4(b
, yuv
, nir_build_imm(b
, 4, 32, m
[2]));
285 nir_ssa_def
*result
= nir_vec4(b
, red
, green
, blue
, a
);
287 nir_ssa_def_rewrite_uses(&tex
->dest
.ssa
, nir_src_for_ssa(result
));
291 lower_y_uv_external(nir_builder
*b
, nir_tex_instr
*tex
)
293 b
->cursor
= nir_after_instr(&tex
->instr
);
295 nir_ssa_def
*y
= sample_plane(b
, tex
, 0);
296 nir_ssa_def
*uv
= sample_plane(b
, tex
, 1);
298 convert_yuv_to_rgb(b
, tex
,
299 nir_channel(b
, y
, 0),
300 nir_channel(b
, uv
, 0),
301 nir_channel(b
, uv
, 1),
302 nir_imm_float(b
, 1.0f
));
306 lower_y_u_v_external(nir_builder
*b
, nir_tex_instr
*tex
)
308 b
->cursor
= nir_after_instr(&tex
->instr
);
310 nir_ssa_def
*y
= sample_plane(b
, tex
, 0);
311 nir_ssa_def
*u
= sample_plane(b
, tex
, 1);
312 nir_ssa_def
*v
= sample_plane(b
, tex
, 2);
314 convert_yuv_to_rgb(b
, tex
,
315 nir_channel(b
, y
, 0),
316 nir_channel(b
, u
, 0),
317 nir_channel(b
, v
, 0),
318 nir_imm_float(b
, 1.0f
));
322 lower_yx_xuxv_external(nir_builder
*b
, nir_tex_instr
*tex
)
324 b
->cursor
= nir_after_instr(&tex
->instr
);
326 nir_ssa_def
*y
= sample_plane(b
, tex
, 0);
327 nir_ssa_def
*xuxv
= sample_plane(b
, tex
, 1);
329 convert_yuv_to_rgb(b
, tex
,
330 nir_channel(b
, y
, 0),
331 nir_channel(b
, xuxv
, 1),
332 nir_channel(b
, xuxv
, 3),
333 nir_imm_float(b
, 1.0f
));
337 lower_xy_uxvx_external(nir_builder
*b
, nir_tex_instr
*tex
)
339 b
->cursor
= nir_after_instr(&tex
->instr
);
341 nir_ssa_def
*y
= sample_plane(b
, tex
, 0);
342 nir_ssa_def
*uxvx
= sample_plane(b
, tex
, 1);
344 convert_yuv_to_rgb(b
, tex
,
345 nir_channel(b
, y
, 1),
346 nir_channel(b
, uxvx
, 0),
347 nir_channel(b
, uxvx
, 2),
348 nir_imm_float(b
, 1.0f
));
352 lower_ayuv_external(nir_builder
*b
, nir_tex_instr
*tex
)
354 b
->cursor
= nir_after_instr(&tex
->instr
);
356 nir_ssa_def
*ayuv
= sample_plane(b
, tex
, 0);
358 convert_yuv_to_rgb(b
, tex
,
359 nir_channel(b
, ayuv
, 2),
360 nir_channel(b
, ayuv
, 1),
361 nir_channel(b
, ayuv
, 0),
362 nir_channel(b
, ayuv
, 3));
366 * Emits a textureLod operation used to replace an existing
367 * textureGrad instruction.
370 replace_gradient_with_lod(nir_builder
*b
, nir_ssa_def
*lod
, nir_tex_instr
*tex
)
372 /* We are going to emit a textureLod() with the same parameters except that
373 * we replace ddx/ddy with lod.
375 int num_srcs
= tex
->num_srcs
- 1;
376 nir_tex_instr
*txl
= nir_tex_instr_create(b
->shader
, num_srcs
);
378 txl
->op
= nir_texop_txl
;
379 txl
->sampler_dim
= tex
->sampler_dim
;
380 txl
->texture_index
= tex
->texture_index
;
381 txl
->dest_type
= tex
->dest_type
;
382 txl
->is_array
= tex
->is_array
;
383 txl
->is_shadow
= tex
->is_shadow
;
384 txl
->is_new_style_shadow
= tex
->is_new_style_shadow
;
385 txl
->sampler_index
= tex
->sampler_index
;
386 txl
->coord_components
= tex
->coord_components
;
388 nir_ssa_dest_init(&txl
->instr
, &txl
->dest
, 4, 32, NULL
);
391 for (int i
= 0; i
< tex
->num_srcs
; i
++) {
392 if (tex
->src
[i
].src_type
== nir_tex_src_ddx
||
393 tex
->src
[i
].src_type
== nir_tex_src_ddy
)
395 nir_src_copy(&txl
->src
[src_num
].src
, &tex
->src
[i
].src
, txl
);
396 txl
->src
[src_num
].src_type
= tex
->src
[i
].src_type
;
400 txl
->src
[src_num
].src
= nir_src_for_ssa(lod
);
401 txl
->src
[src_num
].src_type
= nir_tex_src_lod
;
404 assert(src_num
== num_srcs
);
406 nir_ssa_dest_init(&txl
->instr
, &txl
->dest
,
407 tex
->dest
.ssa
.num_components
, 32, NULL
);
408 nir_builder_instr_insert(b
, &txl
->instr
);
410 nir_ssa_def_rewrite_uses(&tex
->dest
.ssa
, nir_src_for_ssa(&txl
->dest
.ssa
));
412 nir_instr_remove(&tex
->instr
);
416 lower_gradient_cube_map(nir_builder
*b
, nir_tex_instr
*tex
)
418 assert(tex
->sampler_dim
== GLSL_SAMPLER_DIM_CUBE
);
419 assert(tex
->op
== nir_texop_txd
);
420 assert(tex
->dest
.is_ssa
);
422 /* Use textureSize() to get the width and height of LOD 0 */
423 nir_ssa_def
*size
= get_texture_size(b
, tex
);
425 /* Cubemap texture lookups first generate a texture coordinate normalized
426 * to [-1, 1] on the appropiate face. The appropiate face is determined
427 * by which component has largest magnitude and its sign. The texture
428 * coordinate is the quotient of the remaining texture coordinates against
429 * that absolute value of the component of largest magnitude. This
430 * division requires that the computing of the derivative of the texel
431 * coordinate must use the quotient rule. The high level GLSL code is as
436 * vec3 abs_p, Q, dQdx, dQdy;
437 * abs_p = abs(ir->coordinate);
438 * if (abs_p.x >= max(abs_p.y, abs_p.z)) {
439 * Q = ir->coordinate.yzx;
440 * dQdx = ir->lod_info.grad.dPdx.yzx;
441 * dQdy = ir->lod_info.grad.dPdy.yzx;
443 * if (abs_p.y >= max(abs_p.x, abs_p.z)) {
444 * Q = ir->coordinate.xzy;
445 * dQdx = ir->lod_info.grad.dPdx.xzy;
446 * dQdy = ir->lod_info.grad.dPdy.xzy;
448 * if (abs_p.z >= max(abs_p.x, abs_p.y)) {
449 * Q = ir->coordinate;
450 * dQdx = ir->lod_info.grad.dPdx;
451 * dQdy = ir->lod_info.grad.dPdy;
454 * Step 2: use quotient rule to compute derivative. The normalized to
455 * [-1, 1] texel coordinate is given by Q.xy / (sign(Q.z) * Q.z). We are
456 * only concerned with the magnitudes of the derivatives whose values are
457 * not affected by the sign. We drop the sign from the computation.
463 * dx = recip * ( dQdx.xy - Q.xy * (dQdx.z * recip) );
464 * dy = recip * ( dQdy.xy - Q.xy * (dQdy.z * recip) );
466 * Step 3: compute LOD. At this point we have the derivatives of the
467 * texture coordinates normalized to [-1,1]. We take the LOD to be
468 * result = log2(max(sqrt(dot(dx, dx)), sqrt(dy, dy)) * 0.5 * L)
469 * = -1.0 + log2(max(sqrt(dot(dx, dx)), sqrt(dy, dy)) * L)
470 * = -1.0 + log2(sqrt(max(dot(dx, dx), dot(dy,dy))) * L)
471 * = -1.0 + log2(sqrt(L * L * max(dot(dx, dx), dot(dy,dy))))
472 * = -1.0 + 0.5 * log2(L * L * max(dot(dx, dx), dot(dy,dy)))
473 * where L is the dimension of the cubemap. The code is:
476 * M = max(dot(dx, dx), dot(dy, dy));
477 * L = textureSize(sampler, 0).x;
478 * result = -1.0 + 0.5 * log2(L * L * M);
483 tex
->src
[nir_tex_instr_src_index(tex
, nir_tex_src_coord
)].src
.ssa
;
485 /* unmodified dPdx, dPdy values */
487 tex
->src
[nir_tex_instr_src_index(tex
, nir_tex_src_ddx
)].src
.ssa
;
489 tex
->src
[nir_tex_instr_src_index(tex
, nir_tex_src_ddy
)].src
.ssa
;
491 nir_ssa_def
*abs_p
= nir_fabs(b
, p
);
492 nir_ssa_def
*abs_p_x
= nir_channel(b
, abs_p
, 0);
493 nir_ssa_def
*abs_p_y
= nir_channel(b
, abs_p
, 1);
494 nir_ssa_def
*abs_p_z
= nir_channel(b
, abs_p
, 2);
496 /* 1. compute selector */
497 nir_ssa_def
*Q
, *dQdx
, *dQdy
;
499 nir_ssa_def
*cond_z
= nir_fge(b
, abs_p_z
, nir_fmax(b
, abs_p_x
, abs_p_y
));
500 nir_ssa_def
*cond_y
= nir_fge(b
, abs_p_y
, nir_fmax(b
, abs_p_x
, abs_p_z
));
502 unsigned yzx
[3] = { 1, 2, 0 };
503 unsigned xzy
[3] = { 0, 2, 1 };
505 Q
= nir_bcsel(b
, cond_z
,
508 nir_swizzle(b
, p
, xzy
, 3, false),
509 nir_swizzle(b
, p
, yzx
, 3, false)));
511 dQdx
= nir_bcsel(b
, cond_z
,
514 nir_swizzle(b
, dPdx
, xzy
, 3, false),
515 nir_swizzle(b
, dPdx
, yzx
, 3, false)));
517 dQdy
= nir_bcsel(b
, cond_z
,
520 nir_swizzle(b
, dPdy
, xzy
, 3, false),
521 nir_swizzle(b
, dPdy
, yzx
, 3, false)));
523 /* 2. quotient rule */
525 /* tmp = Q.xy * recip;
526 * dx = recip * ( dQdx.xy - (tmp * dQdx.z) );
527 * dy = recip * ( dQdy.xy - (tmp * dQdy.z) );
529 nir_ssa_def
*rcp_Q_z
= nir_frcp(b
, nir_channel(b
, Q
, 2));
531 nir_ssa_def
*Q_xy
= nir_channels(b
, Q
, 0x3);
532 nir_ssa_def
*tmp
= nir_fmul(b
, Q_xy
, rcp_Q_z
);
534 nir_ssa_def
*dQdx_xy
= nir_channels(b
, dQdx
, 0x3);
535 nir_ssa_def
*dQdx_z
= nir_channel(b
, dQdx
, 2);
537 nir_fmul(b
, rcp_Q_z
, nir_fsub(b
, dQdx_xy
, nir_fmul(b
, tmp
, dQdx_z
)));
539 nir_ssa_def
*dQdy_xy
= nir_channels(b
, dQdy
, 0x3);
540 nir_ssa_def
*dQdy_z
= nir_channel(b
, dQdy
, 2);
542 nir_fmul(b
, rcp_Q_z
, nir_fsub(b
, dQdy_xy
, nir_fmul(b
, tmp
, dQdy_z
)));
544 /* M = max(dot(dx, dx), dot(dy, dy)); */
545 nir_ssa_def
*M
= nir_fmax(b
, nir_fdot(b
, dx
, dx
), nir_fdot(b
, dy
, dy
));
547 /* size has textureSize() of LOD 0 */
548 nir_ssa_def
*L
= nir_channel(b
, size
, 0);
550 /* lod = -1.0 + 0.5 * log2(L * L * M); */
553 nir_imm_float(b
, -1.0f
),
555 nir_imm_float(b
, 0.5f
),
556 nir_flog2(b
, nir_fmul(b
, L
, nir_fmul(b
, L
, M
)))));
558 /* 3. Replace the gradient instruction with an equivalent lod instruction */
559 replace_gradient_with_lod(b
, lod
, tex
);
563 lower_gradient(nir_builder
*b
, nir_tex_instr
*tex
)
565 /* Cubes are more complicated and have their own function */
566 if (tex
->sampler_dim
== GLSL_SAMPLER_DIM_CUBE
) {
567 lower_gradient_cube_map(b
, tex
);
571 assert(tex
->sampler_dim
!= GLSL_SAMPLER_DIM_CUBE
);
572 assert(tex
->op
== nir_texop_txd
);
573 assert(tex
->dest
.is_ssa
);
575 /* Use textureSize() to get the width and height of LOD 0 */
576 unsigned component_mask
;
577 switch (tex
->sampler_dim
) {
578 case GLSL_SAMPLER_DIM_3D
:
581 case GLSL_SAMPLER_DIM_1D
:
590 nir_channels(b
, get_texture_size(b
, tex
), component_mask
);
592 /* Scale the gradients by width and height. Effectively, the incoming
593 * gradients are s'(x,y), t'(x,y), and r'(x,y) from equation 3.19 in the
594 * GL 3.0 spec; we want u'(x,y), which is w_t * s'(x,y).
597 tex
->src
[nir_tex_instr_src_index(tex
, nir_tex_src_ddx
)].src
.ssa
;
599 tex
->src
[nir_tex_instr_src_index(tex
, nir_tex_src_ddy
)].src
.ssa
;
601 nir_ssa_def
*dPdx
= nir_fmul(b
, ddx
, size
);
602 nir_ssa_def
*dPdy
= nir_fmul(b
, ddy
, size
);
605 if (dPdx
->num_components
== 1) {
606 rho
= nir_fmax(b
, nir_fabs(b
, dPdx
), nir_fabs(b
, dPdy
));
609 nir_fsqrt(b
, nir_fdot(b
, dPdx
, dPdx
)),
610 nir_fsqrt(b
, nir_fdot(b
, dPdy
, dPdy
)));
613 /* lod = log2(rho). We're ignoring GL state biases for now. */
614 nir_ssa_def
*lod
= nir_flog2(b
, rho
);
616 /* Replace the gradient instruction with an equivalent lod instruction */
617 replace_gradient_with_lod(b
, lod
, tex
);
621 saturate_src(nir_builder
*b
, nir_tex_instr
*tex
, unsigned sat_mask
)
623 b
->cursor
= nir_before_instr(&tex
->instr
);
625 /* Walk through the sources saturating the requested arguments. */
626 for (unsigned i
= 0; i
< tex
->num_srcs
; i
++) {
627 if (tex
->src
[i
].src_type
!= nir_tex_src_coord
)
631 nir_ssa_for_src(b
, tex
->src
[i
].src
, tex
->coord_components
);
633 /* split src into components: */
634 nir_ssa_def
*comp
[4];
636 assume(tex
->coord_components
>= 1);
638 for (unsigned j
= 0; j
< tex
->coord_components
; j
++)
639 comp
[j
] = nir_channel(b
, src
, j
);
641 /* clamp requested components, array index does not get clamped: */
642 unsigned ncomp
= tex
->coord_components
;
646 for (unsigned j
= 0; j
< ncomp
; j
++) {
647 if ((1 << j
) & sat_mask
) {
648 if (tex
->sampler_dim
== GLSL_SAMPLER_DIM_RECT
) {
649 /* non-normalized texture coords, so clamp to texture
650 * size rather than [0.0, 1.0]
652 nir_ssa_def
*txs
= get_texture_size(b
, tex
);
653 comp
[j
] = nir_fmax(b
, comp
[j
], nir_imm_float(b
, 0.0));
654 comp
[j
] = nir_fmin(b
, comp
[j
], nir_channel(b
, txs
, j
));
656 comp
[j
] = nir_fsat(b
, comp
[j
]);
661 /* and move the result back into a single vecN: */
662 src
= nir_vec(b
, comp
, tex
->coord_components
);
664 nir_instr_rewrite_src(&tex
->instr
,
666 nir_src_for_ssa(src
));
671 get_zero_or_one(nir_builder
*b
, nir_alu_type type
, uint8_t swizzle_val
)
675 memset(&v
, 0, sizeof(v
));
677 if (swizzle_val
== 4) {
678 v
.u32
[0] = v
.u32
[1] = v
.u32
[2] = v
.u32
[3] = 0;
680 assert(swizzle_val
== 5);
681 if (type
== nir_type_float
)
682 v
.f32
[0] = v
.f32
[1] = v
.f32
[2] = v
.f32
[3] = 1.0;
684 v
.u32
[0] = v
.u32
[1] = v
.u32
[2] = v
.u32
[3] = 1;
687 return nir_build_imm(b
, 4, 32, v
);
691 swizzle_result(nir_builder
*b
, nir_tex_instr
*tex
, const uint8_t swizzle
[4])
693 assert(tex
->dest
.is_ssa
);
695 b
->cursor
= nir_after_instr(&tex
->instr
);
697 nir_ssa_def
*swizzled
;
698 if (tex
->op
== nir_texop_tg4
) {
699 if (swizzle
[tex
->component
] < 4) {
700 /* This one's easy */
701 tex
->component
= swizzle
[tex
->component
];
704 swizzled
= get_zero_or_one(b
, tex
->dest_type
, swizzle
[tex
->component
]);
707 assert(nir_tex_instr_dest_size(tex
) == 4);
708 if (swizzle
[0] < 4 && swizzle
[1] < 4 &&
709 swizzle
[2] < 4 && swizzle
[3] < 4) {
710 unsigned swiz
[4] = { swizzle
[0], swizzle
[1], swizzle
[2], swizzle
[3] };
711 /* We have no 0s or 1s, just emit a swizzling MOV */
712 swizzled
= nir_swizzle(b
, &tex
->dest
.ssa
, swiz
, 4, false);
714 nir_ssa_def
*srcs
[4];
715 for (unsigned i
= 0; i
< 4; i
++) {
716 if (swizzle
[i
] < 4) {
717 srcs
[i
] = nir_channel(b
, &tex
->dest
.ssa
, swizzle
[i
]);
719 srcs
[i
] = get_zero_or_one(b
, tex
->dest_type
, swizzle
[i
]);
722 swizzled
= nir_vec(b
, srcs
, 4);
726 nir_ssa_def_rewrite_uses_after(&tex
->dest
.ssa
, nir_src_for_ssa(swizzled
),
727 swizzled
->parent_instr
);
731 linearize_srgb_result(nir_builder
*b
, nir_tex_instr
*tex
)
733 assert(tex
->dest
.is_ssa
);
734 assert(nir_tex_instr_dest_size(tex
) == 4);
735 assert(nir_alu_type_get_base_type(tex
->dest_type
) == nir_type_float
);
737 b
->cursor
= nir_after_instr(&tex
->instr
);
740 nir_format_srgb_to_linear(b
, nir_channels(b
, &tex
->dest
.ssa
, 0x7));
742 /* alpha is untouched: */
743 nir_ssa_def
*result
= nir_vec4(b
,
744 nir_channel(b
, rgb
, 0),
745 nir_channel(b
, rgb
, 1),
746 nir_channel(b
, rgb
, 2),
747 nir_channel(b
, &tex
->dest
.ssa
, 3));
749 nir_ssa_def_rewrite_uses_after(&tex
->dest
.ssa
, nir_src_for_ssa(result
),
750 result
->parent_instr
);
754 nir_lower_tex_block(nir_block
*block
, nir_builder
*b
,
755 const nir_lower_tex_options
*options
)
757 bool progress
= false;
759 nir_foreach_instr_safe(instr
, block
) {
760 if (instr
->type
!= nir_instr_type_tex
)
763 nir_tex_instr
*tex
= nir_instr_as_tex(instr
);
764 bool lower_txp
= !!(options
->lower_txp
& (1 << tex
->sampler_dim
));
766 /* mask of src coords to saturate (clamp): */
767 unsigned sat_mask
= 0;
769 if ((1 << tex
->sampler_index
) & options
->saturate_r
)
770 sat_mask
|= (1 << 2); /* .z */
771 if ((1 << tex
->sampler_index
) & options
->saturate_t
)
772 sat_mask
|= (1 << 1); /* .y */
773 if ((1 << tex
->sampler_index
) & options
->saturate_s
)
774 sat_mask
|= (1 << 0); /* .x */
776 /* If we are clamping any coords, we must lower projector first
777 * as clamping happens *after* projection:
779 if (lower_txp
|| sat_mask
) {
784 if ((tex
->op
== nir_texop_txf
&& options
->lower_txf_offset
) ||
785 (sat_mask
&& nir_tex_instr_src_index(tex
, nir_tex_src_coord
) >= 0) ||
786 (tex
->sampler_dim
== GLSL_SAMPLER_DIM_RECT
&&
787 options
->lower_rect_offset
)) {
788 progress
= lower_offset(b
, tex
) || progress
;
791 if ((tex
->sampler_dim
== GLSL_SAMPLER_DIM_RECT
) && options
->lower_rect
) {
796 if ((1 << tex
->texture_index
) & options
->lower_y_uv_external
) {
797 lower_y_uv_external(b
, tex
);
801 if ((1 << tex
->texture_index
) & options
->lower_y_u_v_external
) {
802 lower_y_u_v_external(b
, tex
);
806 if ((1 << tex
->texture_index
) & options
->lower_yx_xuxv_external
) {
807 lower_yx_xuxv_external(b
, tex
);
811 if ((1 << tex
->texture_index
) & options
->lower_xy_uxvx_external
) {
812 lower_xy_uxvx_external(b
, tex
);
816 if ((1 << tex
->texture_index
) & options
->lower_ayuv_external
) {
817 lower_ayuv_external(b
, tex
);
822 saturate_src(b
, tex
, sat_mask
);
826 if (((1 << tex
->texture_index
) & options
->swizzle_result
) &&
827 !nir_tex_instr_is_query(tex
) &&
828 !(tex
->is_shadow
&& tex
->is_new_style_shadow
)) {
829 swizzle_result(b
, tex
, options
->swizzles
[tex
->texture_index
]);
833 /* should be after swizzle so we know which channels are rgb: */
834 if (((1 << tex
->texture_index
) & options
->lower_srgb
) &&
835 !nir_tex_instr_is_query(tex
) && !tex
->is_shadow
) {
836 linearize_srgb_result(b
, tex
);
840 if (tex
->op
== nir_texop_txd
&&
841 (options
->lower_txd
||
842 (options
->lower_txd_shadow
&& tex
->is_shadow
) ||
843 (options
->lower_txd_cube_map
&&
844 tex
->sampler_dim
== GLSL_SAMPLER_DIM_CUBE
))) {
845 lower_gradient(b
, tex
);
850 /* TXF, TXS and TXL require a LOD but not everything we implement using those
851 * three opcodes provides one. Provide a default LOD of 0.
853 if ((nir_tex_instr_src_index(tex
, nir_tex_src_lod
) == -1) &&
854 (tex
->op
== nir_texop_txf
|| tex
->op
== nir_texop_txs
||
855 tex
->op
== nir_texop_txl
|| tex
->op
== nir_texop_query_levels
||
856 (tex
->op
== nir_texop_tex
&&
857 b
->shader
->info
.stage
!= MESA_SHADER_FRAGMENT
))) {
858 b
->cursor
= nir_before_instr(&tex
->instr
);
859 nir_tex_instr_add_src(tex
, nir_tex_src_lod
, nir_src_for_ssa(nir_imm_int(b
, 0)));
869 nir_lower_tex_impl(nir_function_impl
*impl
,
870 const nir_lower_tex_options
*options
)
872 bool progress
= false;
874 nir_builder_init(&builder
, impl
);
876 nir_foreach_block(block
, impl
) {
877 progress
|= nir_lower_tex_block(block
, &builder
, options
);
880 nir_metadata_preserve(impl
, nir_metadata_block_index
|
881 nir_metadata_dominance
);
886 nir_lower_tex(nir_shader
*shader
, const nir_lower_tex_options
*options
)
888 bool progress
= false;
890 nir_foreach_function(function
, shader
) {
892 progress
|= nir_lower_tex_impl(function
->impl
, options
);