src/compiler/nir/nir_lower_tex.c
/*
 * Copyright © 2015 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

/*
 * This lowering pass supports (as configured via nir_lower_tex_options)
 * various texture related conversions:
 *   + texture projector lowering: converts the coordinate division for
 *     texture projection to be done in ALU instructions instead of
 *     asking the texture operation to do so.
 *   + lowering RECT: converts the un-normalized RECT texture coordinates
 *     to normalized coordinates with txs plus ALU instructions
 *   + saturate s/t/r coords: to emulate certain texture clamp/wrap modes,
 *     inserts instructions to clamp specified coordinates to [0.0, 1.0].
 *     Note that this automatically triggers texture projector lowering if
 *     needed, since clamping must happen after projector lowering.
 */

#include "nir.h"
#include "nir_builder.h"

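/* Lowers texture projection: divides the coordinate (and shadow comparator,
 * if present) by the projector source using ALU instructions, then removes
 * the projector source from the instruction.
 */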
static void
project_src(nir_builder *b, nir_tex_instr *tex)
{
   /* Find the projector in the srcs list, if present. */
   int proj_index = nir_tex_instr_src_index(tex, nir_tex_src_projector);
   if (proj_index < 0)
      return;

   b->cursor = nir_before_instr(&tex->instr);

   nir_ssa_def *inv_proj =
      nir_frcp(b, nir_ssa_for_src(b, tex->src[proj_index].src, 1));

   /* Walk through the sources projecting the arguments. */
   for (unsigned i = 0; i < tex->num_srcs; i++) {
      switch (tex->src[i].src_type) {
      case nir_tex_src_coord:
      case nir_tex_src_comparator:
         break;
      default:
         continue;
      }
      nir_ssa_def *unprojected =
         nir_ssa_for_src(b, tex->src[i].src, nir_tex_instr_src_size(tex, i));
      nir_ssa_def *projected = nir_fmul(b, unprojected, inv_proj);

      /* Array indices don't get projected, so make a new vector with the
       * coordinate's array index untouched.
       */
      if (tex->is_array && tex->src[i].src_type == nir_tex_src_coord) {
         switch (tex->coord_components) {
         case 4:
            projected = nir_vec4(b,
                                 nir_channel(b, projected, 0),
                                 nir_channel(b, projected, 1),
                                 nir_channel(b, projected, 2),
                                 nir_channel(b, unprojected, 3));
            break;
         case 3:
            projected = nir_vec3(b,
                                 nir_channel(b, projected, 0),
                                 nir_channel(b, projected, 1),
                                 nir_channel(b, unprojected, 2));
            break;
         case 2:
            projected = nir_vec2(b,
                                 nir_channel(b, projected, 0),
                                 nir_channel(b, unprojected, 1));
            break;
         default:
            unreachable("bad texture coord count for array");
            break;
         }
      }

      nir_instr_rewrite_src(&tex->instr,
                            &tex->src[i].src,
                            nir_src_for_ssa(projected));
   }

   nir_tex_instr_remove_src(tex, proj_index);
}

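/* Folds the texel offset source into the coordinate (float add for RECT,
 * integer add otherwise) and removes the offset source, leaving any array
 * index untouched.
 */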
static bool
lower_offset(nir_builder *b, nir_tex_instr *tex)
{
   int offset_index = nir_tex_instr_src_index(tex, nir_tex_src_offset);
   if (offset_index < 0)
      return false;

   int coord_index = nir_tex_instr_src_index(tex, nir_tex_src_coord);
   assert(coord_index >= 0);

   assert(tex->src[offset_index].src.is_ssa);
   assert(tex->src[coord_index].src.is_ssa);
   nir_ssa_def *offset = tex->src[offset_index].src.ssa;
   nir_ssa_def *coord = tex->src[coord_index].src.ssa;

   b->cursor = nir_before_instr(&tex->instr);

   nir_ssa_def *offset_coord;
   if (nir_tex_instr_src_type(tex, coord_index) == nir_type_float) {
      assert(tex->sampler_dim == GLSL_SAMPLER_DIM_RECT);
      offset_coord = nir_fadd(b, coord, nir_i2f32(b, offset));
   } else {
      offset_coord = nir_iadd(b, coord, offset);
   }

   if (tex->is_array) {
      /* The offset is not applied to the array index */
      if (tex->coord_components == 2) {
         offset_coord = nir_vec2(b, nir_channel(b, offset_coord, 0),
                                 nir_channel(b, coord, 1));
      } else if (tex->coord_components == 3) {
         offset_coord = nir_vec3(b, nir_channel(b, offset_coord, 0),
                                 nir_channel(b, offset_coord, 1),
                                 nir_channel(b, coord, 2));
      } else {
         unreachable("Invalid number of components");
      }
   }

   nir_instr_rewrite_src(&tex->instr, &tex->src[coord_index].src,
                         nir_src_for_ssa(offset_coord));

   nir_tex_instr_remove_src(tex, offset_index);

   return true;
}

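/* Emits a txs (textureSize) query at LOD 0 for the same texture/sampler as
 * the given instruction and returns the size converted to float.
 */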
static nir_ssa_def *
get_texture_size(nir_builder *b, nir_tex_instr *tex)
{
   b->cursor = nir_before_instr(&tex->instr);

   nir_tex_instr *txs;

   txs = nir_tex_instr_create(b->shader, 1);
   txs->op = nir_texop_txs;
   txs->sampler_dim = tex->sampler_dim;
   txs->is_array = tex->is_array;
   txs->is_shadow = tex->is_shadow;
   txs->is_new_style_shadow = tex->is_new_style_shadow;
   txs->texture_index = tex->texture_index;
   txs->texture = nir_deref_var_clone(tex->texture, txs);
   txs->sampler_index = tex->sampler_index;
   txs->sampler = nir_deref_var_clone(tex->sampler, txs);
   txs->dest_type = nir_type_int;

   /* only single src, the lod: */
   txs->src[0].src = nir_src_for_ssa(nir_imm_int(b, 0));
   txs->src[0].src_type = nir_tex_src_lod;

   nir_ssa_dest_init(&txs->instr, &txs->dest,
                     nir_tex_instr_dest_size(txs), 32, NULL);
   nir_builder_instr_insert(b, &txs->instr);

   return nir_i2f32(b, &txs->dest.ssa);
}

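/* Lowers RECT sampling: divides the unnormalized coordinates by the texture
 * size and retypes the instruction as an ordinary 2D sampler access.
 */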
static void
lower_rect(nir_builder *b, nir_tex_instr *tex)
{
   nir_ssa_def *txs = get_texture_size(b, tex);
   nir_ssa_def *scale = nir_frcp(b, txs);

   /* Walk through the sources normalizing the requested arguments. */
   for (unsigned i = 0; i < tex->num_srcs; i++) {
      if (tex->src[i].src_type != nir_tex_src_coord)
         continue;

      nir_ssa_def *coords =
         nir_ssa_for_src(b, tex->src[i].src, tex->coord_components);
      nir_instr_rewrite_src(&tex->instr,
                            &tex->src[i].src,
                            nir_src_for_ssa(nir_fmul(b, coords, scale)));
   }

   tex->sampler_dim = GLSL_SAMPLER_DIM_2D;
}

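/* Emits a plain 2D tex of the given plane, reusing the original coordinate;
 * helper for the external/YUV lowerings below.
 */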
static nir_ssa_def *
sample_plane(nir_builder *b, nir_tex_instr *tex, int plane)
{
   assert(tex->dest.is_ssa);
   assert(nir_tex_instr_dest_size(tex) == 4);
   assert(nir_alu_type_get_base_type(tex->dest_type) == nir_type_float);
   assert(tex->op == nir_texop_tex);
   assert(tex->coord_components == 2);

   nir_tex_instr *plane_tex = nir_tex_instr_create(b->shader, 2);
   nir_src_copy(&plane_tex->src[0].src, &tex->src[0].src, plane_tex);
   plane_tex->src[0].src_type = nir_tex_src_coord;
   plane_tex->src[1].src = nir_src_for_ssa(nir_imm_int(b, plane));
   plane_tex->src[1].src_type = nir_tex_src_plane;
   plane_tex->op = nir_texop_tex;
   plane_tex->sampler_dim = GLSL_SAMPLER_DIM_2D;
   plane_tex->dest_type = nir_type_float;
   plane_tex->coord_components = 2;

   plane_tex->texture_index = tex->texture_index;
   plane_tex->texture = nir_deref_var_clone(tex->texture, plane_tex);
   plane_tex->sampler_index = tex->sampler_index;
   plane_tex->sampler = nir_deref_var_clone(tex->sampler, plane_tex);

   nir_ssa_dest_init(&plane_tex->instr, &plane_tex->dest, 4, 32, NULL);

   nir_builder_instr_insert(b, &plane_tex->instr);

   return &plane_tex->dest.ssa;
}

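/* Converts sampled Y, U and V values to an RGBA result and rewrites all uses
 * of the original tex destination.  The offsets and matrix used here appear
 * to be limited-range BT.601-style coefficients.
 */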
static void
convert_yuv_to_rgb(nir_builder *b, nir_tex_instr *tex,
                   nir_ssa_def *y, nir_ssa_def *u, nir_ssa_def *v)
{
   nir_const_value m[3] = {
      { .f32 = { 1.0f, 0.0f, 1.59602678f, 0.0f } },
      { .f32 = { 1.0f, -0.39176229f, -0.81296764f, 0.0f } },
      { .f32 = { 1.0f, 2.01723214f, 0.0f, 0.0f } }
   };

   nir_ssa_def *yuv =
      nir_vec4(b,
               nir_fmul(b, nir_imm_float(b, 1.16438356f),
                        nir_fadd(b, y, nir_imm_float(b, -16.0f / 255.0f))),
               nir_channel(b, nir_fadd(b, u, nir_imm_float(b, -128.0f / 255.0f)), 0),
               nir_channel(b, nir_fadd(b, v, nir_imm_float(b, -128.0f / 255.0f)), 0),
               nir_imm_float(b, 0.0));

   nir_ssa_def *red = nir_fdot4(b, yuv, nir_build_imm(b, 4, 32, m[0]));
   nir_ssa_def *green = nir_fdot4(b, yuv, nir_build_imm(b, 4, 32, m[1]));
   nir_ssa_def *blue = nir_fdot4(b, yuv, nir_build_imm(b, 4, 32, m[2]));

   nir_ssa_def *result = nir_vec4(b, red, green, blue, nir_imm_float(b, 1.0f));

   nir_ssa_def_rewrite_uses(&tex->dest.ssa, nir_src_for_ssa(result));
}

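/* Lowers a sample from an external texture stored as a Y plane plus an
 * interleaved UV plane (e.g. an NV12-style layout): sample each plane, then
 * convert to RGB.
 */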
static void
lower_y_uv_external(nir_builder *b, nir_tex_instr *tex)
{
   b->cursor = nir_after_instr(&tex->instr);

   nir_ssa_def *y = sample_plane(b, tex, 0);
   nir_ssa_def *uv = sample_plane(b, tex, 1);

   convert_yuv_to_rgb(b, tex,
                      nir_channel(b, y, 0),
                      nir_channel(b, uv, 0),
                      nir_channel(b, uv, 1));
}

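/* As above, but for three separate Y, U and V planes. */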
static void
lower_y_u_v_external(nir_builder *b, nir_tex_instr *tex)
{
   b->cursor = nir_after_instr(&tex->instr);

   nir_ssa_def *y = sample_plane(b, tex, 0);
   nir_ssa_def *u = sample_plane(b, tex, 1);
   nir_ssa_def *v = sample_plane(b, tex, 2);

   convert_yuv_to_rgb(b, tex,
                      nir_channel(b, y, 0),
                      nir_channel(b, u, 0),
                      nir_channel(b, v, 0));
}

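/* Y comes from channel 0 of plane 0; U and V come from channels 1 and 3 of
 * plane 1 (a YUYV-style packing).
 */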
static void
lower_yx_xuxv_external(nir_builder *b, nir_tex_instr *tex)
{
   b->cursor = nir_after_instr(&tex->instr);

   nir_ssa_def *y = sample_plane(b, tex, 0);
   nir_ssa_def *xuxv = sample_plane(b, tex, 1);

   convert_yuv_to_rgb(b, tex,
                      nir_channel(b, y, 0),
                      nir_channel(b, xuxv, 1),
                      nir_channel(b, xuxv, 3));
}

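/* Y comes from channel 1 of plane 0; U and V come from channels 0 and 2 of
 * plane 1 (a UYVY-style packing).
 */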
static void
lower_xy_uxvx_external(nir_builder *b, nir_tex_instr *tex)
{
   b->cursor = nir_after_instr(&tex->instr);

   nir_ssa_def *y = sample_plane(b, tex, 0);
   nir_ssa_def *uxvx = sample_plane(b, tex, 1);

   convert_yuv_to_rgb(b, tex,
                      nir_channel(b, y, 1),
                      nir_channel(b, uxvx, 0),
                      nir_channel(b, uxvx, 2));
}

/*
 * Emits a textureLod operation used to replace an existing
 * textureGrad instruction.
 */
static void
replace_gradient_with_lod(nir_builder *b, nir_ssa_def *lod, nir_tex_instr *tex)
{
   /* We are going to emit a textureLod() with the same parameters except that
    * we replace ddx/ddy with lod.
    */
   int num_srcs = tex->num_srcs - 1;
   nir_tex_instr *txl = nir_tex_instr_create(b->shader, num_srcs);

   txl->op = nir_texop_txl;
   txl->sampler_dim = tex->sampler_dim;
   txl->texture_index = tex->texture_index;
   txl->dest_type = tex->dest_type;
   txl->is_array = tex->is_array;
   txl->is_shadow = tex->is_shadow;
   txl->is_new_style_shadow = tex->is_new_style_shadow;
   txl->sampler_index = tex->sampler_index;
   txl->texture = nir_deref_var_clone(tex->texture, txl);
   txl->sampler = nir_deref_var_clone(tex->sampler, txl);
   txl->coord_components = tex->coord_components;

   nir_ssa_dest_init(&txl->instr, &txl->dest, 4, 32, NULL);

   int src_num = 0;
   for (int i = 0; i < tex->num_srcs; i++) {
      if (tex->src[i].src_type == nir_tex_src_ddx ||
          tex->src[i].src_type == nir_tex_src_ddy)
         continue;
      nir_src_copy(&txl->src[src_num].src, &tex->src[i].src, txl);
      txl->src[src_num].src_type = tex->src[i].src_type;
      src_num++;
   }

   txl->src[src_num].src = nir_src_for_ssa(lod);
   txl->src[src_num].src_type = nir_tex_src_lod;
   src_num++;

   assert(src_num == num_srcs);

   nir_ssa_dest_init(&txl->instr, &txl->dest,
                     tex->dest.ssa.num_components, 32, NULL);
   nir_builder_instr_insert(b, &txl->instr);

   nir_ssa_def_rewrite_uses(&tex->dest.ssa, nir_src_for_ssa(&txl->dest.ssa));

   nir_instr_remove(&tex->instr);
}

static void
lower_gradient_cube_map(nir_builder *b, nir_tex_instr *tex)
{
   assert(tex->sampler_dim == GLSL_SAMPLER_DIM_CUBE);
   assert(tex->op == nir_texop_txd);
   assert(tex->dest.is_ssa);

   /* Use textureSize() to get the width and height of LOD 0 */
   nir_ssa_def *size = get_texture_size(b, tex);

   /* Cubemap texture lookups first generate a texture coordinate normalized
    * to [-1, 1] on the appropriate face. The appropriate face is determined
    * by which component has the largest magnitude and its sign. The texture
    * coordinate is the remaining two texture coordinates divided by the
    * absolute value of the component with the largest magnitude. Because of
    * this division, computing the derivative of the texel coordinate requires
    * the quotient rule. The high-level GLSL code is as follows:
    *
    * Step 1: selection
    *
    * vec3 abs_p, Q, dQdx, dQdy;
    * abs_p = abs(ir->coordinate);
    * if (abs_p.x >= max(abs_p.y, abs_p.z)) {
    *    Q = ir->coordinate.yzx;
    *    dQdx = ir->lod_info.grad.dPdx.yzx;
    *    dQdy = ir->lod_info.grad.dPdy.yzx;
    * }
    * if (abs_p.y >= max(abs_p.x, abs_p.z)) {
    *    Q = ir->coordinate.xzy;
    *    dQdx = ir->lod_info.grad.dPdx.xzy;
    *    dQdy = ir->lod_info.grad.dPdy.xzy;
    * }
    * if (abs_p.z >= max(abs_p.x, abs_p.y)) {
    *    Q = ir->coordinate;
    *    dQdx = ir->lod_info.grad.dPdx;
    *    dQdy = ir->lod_info.grad.dPdy;
    * }
    *
    * Step 2: use the quotient rule to compute the derivatives. The texel
    * coordinate, normalized to [-1, 1], is given by Q.xy / (sign(Q.z) * Q.z).
    * We are only concerned with the magnitudes of the derivatives, which are
    * not affected by the sign, so we drop the sign from the computation.
    *
    * vec2 dx, dy;
    * float recip;
    *
    * recip = 1.0 / Q.z;
    * dx = recip * ( dQdx.xy - Q.xy * (dQdx.z * recip) );
    * dy = recip * ( dQdy.xy - Q.xy * (dQdy.z * recip) );
    *
    * Step 3: compute LOD. At this point we have the derivatives of the
    * texture coordinates normalized to [-1,1]. We take the LOD to be
    *  result = log2(max(sqrt(dot(dx, dx)), sqrt(dot(dy, dy))) * 0.5 * L)
    *         = -1.0 + log2(max(sqrt(dot(dx, dx)), sqrt(dot(dy, dy))) * L)
    *         = -1.0 + log2(sqrt(max(dot(dx, dx), dot(dy, dy))) * L)
    *         = -1.0 + log2(sqrt(L * L * max(dot(dx, dx), dot(dy, dy))))
    *         = -1.0 + 0.5 * log2(L * L * max(dot(dx, dx), dot(dy, dy)))
    *    where L is the dimension of the cubemap. The code is:
    *
    *    float M, result;
    *    M = max(dot(dx, dx), dot(dy, dy));
    *    L = textureSize(sampler, 0).x;
    *    result = -1.0 + 0.5 * log2(L * L * M);
    */

   /* coordinate */
   nir_ssa_def *p =
      tex->src[nir_tex_instr_src_index(tex, nir_tex_src_coord)].src.ssa;

   /* unmodified dPdx, dPdy values */
   nir_ssa_def *dPdx =
      tex->src[nir_tex_instr_src_index(tex, nir_tex_src_ddx)].src.ssa;
   nir_ssa_def *dPdy =
      tex->src[nir_tex_instr_src_index(tex, nir_tex_src_ddy)].src.ssa;

   nir_ssa_def *abs_p = nir_fabs(b, p);
   nir_ssa_def *abs_p_x = nir_channel(b, abs_p, 0);
   nir_ssa_def *abs_p_y = nir_channel(b, abs_p, 1);
   nir_ssa_def *abs_p_z = nir_channel(b, abs_p, 2);

   /* 1. compute selector */
   nir_ssa_def *Q, *dQdx, *dQdy;

   nir_ssa_def *cond_z = nir_fge(b, abs_p_z, nir_fmax(b, abs_p_x, abs_p_y));
   nir_ssa_def *cond_y = nir_fge(b, abs_p_y, nir_fmax(b, abs_p_x, abs_p_z));

   unsigned yzx[4] = { 1, 2, 0, 0 };
   unsigned xzy[4] = { 0, 2, 1, 0 };

   Q = nir_bcsel(b, cond_z,
                 p,
                 nir_bcsel(b, cond_y,
                           nir_swizzle(b, p, xzy, 3, false),
                           nir_swizzle(b, p, yzx, 3, false)));

   dQdx = nir_bcsel(b, cond_z,
                    dPdx,
                    nir_bcsel(b, cond_y,
                              nir_swizzle(b, dPdx, xzy, 3, false),
                              nir_swizzle(b, dPdx, yzx, 3, false)));

   dQdy = nir_bcsel(b, cond_z,
                    dPdy,
                    nir_bcsel(b, cond_y,
                              nir_swizzle(b, dPdy, xzy, 3, false),
                              nir_swizzle(b, dPdy, yzx, 3, false)));

   /* 2. quotient rule */

   /* tmp = Q.xy * recip;
    * dx = recip * ( dQdx.xy - (tmp * dQdx.z) );
    * dy = recip * ( dQdy.xy - (tmp * dQdy.z) );
    */
   nir_ssa_def *rcp_Q_z = nir_frcp(b, nir_channel(b, Q, 2));

   unsigned xy[4] = { 0, 1, 0, 0 };
   nir_ssa_def *Q_xy = nir_swizzle(b, Q, xy, 2, false);
   nir_ssa_def *tmp = nir_fmul(b, Q_xy, rcp_Q_z);

   nir_ssa_def *dQdx_xy = nir_swizzle(b, dQdx, xy, 2, false);
   nir_ssa_def *dQdx_z = nir_channel(b, dQdx, 2);
   nir_ssa_def *dx =
      nir_fmul(b, rcp_Q_z, nir_fsub(b, dQdx_xy, nir_fmul(b, tmp, dQdx_z)));

   nir_ssa_def *dQdy_xy = nir_swizzle(b, dQdy, xy, 2, false);
   nir_ssa_def *dQdy_z = nir_channel(b, dQdy, 2);
   nir_ssa_def *dy =
      nir_fmul(b, rcp_Q_z, nir_fsub(b, dQdy_xy, nir_fmul(b, tmp, dQdy_z)));

   /* M = max(dot(dx, dx), dot(dy, dy)); */
   nir_ssa_def *M = nir_fmax(b, nir_fdot(b, dx, dx), nir_fdot(b, dy, dy));

   /* size has textureSize() of LOD 0 */
   nir_ssa_def *L = nir_channel(b, size, 0);

   /* lod = -1.0 + 0.5 * log2(L * L * M); */
   nir_ssa_def *lod =
      nir_fadd(b,
               nir_imm_float(b, -1.0f),
               nir_fmul(b,
                        nir_imm_float(b, 0.5f),
                        nir_flog2(b, nir_fmul(b, L, nir_fmul(b, L, M)))));

   /* 3. Replace the gradient instruction with an equivalent lod instruction */
   replace_gradient_with_lod(b, lod, tex);
}

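/* Lowers txd on non-cube shadow samplers: scales the gradients into texel
 * units using textureSize(), computes rho and an explicit LOD from it, and
 * replaces the instruction with an equivalent txl.
 */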
static void
lower_gradient_shadow(nir_builder *b, nir_tex_instr *tex)
{
   assert(tex->sampler_dim != GLSL_SAMPLER_DIM_CUBE);
   assert(tex->is_shadow);
   assert(tex->op == nir_texop_txd);
   assert(tex->dest.is_ssa);

   /* Use textureSize() to get the width and height of LOD 0 */
   unsigned component_mask;
   switch (tex->sampler_dim) {
   case GLSL_SAMPLER_DIM_3D:
      component_mask = 7;
      break;
   case GLSL_SAMPLER_DIM_1D:
      component_mask = 1;
      break;
   default:
      component_mask = 3;
      break;
   }

   nir_ssa_def *size =
      nir_channels(b, get_texture_size(b, tex), component_mask);

   /* Scale the gradients by width and height. Effectively, the incoming
    * gradients are s'(x,y), t'(x,y), and r'(x,y) from equation 3.19 in the
    * GL 3.0 spec; we want u'(x,y), which is w_t * s'(x,y).
    */
   nir_ssa_def *ddx =
      tex->src[nir_tex_instr_src_index(tex, nir_tex_src_ddx)].src.ssa;
   nir_ssa_def *ddy =
      tex->src[nir_tex_instr_src_index(tex, nir_tex_src_ddy)].src.ssa;

   nir_ssa_def *dPdx = nir_fmul(b, ddx, size);
   nir_ssa_def *dPdy = nir_fmul(b, ddy, size);

   nir_ssa_def *rho;
   if (dPdx->num_components == 1) {
      rho = nir_fmax(b, nir_fabs(b, dPdx), nir_fabs(b, dPdy));
   } else {
      rho = nir_fmax(b,
                     nir_fsqrt(b, nir_fdot(b, dPdx, dPdx)),
                     nir_fsqrt(b, nir_fdot(b, dPdy, dPdy)));
   }

   /* lod = log2(rho). We're ignoring GL state biases for now. */
   nir_ssa_def *lod = nir_flog2(b, rho);

   /* Replace the gradient instruction with an equivalent lod instruction */
   replace_gradient_with_lod(b, lod, tex);
}

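/* Clamps the requested coordinate components: to [0.0, 1.0] for normalized
 * samplers, or to [0, size] for RECT samplers.  The array index is never
 * clamped.
 */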
static void
saturate_src(nir_builder *b, nir_tex_instr *tex, unsigned sat_mask)
{
   b->cursor = nir_before_instr(&tex->instr);

   /* Walk through the sources saturating the requested arguments. */
   for (unsigned i = 0; i < tex->num_srcs; i++) {
      if (tex->src[i].src_type != nir_tex_src_coord)
         continue;

      nir_ssa_def *src =
         nir_ssa_for_src(b, tex->src[i].src, tex->coord_components);

      /* split src into components: */
      nir_ssa_def *comp[4];

      assume(tex->coord_components >= 1);

      for (unsigned j = 0; j < tex->coord_components; j++)
         comp[j] = nir_channel(b, src, j);

      /* clamp requested components, array index does not get clamped: */
      unsigned ncomp = tex->coord_components;
      if (tex->is_array)
         ncomp--;

      for (unsigned j = 0; j < ncomp; j++) {
         if ((1 << j) & sat_mask) {
            if (tex->sampler_dim == GLSL_SAMPLER_DIM_RECT) {
               /* non-normalized texture coords, so clamp to texture
                * size rather than [0.0, 1.0]
                */
               nir_ssa_def *txs = get_texture_size(b, tex);
               comp[j] = nir_fmax(b, comp[j], nir_imm_float(b, 0.0));
               comp[j] = nir_fmin(b, comp[j], nir_channel(b, txs, j));
            } else {
               comp[j] = nir_fsat(b, comp[j]);
            }
         }
      }

      /* and move the result back into a single vecN: */
      src = nir_vec(b, comp, tex->coord_components);

      nir_instr_rewrite_src(&tex->instr,
                            &tex->src[i].src,
                            nir_src_for_ssa(src));
   }
}

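/* Builds an immediate vec4 of all zeros (swizzle value 4) or all ones
 * (swizzle value 5), matching the 0/1 entries allowed in a texture swizzle.
 */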
static nir_ssa_def *
get_zero_or_one(nir_builder *b, nir_alu_type type, uint8_t swizzle_val)
{
   nir_const_value v;

   memset(&v, 0, sizeof(v));

   if (swizzle_val == 4) {
      v.u32[0] = v.u32[1] = v.u32[2] = v.u32[3] = 0;
   } else {
      assert(swizzle_val == 5);
      if (type == nir_type_float)
         v.f32[0] = v.f32[1] = v.f32[2] = v.f32[3] = 1.0;
      else
         v.u32[0] = v.u32[1] = v.u32[2] = v.u32[3] = 1;
   }

   return nir_build_imm(b, 4, 32, v);
}

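/* Applies the per-texture result swizzle after the instruction.  tg4 can
 * usually just have its gathered component remapped; everything else gets a
 * swizzling MOV or a vec4 rebuilt from channels and 0/1 immediates.
 */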
static void
swizzle_result(nir_builder *b, nir_tex_instr *tex, const uint8_t swizzle[4])
{
   assert(tex->dest.is_ssa);

   b->cursor = nir_after_instr(&tex->instr);

   nir_ssa_def *swizzled;
   if (tex->op == nir_texop_tg4) {
      if (swizzle[tex->component] < 4) {
         /* This one's easy */
         tex->component = swizzle[tex->component];
         return;
      } else {
         swizzled = get_zero_or_one(b, tex->dest_type, swizzle[tex->component]);
      }
   } else {
      assert(nir_tex_instr_dest_size(tex) == 4);
      if (swizzle[0] < 4 && swizzle[1] < 4 &&
          swizzle[2] < 4 && swizzle[3] < 4) {
         unsigned swiz[4] = { swizzle[0], swizzle[1], swizzle[2], swizzle[3] };
         /* We have no 0s or 1s, just emit a swizzling MOV */
         swizzled = nir_swizzle(b, &tex->dest.ssa, swiz, 4, false);
      } else {
         nir_ssa_def *srcs[4];
         for (unsigned i = 0; i < 4; i++) {
            if (swizzle[i] < 4) {
               srcs[i] = nir_channel(b, &tex->dest.ssa, swizzle[i]);
            } else {
               srcs[i] = get_zero_or_one(b, tex->dest_type, swizzle[i]);
            }
         }
         swizzled = nir_vec(b, srcs, 4);
      }
   }

   nir_ssa_def_rewrite_uses_after(&tex->dest.ssa, nir_src_for_ssa(swizzled),
                                  swizzled->parent_instr);
}

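/* Converts the sRGB-encoded RGB channels of the result to linear using the
 * standard sRGB decode formula (see below); alpha is passed through
 * unchanged.
 */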
static void
linearize_srgb_result(nir_builder *b, nir_tex_instr *tex)
{
   assert(tex->dest.is_ssa);
   assert(nir_tex_instr_dest_size(tex) == 4);
   assert(nir_alu_type_get_base_type(tex->dest_type) == nir_type_float);

   b->cursor = nir_after_instr(&tex->instr);

   static const unsigned swiz[4] = {0, 1, 2, 0};
   nir_ssa_def *comp = nir_swizzle(b, &tex->dest.ssa, swiz, 3, true);

   /* Formula is:
    *    (comp <= 0.04045) ?
    *          (comp / 12.92) :
    *          pow((comp + 0.055) / 1.055, 2.4)
    */
   nir_ssa_def *low = nir_fmul(b, comp, nir_imm_float(b, 1.0 / 12.92));
   nir_ssa_def *high = nir_fpow(b,
                                nir_fmul(b,
                                         nir_fadd(b,
                                                  comp,
                                                  nir_imm_float(b, 0.055)),
                                         nir_imm_float(b, 1.0 / 1.055)),
                                nir_imm_float(b, 2.4));
   nir_ssa_def *cond = nir_fge(b, nir_imm_float(b, 0.04045), comp);
   nir_ssa_def *rgb = nir_bcsel(b, cond, low, high);

   /* alpha is untouched: */
   nir_ssa_def *result = nir_vec4(b,
                                  nir_channel(b, rgb, 0),
                                  nir_channel(b, rgb, 1),
                                  nir_channel(b, rgb, 2),
                                  nir_channel(b, &tex->dest.ssa, 3));

   nir_ssa_def_rewrite_uses_after(&tex->dest.ssa, nir_src_for_ssa(result),
                                  result->parent_instr);
}

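/* Applies the requested lowerings to each texture instruction in the block.
 * The order matters: the projector is lowered before coordinate clamping,
 * and swizzling happens before sRGB linearization.
 */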
static bool
nir_lower_tex_block(nir_block *block, nir_builder *b,
                    const nir_lower_tex_options *options)
{
   bool progress = false;

   nir_foreach_instr_safe(instr, block) {
      if (instr->type != nir_instr_type_tex)
         continue;

      nir_tex_instr *tex = nir_instr_as_tex(instr);
      bool lower_txp = !!(options->lower_txp & (1 << tex->sampler_dim));

      /* mask of src coords to saturate (clamp): */
      unsigned sat_mask = 0;

      if ((1 << tex->sampler_index) & options->saturate_r)
         sat_mask |= (1 << 2);    /* .z */
      if ((1 << tex->sampler_index) & options->saturate_t)
         sat_mask |= (1 << 1);    /* .y */
      if ((1 << tex->sampler_index) & options->saturate_s)
         sat_mask |= (1 << 0);    /* .x */

      /* If we are clamping any coords, we must lower projector first
       * as clamping happens *after* projection:
       */
      if (lower_txp || sat_mask) {
         project_src(b, tex);
         progress = true;
      }

      if ((tex->op == nir_texop_txf && options->lower_txf_offset) ||
          (tex->sampler_dim == GLSL_SAMPLER_DIM_RECT &&
           options->lower_rect_offset)) {
         progress = lower_offset(b, tex) || progress;
      }

      if ((tex->sampler_dim == GLSL_SAMPLER_DIM_RECT) && options->lower_rect) {
         lower_rect(b, tex);
         progress = true;
      }

      if ((1 << tex->texture_index) & options->lower_y_uv_external) {
         lower_y_uv_external(b, tex);
         progress = true;
      }

      if ((1 << tex->texture_index) & options->lower_y_u_v_external) {
         lower_y_u_v_external(b, tex);
         progress = true;
      }

      if ((1 << tex->texture_index) & options->lower_yx_xuxv_external) {
         lower_yx_xuxv_external(b, tex);
         progress = true;
      }

      if ((1 << tex->texture_index) & options->lower_xy_uxvx_external) {
         lower_xy_uxvx_external(b, tex);
         progress = true;
      }

      if (sat_mask) {
         saturate_src(b, tex, sat_mask);
         progress = true;
      }

      if (((1 << tex->texture_index) & options->swizzle_result) &&
          !nir_tex_instr_is_query(tex) &&
          !(tex->is_shadow && tex->is_new_style_shadow)) {
         swizzle_result(b, tex, options->swizzles[tex->texture_index]);
         progress = true;
      }

      /* should be after swizzle so we know which channels are rgb: */
      if (((1 << tex->texture_index) & options->lower_srgb) &&
          !nir_tex_instr_is_query(tex) && !tex->is_shadow) {
         linearize_srgb_result(b, tex);
         progress = true;
      }

      if (tex->op == nir_texop_txd &&
          tex->sampler_dim == GLSL_SAMPLER_DIM_CUBE &&
          (options->lower_txd_cube_map ||
           (tex->is_shadow && options->lower_txd_shadow))) {
         lower_gradient_cube_map(b, tex);
         progress = true;
         continue;
      }

      if (tex->op == nir_texop_txd && options->lower_txd_shadow &&
          tex->is_shadow && tex->sampler_dim != GLSL_SAMPLER_DIM_CUBE) {
         lower_gradient_shadow(b, tex);
         progress = true;
         continue;
      }

      /* TXF, TXS and TXL require a LOD but not everything we implement using those
       * three opcodes provides one. Provide a default LOD of 0.
       */
      if ((nir_tex_instr_src_index(tex, nir_tex_src_lod) == -1) &&
          (tex->op == nir_texop_txf || tex->op == nir_texop_txs ||
           tex->op == nir_texop_txl || tex->op == nir_texop_query_levels ||
           (tex->op == nir_texop_tex &&
            b->shader->info.stage != MESA_SHADER_FRAGMENT))) {
         b->cursor = nir_before_instr(&tex->instr);
         nir_tex_instr_add_src(tex, nir_tex_src_lod, nir_src_for_ssa(nir_imm_int(b, 0)));
         progress = true;
         continue;
      }
   }

   return progress;
}

static bool
nir_lower_tex_impl(nir_function_impl *impl,
                   const nir_lower_tex_options *options)
{
   bool progress = false;
   nir_builder builder;
   nir_builder_init(&builder, impl);

   nir_foreach_block(block, impl) {
      progress |= nir_lower_tex_block(block, &builder, options);
   }

   nir_metadata_preserve(impl, nir_metadata_block_index |
                               nir_metadata_dominance);
   return progress;
}

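/* Pass entry point: runs the lowering over every function in the shader and
 * reports whether any instruction changed.
 */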
bool
nir_lower_tex(nir_shader *shader, const nir_lower_tex_options *options)
{
   bool progress = false;

   nir_foreach_function(function, shader) {
      if (function->impl)
         progress |= nir_lower_tex_impl(function->impl, options);
   }

   return progress;
}