nir/lower_tex: Simplify lower_gradient logic
[mesa.git] / src / compiler / nir / nir_lower_tex.c
1 /*
2 * Copyright © 2015 Broadcom
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 /*
25 * This lowering pass supports (as configured via nir_lower_tex_options)
26 * various texture related conversions:
27 * + texture projector lowering: converts the coordinate division for
28 * texture projection to be done in ALU instructions instead of
29 * asking the texture operation to do so.
30 * + lowering RECT: converts the un-normalized RECT texture coordinates
31 * to normalized coordinates with txs plus ALU instructions
32 * + saturate s/t/r coords: to emulate certain texture clamp/wrap modes,
33 * inserts instructions to clamp specified coordinates to [0.0, 1.0].
34 * Note that this automatically triggers texture projector lowering if
35 * needed, since clamping must happen after projector lowering.
36 */
37
38 #include "nir.h"
39 #include "nir_builder.h"
40 #include "nir_format_convert.h"
41
/* Lowers nir_tex_src_projector: divides the texture coordinate and (if
 * present) the shadow comparator by the projector via a multiply with its
 * reciprocal, then removes the projector source from the instruction.
 * No-op when the instruction carries no projector.
 */
static void
project_src(nir_builder *b, nir_tex_instr *tex)
{
   /* Find the projector in the srcs list, if present. */
   int proj_index = nir_tex_instr_src_index(tex, nir_tex_src_projector);
   if (proj_index < 0)
      return;

   b->cursor = nir_before_instr(&tex->instr);

   /* One reciprocal, reused for every projected source. */
   nir_ssa_def *inv_proj =
      nir_frcp(b, nir_ssa_for_src(b, tex->src[proj_index].src, 1));

   /* Walk through the sources projecting the arguments. */
   for (unsigned i = 0; i < tex->num_srcs; i++) {
      switch (tex->src[i].src_type) {
      case nir_tex_src_coord:
      case nir_tex_src_comparator:
         break;
      default:
         continue;
      }
      nir_ssa_def *unprojected =
         nir_ssa_for_src(b, tex->src[i].src, nir_tex_instr_src_size(tex, i));
      nir_ssa_def *projected = nir_fmul(b, unprojected, inv_proj);

      /* Array indices don't get projected, so make an new vector with the
       * coordinate's array index untouched.
       */
      if (tex->is_array && tex->src[i].src_type == nir_tex_src_coord) {
         switch (tex->coord_components) {
         case 4:
            projected = nir_vec4(b,
                                 nir_channel(b, projected, 0),
                                 nir_channel(b, projected, 1),
                                 nir_channel(b, projected, 2),
                                 nir_channel(b, unprojected, 3));
            break;
         case 3:
            projected = nir_vec3(b,
                                 nir_channel(b, projected, 0),
                                 nir_channel(b, projected, 1),
                                 nir_channel(b, unprojected, 2));
            break;
         case 2:
            projected = nir_vec2(b,
                                 nir_channel(b, projected, 0),
                                 nir_channel(b, unprojected, 1));
            break;
         default:
            unreachable("bad texture coord count for array");
            break;
         }
      }

      nir_instr_rewrite_src(&tex->instr,
                            &tex->src[i].src,
                            nir_src_for_ssa(projected));
   }

   nir_tex_instr_remove_src(tex, proj_index);
}
104
105 static nir_ssa_def *
106 get_texture_size(nir_builder *b, nir_tex_instr *tex)
107 {
108 b->cursor = nir_before_instr(&tex->instr);
109
110 nir_tex_instr *txs;
111
112 unsigned num_srcs = 1; /* One for the LOD */
113 for (unsigned i = 0; i < tex->num_srcs; i++) {
114 if (tex->src[i].src_type == nir_tex_src_texture_deref ||
115 tex->src[i].src_type == nir_tex_src_sampler_deref ||
116 tex->src[i].src_type == nir_tex_src_texture_offset ||
117 tex->src[i].src_type == nir_tex_src_sampler_offset)
118 num_srcs++;
119 }
120
121 txs = nir_tex_instr_create(b->shader, num_srcs);
122 txs->op = nir_texop_txs;
123 txs->sampler_dim = tex->sampler_dim;
124 txs->is_array = tex->is_array;
125 txs->is_shadow = tex->is_shadow;
126 txs->is_new_style_shadow = tex->is_new_style_shadow;
127 txs->texture_index = tex->texture_index;
128 txs->sampler_index = tex->sampler_index;
129 txs->dest_type = nir_type_int;
130
131 unsigned idx = 0;
132 for (unsigned i = 0; i < tex->num_srcs; i++) {
133 if (tex->src[i].src_type == nir_tex_src_texture_deref ||
134 tex->src[i].src_type == nir_tex_src_sampler_deref ||
135 tex->src[i].src_type == nir_tex_src_texture_offset ||
136 tex->src[i].src_type == nir_tex_src_sampler_offset) {
137 nir_src_copy(&txs->src[idx].src, &tex->src[i].src, txs);
138 txs->src[idx].src_type = tex->src[i].src_type;
139 idx++;
140 }
141 }
142 /* Add in an LOD because some back-ends require it */
143 txs->src[idx].src = nir_src_for_ssa(nir_imm_int(b, 0));
144 txs->src[idx].src_type = nir_tex_src_lod;
145
146 nir_ssa_dest_init(&txs->instr, &txs->dest,
147 nir_tex_instr_dest_size(txs), 32, NULL);
148 nir_builder_instr_insert(b, &txs->instr);
149
150 return nir_i2f32(b, &txs->dest.ssa);
151 }
152
/* Folds a nir_tex_src_offset into the coordinate itself, for back-ends
 * that cannot apply texel offsets natively.  Float coords (other than
 * RECT, which is already in texels) scale the offset from texels to
 * normalized space using the texture size; integer coords just add.
 * The array index component is left untouched.  Returns false when the
 * instruction has no offset source.
 */
static bool
lower_offset(nir_builder *b, nir_tex_instr *tex)
{
   int offset_index = nir_tex_instr_src_index(tex, nir_tex_src_offset);
   if (offset_index < 0)
      return false;

   int coord_index = nir_tex_instr_src_index(tex, nir_tex_src_coord);
   assert(coord_index >= 0);

   assert(tex->src[offset_index].src.is_ssa);
   assert(tex->src[coord_index].src.is_ssa);
   nir_ssa_def *offset = tex->src[offset_index].src.ssa;
   nir_ssa_def *coord = tex->src[coord_index].src.ssa;

   b->cursor = nir_before_instr(&tex->instr);

   nir_ssa_def *offset_coord;
   if (nir_tex_instr_src_type(tex, coord_index) == nir_type_float) {
      if (tex->sampler_dim == GLSL_SAMPLER_DIM_RECT) {
         /* RECT coords are un-normalized texels: plain add. */
         offset_coord = nir_fadd(b, coord, nir_i2f32(b, offset));
      } else {
         /* Normalized coords: scale the texel offset by 1/size first. */
         nir_ssa_def *txs = get_texture_size(b, tex);
         nir_ssa_def *scale = nir_frcp(b, txs);

         offset_coord = nir_fadd(b, coord,
                                 nir_fmul(b,
                                          nir_i2f32(b, offset),
                                          scale));
      }
   } else {
      /* Integer coords (e.g. txf): integer add. */
      offset_coord = nir_iadd(b, coord, offset);
   }

   if (tex->is_array) {
      /* The offset is not applied to the array index */
      if (tex->coord_components == 2) {
         offset_coord = nir_vec2(b, nir_channel(b, offset_coord, 0),
                                 nir_channel(b, coord, 1));
      } else if (tex->coord_components == 3) {
         offset_coord = nir_vec3(b, nir_channel(b, offset_coord, 0),
                                 nir_channel(b, offset_coord, 1),
                                 nir_channel(b, coord, 2));
      } else {
         unreachable("Invalid number of components");
      }
   }

   nir_instr_rewrite_src(&tex->instr, &tex->src[coord_index].src,
                         nir_src_for_ssa(offset_coord));

   nir_tex_instr_remove_src(tex, offset_index);

   return true;
}
208
209 static void
210 lower_rect(nir_builder *b, nir_tex_instr *tex)
211 {
212 nir_ssa_def *txs = get_texture_size(b, tex);
213 nir_ssa_def *scale = nir_frcp(b, txs);
214
215 /* Walk through the sources normalizing the requested arguments. */
216 for (unsigned i = 0; i < tex->num_srcs; i++) {
217 if (tex->src[i].src_type != nir_tex_src_coord)
218 continue;
219
220 nir_ssa_def *coords =
221 nir_ssa_for_src(b, tex->src[i].src, tex->coord_components);
222 nir_instr_rewrite_src(&tex->instr,
223 &tex->src[i].src,
224 nir_src_for_ssa(nir_fmul(b, coords, scale)));
225 }
226
227 tex->sampler_dim = GLSL_SAMPLER_DIM_2D;
228 }
229
/* Emits a plain 2D texture sample of the given \p plane of \p tex's
 * texture, reusing all of tex's srcs plus an immediate nir_tex_src_plane
 * selector.  Used when lowering multi-planar (YUV) external textures.
 * Returns the new vec4 sample result.
 */
static nir_ssa_def *
sample_plane(nir_builder *b, nir_tex_instr *tex, int plane)
{
   assert(tex->dest.is_ssa);
   assert(nir_tex_instr_dest_size(tex) == 4);
   assert(nir_alu_type_get_base_type(tex->dest_type) == nir_type_float);
   assert(tex->op == nir_texop_tex);
   assert(tex->coord_components == 2);

   /* Copy every original src, then append the plane selector. */
   nir_tex_instr *plane_tex =
      nir_tex_instr_create(b->shader, tex->num_srcs + 1);
   for (unsigned i = 0; i < tex->num_srcs; i++) {
      nir_src_copy(&plane_tex->src[i].src, &tex->src[i].src, plane_tex);
      plane_tex->src[i].src_type = tex->src[i].src_type;
   }
   plane_tex->src[tex->num_srcs].src = nir_src_for_ssa(nir_imm_int(b, plane));
   plane_tex->src[tex->num_srcs].src_type = nir_tex_src_plane;
   plane_tex->op = nir_texop_tex;
   plane_tex->sampler_dim = GLSL_SAMPLER_DIM_2D;
   plane_tex->dest_type = nir_type_float;
   plane_tex->coord_components = 2;

   plane_tex->texture_index = tex->texture_index;
   plane_tex->sampler_index = tex->sampler_index;

   nir_ssa_dest_init(&plane_tex->instr, &plane_tex->dest, 4, 32, NULL);

   nir_builder_instr_insert(b, &plane_tex->instr);

   return &plane_tex->dest.ssa;
}
261
/* Emits the ALU conversion of one Y/U/V sample (plus alpha \p a) to RGBA
 * and rewrites all uses of \p tex's destination with the result.  The
 * bias/matrix constants look like ITU-R BT.601 video-range coefficients
 * (16/255 luma bias, 128/255 chroma bias) -- NOTE(review): standard/range
 * not stated in this file; confirm before reusing elsewhere.
 */
static void
convert_yuv_to_rgb(nir_builder *b, nir_tex_instr *tex,
                   nir_ssa_def *y, nir_ssa_def *u, nir_ssa_def *v,
                   nir_ssa_def *a)
{
   /* Rows of the YUV->RGB matrix, applied as dot products below. */
   nir_const_value m[3] = {
      { .f32 = { 1.0f,  0.0f,         1.59602678f, 0.0f } },
      { .f32 = { 1.0f, -0.39176229f, -0.81296764f, 0.0f } },
      { .f32 = { 1.0f,  2.01723214f,  0.0f,        0.0f } }
   };

   /* Bias and scale the inputs into a single vec4 (w unused by the rows). */
   nir_ssa_def *yuv =
      nir_vec4(b,
               nir_fmul(b, nir_imm_float(b, 1.16438356f),
                        nir_fadd(b, y, nir_imm_float(b, -16.0f / 255.0f))),
               nir_channel(b, nir_fadd(b, u, nir_imm_float(b, -128.0f / 255.0f)), 0),
               nir_channel(b, nir_fadd(b, v, nir_imm_float(b, -128.0f / 255.0f)), 0),
               nir_imm_float(b, 0.0));

   nir_ssa_def *red = nir_fdot4(b, yuv, nir_build_imm(b, 4, 32, m[0]));
   nir_ssa_def *green = nir_fdot4(b, yuv, nir_build_imm(b, 4, 32, m[1]));
   nir_ssa_def *blue = nir_fdot4(b, yuv, nir_build_imm(b, 4, 32, m[2]));

   nir_ssa_def *result = nir_vec4(b, red, green, blue, a);

   nir_ssa_def_rewrite_uses(&tex->dest.ssa, nir_src_for_ssa(result));
}
289
290 static void
291 lower_y_uv_external(nir_builder *b, nir_tex_instr *tex)
292 {
293 b->cursor = nir_after_instr(&tex->instr);
294
295 nir_ssa_def *y = sample_plane(b, tex, 0);
296 nir_ssa_def *uv = sample_plane(b, tex, 1);
297
298 convert_yuv_to_rgb(b, tex,
299 nir_channel(b, y, 0),
300 nir_channel(b, uv, 0),
301 nir_channel(b, uv, 1),
302 nir_imm_float(b, 1.0f));
303 }
304
305 static void
306 lower_y_u_v_external(nir_builder *b, nir_tex_instr *tex)
307 {
308 b->cursor = nir_after_instr(&tex->instr);
309
310 nir_ssa_def *y = sample_plane(b, tex, 0);
311 nir_ssa_def *u = sample_plane(b, tex, 1);
312 nir_ssa_def *v = sample_plane(b, tex, 2);
313
314 convert_yuv_to_rgb(b, tex,
315 nir_channel(b, y, 0),
316 nir_channel(b, u, 0),
317 nir_channel(b, v, 0),
318 nir_imm_float(b, 1.0f));
319 }
320
321 static void
322 lower_yx_xuxv_external(nir_builder *b, nir_tex_instr *tex)
323 {
324 b->cursor = nir_after_instr(&tex->instr);
325
326 nir_ssa_def *y = sample_plane(b, tex, 0);
327 nir_ssa_def *xuxv = sample_plane(b, tex, 1);
328
329 convert_yuv_to_rgb(b, tex,
330 nir_channel(b, y, 0),
331 nir_channel(b, xuxv, 1),
332 nir_channel(b, xuxv, 3),
333 nir_imm_float(b, 1.0f));
334 }
335
336 static void
337 lower_xy_uxvx_external(nir_builder *b, nir_tex_instr *tex)
338 {
339 b->cursor = nir_after_instr(&tex->instr);
340
341 nir_ssa_def *y = sample_plane(b, tex, 0);
342 nir_ssa_def *uxvx = sample_plane(b, tex, 1);
343
344 convert_yuv_to_rgb(b, tex,
345 nir_channel(b, y, 1),
346 nir_channel(b, uxvx, 0),
347 nir_channel(b, uxvx, 2),
348 nir_imm_float(b, 1.0f));
349 }
350
351 static void
352 lower_ayuv_external(nir_builder *b, nir_tex_instr *tex)
353 {
354 b->cursor = nir_after_instr(&tex->instr);
355
356 nir_ssa_def *ayuv = sample_plane(b, tex, 0);
357
358 convert_yuv_to_rgb(b, tex,
359 nir_channel(b, ayuv, 2),
360 nir_channel(b, ayuv, 1),
361 nir_channel(b, ayuv, 0),
362 nir_channel(b, ayuv, 3));
363 }
364
365 /*
366 * Emits a textureLod operation used to replace an existing
367 * textureGrad instruction.
368 */
369 static void
370 replace_gradient_with_lod(nir_builder *b, nir_ssa_def *lod, nir_tex_instr *tex)
371 {
372 /* We are going to emit a textureLod() with the same parameters except that
373 * we replace ddx/ddy with lod.
374 */
375 int num_srcs = tex->num_srcs - 1;
376 nir_tex_instr *txl = nir_tex_instr_create(b->shader, num_srcs);
377
378 txl->op = nir_texop_txl;
379 txl->sampler_dim = tex->sampler_dim;
380 txl->texture_index = tex->texture_index;
381 txl->dest_type = tex->dest_type;
382 txl->is_array = tex->is_array;
383 txl->is_shadow = tex->is_shadow;
384 txl->is_new_style_shadow = tex->is_new_style_shadow;
385 txl->sampler_index = tex->sampler_index;
386 txl->coord_components = tex->coord_components;
387
388 nir_ssa_dest_init(&txl->instr, &txl->dest, 4, 32, NULL);
389
390 int src_num = 0;
391 for (int i = 0; i < tex->num_srcs; i++) {
392 if (tex->src[i].src_type == nir_tex_src_ddx ||
393 tex->src[i].src_type == nir_tex_src_ddy)
394 continue;
395 nir_src_copy(&txl->src[src_num].src, &tex->src[i].src, txl);
396 txl->src[src_num].src_type = tex->src[i].src_type;
397 src_num++;
398 }
399
400 txl->src[src_num].src = nir_src_for_ssa(lod);
401 txl->src[src_num].src_type = nir_tex_src_lod;
402 src_num++;
403
404 assert(src_num == num_srcs);
405
406 nir_ssa_dest_init(&txl->instr, &txl->dest,
407 tex->dest.ssa.num_components, 32, NULL);
408 nir_builder_instr_insert(b, &txl->instr);
409
410 nir_ssa_def_rewrite_uses(&tex->dest.ssa, nir_src_for_ssa(&txl->dest.ssa));
411
412 nir_instr_remove(&tex->instr);
413 }
414
/* Lowers a cube-map txd (textureGrad) to a txl by computing an explicit
 * LOD from the gradients with the quotient rule; see the derivation in
 * the comment block below.
 */
static void
lower_gradient_cube_map(nir_builder *b, nir_tex_instr *tex)
{
   assert(tex->sampler_dim == GLSL_SAMPLER_DIM_CUBE);
   assert(tex->op == nir_texop_txd);
   assert(tex->dest.is_ssa);

   /* Use textureSize() to get the width and height of LOD 0 */
   nir_ssa_def *size = get_texture_size(b, tex);

   /* Cubemap texture lookups first generate a texture coordinate normalized
    * to [-1, 1] on the appropriate face.  The appropriate face is determined
    * by which component has largest magnitude and its sign.  The texture
    * coordinate is the quotient of the remaining texture coordinates against
    * the absolute value of the component of largest magnitude.  This
    * division requires that the computing of the derivative of the texel
    * coordinate must use the quotient rule.  The high level GLSL code is as
    * follows:
    *
    * Step 1: selection
    *
    * vec3 abs_p, Q, dQdx, dQdy;
    * abs_p = abs(ir->coordinate);
    * if (abs_p.x >= max(abs_p.y, abs_p.z)) {
    *    Q = ir->coordinate.yzx;
    *    dQdx = ir->lod_info.grad.dPdx.yzx;
    *    dQdy = ir->lod_info.grad.dPdy.yzx;
    * }
    * if (abs_p.y >= max(abs_p.x, abs_p.z)) {
    *    Q = ir->coordinate.xzy;
    *    dQdx = ir->lod_info.grad.dPdx.xzy;
    *    dQdy = ir->lod_info.grad.dPdy.xzy;
    * }
    * if (abs_p.z >= max(abs_p.x, abs_p.y)) {
    *    Q = ir->coordinate;
    *    dQdx = ir->lod_info.grad.dPdx;
    *    dQdy = ir->lod_info.grad.dPdy;
    * }
    *
    * Step 2: use quotient rule to compute derivative.  The normalized to
    * [-1, 1] texel coordinate is given by Q.xy / (sign(Q.z) * Q.z).  We are
    * only concerned with the magnitudes of the derivatives whose values are
    * not affected by the sign.  We drop the sign from the computation.
    *
    * vec2 dx, dy;
    * float recip;
    *
    * recip = 1.0 / Q.z;
    * dx = recip * ( dQdx.xy - Q.xy * (dQdx.z * recip) );
    * dy = recip * ( dQdy.xy - Q.xy * (dQdy.z * recip) );
    *
    * Step 3: compute LOD.  At this point we have the derivatives of the
    * texture coordinates normalized to [-1,1].  We take the LOD to be
    *  result = log2(max(sqrt(dot(dx, dx)), sqrt(dot(dy, dy))) * 0.5 * L)
    *         = -1.0 + log2(max(sqrt(dot(dx, dx)), sqrt(dot(dy, dy))) * L)
    *         = -1.0 + log2(sqrt(max(dot(dx, dx), dot(dy,dy))) * L)
    *         = -1.0 + log2(sqrt(L * L * max(dot(dx, dx), dot(dy,dy))))
    *         = -1.0 + 0.5 * log2(L * L * max(dot(dx, dx), dot(dy,dy)))
    * where L is the dimension of the cubemap.  The code is:
    *
    * float M, result;
    * M = max(dot(dx, dx), dot(dy, dy));
    * L = textureSize(sampler, 0).x;
    * result = -1.0 + 0.5 * log2(L * L * M);
    */

   /* coordinate */
   nir_ssa_def *p =
      tex->src[nir_tex_instr_src_index(tex, nir_tex_src_coord)].src.ssa;

   /* unmodified dPdx, dPdy values */
   nir_ssa_def *dPdx =
      tex->src[nir_tex_instr_src_index(tex, nir_tex_src_ddx)].src.ssa;
   nir_ssa_def *dPdy =
      tex->src[nir_tex_instr_src_index(tex, nir_tex_src_ddy)].src.ssa;

   nir_ssa_def *abs_p = nir_fabs(b, p);
   nir_ssa_def *abs_p_x = nir_channel(b, abs_p, 0);
   nir_ssa_def *abs_p_y = nir_channel(b, abs_p, 1);
   nir_ssa_def *abs_p_z = nir_channel(b, abs_p, 2);

   /* 1. compute selector */
   nir_ssa_def *Q, *dQdx, *dQdy;

   /* The GLSL if-chain above is flattened into nested bcsels; the z test
    * wins over y, and y over x, matching the last-writer-wins semantics.
    */
   nir_ssa_def *cond_z = nir_fge(b, abs_p_z, nir_fmax(b, abs_p_x, abs_p_y));
   nir_ssa_def *cond_y = nir_fge(b, abs_p_y, nir_fmax(b, abs_p_x, abs_p_z));

   unsigned yzx[3] = { 1, 2, 0 };
   unsigned xzy[3] = { 0, 2, 1 };

   Q = nir_bcsel(b, cond_z,
                 p,
                 nir_bcsel(b, cond_y,
                           nir_swizzle(b, p, xzy, 3, false),
                           nir_swizzle(b, p, yzx, 3, false)));

   dQdx = nir_bcsel(b, cond_z,
                    dPdx,
                    nir_bcsel(b, cond_y,
                              nir_swizzle(b, dPdx, xzy, 3, false),
                              nir_swizzle(b, dPdx, yzx, 3, false)));

   dQdy = nir_bcsel(b, cond_z,
                    dPdy,
                    nir_bcsel(b, cond_y,
                              nir_swizzle(b, dPdy, xzy, 3, false),
                              nir_swizzle(b, dPdy, yzx, 3, false)));

   /* 2. quotient rule */

   /* tmp = Q.xy * recip;
    * dx = recip * ( dQdx.xy - (tmp * dQdx.z) );
    * dy = recip * ( dQdy.xy - (tmp * dQdy.z) );
    */
   nir_ssa_def *rcp_Q_z = nir_frcp(b, nir_channel(b, Q, 2));

   nir_ssa_def *Q_xy = nir_channels(b, Q, 0x3);
   nir_ssa_def *tmp = nir_fmul(b, Q_xy, rcp_Q_z);

   nir_ssa_def *dQdx_xy = nir_channels(b, dQdx, 0x3);
   nir_ssa_def *dQdx_z = nir_channel(b, dQdx, 2);
   nir_ssa_def *dx =
      nir_fmul(b, rcp_Q_z, nir_fsub(b, dQdx_xy, nir_fmul(b, tmp, dQdx_z)));

   nir_ssa_def *dQdy_xy = nir_channels(b, dQdy, 0x3);
   nir_ssa_def *dQdy_z = nir_channel(b, dQdy, 2);
   nir_ssa_def *dy =
      nir_fmul(b, rcp_Q_z, nir_fsub(b, dQdy_xy, nir_fmul(b, tmp, dQdy_z)));

   /* M = max(dot(dx, dx), dot(dy, dy)); */
   nir_ssa_def *M = nir_fmax(b, nir_fdot(b, dx, dx), nir_fdot(b, dy, dy));

   /* size has textureSize() of LOD 0 */
   nir_ssa_def *L = nir_channel(b, size, 0);

   /* lod = -1.0 + 0.5 * log2(L * L * M); */
   nir_ssa_def *lod =
      nir_fadd(b,
               nir_imm_float(b, -1.0f),
               nir_fmul(b,
                        nir_imm_float(b, 0.5f),
                        nir_flog2(b, nir_fmul(b, L, nir_fmul(b, L, M)))));

   /* 3. Replace the gradient instruction with an equivalent lod instruction */
   replace_gradient_with_lod(b, lod, tex);
}
561
562 static void
563 lower_gradient(nir_builder *b, nir_tex_instr *tex)
564 {
565 /* Cubes are more complicated and have their own function */
566 if (tex->sampler_dim == GLSL_SAMPLER_DIM_CUBE) {
567 lower_gradient_cube_map(b, tex);
568 return;
569 }
570
571 assert(tex->sampler_dim != GLSL_SAMPLER_DIM_CUBE);
572 assert(tex->op == nir_texop_txd);
573 assert(tex->dest.is_ssa);
574
575 /* Use textureSize() to get the width and height of LOD 0 */
576 unsigned component_mask;
577 switch (tex->sampler_dim) {
578 case GLSL_SAMPLER_DIM_3D:
579 component_mask = 7;
580 break;
581 case GLSL_SAMPLER_DIM_1D:
582 component_mask = 1;
583 break;
584 default:
585 component_mask = 3;
586 break;
587 }
588
589 nir_ssa_def *size =
590 nir_channels(b, get_texture_size(b, tex), component_mask);
591
592 /* Scale the gradients by width and height. Effectively, the incoming
593 * gradients are s'(x,y), t'(x,y), and r'(x,y) from equation 3.19 in the
594 * GL 3.0 spec; we want u'(x,y), which is w_t * s'(x,y).
595 */
596 nir_ssa_def *ddx =
597 tex->src[nir_tex_instr_src_index(tex, nir_tex_src_ddx)].src.ssa;
598 nir_ssa_def *ddy =
599 tex->src[nir_tex_instr_src_index(tex, nir_tex_src_ddy)].src.ssa;
600
601 nir_ssa_def *dPdx = nir_fmul(b, ddx, size);
602 nir_ssa_def *dPdy = nir_fmul(b, ddy, size);
603
604 nir_ssa_def *rho;
605 if (dPdx->num_components == 1) {
606 rho = nir_fmax(b, nir_fabs(b, dPdx), nir_fabs(b, dPdy));
607 } else {
608 rho = nir_fmax(b,
609 nir_fsqrt(b, nir_fdot(b, dPdx, dPdx)),
610 nir_fsqrt(b, nir_fdot(b, dPdy, dPdy)));
611 }
612
613 /* lod = log2(rho). We're ignoring GL state biases for now. */
614 nir_ssa_def *lod = nir_flog2(b, rho);
615
616 /* Replace the gradient instruction with an equivalent lod instruction */
617 replace_gradient_with_lod(b, lod, tex);
618 }
619
/* Clamps the coordinate components selected by \p sat_mask (bit 0 = s/x,
 * bit 1 = t/y, bit 2 = r/z) to emulate CLAMP-style wrap modes.  For RECT
 * samplers the clamp is to [0, size] in texels; otherwise to [0, 1].  The
 * array index component is never clamped.
 */
static void
saturate_src(nir_builder *b, nir_tex_instr *tex, unsigned sat_mask)
{
   b->cursor = nir_before_instr(&tex->instr);

   /* Walk through the sources saturating the requested arguments. */
   for (unsigned i = 0; i < tex->num_srcs; i++) {
      if (tex->src[i].src_type != nir_tex_src_coord)
         continue;

      nir_ssa_def *src =
         nir_ssa_for_src(b, tex->src[i].src, tex->coord_components);

      /* split src into components: */
      nir_ssa_def *comp[4];

      assume(tex->coord_components >= 1);

      for (unsigned j = 0; j < tex->coord_components; j++)
         comp[j] = nir_channel(b, src, j);

      /* clamp requested components, array index does not get clamped: */
      unsigned ncomp = tex->coord_components;
      if (tex->is_array)
         ncomp--;

      for (unsigned j = 0; j < ncomp; j++) {
         if ((1 << j) & sat_mask) {
            if (tex->sampler_dim == GLSL_SAMPLER_DIM_RECT) {
               /* non-normalized texture coords, so clamp to texture
                * size rather than [0.0, 1.0]
                */
               nir_ssa_def *txs = get_texture_size(b, tex);
               comp[j] = nir_fmax(b, comp[j], nir_imm_float(b, 0.0));
               comp[j] = nir_fmin(b, comp[j], nir_channel(b, txs, j));
            } else {
               comp[j] = nir_fsat(b, comp[j]);
            }
         }
      }

      /* and move the result back into a single vecN: */
      src = nir_vec(b, comp, tex->coord_components);

      nir_instr_rewrite_src(&tex->instr,
                            &tex->src[i].src,
                            nir_src_for_ssa(src));
   }
}
669
670 static nir_ssa_def *
671 get_zero_or_one(nir_builder *b, nir_alu_type type, uint8_t swizzle_val)
672 {
673 nir_const_value v;
674
675 memset(&v, 0, sizeof(v));
676
677 if (swizzle_val == 4) {
678 v.u32[0] = v.u32[1] = v.u32[2] = v.u32[3] = 0;
679 } else {
680 assert(swizzle_val == 5);
681 if (type == nir_type_float)
682 v.f32[0] = v.f32[1] = v.f32[2] = v.f32[3] = 1.0;
683 else
684 v.u32[0] = v.u32[1] = v.u32[2] = v.u32[3] = 1;
685 }
686
687 return nir_build_imm(b, 4, 32, v);
688 }
689
/* Applies a GL-style result swizzle (entries 0-3 pick a channel, 4 is
 * ZERO, 5 is ONE) after \p tex and rewrites downstream uses.  tg4 returns
 * one component per texel, so for it the swizzle is folded into
 * tex->component when possible.
 */
static void
swizzle_result(nir_builder *b, nir_tex_instr *tex, const uint8_t swizzle[4])
{
   assert(tex->dest.is_ssa);

   b->cursor = nir_after_instr(&tex->instr);

   nir_ssa_def *swizzled;
   if (tex->op == nir_texop_tg4) {
      if (swizzle[tex->component] < 4) {
         /* This one's easy */
         tex->component = swizzle[tex->component];
         return;
      } else {
         /* Gathered component maps to ZERO or ONE: constant result. */
         swizzled = get_zero_or_one(b, tex->dest_type, swizzle[tex->component]);
      }
   } else {
      assert(nir_tex_instr_dest_size(tex) == 4);
      if (swizzle[0] < 4 && swizzle[1] < 4 &&
          swizzle[2] < 4 && swizzle[3] < 4) {
         unsigned swiz[4] = { swizzle[0], swizzle[1], swizzle[2], swizzle[3] };
         /* We have no 0s or 1s, just emit a swizzling MOV */
         swizzled = nir_swizzle(b, &tex->dest.ssa, swiz, 4, false);
      } else {
         /* Mixed channels and ZERO/ONE: build the vec4 channel by channel. */
         nir_ssa_def *srcs[4];
         for (unsigned i = 0; i < 4; i++) {
            if (swizzle[i] < 4) {
               srcs[i] = nir_channel(b, &tex->dest.ssa, swizzle[i]);
            } else {
               srcs[i] = get_zero_or_one(b, tex->dest_type, swizzle[i]);
            }
         }
         swizzled = nir_vec(b, srcs, 4);
      }
   }

   /* Rewrite uses *after* the swizzle so the swizzle itself still reads
    * the original texture result.
    */
   nir_ssa_def_rewrite_uses_after(&tex->dest.ssa, nir_src_for_ssa(swizzled),
                                  swizzled->parent_instr);
}
729
730 static void
731 linearize_srgb_result(nir_builder *b, nir_tex_instr *tex)
732 {
733 assert(tex->dest.is_ssa);
734 assert(nir_tex_instr_dest_size(tex) == 4);
735 assert(nir_alu_type_get_base_type(tex->dest_type) == nir_type_float);
736
737 b->cursor = nir_after_instr(&tex->instr);
738
739 nir_ssa_def *rgb =
740 nir_format_srgb_to_linear(b, nir_channels(b, &tex->dest.ssa, 0x7));
741
742 /* alpha is untouched: */
743 nir_ssa_def *result = nir_vec4(b,
744 nir_channel(b, rgb, 0),
745 nir_channel(b, rgb, 1),
746 nir_channel(b, rgb, 2),
747 nir_channel(b, &tex->dest.ssa, 3));
748
749 nir_ssa_def_rewrite_uses_after(&tex->dest.ssa, nir_src_for_ssa(result),
750 result->parent_instr);
751 }
752
/* Applies every enabled lowering from \p options to the tex instructions
 * in \p block.  The steps run in a fixed order because several depend on
 * each other (e.g. projection before clamping, swizzle before sRGB).
 * Returns true if anything changed.
 */
static bool
nir_lower_tex_block(nir_block *block, nir_builder *b,
                    const nir_lower_tex_options *options)
{
   bool progress = false;

   nir_foreach_instr_safe(instr, block) {
      if (instr->type != nir_instr_type_tex)
         continue;

      nir_tex_instr *tex = nir_instr_as_tex(instr);
      /* lower_txp is a per-sampler-dim bitmask. */
      bool lower_txp = !!(options->lower_txp & (1 << tex->sampler_dim));

      /* mask of src coords to saturate (clamp): */
      unsigned sat_mask = 0;

      /* saturate_{r,t,s} are per-sampler-index bitmasks. */
      if ((1 << tex->sampler_index) & options->saturate_r)
         sat_mask |= (1 << 2);    /* .z */
      if ((1 << tex->sampler_index) & options->saturate_t)
         sat_mask |= (1 << 1);    /* .y */
      if ((1 << tex->sampler_index) & options->saturate_s)
         sat_mask |= (1 << 0);    /* .x */

      /* If we are clamping any coords, we must lower projector first
       * as clamping happens *after* projection:
       */
      if (lower_txp || sat_mask) {
         project_src(b, tex);
         progress = true;
      }

      if ((tex->op == nir_texop_txf && options->lower_txf_offset) ||
          (sat_mask && nir_tex_instr_src_index(tex, nir_tex_src_coord) >= 0) ||
          (tex->sampler_dim == GLSL_SAMPLER_DIM_RECT &&
           options->lower_rect_offset)) {
         progress = lower_offset(b, tex) || progress;
      }

      if ((tex->sampler_dim == GLSL_SAMPLER_DIM_RECT) && options->lower_rect) {
         lower_rect(b, tex);
         progress = true;
      }

      /* The lower_*_external options are per-texture-index bitmasks. */
      if ((1 << tex->texture_index) & options->lower_y_uv_external) {
         lower_y_uv_external(b, tex);
         progress = true;
      }

      if ((1 << tex->texture_index) & options->lower_y_u_v_external) {
         lower_y_u_v_external(b, tex);
         progress = true;
      }

      if ((1 << tex->texture_index) & options->lower_yx_xuxv_external) {
         lower_yx_xuxv_external(b, tex);
         progress = true;
      }

      if ((1 << tex->texture_index) & options->lower_xy_uxvx_external) {
         lower_xy_uxvx_external(b, tex);
         progress = true;
      }

      if ((1 << tex->texture_index) & options->lower_ayuv_external) {
         lower_ayuv_external(b, tex);
         progress = true;
      }

      /* Must come after projection lowering (see above). */
      if (sat_mask) {
         saturate_src(b, tex, sat_mask);
         progress = true;
      }

      /* Queries and new-style shadow results aren't swizzled. */
      if (((1 << tex->texture_index) & options->swizzle_result) &&
          !nir_tex_instr_is_query(tex) &&
          !(tex->is_shadow && tex->is_new_style_shadow)) {
         swizzle_result(b, tex, options->swizzles[tex->texture_index]);
         progress = true;
      }

      /* should be after swizzle so we know which channels are rgb: */
      if (((1 << tex->texture_index) & options->lower_srgb) &&
          !nir_tex_instr_is_query(tex) && !tex->is_shadow) {
         linearize_srgb_result(b, tex);
         progress = true;
      }

      if (tex->op == nir_texop_txd &&
          (options->lower_txd ||
           (options->lower_txd_shadow && tex->is_shadow) ||
           (options->lower_txd_cube_map &&
            tex->sampler_dim == GLSL_SAMPLER_DIM_CUBE))) {
         lower_gradient(b, tex);
         progress = true;
         continue;
      }

      /* TXF, TXS and TXL require a LOD but not everything we implement using those
       * three opcodes provides one.  Provide a default LOD of 0.
       */
      if ((nir_tex_instr_src_index(tex, nir_tex_src_lod) == -1) &&
          (tex->op == nir_texop_txf || tex->op == nir_texop_txs ||
           tex->op == nir_texop_txl || tex->op == nir_texop_query_levels ||
           (tex->op == nir_texop_tex &&
            b->shader->info.stage != MESA_SHADER_FRAGMENT))) {
         b->cursor = nir_before_instr(&tex->instr);
         nir_tex_instr_add_src(tex, nir_tex_src_lod, nir_src_for_ssa(nir_imm_int(b, 0)));
         progress = true;
         continue;
      }
   }

   return progress;
}
867
868 static bool
869 nir_lower_tex_impl(nir_function_impl *impl,
870 const nir_lower_tex_options *options)
871 {
872 bool progress = false;
873 nir_builder builder;
874 nir_builder_init(&builder, impl);
875
876 nir_foreach_block(block, impl) {
877 progress |= nir_lower_tex_block(block, &builder, options);
878 }
879
880 nir_metadata_preserve(impl, nir_metadata_block_index |
881 nir_metadata_dominance);
882 return progress;
883 }
884
885 bool
886 nir_lower_tex(nir_shader *shader, const nir_lower_tex_options *options)
887 {
888 bool progress = false;
889
890 nir_foreach_function(function, shader) {
891 if (function->impl)
892 progress |= nir_lower_tex_impl(function->impl, options);
893 }
894
895 return progress;
896 }