nir/lower_tex: Always copy deref and offset sources
[mesa.git] / src / compiler / nir / nir_lower_tex.c
/*
 * Copyright © 2015 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

/*
 * This lowering pass supports (as configured via nir_lower_tex_options)
 * various texture related conversions:
 *   + texture projector lowering: converts the coordinate division for
 *     texture projection to be done in ALU instructions instead of
 *     asking the texture operation to do so.
 *   + lowering RECT: converts the un-normalized RECT texture coordinates
 *     to normalized coordinates with txs plus ALU instructions
 *   + saturate s/t/r coords: to emulate certain texture clamp/wrap modes,
 *     inserts instructions to clamp specified coordinates to [0.0, 1.0].
 *     Note that this automatically triggers texture projector lowering if
 *     needed, since clamping must happen after projector lowering.
 */
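
/*
 * Drivers run this pass by filling in a nir_lower_tex_options struct and
 * calling nir_lower_tex().  As a rough sketch (the option values below are
 * purely illustrative and not a recommendation for any particular driver):
 *
 *    nir_lower_tex_options opts = { 0 };
 *    opts.lower_txp = ~0u;     (lower projectors for every sampler dim)
 *    opts.lower_rect = true;   (normalize RECT coordinates via txs)
 *    opts.saturate_s = 0x1;    (clamp the s coordinate for sampler 0)
 *
 *    bool progress = nir_lower_tex(shader, &opts);
 */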

#include "nir.h"
#include "nir_builder.h"

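/* Lower texture projection: textureProj() behaves like texture() with every
 * coordinate (and the shadow comparator) divided by the projector component,
 * roughly texture(s, P.xy / P.w) for a 2D lookup.  We multiply by a
 * reciprocal instead of dividing, leave any array index untouched, and then
 * drop the projector source.
 */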
static void
project_src(nir_builder *b, nir_tex_instr *tex)
{
   /* Find the projector in the srcs list, if present. */
   int proj_index = nir_tex_instr_src_index(tex, nir_tex_src_projector);
   if (proj_index < 0)
      return;

   b->cursor = nir_before_instr(&tex->instr);

   nir_ssa_def *inv_proj =
      nir_frcp(b, nir_ssa_for_src(b, tex->src[proj_index].src, 1));

   /* Walk through the sources projecting the arguments. */
   for (unsigned i = 0; i < tex->num_srcs; i++) {
      switch (tex->src[i].src_type) {
      case nir_tex_src_coord:
      case nir_tex_src_comparator:
         break;
      default:
         continue;
      }
      nir_ssa_def *unprojected =
         nir_ssa_for_src(b, tex->src[i].src, nir_tex_instr_src_size(tex, i));
      nir_ssa_def *projected = nir_fmul(b, unprojected, inv_proj);

      /* Array indices don't get projected, so make a new vector with the
       * coordinate's array index untouched.
       */
      if (tex->is_array && tex->src[i].src_type == nir_tex_src_coord) {
         switch (tex->coord_components) {
         case 4:
            projected = nir_vec4(b,
                                 nir_channel(b, projected, 0),
                                 nir_channel(b, projected, 1),
                                 nir_channel(b, projected, 2),
                                 nir_channel(b, unprojected, 3));
            break;
         case 3:
            projected = nir_vec3(b,
                                 nir_channel(b, projected, 0),
                                 nir_channel(b, projected, 1),
                                 nir_channel(b, unprojected, 2));
            break;
         case 2:
            projected = nir_vec2(b,
                                 nir_channel(b, projected, 0),
                                 nir_channel(b, unprojected, 1));
            break;
         default:
            unreachable("bad texture coord count for array");
            break;
         }
      }

      nir_instr_rewrite_src(&tex->instr,
                            &tex->src[i].src,
                            nir_src_for_ssa(projected));
   }

   nir_tex_instr_remove_src(tex, proj_index);
}

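/* Emit a txs (textureSize) query for the texture referenced by this
 * instruction, cloning its texture/sampler derefs and any texture/sampler
 * offset sources and using an explicit LOD of 0.  The integer result is
 * converted to float, since callers use it to scale coordinates.
 */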
static nir_ssa_def *
get_texture_size(nir_builder *b, nir_tex_instr *tex)
{
   b->cursor = nir_before_instr(&tex->instr);

   nir_tex_instr *txs;

   unsigned num_srcs = 1; /* One for the LOD */
   for (unsigned i = 0; i < tex->num_srcs; i++) {
      if (tex->src[i].src_type == nir_tex_src_texture_deref ||
          tex->src[i].src_type == nir_tex_src_sampler_deref ||
          tex->src[i].src_type == nir_tex_src_texture_offset ||
          tex->src[i].src_type == nir_tex_src_sampler_offset)
         num_srcs++;
   }

   txs = nir_tex_instr_create(b->shader, num_srcs);
   txs->op = nir_texop_txs;
   txs->sampler_dim = tex->sampler_dim;
   txs->is_array = tex->is_array;
   txs->is_shadow = tex->is_shadow;
   txs->is_new_style_shadow = tex->is_new_style_shadow;
   txs->texture_index = tex->texture_index;
   txs->texture = nir_deref_var_clone(tex->texture, txs);
   txs->sampler_index = tex->sampler_index;
   txs->sampler = nir_deref_var_clone(tex->sampler, txs);
   txs->dest_type = nir_type_int;

   unsigned idx = 0;
   for (unsigned i = 0; i < tex->num_srcs; i++) {
      if (tex->src[i].src_type == nir_tex_src_texture_deref ||
          tex->src[i].src_type == nir_tex_src_sampler_deref ||
          tex->src[i].src_type == nir_tex_src_texture_offset ||
          tex->src[i].src_type == nir_tex_src_sampler_offset) {
         nir_src_copy(&txs->src[idx].src, &tex->src[i].src, txs);
         txs->src[idx].src_type = tex->src[i].src_type;
         idx++;
      }
   }
   /* Add in an LOD because some back-ends require it */
   txs->src[idx].src = nir_src_for_ssa(nir_imm_int(b, 0));
   txs->src[idx].src_type = nir_tex_src_lod;

   nir_ssa_dest_init(&txs->instr, &txs->dest,
                     nir_tex_instr_dest_size(txs), 32, NULL);
   nir_builder_instr_insert(b, &txs->instr);

   return nir_i2f32(b, &txs->dest.ssa);
}

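/* Fold a texel-offset source directly into the coordinate.  For float
 * (normalized) coordinates the offset is first scaled by 1/textureSize;
 * RECT and integer (txf-style) coordinates add the offset as-is.  The
 * array index, if any, is left untouched.
 */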
static bool
lower_offset(nir_builder *b, nir_tex_instr *tex)
{
   int offset_index = nir_tex_instr_src_index(tex, nir_tex_src_offset);
   if (offset_index < 0)
      return false;

   int coord_index = nir_tex_instr_src_index(tex, nir_tex_src_coord);
   assert(coord_index >= 0);

   assert(tex->src[offset_index].src.is_ssa);
   assert(tex->src[coord_index].src.is_ssa);
   nir_ssa_def *offset = tex->src[offset_index].src.ssa;
   nir_ssa_def *coord = tex->src[coord_index].src.ssa;

   b->cursor = nir_before_instr(&tex->instr);

   nir_ssa_def *offset_coord;
   if (nir_tex_instr_src_type(tex, coord_index) == nir_type_float) {
      if (tex->sampler_dim == GLSL_SAMPLER_DIM_RECT) {
         offset_coord = nir_fadd(b, coord, nir_i2f32(b, offset));
      } else {
         nir_ssa_def *txs = get_texture_size(b, tex);
         nir_ssa_def *scale = nir_frcp(b, txs);

         offset_coord = nir_fadd(b, coord,
                                 nir_fmul(b,
                                          nir_i2f32(b, offset),
                                          scale));
      }
   } else {
      offset_coord = nir_iadd(b, coord, offset);
   }

   if (tex->is_array) {
      /* The offset is not applied to the array index */
      if (tex->coord_components == 2) {
         offset_coord = nir_vec2(b, nir_channel(b, offset_coord, 0),
                                 nir_channel(b, coord, 1));
      } else if (tex->coord_components == 3) {
         offset_coord = nir_vec3(b, nir_channel(b, offset_coord, 0),
                                 nir_channel(b, offset_coord, 1),
                                 nir_channel(b, coord, 2));
      } else {
         unreachable("Invalid number of components");
      }
   }

   nir_instr_rewrite_src(&tex->instr, &tex->src[coord_index].src,
                         nir_src_for_ssa(offset_coord));

   nir_tex_instr_remove_src(tex, offset_index);

   return true;
}

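/* Lower RECT sampling to a regular 2D lookup by normalizing the
 * un-normalized coordinates, conceptually something like
 *
 *    texture2DRect(s, uv)  ->  texture2D(s, uv / textureSize(s, 0))
 *
 * implemented as a multiply by the reciprocal of the txs result.
 */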
static void
lower_rect(nir_builder *b, nir_tex_instr *tex)
{
   nir_ssa_def *txs = get_texture_size(b, tex);
   nir_ssa_def *scale = nir_frcp(b, txs);

   /* Walk through the sources normalizing the requested arguments. */
   for (unsigned i = 0; i < tex->num_srcs; i++) {
      if (tex->src[i].src_type != nir_tex_src_coord)
         continue;

      nir_ssa_def *coords =
         nir_ssa_for_src(b, tex->src[i].src, tex->coord_components);
      nir_instr_rewrite_src(&tex->instr,
                            &tex->src[i].src,
                            nir_src_for_ssa(nir_fmul(b, coords, scale)));
   }

   tex->sampler_dim = GLSL_SAMPLER_DIM_2D;
}

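/* Emit a copy of the given 2D float texturing instruction that reads from one
 * plane of a multi-planar texture by appending a nir_tex_src_plane source.
 * Used by the lower_*_external helpers below.
 */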
static nir_ssa_def *
sample_plane(nir_builder *b, nir_tex_instr *tex, int plane)
{
   assert(tex->dest.is_ssa);
   assert(nir_tex_instr_dest_size(tex) == 4);
   assert(nir_alu_type_get_base_type(tex->dest_type) == nir_type_float);
   assert(tex->op == nir_texop_tex);
   assert(tex->coord_components == 2);

   nir_tex_instr *plane_tex =
      nir_tex_instr_create(b->shader, tex->num_srcs + 1);
   for (unsigned i = 0; i < tex->num_srcs; i++) {
      nir_src_copy(&plane_tex->src[i].src, &tex->src[i].src, plane_tex);
      plane_tex->src[i].src_type = tex->src[i].src_type;
   }
   plane_tex->src[tex->num_srcs].src = nir_src_for_ssa(nir_imm_int(b, plane));
   plane_tex->src[tex->num_srcs].src_type = nir_tex_src_plane;
   plane_tex->op = nir_texop_tex;
   plane_tex->sampler_dim = GLSL_SAMPLER_DIM_2D;
   plane_tex->dest_type = nir_type_float;
   plane_tex->coord_components = 2;

   plane_tex->texture_index = tex->texture_index;
   plane_tex->texture = nir_deref_var_clone(tex->texture, plane_tex);
   plane_tex->sampler_index = tex->sampler_index;
   plane_tex->sampler = nir_deref_var_clone(tex->sampler, plane_tex);

   nir_ssa_dest_init(&plane_tex->instr, &plane_tex->dest, 4, 32, NULL);

   nir_builder_instr_insert(b, &plane_tex->instr);

   return &plane_tex->dest.ssa;
}

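/* Convert sampled Y'CbCr values to RGBA.  The constants below match the
 * familiar BT.601 limited-range conversion, i.e. roughly
 *
 *    [R]   [1.164   0.000   1.596] [Y - 16/255 ]
 *    [G] = [1.164  -0.392  -0.813] [U - 128/255]
 *    [B]   [1.164   2.017   0.000] [V - 128/255]
 *
 * with the 1.164 luma scale folded into the Y term of the yuv vector up
 * front.  Alpha is forced to 1.0.
 */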
static void
convert_yuv_to_rgb(nir_builder *b, nir_tex_instr *tex,
                   nir_ssa_def *y, nir_ssa_def *u, nir_ssa_def *v)
{
   nir_const_value m[3] = {
      { .f32 = { 1.0f, 0.0f, 1.59602678f, 0.0f } },
      { .f32 = { 1.0f, -0.39176229f, -0.81296764f, 0.0f } },
      { .f32 = { 1.0f, 2.01723214f, 0.0f, 0.0f } }
   };

   nir_ssa_def *yuv =
      nir_vec4(b,
               nir_fmul(b, nir_imm_float(b, 1.16438356f),
                        nir_fadd(b, y, nir_imm_float(b, -16.0f / 255.0f))),
               nir_channel(b, nir_fadd(b, u, nir_imm_float(b, -128.0f / 255.0f)), 0),
               nir_channel(b, nir_fadd(b, v, nir_imm_float(b, -128.0f / 255.0f)), 0),
               nir_imm_float(b, 0.0));

   nir_ssa_def *red = nir_fdot4(b, yuv, nir_build_imm(b, 4, 32, m[0]));
   nir_ssa_def *green = nir_fdot4(b, yuv, nir_build_imm(b, 4, 32, m[1]));
   nir_ssa_def *blue = nir_fdot4(b, yuv, nir_build_imm(b, 4, 32, m[2]));

   nir_ssa_def *result = nir_vec4(b, red, green, blue, nir_imm_float(b, 1.0f));

   nir_ssa_def_rewrite_uses(&tex->dest.ssa, nir_src_for_ssa(result));
}

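/* The lower_*_external helpers below sample each plane of a multi-planar
 * texture (e.g. an imported EGLImage) separately and recombine the channels
 * with convert_yuv_to_rgb().  Which helper applies depends on how the driver
 * exposes the planes, e.g. a full-resolution Y plane plus an interleaved UV
 * plane for lower_y_uv_external.
 */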
static void
lower_y_uv_external(nir_builder *b, nir_tex_instr *tex)
{
   b->cursor = nir_after_instr(&tex->instr);

   nir_ssa_def *y = sample_plane(b, tex, 0);
   nir_ssa_def *uv = sample_plane(b, tex, 1);

   convert_yuv_to_rgb(b, tex,
                      nir_channel(b, y, 0),
                      nir_channel(b, uv, 0),
                      nir_channel(b, uv, 1));
}

static void
lower_y_u_v_external(nir_builder *b, nir_tex_instr *tex)
{
   b->cursor = nir_after_instr(&tex->instr);

   nir_ssa_def *y = sample_plane(b, tex, 0);
   nir_ssa_def *u = sample_plane(b, tex, 1);
   nir_ssa_def *v = sample_plane(b, tex, 2);

   convert_yuv_to_rgb(b, tex,
                      nir_channel(b, y, 0),
                      nir_channel(b, u, 0),
                      nir_channel(b, v, 0));
}

static void
lower_yx_xuxv_external(nir_builder *b, nir_tex_instr *tex)
{
   b->cursor = nir_after_instr(&tex->instr);

   nir_ssa_def *y = sample_plane(b, tex, 0);
   nir_ssa_def *xuxv = sample_plane(b, tex, 1);

   convert_yuv_to_rgb(b, tex,
                      nir_channel(b, y, 0),
                      nir_channel(b, xuxv, 1),
                      nir_channel(b, xuxv, 3));
}

static void
lower_xy_uxvx_external(nir_builder *b, nir_tex_instr *tex)
{
   b->cursor = nir_after_instr(&tex->instr);

   nir_ssa_def *y = sample_plane(b, tex, 0);
   nir_ssa_def *uxvx = sample_plane(b, tex, 1);

   convert_yuv_to_rgb(b, tex,
                      nir_channel(b, y, 1),
                      nir_channel(b, uxvx, 0),
                      nir_channel(b, uxvx, 2));
}

/*
 * Emits a textureLod operation used to replace an existing
 * textureGrad instruction.
 */
static void
replace_gradient_with_lod(nir_builder *b, nir_ssa_def *lod, nir_tex_instr *tex)
{
   /* We are going to emit a textureLod() with the same parameters except that
    * we replace ddx/ddy with lod.
    */
   int num_srcs = tex->num_srcs - 1;
   nir_tex_instr *txl = nir_tex_instr_create(b->shader, num_srcs);

   txl->op = nir_texop_txl;
   txl->sampler_dim = tex->sampler_dim;
   txl->texture_index = tex->texture_index;
   txl->dest_type = tex->dest_type;
   txl->is_array = tex->is_array;
   txl->is_shadow = tex->is_shadow;
   txl->is_new_style_shadow = tex->is_new_style_shadow;
   txl->sampler_index = tex->sampler_index;
   txl->texture = nir_deref_var_clone(tex->texture, txl);
   txl->sampler = nir_deref_var_clone(tex->sampler, txl);
   txl->coord_components = tex->coord_components;

   nir_ssa_dest_init(&txl->instr, &txl->dest, 4, 32, NULL);

   int src_num = 0;
   for (int i = 0; i < tex->num_srcs; i++) {
      if (tex->src[i].src_type == nir_tex_src_ddx ||
          tex->src[i].src_type == nir_tex_src_ddy)
         continue;
      nir_src_copy(&txl->src[src_num].src, &tex->src[i].src, txl);
      txl->src[src_num].src_type = tex->src[i].src_type;
      src_num++;
   }

   txl->src[src_num].src = nir_src_for_ssa(lod);
   txl->src[src_num].src_type = nir_tex_src_lod;
   src_num++;

   assert(src_num == num_srcs);

   nir_ssa_dest_init(&txl->instr, &txl->dest,
                     tex->dest.ssa.num_components, 32, NULL);
   nir_builder_instr_insert(b, &txl->instr);

   nir_ssa_def_rewrite_uses(&tex->dest.ssa, nir_src_for_ssa(&txl->dest.ssa));

   nir_instr_remove(&tex->instr);
}

static void
lower_gradient_cube_map(nir_builder *b, nir_tex_instr *tex)
{
   assert(tex->sampler_dim == GLSL_SAMPLER_DIM_CUBE);
   assert(tex->op == nir_texop_txd);
   assert(tex->dest.is_ssa);

   /* Use textureSize() to get the width and height of LOD 0 */
   nir_ssa_def *size = get_texture_size(b, tex);

   /* Cubemap texture lookups first generate a texture coordinate normalized
    * to [-1, 1] on the appropriate face.  The appropriate face is determined
    * by which component has the largest magnitude and its sign.  The texture
    * coordinate is the quotient of the remaining texture coordinates against
    * the absolute value of the component of largest magnitude.  This
    * division means that computing the derivative of the texel coordinate
    * requires the quotient rule.  The high level GLSL code is as follows:
    *
    * Step 1: selection
    *
    * vec3 abs_p, Q, dQdx, dQdy;
    * abs_p = abs(ir->coordinate);
    * if (abs_p.x >= max(abs_p.y, abs_p.z)) {
    *    Q = ir->coordinate.yzx;
    *    dQdx = ir->lod_info.grad.dPdx.yzx;
    *    dQdy = ir->lod_info.grad.dPdy.yzx;
    * }
    * if (abs_p.y >= max(abs_p.x, abs_p.z)) {
    *    Q = ir->coordinate.xzy;
    *    dQdx = ir->lod_info.grad.dPdx.xzy;
    *    dQdy = ir->lod_info.grad.dPdy.xzy;
    * }
    * if (abs_p.z >= max(abs_p.x, abs_p.y)) {
    *    Q = ir->coordinate;
    *    dQdx = ir->lod_info.grad.dPdx;
    *    dQdy = ir->lod_info.grad.dPdy;
    * }
    *
    * Step 2: use the quotient rule to compute the derivative.  The texel
    * coordinate, normalized to [-1, 1], is given by Q.xy / (sign(Q.z) * Q.z).
    * We are only concerned with the magnitudes of the derivatives, which are
    * not affected by the sign, so we drop the sign from the computation.
    *
    * vec2 dx, dy;
    * float recip;
    *
    * recip = 1.0 / Q.z;
    * dx = recip * ( dQdx.xy - Q.xy * (dQdx.z * recip) );
    * dy = recip * ( dQdy.xy - Q.xy * (dQdy.z * recip) );
    *
    * Step 3: compute the LOD.  At this point we have the derivatives of the
    * texture coordinates normalized to [-1, 1].  We take the LOD to be
    *  result = log2(max(sqrt(dot(dx, dx)), sqrt(dot(dy, dy))) * 0.5 * L)
    *         = -1.0 + log2(max(sqrt(dot(dx, dx)), sqrt(dot(dy, dy))) * L)
    *         = -1.0 + log2(sqrt(max(dot(dx, dx), dot(dy, dy))) * L)
    *         = -1.0 + log2(sqrt(L * L * max(dot(dx, dx), dot(dy, dy))))
    *         = -1.0 + 0.5 * log2(L * L * max(dot(dx, dx), dot(dy, dy)))
    * where L is the dimension of the cubemap.  The code is:
    *
    * float M, result;
    * M = max(dot(dx, dx), dot(dy, dy));
    * L = textureSize(sampler, 0).x;
    * result = -1.0 + 0.5 * log2(L * L * M);
    */

   /* coordinate */
   nir_ssa_def *p =
      tex->src[nir_tex_instr_src_index(tex, nir_tex_src_coord)].src.ssa;

   /* unmodified dPdx, dPdy values */
   nir_ssa_def *dPdx =
      tex->src[nir_tex_instr_src_index(tex, nir_tex_src_ddx)].src.ssa;
   nir_ssa_def *dPdy =
      tex->src[nir_tex_instr_src_index(tex, nir_tex_src_ddy)].src.ssa;

   nir_ssa_def *abs_p = nir_fabs(b, p);
   nir_ssa_def *abs_p_x = nir_channel(b, abs_p, 0);
   nir_ssa_def *abs_p_y = nir_channel(b, abs_p, 1);
   nir_ssa_def *abs_p_z = nir_channel(b, abs_p, 2);

   /* 1. compute selector */
   nir_ssa_def *Q, *dQdx, *dQdy;

   nir_ssa_def *cond_z = nir_fge(b, abs_p_z, nir_fmax(b, abs_p_x, abs_p_y));
   nir_ssa_def *cond_y = nir_fge(b, abs_p_y, nir_fmax(b, abs_p_x, abs_p_z));

   unsigned yzx[4] = { 1, 2, 0, 0 };
   unsigned xzy[4] = { 0, 2, 1, 0 };

   Q = nir_bcsel(b, cond_z,
                 p,
                 nir_bcsel(b, cond_y,
                           nir_swizzle(b, p, xzy, 3, false),
                           nir_swizzle(b, p, yzx, 3, false)));

   dQdx = nir_bcsel(b, cond_z,
                    dPdx,
                    nir_bcsel(b, cond_y,
                              nir_swizzle(b, dPdx, xzy, 3, false),
                              nir_swizzle(b, dPdx, yzx, 3, false)));

   dQdy = nir_bcsel(b, cond_z,
                    dPdy,
                    nir_bcsel(b, cond_y,
                              nir_swizzle(b, dPdy, xzy, 3, false),
                              nir_swizzle(b, dPdy, yzx, 3, false)));

   /* 2. quotient rule */

   /* tmp = Q.xy * recip;
    * dx = recip * ( dQdx.xy - (tmp * dQdx.z) );
    * dy = recip * ( dQdy.xy - (tmp * dQdy.z) );
    */
   nir_ssa_def *rcp_Q_z = nir_frcp(b, nir_channel(b, Q, 2));

   unsigned xy[4] = { 0, 1, 0, 0 };
   nir_ssa_def *Q_xy = nir_swizzle(b, Q, xy, 2, false);
   nir_ssa_def *tmp = nir_fmul(b, Q_xy, rcp_Q_z);

   nir_ssa_def *dQdx_xy = nir_swizzle(b, dQdx, xy, 2, false);
   nir_ssa_def *dQdx_z = nir_channel(b, dQdx, 2);
   nir_ssa_def *dx =
      nir_fmul(b, rcp_Q_z, nir_fsub(b, dQdx_xy, nir_fmul(b, tmp, dQdx_z)));

   nir_ssa_def *dQdy_xy = nir_swizzle(b, dQdy, xy, 2, false);
   nir_ssa_def *dQdy_z = nir_channel(b, dQdy, 2);
   nir_ssa_def *dy =
      nir_fmul(b, rcp_Q_z, nir_fsub(b, dQdy_xy, nir_fmul(b, tmp, dQdy_z)));

   /* M = max(dot(dx, dx), dot(dy, dy)); */
   nir_ssa_def *M = nir_fmax(b, nir_fdot(b, dx, dx), nir_fdot(b, dy, dy));

   /* size has textureSize() of LOD 0 */
   nir_ssa_def *L = nir_channel(b, size, 0);

   /* lod = -1.0 + 0.5 * log2(L * L * M); */
   nir_ssa_def *lod =
      nir_fadd(b,
               nir_imm_float(b, -1.0f),
               nir_fmul(b,
                        nir_imm_float(b, 0.5f),
                        nir_flog2(b, nir_fmul(b, L, nir_fmul(b, L, M)))));

   /* 3. Replace the gradient instruction with an equivalent lod instruction */
   replace_gradient_with_lod(b, lod, tex);
}

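/* Lower non-cubemap textureGrad() to an explicit-LOD lookup: scale ddx/ddy
 * into texel space using the LOD 0 size and take lod = log2(rho), where rho
 * is the larger of the two scaled gradient magnitudes.
 */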
static void
lower_gradient(nir_builder *b, nir_tex_instr *tex)
{
   assert(tex->sampler_dim != GLSL_SAMPLER_DIM_CUBE);
   assert(tex->op == nir_texop_txd);
   assert(tex->dest.is_ssa);

   /* Use textureSize() to get the width and height of LOD 0 */
   unsigned component_mask;
   switch (tex->sampler_dim) {
   case GLSL_SAMPLER_DIM_3D:
      component_mask = 7;
      break;
   case GLSL_SAMPLER_DIM_1D:
      component_mask = 1;
      break;
   default:
      component_mask = 3;
      break;
   }

   nir_ssa_def *size =
      nir_channels(b, get_texture_size(b, tex), component_mask);

   /* Scale the gradients by width and height.  Effectively, the incoming
    * gradients are s'(x,y), t'(x,y), and r'(x,y) from equation 3.19 in the
    * GL 3.0 spec; we want u'(x,y), which is w_t * s'(x,y).
    */
   nir_ssa_def *ddx =
      tex->src[nir_tex_instr_src_index(tex, nir_tex_src_ddx)].src.ssa;
   nir_ssa_def *ddy =
      tex->src[nir_tex_instr_src_index(tex, nir_tex_src_ddy)].src.ssa;

   nir_ssa_def *dPdx = nir_fmul(b, ddx, size);
   nir_ssa_def *dPdy = nir_fmul(b, ddy, size);

   nir_ssa_def *rho;
   if (dPdx->num_components == 1) {
      rho = nir_fmax(b, nir_fabs(b, dPdx), nir_fabs(b, dPdy));
   } else {
      rho = nir_fmax(b,
                     nir_fsqrt(b, nir_fdot(b, dPdx, dPdx)),
                     nir_fsqrt(b, nir_fdot(b, dPdy, dPdy)));
   }

   /* lod = log2(rho).  We're ignoring GL state biases for now. */
   nir_ssa_def *lod = nir_flog2(b, rho);

   /* Replace the gradient instruction with an equivalent lod instruction */
   replace_gradient_with_lod(b, lod, tex);
}

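/* Clamp the coordinate components selected by sat_mask: to [0.0, 1.0] for
 * normalized samplers, or to [0, textureSize] for RECT.  The array index is
 * never clamped.
 */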
static void
saturate_src(nir_builder *b, nir_tex_instr *tex, unsigned sat_mask)
{
   b->cursor = nir_before_instr(&tex->instr);

   /* Walk through the sources saturating the requested arguments. */
   for (unsigned i = 0; i < tex->num_srcs; i++) {
      if (tex->src[i].src_type != nir_tex_src_coord)
         continue;

      nir_ssa_def *src =
         nir_ssa_for_src(b, tex->src[i].src, tex->coord_components);

      /* split src into components: */
      nir_ssa_def *comp[4];

      assume(tex->coord_components >= 1);

      for (unsigned j = 0; j < tex->coord_components; j++)
         comp[j] = nir_channel(b, src, j);

      /* clamp requested components, array index does not get clamped: */
      unsigned ncomp = tex->coord_components;
      if (tex->is_array)
         ncomp--;

      for (unsigned j = 0; j < ncomp; j++) {
         if ((1 << j) & sat_mask) {
            if (tex->sampler_dim == GLSL_SAMPLER_DIM_RECT) {
               /* non-normalized texture coords, so clamp to texture
                * size rather than [0.0, 1.0]
                */
               nir_ssa_def *txs = get_texture_size(b, tex);
               comp[j] = nir_fmax(b, comp[j], nir_imm_float(b, 0.0));
               comp[j] = nir_fmin(b, comp[j], nir_channel(b, txs, j));
            } else {
               comp[j] = nir_fsat(b, comp[j]);
            }
         }
      }

      /* and move the result back into a single vecN: */
      src = nir_vec(b, comp, tex->coord_components);

      nir_instr_rewrite_src(&tex->instr,
                            &tex->src[i].src,
                            nir_src_for_ssa(src));
   }
}

static nir_ssa_def *
get_zero_or_one(nir_builder *b, nir_alu_type type, uint8_t swizzle_val)
{
   nir_const_value v;

   memset(&v, 0, sizeof(v));

   if (swizzle_val == 4) {
      v.u32[0] = v.u32[1] = v.u32[2] = v.u32[3] = 0;
   } else {
      assert(swizzle_val == 5);
      if (type == nir_type_float)
         v.f32[0] = v.f32[1] = v.f32[2] = v.f32[3] = 1.0;
      else
         v.u32[0] = v.u32[1] = v.u32[2] = v.u32[3] = 1;
   }

   return nir_build_imm(b, 4, 32, v);
}

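/* Apply a GL-style texture swizzle to the destination: values 0-3 select a
 * channel of the original result, while 4 and 5 select constant zero or one
 * (see get_zero_or_one above).  tg4 gathers a single component, so for it we
 * simply rewrite tex->component when the swizzle selects a real channel.
 */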
static void
swizzle_result(nir_builder *b, nir_tex_instr *tex, const uint8_t swizzle[4])
{
   assert(tex->dest.is_ssa);

   b->cursor = nir_after_instr(&tex->instr);

   nir_ssa_def *swizzled;
   if (tex->op == nir_texop_tg4) {
      if (swizzle[tex->component] < 4) {
         /* This one's easy */
         tex->component = swizzle[tex->component];
         return;
      } else {
         swizzled = get_zero_or_one(b, tex->dest_type, swizzle[tex->component]);
      }
   } else {
      assert(nir_tex_instr_dest_size(tex) == 4);
      if (swizzle[0] < 4 && swizzle[1] < 4 &&
          swizzle[2] < 4 && swizzle[3] < 4) {
         unsigned swiz[4] = { swizzle[0], swizzle[1], swizzle[2], swizzle[3] };
         /* We have no 0s or 1s, just emit a swizzling MOV */
         swizzled = nir_swizzle(b, &tex->dest.ssa, swiz, 4, false);
      } else {
         nir_ssa_def *srcs[4];
         for (unsigned i = 0; i < 4; i++) {
            if (swizzle[i] < 4) {
               srcs[i] = nir_channel(b, &tex->dest.ssa, swizzle[i]);
            } else {
               srcs[i] = get_zero_or_one(b, tex->dest_type, swizzle[i]);
            }
         }
         swizzled = nir_vec(b, srcs, 4);
      }
   }

   nir_ssa_def_rewrite_uses_after(&tex->dest.ssa, nir_src_for_ssa(swizzled),
                                  swizzled->parent_instr);
}

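/* Convert an sRGB-encoded result to linear by applying the standard sRGB
 * decode to the rgb channels; alpha is passed through unchanged.
 */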
static void
linearize_srgb_result(nir_builder *b, nir_tex_instr *tex)
{
   assert(tex->dest.is_ssa);
   assert(nir_tex_instr_dest_size(tex) == 4);
   assert(nir_alu_type_get_base_type(tex->dest_type) == nir_type_float);

   b->cursor = nir_after_instr(&tex->instr);

   static const unsigned swiz[4] = {0, 1, 2, 0};
   nir_ssa_def *comp = nir_swizzle(b, &tex->dest.ssa, swiz, 3, true);

   /* Formula is:
    *    (comp <= 0.04045) ?
    *          (comp / 12.92) :
    *          pow((comp + 0.055) / 1.055, 2.4)
    */
   nir_ssa_def *low = nir_fmul(b, comp, nir_imm_float(b, 1.0 / 12.92));
   nir_ssa_def *high = nir_fpow(b,
                                nir_fmul(b,
                                         nir_fadd(b,
                                                  comp,
                                                  nir_imm_float(b, 0.055)),
                                         nir_imm_float(b, 1.0 / 1.055)),
                                nir_imm_float(b, 2.4));
   nir_ssa_def *cond = nir_fge(b, nir_imm_float(b, 0.04045), comp);
   nir_ssa_def *rgb = nir_bcsel(b, cond, low, high);

   /* alpha is untouched: */
   nir_ssa_def *result = nir_vec4(b,
                                  nir_channel(b, rgb, 0),
                                  nir_channel(b, rgb, 1),
                                  nir_channel(b, rgb, 2),
                                  nir_channel(b, &tex->dest.ssa, 3));

   nir_ssa_def_rewrite_uses_after(&tex->dest.ssa, nir_src_for_ssa(result),
                                  result->parent_instr);
}

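/* Apply the lowerings requested in nir_lower_tex_options to every tex
 * instruction in a block.  Ordering matters: projectors are lowered before
 * offset folding and saturation, swizzling happens before sRGB
 * linearization, and gradient lowering or default-LOD insertion comes last.
 */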
static bool
nir_lower_tex_block(nir_block *block, nir_builder *b,
                    const nir_lower_tex_options *options)
{
   bool progress = false;

   nir_foreach_instr_safe(instr, block) {
      if (instr->type != nir_instr_type_tex)
         continue;

      nir_tex_instr *tex = nir_instr_as_tex(instr);
      bool lower_txp = !!(options->lower_txp & (1 << tex->sampler_dim));

      /* mask of src coords to saturate (clamp): */
      unsigned sat_mask = 0;

      if ((1 << tex->sampler_index) & options->saturate_r)
         sat_mask |= (1 << 2);    /* .z */
      if ((1 << tex->sampler_index) & options->saturate_t)
         sat_mask |= (1 << 1);    /* .y */
      if ((1 << tex->sampler_index) & options->saturate_s)
         sat_mask |= (1 << 0);    /* .x */

      /* If we are clamping any coords, we must lower projector first
       * as clamping happens *after* projection:
       */
      if (lower_txp || sat_mask) {
         project_src(b, tex);
         progress = true;
      }

      if ((tex->op == nir_texop_txf && options->lower_txf_offset) ||
          (sat_mask && nir_tex_instr_src_index(tex, nir_tex_src_coord) >= 0) ||
          (tex->sampler_dim == GLSL_SAMPLER_DIM_RECT &&
           options->lower_rect_offset)) {
         progress = lower_offset(b, tex) || progress;
      }

      if ((tex->sampler_dim == GLSL_SAMPLER_DIM_RECT) && options->lower_rect) {
         lower_rect(b, tex);
         progress = true;
      }

      if ((1 << tex->texture_index) & options->lower_y_uv_external) {
         lower_y_uv_external(b, tex);
         progress = true;
      }

      if ((1 << tex->texture_index) & options->lower_y_u_v_external) {
         lower_y_u_v_external(b, tex);
         progress = true;
      }

      if ((1 << tex->texture_index) & options->lower_yx_xuxv_external) {
         lower_yx_xuxv_external(b, tex);
         progress = true;
      }

      if ((1 << tex->texture_index) & options->lower_xy_uxvx_external) {
         lower_xy_uxvx_external(b, tex);
         progress = true;
      }

      if (sat_mask) {
         saturate_src(b, tex, sat_mask);
         progress = true;
      }

      if (((1 << tex->texture_index) & options->swizzle_result) &&
          !nir_tex_instr_is_query(tex) &&
          !(tex->is_shadow && tex->is_new_style_shadow)) {
         swizzle_result(b, tex, options->swizzles[tex->texture_index]);
         progress = true;
      }

      /* should be after swizzle so we know which channels are rgb: */
      if (((1 << tex->texture_index) & options->lower_srgb) &&
          !nir_tex_instr_is_query(tex) && !tex->is_shadow) {
         linearize_srgb_result(b, tex);
         progress = true;
      }

      if (tex->op == nir_texop_txd &&
          tex->sampler_dim == GLSL_SAMPLER_DIM_CUBE &&
          (options->lower_txd ||
           options->lower_txd_cube_map ||
           (tex->is_shadow && options->lower_txd_shadow))) {
         lower_gradient_cube_map(b, tex);
         progress = true;
         continue;
      }

      if (tex->op == nir_texop_txd &&
          (options->lower_txd ||
           (options->lower_txd_shadow &&
            tex->is_shadow && tex->sampler_dim != GLSL_SAMPLER_DIM_CUBE))) {
         lower_gradient(b, tex);
         progress = true;
         continue;
      }

      /* TXF, TXS and TXL require a LOD but not everything we implement using
       * those three opcodes provides one.  Provide a default LOD of 0.
       */
      if ((nir_tex_instr_src_index(tex, nir_tex_src_lod) == -1) &&
          (tex->op == nir_texop_txf || tex->op == nir_texop_txs ||
           tex->op == nir_texop_txl || tex->op == nir_texop_query_levels ||
           (tex->op == nir_texop_tex &&
            b->shader->info.stage != MESA_SHADER_FRAGMENT))) {
         b->cursor = nir_before_instr(&tex->instr);
         nir_tex_instr_add_src(tex, nir_tex_src_lod, nir_src_for_ssa(nir_imm_int(b, 0)));
         progress = true;
         continue;
      }
   }

   return progress;
}

static bool
nir_lower_tex_impl(nir_function_impl *impl,
                   const nir_lower_tex_options *options)
{
   bool progress = false;
   nir_builder builder;
   nir_builder_init(&builder, impl);

   nir_foreach_block(block, impl) {
      progress |= nir_lower_tex_block(block, &builder, options);
   }

   nir_metadata_preserve(impl, nir_metadata_block_index |
                               nir_metadata_dominance);
   return progress;
}

bool
nir_lower_tex(nir_shader *shader, const nir_lower_tex_options *options)
{
   bool progress = false;

   nir_foreach_function(function, shader) {
      if (function->impl)
         progress |= nir_lower_tex_impl(function->impl, options);
   }

   return progress;
}