nir: Validate jump instructions as an instruction type
[mesa.git] / src / compiler / nir / nir_lower_tex.c
1 /*
2 * Copyright © 2015 Broadcom
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 /*
25 * This lowering pass supports (as configured via nir_lower_tex_options)
26 * various texture related conversions:
27 * + texture projector lowering: converts the coordinate division for
28 * texture projection to be done in ALU instructions instead of
29 * asking the texture operation to do so.
30 * + lowering RECT: converts the un-normalized RECT texture coordinates
31 * to normalized coordinates with txs plus ALU instructions
32 * + saturate s/t/r coords: to emulate certain texture clamp/wrap modes,
33 * inserts instructions to clamp specified coordinates to [0.0, 1.0].
34 * Note that this automatically triggers texture projector lowering if
35 * needed, since clamping must happen after projector lowering.
36 */
37
38 #include "nir.h"
39 #include "nir_builder.h"
40 #include "nir_builtin_builder.h"
41 #include "nir_format_convert.h"
42
/* Lowers texture projection: divides the coordinate (and any shadow
 * comparator) by the nir_tex_src_projector source using explicit ALU
 * instructions, then removes the projector source from the instruction.
 *
 * Returns true if a projector source was present and lowered.
 */
static bool
project_src(nir_builder *b, nir_tex_instr *tex)
{
   /* Find the projector in the srcs list, if present. */
   int proj_index = nir_tex_instr_src_index(tex, nir_tex_src_projector);
   if (proj_index < 0)
      return false;

   b->cursor = nir_before_instr(&tex->instr);

   /* Compute 1/proj once and multiply each source by it, rather than
    * emitting a divide per projected source.
    */
   nir_ssa_def *inv_proj =
      nir_frcp(b, nir_ssa_for_src(b, tex->src[proj_index].src, 1));

   /* Walk through the sources projecting the arguments. */
   for (unsigned i = 0; i < tex->num_srcs; i++) {
      /* Only the coordinate and the shadow comparator are projected. */
      switch (tex->src[i].src_type) {
      case nir_tex_src_coord:
      case nir_tex_src_comparator:
         break;
      default:
         continue;
      }
      nir_ssa_def *unprojected =
         nir_ssa_for_src(b, tex->src[i].src, nir_tex_instr_src_size(tex, i));
      nir_ssa_def *projected = nir_fmul(b, unprojected, inv_proj);

      /* Array indices don't get projected, so make an new vector with the
       * coordinate's array index untouched.
       */
      if (tex->is_array && tex->src[i].src_type == nir_tex_src_coord) {
         switch (tex->coord_components) {
         case 4:
            projected = nir_vec4(b,
                                 nir_channel(b, projected, 0),
                                 nir_channel(b, projected, 1),
                                 nir_channel(b, projected, 2),
                                 nir_channel(b, unprojected, 3));
            break;
         case 3:
            projected = nir_vec3(b,
                                 nir_channel(b, projected, 0),
                                 nir_channel(b, projected, 1),
                                 nir_channel(b, unprojected, 2));
            break;
         case 2:
            projected = nir_vec2(b,
                                 nir_channel(b, projected, 0),
                                 nir_channel(b, unprojected, 1));
            break;
         default:
            unreachable("bad texture coord count for array");
            break;
         }
      }

      nir_instr_rewrite_src(&tex->instr,
                            &tex->src[i].src,
                            nir_src_for_ssa(projected));
   }

   nir_tex_instr_remove_src(tex, proj_index);
   return true;
}
106
/* Folds the nir_tex_src_offset source into the coordinate source.  Float
 * coordinates get the offset converted into coordinate space (scaled by
 * 1/texture-size, except for RECT samplers whose coordinates are already
 * in texels); integer coordinates get a plain add.  The array index
 * component, if any, is left untouched.
 *
 * Returns true if an offset source was present and lowered.
 */
static bool
lower_offset(nir_builder *b, nir_tex_instr *tex)
{
   int offset_index = nir_tex_instr_src_index(tex, nir_tex_src_offset);
   if (offset_index < 0)
      return false;

   int coord_index = nir_tex_instr_src_index(tex, nir_tex_src_coord);
   assert(coord_index >= 0);

   assert(tex->src[offset_index].src.is_ssa);
   assert(tex->src[coord_index].src.is_ssa);
   nir_ssa_def *offset = tex->src[offset_index].src.ssa;
   nir_ssa_def *coord = tex->src[coord_index].src.ssa;

   b->cursor = nir_before_instr(&tex->instr);

   nir_ssa_def *offset_coord;
   if (nir_tex_instr_src_type(tex, coord_index) == nir_type_float) {
      if (tex->sampler_dim == GLSL_SAMPLER_DIM_RECT) {
         /* RECT coords are un-normalized texels: add directly. */
         offset_coord = nir_fadd(b, coord, nir_i2f32(b, offset));
      } else {
         /* Normalized coords: scale the texel offset by 1/size first. */
         nir_ssa_def *txs = nir_i2f32(b, nir_get_texture_size(b, tex));
         nir_ssa_def *scale = nir_frcp(b, txs);

         offset_coord = nir_fadd(b, coord,
                                 nir_fmul(b,
                                          nir_i2f32(b, offset),
                                          scale));
      }
   } else {
      /* Integer coords (e.g. txf): plain texel-space addition. */
      offset_coord = nir_iadd(b, coord, offset);
   }

   if (tex->is_array) {
      /* The offset is not applied to the array index */
      if (tex->coord_components == 2) {
         offset_coord = nir_vec2(b, nir_channel(b, offset_coord, 0),
                                 nir_channel(b, coord, 1));
      } else if (tex->coord_components == 3) {
         offset_coord = nir_vec3(b, nir_channel(b, offset_coord, 0),
                                 nir_channel(b, offset_coord, 1),
                                 nir_channel(b, coord, 2));
      } else {
         unreachable("Invalid number of components");
      }
   }

   nir_instr_rewrite_src(&tex->instr, &tex->src[coord_index].src,
                         nir_src_for_ssa(offset_coord));

   nir_tex_instr_remove_src(tex, offset_index);

   return true;
}
162
163 static void
164 lower_rect(nir_builder *b, nir_tex_instr *tex)
165 {
166 /* Set the sampler_dim to 2D here so that get_texture_size picks up the
167 * right dimensionality.
168 */
169 tex->sampler_dim = GLSL_SAMPLER_DIM_2D;
170
171 nir_ssa_def *txs = nir_i2f32(b, nir_get_texture_size(b, tex));
172 nir_ssa_def *scale = nir_frcp(b, txs);
173
174 /* Walk through the sources normalizing the requested arguments. */
175 for (unsigned i = 0; i < tex->num_srcs; i++) {
176 if (tex->src[i].src_type != nir_tex_src_coord)
177 continue;
178
179 nir_ssa_def *coords =
180 nir_ssa_for_src(b, tex->src[i].src, tex->coord_components);
181 nir_instr_rewrite_src(&tex->instr,
182 &tex->src[i].src,
183 nir_src_for_ssa(nir_fmul(b, coords, scale)));
184 }
185 }
186
/* Lowers an implicit-LOD tex/txb into an explicit-LOD txl: queries the
 * LOD that would have been used (nir_get_texture_lod), folds in any bias
 * source, clamps against any min_lod source, then rewrites the op to txl
 * with that LOD.
 */
static void
lower_implicit_lod(nir_builder *b, nir_tex_instr *tex)
{
   assert(tex->op == nir_texop_tex || tex->op == nir_texop_txb);
   assert(nir_tex_instr_src_index(tex, nir_tex_src_lod) < 0);
   assert(nir_tex_instr_src_index(tex, nir_tex_src_ddx) < 0);
   assert(nir_tex_instr_src_index(tex, nir_tex_src_ddy) < 0);

   b->cursor = nir_before_instr(&tex->instr);

   nir_ssa_def *lod = nir_get_texture_lod(b, tex);

   int bias_idx = nir_tex_instr_src_index(tex, nir_tex_src_bias);
   if (bias_idx >= 0) {
      /* If we have a bias, add it in */
      lod = nir_fadd(b, lod, nir_ssa_for_src(b, tex->src[bias_idx].src, 1));
      nir_tex_instr_remove_src(tex, bias_idx);
   }

   int min_lod_idx = nir_tex_instr_src_index(tex, nir_tex_src_min_lod);
   if (min_lod_idx >= 0) {
      /* If we have a minimum LOD, clamp LOD accordingly */
      lod = nir_fmax(b, lod, nir_ssa_for_src(b, tex->src[min_lod_idx].src, 1));
      nir_tex_instr_remove_src(tex, min_lod_idx);
   }

   nir_tex_instr_add_src(tex, nir_tex_src_lod, nir_src_for_ssa(lod));
   tex->op = nir_texop_txl;
}
216
/* Emits a clone of @tex that samples the given plane of a multi-planar
 * texture, by copying all sources and appending a nir_tex_src_plane
 * selector.  Returns the clone's vec4 result, multiplied by
 * options->scale_factors[texture_index] when that factor is non-zero.
 */
static nir_ssa_def *
sample_plane(nir_builder *b, nir_tex_instr *tex, int plane,
             const nir_lower_tex_options *options)
{
   assert(tex->dest.is_ssa);
   assert(nir_tex_instr_dest_size(tex) == 4);
   assert(nir_alu_type_get_base_type(tex->dest_type) == nir_type_float);
   assert(tex->op == nir_texop_tex);
   assert(tex->coord_components == 2);

   nir_tex_instr *plane_tex =
      nir_tex_instr_create(b->shader, tex->num_srcs + 1);
   /* Clone all of the original sources... */
   for (unsigned i = 0; i < tex->num_srcs; i++) {
      nir_src_copy(&plane_tex->src[i].src, &tex->src[i].src, plane_tex);
      plane_tex->src[i].src_type = tex->src[i].src_type;
   }
   /* ...plus the plane selector as the extra source. */
   plane_tex->src[tex->num_srcs].src = nir_src_for_ssa(nir_imm_int(b, plane));
   plane_tex->src[tex->num_srcs].src_type = nir_tex_src_plane;
   plane_tex->op = nir_texop_tex;
   plane_tex->sampler_dim = GLSL_SAMPLER_DIM_2D;
   plane_tex->dest_type = nir_type_float;
   plane_tex->coord_components = 2;

   plane_tex->texture_index = tex->texture_index;
   plane_tex->sampler_index = tex->sampler_index;

   nir_ssa_dest_init(&plane_tex->instr, &plane_tex->dest, 4, 32, NULL);

   nir_builder_instr_insert(b, &plane_tex->instr);

   /* If scaling_factor is set, return a scaled value. */
   if (options->scale_factors[tex->texture_index])
      return nir_fmul_imm(b, &plane_tex->dest.ssa,
                          options->scale_factors[tex->texture_index]);

   return &plane_tex->dest.ssa;
}
254
/* Emits result = y*m[0] + u*m[1] + v*m[2] + offset (with @a passed
 * through as alpha) and rewrites all uses of the tex result with it.
 * m[i] holds the per-output-channel (RGBA) coefficients for input
 * channel i.
 *
 * NOTE(review): the coefficients look like an ITU-R BT.601 limited-range
 * YCbCr->RGB conversion -- confirm before reusing for other encodings.
 */
static void
convert_yuv_to_rgb(nir_builder *b, nir_tex_instr *tex,
                   nir_ssa_def *y, nir_ssa_def *u, nir_ssa_def *v,
                   nir_ssa_def *a)
{
   nir_const_value m[3][4] = {
      { { .f32 = 1.16438356f }, { .f32 =  1.16438356f }, { .f32 = 1.16438356f }, { .f32 = 0.0f } },
      { { .f32 = 0.0f         }, { .f32 = -0.39176229f }, { .f32 = 2.01723214f }, { .f32 = 0.0f } },
      { { .f32 = 1.59602678f }, { .f32 = -0.81296764f }, { .f32 = 0.0f         }, { .f32 = 0.0f } },
   };

   /* Constant offsets fold the -16/128 channel biases into the matrix;
    * the w component carries the caller-supplied alpha.
    */
   nir_ssa_def *offset =
      nir_vec4(b,
               nir_imm_float(b, -0.874202214f),
               nir_imm_float(b,  0.531667820f),
               nir_imm_float(b, -1.085630787f),
               a);

   nir_ssa_def *result =
      nir_ffma(b, y, nir_build_imm(b, 4, 32, m[0]),
                  nir_ffma(b, u, nir_build_imm(b, 4, 32, m[1]),
                              nir_ffma(b, v, nir_build_imm(b, 4, 32, m[2]),
                                          offset)));

   nir_ssa_def_rewrite_uses(&tex->dest.ssa, nir_src_for_ssa(result));
}
281
282 static void
283 lower_y_uv_external(nir_builder *b, nir_tex_instr *tex,
284 const nir_lower_tex_options *options)
285 {
286 b->cursor = nir_after_instr(&tex->instr);
287
288 nir_ssa_def *y = sample_plane(b, tex, 0, options);
289 nir_ssa_def *uv = sample_plane(b, tex, 1, options);
290
291 convert_yuv_to_rgb(b, tex,
292 nir_channel(b, y, 0),
293 nir_channel(b, uv, 0),
294 nir_channel(b, uv, 1),
295 nir_imm_float(b, 1.0f));
296 }
297
298 static void
299 lower_y_u_v_external(nir_builder *b, nir_tex_instr *tex,
300 const nir_lower_tex_options *options)
301 {
302 b->cursor = nir_after_instr(&tex->instr);
303
304 nir_ssa_def *y = sample_plane(b, tex, 0, options);
305 nir_ssa_def *u = sample_plane(b, tex, 1, options);
306 nir_ssa_def *v = sample_plane(b, tex, 2, options);
307
308 convert_yuv_to_rgb(b, tex,
309 nir_channel(b, y, 0),
310 nir_channel(b, u, 0),
311 nir_channel(b, v, 0),
312 nir_imm_float(b, 1.0f));
313 }
314
315 static void
316 lower_yx_xuxv_external(nir_builder *b, nir_tex_instr *tex,
317 const nir_lower_tex_options *options)
318 {
319 b->cursor = nir_after_instr(&tex->instr);
320
321 nir_ssa_def *y = sample_plane(b, tex, 0, options);
322 nir_ssa_def *xuxv = sample_plane(b, tex, 1, options);
323
324 convert_yuv_to_rgb(b, tex,
325 nir_channel(b, y, 0),
326 nir_channel(b, xuxv, 1),
327 nir_channel(b, xuxv, 3),
328 nir_imm_float(b, 1.0f));
329 }
330
331 static void
332 lower_xy_uxvx_external(nir_builder *b, nir_tex_instr *tex,
333 const nir_lower_tex_options *options)
334 {
335 b->cursor = nir_after_instr(&tex->instr);
336
337 nir_ssa_def *y = sample_plane(b, tex, 0, options);
338 nir_ssa_def *uxvx = sample_plane(b, tex, 1, options);
339
340 convert_yuv_to_rgb(b, tex,
341 nir_channel(b, y, 1),
342 nir_channel(b, uxvx, 0),
343 nir_channel(b, uxvx, 2),
344 nir_imm_float(b, 1.0f));
345 }
346
347 static void
348 lower_ayuv_external(nir_builder *b, nir_tex_instr *tex,
349 const nir_lower_tex_options *options)
350 {
351 b->cursor = nir_after_instr(&tex->instr);
352
353 nir_ssa_def *ayuv = sample_plane(b, tex, 0, options);
354
355 convert_yuv_to_rgb(b, tex,
356 nir_channel(b, ayuv, 2),
357 nir_channel(b, ayuv, 1),
358 nir_channel(b, ayuv, 0),
359 nir_channel(b, ayuv, 3));
360 }
361
362 static void
363 lower_xyuv_external(nir_builder *b, nir_tex_instr *tex,
364 const nir_lower_tex_options *options)
365 {
366 b->cursor = nir_after_instr(&tex->instr);
367
368 nir_ssa_def *xyuv = sample_plane(b, tex, 0, options);
369
370 convert_yuv_to_rgb(b, tex,
371 nir_channel(b, xyuv, 2),
372 nir_channel(b, xyuv, 1),
373 nir_channel(b, xyuv, 0),
374 nir_imm_float(b, 1.0f));
375 }
376
/*
 * Converts a nir_texop_txd instruction to nir_texop_txl with the given lod
 * computed from the gradients.  The ddx/ddy sources (already consumed by
 * the LOD computation) are removed, and any min_lod source is folded in
 * as a clamp on the LOD.
 */
static void
replace_gradient_with_lod(nir_builder *b, nir_ssa_def *lod, nir_tex_instr *tex)
{
   assert(tex->op == nir_texop_txd);

   nir_tex_instr_remove_src(tex, nir_tex_instr_src_index(tex, nir_tex_src_ddx));
   nir_tex_instr_remove_src(tex, nir_tex_instr_src_index(tex, nir_tex_src_ddy));

   int min_lod_idx = nir_tex_instr_src_index(tex, nir_tex_src_min_lod);
   if (min_lod_idx >= 0) {
      /* If we have a minimum LOD, clamp LOD accordingly */
      lod = nir_fmax(b, lod, nir_ssa_for_src(b, tex->src[min_lod_idx].src, 1));
      nir_tex_instr_remove_src(tex, min_lod_idx);
   }

   nir_tex_instr_add_src(tex, nir_tex_src_lod, nir_src_for_ssa(lod));
   tex->op = nir_texop_txl;
}
399
/* Lowers txd on cube maps to txl by computing an explicit LOD from the
 * gradients.  The derivation (face selection, quotient rule, LOD
 * formula) is spelled out in the long comment below.
 */
static void
lower_gradient_cube_map(nir_builder *b, nir_tex_instr *tex)
{
   assert(tex->sampler_dim == GLSL_SAMPLER_DIM_CUBE);
   assert(tex->op == nir_texop_txd);
   assert(tex->dest.is_ssa);

   /* Use textureSize() to get the width and height of LOD 0 */
   nir_ssa_def *size = nir_i2f32(b, nir_get_texture_size(b, tex));

   /* Cubemap texture lookups first generate a texture coordinate normalized
    * to [-1, 1] on the appropiate face. The appropiate face is determined
    * by which component has largest magnitude and its sign. The texture
    * coordinate is the quotient of the remaining texture coordinates against
    * that absolute value of the component of largest magnitude. This
    * division requires that the computing of the derivative of the texel
    * coordinate must use the quotient rule. The high level GLSL code is as
    * follows:
    *
    * Step 1: selection
    *
    * vec3 abs_p, Q, dQdx, dQdy;
    * abs_p = abs(ir->coordinate);
    * if (abs_p.x >= max(abs_p.y, abs_p.z)) {
    *    Q = ir->coordinate.yzx;
    *    dQdx = ir->lod_info.grad.dPdx.yzx;
    *    dQdy = ir->lod_info.grad.dPdy.yzx;
    * }
    * if (abs_p.y >= max(abs_p.x, abs_p.z)) {
    *    Q = ir->coordinate.xzy;
    *    dQdx = ir->lod_info.grad.dPdx.xzy;
    *    dQdy = ir->lod_info.grad.dPdy.xzy;
    * }
    * if (abs_p.z >= max(abs_p.x, abs_p.y)) {
    *    Q = ir->coordinate;
    *    dQdx = ir->lod_info.grad.dPdx;
    *    dQdy = ir->lod_info.grad.dPdy;
    * }
    *
    * Step 2: use quotient rule to compute derivative. The normalized to
    * [-1, 1] texel coordinate is given by Q.xy / (sign(Q.z) * Q.z). We are
    * only concerned with the magnitudes of the derivatives whose values are
    * not affected by the sign. We drop the sign from the computation.
    *
    * vec2 dx, dy;
    * float recip;
    *
    * recip = 1.0 / Q.z;
    * dx = recip * ( dQdx.xy - Q.xy * (dQdx.z * recip) );
    * dy = recip * ( dQdy.xy - Q.xy * (dQdy.z * recip) );
    *
    * Step 3: compute LOD. At this point we have the derivatives of the
    * texture coordinates normalized to [-1,1]. We take the LOD to be
    *  result = log2(max(sqrt(dot(dx, dx)), sqrt(dy, dy)) * 0.5 * L)
    *         = -1.0 + log2(max(sqrt(dot(dx, dx)), sqrt(dy, dy)) * L)
    *         = -1.0 + log2(sqrt(max(dot(dx, dx), dot(dy,dy))) * L)
    *         = -1.0 + log2(sqrt(L * L * max(dot(dx, dx), dot(dy,dy))))
    *         = -1.0 + 0.5 * log2(L * L * max(dot(dx, dx), dot(dy,dy)))
    * where L is the dimension of the cubemap. The code is:
    *
    * float M, result;
    * M = max(dot(dx, dx), dot(dy, dy));
    * L = textureSize(sampler, 0).x;
    * result = -1.0 + 0.5 * log2(L * L * M);
    */

   /* coordinate */
   nir_ssa_def *p =
      tex->src[nir_tex_instr_src_index(tex, nir_tex_src_coord)].src.ssa;

   /* unmodified dPdx, dPdy values */
   nir_ssa_def *dPdx =
      tex->src[nir_tex_instr_src_index(tex, nir_tex_src_ddx)].src.ssa;
   nir_ssa_def *dPdy =
      tex->src[nir_tex_instr_src_index(tex, nir_tex_src_ddy)].src.ssa;

   nir_ssa_def *abs_p = nir_fabs(b, p);
   nir_ssa_def *abs_p_x = nir_channel(b, abs_p, 0);
   nir_ssa_def *abs_p_y = nir_channel(b, abs_p, 1);
   nir_ssa_def *abs_p_z = nir_channel(b, abs_p, 2);

   /* 1. compute selector */
   nir_ssa_def *Q, *dQdx, *dQdy;

   /* The nested bcsels below implement the three-way face selection from
    * Step 1: z wins, then y, else x.
    */
   nir_ssa_def *cond_z = nir_fge(b, abs_p_z, nir_fmax(b, abs_p_x, abs_p_y));
   nir_ssa_def *cond_y = nir_fge(b, abs_p_y, nir_fmax(b, abs_p_x, abs_p_z));

   unsigned yzx[3] = { 1, 2, 0 };
   unsigned xzy[3] = { 0, 2, 1 };

   Q = nir_bcsel(b, cond_z,
                 p,
                 nir_bcsel(b, cond_y,
                           nir_swizzle(b, p, xzy, 3),
                           nir_swizzle(b, p, yzx, 3)));

   dQdx = nir_bcsel(b, cond_z,
                    dPdx,
                    nir_bcsel(b, cond_y,
                              nir_swizzle(b, dPdx, xzy, 3),
                              nir_swizzle(b, dPdx, yzx, 3)));

   dQdy = nir_bcsel(b, cond_z,
                    dPdy,
                    nir_bcsel(b, cond_y,
                              nir_swizzle(b, dPdy, xzy, 3),
                              nir_swizzle(b, dPdy, yzx, 3)));

   /* 2. quotient rule */

   /* tmp = Q.xy * recip;
    * dx = recip * ( dQdx.xy - (tmp * dQdx.z) );
    * dy = recip * ( dQdy.xy - (tmp * dQdy.z) );
    */
   nir_ssa_def *rcp_Q_z = nir_frcp(b, nir_channel(b, Q, 2));

   nir_ssa_def *Q_xy = nir_channels(b, Q, 0x3);
   nir_ssa_def *tmp = nir_fmul(b, Q_xy, rcp_Q_z);

   nir_ssa_def *dQdx_xy = nir_channels(b, dQdx, 0x3);
   nir_ssa_def *dQdx_z = nir_channel(b, dQdx, 2);
   nir_ssa_def *dx =
      nir_fmul(b, rcp_Q_z, nir_fsub(b, dQdx_xy, nir_fmul(b, tmp, dQdx_z)));

   nir_ssa_def *dQdy_xy = nir_channels(b, dQdy, 0x3);
   nir_ssa_def *dQdy_z = nir_channel(b, dQdy, 2);
   nir_ssa_def *dy =
      nir_fmul(b, rcp_Q_z, nir_fsub(b, dQdy_xy, nir_fmul(b, tmp, dQdy_z)));

   /* M = max(dot(dx, dx), dot(dy, dy)); */
   nir_ssa_def *M = nir_fmax(b, nir_fdot(b, dx, dx), nir_fdot(b, dy, dy));

   /* size has textureSize() of LOD 0 */
   nir_ssa_def *L = nir_channel(b, size, 0);

   /* lod = -1.0 + 0.5 * log2(L * L * M); */
   nir_ssa_def *lod =
      nir_fadd(b,
               nir_imm_float(b, -1.0f),
               nir_fmul(b,
                        nir_imm_float(b, 0.5f),
                        nir_flog2(b, nir_fmul(b, L, nir_fmul(b, L, M)))));

   /* 3. Replace the gradient instruction with an equivalent lod instruction */
   replace_gradient_with_lod(b, lod, tex);
}
546
/* Lowers txd (explicit gradients) to txl by computing the LOD from the
 * gradients: scale the gradients into texel space and take log2 of the
 * larger gradient magnitude (rho), following the GL 3.0 mipmap LOD
 * selection equations.  Cube maps are dispatched to their own function.
 */
static void
lower_gradient(nir_builder *b, nir_tex_instr *tex)
{
   /* Cubes are more complicated and have their own function */
   if (tex->sampler_dim == GLSL_SAMPLER_DIM_CUBE) {
      lower_gradient_cube_map(b, tex);
      return;
   }

   assert(tex->sampler_dim != GLSL_SAMPLER_DIM_CUBE);
   assert(tex->op == nir_texop_txd);
   assert(tex->dest.is_ssa);

   /* Use textureSize() to get the width and height of LOD 0 */
   unsigned component_mask;
   switch (tex->sampler_dim) {
   case GLSL_SAMPLER_DIM_3D:
      component_mask = 7;
      break;
   case GLSL_SAMPLER_DIM_1D:
      component_mask = 1;
      break;
   default:
      component_mask = 3;
      break;
   }

   nir_ssa_def *size =
      nir_channels(b, nir_i2f32(b, nir_get_texture_size(b, tex)),
                   component_mask);

   /* Scale the gradients by width and height.  Effectively, the incoming
    * gradients are s'(x,y), t'(x,y), and r'(x,y) from equation 3.19 in the
    * GL 3.0 spec; we want u'(x,y), which is w_t * s'(x,y).
    */
   nir_ssa_def *ddx =
      tex->src[nir_tex_instr_src_index(tex, nir_tex_src_ddx)].src.ssa;
   nir_ssa_def *ddy =
      tex->src[nir_tex_instr_src_index(tex, nir_tex_src_ddy)].src.ssa;

   nir_ssa_def *dPdx = nir_fmul(b, ddx, size);
   nir_ssa_def *dPdy = nir_fmul(b, ddy, size);

   /* rho: the larger of the two scaled gradient lengths (1D needs no
    * dot product).
    */
   nir_ssa_def *rho;
   if (dPdx->num_components == 1) {
      rho = nir_fmax(b, nir_fabs(b, dPdx), nir_fabs(b, dPdy));
   } else {
      rho = nir_fmax(b,
                     nir_fsqrt(b, nir_fdot(b, dPdx, dPdx)),
                     nir_fsqrt(b, nir_fdot(b, dPdy, dPdy)));
   }

   /* lod = log2(rho).  We're ignoring GL state biases for now. */
   nir_ssa_def *lod = nir_flog2(b, rho);

   /* Replace the gradient instruction with an equivalent lod instruction */
   replace_gradient_with_lod(b, lod, tex);
}
605
/* Clamps the coordinate components selected by sat_mask (bit 0 = s,
 * bit 1 = t, bit 2 = r) to emulate CLAMP-style wrap modes: [0, 1] for
 * normalized samplers, [0, texture-size] for RECT.  The array index is
 * never clamped.
 */
static void
saturate_src(nir_builder *b, nir_tex_instr *tex, unsigned sat_mask)
{
   b->cursor = nir_before_instr(&tex->instr);

   /* Walk through the sources saturating the requested arguments. */
   for (unsigned i = 0; i < tex->num_srcs; i++) {
      if (tex->src[i].src_type != nir_tex_src_coord)
         continue;

      nir_ssa_def *src =
         nir_ssa_for_src(b, tex->src[i].src, tex->coord_components);

      /* split src into components: */
      nir_ssa_def *comp[4];

      assume(tex->coord_components >= 1);

      for (unsigned j = 0; j < tex->coord_components; j++)
         comp[j] = nir_channel(b, src, j);

      /* clamp requested components, array index does not get clamped: */
      unsigned ncomp = tex->coord_components;
      if (tex->is_array)
         ncomp--;

      for (unsigned j = 0; j < ncomp; j++) {
         if ((1 << j) & sat_mask) {
            if (tex->sampler_dim == GLSL_SAMPLER_DIM_RECT) {
               /* non-normalized texture coords, so clamp to texture
                * size rather than [0.0, 1.0]
                */
               nir_ssa_def *txs = nir_i2f32(b, nir_get_texture_size(b, tex));
               comp[j] = nir_fmax(b, comp[j], nir_imm_float(b, 0.0));
               comp[j] = nir_fmin(b, comp[j], nir_channel(b, txs, j));
            } else {
               comp[j] = nir_fsat(b, comp[j]);
            }
         }
      }

      /* and move the result back into a single vecN: */
      src = nir_vec(b, comp, tex->coord_components);

      nir_instr_rewrite_src(&tex->instr,
                            &tex->src[i].src,
                            nir_src_for_ssa(src));
   }
}
655
656 static nir_ssa_def *
657 get_zero_or_one(nir_builder *b, nir_alu_type type, uint8_t swizzle_val)
658 {
659 nir_const_value v[4];
660
661 memset(&v, 0, sizeof(v));
662
663 if (swizzle_val == 4) {
664 v[0].u32 = v[1].u32 = v[2].u32 = v[3].u32 = 0;
665 } else {
666 assert(swizzle_val == 5);
667 if (type == nir_type_float)
668 v[0].f32 = v[1].f32 = v[2].f32 = v[3].f32 = 1.0;
669 else
670 v[0].u32 = v[1].u32 = v[2].u32 = v[3].u32 = 1;
671 }
672
673 return nir_build_imm(b, 4, 32, v);
674 }
675
676 static void
677 swizzle_tg4_broadcom(nir_builder *b, nir_tex_instr *tex)
678 {
679 assert(tex->dest.is_ssa);
680
681 b->cursor = nir_after_instr(&tex->instr);
682
683 assert(nir_tex_instr_dest_size(tex) == 4);
684 unsigned swiz[4] = { 2, 3, 1, 0 };
685 nir_ssa_def *swizzled = nir_swizzle(b, &tex->dest.ssa, swiz, 4);
686
687 nir_ssa_def_rewrite_uses_after(&tex->dest.ssa, nir_src_for_ssa(swizzled),
688 swizzled->parent_instr);
689 }
690
/* Applies a driver-requested result swizzle to the tex destination and
 * rewrites all uses.  Swizzle values 0-3 select a source channel; 4 and 5
 * select constant zero/one (see get_zero_or_one).  tg4 gathers a single
 * component, so when possible its swizzle is applied by just remapping
 * tex->component.
 */
static void
swizzle_result(nir_builder *b, nir_tex_instr *tex, const uint8_t swizzle[4])
{
   assert(tex->dest.is_ssa);

   b->cursor = nir_after_instr(&tex->instr);

   nir_ssa_def *swizzled;
   if (tex->op == nir_texop_tg4) {
      if (swizzle[tex->component] < 4) {
         /* This one's easy */
         tex->component = swizzle[tex->component];
         return;
      } else {
         /* Gathering a constant channel: the whole result is 0 or 1. */
         swizzled = get_zero_or_one(b, tex->dest_type, swizzle[tex->component]);
      }
   } else {
      assert(nir_tex_instr_dest_size(tex) == 4);
      if (swizzle[0] < 4 && swizzle[1] < 4 &&
          swizzle[2] < 4 && swizzle[3] < 4) {
         unsigned swiz[4] = { swizzle[0], swizzle[1], swizzle[2], swizzle[3] };
         /* We have no 0s or 1s, just emit a swizzling MOV */
         swizzled = nir_swizzle(b, &tex->dest.ssa, swiz, 4);
      } else {
         /* Mixed channels and constants: build the vector channel by
          * channel.
          */
         nir_ssa_def *srcs[4];
         for (unsigned i = 0; i < 4; i++) {
            if (swizzle[i] < 4) {
               srcs[i] = nir_channel(b, &tex->dest.ssa, swizzle[i]);
            } else {
               srcs[i] = get_zero_or_one(b, tex->dest_type, swizzle[i]);
            }
         }
         swizzled = nir_vec(b, srcs, 4);
      }
   }

   nir_ssa_def_rewrite_uses_after(&tex->dest.ssa, nir_src_for_ssa(swizzled),
                                  swizzled->parent_instr);
}
730
731 static void
732 linearize_srgb_result(nir_builder *b, nir_tex_instr *tex)
733 {
734 assert(tex->dest.is_ssa);
735 assert(nir_tex_instr_dest_size(tex) == 4);
736 assert(nir_alu_type_get_base_type(tex->dest_type) == nir_type_float);
737
738 b->cursor = nir_after_instr(&tex->instr);
739
740 nir_ssa_def *rgb =
741 nir_format_srgb_to_linear(b, nir_channels(b, &tex->dest.ssa, 0x7));
742
743 /* alpha is untouched: */
744 nir_ssa_def *result = nir_vec4(b,
745 nir_channel(b, rgb, 0),
746 nir_channel(b, rgb, 1),
747 nir_channel(b, rgb, 2),
748 nir_channel(b, &tex->dest.ssa, 3));
749
750 nir_ssa_def_rewrite_uses_after(&tex->dest.ssa, nir_src_for_ssa(result),
751 result->parent_instr);
752 }
753
754 /**
755 * Lowers texture instructions from giving a vec4 result to a vec2 of f16,
756 * i16, or u16, or a single unorm4x8 value.
757 *
758 * Note that we don't change the destination num_components, because
759 * nir_tex_instr_dest_size() will still return 4. The driver is just expected
760 * to not store the other channels, given that nothing at the NIR level will
761 * read them.
762 */
763 static void
764 lower_tex_packing(nir_builder *b, nir_tex_instr *tex,
765 const nir_lower_tex_options *options)
766 {
767 nir_ssa_def *color = &tex->dest.ssa;
768
769 b->cursor = nir_after_instr(&tex->instr);
770
771 switch (options->lower_tex_packing[tex->sampler_index]) {
772 case nir_lower_tex_packing_none:
773 return;
774
775 case nir_lower_tex_packing_16: {
776 static const unsigned bits[4] = {16, 16, 16, 16};
777
778 switch (nir_alu_type_get_base_type(tex->dest_type)) {
779 case nir_type_float:
780 if (tex->is_shadow && tex->is_new_style_shadow) {
781 color = nir_unpack_half_2x16_split_x(b, nir_channel(b, color, 0));
782 } else {
783 nir_ssa_def *rg = nir_channel(b, color, 0);
784 nir_ssa_def *ba = nir_channel(b, color, 1);
785 color = nir_vec4(b,
786 nir_unpack_half_2x16_split_x(b, rg),
787 nir_unpack_half_2x16_split_y(b, rg),
788 nir_unpack_half_2x16_split_x(b, ba),
789 nir_unpack_half_2x16_split_y(b, ba));
790 }
791 break;
792
793 case nir_type_int:
794 color = nir_format_unpack_sint(b, color, bits, 4);
795 break;
796
797 case nir_type_uint:
798 color = nir_format_unpack_uint(b, color, bits, 4);
799 break;
800
801 default:
802 unreachable("unknown base type");
803 }
804 break;
805 }
806
807 case nir_lower_tex_packing_8:
808 assert(nir_alu_type_get_base_type(tex->dest_type) == nir_type_float);
809 color = nir_unpack_unorm_4x8(b, nir_channel(b, color, 0));
810 break;
811 }
812
813 nir_ssa_def_rewrite_uses_after(&tex->dest.ssa, nir_src_for_ssa(color),
814 color->parent_instr);
815 }
816
817 static bool
818 sampler_index_lt(nir_tex_instr *tex, unsigned max)
819 {
820 assert(nir_tex_instr_src_index(tex, nir_tex_src_sampler_deref) == -1);
821
822 unsigned sampler_index = tex->sampler_index;
823
824 int sampler_offset_idx =
825 nir_tex_instr_src_index(tex, nir_tex_src_sampler_offset);
826 if (sampler_offset_idx >= 0) {
827 if (!nir_src_is_const(tex->src[sampler_offset_idx].src))
828 return false;
829
830 sampler_index += nir_src_as_uint(tex->src[sampler_offset_idx].src);
831 }
832
833 return sampler_index < max;
834 }
835
836 static bool
837 lower_tg4_offsets(nir_builder *b, nir_tex_instr *tex)
838 {
839 assert(tex->op == nir_texop_tg4);
840 assert(nir_tex_instr_has_explicit_tg4_offsets(tex));
841 assert(nir_tex_instr_src_index(tex, nir_tex_src_offset) == -1);
842
843 b->cursor = nir_after_instr(&tex->instr);
844
845 nir_ssa_def *dest[4];
846 for (unsigned i = 0; i < 4; ++i) {
847 nir_tex_instr *tex_copy = nir_tex_instr_create(b->shader, tex->num_srcs + 1);
848 tex_copy->op = tex->op;
849 tex_copy->coord_components = tex->coord_components;
850 tex_copy->sampler_dim = tex->sampler_dim;
851 tex_copy->is_array = tex->is_array;
852 tex_copy->is_shadow = tex->is_shadow;
853 tex_copy->is_new_style_shadow = tex->is_new_style_shadow;
854 tex_copy->component = tex->component;
855 tex_copy->dest_type = tex->dest_type;
856
857 for (unsigned j = 0; j < tex->num_srcs; ++j) {
858 nir_src_copy(&tex_copy->src[j].src, &tex->src[j].src, tex_copy);
859 tex_copy->src[j].src_type = tex->src[j].src_type;
860 }
861
862 nir_tex_src src;
863 src.src = nir_src_for_ssa(nir_imm_ivec2(b, tex->tg4_offsets[i][0],
864 tex->tg4_offsets[i][1]));
865 src.src_type = nir_tex_src_offset;
866 tex_copy->src[tex_copy->num_srcs - 1] = src;
867
868 nir_ssa_dest_init(&tex_copy->instr, &tex_copy->dest,
869 nir_tex_instr_dest_size(tex), 32, NULL);
870
871 nir_builder_instr_insert(b, &tex_copy->instr);
872
873 dest[i] = nir_channel(b, &tex_copy->dest.ssa, 3);
874 }
875
876 nir_ssa_def *res = nir_vec4(b, dest[0], dest[1], dest[2], dest[3]);
877 nir_ssa_def_rewrite_uses(&tex->dest.ssa, nir_src_for_ssa(res));
878 nir_instr_remove(&tex->instr);
879
880 return true;
881 }
882
/* Lowers txs with a non-zero (or non-constant) LOD source into a 0-LOD
 * txs followed by ALU minification:
 *
 *    TXS(LOD) = max(TXS(0) >> LOD, 1)
 *
 * leaving any array-size component un-minified.  Returns true if the
 * instruction was changed.
 */
static bool
nir_lower_txs_lod(nir_builder *b, nir_tex_instr *tex)
{
   int lod_idx = nir_tex_instr_src_index(tex, nir_tex_src_lod);
   /* Nothing to do without an LOD source or with a constant-zero LOD. */
   if (lod_idx < 0 ||
       (nir_src_is_const(tex->src[lod_idx].src) &&
        nir_src_as_int(tex->src[lod_idx].src) == 0))
      return false;

   unsigned dest_size = nir_tex_instr_dest_size(tex);

   b->cursor = nir_before_instr(&tex->instr);
   nir_ssa_def *lod = nir_ssa_for_src(b, tex->src[lod_idx].src, 1);

   /* Replace the non-0-LOD in the initial TXS operation by a 0-LOD. */
   nir_instr_rewrite_src(&tex->instr, &tex->src[lod_idx].src,
                         nir_src_for_ssa(nir_imm_int(b, 0)));

   /* TXS(LOD) = max(TXS(0) >> LOD, 1) */
   b->cursor = nir_after_instr(&tex->instr);
   nir_ssa_def *minified = nir_imax(b, nir_ushr(b, &tex->dest.ssa, lod),
                                    nir_imm_int(b, 1));

   /* Make sure the component encoding the array size (if any) is not
    * minified.
    */
   if (tex->is_array) {
      nir_ssa_def *comp[3];

      assert(dest_size <= ARRAY_SIZE(comp));
      for (unsigned i = 0; i < dest_size - 1; i++)
         comp[i] = nir_channel(b, minified, i);

      comp[dest_size - 1] = nir_channel(b, &tex->dest.ssa, dest_size - 1);
      minified = nir_vec(b, comp, dest_size);
   }

   nir_ssa_def_rewrite_uses_after(&tex->dest.ssa, nir_src_for_ssa(minified),
                                  minified->parent_instr);
   return true;
}
924
/* Apply every lowering requested in @options to each tex instruction in
 * @block.  Returns true if any instruction was changed.
 *
 * The order of the steps below is significant: projector lowering must
 * precede coordinate saturation (clamping happens after projection),
 * result swizzling must precede sRGB linearization (so the rgb channels
 * are known), and tg4-offset lowering runs last because it replaces the
 * original tg4 with several new tg4 instructions.
 */
static bool
nir_lower_tex_block(nir_block *block, nir_builder *b,
                    const nir_lower_tex_options *options)
{
   bool progress = false;

   /* _safe iteration: several lowerings below replace the current
    * instruction (and 'continue' immediately afterwards).
    */
   nir_foreach_instr_safe(instr, block) {
      if (instr->type != nir_instr_type_tex)
         continue;

      nir_tex_instr *tex = nir_instr_as_tex(instr);
      /* lower_txp is a bitmask over sampler dims, not sampler indices: */
      bool lower_txp = !!(options->lower_txp & (1 << tex->sampler_dim));

      /* mask of src coords to saturate (clamp): */
      unsigned sat_mask = 0;

      /* saturate_{s,t,r} are per-sampler-index bitmasks selecting which
       * samplers need the given coordinate clamped to [0.0, 1.0]:
       */
      if ((1 << tex->sampler_index) & options->saturate_r)
         sat_mask |= (1 << 2);    /* .z */
      if ((1 << tex->sampler_index) & options->saturate_t)
         sat_mask |= (1 << 1);    /* .y */
      if ((1 << tex->sampler_index) & options->saturate_s)
         sat_mask |= (1 << 0);    /* .x */

      /* If we are clamping any coords, we must lower projector first
       * as clamping happens *after* projection:
       */
      if (lower_txp || sat_mask) {
         progress |= project_src(b, tex);
      }

      /* Fold the constant offset into the coordinate where the backend
       * can't (or shouldn't) apply it itself; also required before
       * saturation so the clamp sees the final coordinate:
       */
      if ((tex->op == nir_texop_txf && options->lower_txf_offset) ||
          (sat_mask && nir_tex_instr_src_index(tex, nir_tex_src_coord) >= 0) ||
          (tex->sampler_dim == GLSL_SAMPLER_DIM_RECT &&
           options->lower_rect_offset)) {
         progress = lower_offset(b, tex) || progress;
      }

      if ((tex->sampler_dim == GLSL_SAMPLER_DIM_RECT) && options->lower_rect) {
         lower_rect(b, tex);
         progress = true;
      }

      /* Each lower_*_external option is a per-texture-index bitmask;
       * matching textures get their sample rewritten by the corresponding
       * helper (presumably planar/packed YUV -> RGB conversion — the
       * helpers are defined earlier in this file):
       */
      if ((1 << tex->texture_index) & options->lower_y_uv_external) {
         lower_y_uv_external(b, tex, options);
         progress = true;
      }

      if ((1 << tex->texture_index) & options->lower_y_u_v_external) {
         lower_y_u_v_external(b, tex, options);
         progress = true;
      }

      if ((1 << tex->texture_index) & options->lower_yx_xuxv_external) {
         lower_yx_xuxv_external(b, tex, options);
         progress = true;
      }

      if ((1 << tex->texture_index) & options->lower_xy_uxvx_external) {
         lower_xy_uxvx_external(b, tex, options);
         progress = true;
      }

      if ((1 << tex->texture_index) & options->lower_ayuv_external) {
         lower_ayuv_external(b, tex, options);
         progress = true;
      }

      if ((1 << tex->texture_index) & options->lower_xyuv_external) {
         lower_xyuv_external(b, tex, options);
         progress = true;
      }

      /* Clamp the coords selected above (after projection/offset folding): */
      if (sat_mask) {
         saturate_src(b, tex, sat_mask);
         progress = true;
      }

      if (tex->op == nir_texop_tg4 && options->lower_tg4_broadcom_swizzle) {
         swizzle_tg4_broadcom(b, tex);
         progress = true;
      }

      /* Apply the per-texture result swizzle; queries and new-style shadow
       * results are excluded:
       */
      if (((1 << tex->texture_index) & options->swizzle_result) &&
          !nir_tex_instr_is_query(tex) &&
          !(tex->is_shadow && tex->is_new_style_shadow)) {
         swizzle_result(b, tex, options->swizzles[tex->texture_index]);
         progress = true;
      }

      /* should be after swizzle so we know which channels are rgb: */
      if (((1 << tex->texture_index) & options->lower_srgb) &&
          !nir_tex_instr_is_query(tex) && !tex->is_shadow) {
         linearize_srgb_result(b, tex);
         progress = true;
      }

      const bool has_min_lod =
         nir_tex_instr_src_index(tex, nir_tex_src_min_lod) >= 0;
      const bool has_offset =
         nir_tex_instr_src_index(tex, nir_tex_src_offset) >= 0;

      if (tex->op == nir_texop_txb && tex->is_shadow && has_min_lod &&
          options->lower_txb_shadow_clamp) {
         lower_implicit_lod(b, tex);
         progress = true;
      }

      if (options->lower_tex_packing[tex->sampler_index] !=
          nir_lower_tex_packing_none &&
          tex->op != nir_texop_txs &&
          tex->op != nir_texop_query_levels) {
         lower_tex_packing(b, tex, options);
         progress = true;
      }

      /* The various txd cases the backend can't handle directly (per the
       * option flags).  lower_gradient() replaces the instruction, so skip
       * the remaining steps for it:
       */
      if (tex->op == nir_texop_txd &&
          (options->lower_txd ||
           (options->lower_txd_shadow && tex->is_shadow) ||
           (options->lower_txd_shadow_clamp && tex->is_shadow && has_min_lod) ||
           (options->lower_txd_offset_clamp && has_offset && has_min_lod) ||
           (options->lower_txd_clamp_bindless_sampler && has_min_lod &&
            nir_tex_instr_src_index(tex, nir_tex_src_sampler_handle) != -1) ||
           (options->lower_txd_clamp_if_sampler_index_not_lt_16 &&
            has_min_lod && !sampler_index_lt(tex, 16)) ||
           (options->lower_txd_cube_map &&
            tex->sampler_dim == GLSL_SAMPLER_DIM_CUBE) ||
           (options->lower_txd_3d &&
            tex->sampler_dim == GLSL_SAMPLER_DIM_3D))) {
         lower_gradient(b, tex);
         progress = true;
         continue;
      }

      /* Implicit derivatives are only defined for fragment shaders and for
       * compute shaders with a derivative group:
       */
      bool shader_supports_implicit_lod =
         b->shader->info.stage == MESA_SHADER_FRAGMENT ||
         (b->shader->info.stage == MESA_SHADER_COMPUTE &&
          b->shader->info.cs.derivative_group != DERIVATIVE_GROUP_NONE);

      /* TXF, TXS and TXL require a LOD but not everything we implement using those
       * three opcodes provides one.  Provide a default LOD of 0.
       */
      if ((nir_tex_instr_src_index(tex, nir_tex_src_lod) == -1) &&
          (tex->op == nir_texop_txf || tex->op == nir_texop_txs ||
           tex->op == nir_texop_txl || tex->op == nir_texop_query_levels ||
           (tex->op == nir_texop_tex && !shader_supports_implicit_lod))) {
         b->cursor = nir_before_instr(&tex->instr);
         nir_tex_instr_add_src(tex, nir_tex_src_lod, nir_src_for_ssa(nir_imm_int(b, 0)));
         /* tex without implicit-lod support becomes an explicit txl: */
         if (tex->op == nir_texop_tex && options->lower_tex_without_implicit_lod)
            tex->op = nir_texop_txl;
         progress = true;
         continue;
      }

      if (options->lower_txs_lod && tex->op == nir_texop_txs) {
         progress |= nir_lower_txs_lod(b, tex);
         continue;
      }

      /* has to happen after all the other lowerings as the original tg4 gets
       * replaced by 4 tg4 instructions.
       */
      if (tex->op == nir_texop_tg4 &&
          nir_tex_instr_has_explicit_tg4_offsets(tex) &&
          options->lower_tg4_offsets) {
         progress |= lower_tg4_offsets(b, tex);
         continue;
      }
   }

   return progress;
}
1096
1097 static bool
1098 nir_lower_tex_impl(nir_function_impl *impl,
1099 const nir_lower_tex_options *options)
1100 {
1101 bool progress = false;
1102 nir_builder builder;
1103 nir_builder_init(&builder, impl);
1104
1105 nir_foreach_block(block, impl) {
1106 progress |= nir_lower_tex_block(block, &builder, options);
1107 }
1108
1109 nir_metadata_preserve(impl, nir_metadata_block_index |
1110 nir_metadata_dominance);
1111 return progress;
1112 }
1113
1114 bool
1115 nir_lower_tex(nir_shader *shader, const nir_lower_tex_options *options)
1116 {
1117 bool progress = false;
1118
1119 nir_foreach_function(function, shader) {
1120 if (function->impl)
1121 progress |= nir_lower_tex_impl(function->impl, options);
1122 }
1123
1124 return progress;
1125 }