src/compiler/nir/nir_lower_io.c
1 /*
2 * Copyright © 2014 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Connor Abbott (cwabbott0@gmail.com)
25 * Jason Ekstrand (jason@jlekstrand.net)
26 *
27 */
28
29 /*
30 * This lowering pass converts references to input/output variables with
31 * loads/stores to actual input/output intrinsics.
32 */
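
/*
 * Schematically (an illustrative sketch, not exact nir_print output), a
 * shader-input access such as
 *
 *    ssa_2 = load_deref(deref_var &in_color)
 *
 * becomes an offset-based intrinsic along the lines of
 *
 *    ssa_2 = load_input(offset), base = in_color's data.driver_location,
 *                                component = in_color's data.location_frac
 *
 * where the offset source accounts for any array/struct indexing below the
 * variable and "in_color" is just a hypothetical variable name.
 */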
33
34 #include "nir.h"
35 #include "nir_builder.h"
36 #include "nir_deref.h"
37
38 #include "util/u_math.h"
39
40 struct lower_io_state {
41 void *dead_ctx;
42 nir_builder builder;
43 int (*type_size)(const struct glsl_type *type, bool);
44 nir_variable_mode modes;
45 nir_lower_io_options options;
46 };
47
48 static nir_intrinsic_op
49 ssbo_atomic_for_deref(nir_intrinsic_op deref_op)
50 {
51 switch (deref_op) {
52 #define OP(O) case nir_intrinsic_deref_##O: return nir_intrinsic_ssbo_##O;
53 OP(atomic_exchange)
54 OP(atomic_comp_swap)
55 OP(atomic_add)
56 OP(atomic_imin)
57 OP(atomic_umin)
58 OP(atomic_imax)
59 OP(atomic_umax)
60 OP(atomic_and)
61 OP(atomic_or)
62 OP(atomic_xor)
63 OP(atomic_fadd)
64 OP(atomic_fmin)
65 OP(atomic_fmax)
66 OP(atomic_fcomp_swap)
67 #undef OP
68 default:
69 unreachable("Invalid SSBO atomic");
70 }
71 }
72
73 static nir_intrinsic_op
74 global_atomic_for_deref(nir_intrinsic_op deref_op)
75 {
76 switch (deref_op) {
77 #define OP(O) case nir_intrinsic_deref_##O: return nir_intrinsic_global_##O;
78 OP(atomic_exchange)
79 OP(atomic_comp_swap)
80 OP(atomic_add)
81 OP(atomic_imin)
82 OP(atomic_umin)
83 OP(atomic_imax)
84 OP(atomic_umax)
85 OP(atomic_and)
86 OP(atomic_or)
87 OP(atomic_xor)
88 OP(atomic_fadd)
89 OP(atomic_fmin)
90 OP(atomic_fmax)
91 OP(atomic_fcomp_swap)
92 #undef OP
93 default:
94       unreachable("Invalid global atomic");
95 }
96 }
97
98 static nir_intrinsic_op
99 shared_atomic_for_deref(nir_intrinsic_op deref_op)
100 {
101 switch (deref_op) {
102 #define OP(O) case nir_intrinsic_deref_##O: return nir_intrinsic_shared_##O;
103 OP(atomic_exchange)
104 OP(atomic_comp_swap)
105 OP(atomic_add)
106 OP(atomic_imin)
107 OP(atomic_umin)
108 OP(atomic_imax)
109 OP(atomic_umax)
110 OP(atomic_and)
111 OP(atomic_or)
112 OP(atomic_xor)
113 OP(atomic_fadd)
114 OP(atomic_fmin)
115 OP(atomic_fmax)
116 OP(atomic_fcomp_swap)
117 #undef OP
118 default:
119 unreachable("Invalid shared atomic");
120 }
121 }
122
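/** Assign consecutive driver_location values to variables of the given mode
 *
 * Walks the shader's variables with the given mode in declaration order,
 * sets data.driver_location on each using the provided type_size callback,
 * and returns the total size consumed through the size pointer.
 */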
123 void
124 nir_assign_var_locations(nir_shader *shader, nir_variable_mode mode,
125 unsigned *size,
126 int (*type_size)(const struct glsl_type *, bool))
127 {
128 unsigned location = 0;
129
130 nir_foreach_variable_with_modes(var, shader, mode) {
131 var->data.driver_location = location;
132 bool bindless_type_size = var->data.mode == nir_var_shader_in ||
133 var->data.mode == nir_var_shader_out ||
134 var->data.bindless;
135 location += type_size(var->type, bindless_type_size);
136 }
137
138 *size = location;
139 }
140
141 /**
142  * Return true if the given variable is a per-vertex input/output array
143  * (such as geometry shader inputs).
144 */
145 bool
146 nir_is_per_vertex_io(const nir_variable *var, gl_shader_stage stage)
147 {
148 if (var->data.patch || !glsl_type_is_array(var->type))
149 return false;
150
151 if (var->data.mode == nir_var_shader_in)
152 return stage == MESA_SHADER_GEOMETRY ||
153 stage == MESA_SHADER_TESS_CTRL ||
154 stage == MESA_SHADER_TESS_EVAL;
155
156 if (var->data.mode == nir_var_shader_out)
157 return stage == MESA_SHADER_TESS_CTRL;
158
159 return false;
160 }
161
162 static nir_ssa_def *
163 get_io_offset(nir_builder *b, nir_deref_instr *deref,
164 nir_ssa_def **vertex_index,
165 int (*type_size)(const struct glsl_type *, bool),
166 unsigned *component, bool bts)
167 {
168 nir_deref_path path;
169 nir_deref_path_init(&path, deref, NULL);
170
171 assert(path.path[0]->deref_type == nir_deref_type_var);
172 nir_deref_instr **p = &path.path[1];
173
174 /* For per-vertex input arrays (i.e. geometry shader inputs), keep the
175 * outermost array index separate. Process the rest normally.
176 */
177 if (vertex_index != NULL) {
178 assert((*p)->deref_type == nir_deref_type_array);
179 *vertex_index = nir_ssa_for_src(b, (*p)->arr.index, 1);
180 p++;
181 }
182
183 if (path.path[0]->var->data.compact) {
184 assert((*p)->deref_type == nir_deref_type_array);
185 assert(glsl_type_is_scalar((*p)->type));
186
187 /* We always lower indirect dereferences for "compact" array vars. */
188 const unsigned index = nir_src_as_uint((*p)->arr.index);
189 const unsigned total_offset = *component + index;
190 const unsigned slot_offset = total_offset / 4;
191 *component = total_offset % 4;
192 return nir_imm_int(b, type_size(glsl_vec4_type(), bts) * slot_offset);
193 }
194
195 /* Just emit code and let constant-folding go to town */
196 nir_ssa_def *offset = nir_imm_int(b, 0);
197
198 for (; *p; p++) {
199 if ((*p)->deref_type == nir_deref_type_array) {
200 unsigned size = type_size((*p)->type, bts);
201
202 nir_ssa_def *mul =
203 nir_amul_imm(b, nir_ssa_for_src(b, (*p)->arr.index, 1), size);
204
205 offset = nir_iadd(b, offset, mul);
206 } else if ((*p)->deref_type == nir_deref_type_struct) {
207 /* p starts at path[1], so this is safe */
208 nir_deref_instr *parent = *(p - 1);
209
210 unsigned field_offset = 0;
211 for (unsigned i = 0; i < (*p)->strct.index; i++) {
212 field_offset += type_size(glsl_get_struct_field(parent->type, i), bts);
213 }
214 offset = nir_iadd_imm(b, offset, field_offset);
215 } else {
216 unreachable("Unsupported deref type");
217 }
218 }
219
220 nir_deref_path_finish(&path);
221
222 return offset;
223 }
224
225 static nir_ssa_def *
226 emit_load(struct lower_io_state *state,
227 nir_ssa_def *vertex_index, nir_variable *var, nir_ssa_def *offset,
228 unsigned component, unsigned num_components, unsigned bit_size,
229 nir_alu_type type)
230 {
231 nir_builder *b = &state->builder;
232 const nir_shader *nir = b->shader;
233 nir_variable_mode mode = var->data.mode;
234 nir_ssa_def *barycentric = NULL;
235
236 nir_intrinsic_op op;
237 switch (mode) {
238 case nir_var_shader_in:
239 if (nir->info.stage == MESA_SHADER_FRAGMENT &&
240 nir->options->use_interpolated_input_intrinsics &&
241 var->data.interpolation != INTERP_MODE_FLAT) {
242 if (var->data.interpolation == INTERP_MODE_EXPLICIT) {
243 assert(vertex_index != NULL);
244 op = nir_intrinsic_load_input_vertex;
245 } else {
246 assert(vertex_index == NULL);
247
248 nir_intrinsic_op bary_op;
249 if (var->data.sample ||
250 (state->options & nir_lower_io_force_sample_interpolation))
251 bary_op = nir_intrinsic_load_barycentric_sample;
252 else if (var->data.centroid)
253 bary_op = nir_intrinsic_load_barycentric_centroid;
254 else
255 bary_op = nir_intrinsic_load_barycentric_pixel;
256
257 barycentric = nir_load_barycentric(&state->builder, bary_op,
258 var->data.interpolation);
259 op = nir_intrinsic_load_interpolated_input;
260 }
261 } else {
262 op = vertex_index ? nir_intrinsic_load_per_vertex_input :
263 nir_intrinsic_load_input;
264 }
265 break;
266 case nir_var_shader_out:
267 op = vertex_index ? nir_intrinsic_load_per_vertex_output :
268 nir_intrinsic_load_output;
269 break;
270 case nir_var_uniform:
271 op = nir_intrinsic_load_uniform;
272 break;
273 default:
274 unreachable("Unknown variable mode");
275 }
276
277 nir_intrinsic_instr *load =
278 nir_intrinsic_instr_create(state->builder.shader, op);
279 load->num_components = num_components;
280
281 nir_intrinsic_set_base(load, var->data.driver_location);
282 if (mode == nir_var_shader_in || mode == nir_var_shader_out)
283 nir_intrinsic_set_component(load, component);
284
285 if (load->intrinsic == nir_intrinsic_load_uniform)
286 nir_intrinsic_set_range(load,
287 state->type_size(var->type, var->data.bindless));
288
289 if (load->intrinsic == nir_intrinsic_load_input ||
290 load->intrinsic == nir_intrinsic_load_input_vertex ||
291 load->intrinsic == nir_intrinsic_load_uniform)
292 nir_intrinsic_set_type(load, type);
293
294 if (vertex_index) {
295 load->src[0] = nir_src_for_ssa(vertex_index);
296 load->src[1] = nir_src_for_ssa(offset);
297 } else if (barycentric) {
298 load->src[0] = nir_src_for_ssa(barycentric);
299 load->src[1] = nir_src_for_ssa(offset);
300 } else {
301 load->src[0] = nir_src_for_ssa(offset);
302 }
303
304 nir_ssa_dest_init(&load->instr, &load->dest,
305 num_components, bit_size, NULL);
306 nir_builder_instr_insert(b, &load->instr);
307
308 return &load->dest.ssa;
309 }
310
311 static nir_ssa_def *
312 lower_load(nir_intrinsic_instr *intrin, struct lower_io_state *state,
313 nir_ssa_def *vertex_index, nir_variable *var, nir_ssa_def *offset,
314 unsigned component, const struct glsl_type *type)
315 {
316 assert(intrin->dest.is_ssa);
317 if (intrin->dest.ssa.bit_size == 64 &&
318 (state->options & nir_lower_io_lower_64bit_to_32)) {
319 nir_builder *b = &state->builder;
320
321 const unsigned slot_size = state->type_size(glsl_dvec_type(2), false);
322
323 nir_ssa_def *comp64[4];
324 assert(component == 0 || component == 2);
325 unsigned dest_comp = 0;
326 while (dest_comp < intrin->dest.ssa.num_components) {
327 const unsigned num_comps =
328 MIN2(intrin->dest.ssa.num_components - dest_comp,
329 (4 - component) / 2);
330
331 nir_ssa_def *data32 =
332 emit_load(state, vertex_index, var, offset, component,
333 num_comps * 2, 32, nir_type_uint32);
334 for (unsigned i = 0; i < num_comps; i++) {
335 comp64[dest_comp + i] =
336 nir_pack_64_2x32(b, nir_channels(b, data32, 3 << (i * 2)));
337 }
338
339          /* Only the first load has a component offset */
340 component = 0;
341 dest_comp += num_comps;
342 offset = nir_iadd_imm(b, offset, slot_size);
343 }
344
345 return nir_vec(b, comp64, intrin->dest.ssa.num_components);
346 } else if (intrin->dest.ssa.bit_size == 1) {
347 /* Booleans are 32-bit */
348 assert(glsl_type_is_boolean(type));
349 return nir_b2b1(&state->builder,
350 emit_load(state, vertex_index, var, offset, component,
351 intrin->dest.ssa.num_components, 32,
352 nir_type_bool32));
353 } else {
354 return emit_load(state, vertex_index, var, offset, component,
355 intrin->dest.ssa.num_components,
356 intrin->dest.ssa.bit_size,
357 nir_get_nir_type_for_glsl_type(type));
358 }
359 }
360
361 static void
362 emit_store(struct lower_io_state *state, nir_ssa_def *data,
363 nir_ssa_def *vertex_index, nir_variable *var, nir_ssa_def *offset,
364 unsigned component, unsigned num_components,
365 nir_component_mask_t write_mask, nir_alu_type type)
366 {
367 nir_builder *b = &state->builder;
368 nir_variable_mode mode = var->data.mode;
369
370 assert(mode == nir_var_shader_out);
371 nir_intrinsic_op op;
372 op = vertex_index ? nir_intrinsic_store_per_vertex_output :
373 nir_intrinsic_store_output;
374
375 nir_intrinsic_instr *store =
376 nir_intrinsic_instr_create(state->builder.shader, op);
377 store->num_components = num_components;
378
379 store->src[0] = nir_src_for_ssa(data);
380
381 nir_intrinsic_set_base(store, var->data.driver_location);
382
383 if (mode == nir_var_shader_out)
384 nir_intrinsic_set_component(store, component);
385
386 if (store->intrinsic == nir_intrinsic_store_output)
387 nir_intrinsic_set_type(store, type);
388
389 nir_intrinsic_set_write_mask(store, write_mask);
390
391 if (vertex_index)
392 store->src[1] = nir_src_for_ssa(vertex_index);
393
394 store->src[vertex_index ? 2 : 1] = nir_src_for_ssa(offset);
395
396 nir_builder_instr_insert(b, &store->instr);
397 }
398
399 static void
400 lower_store(nir_intrinsic_instr *intrin, struct lower_io_state *state,
401 nir_ssa_def *vertex_index, nir_variable *var, nir_ssa_def *offset,
402 unsigned component, const struct glsl_type *type)
403 {
404 assert(intrin->src[1].is_ssa);
405 if (intrin->src[1].ssa->bit_size == 64 &&
406 (state->options & nir_lower_io_lower_64bit_to_32)) {
407 nir_builder *b = &state->builder;
408
409 const unsigned slot_size = state->type_size(glsl_dvec_type(2), false);
410
411 assert(component == 0 || component == 2);
412 unsigned src_comp = 0;
413 nir_component_mask_t write_mask = nir_intrinsic_write_mask(intrin);
414 while (src_comp < intrin->num_components) {
415 const unsigned num_comps =
416 MIN2(intrin->num_components - src_comp,
417 (4 - component) / 2);
418
419 if (write_mask & BITFIELD_MASK(num_comps)) {
420 nir_ssa_def *data =
421 nir_channels(b, intrin->src[1].ssa,
422 BITFIELD_RANGE(src_comp, num_comps));
423 nir_ssa_def *data32 = nir_bitcast_vector(b, data, 32);
424
425 nir_component_mask_t write_mask32 = 0;
426 for (unsigned i = 0; i < num_comps; i++) {
427 if (write_mask & BITFIELD_MASK(num_comps) & (1 << i))
428 write_mask32 |= 3 << (i * 2);
429 }
430
431 emit_store(state, data32, vertex_index, var, offset,
432 component, data32->num_components, write_mask32,
433 nir_type_uint32);
434 }
435
436 /* Only the first store has a component offset */
437 component = 0;
438 src_comp += num_comps;
439 write_mask >>= num_comps;
440 offset = nir_iadd_imm(b, offset, slot_size);
441 }
442    } else if (intrin->src[1].ssa->bit_size == 1) {
443 /* Booleans are 32-bit */
444 assert(glsl_type_is_boolean(type));
445 nir_ssa_def *b32_val = nir_b2b32(&state->builder, intrin->src[1].ssa);
446 emit_store(state, b32_val, vertex_index, var, offset,
447 component, intrin->num_components,
448 nir_intrinsic_write_mask(intrin),
449 nir_type_bool32);
450 } else {
451 emit_store(state, intrin->src[1].ssa, vertex_index, var, offset,
452 component, intrin->num_components,
453 nir_intrinsic_write_mask(intrin),
454 nir_get_nir_type_for_glsl_type(type));
455 }
456 }
457
458 static nir_ssa_def *
459 lower_interpolate_at(nir_intrinsic_instr *intrin, struct lower_io_state *state,
460 nir_variable *var, nir_ssa_def *offset, unsigned component,
461 const struct glsl_type *type)
462 {
463 nir_builder *b = &state->builder;
464 assert(var->data.mode == nir_var_shader_in);
465
466 /* Ignore interpolateAt() for flat variables - flat is flat. Lower
467 * interpolateAtVertex() for explicit variables.
468 */
469 if (var->data.interpolation == INTERP_MODE_FLAT ||
470 var->data.interpolation == INTERP_MODE_EXPLICIT) {
471 nir_ssa_def *vertex_index = NULL;
472
473 if (var->data.interpolation == INTERP_MODE_EXPLICIT) {
474 assert(intrin->intrinsic == nir_intrinsic_interp_deref_at_vertex);
475 vertex_index = intrin->src[1].ssa;
476 }
477
478 return lower_load(intrin, state, vertex_index, var, offset, component, type);
479 }
480
481 /* None of the supported APIs allow interpolation on 64-bit things */
482 assert(intrin->dest.is_ssa && intrin->dest.ssa.bit_size <= 32);
483
484 nir_intrinsic_op bary_op;
485 switch (intrin->intrinsic) {
486 case nir_intrinsic_interp_deref_at_centroid:
487 bary_op = (state->options & nir_lower_io_force_sample_interpolation) ?
488 nir_intrinsic_load_barycentric_sample :
489 nir_intrinsic_load_barycentric_centroid;
490 break;
491 case nir_intrinsic_interp_deref_at_sample:
492 bary_op = nir_intrinsic_load_barycentric_at_sample;
493 break;
494 case nir_intrinsic_interp_deref_at_offset:
495 bary_op = nir_intrinsic_load_barycentric_at_offset;
496 break;
497 default:
498 unreachable("Bogus interpolateAt() intrinsic.");
499 }
500
501 nir_intrinsic_instr *bary_setup =
502 nir_intrinsic_instr_create(state->builder.shader, bary_op);
503
504 nir_ssa_dest_init(&bary_setup->instr, &bary_setup->dest, 2, 32, NULL);
505 nir_intrinsic_set_interp_mode(bary_setup, var->data.interpolation);
506
507 if (intrin->intrinsic == nir_intrinsic_interp_deref_at_sample ||
508 intrin->intrinsic == nir_intrinsic_interp_deref_at_offset ||
509 intrin->intrinsic == nir_intrinsic_interp_deref_at_vertex)
510 nir_src_copy(&bary_setup->src[0], &intrin->src[1], bary_setup);
511
512 nir_builder_instr_insert(b, &bary_setup->instr);
513
514 nir_intrinsic_instr *load =
515 nir_intrinsic_instr_create(state->builder.shader,
516 nir_intrinsic_load_interpolated_input);
517 load->num_components = intrin->num_components;
518
519 nir_intrinsic_set_base(load, var->data.driver_location);
520 nir_intrinsic_set_component(load, component);
521
522 load->src[0] = nir_src_for_ssa(&bary_setup->dest.ssa);
523 load->src[1] = nir_src_for_ssa(offset);
524
525 assert(intrin->dest.is_ssa);
526 nir_ssa_dest_init(&load->instr, &load->dest,
527 intrin->dest.ssa.num_components,
528 intrin->dest.ssa.bit_size, NULL);
529 nir_builder_instr_insert(b, &load->instr);
530
531 return &load->dest.ssa;
532 }
533
534 static bool
535 nir_lower_io_block(nir_block *block,
536 struct lower_io_state *state)
537 {
538 nir_builder *b = &state->builder;
539 const nir_shader_compiler_options *options = b->shader->options;
540 bool progress = false;
541
542 nir_foreach_instr_safe(instr, block) {
543 if (instr->type != nir_instr_type_intrinsic)
544 continue;
545
546 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
547
548 switch (intrin->intrinsic) {
549 case nir_intrinsic_load_deref:
550 case nir_intrinsic_store_deref:
551          /* We can lower the io for this nir intrinsic */
552 break;
553 case nir_intrinsic_interp_deref_at_centroid:
554 case nir_intrinsic_interp_deref_at_sample:
555 case nir_intrinsic_interp_deref_at_offset:
556 case nir_intrinsic_interp_deref_at_vertex:
557 /* We can optionally lower these to load_interpolated_input */
558 if (options->use_interpolated_input_intrinsics)
559 break;
560 default:
561          /* We can't lower the io for this nir intrinsic, so skip it */
562 continue;
563 }
564
565 nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
566
567 nir_variable_mode mode = deref->mode;
568 assert(util_is_power_of_two_nonzero(mode));
569 if ((state->modes & mode) == 0)
570 continue;
571
572 nir_variable *var = nir_deref_instr_get_variable(deref);
573
574 b->cursor = nir_before_instr(instr);
575
576 const bool per_vertex = nir_is_per_vertex_io(var, b->shader->info.stage);
577
578 nir_ssa_def *offset;
579 nir_ssa_def *vertex_index = NULL;
580 unsigned component_offset = var->data.location_frac;
581 bool bindless_type_size = mode == nir_var_shader_in ||
582 mode == nir_var_shader_out ||
583 var->data.bindless;
584
585 offset = get_io_offset(b, deref, per_vertex ? &vertex_index : NULL,
586 state->type_size, &component_offset,
587 bindless_type_size);
588
589 nir_ssa_def *replacement = NULL;
590
591 switch (intrin->intrinsic) {
592 case nir_intrinsic_load_deref:
593 replacement = lower_load(intrin, state, vertex_index, var, offset,
594 component_offset, deref->type);
595 break;
596
597 case nir_intrinsic_store_deref:
598 lower_store(intrin, state, vertex_index, var, offset,
599 component_offset, deref->type);
600 break;
601
602 case nir_intrinsic_interp_deref_at_centroid:
603 case nir_intrinsic_interp_deref_at_sample:
604 case nir_intrinsic_interp_deref_at_offset:
605 case nir_intrinsic_interp_deref_at_vertex:
606 assert(vertex_index == NULL);
607 replacement = lower_interpolate_at(intrin, state, var, offset,
608 component_offset, deref->type);
609 break;
610
611 default:
612 continue;
613 }
614
615 if (replacement) {
616 nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
617 nir_src_for_ssa(replacement));
618 }
619 nir_instr_remove(&intrin->instr);
620 progress = true;
621 }
622
623 return progress;
624 }
625
626 static bool
627 nir_lower_io_impl(nir_function_impl *impl,
628 nir_variable_mode modes,
629 int (*type_size)(const struct glsl_type *, bool),
630 nir_lower_io_options options)
631 {
632 struct lower_io_state state;
633 bool progress = false;
634
635 nir_builder_init(&state.builder, impl);
636 state.dead_ctx = ralloc_context(NULL);
637 state.modes = modes;
638 state.type_size = type_size;
639 state.options = options;
640
641 ASSERTED nir_variable_mode supported_modes =
642 nir_var_shader_in | nir_var_shader_out | nir_var_uniform;
643 assert(!(modes & ~supported_modes));
644
645 nir_foreach_block(block, impl) {
646 progress |= nir_lower_io_block(block, &state);
647 }
648
649 ralloc_free(state.dead_ctx);
650
651 nir_metadata_preserve(impl, nir_metadata_block_index |
652 nir_metadata_dominance);
653 return progress;
654 }
655
656 /** Lower load/store_deref intrinsics on I/O variables to offset-based intrinsics
657 *
658 * This pass is intended to be used for cross-stage shader I/O and driver-
659 * managed uniforms to turn deref-based access into a simpler model using
660 * locations or offsets. For fragment shader inputs, it can optionally turn
661 * load_deref into an explicit interpolation using barycentrics coming from
662 * one of the load_barycentric_* intrinsics. This pass requires that all
663 * deref chains are complete and contain no casts.
664 */
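/* A minimal usage sketch (illustrative only; type_size_vec4 stands for a
 * hypothetical driver callback returning the number of vec4 slots a
 * glsl_type occupies):
 *
 *    unsigned num_inputs = 0;
 *    nir_assign_var_locations(nir, nir_var_shader_in, &num_inputs,
 *                             type_size_vec4);
 *    nir_lower_io(nir, nir_var_shader_in | nir_var_shader_out,
 *                 type_size_vec4, (nir_lower_io_options)0);
 */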
665 bool
666 nir_lower_io(nir_shader *shader, nir_variable_mode modes,
667 int (*type_size)(const struct glsl_type *, bool),
668 nir_lower_io_options options)
669 {
670 bool progress = false;
671
672 nir_foreach_function(function, shader) {
673 if (function->impl) {
674 progress |= nir_lower_io_impl(function->impl, modes,
675 type_size, options);
676 }
677 }
678
679 return progress;
680 }
681
682 static unsigned
683 type_scalar_size_bytes(const struct glsl_type *type)
684 {
685 assert(glsl_type_is_vector_or_scalar(type) ||
686 glsl_type_is_matrix(type));
687 return glsl_type_is_boolean(type) ? 4 : glsl_get_bit_size(type) / 8;
688 }
689
690 static nir_ssa_def *
691 build_addr_iadd(nir_builder *b, nir_ssa_def *addr,
692 nir_address_format addr_format, nir_ssa_def *offset)
693 {
694 assert(offset->num_components == 1);
695
696 switch (addr_format) {
697 case nir_address_format_32bit_global:
698 case nir_address_format_64bit_global:
699 case nir_address_format_32bit_offset:
700 assert(addr->bit_size == offset->bit_size);
701 assert(addr->num_components == 1);
702 return nir_iadd(b, addr, offset);
703
704 case nir_address_format_32bit_offset_as_64bit:
705 assert(addr->num_components == 1);
706 assert(offset->bit_size == 32);
707 return nir_u2u64(b, nir_iadd(b, nir_u2u32(b, addr), offset));
708
709 case nir_address_format_64bit_bounded_global:
710 assert(addr->num_components == 4);
711 assert(addr->bit_size == offset->bit_size);
712 return nir_vec4(b, nir_channel(b, addr, 0),
713 nir_channel(b, addr, 1),
714 nir_channel(b, addr, 2),
715 nir_iadd(b, nir_channel(b, addr, 3), offset));
716
717 case nir_address_format_32bit_index_offset:
718 assert(addr->num_components == 2);
719 assert(addr->bit_size == offset->bit_size);
720 return nir_vec2(b, nir_channel(b, addr, 0),
721 nir_iadd(b, nir_channel(b, addr, 1), offset));
722
723 case nir_address_format_32bit_index_offset_pack64:
724 assert(addr->num_components == 1);
725 assert(offset->bit_size == 32);
726 return nir_pack_64_2x32_split(b,
727 nir_iadd(b, nir_unpack_64_2x32_split_x(b, addr), offset),
728 nir_unpack_64_2x32_split_y(b, addr));
729
730 case nir_address_format_vec2_index_32bit_offset:
731 assert(addr->num_components == 3);
732 assert(offset->bit_size == 32);
733 return nir_vec3(b, nir_channel(b, addr, 0), nir_channel(b, addr, 1),
734 nir_iadd(b, nir_channel(b, addr, 2), offset));
735
736 case nir_address_format_logical:
737 unreachable("Unsupported address format");
738 }
739 unreachable("Invalid address format");
740 }
741
742 static unsigned
743 addr_get_offset_bit_size(nir_ssa_def *addr, nir_address_format addr_format)
744 {
745 if (addr_format == nir_address_format_32bit_offset_as_64bit ||
746 addr_format == nir_address_format_32bit_index_offset_pack64)
747 return 32;
748 return addr->bit_size;
749 }
750
751 static nir_ssa_def *
752 build_addr_iadd_imm(nir_builder *b, nir_ssa_def *addr,
753 nir_address_format addr_format, int64_t offset)
754 {
755 return build_addr_iadd(b, addr, addr_format,
756 nir_imm_intN_t(b, offset,
757 addr_get_offset_bit_size(addr, addr_format)));
758 }
759
760 static nir_ssa_def *
761 addr_to_index(nir_builder *b, nir_ssa_def *addr,
762 nir_address_format addr_format)
763 {
764 switch (addr_format) {
765 case nir_address_format_32bit_index_offset:
766 assert(addr->num_components == 2);
767 return nir_channel(b, addr, 0);
768 case nir_address_format_32bit_index_offset_pack64:
769 return nir_unpack_64_2x32_split_y(b, addr);
770 case nir_address_format_vec2_index_32bit_offset:
771 assert(addr->num_components == 3);
772 return nir_channels(b, addr, 0x3);
773 default: unreachable("Invalid address format");
774 }
775 }
776
777 static nir_ssa_def *
778 addr_to_offset(nir_builder *b, nir_ssa_def *addr,
779 nir_address_format addr_format)
780 {
781 switch (addr_format) {
782 case nir_address_format_32bit_index_offset:
783 assert(addr->num_components == 2);
784 return nir_channel(b, addr, 1);
785 case nir_address_format_32bit_index_offset_pack64:
786 return nir_unpack_64_2x32_split_x(b, addr);
787 case nir_address_format_vec2_index_32bit_offset:
788 assert(addr->num_components == 3);
789 return nir_channel(b, addr, 2);
790 case nir_address_format_32bit_offset:
791 return addr;
792 case nir_address_format_32bit_offset_as_64bit:
793 return nir_u2u32(b, addr);
794 default:
795 unreachable("Invalid address format");
796 }
797 }
798
799 /** Returns true if the given address format resolves to a global address */
800 static bool
801 addr_format_is_global(nir_address_format addr_format)
802 {
803 return addr_format == nir_address_format_32bit_global ||
804 addr_format == nir_address_format_64bit_global ||
805 addr_format == nir_address_format_64bit_bounded_global;
806 }
807
808 static bool
809 addr_format_is_offset(nir_address_format addr_format)
810 {
811 return addr_format == nir_address_format_32bit_offset ||
812 addr_format == nir_address_format_32bit_offset_as_64bit;
813 }
814
815 static nir_ssa_def *
816 addr_to_global(nir_builder *b, nir_ssa_def *addr,
817 nir_address_format addr_format)
818 {
819 switch (addr_format) {
820 case nir_address_format_32bit_global:
821 case nir_address_format_64bit_global:
822 assert(addr->num_components == 1);
823 return addr;
824
825 case nir_address_format_64bit_bounded_global:
826 assert(addr->num_components == 4);
827 return nir_iadd(b, nir_pack_64_2x32(b, nir_channels(b, addr, 0x3)),
828 nir_u2u64(b, nir_channel(b, addr, 3)));
829
830 case nir_address_format_32bit_index_offset:
831 case nir_address_format_32bit_index_offset_pack64:
832 case nir_address_format_vec2_index_32bit_offset:
833 case nir_address_format_32bit_offset:
834 case nir_address_format_32bit_offset_as_64bit:
835 case nir_address_format_logical:
836 unreachable("Cannot get a 64-bit address with this address format");
837 }
838
839 unreachable("Invalid address format");
840 }
841
842 static bool
843 addr_format_needs_bounds_check(nir_address_format addr_format)
844 {
845 return addr_format == nir_address_format_64bit_bounded_global;
846 }
847
848 static nir_ssa_def *
849 addr_is_in_bounds(nir_builder *b, nir_ssa_def *addr,
850 nir_address_format addr_format, unsigned size)
851 {
852 assert(addr_format == nir_address_format_64bit_bounded_global);
853 assert(addr->num_components == 4);
854 return nir_ige(b, nir_channel(b, addr, 2),
855 nir_iadd_imm(b, nir_channel(b, addr, 3), size));
856 }
857
858 static nir_ssa_def *
859 build_explicit_io_load(nir_builder *b, nir_intrinsic_instr *intrin,
860 nir_ssa_def *addr, nir_address_format addr_format,
861 unsigned num_components)
862 {
863 nir_variable_mode mode = nir_src_as_deref(intrin->src[0])->mode;
864
865 nir_intrinsic_op op;
866 switch (mode) {
867 case nir_var_mem_ubo:
868 op = nir_intrinsic_load_ubo;
869 break;
870 case nir_var_mem_ssbo:
871 if (addr_format_is_global(addr_format))
872 op = nir_intrinsic_load_global;
873 else
874 op = nir_intrinsic_load_ssbo;
875 break;
876 case nir_var_mem_global:
877 assert(addr_format_is_global(addr_format));
878 op = nir_intrinsic_load_global;
879 break;
880 case nir_var_shader_in:
881 assert(addr_format_is_offset(addr_format));
882 op = nir_intrinsic_load_kernel_input;
883 break;
884 case nir_var_mem_shared:
885 assert(addr_format_is_offset(addr_format));
886 op = nir_intrinsic_load_shared;
887 break;
888 case nir_var_shader_temp:
889 case nir_var_function_temp:
890 if (addr_format_is_offset(addr_format)) {
891 op = nir_intrinsic_load_scratch;
892 } else {
893 assert(addr_format_is_global(addr_format));
894 op = nir_intrinsic_load_global;
895 }
896 break;
897 default:
898 unreachable("Unsupported explicit IO variable mode");
899 }
900
901 nir_intrinsic_instr *load = nir_intrinsic_instr_create(b->shader, op);
902
903 if (addr_format_is_global(addr_format)) {
904 load->src[0] = nir_src_for_ssa(addr_to_global(b, addr, addr_format));
905 } else if (addr_format_is_offset(addr_format)) {
906 assert(addr->num_components == 1);
907 load->src[0] = nir_src_for_ssa(addr_to_offset(b, addr, addr_format));
908 } else {
909 load->src[0] = nir_src_for_ssa(addr_to_index(b, addr, addr_format));
910 load->src[1] = nir_src_for_ssa(addr_to_offset(b, addr, addr_format));
911 }
912
913 if (nir_intrinsic_has_access(load))
914 nir_intrinsic_set_access(load, nir_intrinsic_access(intrin));
915
916 unsigned bit_size = intrin->dest.ssa.bit_size;
917 if (bit_size == 1) {
918 /* TODO: Make the native bool bit_size an option. */
919 bit_size = 32;
920 }
921
922 /* TODO: We should try and provide a better alignment. For OpenCL, we need
923 * to plumb the alignment through from SPIR-V when we have one.
924 */
925 nir_intrinsic_set_align(load, bit_size / 8, 0);
926
927 assert(intrin->dest.is_ssa);
928 load->num_components = num_components;
929 nir_ssa_dest_init(&load->instr, &load->dest, num_components,
930 bit_size, intrin->dest.ssa.name);
931
932 assert(bit_size % 8 == 0);
933
934 nir_ssa_def *result;
935 if (addr_format_needs_bounds_check(addr_format)) {
936 /* The Vulkan spec for robustBufferAccess gives us quite a few options
937 * as to what we can do with an OOB read. Unfortunately, returning
938 * undefined values isn't one of them so we return an actual zero.
939 */
940 nir_ssa_def *zero = nir_imm_zero(b, load->num_components, bit_size);
941
942 const unsigned load_size = (bit_size / 8) * load->num_components;
943 nir_push_if(b, addr_is_in_bounds(b, addr, addr_format, load_size));
944
945 nir_builder_instr_insert(b, &load->instr);
946
947 nir_pop_if(b, NULL);
948
949 result = nir_if_phi(b, &load->dest.ssa, zero);
950 } else {
951 nir_builder_instr_insert(b, &load->instr);
952 result = &load->dest.ssa;
953 }
954
955 if (intrin->dest.ssa.bit_size == 1) {
956 /* For shared, we can go ahead and use NIR's and/or the back-end's
957 * standard encoding for booleans rather than forcing a 0/1 boolean.
958 * This should save an instruction or two.
959 */
960 if (mode == nir_var_mem_shared ||
961 mode == nir_var_shader_temp ||
962 mode == nir_var_function_temp)
963 result = nir_b2b1(b, result);
964 else
965 result = nir_i2b(b, result);
966 }
967
968 return result;
969 }
970
971 static void
972 build_explicit_io_store(nir_builder *b, nir_intrinsic_instr *intrin,
973 nir_ssa_def *addr, nir_address_format addr_format,
974 nir_ssa_def *value, nir_component_mask_t write_mask)
975 {
976 nir_variable_mode mode = nir_src_as_deref(intrin->src[0])->mode;
977
978 nir_intrinsic_op op;
979 switch (mode) {
980 case nir_var_mem_ssbo:
981 if (addr_format_is_global(addr_format))
982 op = nir_intrinsic_store_global;
983 else
984 op = nir_intrinsic_store_ssbo;
985 break;
986 case nir_var_mem_global:
987 assert(addr_format_is_global(addr_format));
988 op = nir_intrinsic_store_global;
989 break;
990 case nir_var_mem_shared:
991 assert(addr_format_is_offset(addr_format));
992 op = nir_intrinsic_store_shared;
993 break;
994 case nir_var_shader_temp:
995 case nir_var_function_temp:
996 if (addr_format_is_offset(addr_format)) {
997 op = nir_intrinsic_store_scratch;
998 } else {
999 assert(addr_format_is_global(addr_format));
1000 op = nir_intrinsic_store_global;
1001 }
1002 break;
1003 default:
1004 unreachable("Unsupported explicit IO variable mode");
1005 }
1006
1007 nir_intrinsic_instr *store = nir_intrinsic_instr_create(b->shader, op);
1008
1009 if (value->bit_size == 1) {
1010 /* For shared, we can go ahead and use NIR's and/or the back-end's
1011 * standard encoding for booleans rather than forcing a 0/1 boolean.
1012 * This should save an instruction or two.
1013 *
1014 * TODO: Make the native bool bit_size an option.
1015 */
1016 if (mode == nir_var_mem_shared ||
1017 mode == nir_var_shader_temp ||
1018 mode == nir_var_function_temp)
1019 value = nir_b2b32(b, value);
1020 else
1021 value = nir_b2i(b, value, 32);
1022 }
1023
1024 store->src[0] = nir_src_for_ssa(value);
1025 if (addr_format_is_global(addr_format)) {
1026 store->src[1] = nir_src_for_ssa(addr_to_global(b, addr, addr_format));
1027 } else if (addr_format_is_offset(addr_format)) {
1028 assert(addr->num_components == 1);
1029 store->src[1] = nir_src_for_ssa(addr_to_offset(b, addr, addr_format));
1030 } else {
1031 store->src[1] = nir_src_for_ssa(addr_to_index(b, addr, addr_format));
1032 store->src[2] = nir_src_for_ssa(addr_to_offset(b, addr, addr_format));
1033 }
1034
1035 nir_intrinsic_set_write_mask(store, write_mask);
1036
1037 if (nir_intrinsic_has_access(store))
1038 nir_intrinsic_set_access(store, nir_intrinsic_access(intrin));
1039
1040 /* TODO: We should try and provide a better alignment. For OpenCL, we need
1041 * to plumb the alignment through from SPIR-V when we have one.
1042 */
1043 nir_intrinsic_set_align(store, value->bit_size / 8, 0);
1044
1045 assert(value->num_components == 1 ||
1046 value->num_components == intrin->num_components);
1047 store->num_components = value->num_components;
1048
1049 assert(value->bit_size % 8 == 0);
1050
1051 if (addr_format_needs_bounds_check(addr_format)) {
1052 const unsigned store_size = (value->bit_size / 8) * store->num_components;
1053 nir_push_if(b, addr_is_in_bounds(b, addr, addr_format, store_size));
1054
1055 nir_builder_instr_insert(b, &store->instr);
1056
1057 nir_pop_if(b, NULL);
1058 } else {
1059 nir_builder_instr_insert(b, &store->instr);
1060 }
1061 }
1062
1063 static nir_ssa_def *
1064 build_explicit_io_atomic(nir_builder *b, nir_intrinsic_instr *intrin,
1065 nir_ssa_def *addr, nir_address_format addr_format)
1066 {
1067 nir_variable_mode mode = nir_src_as_deref(intrin->src[0])->mode;
1068 const unsigned num_data_srcs =
1069 nir_intrinsic_infos[intrin->intrinsic].num_srcs - 1;
1070
1071 nir_intrinsic_op op;
1072 switch (mode) {
1073 case nir_var_mem_ssbo:
1074 if (addr_format_is_global(addr_format))
1075 op = global_atomic_for_deref(intrin->intrinsic);
1076 else
1077 op = ssbo_atomic_for_deref(intrin->intrinsic);
1078 break;
1079 case nir_var_mem_global:
1080 assert(addr_format_is_global(addr_format));
1081 op = global_atomic_for_deref(intrin->intrinsic);
1082 break;
1083 case nir_var_mem_shared:
1084 assert(addr_format_is_offset(addr_format));
1085 op = shared_atomic_for_deref(intrin->intrinsic);
1086 break;
1087 default:
1088 unreachable("Unsupported explicit IO variable mode");
1089 }
1090
1091 nir_intrinsic_instr *atomic = nir_intrinsic_instr_create(b->shader, op);
1092
1093 unsigned src = 0;
1094 if (addr_format_is_global(addr_format)) {
1095 atomic->src[src++] = nir_src_for_ssa(addr_to_global(b, addr, addr_format));
1096 } else if (addr_format_is_offset(addr_format)) {
1097 assert(addr->num_components == 1);
1098 atomic->src[src++] = nir_src_for_ssa(addr_to_offset(b, addr, addr_format));
1099 } else {
1100 atomic->src[src++] = nir_src_for_ssa(addr_to_index(b, addr, addr_format));
1101 atomic->src[src++] = nir_src_for_ssa(addr_to_offset(b, addr, addr_format));
1102 }
1103 for (unsigned i = 0; i < num_data_srcs; i++) {
1104 atomic->src[src++] = nir_src_for_ssa(intrin->src[1 + i].ssa);
1105 }
1106
1107 /* Global atomics don't have access flags because they assume that the
1108 * address may be non-uniform.
1109 */
1110 if (nir_intrinsic_has_access(atomic))
1111 nir_intrinsic_set_access(atomic, nir_intrinsic_access(intrin));
1112
1113 assert(intrin->dest.ssa.num_components == 1);
1114 nir_ssa_dest_init(&atomic->instr, &atomic->dest,
1115 1, intrin->dest.ssa.bit_size, intrin->dest.ssa.name);
1116
1117 assert(atomic->dest.ssa.bit_size % 8 == 0);
1118
1119 if (addr_format_needs_bounds_check(addr_format)) {
1120 const unsigned atomic_size = atomic->dest.ssa.bit_size / 8;
1121 nir_push_if(b, addr_is_in_bounds(b, addr, addr_format, atomic_size));
1122
1123 nir_builder_instr_insert(b, &atomic->instr);
1124
1125 nir_pop_if(b, NULL);
1126 return nir_if_phi(b, &atomic->dest.ssa,
1127 nir_ssa_undef(b, 1, atomic->dest.ssa.bit_size));
1128 } else {
1129 nir_builder_instr_insert(b, &atomic->instr);
1130 return &atomic->dest.ssa;
1131 }
1132 }
1133
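/** Compute the address of a deref in the given address format
 *
 * For variable derefs this produces the variable's base address (base_addr
 * is ignored); for array, ptr-as-array, and struct derefs it returns
 * base_addr, the already-lowered address of the parent deref, advanced by
 * the appropriate element or field offset.  Cast derefs return base_addr
 * unchanged.
 */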
1134 nir_ssa_def *
1135 nir_explicit_io_address_from_deref(nir_builder *b, nir_deref_instr *deref,
1136 nir_ssa_def *base_addr,
1137 nir_address_format addr_format)
1138 {
1139 assert(deref->dest.is_ssa);
1140 switch (deref->deref_type) {
1141 case nir_deref_type_var:
1142 assert(deref->mode & (nir_var_shader_in | nir_var_mem_shared |
1143 nir_var_shader_temp | nir_var_function_temp));
1144 if (addr_format_is_global(addr_format)) {
1145          assert(deref->mode & (nir_var_shader_temp | nir_var_function_temp));
1146 base_addr =
1147 nir_load_scratch_base_ptr(b, !(deref->mode & nir_var_shader_temp),
1148 nir_address_format_num_components(addr_format),
1149 nir_address_format_bit_size(addr_format));
1150 return build_addr_iadd_imm(b, base_addr, addr_format,
1151 deref->var->data.driver_location);
1152 } else {
1153 assert(deref->var->data.driver_location <= UINT32_MAX);
1154 return nir_imm_intN_t(b, deref->var->data.driver_location,
1155 deref->dest.ssa.bit_size);
1156 }
1157
1158 case nir_deref_type_array: {
1159 nir_deref_instr *parent = nir_deref_instr_parent(deref);
1160
1161 unsigned stride = glsl_get_explicit_stride(parent->type);
1162 if ((glsl_type_is_matrix(parent->type) &&
1163 glsl_matrix_type_is_row_major(parent->type)) ||
1164 (glsl_type_is_vector(parent->type) && stride == 0))
1165 stride = type_scalar_size_bytes(parent->type);
1166
1167 assert(stride > 0);
1168
1169 nir_ssa_def *index = nir_ssa_for_src(b, deref->arr.index, 1);
1170 index = nir_i2i(b, index, addr_get_offset_bit_size(base_addr, addr_format));
1171 return build_addr_iadd(b, base_addr, addr_format,
1172 nir_amul_imm(b, index, stride));
1173 }
1174
1175 case nir_deref_type_ptr_as_array: {
1176 nir_ssa_def *index = nir_ssa_for_src(b, deref->arr.index, 1);
1177 index = nir_i2i(b, index, addr_get_offset_bit_size(base_addr, addr_format));
1178 unsigned stride = nir_deref_instr_ptr_as_array_stride(deref);
1179 return build_addr_iadd(b, base_addr, addr_format,
1180 nir_amul_imm(b, index, stride));
1181 }
1182
1183 case nir_deref_type_array_wildcard:
1184 unreachable("Wildcards should be lowered by now");
1185 break;
1186
1187 case nir_deref_type_struct: {
1188 nir_deref_instr *parent = nir_deref_instr_parent(deref);
1189 int offset = glsl_get_struct_field_offset(parent->type,
1190 deref->strct.index);
1191 assert(offset >= 0);
1192 return build_addr_iadd_imm(b, base_addr, addr_format, offset);
1193 }
1194
1195 case nir_deref_type_cast:
1196 /* Nothing to do here */
1197 return base_addr;
1198 }
1199
1200 unreachable("Invalid NIR deref type");
1201 }
1202
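/** Lower a single load/store/atomic deref intrinsic to an explicit access
 *
 * The deref source of the intrinsic is assumed to have already been lowered
 * to the address given in addr.  Vector loads and stores whose explicit
 * stride is larger than the scalar size are split into per-component
 * accesses; the original intrinsic is removed once it has been replaced.
 */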
1203 void
1204 nir_lower_explicit_io_instr(nir_builder *b,
1205 nir_intrinsic_instr *intrin,
1206 nir_ssa_def *addr,
1207 nir_address_format addr_format)
1208 {
1209 b->cursor = nir_after_instr(&intrin->instr);
1210
1211 nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
1212 unsigned vec_stride = glsl_get_explicit_stride(deref->type);
1213 unsigned scalar_size = type_scalar_size_bytes(deref->type);
1214 assert(vec_stride == 0 || glsl_type_is_vector(deref->type));
1215 assert(vec_stride == 0 || vec_stride >= scalar_size);
1216
1217 if (intrin->intrinsic == nir_intrinsic_load_deref) {
1218 nir_ssa_def *value;
1219 if (vec_stride > scalar_size) {
1220 nir_ssa_def *comps[4] = { NULL, };
1221 for (unsigned i = 0; i < intrin->num_components; i++) {
1222 nir_ssa_def *comp_addr = build_addr_iadd_imm(b, addr, addr_format,
1223 vec_stride * i);
1224 comps[i] = build_explicit_io_load(b, intrin, comp_addr,
1225 addr_format, 1);
1226 }
1227 value = nir_vec(b, comps, intrin->num_components);
1228 } else {
1229 value = build_explicit_io_load(b, intrin, addr, addr_format,
1230 intrin->num_components);
1231 }
1232 nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(value));
1233 } else if (intrin->intrinsic == nir_intrinsic_store_deref) {
1234 assert(intrin->src[1].is_ssa);
1235 nir_ssa_def *value = intrin->src[1].ssa;
1236 nir_component_mask_t write_mask = nir_intrinsic_write_mask(intrin);
1237 if (vec_stride > scalar_size) {
1238 for (unsigned i = 0; i < intrin->num_components; i++) {
1239 if (!(write_mask & (1 << i)))
1240 continue;
1241
1242 nir_ssa_def *comp_addr = build_addr_iadd_imm(b, addr, addr_format,
1243 vec_stride * i);
1244 build_explicit_io_store(b, intrin, comp_addr, addr_format,
1245 nir_channel(b, value, i), 1);
1246 }
1247 } else {
1248 build_explicit_io_store(b, intrin, addr, addr_format,
1249 value, write_mask);
1250 }
1251 } else {
1252 nir_ssa_def *value =
1253 build_explicit_io_atomic(b, intrin, addr, addr_format);
1254 nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(value));
1255 }
1256
1257 nir_instr_remove(&intrin->instr);
1258 }
1259
1260 static void
1261 lower_explicit_io_deref(nir_builder *b, nir_deref_instr *deref,
1262 nir_address_format addr_format)
1263 {
1264 /* Just delete the deref if it's not used. We can't use
1265 * nir_deref_instr_remove_if_unused here because it may remove more than
1266 * one deref which could break our list walking since we walk the list
1267 * backwards.
1268 */
1269 assert(list_is_empty(&deref->dest.ssa.if_uses));
1270 if (list_is_empty(&deref->dest.ssa.uses)) {
1271 nir_instr_remove(&deref->instr);
1272 return;
1273 }
1274
1275 b->cursor = nir_after_instr(&deref->instr);
1276
1277 nir_ssa_def *base_addr = NULL;
1278 if (deref->deref_type != nir_deref_type_var) {
1279 assert(deref->parent.is_ssa);
1280 base_addr = deref->parent.ssa;
1281 }
1282
1283 nir_ssa_def *addr = nir_explicit_io_address_from_deref(b, deref, base_addr,
1284 addr_format);
1285
1286 nir_instr_remove(&deref->instr);
1287 nir_ssa_def_rewrite_uses(&deref->dest.ssa, nir_src_for_ssa(addr));
1288 }
1289
1290 static void
1291 lower_explicit_io_access(nir_builder *b, nir_intrinsic_instr *intrin,
1292 nir_address_format addr_format)
1293 {
1294 assert(intrin->src[0].is_ssa);
1295 nir_lower_explicit_io_instr(b, intrin, intrin->src[0].ssa, addr_format);
1296 }
1297
1298 static void
1299 lower_explicit_io_array_length(nir_builder *b, nir_intrinsic_instr *intrin,
1300 nir_address_format addr_format)
1301 {
1302 b->cursor = nir_after_instr(&intrin->instr);
1303
1304 nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
1305
1306 assert(glsl_type_is_array(deref->type));
1307 assert(glsl_get_length(deref->type) == 0);
1308 unsigned stride = glsl_get_explicit_stride(deref->type);
1309 assert(stride > 0);
1310
1311 nir_ssa_def *addr = &deref->dest.ssa;
1312 nir_ssa_def *index = addr_to_index(b, addr, addr_format);
1313 nir_ssa_def *offset = addr_to_offset(b, addr, addr_format);
1314
1315 nir_intrinsic_instr *bsize =
1316 nir_intrinsic_instr_create(b->shader, nir_intrinsic_get_buffer_size);
1317 bsize->src[0] = nir_src_for_ssa(index);
1318 nir_ssa_dest_init(&bsize->instr, &bsize->dest, 1, 32, NULL);
1319 nir_builder_instr_insert(b, &bsize->instr);
1320
1321 nir_ssa_def *arr_size =
1322 nir_idiv(b, nir_isub(b, &bsize->dest.ssa, offset),
1323 nir_imm_int(b, stride));
1324
1325 nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(arr_size));
1326 nir_instr_remove(&intrin->instr);
1327 }
1328
1329 static bool
1330 nir_lower_explicit_io_impl(nir_function_impl *impl, nir_variable_mode modes,
1331 nir_address_format addr_format)
1332 {
1333 bool progress = false;
1334
1335 nir_builder b;
1336 nir_builder_init(&b, impl);
1337
1338 /* Walk in reverse order so that we can see the full deref chain when we
1339 * lower the access operations. We lower them assuming that the derefs
1340 * will be turned into address calculations later.
1341 */
1342 nir_foreach_block_reverse(block, impl) {
1343 nir_foreach_instr_reverse_safe(instr, block) {
1344 switch (instr->type) {
1345 case nir_instr_type_deref: {
1346 nir_deref_instr *deref = nir_instr_as_deref(instr);
1347 if (deref->mode & modes) {
1348 lower_explicit_io_deref(&b, deref, addr_format);
1349 progress = true;
1350 }
1351 break;
1352 }
1353
1354 case nir_instr_type_intrinsic: {
1355 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
1356 switch (intrin->intrinsic) {
1357 case nir_intrinsic_load_deref:
1358 case nir_intrinsic_store_deref:
1359 case nir_intrinsic_deref_atomic_add:
1360 case nir_intrinsic_deref_atomic_imin:
1361 case nir_intrinsic_deref_atomic_umin:
1362 case nir_intrinsic_deref_atomic_imax:
1363 case nir_intrinsic_deref_atomic_umax:
1364 case nir_intrinsic_deref_atomic_and:
1365 case nir_intrinsic_deref_atomic_or:
1366 case nir_intrinsic_deref_atomic_xor:
1367 case nir_intrinsic_deref_atomic_exchange:
1368 case nir_intrinsic_deref_atomic_comp_swap:
1369 case nir_intrinsic_deref_atomic_fadd:
1370 case nir_intrinsic_deref_atomic_fmin:
1371 case nir_intrinsic_deref_atomic_fmax:
1372 case nir_intrinsic_deref_atomic_fcomp_swap: {
1373 nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
1374 if (deref->mode & modes) {
1375 lower_explicit_io_access(&b, intrin, addr_format);
1376 progress = true;
1377 }
1378 break;
1379 }
1380
1381 case nir_intrinsic_deref_buffer_array_length: {
1382 nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
1383 if (deref->mode & modes) {
1384 lower_explicit_io_array_length(&b, intrin, addr_format);
1385 progress = true;
1386 }
1387 break;
1388 }
1389
1390 default:
1391 break;
1392 }
1393 break;
1394 }
1395
1396 default:
1397 /* Nothing to do */
1398 break;
1399 }
1400 }
1401 }
1402
1403 if (progress) {
1404 nir_metadata_preserve(impl, nir_metadata_block_index |
1405 nir_metadata_dominance);
1406 }
1407
1408 return progress;
1409 }
1410
1411 /** Lower explicitly laid out I/O access to byte offset/address intrinsics
1412 *
1413 * This pass is intended to be used for any I/O which touches memory external
1414 * to the shader or which is directly visible to the client. It requires that
1415  * all data types in the given modes have explicit stride/offset decorations
1416 * to tell it exactly how to calculate the offset/address for the given load,
1417 * store, or atomic operation. If the offset/stride information does not come
1418 * from the client explicitly (as with shared variables in GL or Vulkan),
1419 * nir_lower_vars_to_explicit_types() can be used to add them.
1420 *
1421 * Unlike nir_lower_io, this pass is fully capable of handling incomplete
1422 * pointer chains which may contain cast derefs. It does so by walking the
1423 * deref chain backwards and simply replacing each deref, one at a time, with
1424 * the appropriate address calculation. The pass takes a nir_address_format
1425 * parameter which describes how the offset or address is to be represented
1426 * during calculations. By ensuring that the address is always in a
1427 * consistent format, pointers can safely be conjured from thin air by the
1428 * driver, stored to variables, passed through phis, etc.
1429 *
1430 * The one exception to the simple algorithm described above is for handling
1431 * row-major matrices in which case we may look down one additional level of
1432 * the deref chain.
1433 */
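/* A minimal usage sketch (illustrative only; the modes, address formats, and
 * the shared_type_info size/align callback are driver policy rather than
 * anything mandated by this pass):
 *
 *    nir_lower_vars_to_explicit_types(nir, nir_var_mem_shared,
 *                                     shared_type_info);
 *    nir_lower_explicit_io(nir, nir_var_mem_shared,
 *                          nir_address_format_32bit_offset);
 *    nir_lower_explicit_io(nir, nir_var_mem_ubo | nir_var_mem_ssbo,
 *                          nir_address_format_32bit_index_offset);
 */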
1434 bool
1435 nir_lower_explicit_io(nir_shader *shader, nir_variable_mode modes,
1436 nir_address_format addr_format)
1437 {
1438 bool progress = false;
1439
1440 nir_foreach_function(function, shader) {
1441 if (function->impl &&
1442 nir_lower_explicit_io_impl(function->impl, modes, addr_format))
1443 progress = true;
1444 }
1445
1446 return progress;
1447 }
1448
1449 static bool
1450 nir_lower_vars_to_explicit_types_impl(nir_function_impl *impl,
1451 nir_variable_mode modes,
1452 glsl_type_size_align_func type_info)
1453 {
1454 bool progress = false;
1455
1456 nir_foreach_block(block, impl) {
1457 nir_foreach_instr(instr, block) {
1458 if (instr->type != nir_instr_type_deref)
1459 continue;
1460
1461 nir_deref_instr *deref = nir_instr_as_deref(instr);
1462 if (!(deref->mode & modes))
1463 continue;
1464
1465 unsigned size, alignment;
1466 const struct glsl_type *new_type =
1467 glsl_get_explicit_type_for_size_align(deref->type, type_info, &size, &alignment);
1468 if (new_type != deref->type) {
1469 progress = true;
1470 deref->type = new_type;
1471 }
1472 if (deref->deref_type == nir_deref_type_cast) {
1473 /* See also glsl_type::get_explicit_type_for_size_align() */
1474 unsigned new_stride = align(size, alignment);
1475 if (new_stride != deref->cast.ptr_stride) {
1476 deref->cast.ptr_stride = new_stride;
1477 progress = true;
1478 }
1479 }
1480 }
1481 }
1482
1483 if (progress) {
1484 nir_metadata_preserve(impl, nir_metadata_block_index |
1485 nir_metadata_dominance |
1486 nir_metadata_live_ssa_defs |
1487 nir_metadata_loop_analysis);
1488 }
1489
1490 return progress;
1491 }
1492
1493 static bool
1494 lower_vars_to_explicit(nir_shader *shader,
1495 struct exec_list *vars, nir_variable_mode mode,
1496 glsl_type_size_align_func type_info)
1497 {
1498 bool progress = false;
1499 unsigned offset;
1500 switch (mode) {
1501 case nir_var_function_temp:
1502 case nir_var_shader_temp:
1503 offset = shader->scratch_size;
1504 break;
1505 case nir_var_mem_shared:
1506 offset = 0;
1507 break;
1508 default:
1509 unreachable("Unsupported mode");
1510 }
1511 nir_foreach_variable_in_list(var, vars) {
1512 if (var->data.mode != mode)
1513 continue;
1514
1515 unsigned size, align;
1516 const struct glsl_type *explicit_type =
1517 glsl_get_explicit_type_for_size_align(var->type, type_info, &size, &align);
1518
1519 if (explicit_type != var->type) {
1520 progress = true;
1521 var->type = explicit_type;
1522 }
1523
1524 var->data.driver_location = ALIGN_POT(offset, align);
1525 offset = var->data.driver_location + size;
1526 }
1527
1528 switch (mode) {
1529 case nir_var_shader_temp:
1530 case nir_var_function_temp:
1531 shader->scratch_size = offset;
1532 break;
1533 case nir_var_mem_shared:
1534 shader->info.cs.shared_size = offset;
1535 shader->num_shared = offset;
1536 break;
1537 default:
1538 unreachable("Unsupported mode");
1539 }
1540
1541 return progress;
1542 }
1543
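/** Give variables in the given modes explicitly laid out types and offsets
 *
 * Rewrites variable and deref types to explicitly sized/aligned types using
 * the given callback, assigns byte offsets in data.driver_location, and
 * updates the shader's scratch size and shared-memory size accordingly.
 */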
1544 bool
1545 nir_lower_vars_to_explicit_types(nir_shader *shader,
1546 nir_variable_mode modes,
1547 glsl_type_size_align_func type_info)
1548 {
1549 /* TODO: Situations which need to be handled to support more modes:
1550 * - row-major matrices
1551 * - compact shader inputs/outputs
1552 * - interface types
1553 */
1554 ASSERTED nir_variable_mode supported = nir_var_mem_shared |
1555 nir_var_shader_temp | nir_var_function_temp;
1556 assert(!(modes & ~supported) && "unsupported");
1557
1558 bool progress = false;
1559
1560 if (modes & nir_var_mem_shared)
1561 progress |= lower_vars_to_explicit(shader, &shader->variables, nir_var_mem_shared, type_info);
1562 if (modes & nir_var_shader_temp)
1563 progress |= lower_vars_to_explicit(shader, &shader->variables, nir_var_shader_temp, type_info);
1564
1565 nir_foreach_function(function, shader) {
1566 if (function->impl) {
1567 if (modes & nir_var_function_temp)
1568 progress |= lower_vars_to_explicit(shader, &function->impl->locals, nir_var_function_temp, type_info);
1569
1570 progress |= nir_lower_vars_to_explicit_types_impl(function->impl, modes, type_info);
1571 }
1572 }
1573
1574 return progress;
1575 }
1576
1577 /**
1578 * Return the offset source for a load/store intrinsic.
1579 */
1580 nir_src *
1581 nir_get_io_offset_src(nir_intrinsic_instr *instr)
1582 {
1583 switch (instr->intrinsic) {
1584 case nir_intrinsic_load_input:
1585 case nir_intrinsic_load_output:
1586 case nir_intrinsic_load_shared:
1587 case nir_intrinsic_load_uniform:
1588 case nir_intrinsic_load_global:
1589 case nir_intrinsic_load_scratch:
1590 case nir_intrinsic_load_fs_input_interp_deltas:
1591 return &instr->src[0];
1592 case nir_intrinsic_load_ubo:
1593 case nir_intrinsic_load_ssbo:
1594 case nir_intrinsic_load_input_vertex:
1595 case nir_intrinsic_load_per_vertex_input:
1596 case nir_intrinsic_load_per_vertex_output:
1597 case nir_intrinsic_load_interpolated_input:
1598 case nir_intrinsic_store_output:
1599 case nir_intrinsic_store_shared:
1600 case nir_intrinsic_store_global:
1601 case nir_intrinsic_store_scratch:
1602 case nir_intrinsic_ssbo_atomic_add:
1603 case nir_intrinsic_ssbo_atomic_imin:
1604 case nir_intrinsic_ssbo_atomic_umin:
1605 case nir_intrinsic_ssbo_atomic_imax:
1606 case nir_intrinsic_ssbo_atomic_umax:
1607 case nir_intrinsic_ssbo_atomic_and:
1608 case nir_intrinsic_ssbo_atomic_or:
1609 case nir_intrinsic_ssbo_atomic_xor:
1610 case nir_intrinsic_ssbo_atomic_exchange:
1611 case nir_intrinsic_ssbo_atomic_comp_swap:
1612 case nir_intrinsic_ssbo_atomic_fadd:
1613 case nir_intrinsic_ssbo_atomic_fmin:
1614 case nir_intrinsic_ssbo_atomic_fmax:
1615 case nir_intrinsic_ssbo_atomic_fcomp_swap:
1616 return &instr->src[1];
1617 case nir_intrinsic_store_ssbo:
1618 case nir_intrinsic_store_per_vertex_output:
1619 return &instr->src[2];
1620 default:
1621 return NULL;
1622 }
1623 }
1624
1625 /**
1626 * Return the vertex index source for a load/store per_vertex intrinsic.
1627 */
1628 nir_src *
1629 nir_get_io_vertex_index_src(nir_intrinsic_instr *instr)
1630 {
1631 switch (instr->intrinsic) {
1632 case nir_intrinsic_load_per_vertex_input:
1633 case nir_intrinsic_load_per_vertex_output:
1634 return &instr->src[0];
1635 case nir_intrinsic_store_per_vertex_output:
1636 return &instr->src[1];
1637 default:
1638 return NULL;
1639 }
1640 }
1641
1642 /**
1643  * Return the numeric constant that identifies a NULL pointer for each address
1644 * format.
1645 */
1646 const nir_const_value *
1647 nir_address_format_null_value(nir_address_format addr_format)
1648 {
1649 const static nir_const_value null_values[][NIR_MAX_VEC_COMPONENTS] = {
1650 [nir_address_format_32bit_global] = {{0}},
1651 [nir_address_format_64bit_global] = {{0}},
1652 [nir_address_format_64bit_bounded_global] = {{0}},
1653 [nir_address_format_32bit_index_offset] = {{.u32 = ~0}, {.u32 = ~0}},
1654 [nir_address_format_32bit_index_offset_pack64] = {{.u64 = ~0ull}},
1655 [nir_address_format_vec2_index_32bit_offset] = {{.u32 = ~0}, {.u32 = ~0}, {.u32 = ~0}},
1656 [nir_address_format_32bit_offset] = {{.u32 = ~0}},
1657 [nir_address_format_32bit_offset_as_64bit] = {{.u64 = ~0ull}},
1658 [nir_address_format_logical] = {{.u32 = ~0}},
1659 };
1660
1661 assert(addr_format < ARRAY_SIZE(null_values));
1662 return null_values[addr_format];
1663 }
1664
1665 nir_ssa_def *
1666 nir_build_addr_ieq(nir_builder *b, nir_ssa_def *addr0, nir_ssa_def *addr1,
1667 nir_address_format addr_format)
1668 {
1669 switch (addr_format) {
1670 case nir_address_format_32bit_global:
1671 case nir_address_format_64bit_global:
1672 case nir_address_format_64bit_bounded_global:
1673 case nir_address_format_32bit_index_offset:
1674 case nir_address_format_vec2_index_32bit_offset:
1675 case nir_address_format_32bit_offset:
1676 return nir_ball_iequal(b, addr0, addr1);
1677
1678 case nir_address_format_32bit_offset_as_64bit:
1679 assert(addr0->num_components == 1 && addr1->num_components == 1);
1680 return nir_ieq(b, nir_u2u32(b, addr0), nir_u2u32(b, addr1));
1681
1682 case nir_address_format_32bit_index_offset_pack64:
1683 assert(addr0->num_components == 1 && addr1->num_components == 1);
1684 return nir_ball_iequal(b, nir_unpack_64_2x32(b, addr0), nir_unpack_64_2x32(b, addr1));
1685
1686 case nir_address_format_logical:
1687 unreachable("Unsupported address format");
1688 }
1689
1690 unreachable("Invalid address format");
1691 }
1692
1693 nir_ssa_def *
1694 nir_build_addr_isub(nir_builder *b, nir_ssa_def *addr0, nir_ssa_def *addr1,
1695 nir_address_format addr_format)
1696 {
1697 switch (addr_format) {
1698 case nir_address_format_32bit_global:
1699 case nir_address_format_64bit_global:
1700 case nir_address_format_32bit_offset:
1701 case nir_address_format_32bit_index_offset_pack64:
1702 assert(addr0->num_components == 1);
1703 assert(addr1->num_components == 1);
1704 return nir_isub(b, addr0, addr1);
1705
1706 case nir_address_format_32bit_offset_as_64bit:
1707 assert(addr0->num_components == 1);
1708 assert(addr1->num_components == 1);
1709 return nir_u2u64(b, nir_isub(b, nir_u2u32(b, addr0), nir_u2u32(b, addr1)));
1710
1711 case nir_address_format_64bit_bounded_global:
1712 return nir_isub(b, addr_to_global(b, addr0, addr_format),
1713 addr_to_global(b, addr1, addr_format));
1714
1715 case nir_address_format_32bit_index_offset:
1716 assert(addr0->num_components == 2);
1717 assert(addr1->num_components == 2);
1718 /* Assume the same buffer index. */
1719 return nir_isub(b, nir_channel(b, addr0, 1), nir_channel(b, addr1, 1));
1720
1721 case nir_address_format_vec2_index_32bit_offset:
1722 assert(addr0->num_components == 3);
1723 assert(addr1->num_components == 3);
1724 /* Assume the same buffer index. */
1725 return nir_isub(b, nir_channel(b, addr0, 2), nir_channel(b, addr1, 2));
1726
1727 case nir_address_format_logical:
1728 unreachable("Unsupported address format");
1729 }
1730
1731 unreachable("Invalid address format");
1732 }
1733
1734 static bool
1735 is_input(nir_intrinsic_instr *intrin)
1736 {
1737 return intrin->intrinsic == nir_intrinsic_load_input ||
1738 intrin->intrinsic == nir_intrinsic_load_per_vertex_input ||
1739 intrin->intrinsic == nir_intrinsic_load_interpolated_input ||
1740 intrin->intrinsic == nir_intrinsic_load_fs_input_interp_deltas;
1741 }
1742
1743 static bool
1744 is_output(nir_intrinsic_instr *intrin)
1745 {
1746 return intrin->intrinsic == nir_intrinsic_load_output ||
1747 intrin->intrinsic == nir_intrinsic_load_per_vertex_output ||
1748 intrin->intrinsic == nir_intrinsic_store_output ||
1749 intrin->intrinsic == nir_intrinsic_store_per_vertex_output;
1750 }
1751
1752
1753 /**
1754 * This pass adds constant offsets to instr->const_index[0] for input/output
1755 * intrinsics, and resets the offset source to 0. Non-constant offsets remain
1756 * unchanged - since we don't know what part of a compound variable is
1757 * accessed, we allocate storage for the entire thing. For drivers that use
1758 * nir_lower_io_to_temporaries() before nir_lower_io(), this guarantees that
1759 * the offset source will be 0, so that they don't have to add it in manually.
1760 */
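/* For example (illustrative), load_input with base=1 and a constant offset
 * source of 2 becomes load_input with base=3 and an offset source of 0; an
 * access with a non-constant offset source is left untouched.
 */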
1761
1762 static bool
1763 add_const_offset_to_base_block(nir_block *block, nir_builder *b,
1764 nir_variable_mode mode)
1765 {
1766 bool progress = false;
1767 nir_foreach_instr_safe(instr, block) {
1768 if (instr->type != nir_instr_type_intrinsic)
1769 continue;
1770
1771 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
1772
1773 if ((mode == nir_var_shader_in && is_input(intrin)) ||
1774 (mode == nir_var_shader_out && is_output(intrin))) {
1775 nir_src *offset = nir_get_io_offset_src(intrin);
1776
1777 if (nir_src_is_const(*offset)) {
1778 intrin->const_index[0] += nir_src_as_uint(*offset);
1779 b->cursor = nir_before_instr(&intrin->instr);
1780 nir_instr_rewrite_src(&intrin->instr, offset,
1781 nir_src_for_ssa(nir_imm_int(b, 0)));
1782 progress = true;
1783 }
1784 }
1785 }
1786
1787 return progress;
1788 }
1789
1790 bool
1791 nir_io_add_const_offset_to_base(nir_shader *nir, nir_variable_mode mode)
1792 {
1793 bool progress = false;
1794
1795 nir_foreach_function(f, nir) {
1796 if (f->impl) {
1797 nir_builder b;
1798 nir_builder_init(&b, f->impl);
1799 nir_foreach_block(block, f->impl) {
1800 progress |= add_const_offset_to_base_block(block, &b, mode);
1801 }
1802 }
1803 }
1804
1805 return progress;
1806 }
1807