[mesa.git] src/compiler/nir/nir_lower_io.c @ b004c62b81e25fc121011f2ef51fcb71f6bdbf90
1 /*
2 * Copyright © 2014 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Connor Abbott (cwabbott0@gmail.com)
25 * Jason Ekstrand (jason@jlekstrand.net)
26 *
27 */
28
29 /*
30 * This lowering pass converts references to input/output variables with
31 * loads/stores to actual input/output intrinsics.
32 */
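
/*
 * As a rough sketch (the exact output depends on the driver's type_size
 * callback and compiler options; the SSA names below are made up), a
 * fragment-shader input read such as
 *
 *    vec1 32 ssa_1 = deref_var &color (shader_in vec4)
 *    vec4 32 ssa_2 = intrinsic load_deref (ssa_1) ()
 *
 * becomes either
 *
 *    vec4 32 ssa_2 = intrinsic load_input (ssa_0) (base, component, type)
 *
 * or, when use_interpolated_input_intrinsics is set and the input is not
 * flat,
 *
 *    vec2 32 ssa_b = intrinsic load_barycentric_pixel () (interp_mode)
 *    vec4 32 ssa_2 = intrinsic load_interpolated_input (ssa_b, ssa_0) (...)
 *
 * where ssa_0 is the offset computed from the deref chain by get_io_offset().
 */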
33
34 #include "nir.h"
35 #include "nir_builder.h"
36 #include "nir_deref.h"
37
38 #include "util/u_math.h"
39
40 struct lower_io_state {
41 void *dead_ctx;
42 nir_builder builder;
43 int (*type_size)(const struct glsl_type *type, bool);
44 nir_variable_mode modes;
45 nir_lower_io_options options;
46 };
47
48 static nir_intrinsic_op
49 ssbo_atomic_for_deref(nir_intrinsic_op deref_op)
50 {
51 switch (deref_op) {
52 #define OP(O) case nir_intrinsic_deref_##O: return nir_intrinsic_ssbo_##O;
53 OP(atomic_exchange)
54 OP(atomic_comp_swap)
55 OP(atomic_add)
56 OP(atomic_imin)
57 OP(atomic_umin)
58 OP(atomic_imax)
59 OP(atomic_umax)
60 OP(atomic_and)
61 OP(atomic_or)
62 OP(atomic_xor)
63 OP(atomic_fadd)
64 OP(atomic_fmin)
65 OP(atomic_fmax)
66 OP(atomic_fcomp_swap)
67 #undef OP
68 default:
69 unreachable("Invalid SSBO atomic");
70 }
71 }
72
73 static nir_intrinsic_op
74 global_atomic_for_deref(nir_intrinsic_op deref_op)
75 {
76 switch (deref_op) {
77 #define OP(O) case nir_intrinsic_deref_##O: return nir_intrinsic_global_##O;
78 OP(atomic_exchange)
79 OP(atomic_comp_swap)
80 OP(atomic_add)
81 OP(atomic_imin)
82 OP(atomic_umin)
83 OP(atomic_imax)
84 OP(atomic_umax)
85 OP(atomic_and)
86 OP(atomic_or)
87 OP(atomic_xor)
88 OP(atomic_fadd)
89 OP(atomic_fmin)
90 OP(atomic_fmax)
91 OP(atomic_fcomp_swap)
92 #undef OP
93 default:
94       unreachable("Invalid global atomic");
95 }
96 }
97
98 static nir_intrinsic_op
99 shared_atomic_for_deref(nir_intrinsic_op deref_op)
100 {
101 switch (deref_op) {
102 #define OP(O) case nir_intrinsic_deref_##O: return nir_intrinsic_shared_##O;
103 OP(atomic_exchange)
104 OP(atomic_comp_swap)
105 OP(atomic_add)
106 OP(atomic_imin)
107 OP(atomic_umin)
108 OP(atomic_imax)
109 OP(atomic_umax)
110 OP(atomic_and)
111 OP(atomic_or)
112 OP(atomic_xor)
113 OP(atomic_fadd)
114 OP(atomic_fmin)
115 OP(atomic_fmax)
116 OP(atomic_fcomp_swap)
117 #undef OP
118 default:
119 unreachable("Invalid shared atomic");
120 }
121 }
122
123 void
124 nir_assign_var_locations(struct exec_list *var_list, unsigned *size,
125 int (*type_size)(const struct glsl_type *, bool))
126 {
127 unsigned location = 0;
128
129 nir_foreach_variable(var, var_list) {
130 /*
131 * UBOs have their own address spaces, so don't count them towards the
132 * number of global uniforms
133 */
134 if (var->data.mode == nir_var_mem_ubo || var->data.mode == nir_var_mem_ssbo)
135 continue;
136
137 var->data.driver_location = location;
138 bool bindless_type_size = var->data.mode == nir_var_shader_in ||
139 var->data.mode == nir_var_shader_out ||
140 var->data.bindless;
141 location += type_size(var->type, bindless_type_size);
142 }
143
144 *size = location;
145 }
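
/*
 * Illustrative usage sketch (not used by this file; the example_* names are
 * hypothetical): a vec4-slot-counting type_size callback and a helper that
 * packs all shader inputs into consecutive driver locations.
 */
static int
example_type_size_vec4(const struct glsl_type *type, bool bindless)
{
   /* One driver location per vec4 attribute slot; this simple example does
    * not treat bindless types specially.
    */
   (void)bindless;
   return glsl_count_attribute_slots(type, false);
}

static void
example_assign_input_locations(nir_shader *shader)
{
   unsigned num_input_slots = 0;
   nir_assign_var_locations(&shader->inputs, &num_input_slots,
                            example_type_size_vec4);
   shader->num_inputs = num_input_slots;
}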
146
147 /**
148 * Return true if the given variable is a per-vertex input/output array.
149 * (such as geometry shader inputs).
150 */
151 bool
152 nir_is_per_vertex_io(const nir_variable *var, gl_shader_stage stage)
153 {
154 if (var->data.patch || !glsl_type_is_array(var->type))
155 return false;
156
157 if (var->data.mode == nir_var_shader_in)
158 return stage == MESA_SHADER_GEOMETRY ||
159 stage == MESA_SHADER_TESS_CTRL ||
160 stage == MESA_SHADER_TESS_EVAL;
161
162 if (var->data.mode == nir_var_shader_out)
163 return stage == MESA_SHADER_TESS_CTRL;
164
165 return false;
166 }
167
168 static nir_ssa_def *
169 get_io_offset(nir_builder *b, nir_deref_instr *deref,
170 nir_ssa_def **vertex_index,
171 int (*type_size)(const struct glsl_type *, bool),
172 unsigned *component, bool bts)
173 {
174 nir_deref_path path;
175 nir_deref_path_init(&path, deref, NULL);
176
177 assert(path.path[0]->deref_type == nir_deref_type_var);
178 nir_deref_instr **p = &path.path[1];
179
180    /* For per-vertex input arrays (e.g. geometry shader inputs), keep the
181 * outermost array index separate. Process the rest normally.
182 */
183 if (vertex_index != NULL) {
184 assert((*p)->deref_type == nir_deref_type_array);
185 *vertex_index = nir_ssa_for_src(b, (*p)->arr.index, 1);
186 p++;
187 }
188
189 if (path.path[0]->var->data.compact) {
190 assert((*p)->deref_type == nir_deref_type_array);
191 assert(glsl_type_is_scalar((*p)->type));
192
193 /* We always lower indirect dereferences for "compact" array vars. */
194 const unsigned index = nir_src_as_uint((*p)->arr.index);
195 const unsigned total_offset = *component + index;
196 const unsigned slot_offset = total_offset / 4;
197 *component = total_offset % 4;
198 return nir_imm_int(b, type_size(glsl_vec4_type(), bts) * slot_offset);
199 }
200
201 /* Just emit code and let constant-folding go to town */
202 nir_ssa_def *offset = nir_imm_int(b, 0);
203
204 for (; *p; p++) {
205 if ((*p)->deref_type == nir_deref_type_array) {
206 unsigned size = type_size((*p)->type, bts);
207
208 nir_ssa_def *mul =
209 nir_amul_imm(b, nir_ssa_for_src(b, (*p)->arr.index, 1), size);
210
211 offset = nir_iadd(b, offset, mul);
212 } else if ((*p)->deref_type == nir_deref_type_struct) {
213 /* p starts at path[1], so this is safe */
214 nir_deref_instr *parent = *(p - 1);
215
216 unsigned field_offset = 0;
217 for (unsigned i = 0; i < (*p)->strct.index; i++) {
218 field_offset += type_size(glsl_get_struct_field(parent->type, i), bts);
219 }
220 offset = nir_iadd_imm(b, offset, field_offset);
221 } else {
222 unreachable("Unsupported deref type");
223 }
224 }
225
226 nir_deref_path_finish(&path);
227
228 return offset;
229 }
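
/*
 * Worked example for get_io_offset() above, assuming a type_size callback
 * that counts vec4 slots: for "in vec4 a[3][2]" accessed as a[i][1], the
 * deref path is var -> array(i) -> array(1), so the loop emits
 *
 *    offset = 0 + i * type_size(vec4[2]) + 1 * type_size(vec4)
 *           = i * 2 + 1
 *
 * in vec4 slots, which constant-folds whenever i is an immediate.
 */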
230
231 static nir_ssa_def *
232 emit_load(struct lower_io_state *state,
233 nir_ssa_def *vertex_index, nir_variable *var, nir_ssa_def *offset,
234 unsigned component, unsigned num_components, unsigned bit_size,
235 nir_alu_type type)
236 {
237 nir_builder *b = &state->builder;
238 const nir_shader *nir = b->shader;
239 nir_variable_mode mode = var->data.mode;
240 nir_ssa_def *barycentric = NULL;
241
242 nir_intrinsic_op op;
243 switch (mode) {
244 case nir_var_shader_in:
245 if (nir->info.stage == MESA_SHADER_FRAGMENT &&
246 nir->options->use_interpolated_input_intrinsics &&
247 var->data.interpolation != INTERP_MODE_FLAT) {
248 if (var->data.interpolation == INTERP_MODE_EXPLICIT) {
249 assert(vertex_index != NULL);
250 op = nir_intrinsic_load_input_vertex;
251 } else {
252 assert(vertex_index == NULL);
253
254 nir_intrinsic_op bary_op;
255 if (var->data.sample ||
256 (state->options & nir_lower_io_force_sample_interpolation))
257 bary_op = nir_intrinsic_load_barycentric_sample;
258 else if (var->data.centroid)
259 bary_op = nir_intrinsic_load_barycentric_centroid;
260 else
261 bary_op = nir_intrinsic_load_barycentric_pixel;
262
263 barycentric = nir_load_barycentric(&state->builder, bary_op,
264 var->data.interpolation);
265 op = nir_intrinsic_load_interpolated_input;
266 }
267 } else {
268 op = vertex_index ? nir_intrinsic_load_per_vertex_input :
269 nir_intrinsic_load_input;
270 }
271 break;
272 case nir_var_shader_out:
273 op = vertex_index ? nir_intrinsic_load_per_vertex_output :
274 nir_intrinsic_load_output;
275 break;
276 case nir_var_uniform:
277 op = nir_intrinsic_load_uniform;
278 break;
279 default:
280 unreachable("Unknown variable mode");
281 }
282
283 nir_intrinsic_instr *load =
284 nir_intrinsic_instr_create(state->builder.shader, op);
285 load->num_components = num_components;
286
287 nir_intrinsic_set_base(load, var->data.driver_location);
288 if (mode == nir_var_shader_in || mode == nir_var_shader_out)
289 nir_intrinsic_set_component(load, component);
290
291 if (load->intrinsic == nir_intrinsic_load_uniform)
292 nir_intrinsic_set_range(load,
293 state->type_size(var->type, var->data.bindless));
294
295 if (load->intrinsic == nir_intrinsic_load_input ||
296 load->intrinsic == nir_intrinsic_load_input_vertex ||
297 load->intrinsic == nir_intrinsic_load_uniform)
298 nir_intrinsic_set_type(load, type);
299
300 if (vertex_index) {
301 load->src[0] = nir_src_for_ssa(vertex_index);
302 load->src[1] = nir_src_for_ssa(offset);
303 } else if (barycentric) {
304 load->src[0] = nir_src_for_ssa(barycentric);
305 load->src[1] = nir_src_for_ssa(offset);
306 } else {
307 load->src[0] = nir_src_for_ssa(offset);
308 }
309
310 nir_ssa_dest_init(&load->instr, &load->dest,
311 num_components, bit_size, NULL);
312 nir_builder_instr_insert(b, &load->instr);
313
314 return &load->dest.ssa;
315 }
316
317 static nir_ssa_def *
318 lower_load(nir_intrinsic_instr *intrin, struct lower_io_state *state,
319 nir_ssa_def *vertex_index, nir_variable *var, nir_ssa_def *offset,
320 unsigned component, const struct glsl_type *type)
321 {
322 assert(intrin->dest.is_ssa);
323 if (intrin->dest.ssa.bit_size == 64 &&
324 (state->options & nir_lower_io_lower_64bit_to_32)) {
325 nir_builder *b = &state->builder;
326
327 const unsigned slot_size = state->type_size(glsl_dvec_type(2), false);
328
329 nir_ssa_def *comp64[4];
330 assert(component == 0 || component == 2);
331 unsigned dest_comp = 0;
332 while (dest_comp < intrin->dest.ssa.num_components) {
333 const unsigned num_comps =
334 MIN2(intrin->dest.ssa.num_components - dest_comp,
335 (4 - component) / 2);
336
337 nir_ssa_def *data32 =
338 emit_load(state, vertex_index, var, offset, component,
339 num_comps * 2, 32, nir_type_uint32);
340 for (unsigned i = 0; i < num_comps; i++) {
341 comp64[dest_comp + i] =
342 nir_pack_64_2x32(b, nir_channels(b, data32, 3 << (i * 2)));
343 }
344
345          /* Only the first load has a component offset */
346 component = 0;
347 dest_comp += num_comps;
348 offset = nir_iadd_imm(b, offset, slot_size);
349 }
350
351 return nir_vec(b, comp64, intrin->dest.ssa.num_components);
352 } else if (intrin->dest.ssa.bit_size == 1) {
353 /* Booleans are 32-bit */
354 assert(glsl_type_is_boolean(type));
355 return nir_b2b1(&state->builder,
356 emit_load(state, vertex_index, var, offset, component,
357 intrin->dest.ssa.num_components, 32,
358 nir_type_bool32));
359 } else {
360 return emit_load(state, vertex_index, var, offset, component,
361 intrin->dest.ssa.num_components,
362 intrin->dest.ssa.bit_size,
363 nir_get_nir_type_for_glsl_type(type));
364 }
365 }
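
/*
 * Sketch of the 64-bit splitting done above (lower_store() below mirrors it):
 * a dvec3 load starting at component 0 becomes two 32-bit loads,
 *
 *    first slot: load 4 x 32-bit -> pack_64_2x32 -> double components 0, 1
 *    next slot:  load 2 x 32-bit -> pack_64_2x32 -> double component 2
 *
 * with the offset advanced by type_size(dvec2) between the two loads.
 */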
366
367 static void
368 emit_store(struct lower_io_state *state, nir_ssa_def *data,
369 nir_ssa_def *vertex_index, nir_variable *var, nir_ssa_def *offset,
370 unsigned component, unsigned num_components,
371 nir_component_mask_t write_mask, nir_alu_type type)
372 {
373 nir_builder *b = &state->builder;
374 nir_variable_mode mode = var->data.mode;
375
376 assert(mode == nir_var_shader_out);
377 nir_intrinsic_op op;
378 op = vertex_index ? nir_intrinsic_store_per_vertex_output :
379 nir_intrinsic_store_output;
380
381 nir_intrinsic_instr *store =
382 nir_intrinsic_instr_create(state->builder.shader, op);
383 store->num_components = num_components;
384
385 store->src[0] = nir_src_for_ssa(data);
386
387 nir_intrinsic_set_base(store, var->data.driver_location);
388
389 if (mode == nir_var_shader_out)
390 nir_intrinsic_set_component(store, component);
391
392 if (store->intrinsic == nir_intrinsic_store_output)
393 nir_intrinsic_set_type(store, type);
394
395 nir_intrinsic_set_write_mask(store, write_mask);
396
397 if (vertex_index)
398 store->src[1] = nir_src_for_ssa(vertex_index);
399
400 store->src[vertex_index ? 2 : 1] = nir_src_for_ssa(offset);
401
402 nir_builder_instr_insert(b, &store->instr);
403 }
404
405 static void
406 lower_store(nir_intrinsic_instr *intrin, struct lower_io_state *state,
407 nir_ssa_def *vertex_index, nir_variable *var, nir_ssa_def *offset,
408 unsigned component, const struct glsl_type *type)
409 {
410 assert(intrin->src[1].is_ssa);
411 if (intrin->src[1].ssa->bit_size == 64 &&
412 (state->options & nir_lower_io_lower_64bit_to_32)) {
413 nir_builder *b = &state->builder;
414
415 const unsigned slot_size = state->type_size(glsl_dvec_type(2), false);
416
417 assert(component == 0 || component == 2);
418 unsigned src_comp = 0;
419 nir_component_mask_t write_mask = nir_intrinsic_write_mask(intrin);
420 while (src_comp < intrin->num_components) {
421 const unsigned num_comps =
422 MIN2(intrin->num_components - src_comp,
423 (4 - component) / 2);
424
425 if (write_mask & BITFIELD_MASK(num_comps)) {
426 nir_ssa_def *data =
427 nir_channels(b, intrin->src[1].ssa,
428 BITFIELD_RANGE(src_comp, num_comps));
429 nir_ssa_def *data32 = nir_bitcast_vector(b, data, 32);
430
431 nir_component_mask_t write_mask32 = 0;
432 for (unsigned i = 0; i < num_comps; i++) {
433 if (write_mask & BITFIELD_MASK(num_comps) & (1 << i))
434 write_mask32 |= 3 << (i * 2);
435 }
436
437 emit_store(state, data32, vertex_index, var, offset,
438 component, data32->num_components, write_mask32,
439 nir_type_uint32);
440 }
441
442 /* Only the first store has a component offset */
443 component = 0;
444 src_comp += num_comps;
445 write_mask >>= num_comps;
446 offset = nir_iadd_imm(b, offset, slot_size);
447 }
448    } else if (intrin->src[1].ssa->bit_size == 1) {
449 /* Booleans are 32-bit */
450 assert(glsl_type_is_boolean(type));
451 nir_ssa_def *b32_val = nir_b2b32(&state->builder, intrin->src[1].ssa);
452 emit_store(state, b32_val, vertex_index, var, offset,
453 component, intrin->num_components,
454 nir_intrinsic_write_mask(intrin),
455 nir_type_bool32);
456 } else {
457 emit_store(state, intrin->src[1].ssa, vertex_index, var, offset,
458 component, intrin->num_components,
459 nir_intrinsic_write_mask(intrin),
460 nir_get_nir_type_for_glsl_type(type));
461 }
462 }
463
464 static nir_ssa_def *
465 lower_interpolate_at(nir_intrinsic_instr *intrin, struct lower_io_state *state,
466 nir_variable *var, nir_ssa_def *offset, unsigned component,
467 const struct glsl_type *type)
468 {
469 nir_builder *b = &state->builder;
470 assert(var->data.mode == nir_var_shader_in);
471
472 /* Ignore interpolateAt() for flat variables - flat is flat. Lower
473 * interpolateAtVertex() for explicit variables.
474 */
475 if (var->data.interpolation == INTERP_MODE_FLAT ||
476 var->data.interpolation == INTERP_MODE_EXPLICIT) {
477 nir_ssa_def *vertex_index = NULL;
478
479 if (var->data.interpolation == INTERP_MODE_EXPLICIT) {
480 assert(intrin->intrinsic == nir_intrinsic_interp_deref_at_vertex);
481 vertex_index = intrin->src[1].ssa;
482 }
483
484 return lower_load(intrin, state, vertex_index, var, offset, component, type);
485 }
486
487 /* None of the supported APIs allow interpolation on 64-bit things */
488 assert(intrin->dest.is_ssa && intrin->dest.ssa.bit_size <= 32);
489
490 nir_intrinsic_op bary_op;
491 switch (intrin->intrinsic) {
492 case nir_intrinsic_interp_deref_at_centroid:
493 bary_op = (state->options & nir_lower_io_force_sample_interpolation) ?
494 nir_intrinsic_load_barycentric_sample :
495 nir_intrinsic_load_barycentric_centroid;
496 break;
497 case nir_intrinsic_interp_deref_at_sample:
498 bary_op = nir_intrinsic_load_barycentric_at_sample;
499 break;
500 case nir_intrinsic_interp_deref_at_offset:
501 bary_op = nir_intrinsic_load_barycentric_at_offset;
502 break;
503 default:
504 unreachable("Bogus interpolateAt() intrinsic.");
505 }
506
507 nir_intrinsic_instr *bary_setup =
508 nir_intrinsic_instr_create(state->builder.shader, bary_op);
509
510 nir_ssa_dest_init(&bary_setup->instr, &bary_setup->dest, 2, 32, NULL);
511 nir_intrinsic_set_interp_mode(bary_setup, var->data.interpolation);
512
513 if (intrin->intrinsic == nir_intrinsic_interp_deref_at_sample ||
514 intrin->intrinsic == nir_intrinsic_interp_deref_at_offset ||
515 intrin->intrinsic == nir_intrinsic_interp_deref_at_vertex)
516 nir_src_copy(&bary_setup->src[0], &intrin->src[1], bary_setup);
517
518 nir_builder_instr_insert(b, &bary_setup->instr);
519
520 nir_intrinsic_instr *load =
521 nir_intrinsic_instr_create(state->builder.shader,
522 nir_intrinsic_load_interpolated_input);
523 load->num_components = intrin->num_components;
524
525 nir_intrinsic_set_base(load, var->data.driver_location);
526 nir_intrinsic_set_component(load, component);
527
528 load->src[0] = nir_src_for_ssa(&bary_setup->dest.ssa);
529 load->src[1] = nir_src_for_ssa(offset);
530
531 assert(intrin->dest.is_ssa);
532 nir_ssa_dest_init(&load->instr, &load->dest,
533 intrin->dest.ssa.num_components,
534 intrin->dest.ssa.bit_size, NULL);
535 nir_builder_instr_insert(b, &load->instr);
536
537 return &load->dest.ssa;
538 }
539
540 static bool
541 nir_lower_io_block(nir_block *block,
542 struct lower_io_state *state)
543 {
544 nir_builder *b = &state->builder;
545 const nir_shader_compiler_options *options = b->shader->options;
546 bool progress = false;
547
548 nir_foreach_instr_safe(instr, block) {
549 if (instr->type != nir_instr_type_intrinsic)
550 continue;
551
552 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
553
554 switch (intrin->intrinsic) {
555 case nir_intrinsic_load_deref:
556 case nir_intrinsic_store_deref:
557          /* We can lower the I/O for this NIR intrinsic */
558 break;
559 case nir_intrinsic_interp_deref_at_centroid:
560 case nir_intrinsic_interp_deref_at_sample:
561 case nir_intrinsic_interp_deref_at_offset:
562 case nir_intrinsic_interp_deref_at_vertex:
563 /* We can optionally lower these to load_interpolated_input */
564 if (options->use_interpolated_input_intrinsics)
565 break;
566 default:
567          /* We can't lower the I/O for this NIR intrinsic, so skip it */
568 continue;
569 }
570
571 nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
572
573 nir_variable_mode mode = deref->mode;
574 assert(util_is_power_of_two_nonzero(mode));
575 if ((state->modes & mode) == 0)
576 continue;
577
578 nir_variable *var = nir_deref_instr_get_variable(deref);
579
580 b->cursor = nir_before_instr(instr);
581
582 const bool per_vertex = nir_is_per_vertex_io(var, b->shader->info.stage);
583
584 nir_ssa_def *offset;
585 nir_ssa_def *vertex_index = NULL;
586 unsigned component_offset = var->data.location_frac;
587 bool bindless_type_size = mode == nir_var_shader_in ||
588 mode == nir_var_shader_out ||
589 var->data.bindless;
590
591 offset = get_io_offset(b, deref, per_vertex ? &vertex_index : NULL,
592 state->type_size, &component_offset,
593 bindless_type_size);
594
595 nir_ssa_def *replacement = NULL;
596
597 switch (intrin->intrinsic) {
598 case nir_intrinsic_load_deref:
599 replacement = lower_load(intrin, state, vertex_index, var, offset,
600 component_offset, deref->type);
601 break;
602
603 case nir_intrinsic_store_deref:
604 lower_store(intrin, state, vertex_index, var, offset,
605 component_offset, deref->type);
606 break;
607
608 case nir_intrinsic_interp_deref_at_centroid:
609 case nir_intrinsic_interp_deref_at_sample:
610 case nir_intrinsic_interp_deref_at_offset:
611 case nir_intrinsic_interp_deref_at_vertex:
612 assert(vertex_index == NULL);
613 replacement = lower_interpolate_at(intrin, state, var, offset,
614 component_offset, deref->type);
615 break;
616
617 default:
618 continue;
619 }
620
621 if (replacement) {
622 nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
623 nir_src_for_ssa(replacement));
624 }
625 nir_instr_remove(&intrin->instr);
626 progress = true;
627 }
628
629 return progress;
630 }
631
632 static bool
633 nir_lower_io_impl(nir_function_impl *impl,
634 nir_variable_mode modes,
635 int (*type_size)(const struct glsl_type *, bool),
636 nir_lower_io_options options)
637 {
638 struct lower_io_state state;
639 bool progress = false;
640
641 nir_builder_init(&state.builder, impl);
642 state.dead_ctx = ralloc_context(NULL);
643 state.modes = modes;
644 state.type_size = type_size;
645 state.options = options;
646
647 ASSERTED nir_variable_mode supported_modes =
648 nir_var_shader_in | nir_var_shader_out | nir_var_uniform;
649 assert(!(modes & ~supported_modes));
650
651 nir_foreach_block(block, impl) {
652 progress |= nir_lower_io_block(block, &state);
653 }
654
655 ralloc_free(state.dead_ctx);
656
657 nir_metadata_preserve(impl, nir_metadata_block_index |
658 nir_metadata_dominance);
659 return progress;
660 }
661
662 /** Lower load/store_deref intrinsics on I/O variables to offset-based intrinsics
663 *
664 * This pass is intended to be used for cross-stage shader I/O and driver-
665 * managed uniforms to turn deref-based access into a simpler model using
666 * locations or offsets. For fragment shader inputs, it can optionally turn
667 * load_deref into an explicit interpolation using barycentrics coming from
668 * one of the load_barycentric_* intrinsics. This pass requires that all
669 * deref chains are complete and contain no casts.
670 */
671 bool
672 nir_lower_io(nir_shader *shader, nir_variable_mode modes,
673 int (*type_size)(const struct glsl_type *, bool),
674 nir_lower_io_options options)
675 {
676 bool progress = false;
677
678 nir_foreach_function(function, shader) {
679 if (function->impl) {
680 progress |= nir_lower_io_impl(function->impl, modes,
681 type_size, options);
682 }
683 }
684
685 return progress;
686 }
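
/*
 * Typical driver-side invocation (an illustrative sketch only; the helper
 * name, the chosen modes, and the options are assumptions, and it reuses the
 * hypothetical example_type_size_vec4 callback from above):
 */
static bool
example_lower_fs_io(nir_shader *shader)
{
   return nir_lower_io(shader, nir_var_shader_in | nir_var_shader_out,
                       example_type_size_vec4,
                       nir_lower_io_lower_64bit_to_32);
}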
687
688 static unsigned
689 type_scalar_size_bytes(const struct glsl_type *type)
690 {
691 assert(glsl_type_is_vector_or_scalar(type) ||
692 glsl_type_is_matrix(type));
693 return glsl_type_is_boolean(type) ? 4 : glsl_get_bit_size(type) / 8;
694 }
695
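/*
 * Vector layouts assumed by the address helpers below:
 *
 *    32bit_global, 64bit_global, 32bit_offset:
 *       vec1 - a plain address or byte offset
 *    32bit_index_offset:
 *       vec2(buffer_index, byte_offset)
 *    vec2_index_32bit_offset:
 *       vec3(index.x, index.y, byte_offset)
 *    64bit_bounded_global:
 *       vec4(base_lo, base_hi, size, byte_offset)
 */
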
696 static nir_ssa_def *
697 build_addr_iadd(nir_builder *b, nir_ssa_def *addr,
698 nir_address_format addr_format, nir_ssa_def *offset)
699 {
700 assert(offset->num_components == 1);
701 assert(addr->bit_size == offset->bit_size);
702
703 switch (addr_format) {
704 case nir_address_format_32bit_global:
705 case nir_address_format_64bit_global:
706 case nir_address_format_32bit_offset:
707 assert(addr->num_components == 1);
708 return nir_iadd(b, addr, offset);
709
710 case nir_address_format_64bit_bounded_global:
711 assert(addr->num_components == 4);
712 return nir_vec4(b, nir_channel(b, addr, 0),
713 nir_channel(b, addr, 1),
714 nir_channel(b, addr, 2),
715 nir_iadd(b, nir_channel(b, addr, 3), offset));
716
717 case nir_address_format_32bit_index_offset:
718 assert(addr->num_components == 2);
719 return nir_vec2(b, nir_channel(b, addr, 0),
720 nir_iadd(b, nir_channel(b, addr, 1), offset));
721 case nir_address_format_vec2_index_32bit_offset:
722 assert(addr->num_components == 3);
723 return nir_vec3(b, nir_channel(b, addr, 0), nir_channel(b, addr, 1),
724 nir_iadd(b, nir_channel(b, addr, 2), offset));
725 case nir_address_format_logical:
726 unreachable("Unsupported address format");
727 }
728 unreachable("Invalid address format");
729 }
730
731 static nir_ssa_def *
732 build_addr_iadd_imm(nir_builder *b, nir_ssa_def *addr,
733 nir_address_format addr_format, int64_t offset)
734 {
735 return build_addr_iadd(b, addr, addr_format,
736 nir_imm_intN_t(b, offset, addr->bit_size));
737 }
738
739 static nir_ssa_def *
740 addr_to_index(nir_builder *b, nir_ssa_def *addr,
741 nir_address_format addr_format)
742 {
743 if (addr_format == nir_address_format_32bit_index_offset) {
744 assert(addr->num_components == 2);
745 return nir_channel(b, addr, 0);
746 } else if (addr_format == nir_address_format_vec2_index_32bit_offset) {
747 assert(addr->num_components == 3);
748 return nir_channels(b, addr, 0x3);
749 } else {
750 unreachable("bad address format for index");
751 }
752 }
753
754 static nir_ssa_def *
755 addr_to_offset(nir_builder *b, nir_ssa_def *addr,
756 nir_address_format addr_format)
757 {
758 if (addr_format == nir_address_format_32bit_index_offset) {
759 assert(addr->num_components == 2);
760 return nir_channel(b, addr, 1);
761 } else if (addr_format == nir_address_format_vec2_index_32bit_offset) {
762 assert(addr->num_components == 3);
763 return nir_channel(b, addr, 2);
764 } else {
765 unreachable("bad address format for offset");
766 }
767 }
768
769 /** Returns true if the given address format resolves to a global address */
770 static bool
771 addr_format_is_global(nir_address_format addr_format)
772 {
773 return addr_format == nir_address_format_32bit_global ||
774 addr_format == nir_address_format_64bit_global ||
775 addr_format == nir_address_format_64bit_bounded_global;
776 }
777
778 static bool
779 addr_format_is_offset(nir_address_format addr_format)
780 {
781 return addr_format == nir_address_format_32bit_offset;
782 }
783
784 static nir_ssa_def *
785 addr_to_global(nir_builder *b, nir_ssa_def *addr,
786 nir_address_format addr_format)
787 {
788 switch (addr_format) {
789 case nir_address_format_32bit_global:
790 case nir_address_format_64bit_global:
791 assert(addr->num_components == 1);
792 return addr;
793
794 case nir_address_format_64bit_bounded_global:
795 assert(addr->num_components == 4);
796 return nir_iadd(b, nir_pack_64_2x32(b, nir_channels(b, addr, 0x3)),
797 nir_u2u64(b, nir_channel(b, addr, 3)));
798
799 case nir_address_format_32bit_index_offset:
800 case nir_address_format_vec2_index_32bit_offset:
801 case nir_address_format_32bit_offset:
802 case nir_address_format_logical:
803 unreachable("Cannot get a 64-bit address with this address format");
804 }
805
806 unreachable("Invalid address format");
807 }
808
809 static bool
810 addr_format_needs_bounds_check(nir_address_format addr_format)
811 {
812 return addr_format == nir_address_format_64bit_bounded_global;
813 }
814
815 static nir_ssa_def *
816 addr_is_in_bounds(nir_builder *b, nir_ssa_def *addr,
817 nir_address_format addr_format, unsigned size)
818 {
819 assert(addr_format == nir_address_format_64bit_bounded_global);
820 assert(addr->num_components == 4);
821 return nir_ige(b, nir_channel(b, addr, 2),
822 nir_iadd_imm(b, nir_channel(b, addr, 3), size));
823 }
824
825 static nir_ssa_def *
826 build_explicit_io_load(nir_builder *b, nir_intrinsic_instr *intrin,
827 nir_ssa_def *addr, nir_address_format addr_format,
828 unsigned num_components)
829 {
830 nir_variable_mode mode = nir_src_as_deref(intrin->src[0])->mode;
831
832 nir_intrinsic_op op;
833 switch (mode) {
834 case nir_var_mem_ubo:
835 op = nir_intrinsic_load_ubo;
836 break;
837 case nir_var_mem_ssbo:
838 if (addr_format_is_global(addr_format))
839 op = nir_intrinsic_load_global;
840 else
841 op = nir_intrinsic_load_ssbo;
842 break;
843 case nir_var_mem_global:
844 assert(addr_format_is_global(addr_format));
845 op = nir_intrinsic_load_global;
846 break;
847 case nir_var_shader_in:
848 assert(addr_format_is_offset(addr_format));
849 op = nir_intrinsic_load_kernel_input;
850 break;
851 case nir_var_mem_shared:
852 assert(addr_format_is_offset(addr_format));
853 op = nir_intrinsic_load_shared;
854 break;
855 case nir_var_shader_temp:
856 case nir_var_function_temp:
857 if (addr_format_is_offset(addr_format)) {
858 op = nir_intrinsic_load_scratch;
859 } else {
860 assert(addr_format_is_global(addr_format));
861 op = nir_intrinsic_load_global;
862 }
863 break;
864 default:
865 unreachable("Unsupported explicit IO variable mode");
866 }
867
868 nir_intrinsic_instr *load = nir_intrinsic_instr_create(b->shader, op);
869
870 if (addr_format_is_global(addr_format)) {
871 load->src[0] = nir_src_for_ssa(addr_to_global(b, addr, addr_format));
872 } else if (addr_format == nir_address_format_32bit_offset) {
873 assert(addr->num_components == 1);
874 load->src[0] = nir_src_for_ssa(addr);
875 } else {
876 load->src[0] = nir_src_for_ssa(addr_to_index(b, addr, addr_format));
877 load->src[1] = nir_src_for_ssa(addr_to_offset(b, addr, addr_format));
878 }
879
880 if (nir_intrinsic_infos[op].index_map[NIR_INTRINSIC_ACCESS] > 0)
881 nir_intrinsic_set_access(load, nir_intrinsic_access(intrin));
882
883 unsigned bit_size = intrin->dest.ssa.bit_size;
884 if (bit_size == 1) {
885 /* TODO: Make the native bool bit_size an option. */
886 bit_size = 32;
887 }
888
889 /* TODO: We should try and provide a better alignment. For OpenCL, we need
890 * to plumb the alignment through from SPIR-V when we have one.
891 */
892 nir_intrinsic_set_align(load, bit_size / 8, 0);
893
894 assert(intrin->dest.is_ssa);
895 load->num_components = num_components;
896 nir_ssa_dest_init(&load->instr, &load->dest, num_components,
897 bit_size, intrin->dest.ssa.name);
898
899 assert(bit_size % 8 == 0);
900
901 nir_ssa_def *result;
902 if (addr_format_needs_bounds_check(addr_format)) {
903 /* The Vulkan spec for robustBufferAccess gives us quite a few options
904 * as to what we can do with an OOB read. Unfortunately, returning
905 * undefined values isn't one of them so we return an actual zero.
906 */
907 nir_ssa_def *zero = nir_imm_zero(b, load->num_components, bit_size);
908
909 const unsigned load_size = (bit_size / 8) * load->num_components;
910 nir_push_if(b, addr_is_in_bounds(b, addr, addr_format, load_size));
911
912 nir_builder_instr_insert(b, &load->instr);
913
914 nir_pop_if(b, NULL);
915
916 result = nir_if_phi(b, &load->dest.ssa, zero);
917 } else {
918 nir_builder_instr_insert(b, &load->instr);
919 result = &load->dest.ssa;
920 }
921
922 if (intrin->dest.ssa.bit_size == 1) {
923 /* For shared, we can go ahead and use NIR's and/or the back-end's
924 * standard encoding for booleans rather than forcing a 0/1 boolean.
925 * This should save an instruction or two.
926 */
927 if (mode == nir_var_mem_shared ||
928 mode == nir_var_shader_temp ||
929 mode == nir_var_function_temp)
930 result = nir_b2b1(b, result);
931 else
932 result = nir_i2b(b, result);
933 }
934
935 return result;
936 }
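
/*
 * For nir_address_format_64bit_bounded_global the load above is wrapped in
 * bounds-checking control flow, roughly (pseudocode for the emitted NIR):
 *
 *    if (addr.z >= addr.w + load_size)
 *       val = load_global(pack_64_2x32(addr.xy) + addr.w)
 *    result = phi(val, 0)   [zero is selected on the out-of-bounds path]
 */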
937
938 static void
939 build_explicit_io_store(nir_builder *b, nir_intrinsic_instr *intrin,
940 nir_ssa_def *addr, nir_address_format addr_format,
941 nir_ssa_def *value, nir_component_mask_t write_mask)
942 {
943 nir_variable_mode mode = nir_src_as_deref(intrin->src[0])->mode;
944
945 nir_intrinsic_op op;
946 switch (mode) {
947 case nir_var_mem_ssbo:
948 if (addr_format_is_global(addr_format))
949 op = nir_intrinsic_store_global;
950 else
951 op = nir_intrinsic_store_ssbo;
952 break;
953 case nir_var_mem_global:
954 assert(addr_format_is_global(addr_format));
955 op = nir_intrinsic_store_global;
956 break;
957 case nir_var_mem_shared:
958 assert(addr_format_is_offset(addr_format));
959 op = nir_intrinsic_store_shared;
960 break;
961 case nir_var_shader_temp:
962 case nir_var_function_temp:
963 if (addr_format_is_offset(addr_format)) {
964 op = nir_intrinsic_store_scratch;
965 } else {
966 assert(addr_format_is_global(addr_format));
967 op = nir_intrinsic_store_global;
968 }
969 break;
970 default:
971 unreachable("Unsupported explicit IO variable mode");
972 }
973
974 nir_intrinsic_instr *store = nir_intrinsic_instr_create(b->shader, op);
975
976 if (value->bit_size == 1) {
977 /* For shared, we can go ahead and use NIR's and/or the back-end's
978 * standard encoding for booleans rather than forcing a 0/1 boolean.
979 * This should save an instruction or two.
980 *
981 * TODO: Make the native bool bit_size an option.
982 */
983 if (mode == nir_var_mem_shared ||
984 mode == nir_var_shader_temp ||
985 mode == nir_var_function_temp)
986 value = nir_b2b32(b, value);
987 else
988 value = nir_b2i(b, value, 32);
989 }
990
991 store->src[0] = nir_src_for_ssa(value);
992 if (addr_format_is_global(addr_format)) {
993 store->src[1] = nir_src_for_ssa(addr_to_global(b, addr, addr_format));
994 } else if (addr_format == nir_address_format_32bit_offset) {
995 assert(addr->num_components == 1);
996 store->src[1] = nir_src_for_ssa(addr);
997 } else {
998 store->src[1] = nir_src_for_ssa(addr_to_index(b, addr, addr_format));
999 store->src[2] = nir_src_for_ssa(addr_to_offset(b, addr, addr_format));
1000 }
1001
1002 nir_intrinsic_set_write_mask(store, write_mask);
1003
1004 if (nir_intrinsic_infos[op].index_map[NIR_INTRINSIC_ACCESS] > 0)
1005 nir_intrinsic_set_access(store, nir_intrinsic_access(intrin));
1006
1007 /* TODO: We should try and provide a better alignment. For OpenCL, we need
1008 * to plumb the alignment through from SPIR-V when we have one.
1009 */
1010 nir_intrinsic_set_align(store, value->bit_size / 8, 0);
1011
1012 assert(value->num_components == 1 ||
1013 value->num_components == intrin->num_components);
1014 store->num_components = value->num_components;
1015
1016 assert(value->bit_size % 8 == 0);
1017
1018 if (addr_format_needs_bounds_check(addr_format)) {
1019 const unsigned store_size = (value->bit_size / 8) * store->num_components;
1020 nir_push_if(b, addr_is_in_bounds(b, addr, addr_format, store_size));
1021
1022 nir_builder_instr_insert(b, &store->instr);
1023
1024 nir_pop_if(b, NULL);
1025 } else {
1026 nir_builder_instr_insert(b, &store->instr);
1027 }
1028 }
1029
1030 static nir_ssa_def *
1031 build_explicit_io_atomic(nir_builder *b, nir_intrinsic_instr *intrin,
1032 nir_ssa_def *addr, nir_address_format addr_format)
1033 {
1034 nir_variable_mode mode = nir_src_as_deref(intrin->src[0])->mode;
1035 const unsigned num_data_srcs =
1036 nir_intrinsic_infos[intrin->intrinsic].num_srcs - 1;
1037
1038 nir_intrinsic_op op;
1039 switch (mode) {
1040 case nir_var_mem_ssbo:
1041 if (addr_format_is_global(addr_format))
1042 op = global_atomic_for_deref(intrin->intrinsic);
1043 else
1044 op = ssbo_atomic_for_deref(intrin->intrinsic);
1045 break;
1046 case nir_var_mem_global:
1047 assert(addr_format_is_global(addr_format));
1048 op = global_atomic_for_deref(intrin->intrinsic);
1049 break;
1050 case nir_var_mem_shared:
1051 assert(addr_format == nir_address_format_32bit_offset);
1052 op = shared_atomic_for_deref(intrin->intrinsic);
1053 break;
1054 default:
1055 unreachable("Unsupported explicit IO variable mode");
1056 }
1057
1058 nir_intrinsic_instr *atomic = nir_intrinsic_instr_create(b->shader, op);
1059
1060 unsigned src = 0;
1061 if (addr_format_is_global(addr_format)) {
1062 atomic->src[src++] = nir_src_for_ssa(addr_to_global(b, addr, addr_format));
1063 } else if (addr_format == nir_address_format_32bit_offset) {
1064 assert(addr->num_components == 1);
1065 atomic->src[src++] = nir_src_for_ssa(addr);
1066 } else {
1067 atomic->src[src++] = nir_src_for_ssa(addr_to_index(b, addr, addr_format));
1068 atomic->src[src++] = nir_src_for_ssa(addr_to_offset(b, addr, addr_format));
1069 }
1070 for (unsigned i = 0; i < num_data_srcs; i++) {
1071 atomic->src[src++] = nir_src_for_ssa(intrin->src[1 + i].ssa);
1072 }
1073
1074 /* Global atomics don't have access flags because they assume that the
1075 * address may be non-uniform.
1076 */
1077 if (nir_intrinsic_infos[op].index_map[NIR_INTRINSIC_ACCESS] > 0)
1078 nir_intrinsic_set_access(atomic, nir_intrinsic_access(intrin));
1079
1080 assert(intrin->dest.ssa.num_components == 1);
1081 nir_ssa_dest_init(&atomic->instr, &atomic->dest,
1082 1, intrin->dest.ssa.bit_size, intrin->dest.ssa.name);
1083
1084 assert(atomic->dest.ssa.bit_size % 8 == 0);
1085
1086 if (addr_format_needs_bounds_check(addr_format)) {
1087 const unsigned atomic_size = atomic->dest.ssa.bit_size / 8;
1088 nir_push_if(b, addr_is_in_bounds(b, addr, addr_format, atomic_size));
1089
1090 nir_builder_instr_insert(b, &atomic->instr);
1091
1092 nir_pop_if(b, NULL);
1093 return nir_if_phi(b, &atomic->dest.ssa,
1094 nir_ssa_undef(b, 1, atomic->dest.ssa.bit_size));
1095 } else {
1096 nir_builder_instr_insert(b, &atomic->instr);
1097 return &atomic->dest.ssa;
1098 }
1099 }
1100
1101 nir_ssa_def *
1102 nir_explicit_io_address_from_deref(nir_builder *b, nir_deref_instr *deref,
1103 nir_ssa_def *base_addr,
1104 nir_address_format addr_format)
1105 {
1106 assert(deref->dest.is_ssa);
1107 switch (deref->deref_type) {
1108 case nir_deref_type_var:
1109 assert(deref->mode & (nir_var_shader_in | nir_var_mem_shared |
1110 nir_var_shader_temp | nir_var_function_temp));
1111 if (addr_format_is_global(addr_format)) {
1112          assert(deref->mode & (nir_var_shader_temp | nir_var_function_temp));
1113 base_addr =
1114 nir_load_scratch_base_ptr(b, !(deref->mode & nir_var_shader_temp),
1115 nir_address_format_num_components(addr_format),
1116 nir_address_format_bit_size(addr_format));
1117 return build_addr_iadd_imm(b, base_addr, addr_format,
1118 deref->var->data.driver_location);
1119 } else {
1120 return nir_imm_intN_t(b, deref->var->data.driver_location,
1121 deref->dest.ssa.bit_size);
1122 }
1123
1124 case nir_deref_type_array: {
1125 nir_deref_instr *parent = nir_deref_instr_parent(deref);
1126
1127 unsigned stride = glsl_get_explicit_stride(parent->type);
1128 if ((glsl_type_is_matrix(parent->type) &&
1129 glsl_matrix_type_is_row_major(parent->type)) ||
1130 (glsl_type_is_vector(parent->type) && stride == 0))
1131 stride = type_scalar_size_bytes(parent->type);
1132
1133 assert(stride > 0);
1134
1135 nir_ssa_def *index = nir_ssa_for_src(b, deref->arr.index, 1);
1136 index = nir_i2i(b, index, base_addr->bit_size);
1137 return build_addr_iadd(b, base_addr, addr_format,
1138 nir_amul_imm(b, index, stride));
1139 }
1140
1141 case nir_deref_type_ptr_as_array: {
1142 nir_ssa_def *index = nir_ssa_for_src(b, deref->arr.index, 1);
1143 index = nir_i2i(b, index, base_addr->bit_size);
1144 unsigned stride = nir_deref_instr_ptr_as_array_stride(deref);
1145 return build_addr_iadd(b, base_addr, addr_format,
1146 nir_amul_imm(b, index, stride));
1147 }
1148
1149 case nir_deref_type_array_wildcard:
1150 unreachable("Wildcards should be lowered by now");
1151 break;
1152
1153 case nir_deref_type_struct: {
1154 nir_deref_instr *parent = nir_deref_instr_parent(deref);
1155 int offset = glsl_get_struct_field_offset(parent->type,
1156 deref->strct.index);
1157 assert(offset >= 0);
1158 return build_addr_iadd_imm(b, base_addr, addr_format, offset);
1159 }
1160
1161 case nir_deref_type_cast:
1162 /* Nothing to do here */
1163 return base_addr;
1164 }
1165
1166 unreachable("Invalid NIR deref type");
1167 }
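
/*
 * Worked example for nir_explicit_io_address_from_deref() above: for an SSBO
 * deref chain  var -> array(i) -> struct field 2  with explicit layout on the
 * types, the address accumulates as
 *
 *    addr = base
 *         + i * glsl_get_explicit_stride(array_type)
 *         + glsl_get_struct_field_offset(struct_type, 2)
 *
 * with each addition expressed through build_addr_iadd() in whatever
 * nir_address_format is in use.
 */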
1168
1169 void
1170 nir_lower_explicit_io_instr(nir_builder *b,
1171 nir_intrinsic_instr *intrin,
1172 nir_ssa_def *addr,
1173 nir_address_format addr_format)
1174 {
1175 b->cursor = nir_after_instr(&intrin->instr);
1176
1177 nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
1178 unsigned vec_stride = glsl_get_explicit_stride(deref->type);
1179 unsigned scalar_size = type_scalar_size_bytes(deref->type);
1180 assert(vec_stride == 0 || glsl_type_is_vector(deref->type));
1181 assert(vec_stride == 0 || vec_stride >= scalar_size);
1182
1183 if (intrin->intrinsic == nir_intrinsic_load_deref) {
1184 nir_ssa_def *value;
1185 if (vec_stride > scalar_size) {
1186 nir_ssa_def *comps[4] = { NULL, };
1187 for (unsigned i = 0; i < intrin->num_components; i++) {
1188 nir_ssa_def *comp_addr = build_addr_iadd_imm(b, addr, addr_format,
1189 vec_stride * i);
1190 comps[i] = build_explicit_io_load(b, intrin, comp_addr,
1191 addr_format, 1);
1192 }
1193 value = nir_vec(b, comps, intrin->num_components);
1194 } else {
1195 value = build_explicit_io_load(b, intrin, addr, addr_format,
1196 intrin->num_components);
1197 }
1198 nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(value));
1199 } else if (intrin->intrinsic == nir_intrinsic_store_deref) {
1200 assert(intrin->src[1].is_ssa);
1201 nir_ssa_def *value = intrin->src[1].ssa;
1202 nir_component_mask_t write_mask = nir_intrinsic_write_mask(intrin);
1203 if (vec_stride > scalar_size) {
1204 for (unsigned i = 0; i < intrin->num_components; i++) {
1205 if (!(write_mask & (1 << i)))
1206 continue;
1207
1208 nir_ssa_def *comp_addr = build_addr_iadd_imm(b, addr, addr_format,
1209 vec_stride * i);
1210 build_explicit_io_store(b, intrin, comp_addr, addr_format,
1211 nir_channel(b, value, i), 1);
1212 }
1213 } else {
1214 build_explicit_io_store(b, intrin, addr, addr_format,
1215 value, write_mask);
1216 }
1217 } else {
1218 nir_ssa_def *value =
1219 build_explicit_io_atomic(b, intrin, addr, addr_format);
1220 nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(value));
1221 }
1222
1223 nir_instr_remove(&intrin->instr);
1224 }
1225
1226 static void
1227 lower_explicit_io_deref(nir_builder *b, nir_deref_instr *deref,
1228 nir_address_format addr_format)
1229 {
1230 /* Just delete the deref if it's not used. We can't use
1231 * nir_deref_instr_remove_if_unused here because it may remove more than
1232 * one deref which could break our list walking since we walk the list
1233 * backwards.
1234 */
1235 assert(list_is_empty(&deref->dest.ssa.if_uses));
1236 if (list_is_empty(&deref->dest.ssa.uses)) {
1237 nir_instr_remove(&deref->instr);
1238 return;
1239 }
1240
1241 b->cursor = nir_after_instr(&deref->instr);
1242
1243 nir_ssa_def *base_addr = NULL;
1244 if (deref->deref_type != nir_deref_type_var) {
1245 assert(deref->parent.is_ssa);
1246 base_addr = deref->parent.ssa;
1247 }
1248
1249 nir_ssa_def *addr = nir_explicit_io_address_from_deref(b, deref, base_addr,
1250 addr_format);
1251
1252 nir_instr_remove(&deref->instr);
1253 nir_ssa_def_rewrite_uses(&deref->dest.ssa, nir_src_for_ssa(addr));
1254 }
1255
1256 static void
1257 lower_explicit_io_access(nir_builder *b, nir_intrinsic_instr *intrin,
1258 nir_address_format addr_format)
1259 {
1260 assert(intrin->src[0].is_ssa);
1261 nir_lower_explicit_io_instr(b, intrin, intrin->src[0].ssa, addr_format);
1262 }
1263
1264 static void
1265 lower_explicit_io_array_length(nir_builder *b, nir_intrinsic_instr *intrin,
1266 nir_address_format addr_format)
1267 {
1268 b->cursor = nir_after_instr(&intrin->instr);
1269
1270 nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
1271
1272 assert(glsl_type_is_array(deref->type));
1273 assert(glsl_get_length(deref->type) == 0);
1274 unsigned stride = glsl_get_explicit_stride(deref->type);
1275 assert(stride > 0);
1276
1277 assert(addr_format == nir_address_format_32bit_index_offset ||
1278 addr_format == nir_address_format_vec2_index_32bit_offset);
1279 nir_ssa_def *addr = &deref->dest.ssa;
1280 nir_ssa_def *index = addr_to_index(b, addr, addr_format);
1281 nir_ssa_def *offset = addr_to_offset(b, addr, addr_format);
1282
1283 nir_intrinsic_instr *bsize =
1284 nir_intrinsic_instr_create(b->shader, nir_intrinsic_get_buffer_size);
1285 bsize->src[0] = nir_src_for_ssa(index);
1286 nir_ssa_dest_init(&bsize->instr, &bsize->dest, 1, 32, NULL);
1287 nir_builder_instr_insert(b, &bsize->instr);
1288
1289 nir_ssa_def *arr_size =
1290 nir_idiv(b, nir_isub(b, &bsize->dest.ssa, offset),
1291 nir_imm_int(b, stride));
1292
1293 nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(arr_size));
1294 nir_instr_remove(&intrin->instr);
1295 }
1296
1297 static bool
1298 nir_lower_explicit_io_impl(nir_function_impl *impl, nir_variable_mode modes,
1299 nir_address_format addr_format)
1300 {
1301 bool progress = false;
1302
1303 nir_builder b;
1304 nir_builder_init(&b, impl);
1305
1306 /* Walk in reverse order so that we can see the full deref chain when we
1307 * lower the access operations. We lower them assuming that the derefs
1308 * will be turned into address calculations later.
1309 */
1310 nir_foreach_block_reverse(block, impl) {
1311 nir_foreach_instr_reverse_safe(instr, block) {
1312 switch (instr->type) {
1313 case nir_instr_type_deref: {
1314 nir_deref_instr *deref = nir_instr_as_deref(instr);
1315 if (deref->mode & modes) {
1316 lower_explicit_io_deref(&b, deref, addr_format);
1317 progress = true;
1318 }
1319 break;
1320 }
1321
1322 case nir_instr_type_intrinsic: {
1323 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
1324 switch (intrin->intrinsic) {
1325 case nir_intrinsic_load_deref:
1326 case nir_intrinsic_store_deref:
1327 case nir_intrinsic_deref_atomic_add:
1328 case nir_intrinsic_deref_atomic_imin:
1329 case nir_intrinsic_deref_atomic_umin:
1330 case nir_intrinsic_deref_atomic_imax:
1331 case nir_intrinsic_deref_atomic_umax:
1332 case nir_intrinsic_deref_atomic_and:
1333 case nir_intrinsic_deref_atomic_or:
1334 case nir_intrinsic_deref_atomic_xor:
1335 case nir_intrinsic_deref_atomic_exchange:
1336 case nir_intrinsic_deref_atomic_comp_swap:
1337 case nir_intrinsic_deref_atomic_fadd:
1338 case nir_intrinsic_deref_atomic_fmin:
1339 case nir_intrinsic_deref_atomic_fmax:
1340 case nir_intrinsic_deref_atomic_fcomp_swap: {
1341 nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
1342 if (deref->mode & modes) {
1343 lower_explicit_io_access(&b, intrin, addr_format);
1344 progress = true;
1345 }
1346 break;
1347 }
1348
1349 case nir_intrinsic_deref_buffer_array_length: {
1350 nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
1351 if (deref->mode & modes) {
1352 lower_explicit_io_array_length(&b, intrin, addr_format);
1353 progress = true;
1354 }
1355 break;
1356 }
1357
1358 default:
1359 break;
1360 }
1361 break;
1362 }
1363
1364 default:
1365 /* Nothing to do */
1366 break;
1367 }
1368 }
1369 }
1370
1371 if (progress) {
1372 nir_metadata_preserve(impl, nir_metadata_block_index |
1373 nir_metadata_dominance);
1374 }
1375
1376 return progress;
1377 }
1378
1379 /** Lower explicitly laid out I/O access to byte offset/address intrinsics
1380 *
1381 * This pass is intended to be used for any I/O which touches memory external
1382 * to the shader or which is directly visible to the client. It requires that
1383  * all data types in the given modes have explicit stride/offset decorations
1384 * to tell it exactly how to calculate the offset/address for the given load,
1385 * store, or atomic operation. If the offset/stride information does not come
1386 * from the client explicitly (as with shared variables in GL or Vulkan),
1387 * nir_lower_vars_to_explicit_types() can be used to add them.
1388 *
1389 * Unlike nir_lower_io, this pass is fully capable of handling incomplete
1390 * pointer chains which may contain cast derefs. It does so by walking the
1391 * deref chain backwards and simply replacing each deref, one at a time, with
1392 * the appropriate address calculation. The pass takes a nir_address_format
1393 * parameter which describes how the offset or address is to be represented
1394 * during calculations. By ensuring that the address is always in a
1395 * consistent format, pointers can safely be conjured from thin air by the
1396 * driver, stored to variables, passed through phis, etc.
1397 *
1398 * The one exception to the simple algorithm described above is for handling
1399 * row-major matrices in which case we may look down one additional level of
1400 * the deref chain.
1401 */
1402 bool
1403 nir_lower_explicit_io(nir_shader *shader, nir_variable_mode modes,
1404 nir_address_format addr_format)
1405 {
1406 bool progress = false;
1407
1408 nir_foreach_function(function, shader) {
1409 if (function->impl &&
1410 nir_lower_explicit_io_impl(function->impl, modes, addr_format))
1411 progress = true;
1412 }
1413
1414 return progress;
1415 }
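
/*
 * Typical driver-side invocation (an illustrative sketch; the helper name and
 * the chosen address format are assumptions):
 */
static bool
example_lower_buffer_access(nir_shader *shader)
{
   return nir_lower_explicit_io(shader, nir_var_mem_ubo | nir_var_mem_ssbo,
                                nir_address_format_32bit_index_offset);
}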
1416
1417 static bool
1418 nir_lower_vars_to_explicit_types_impl(nir_function_impl *impl,
1419 nir_variable_mode modes,
1420 glsl_type_size_align_func type_info)
1421 {
1422 bool progress = false;
1423
1424 nir_foreach_block(block, impl) {
1425 nir_foreach_instr(instr, block) {
1426 if (instr->type != nir_instr_type_deref)
1427 continue;
1428
1429 nir_deref_instr *deref = nir_instr_as_deref(instr);
1430 if (!(deref->mode & modes))
1431 continue;
1432
1433 unsigned size, alignment;
1434 const struct glsl_type *new_type =
1435 glsl_get_explicit_type_for_size_align(deref->type, type_info, &size, &alignment);
1436 if (new_type != deref->type) {
1437 progress = true;
1438 deref->type = new_type;
1439 }
1440 if (deref->deref_type == nir_deref_type_cast) {
1441 /* See also glsl_type::get_explicit_type_for_size_align() */
1442 unsigned new_stride = align(size, alignment);
1443 if (new_stride != deref->cast.ptr_stride) {
1444 deref->cast.ptr_stride = new_stride;
1445 progress = true;
1446 }
1447 }
1448 }
1449 }
1450
1451 if (progress) {
1452 nir_metadata_preserve(impl, nir_metadata_block_index |
1453 nir_metadata_dominance |
1454 nir_metadata_live_ssa_defs |
1455 nir_metadata_loop_analysis);
1456 }
1457
1458 return progress;
1459 }
1460
1461 static bool
1462 lower_vars_to_explicit(nir_shader *shader,
1463 struct exec_list *vars, nir_variable_mode mode,
1464 glsl_type_size_align_func type_info)
1465 {
1466 bool progress = false;
1467 unsigned offset;
1468 switch (mode) {
1469 case nir_var_function_temp:
1470 case nir_var_shader_temp:
1471 offset = shader->scratch_size;
1472 break;
1473 case nir_var_mem_shared:
1474 offset = 0;
1475 break;
1476 default:
1477 unreachable("Unsupported mode");
1478 }
1479 nir_foreach_variable(var, vars) {
1480 unsigned size, align;
1481 const struct glsl_type *explicit_type =
1482 glsl_get_explicit_type_for_size_align(var->type, type_info, &size, &align);
1483
1484 if (explicit_type != var->type) {
1485 progress = true;
1486 var->type = explicit_type;
1487 }
1488
1489 var->data.driver_location = ALIGN_POT(offset, align);
1490 offset = var->data.driver_location + size;
1491 }
1492
1493 switch (mode) {
1494 case nir_var_shader_temp:
1495 case nir_var_function_temp:
1496 shader->scratch_size = offset;
1497 break;
1498 case nir_var_mem_shared:
1499 shader->info.cs.shared_size = offset;
1500 shader->num_shared = offset;
1501 break;
1502 default:
1503 unreachable("Unsupported mode");
1504 }
1505
1506 return progress;
1507 }
1508
1509 bool
1510 nir_lower_vars_to_explicit_types(nir_shader *shader,
1511 nir_variable_mode modes,
1512 glsl_type_size_align_func type_info)
1513 {
1514 /* TODO: Situations which need to be handled to support more modes:
1515 * - row-major matrices
1516 * - compact shader inputs/outputs
1517 * - interface types
1518 */
1519 ASSERTED nir_variable_mode supported = nir_var_mem_shared |
1520 nir_var_shader_temp | nir_var_function_temp;
1521 assert(!(modes & ~supported) && "unsupported");
1522
1523 bool progress = false;
1524
1525 if (modes & nir_var_mem_shared)
1526 progress |= lower_vars_to_explicit(shader, &shader->shared, nir_var_mem_shared, type_info);
1527 if (modes & nir_var_shader_temp)
1528 progress |= lower_vars_to_explicit(shader, &shader->globals, nir_var_shader_temp, type_info);
1529
1530 nir_foreach_function(function, shader) {
1531 if (function->impl) {
1532 if (modes & nir_var_function_temp)
1533 progress |= lower_vars_to_explicit(shader, &function->impl->locals, nir_var_function_temp, type_info);
1534
1535 progress |= nir_lower_vars_to_explicit_types_impl(function->impl, modes, type_info);
1536 }
1537 }
1538
1539 return progress;
1540 }
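
/*
 * Sketch of how this pass pairs with nir_lower_explicit_io() for shared
 * memory (illustrative only; the helper name is hypothetical and drivers may
 * substitute their own size/align callback for
 * glsl_get_natural_size_align_bytes()):
 */
static bool
example_lower_shared_memory(nir_shader *shader)
{
   bool progress = false;
   progress |= nir_lower_vars_to_explicit_types(shader, nir_var_mem_shared,
                                                glsl_get_natural_size_align_bytes);
   progress |= nir_lower_explicit_io(shader, nir_var_mem_shared,
                                     nir_address_format_32bit_offset);
   return progress;
}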
1541
1542 /**
1543 * Return the offset source for a load/store intrinsic.
1544 */
1545 nir_src *
1546 nir_get_io_offset_src(nir_intrinsic_instr *instr)
1547 {
1548 switch (instr->intrinsic) {
1549 case nir_intrinsic_load_input:
1550 case nir_intrinsic_load_output:
1551 case nir_intrinsic_load_shared:
1552 case nir_intrinsic_load_uniform:
1553 case nir_intrinsic_load_global:
1554 case nir_intrinsic_load_scratch:
1555 case nir_intrinsic_load_fs_input_interp_deltas:
1556 return &instr->src[0];
1557 case nir_intrinsic_load_ubo:
1558 case nir_intrinsic_load_ssbo:
1559 case nir_intrinsic_load_per_vertex_input:
1560 case nir_intrinsic_load_per_vertex_output:
1561 case nir_intrinsic_load_interpolated_input:
1562 case nir_intrinsic_store_output:
1563 case nir_intrinsic_store_shared:
1564 case nir_intrinsic_store_global:
1565 case nir_intrinsic_store_scratch:
1566 case nir_intrinsic_ssbo_atomic_add:
1567 case nir_intrinsic_ssbo_atomic_imin:
1568 case nir_intrinsic_ssbo_atomic_umin:
1569 case nir_intrinsic_ssbo_atomic_imax:
1570 case nir_intrinsic_ssbo_atomic_umax:
1571 case nir_intrinsic_ssbo_atomic_and:
1572 case nir_intrinsic_ssbo_atomic_or:
1573 case nir_intrinsic_ssbo_atomic_xor:
1574 case nir_intrinsic_ssbo_atomic_exchange:
1575 case nir_intrinsic_ssbo_atomic_comp_swap:
1576 case nir_intrinsic_ssbo_atomic_fadd:
1577 case nir_intrinsic_ssbo_atomic_fmin:
1578 case nir_intrinsic_ssbo_atomic_fmax:
1579 case nir_intrinsic_ssbo_atomic_fcomp_swap:
1580 return &instr->src[1];
1581 case nir_intrinsic_store_ssbo:
1582 case nir_intrinsic_store_per_vertex_output:
1583 return &instr->src[2];
1584 default:
1585 return NULL;
1586 }
1587 }
1588
1589 /**
1590 * Return the vertex index source for a load/store per_vertex intrinsic.
1591 */
1592 nir_src *
1593 nir_get_io_vertex_index_src(nir_intrinsic_instr *instr)
1594 {
1595 switch (instr->intrinsic) {
1596 case nir_intrinsic_load_per_vertex_input:
1597 case nir_intrinsic_load_per_vertex_output:
1598 return &instr->src[0];
1599 case nir_intrinsic_store_per_vertex_output:
1600 return &instr->src[1];
1601 default:
1602 return NULL;
1603 }
1604 }
1605
1606 /**
1607  * Return the numeric constant that identifies a NULL pointer for each address
1608 * format.
1609 */
1610 const nir_const_value *
1611 nir_address_format_null_value(nir_address_format addr_format)
1612 {
1613 const static nir_const_value null_values[][NIR_MAX_VEC_COMPONENTS] = {
1614 [nir_address_format_32bit_global] = {{0}},
1615 [nir_address_format_64bit_global] = {{0}},
1616 [nir_address_format_64bit_bounded_global] = {{0}},
1617 [nir_address_format_32bit_index_offset] = {{.u32 = ~0}, {.u32 = ~0}},
1618 [nir_address_format_vec2_index_32bit_offset] = {{.u32 = ~0}, {.u32 = ~0}, {.u32 = ~0}},
1619 [nir_address_format_32bit_offset] = {{.u32 = ~0}},
1620 [nir_address_format_logical] = {{.u32 = ~0}},
1621 };
1622
1623 assert(addr_format < ARRAY_SIZE(null_values));
1624 return null_values[addr_format];
1625 }
1626
1627 nir_ssa_def *
1628 nir_build_addr_ieq(nir_builder *b, nir_ssa_def *addr0, nir_ssa_def *addr1,
1629 nir_address_format addr_format)
1630 {
1631 switch (addr_format) {
1632 case nir_address_format_32bit_global:
1633 case nir_address_format_64bit_global:
1634 case nir_address_format_64bit_bounded_global:
1635 case nir_address_format_32bit_index_offset:
1636 case nir_address_format_vec2_index_32bit_offset:
1637 case nir_address_format_32bit_offset:
1638 return nir_ball_iequal(b, addr0, addr1);
1639
1640 case nir_address_format_logical:
1641 unreachable("Unsupported address format");
1642 }
1643
1644 unreachable("Invalid address format");
1645 }
1646
1647 nir_ssa_def *
1648 nir_build_addr_isub(nir_builder *b, nir_ssa_def *addr0, nir_ssa_def *addr1,
1649 nir_address_format addr_format)
1650 {
1651 switch (addr_format) {
1652 case nir_address_format_32bit_global:
1653 case nir_address_format_64bit_global:
1654 case nir_address_format_32bit_offset:
1655 assert(addr0->num_components == 1);
1656 assert(addr1->num_components == 1);
1657 return nir_isub(b, addr0, addr1);
1658
1659 case nir_address_format_64bit_bounded_global:
1660 return nir_isub(b, addr_to_global(b, addr0, addr_format),
1661 addr_to_global(b, addr1, addr_format));
1662
1663 case nir_address_format_32bit_index_offset:
1664 assert(addr0->num_components == 2);
1665 assert(addr1->num_components == 2);
1666 /* Assume the same buffer index. */
1667 return nir_isub(b, nir_channel(b, addr0, 1), nir_channel(b, addr1, 1));
1668
1669 case nir_address_format_vec2_index_32bit_offset:
1670 assert(addr0->num_components == 3);
1671 assert(addr1->num_components == 3);
1672 /* Assume the same buffer index. */
1673 return nir_isub(b, nir_channel(b, addr0, 2), nir_channel(b, addr1, 2));
1674
1675 case nir_address_format_logical:
1676 unreachable("Unsupported address format");
1677 }
1678
1679 unreachable("Invalid address format");
1680 }
1681
1682 static bool
1683 is_input(nir_intrinsic_instr *intrin)
1684 {
1685 return intrin->intrinsic == nir_intrinsic_load_input ||
1686 intrin->intrinsic == nir_intrinsic_load_per_vertex_input ||
1687 intrin->intrinsic == nir_intrinsic_load_interpolated_input ||
1688 intrin->intrinsic == nir_intrinsic_load_fs_input_interp_deltas;
1689 }
1690
1691 static bool
1692 is_output(nir_intrinsic_instr *intrin)
1693 {
1694 return intrin->intrinsic == nir_intrinsic_load_output ||
1695 intrin->intrinsic == nir_intrinsic_load_per_vertex_output ||
1696 intrin->intrinsic == nir_intrinsic_store_output ||
1697 intrin->intrinsic == nir_intrinsic_store_per_vertex_output;
1698 }
1699
1700
1701 /**
1702 * This pass adds constant offsets to instr->const_index[0] for input/output
1703 * intrinsics, and resets the offset source to 0. Non-constant offsets remain
1704 * unchanged - since we don't know what part of a compound variable is
1705 * accessed, we allocate storage for the entire thing. For drivers that use
1706 * nir_lower_io_to_temporaries() before nir_lower_io(), this guarantees that
1707 * the offset source will be 0, so that they don't have to add it in manually.
1708 */
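
/*
 * For example (illustrative NIR, exact printing aside), a constant source
 * offset folds into the base:
 *
 *    intrinsic load_input (ssa_c) (base=4, ...)   with ssa_c = const 2
 *
 * becomes
 *
 *    intrinsic load_input (ssa_z) (base=6, ...)   with ssa_z = const 0
 */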
1709
1710 static bool
1711 add_const_offset_to_base_block(nir_block *block, nir_builder *b,
1712 nir_variable_mode mode)
1713 {
1714 bool progress = false;
1715 nir_foreach_instr_safe(instr, block) {
1716 if (instr->type != nir_instr_type_intrinsic)
1717 continue;
1718
1719 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
1720
1721 if ((mode == nir_var_shader_in && is_input(intrin)) ||
1722 (mode == nir_var_shader_out && is_output(intrin))) {
1723 nir_src *offset = nir_get_io_offset_src(intrin);
1724
1725 if (nir_src_is_const(*offset)) {
1726 intrin->const_index[0] += nir_src_as_uint(*offset);
1727 b->cursor = nir_before_instr(&intrin->instr);
1728 nir_instr_rewrite_src(&intrin->instr, offset,
1729 nir_src_for_ssa(nir_imm_int(b, 0)));
1730 progress = true;
1731 }
1732 }
1733 }
1734
1735 return progress;
1736 }
1737
1738 bool
1739 nir_io_add_const_offset_to_base(nir_shader *nir, nir_variable_mode mode)
1740 {
1741 bool progress = false;
1742
1743 nir_foreach_function(f, nir) {
1744 if (f->impl) {
1745 nir_builder b;
1746 nir_builder_init(&b, f->impl);
1747 nir_foreach_block(block, f->impl) {
1748 progress |= add_const_offset_to_base_block(block, &b, mode);
1749 }
1750 }
1751 }
1752
1753 return progress;
1754 }
1755