nir/lower_explicit_io: add nir_var_mem_shared support
[mesa.git] / src / compiler / nir / nir_lower_io.c
1 /*
2 * Copyright © 2014 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Connor Abbott (cwabbott0@gmail.com)
25 * Jason Ekstrand (jason@jlekstrand.net)
26 *
27 */
28
29 /*
30 * This lowering pass converts loads and stores of input/output variables
31 * into the corresponding input/output intrinsics.
32 */
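/*
 * Illustrative sketch (not exact NIR syntax): a load_deref of a deref chain
 * rooted at a shader input variable becomes a load_input (or
 * load_interpolated_input/load_per_vertex_input) intrinsic whose base is the
 * variable's driver_location and whose offset source is computed from the
 * rest of the deref chain by get_io_offset() below.
 */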
33
34 #include "nir.h"
35 #include "nir_builder.h"
36 #include "nir_deref.h"
37
38 struct lower_io_state {
39 void *dead_ctx;
40 nir_builder builder;
41 int (*type_size)(const struct glsl_type *type, bool);
42 nir_variable_mode modes;
43 nir_lower_io_options options;
44 };
45
46 static nir_intrinsic_op
47 ssbo_atomic_for_deref(nir_intrinsic_op deref_op)
48 {
49 switch (deref_op) {
50 #define OP(O) case nir_intrinsic_deref_##O: return nir_intrinsic_ssbo_##O;
51 OP(atomic_exchange)
52 OP(atomic_comp_swap)
53 OP(atomic_add)
54 OP(atomic_imin)
55 OP(atomic_umin)
56 OP(atomic_imax)
57 OP(atomic_umax)
58 OP(atomic_and)
59 OP(atomic_or)
60 OP(atomic_xor)
61 OP(atomic_fadd)
62 OP(atomic_fmin)
63 OP(atomic_fmax)
64 OP(atomic_fcomp_swap)
65 #undef OP
66 default:
67 unreachable("Invalid SSBO atomic");
68 }
69 }
70
71 static nir_intrinsic_op
72 global_atomic_for_deref(nir_intrinsic_op deref_op)
73 {
74 switch (deref_op) {
75 #define OP(O) case nir_intrinsic_deref_##O: return nir_intrinsic_global_##O;
76 OP(atomic_exchange)
77 OP(atomic_comp_swap)
78 OP(atomic_add)
79 OP(atomic_imin)
80 OP(atomic_umin)
81 OP(atomic_imax)
82 OP(atomic_umax)
83 OP(atomic_and)
84 OP(atomic_or)
85 OP(atomic_xor)
86 OP(atomic_fadd)
87 OP(atomic_fmin)
88 OP(atomic_fmax)
89 OP(atomic_fcomp_swap)
90 #undef OP
91 default:
92 unreachable("Invalid SSBO atomic");
93 }
94 }
95
96 static nir_intrinsic_op
97 shared_atomic_for_deref(nir_intrinsic_op deref_op)
98 {
99 switch (deref_op) {
100 #define OP(O) case nir_intrinsic_deref_##O: return nir_intrinsic_shared_##O;
101 OP(atomic_exchange)
102 OP(atomic_comp_swap)
103 OP(atomic_add)
104 OP(atomic_imin)
105 OP(atomic_umin)
106 OP(atomic_imax)
107 OP(atomic_umax)
108 OP(atomic_and)
109 OP(atomic_or)
110 OP(atomic_xor)
111 OP(atomic_fadd)
112 OP(atomic_fmin)
113 OP(atomic_fmax)
114 OP(atomic_fcomp_swap)
115 #undef OP
116 default:
117 unreachable("Invalid shared atomic");
118 }
119 }
120
121 void
122 nir_assign_var_locations(struct exec_list *var_list, unsigned *size,
123 int (*type_size)(const struct glsl_type *, bool))
124 {
125 unsigned location = 0;
126
127 nir_foreach_variable(var, var_list) {
128 /*
129 * UBOs and SSBOs have their own address spaces, so don't count them
130 * towards the number of global uniforms.
131 */
132 if (var->data.mode == nir_var_mem_ubo || var->data.mode == nir_var_mem_ssbo)
133 continue;
134
135 var->data.driver_location = location;
136 bool bindless_type_size = var->data.mode == nir_var_shader_in ||
137 var->data.mode == nir_var_shader_out ||
138 var->data.bindless;
139 location += type_size(var->type, bindless_type_size);
140 }
141
142 *size = location;
143 }
144
145 /**
146 * Return true if the given variable is a per-vertex input/output array
147 * (such as a geometry shader input).
148 */
149 bool
150 nir_is_per_vertex_io(const nir_variable *var, gl_shader_stage stage)
151 {
152 if (var->data.patch || !glsl_type_is_array(var->type))
153 return false;
154
155 if (var->data.mode == nir_var_shader_in)
156 return stage == MESA_SHADER_GEOMETRY ||
157 stage == MESA_SHADER_TESS_CTRL ||
158 stage == MESA_SHADER_TESS_EVAL;
159
160 if (var->data.mode == nir_var_shader_out)
161 return stage == MESA_SHADER_TESS_CTRL;
162
163 return false;
164 }
165
166 static nir_ssa_def *
167 get_io_offset(nir_builder *b, nir_deref_instr *deref,
168 nir_ssa_def **vertex_index,
169 int (*type_size)(const struct glsl_type *, bool),
170 unsigned *component, bool bts)
171 {
172 nir_deref_path path;
173 nir_deref_path_init(&path, deref, NULL);
174
175 assert(path.path[0]->deref_type == nir_deref_type_var);
176 nir_deref_instr **p = &path.path[1];
177
178 /* For per-vertex input arrays (i.e. geometry shader inputs), keep the
179 * outermost array index separate. Process the rest normally.
180 */
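/* (Illustrative example: for a geometry shader input such as
 * gl_in[v].gl_Position, "v" becomes the vertex_index extracted here and
 * only the remaining part of the deref chain contributes to the offset.)
 */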
181 if (vertex_index != NULL) {
182 assert((*p)->deref_type == nir_deref_type_array);
183 *vertex_index = nir_ssa_for_src(b, (*p)->arr.index, 1);
184 p++;
185 }
186
187 if (path.path[0]->var->data.compact) {
188 assert((*p)->deref_type == nir_deref_type_array);
189 assert(glsl_type_is_scalar((*p)->type));
190
191 /* We always lower indirect dereferences for "compact" array vars. */
192 const unsigned index = nir_src_as_uint((*p)->arr.index);
193 const unsigned total_offset = *component + index;
194 const unsigned slot_offset = total_offset / 4;
195 *component = total_offset % 4;
196 return nir_imm_int(b, type_size(glsl_vec4_type(), bts) * slot_offset);
197 }
198
199 /* Just emit code and let constant-folding go to town */
200 nir_ssa_def *offset = nir_imm_int(b, 0);
201
202 for (; *p; p++) {
203 if ((*p)->deref_type == nir_deref_type_array) {
204 unsigned size = type_size((*p)->type, bts);
205
206 nir_ssa_def *mul =
207 nir_imul_imm(b, nir_ssa_for_src(b, (*p)->arr.index, 1), size);
208
209 offset = nir_iadd(b, offset, mul);
210 } else if ((*p)->deref_type == nir_deref_type_struct) {
211 /* p starts at path[1], so this is safe */
212 nir_deref_instr *parent = *(p - 1);
213
214 unsigned field_offset = 0;
215 for (unsigned i = 0; i < (*p)->strct.index; i++) {
216 field_offset += type_size(glsl_get_struct_field(parent->type, i), bts);
217 }
218 offset = nir_iadd_imm(b, offset, field_offset);
219 } else {
220 unreachable("Unsupported deref type");
221 }
222 }
223
224 nir_deref_path_finish(&path);
225
226 return offset;
227 }
228
229 static nir_ssa_def *
230 emit_load(struct lower_io_state *state,
231 nir_ssa_def *vertex_index, nir_variable *var, nir_ssa_def *offset,
232 unsigned component, unsigned num_components, unsigned bit_size,
233 nir_alu_type type)
234 {
235 nir_builder *b = &state->builder;
236 const nir_shader *nir = b->shader;
237 nir_variable_mode mode = var->data.mode;
238 nir_ssa_def *barycentric = NULL;
239
240 nir_intrinsic_op op;
241 switch (mode) {
242 case nir_var_shader_in:
243 if (nir->info.stage == MESA_SHADER_FRAGMENT &&
244 nir->options->use_interpolated_input_intrinsics &&
245 var->data.interpolation != INTERP_MODE_FLAT) {
246 assert(vertex_index == NULL);
247
248 nir_intrinsic_op bary_op;
249 if (var->data.sample ||
250 (state->options & nir_lower_io_force_sample_interpolation))
251 bary_op = nir_intrinsic_load_barycentric_sample;
252 else if (var->data.centroid)
253 bary_op = nir_intrinsic_load_barycentric_centroid;
254 else
255 bary_op = nir_intrinsic_load_barycentric_pixel;
256
257 barycentric = nir_load_barycentric(&state->builder, bary_op,
258 var->data.interpolation);
259 op = nir_intrinsic_load_interpolated_input;
260 } else {
261 op = vertex_index ? nir_intrinsic_load_per_vertex_input :
262 nir_intrinsic_load_input;
263 }
264 break;
265 case nir_var_shader_out:
266 op = vertex_index ? nir_intrinsic_load_per_vertex_output :
267 nir_intrinsic_load_output;
268 break;
269 case nir_var_uniform:
270 op = nir_intrinsic_load_uniform;
271 break;
272 case nir_var_mem_shared:
273 op = nir_intrinsic_load_shared;
274 break;
275 default:
276 unreachable("Unknown variable mode");
277 }
278
279 nir_intrinsic_instr *load =
280 nir_intrinsic_instr_create(state->builder.shader, op);
281 load->num_components = num_components;
282
283 nir_intrinsic_set_base(load, var->data.driver_location);
284 if (mode == nir_var_shader_in || mode == nir_var_shader_out)
285 nir_intrinsic_set_component(load, component);
286
287 if (load->intrinsic == nir_intrinsic_load_uniform)
288 nir_intrinsic_set_range(load,
289 state->type_size(var->type, var->data.bindless));
290
291 if (load->intrinsic == nir_intrinsic_load_input ||
292 load->intrinsic == nir_intrinsic_load_uniform)
293 nir_intrinsic_set_type(load, type);
294
295 if (vertex_index) {
296 load->src[0] = nir_src_for_ssa(vertex_index);
297 load->src[1] = nir_src_for_ssa(offset);
298 } else if (barycentric) {
299 load->src[0] = nir_src_for_ssa(barycentric);
300 load->src[1] = nir_src_for_ssa(offset);
301 } else {
302 load->src[0] = nir_src_for_ssa(offset);
303 }
304
305 nir_ssa_dest_init(&load->instr, &load->dest,
306 num_components, bit_size, NULL);
307 nir_builder_instr_insert(b, &load->instr);
308
309 return &load->dest.ssa;
310 }
311
312 static nir_ssa_def *
313 lower_load(nir_intrinsic_instr *intrin, struct lower_io_state *state,
314 nir_ssa_def *vertex_index, nir_variable *var, nir_ssa_def *offset,
315 unsigned component, const struct glsl_type *type)
316 {
317 assert(intrin->dest.is_ssa);
318 if (intrin->dest.ssa.bit_size == 64 &&
319 (state->options & nir_lower_io_lower_64bit_to_32)) {
320 nir_builder *b = &state->builder;
321
322 const unsigned slot_size = state->type_size(glsl_dvec_type(2), false);
323
324 nir_ssa_def *comp64[4];
325 assert(component == 0 || component == 2);
326 unsigned dest_comp = 0;
327 while (dest_comp < intrin->dest.ssa.num_components) {
328 const unsigned num_comps =
329 MIN2(intrin->dest.ssa.num_components - dest_comp,
330 (4 - component) / 2);
331
332 nir_ssa_def *data32 =
333 emit_load(state, vertex_index, var, offset, component,
334 num_comps * 2, 32, nir_type_uint32);
335 for (unsigned i = 0; i < num_comps; i++) {
336 comp64[dest_comp + i] =
337 nir_pack_64_2x32(b, nir_channels(b, data32, 3 << (i * 2)));
338 }
339
340 /* Only the first load has a component offset */
341 component = 0;
342 dest_comp += num_comps;
343 offset = nir_iadd_imm(b, offset, slot_size);
344 }
345
346 return nir_vec(b, comp64, intrin->dest.ssa.num_components);
347 } else {
348 return emit_load(state, vertex_index, var, offset, component,
349 intrin->dest.ssa.num_components,
350 intrin->dest.ssa.bit_size,
351 nir_get_nir_type_for_glsl_type(type));
352 }
353 }
354
355 static void
356 emit_store(struct lower_io_state *state, nir_ssa_def *data,
357 nir_ssa_def *vertex_index, nir_variable *var, nir_ssa_def *offset,
358 unsigned component, unsigned num_components,
359 nir_component_mask_t write_mask, nir_alu_type type)
360 {
361 nir_builder *b = &state->builder;
362 nir_variable_mode mode = var->data.mode;
363
364 nir_intrinsic_op op;
365 if (mode == nir_var_mem_shared) {
366 op = nir_intrinsic_store_shared;
367 } else {
368 assert(mode == nir_var_shader_out);
369 op = vertex_index ? nir_intrinsic_store_per_vertex_output :
370 nir_intrinsic_store_output;
371 }
372
373 nir_intrinsic_instr *store =
374 nir_intrinsic_instr_create(state->builder.shader, op);
375 store->num_components = num_components;
376
377 store->src[0] = nir_src_for_ssa(data);
378
379 nir_intrinsic_set_base(store, var->data.driver_location);
380
381 if (mode == nir_var_shader_out)
382 nir_intrinsic_set_component(store, component);
383
384 if (store->intrinsic == nir_intrinsic_store_output)
385 nir_intrinsic_set_type(store, type);
386
387 nir_intrinsic_set_write_mask(store, write_mask);
388
389 if (vertex_index)
390 store->src[1] = nir_src_for_ssa(vertex_index);
391
392 store->src[vertex_index ? 2 : 1] = nir_src_for_ssa(offset);
393
394 nir_builder_instr_insert(b, &store->instr);
395 }
396
397 static void
398 lower_store(nir_intrinsic_instr *intrin, struct lower_io_state *state,
399 nir_ssa_def *vertex_index, nir_variable *var, nir_ssa_def *offset,
400 unsigned component, const struct glsl_type *type)
401 {
402 assert(intrin->src[1].is_ssa);
403 if (intrin->src[1].ssa->bit_size == 64 &&
404 (state->options & nir_lower_io_lower_64bit_to_32)) {
405 nir_builder *b = &state->builder;
406
407 const unsigned slot_size = state->type_size(glsl_dvec_type(2), false);
408
409 assert(component == 0 || component == 2);
410 unsigned src_comp = 0;
411 nir_component_mask_t write_mask = nir_intrinsic_write_mask(intrin);
412 while (src_comp < intrin->num_components) {
413 const unsigned num_comps =
414 MIN2(intrin->num_components - src_comp,
415 (4 - component) / 2);
416
417 if (write_mask & BITFIELD_MASK(num_comps)) {
418 nir_ssa_def *data =
419 nir_channels(b, intrin->src[1].ssa,
420 BITFIELD_RANGE(src_comp, num_comps));
421 nir_ssa_def *data32 = nir_bitcast_vector(b, data, 32);
422
423 nir_component_mask_t write_mask32 = 0;
424 for (unsigned i = 0; i < num_comps; i++) {
425 if (write_mask & BITFIELD_MASK(num_comps) & (1 << i))
426 write_mask32 |= 3 << (i * 2);
427 }
428
429 emit_store(state, data32, vertex_index, var, offset,
430 component, data32->num_components, write_mask32,
431 nir_type_uint32);
432 }
433
434 /* Only the first store has a component offset */
435 component = 0;
436 src_comp += num_comps;
437 write_mask >>= num_comps;
438 offset = nir_iadd_imm(b, offset, slot_size);
439 }
440 } else {
441 emit_store(state, intrin->src[1].ssa, vertex_index, var, offset,
442 component, intrin->num_components,
443 nir_intrinsic_write_mask(intrin),
444 nir_get_nir_type_for_glsl_type(type));
445 }
446 }
447
448 static nir_ssa_def *
449 lower_atomic(nir_intrinsic_instr *intrin, struct lower_io_state *state,
450 nir_variable *var, nir_ssa_def *offset)
451 {
452 nir_builder *b = &state->builder;
453 assert(var->data.mode == nir_var_mem_shared);
454
455 nir_intrinsic_op op = shared_atomic_for_deref(intrin->intrinsic);
456
457 nir_intrinsic_instr *atomic =
458 nir_intrinsic_instr_create(state->builder.shader, op);
459
460 nir_intrinsic_set_base(atomic, var->data.driver_location);
461
462 atomic->src[0] = nir_src_for_ssa(offset);
463 assert(nir_intrinsic_infos[intrin->intrinsic].num_srcs ==
464 nir_intrinsic_infos[op].num_srcs);
465 for (unsigned i = 1; i < nir_intrinsic_infos[op].num_srcs; i++) {
466 nir_src_copy(&atomic->src[i], &intrin->src[i], atomic);
467 }
468
469 if (nir_intrinsic_infos[op].has_dest) {
470 assert(intrin->dest.is_ssa);
471 assert(nir_intrinsic_infos[intrin->intrinsic].has_dest);
472 nir_ssa_dest_init(&atomic->instr, &atomic->dest,
473 intrin->dest.ssa.num_components,
474 intrin->dest.ssa.bit_size, NULL);
475 }
476
477 nir_builder_instr_insert(b, &atomic->instr);
478
479 return nir_intrinsic_infos[op].has_dest ? &atomic->dest.ssa : NULL;
480 }
481
482 static nir_ssa_def *
483 lower_interpolate_at(nir_intrinsic_instr *intrin, struct lower_io_state *state,
484 nir_variable *var, nir_ssa_def *offset, unsigned component,
485 const struct glsl_type *type)
486 {
487 nir_builder *b = &state->builder;
488 assert(var->data.mode == nir_var_shader_in);
489
490 /* Ignore interpolateAt() for flat variables - flat is flat. */
491 if (var->data.interpolation == INTERP_MODE_FLAT)
492 return lower_load(intrin, state, NULL, var, offset, component, type);
493
494 /* None of the supported APIs allow interpolation on 64-bit things */
495 assert(intrin->dest.is_ssa && intrin->dest.ssa.bit_size <= 32);
496
497 nir_intrinsic_op bary_op;
498 switch (intrin->intrinsic) {
499 case nir_intrinsic_interp_deref_at_centroid:
500 bary_op = (state->options & nir_lower_io_force_sample_interpolation) ?
501 nir_intrinsic_load_barycentric_sample :
502 nir_intrinsic_load_barycentric_centroid;
503 break;
504 case nir_intrinsic_interp_deref_at_sample:
505 bary_op = nir_intrinsic_load_barycentric_at_sample;
506 break;
507 case nir_intrinsic_interp_deref_at_offset:
508 bary_op = nir_intrinsic_load_barycentric_at_offset;
509 break;
510 default:
511 unreachable("Bogus interpolateAt() intrinsic.");
512 }
513
514 nir_intrinsic_instr *bary_setup =
515 nir_intrinsic_instr_create(state->builder.shader, bary_op);
516
517 nir_ssa_dest_init(&bary_setup->instr, &bary_setup->dest, 2, 32, NULL);
518 nir_intrinsic_set_interp_mode(bary_setup, var->data.interpolation);
519
520 if (intrin->intrinsic == nir_intrinsic_interp_deref_at_sample ||
521 intrin->intrinsic == nir_intrinsic_interp_deref_at_offset)
522 nir_src_copy(&bary_setup->src[0], &intrin->src[1], bary_setup);
523
524 nir_builder_instr_insert(b, &bary_setup->instr);
525
526 nir_intrinsic_instr *load =
527 nir_intrinsic_instr_create(state->builder.shader,
528 nir_intrinsic_load_interpolated_input);
529 load->num_components = intrin->num_components;
530
531 nir_intrinsic_set_base(load, var->data.driver_location);
532 nir_intrinsic_set_component(load, component);
533
534 load->src[0] = nir_src_for_ssa(&bary_setup->dest.ssa);
535 load->src[1] = nir_src_for_ssa(offset);
536
537 assert(intrin->dest.is_ssa);
538 nir_ssa_dest_init(&load->instr, &load->dest,
539 intrin->dest.ssa.num_components,
540 intrin->dest.ssa.bit_size, NULL);
541 nir_builder_instr_insert(b, &load->instr);
542
543 return &load->dest.ssa;
544 }
545
546 static bool
547 nir_lower_io_block(nir_block *block,
548 struct lower_io_state *state)
549 {
550 nir_builder *b = &state->builder;
551 const nir_shader_compiler_options *options = b->shader->options;
552 bool progress = false;
553
554 nir_foreach_instr_safe(instr, block) {
555 if (instr->type != nir_instr_type_intrinsic)
556 continue;
557
558 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
559
560 switch (intrin->intrinsic) {
561 case nir_intrinsic_load_deref:
562 case nir_intrinsic_store_deref:
563 case nir_intrinsic_deref_atomic_add:
564 case nir_intrinsic_deref_atomic_imin:
565 case nir_intrinsic_deref_atomic_umin:
566 case nir_intrinsic_deref_atomic_imax:
567 case nir_intrinsic_deref_atomic_umax:
568 case nir_intrinsic_deref_atomic_and:
569 case nir_intrinsic_deref_atomic_or:
570 case nir_intrinsic_deref_atomic_xor:
571 case nir_intrinsic_deref_atomic_exchange:
572 case nir_intrinsic_deref_atomic_comp_swap:
573 case nir_intrinsic_deref_atomic_fadd:
574 case nir_intrinsic_deref_atomic_fmin:
575 case nir_intrinsic_deref_atomic_fmax:
576 case nir_intrinsic_deref_atomic_fcomp_swap:
577 /* We can lower the io for this nir intrinsic */
578 break;
579 case nir_intrinsic_interp_deref_at_centroid:
580 case nir_intrinsic_interp_deref_at_sample:
581 case nir_intrinsic_interp_deref_at_offset:
582 /* We can optionally lower these to load_interpolated_input */
583 if (options->use_interpolated_input_intrinsics)
584 break;
585 default:
586 /* We can't lower the io for this nir intrinsic, so skip it */
587 continue;
588 }
589
590 nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
591
592 nir_variable_mode mode = deref->mode;
593
594 if ((state->modes & mode) == 0)
595 continue;
596
597 if (mode != nir_var_shader_in &&
598 mode != nir_var_shader_out &&
599 mode != nir_var_mem_shared &&
600 mode != nir_var_uniform)
601 continue;
602
603 nir_variable *var = nir_deref_instr_get_variable(deref);
604
605 b->cursor = nir_before_instr(instr);
606
607 const bool per_vertex = nir_is_per_vertex_io(var, b->shader->info.stage);
608
609 nir_ssa_def *offset;
610 nir_ssa_def *vertex_index = NULL;
611 unsigned component_offset = var->data.location_frac;
612 bool bindless_type_size = mode == nir_var_shader_in ||
613 mode == nir_var_shader_out ||
614 var->data.bindless;
615
616 offset = get_io_offset(b, deref, per_vertex ? &vertex_index : NULL,
617 state->type_size, &component_offset,
618 bindless_type_size);
619
620 nir_ssa_def *replacement = NULL;
621
622 switch (intrin->intrinsic) {
623 case nir_intrinsic_load_deref:
624 replacement = lower_load(intrin, state, vertex_index, var, offset,
625 component_offset, deref->type);
626 break;
627
628 case nir_intrinsic_store_deref:
629 lower_store(intrin, state, vertex_index, var, offset,
630 component_offset, deref->type);
631 break;
632
633 case nir_intrinsic_deref_atomic_add:
634 case nir_intrinsic_deref_atomic_imin:
635 case nir_intrinsic_deref_atomic_umin:
636 case nir_intrinsic_deref_atomic_imax:
637 case nir_intrinsic_deref_atomic_umax:
638 case nir_intrinsic_deref_atomic_and:
639 case nir_intrinsic_deref_atomic_or:
640 case nir_intrinsic_deref_atomic_xor:
641 case nir_intrinsic_deref_atomic_exchange:
642 case nir_intrinsic_deref_atomic_comp_swap:
643 case nir_intrinsic_deref_atomic_fadd:
644 case nir_intrinsic_deref_atomic_fmin:
645 case nir_intrinsic_deref_atomic_fmax:
646 case nir_intrinsic_deref_atomic_fcomp_swap:
647 assert(vertex_index == NULL);
648 replacement = lower_atomic(intrin, state, var, offset);
649 break;
650
651 case nir_intrinsic_interp_deref_at_centroid:
652 case nir_intrinsic_interp_deref_at_sample:
653 case nir_intrinsic_interp_deref_at_offset:
654 assert(vertex_index == NULL);
655 replacement = lower_interpolate_at(intrin, state, var, offset,
656 component_offset, deref->type);
657 break;
658
659 default:
660 continue;
661 }
662
663 if (replacement) {
664 nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
665 nir_src_for_ssa(replacement));
666 }
667 nir_instr_remove(&intrin->instr);
668 progress = true;
669 }
670
671 return progress;
672 }
673
674 static bool
675 nir_lower_io_impl(nir_function_impl *impl,
676 nir_variable_mode modes,
677 int (*type_size)(const struct glsl_type *, bool),
678 nir_lower_io_options options)
679 {
680 struct lower_io_state state;
681 bool progress = false;
682
683 nir_builder_init(&state.builder, impl);
684 state.dead_ctx = ralloc_context(NULL);
685 state.modes = modes;
686 state.type_size = type_size;
687 state.options = options;
688
689 nir_foreach_block(block, impl) {
690 progress |= nir_lower_io_block(block, &state);
691 }
692
693 ralloc_free(state.dead_ctx);
694
695 nir_metadata_preserve(impl, nir_metadata_block_index |
696 nir_metadata_dominance);
697 return progress;
698 }
699
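/* Sketch of a typical driver-side invocation (illustrative only; the
 * type_size_vec4 callback name is a placeholder for whatever slot-counting
 * function the driver provides):
 *
 *    NIR_PASS_V(nir, nir_lower_io,
 *               nir_var_shader_in | nir_var_shader_out,
 *               type_size_vec4, (nir_lower_io_options)0);
 */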
700 bool
701 nir_lower_io(nir_shader *shader, nir_variable_mode modes,
702 int (*type_size)(const struct glsl_type *, bool),
703 nir_lower_io_options options)
704 {
705 bool progress = false;
706
707 nir_foreach_function(function, shader) {
708 if (function->impl) {
709 progress |= nir_lower_io_impl(function->impl, modes,
710 type_size, options);
711 }
712 }
713
714 return progress;
715 }
716
717 static unsigned
718 type_scalar_size_bytes(const struct glsl_type *type)
719 {
720 assert(glsl_type_is_vector_or_scalar(type) ||
721 glsl_type_is_matrix(type));
722 return glsl_type_is_boolean(type) ? 4 : glsl_get_bit_size(type) / 8;
723 }
724
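/* Summary of how the helpers below encode each nir_address_format (derived
 * from the channel usage and asserts in this file):
 *
 *    32bit_global, 64bit_global, 32bit_offset: a single scalar holding the
 *       global address or byte offset.
 *    64bit_bounded_global: a vec4 of (base_lo, base_hi, size, offset); the
 *       64-bit base is packed from the first two channels, the offset in
 *       channel 3 is added to it, and channel 2 is the bound used for
 *       bounds checking.
 *    32bit_index_offset: a vec2 of (buffer index, byte offset).
 */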
725 static nir_ssa_def *
726 build_addr_iadd(nir_builder *b, nir_ssa_def *addr,
727 nir_address_format addr_format, nir_ssa_def *offset)
728 {
729 assert(offset->num_components == 1);
730 assert(addr->bit_size == offset->bit_size);
731
732 switch (addr_format) {
733 case nir_address_format_32bit_global:
734 case nir_address_format_64bit_global:
735 case nir_address_format_32bit_offset:
736 assert(addr->num_components == 1);
737 return nir_iadd(b, addr, offset);
738
739 case nir_address_format_64bit_bounded_global:
740 assert(addr->num_components == 4);
741 return nir_vec4(b, nir_channel(b, addr, 0),
742 nir_channel(b, addr, 1),
743 nir_channel(b, addr, 2),
744 nir_iadd(b, nir_channel(b, addr, 3), offset));
745
746 case nir_address_format_32bit_index_offset:
747 assert(addr->num_components == 2);
748 return nir_vec2(b, nir_channel(b, addr, 0),
749 nir_iadd(b, nir_channel(b, addr, 1), offset));
750 case nir_address_format_logical:
751 unreachable("Unsupported address format");
752 }
753 unreachable("Invalid address format");
754 }
755
756 static nir_ssa_def *
757 build_addr_iadd_imm(nir_builder *b, nir_ssa_def *addr,
758 nir_address_format addr_format, int64_t offset)
759 {
760 return build_addr_iadd(b, addr, addr_format,
761 nir_imm_intN_t(b, offset, addr->bit_size));
762 }
763
764 static nir_ssa_def *
765 addr_to_index(nir_builder *b, nir_ssa_def *addr,
766 nir_address_format addr_format)
767 {
768 assert(addr_format == nir_address_format_32bit_index_offset);
769 assert(addr->num_components == 2);
770 return nir_channel(b, addr, 0);
771 }
772
773 static nir_ssa_def *
774 addr_to_offset(nir_builder *b, nir_ssa_def *addr,
775 nir_address_format addr_format)
776 {
777 assert(addr_format == nir_address_format_32bit_index_offset);
778 assert(addr->num_components == 2);
779 return nir_channel(b, addr, 1);
780 }
781
782 /** Returns true if the given address format resolves to a global address */
783 static bool
784 addr_format_is_global(nir_address_format addr_format)
785 {
786 return addr_format == nir_address_format_32bit_global ||
787 addr_format == nir_address_format_64bit_global ||
788 addr_format == nir_address_format_64bit_bounded_global;
789 }
790
791 static nir_ssa_def *
792 addr_to_global(nir_builder *b, nir_ssa_def *addr,
793 nir_address_format addr_format)
794 {
795 switch (addr_format) {
796 case nir_address_format_32bit_global:
797 case nir_address_format_64bit_global:
798 assert(addr->num_components == 1);
799 return addr;
800
801 case nir_address_format_64bit_bounded_global:
802 assert(addr->num_components == 4);
803 return nir_iadd(b, nir_pack_64_2x32(b, nir_channels(b, addr, 0x3)),
804 nir_u2u64(b, nir_channel(b, addr, 3)));
805
806 case nir_address_format_32bit_index_offset:
807 case nir_address_format_32bit_offset:
808 case nir_address_format_logical:
809 unreachable("Cannot get a 64-bit address with this address format");
810 }
811
812 unreachable("Invalid address format");
813 }
814
815 static bool
816 addr_format_needs_bounds_check(nir_address_format addr_format)
817 {
818 return addr_format == nir_address_format_64bit_bounded_global;
819 }
820
821 static nir_ssa_def *
822 addr_is_in_bounds(nir_builder *b, nir_ssa_def *addr,
823 nir_address_format addr_format, unsigned size)
824 {
825 assert(addr_format == nir_address_format_64bit_bounded_global);
826 assert(addr->num_components == 4);
827 return nir_ige(b, nir_channel(b, addr, 2),
828 nir_iadd_imm(b, nir_channel(b, addr, 3), size));
829 }
830
831 static nir_ssa_def *
832 build_explicit_io_load(nir_builder *b, nir_intrinsic_instr *intrin,
833 nir_ssa_def *addr, nir_address_format addr_format,
834 unsigned num_components)
835 {
836 nir_variable_mode mode = nir_src_as_deref(intrin->src[0])->mode;
837
838 nir_intrinsic_op op;
839 switch (mode) {
840 case nir_var_mem_ubo:
841 op = nir_intrinsic_load_ubo;
842 break;
843 case nir_var_mem_ssbo:
844 if (addr_format_is_global(addr_format))
845 op = nir_intrinsic_load_global;
846 else
847 op = nir_intrinsic_load_ssbo;
848 break;
849 case nir_var_mem_global:
850 assert(addr_format_is_global(addr_format));
851 op = nir_intrinsic_load_global;
852 break;
853 case nir_var_shader_in:
854 assert(addr_format_is_global(addr_format));
855 op = nir_intrinsic_load_kernel_input;
856 break;
857 case nir_var_mem_shared:
858 assert(addr_format == nir_address_format_32bit_offset);
859 op = nir_intrinsic_load_shared;
860 break;
861 default:
862 unreachable("Unsupported explicit IO variable mode");
863 }
864
865 nir_intrinsic_instr *load = nir_intrinsic_instr_create(b->shader, op);
866
867 if (addr_format_is_global(addr_format)) {
868 load->src[0] = nir_src_for_ssa(addr_to_global(b, addr, addr_format));
869 } else if (addr_format == nir_address_format_32bit_offset) {
870 assert(addr->num_components == 1);
871 load->src[0] = nir_src_for_ssa(addr);
872 } else {
873 load->src[0] = nir_src_for_ssa(addr_to_index(b, addr, addr_format));
874 load->src[1] = nir_src_for_ssa(addr_to_offset(b, addr, addr_format));
875 }
876
877 if (mode != nir_var_mem_ubo && mode != nir_var_shader_in && mode != nir_var_mem_shared)
878 nir_intrinsic_set_access(load, nir_intrinsic_access(intrin));
879
880 /* TODO: We should try to provide a better alignment. For OpenCL, we need
881 * to plumb the alignment through from SPIR-V when we have one.
882 */
883 nir_intrinsic_set_align(load, intrin->dest.ssa.bit_size / 8, 0);
884
885 assert(intrin->dest.is_ssa);
886 load->num_components = num_components;
887 nir_ssa_dest_init(&load->instr, &load->dest, num_components,
888 intrin->dest.ssa.bit_size, intrin->dest.ssa.name);
889
890 assert(load->dest.ssa.bit_size % 8 == 0);
891
892 if (addr_format_needs_bounds_check(addr_format)) {
893 /* The Vulkan spec for robustBufferAccess gives us quite a few options
894 * as to what we can do with an OOB read. Unfortunately, returning
895 * undefined values isn't one of them so we return an actual zero.
896 */
897 nir_ssa_def *zero = nir_imm_zero(b, load->num_components,
898 load->dest.ssa.bit_size);
899
900 const unsigned load_size =
901 (load->dest.ssa.bit_size / 8) * load->num_components;
902 nir_push_if(b, addr_is_in_bounds(b, addr, addr_format, load_size));
903
904 nir_builder_instr_insert(b, &load->instr);
905
906 nir_pop_if(b, NULL);
907
908 return nir_if_phi(b, &load->dest.ssa, zero);
909 } else {
910 nir_builder_instr_insert(b, &load->instr);
911 return &load->dest.ssa;
912 }
913 }
914
915 static void
916 build_explicit_io_store(nir_builder *b, nir_intrinsic_instr *intrin,
917 nir_ssa_def *addr, nir_address_format addr_format,
918 nir_ssa_def *value, nir_component_mask_t write_mask)
919 {
920 nir_variable_mode mode = nir_src_as_deref(intrin->src[0])->mode;
921
922 nir_intrinsic_op op;
923 switch (mode) {
924 case nir_var_mem_ssbo:
925 if (addr_format_is_global(addr_format))
926 op = nir_intrinsic_store_global;
927 else
928 op = nir_intrinsic_store_ssbo;
929 break;
930 case nir_var_mem_global:
931 assert(addr_format_is_global(addr_format));
932 op = nir_intrinsic_store_global;
933 break;
934 case nir_var_mem_shared:
935 assert(addr_format == nir_address_format_32bit_offset);
936 op = nir_intrinsic_store_shared;
937 break;
938 default:
939 unreachable("Unsupported explicit IO variable mode");
940 }
941
942 nir_intrinsic_instr *store = nir_intrinsic_instr_create(b->shader, op);
943
944 store->src[0] = nir_src_for_ssa(value);
945 if (addr_format_is_global(addr_format)) {
946 store->src[1] = nir_src_for_ssa(addr_to_global(b, addr, addr_format));
947 } else if (addr_format == nir_address_format_32bit_offset) {
948 assert(addr->num_components == 1);
949 store->src[1] = nir_src_for_ssa(addr);
950 } else {
951 store->src[1] = nir_src_for_ssa(addr_to_index(b, addr, addr_format));
952 store->src[2] = nir_src_for_ssa(addr_to_offset(b, addr, addr_format));
953 }
954
955 nir_intrinsic_set_write_mask(store, write_mask);
956
957 if (mode != nir_var_mem_shared)
958 nir_intrinsic_set_access(store, nir_intrinsic_access(intrin));
959
960 /* TODO: We should try to provide a better alignment. For OpenCL, we need
961 * to plumb the alignment through from SPIR-V when we have one.
962 */
963 nir_intrinsic_set_align(store, value->bit_size / 8, 0);
964
965 assert(value->num_components == 1 ||
966 value->num_components == intrin->num_components);
967 store->num_components = value->num_components;
968
969 assert(value->bit_size % 8 == 0);
970
971 if (addr_format_needs_bounds_check(addr_format)) {
972 const unsigned store_size = (value->bit_size / 8) * store->num_components;
973 nir_push_if(b, addr_is_in_bounds(b, addr, addr_format, store_size));
974
975 nir_builder_instr_insert(b, &store->instr);
976
977 nir_pop_if(b, NULL);
978 } else {
979 nir_builder_instr_insert(b, &store->instr);
980 }
981 }
982
983 static nir_ssa_def *
984 build_explicit_io_atomic(nir_builder *b, nir_intrinsic_instr *intrin,
985 nir_ssa_def *addr, nir_address_format addr_format)
986 {
987 nir_variable_mode mode = nir_src_as_deref(intrin->src[0])->mode;
988 const unsigned num_data_srcs =
989 nir_intrinsic_infos[intrin->intrinsic].num_srcs - 1;
990
991 nir_intrinsic_op op;
992 switch (mode) {
993 case nir_var_mem_ssbo:
994 if (addr_format_is_global(addr_format))
995 op = global_atomic_for_deref(intrin->intrinsic);
996 else
997 op = ssbo_atomic_for_deref(intrin->intrinsic);
998 break;
999 case nir_var_mem_global:
1000 assert(addr_format_is_global(addr_format));
1001 op = global_atomic_for_deref(intrin->intrinsic);
1002 break;
1003 case nir_var_mem_shared:
1004 assert(addr_format == nir_address_format_32bit_offset);
1005 op = shared_atomic_for_deref(intrin->intrinsic);
1006 break;
1007 default:
1008 unreachable("Unsupported explicit IO variable mode");
1009 }
1010
1011 nir_intrinsic_instr *atomic = nir_intrinsic_instr_create(b->shader, op);
1012
1013 unsigned src = 0;
1014 if (addr_format_is_global(addr_format)) {
1015 atomic->src[src++] = nir_src_for_ssa(addr_to_global(b, addr, addr_format));
1016 } else if (addr_format == nir_address_format_32bit_offset) {
1017 assert(addr->num_components == 1);
1018 atomic->src[src++] = nir_src_for_ssa(addr);
1019 } else {
1020 atomic->src[src++] = nir_src_for_ssa(addr_to_index(b, addr, addr_format));
1021 atomic->src[src++] = nir_src_for_ssa(addr_to_offset(b, addr, addr_format));
1022 }
1023 for (unsigned i = 0; i < num_data_srcs; i++) {
1024 atomic->src[src++] = nir_src_for_ssa(intrin->src[1 + i].ssa);
1025 }
1026
1027 /* Global atomics don't have access flags because they assume that the
1028 * address may be non-uniform.
1029 */
1030 if (!addr_format_is_global(addr_format) && mode != nir_var_mem_shared)
1031 nir_intrinsic_set_access(atomic, nir_intrinsic_access(intrin));
1032
1033 assert(intrin->dest.ssa.num_components == 1);
1034 nir_ssa_dest_init(&atomic->instr, &atomic->dest,
1035 1, intrin->dest.ssa.bit_size, intrin->dest.ssa.name);
1036
1037 assert(atomic->dest.ssa.bit_size % 8 == 0);
1038
1039 if (addr_format_needs_bounds_check(addr_format)) {
1040 const unsigned atomic_size = atomic->dest.ssa.bit_size / 8;
1041 nir_push_if(b, addr_is_in_bounds(b, addr, addr_format, atomic_size));
1042
1043 nir_builder_instr_insert(b, &atomic->instr);
1044
1045 nir_pop_if(b, NULL);
1046 return nir_if_phi(b, &atomic->dest.ssa,
1047 nir_ssa_undef(b, 1, atomic->dest.ssa.bit_size));
1048 } else {
1049 nir_builder_instr_insert(b, &atomic->instr);
1050 return &atomic->dest.ssa;
1051 }
1052 }
1053
1054 nir_ssa_def *
1055 nir_explicit_io_address_from_deref(nir_builder *b, nir_deref_instr *deref,
1056 nir_ssa_def *base_addr,
1057 nir_address_format addr_format)
1058 {
1059 assert(deref->dest.is_ssa);
1060 switch (deref->deref_type) {
1061 case nir_deref_type_var:
1062 assert(deref->mode & (nir_var_shader_in | nir_var_mem_shared));
1063 return nir_imm_intN_t(b, deref->var->data.driver_location,
1064 deref->dest.ssa.bit_size);
1065
1066 case nir_deref_type_array: {
1067 nir_deref_instr *parent = nir_deref_instr_parent(deref);
1068
1069 unsigned stride = glsl_get_explicit_stride(parent->type);
1070 if ((glsl_type_is_matrix(parent->type) &&
1071 glsl_matrix_type_is_row_major(parent->type)) ||
1072 (glsl_type_is_vector(parent->type) && stride == 0))
1073 stride = type_scalar_size_bytes(parent->type);
1074
1075 assert(stride > 0);
1076
1077 nir_ssa_def *index = nir_ssa_for_src(b, deref->arr.index, 1);
1078 index = nir_i2i(b, index, base_addr->bit_size);
1079 return build_addr_iadd(b, base_addr, addr_format,
1080 nir_imul_imm(b, index, stride));
1081 }
1082
1083 case nir_deref_type_ptr_as_array: {
1084 nir_ssa_def *index = nir_ssa_for_src(b, deref->arr.index, 1);
1085 index = nir_i2i(b, index, base_addr->bit_size);
1086 unsigned stride = nir_deref_instr_ptr_as_array_stride(deref);
1087 return build_addr_iadd(b, base_addr, addr_format,
1088 nir_imul_imm(b, index, stride));
1089 }
1090
1091 case nir_deref_type_array_wildcard:
1092 unreachable("Wildcards should be lowered by now");
1093 break;
1094
1095 case nir_deref_type_struct: {
1096 nir_deref_instr *parent = nir_deref_instr_parent(deref);
1097 int offset = glsl_get_struct_field_offset(parent->type,
1098 deref->strct.index);
1099 assert(offset >= 0);
1100 return build_addr_iadd_imm(b, base_addr, addr_format, offset);
1101 }
1102
1103 case nir_deref_type_cast:
1104 /* Nothing to do here */
1105 return base_addr;
1106 }
1107
1108 unreachable("Invalid NIR deref type");
1109 }
1110
1111 void
1112 nir_lower_explicit_io_instr(nir_builder *b,
1113 nir_intrinsic_instr *intrin,
1114 nir_ssa_def *addr,
1115 nir_address_format addr_format)
1116 {
1117 b->cursor = nir_after_instr(&intrin->instr);
1118
1119 nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
1120 unsigned vec_stride = glsl_get_explicit_stride(deref->type);
1121 unsigned scalar_size = type_scalar_size_bytes(deref->type);
1122 assert(vec_stride == 0 || glsl_type_is_vector(deref->type));
1123 assert(vec_stride == 0 || vec_stride >= scalar_size);
1124
1125 if (intrin->intrinsic == nir_intrinsic_load_deref) {
1126 nir_ssa_def *value;
1127 if (vec_stride > scalar_size) {
1128 nir_ssa_def *comps[4] = { NULL, };
1129 for (unsigned i = 0; i < intrin->num_components; i++) {
1130 nir_ssa_def *comp_addr = build_addr_iadd_imm(b, addr, addr_format,
1131 vec_stride * i);
1132 comps[i] = build_explicit_io_load(b, intrin, comp_addr,
1133 addr_format, 1);
1134 }
1135 value = nir_vec(b, comps, intrin->num_components);
1136 } else {
1137 value = build_explicit_io_load(b, intrin, addr, addr_format,
1138 intrin->num_components);
1139 }
1140 nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(value));
1141 } else if (intrin->intrinsic == nir_intrinsic_store_deref) {
1142 assert(intrin->src[1].is_ssa);
1143 nir_ssa_def *value = intrin->src[1].ssa;
1144 nir_component_mask_t write_mask = nir_intrinsic_write_mask(intrin);
1145 if (vec_stride > scalar_size) {
1146 for (unsigned i = 0; i < intrin->num_components; i++) {
1147 if (!(write_mask & (1 << i)))
1148 continue;
1149
1150 nir_ssa_def *comp_addr = build_addr_iadd_imm(b, addr, addr_format,
1151 vec_stride * i);
1152 build_explicit_io_store(b, intrin, comp_addr, addr_format,
1153 nir_channel(b, value, i), 1);
1154 }
1155 } else {
1156 build_explicit_io_store(b, intrin, addr, addr_format,
1157 value, write_mask);
1158 }
1159 } else {
1160 nir_ssa_def *value =
1161 build_explicit_io_atomic(b, intrin, addr, addr_format);
1162 nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(value));
1163 }
1164
1165 nir_instr_remove(&intrin->instr);
1166 }
1167
1168 static void
1169 lower_explicit_io_deref(nir_builder *b, nir_deref_instr *deref,
1170 nir_address_format addr_format)
1171 {
1172 /* Just delete the deref if it's not used. We can't use
1173 * nir_deref_instr_remove_if_unused here because it may remove more than
1174 * one deref, which could break our list walking since we walk the list
1175 * backwards.
1176 */
1177 assert(list_empty(&deref->dest.ssa.if_uses));
1178 if (list_empty(&deref->dest.ssa.uses)) {
1179 nir_instr_remove(&deref->instr);
1180 return;
1181 }
1182
1183 b->cursor = nir_after_instr(&deref->instr);
1184
1185 nir_ssa_def *base_addr = NULL;
1186 if (deref->deref_type != nir_deref_type_var) {
1187 assert(deref->parent.is_ssa);
1188 base_addr = deref->parent.ssa;
1189 }
1190
1191 nir_ssa_def *addr = nir_explicit_io_address_from_deref(b, deref, base_addr,
1192 addr_format);
1193
1194 nir_instr_remove(&deref->instr);
1195 nir_ssa_def_rewrite_uses(&deref->dest.ssa, nir_src_for_ssa(addr));
1196 }
1197
1198 static void
1199 lower_explicit_io_access(nir_builder *b, nir_intrinsic_instr *intrin,
1200 nir_address_format addr_format)
1201 {
1202 assert(intrin->src[0].is_ssa);
1203 nir_lower_explicit_io_instr(b, intrin, intrin->src[0].ssa, addr_format);
1204 }
1205
1206 static void
1207 lower_explicit_io_array_length(nir_builder *b, nir_intrinsic_instr *intrin,
1208 nir_address_format addr_format)
1209 {
1210 b->cursor = nir_after_instr(&intrin->instr);
1211
1212 nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
1213
1214 assert(glsl_type_is_array(deref->type));
1215 assert(glsl_get_length(deref->type) == 0);
1216 unsigned stride = glsl_get_explicit_stride(deref->type);
1217 assert(stride > 0);
1218
1219 assert(addr_format == nir_address_format_32bit_index_offset);
1220 nir_ssa_def *addr = &deref->dest.ssa;
1221 nir_ssa_def *index = addr_to_index(b, addr, addr_format);
1222 nir_ssa_def *offset = addr_to_offset(b, addr, addr_format);
1223
1224 nir_intrinsic_instr *bsize =
1225 nir_intrinsic_instr_create(b->shader, nir_intrinsic_get_buffer_size);
1226 bsize->src[0] = nir_src_for_ssa(index);
1227 nir_ssa_dest_init(&bsize->instr, &bsize->dest, 1, 32, NULL);
1228 nir_builder_instr_insert(b, &bsize->instr);
1229
1230 nir_ssa_def *arr_size =
1231 nir_idiv(b, nir_isub(b, &bsize->dest.ssa, offset),
1232 nir_imm_int(b, stride));
1233
1234 nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(arr_size));
1235 nir_instr_remove(&intrin->instr);
1236 }
1237
1238 static bool
1239 nir_lower_explicit_io_impl(nir_function_impl *impl, nir_variable_mode modes,
1240 nir_address_format addr_format)
1241 {
1242 bool progress = false;
1243
1244 nir_builder b;
1245 nir_builder_init(&b, impl);
1246
1247 /* Walk in reverse order so that we can see the full deref chain when we
1248 * lower the access operations. We lower them assuming that the derefs
1249 * will be turned into address calculations later.
1250 */
1251 nir_foreach_block_reverse(block, impl) {
1252 nir_foreach_instr_reverse_safe(instr, block) {
1253 switch (instr->type) {
1254 case nir_instr_type_deref: {
1255 nir_deref_instr *deref = nir_instr_as_deref(instr);
1256 if (deref->mode & modes) {
1257 lower_explicit_io_deref(&b, deref, addr_format);
1258 progress = true;
1259 }
1260 break;
1261 }
1262
1263 case nir_instr_type_intrinsic: {
1264 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
1265 switch (intrin->intrinsic) {
1266 case nir_intrinsic_load_deref:
1267 case nir_intrinsic_store_deref:
1268 case nir_intrinsic_deref_atomic_add:
1269 case nir_intrinsic_deref_atomic_imin:
1270 case nir_intrinsic_deref_atomic_umin:
1271 case nir_intrinsic_deref_atomic_imax:
1272 case nir_intrinsic_deref_atomic_umax:
1273 case nir_intrinsic_deref_atomic_and:
1274 case nir_intrinsic_deref_atomic_or:
1275 case nir_intrinsic_deref_atomic_xor:
1276 case nir_intrinsic_deref_atomic_exchange:
1277 case nir_intrinsic_deref_atomic_comp_swap:
1278 case nir_intrinsic_deref_atomic_fadd:
1279 case nir_intrinsic_deref_atomic_fmin:
1280 case nir_intrinsic_deref_atomic_fmax:
1281 case nir_intrinsic_deref_atomic_fcomp_swap: {
1282 nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
1283 if (deref->mode & modes) {
1284 lower_explicit_io_access(&b, intrin, addr_format);
1285 progress = true;
1286 }
1287 break;
1288 }
1289
1290 case nir_intrinsic_deref_buffer_array_length: {
1291 nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
1292 if (deref->mode & modes) {
1293 lower_explicit_io_array_length(&b, intrin, addr_format);
1294 progress = true;
1295 }
1296 break;
1297 }
1298
1299 default:
1300 break;
1301 }
1302 break;
1303 }
1304
1305 default:
1306 /* Nothing to do */
1307 break;
1308 }
1309 }
1310 }
1311
1312 if (progress) {
1313 nir_metadata_preserve(impl, nir_metadata_block_index |
1314 nir_metadata_dominance);
1315 }
1316
1317 return progress;
1318 }
1319
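/* Sketch of typical invocations (illustrative only); with nir_var_mem_shared
 * now supported, a driver can lower shared memory alongside SSBOs, e.g.:
 *
 *    NIR_PASS_V(nir, nir_lower_explicit_io, nir_var_mem_ssbo,
 *               nir_address_format_32bit_index_offset);
 *    NIR_PASS_V(nir, nir_lower_explicit_io, nir_var_mem_shared,
 *               nir_address_format_32bit_offset);
 */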
1320 bool
1321 nir_lower_explicit_io(nir_shader *shader, nir_variable_mode modes,
1322 nir_address_format addr_format)
1323 {
1324 bool progress = false;
1325
1326 nir_foreach_function(function, shader) {
1327 if (function->impl &&
1328 nir_lower_explicit_io_impl(function->impl, modes, addr_format))
1329 progress = true;
1330 }
1331
1332 return progress;
1333 }
1334
1335 /**
1336 * Return the offset source for a load/store intrinsic.
1337 */
1338 nir_src *
1339 nir_get_io_offset_src(nir_intrinsic_instr *instr)
1340 {
1341 switch (instr->intrinsic) {
1342 case nir_intrinsic_load_input:
1343 case nir_intrinsic_load_output:
1344 case nir_intrinsic_load_shared:
1345 case nir_intrinsic_load_uniform:
1346 case nir_intrinsic_load_global:
1347 case nir_intrinsic_load_scratch:
1348 case nir_intrinsic_load_fs_input_interp_deltas:
1349 return &instr->src[0];
1350 case nir_intrinsic_load_ubo:
1351 case nir_intrinsic_load_ssbo:
1352 case nir_intrinsic_load_per_vertex_input:
1353 case nir_intrinsic_load_per_vertex_output:
1354 case nir_intrinsic_load_interpolated_input:
1355 case nir_intrinsic_store_output:
1356 case nir_intrinsic_store_shared:
1357 case nir_intrinsic_store_global:
1358 case nir_intrinsic_store_scratch:
1359 return &instr->src[1];
1360 case nir_intrinsic_store_ssbo:
1361 case nir_intrinsic_store_per_vertex_output:
1362 return &instr->src[2];
1363 default:
1364 return NULL;
1365 }
1366 }
1367
1368 /**
1369 * Return the vertex index source for a load/store per_vertex intrinsic.
1370 */
1371 nir_src *
1372 nir_get_io_vertex_index_src(nir_intrinsic_instr *instr)
1373 {
1374 switch (instr->intrinsic) {
1375 case nir_intrinsic_load_per_vertex_input:
1376 case nir_intrinsic_load_per_vertex_output:
1377 return &instr->src[0];
1378 case nir_intrinsic_store_per_vertex_output:
1379 return &instr->src[1];
1380 default:
1381 return NULL;
1382 }
1383 }
1384
1385 /**
1386 * Return the numeric constant that identifies a NULL pointer for each address
1387 * format.
1388 */
1389 const nir_const_value *
1390 nir_address_format_null_value(nir_address_format addr_format)
1391 {
1392 static const nir_const_value null_values[][NIR_MAX_VEC_COMPONENTS] = {
1393 [nir_address_format_32bit_global] = {{0}},
1394 [nir_address_format_64bit_global] = {{0}},
1395 [nir_address_format_64bit_bounded_global] = {{0}},
1396 [nir_address_format_32bit_index_offset] = {{.u32 = ~0}, {.u32 = ~0}},
1397 [nir_address_format_32bit_offset] = {{.u32 = ~0}},
1398 [nir_address_format_logical] = {{.u32 = ~0}},
1399 };
1400
1401 assert(addr_format < ARRAY_SIZE(null_values));
1402 return null_values[addr_format];
1403 }
1404
1405 nir_ssa_def *
1406 nir_build_addr_ieq(nir_builder *b, nir_ssa_def *addr0, nir_ssa_def *addr1,
1407 nir_address_format addr_format)
1408 {
1409 switch (addr_format) {
1410 case nir_address_format_32bit_global:
1411 case nir_address_format_64bit_global:
1412 case nir_address_format_64bit_bounded_global:
1413 case nir_address_format_32bit_index_offset:
1414 case nir_address_format_32bit_offset:
1415 return nir_ball_iequal(b, addr0, addr1);
1416
1417 case nir_address_format_logical:
1418 unreachable("Unsupported address format");
1419 }
1420
1421 unreachable("Invalid address format");
1422 }
1423
1424 nir_ssa_def *
1425 nir_build_addr_isub(nir_builder *b, nir_ssa_def *addr0, nir_ssa_def *addr1,
1426 nir_address_format addr_format)
1427 {
1428 switch (addr_format) {
1429 case nir_address_format_32bit_global:
1430 case nir_address_format_64bit_global:
1431 case nir_address_format_32bit_offset:
1432 assert(addr0->num_components == 1);
1433 assert(addr1->num_components == 1);
1434 return nir_isub(b, addr0, addr1);
1435
1436 case nir_address_format_64bit_bounded_global:
1437 return nir_isub(b, addr_to_global(b, addr0, addr_format),
1438 addr_to_global(b, addr1, addr_format));
1439
1440 case nir_address_format_32bit_index_offset:
1441 assert(addr0->num_components == 2);
1442 assert(addr1->num_components == 2);
1443 /* Assume the same buffer index. */
1444 return nir_isub(b, nir_channel(b, addr0, 1), nir_channel(b, addr1, 1));
1445
1446 case nir_address_format_logical:
1447 unreachable("Unsupported address format");
1448 }
1449
1450 unreachable("Invalid address format");
1451 }
1452
1453 static bool
1454 is_input(nir_intrinsic_instr *intrin)
1455 {
1456 return intrin->intrinsic == nir_intrinsic_load_input ||
1457 intrin->intrinsic == nir_intrinsic_load_per_vertex_input ||
1458 intrin->intrinsic == nir_intrinsic_load_interpolated_input ||
1459 intrin->intrinsic == nir_intrinsic_load_fs_input_interp_deltas;
1460 }
1461
1462 static bool
1463 is_output(nir_intrinsic_instr *intrin)
1464 {
1465 return intrin->intrinsic == nir_intrinsic_load_output ||
1466 intrin->intrinsic == nir_intrinsic_load_per_vertex_output ||
1467 intrin->intrinsic == nir_intrinsic_store_output ||
1468 intrin->intrinsic == nir_intrinsic_store_per_vertex_output;
1469 }
1470
1471
1472 /**
1473 * This pass adds constant offsets to instr->const_index[0] for input/output
1474 * intrinsics, and resets the offset source to 0. Non-constant offsets remain
1475 * unchanged - since we don't know what part of a compound variable is
1476 * accessed, we allocate storage for the entire thing. For drivers that use
1477 * nir_lower_io_to_temporaries() before nir_lower_io(), this guarantees that
1478 * the offset source will be 0, so that they don't have to add it in manually.
1479 */
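/* For example (illustrative only): a load_input with base=4 and a constant
 * offset source of 2 becomes a load_input with base=6 and an offset source
 * of 0, while a load_input with a non-constant offset source is left
 * untouched.
 */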
1480
1481 static bool
1482 add_const_offset_to_base_block(nir_block *block, nir_builder *b,
1483 nir_variable_mode mode)
1484 {
1485 bool progress = false;
1486 nir_foreach_instr_safe(instr, block) {
1487 if (instr->type != nir_instr_type_intrinsic)
1488 continue;
1489
1490 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
1491
1492 if ((mode == nir_var_shader_in && is_input(intrin)) ||
1493 (mode == nir_var_shader_out && is_output(intrin))) {
1494 nir_src *offset = nir_get_io_offset_src(intrin);
1495
1496 if (nir_src_is_const(*offset)) {
1497 intrin->const_index[0] += nir_src_as_uint(*offset);
1498 b->cursor = nir_before_instr(&intrin->instr);
1499 nir_instr_rewrite_src(&intrin->instr, offset,
1500 nir_src_for_ssa(nir_imm_int(b, 0)));
1501 progress = true;
1502 }
1503 }
1504 }
1505
1506 return progress;
1507 }
1508
1509 bool
1510 nir_io_add_const_offset_to_base(nir_shader *nir, nir_variable_mode mode)
1511 {
1512 bool progress = false;
1513
1514 nir_foreach_function(f, nir) {
1515 if (f->impl) {
1516 nir_builder b;
1517 nir_builder_init(&b, f->impl);
1518 nir_foreach_block(block, f->impl) {
1519 progress |= add_const_offset_to_base_block(block, &b, mode);
1520 }
1521 }
1522 }
1523
1524 return progress;
1525 }
1526