nir/lower_io: Return SSA defs from helpers
[mesa.git] src/compiler/nir/nir_lower_io.c
1 /*
2 * Copyright © 2014 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Connor Abbott (cwabbott0@gmail.com)
25 * Jason Ekstrand (jason@jlekstrand.net)
26 *
27 */
28
29 /*
30 * This lowering pass converts load/store operations on input/output
31 * variables into the corresponding input/output intrinsics.
32 */
33
34 #include "nir.h"
35 #include "nir_builder.h"
36 #include "nir_deref.h"
37
38 struct lower_io_state {
39 void *dead_ctx;
40 nir_builder builder;
41 int (*type_size)(const struct glsl_type *type, bool);
42 nir_variable_mode modes;
43 nir_lower_io_options options;
44 };
45
46 static nir_intrinsic_op
47 ssbo_atomic_for_deref(nir_intrinsic_op deref_op)
48 {
49 switch (deref_op) {
50 #define OP(O) case nir_intrinsic_deref_##O: return nir_intrinsic_ssbo_##O;
51 OP(atomic_exchange)
52 OP(atomic_comp_swap)
53 OP(atomic_add)
54 OP(atomic_imin)
55 OP(atomic_umin)
56 OP(atomic_imax)
57 OP(atomic_umax)
58 OP(atomic_and)
59 OP(atomic_or)
60 OP(atomic_xor)
61 OP(atomic_fadd)
62 OP(atomic_fmin)
63 OP(atomic_fmax)
64 OP(atomic_fcomp_swap)
65 #undef OP
66 default:
67 unreachable("Invalid SSBO atomic");
68 }
69 }
70
71 static nir_intrinsic_op
72 global_atomic_for_deref(nir_intrinsic_op deref_op)
73 {
74 switch (deref_op) {
75 #define OP(O) case nir_intrinsic_deref_##O: return nir_intrinsic_global_##O;
76 OP(atomic_exchange)
77 OP(atomic_comp_swap)
78 OP(atomic_add)
79 OP(atomic_imin)
80 OP(atomic_umin)
81 OP(atomic_imax)
82 OP(atomic_umax)
83 OP(atomic_and)
84 OP(atomic_or)
85 OP(atomic_xor)
86 OP(atomic_fadd)
87 OP(atomic_fmin)
88 OP(atomic_fmax)
89 OP(atomic_fcomp_swap)
90 #undef OP
91 default:
92 unreachable("Invalid SSBO atomic");
93 }
94 }
95
96 void
97 nir_assign_var_locations(struct exec_list *var_list, unsigned *size,
98 int (*type_size)(const struct glsl_type *, bool))
99 {
100 unsigned location = 0;
101
102 nir_foreach_variable(var, var_list) {
103 /*
104 * UBOs and SSBOs have their own address spaces, so don't count them
105 * towards the number of global uniforms.
106 */
107 if (var->data.mode == nir_var_mem_ubo || var->data.mode == nir_var_mem_ssbo)
108 continue;
109
110 var->data.driver_location = location;
111 bool bindless_type_size = var->data.mode == nir_var_shader_in ||
112 var->data.mode == nir_var_shader_out ||
113 var->data.bindless;
114 location += type_size(var->type, bindless_type_size);
115 }
116
117 *size = location;
118 }
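/* Illustrative sketch (not part of this file): drivers pass a type_size
 * callback that measures variables in whatever units their backend uses.
 * A vec4-slot-based callback might look roughly like the following; the
 * name type_size_vec4 is hypothetical.
 *
 *    static int
 *    type_size_vec4(const struct glsl_type *type, bool bindless)
 *    {
 *       return glsl_count_attribute_slots(type, false);
 *    }
 *
 *    nir_assign_var_locations(&shader->uniforms, &shader->num_uniforms,
 *                             type_size_vec4);
 */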
119
120 /**
121 * Return true if the given variable is a per-vertex input/output array
122 * (such as geometry shader inputs).
123 */
124 bool
125 nir_is_per_vertex_io(const nir_variable *var, gl_shader_stage stage)
126 {
127 if (var->data.patch || !glsl_type_is_array(var->type))
128 return false;
129
130 if (var->data.mode == nir_var_shader_in)
131 return stage == MESA_SHADER_GEOMETRY ||
132 stage == MESA_SHADER_TESS_CTRL ||
133 stage == MESA_SHADER_TESS_EVAL;
134
135 if (var->data.mode == nir_var_shader_out)
136 return stage == MESA_SHADER_TESS_CTRL;
137
138 return false;
139 }
140
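/* Compute the offset source (in units of the driver's type_size callback)
 * for a deref, relative to the start of the variable.  For per-vertex I/O
 * the outermost array index is returned separately through vertex_index,
 * and for compact variables the component within a vec4 slot is folded
 * into *component.
 */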
141 static nir_ssa_def *
142 get_io_offset(nir_builder *b, nir_deref_instr *deref,
143 nir_ssa_def **vertex_index,
144 int (*type_size)(const struct glsl_type *, bool),
145 unsigned *component, bool bts)
146 {
147 nir_deref_path path;
148 nir_deref_path_init(&path, deref, NULL);
149
150 assert(path.path[0]->deref_type == nir_deref_type_var);
151 nir_deref_instr **p = &path.path[1];
152
153 /* For per-vertex input arrays (i.e. geometry shader inputs), keep the
154 * outermost array index separate. Process the rest normally.
155 */
156 if (vertex_index != NULL) {
157 assert((*p)->deref_type == nir_deref_type_array);
158 *vertex_index = nir_ssa_for_src(b, (*p)->arr.index, 1);
159 p++;
160 }
161
162 if (path.path[0]->var->data.compact) {
163 assert((*p)->deref_type == nir_deref_type_array);
164 assert(glsl_type_is_scalar((*p)->type));
165
166 /* We always lower indirect dereferences for "compact" array vars. */
167 const unsigned index = nir_src_as_uint((*p)->arr.index);
168 const unsigned total_offset = *component + index;
169 const unsigned slot_offset = total_offset / 4;
170 *component = total_offset % 4;
171 return nir_imm_int(b, type_size(glsl_vec4_type(), bts) * slot_offset);
172 }
173
174 /* Just emit code and let constant-folding go to town */
175 nir_ssa_def *offset = nir_imm_int(b, 0);
176
177 for (; *p; p++) {
178 if ((*p)->deref_type == nir_deref_type_array) {
179 unsigned size = type_size((*p)->type, bts);
180
181 nir_ssa_def *mul =
182 nir_imul_imm(b, nir_ssa_for_src(b, (*p)->arr.index, 1), size);
183
184 offset = nir_iadd(b, offset, mul);
185 } else if ((*p)->deref_type == nir_deref_type_struct) {
186 /* p starts at path[1], so this is safe */
187 nir_deref_instr *parent = *(p - 1);
188
189 unsigned field_offset = 0;
190 for (unsigned i = 0; i < (*p)->strct.index; i++) {
191 field_offset += type_size(glsl_get_struct_field(parent->type, i), bts);
192 }
193 offset = nir_iadd_imm(b, offset, field_offset);
194 } else {
195 unreachable("Unsupported deref type");
196 }
197 }
198
199 nir_deref_path_finish(&path);
200
201 return offset;
202 }
203
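/* Emit the load intrinsic corresponding to a load_deref of an input, output,
 * uniform or shared variable.  For fragment-shader inputs that take the
 * interpolated-input path this also emits the matching load_barycentric_*
 * intrinsic.  Returns the SSA def of the new load.
 */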
204 static nir_ssa_def *
205 lower_load(nir_intrinsic_instr *intrin, struct lower_io_state *state,
206 nir_ssa_def *vertex_index, nir_variable *var, nir_ssa_def *offset,
207 unsigned component, const struct glsl_type *type)
208 {
209 nir_builder *b = &state->builder;
210 const nir_shader *nir = b->shader;
211 nir_variable_mode mode = var->data.mode;
212 nir_ssa_def *barycentric = NULL;
213
214 nir_intrinsic_op op;
215 switch (mode) {
216 case nir_var_shader_in:
217 if (nir->info.stage == MESA_SHADER_FRAGMENT &&
218 nir->options->use_interpolated_input_intrinsics &&
219 var->data.interpolation != INTERP_MODE_FLAT) {
220 assert(vertex_index == NULL);
221
222 nir_intrinsic_op bary_op;
223 if (var->data.sample ||
224 (state->options & nir_lower_io_force_sample_interpolation))
225 bary_op = nir_intrinsic_load_barycentric_sample;
226 else if (var->data.centroid)
227 bary_op = nir_intrinsic_load_barycentric_centroid;
228 else
229 bary_op = nir_intrinsic_load_barycentric_pixel;
230
231 barycentric = nir_load_barycentric(&state->builder, bary_op,
232 var->data.interpolation);
233 op = nir_intrinsic_load_interpolated_input;
234 } else {
235 op = vertex_index ? nir_intrinsic_load_per_vertex_input :
236 nir_intrinsic_load_input;
237 }
238 break;
239 case nir_var_shader_out:
240 op = vertex_index ? nir_intrinsic_load_per_vertex_output :
241 nir_intrinsic_load_output;
242 break;
243 case nir_var_uniform:
244 op = nir_intrinsic_load_uniform;
245 break;
246 case nir_var_mem_shared:
247 op = nir_intrinsic_load_shared;
248 break;
249 default:
250 unreachable("Unknown variable mode");
251 }
252
253 nir_intrinsic_instr *load =
254 nir_intrinsic_instr_create(state->builder.shader, op);
255 load->num_components = intrin->num_components;
256
257 nir_intrinsic_set_base(load, var->data.driver_location);
258 if (mode == nir_var_shader_in || mode == nir_var_shader_out)
259 nir_intrinsic_set_component(load, component);
260
261 if (load->intrinsic == nir_intrinsic_load_uniform)
262 nir_intrinsic_set_range(load,
263 state->type_size(var->type, var->data.bindless));
264
265 if (load->intrinsic == nir_intrinsic_load_input ||
266 load->intrinsic == nir_intrinsic_load_uniform)
267 nir_intrinsic_set_type(load, nir_get_nir_type_for_glsl_type(type));
268
269 if (vertex_index) {
270 load->src[0] = nir_src_for_ssa(vertex_index);
271 load->src[1] = nir_src_for_ssa(offset);
272 } else if (barycentric) {
273 load->src[0] = nir_src_for_ssa(barycentric);
274 load->src[1] = nir_src_for_ssa(offset);
275 } else {
276 load->src[0] = nir_src_for_ssa(offset);
277 }
278
279 assert(intrin->dest.is_ssa);
280 nir_ssa_dest_init(&load->instr, &load->dest,
281 intrin->dest.ssa.num_components,
282 intrin->dest.ssa.bit_size, NULL);
283 nir_builder_instr_insert(b, &load->instr);
284
285 return &load->dest.ssa;
286 }
287
288 static void
289 lower_store(nir_intrinsic_instr *intrin, struct lower_io_state *state,
290 nir_ssa_def *vertex_index, nir_variable *var, nir_ssa_def *offset,
291 unsigned component, const struct glsl_type *type)
292 {
293 nir_builder *b = &state->builder;
294 nir_variable_mode mode = var->data.mode;
295
296 nir_intrinsic_op op;
297 if (mode == nir_var_mem_shared) {
298 op = nir_intrinsic_store_shared;
299 } else {
300 assert(mode == nir_var_shader_out);
301 op = vertex_index ? nir_intrinsic_store_per_vertex_output :
302 nir_intrinsic_store_output;
303 }
304
305 nir_intrinsic_instr *store =
306 nir_intrinsic_instr_create(state->builder.shader, op);
307 store->num_components = intrin->num_components;
308
309 nir_src_copy(&store->src[0], &intrin->src[1], store);
310
311 nir_intrinsic_set_base(store, var->data.driver_location);
312
313 if (mode == nir_var_shader_out)
314 nir_intrinsic_set_component(store, component);
315
316 if (store->intrinsic == nir_intrinsic_store_output)
317 nir_intrinsic_set_type(store, nir_get_nir_type_for_glsl_type(type));
318
319 nir_intrinsic_set_write_mask(store, nir_intrinsic_write_mask(intrin));
320
321 if (vertex_index)
322 store->src[1] = nir_src_for_ssa(vertex_index);
323
324 store->src[vertex_index ? 2 : 1] = nir_src_for_ssa(offset);
325
326 nir_builder_instr_insert(b, &store->instr);
327 }
328
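/* Map a shared-variable deref atomic to the equivalent shared_atomic_*
 * intrinsic at the computed offset.  Returns the new SSA def, or NULL if
 * the intrinsic has no destination.
 */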
329 static nir_ssa_def *
330 lower_atomic(nir_intrinsic_instr *intrin, struct lower_io_state *state,
331 nir_variable *var, nir_ssa_def *offset)
332 {
333 nir_builder *b = &state->builder;
334 assert(var->data.mode == nir_var_mem_shared);
335
336 nir_intrinsic_op op;
337 switch (intrin->intrinsic) {
338 #define OP(O) case nir_intrinsic_deref_##O: op = nir_intrinsic_shared_##O; break;
339 OP(atomic_exchange)
340 OP(atomic_comp_swap)
341 OP(atomic_add)
342 OP(atomic_imin)
343 OP(atomic_umin)
344 OP(atomic_imax)
345 OP(atomic_umax)
346 OP(atomic_and)
347 OP(atomic_or)
348 OP(atomic_xor)
349 OP(atomic_fadd)
350 OP(atomic_fmin)
351 OP(atomic_fmax)
352 OP(atomic_fcomp_swap)
353 #undef OP
354 default:
355 unreachable("Invalid atomic");
356 }
357
358 nir_intrinsic_instr *atomic =
359 nir_intrinsic_instr_create(state->builder.shader, op);
360
361 nir_intrinsic_set_base(atomic, var->data.driver_location);
362
363 atomic->src[0] = nir_src_for_ssa(offset);
364 assert(nir_intrinsic_infos[intrin->intrinsic].num_srcs ==
365 nir_intrinsic_infos[op].num_srcs);
366 for (unsigned i = 1; i < nir_intrinsic_infos[op].num_srcs; i++) {
367 nir_src_copy(&atomic->src[i], &intrin->src[i], atomic);
368 }
369
370 if (nir_intrinsic_infos[op].has_dest) {
371 assert(intrin->dest.is_ssa);
372 assert(nir_intrinsic_infos[intrin->intrinsic].has_dest);
373 nir_ssa_dest_init(&atomic->instr, &atomic->dest,
374 intrin->dest.ssa.num_components,
375 intrin->dest.ssa.bit_size, NULL);
376 }
377
378 nir_builder_instr_insert(b, &atomic->instr);
379
380 return nir_intrinsic_infos[op].has_dest ? &atomic->dest.ssa : NULL;
381 }
382
383 static nir_ssa_def *
384 lower_interpolate_at(nir_intrinsic_instr *intrin, struct lower_io_state *state,
385 nir_variable *var, nir_ssa_def *offset, unsigned component,
386 const struct glsl_type *type)
387 {
388 nir_builder *b = &state->builder;
389 assert(var->data.mode == nir_var_shader_in);
390
391 /* Ignore interpolateAt() for flat variables - flat is flat. */
392 if (var->data.interpolation == INTERP_MODE_FLAT)
393 return lower_load(intrin, state, NULL, var, offset, component, type);
394
395 nir_intrinsic_op bary_op;
396 switch (intrin->intrinsic) {
397 case nir_intrinsic_interp_deref_at_centroid:
398 bary_op = (state->options & nir_lower_io_force_sample_interpolation) ?
399 nir_intrinsic_load_barycentric_sample :
400 nir_intrinsic_load_barycentric_centroid;
401 break;
402 case nir_intrinsic_interp_deref_at_sample:
403 bary_op = nir_intrinsic_load_barycentric_at_sample;
404 break;
405 case nir_intrinsic_interp_deref_at_offset:
406 bary_op = nir_intrinsic_load_barycentric_at_offset;
407 break;
408 default:
409 unreachable("Bogus interpolateAt() intrinsic.");
410 }
411
412 nir_intrinsic_instr *bary_setup =
413 nir_intrinsic_instr_create(state->builder.shader, bary_op);
414
415 nir_ssa_dest_init(&bary_setup->instr, &bary_setup->dest, 2, 32, NULL);
416 nir_intrinsic_set_interp_mode(bary_setup, var->data.interpolation);
417
418 if (intrin->intrinsic == nir_intrinsic_interp_deref_at_sample ||
419 intrin->intrinsic == nir_intrinsic_interp_deref_at_offset)
420 nir_src_copy(&bary_setup->src[0], &intrin->src[1], bary_setup);
421
422 nir_builder_instr_insert(b, &bary_setup->instr);
423
424 nir_intrinsic_instr *load =
425 nir_intrinsic_instr_create(state->builder.shader,
426 nir_intrinsic_load_interpolated_input);
427 load->num_components = intrin->num_components;
428
429 nir_intrinsic_set_base(load, var->data.driver_location);
430 nir_intrinsic_set_component(load, component);
431
432 load->src[0] = nir_src_for_ssa(&bary_setup->dest.ssa);
433 load->src[1] = nir_src_for_ssa(offset);
434
435 assert(intrin->dest.is_ssa);
436 nir_ssa_dest_init(&load->instr, &load->dest,
437 intrin->dest.ssa.num_components,
438 intrin->dest.ssa.bit_size, NULL);
439 nir_builder_instr_insert(b, &load->instr);
440
441 return &load->dest.ssa;
442 }
443
444 static bool
445 nir_lower_io_block(nir_block *block,
446 struct lower_io_state *state)
447 {
448 nir_builder *b = &state->builder;
449 const nir_shader_compiler_options *options = b->shader->options;
450 bool progress = false;
451
452 nir_foreach_instr_safe(instr, block) {
453 if (instr->type != nir_instr_type_intrinsic)
454 continue;
455
456 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
457
458 switch (intrin->intrinsic) {
459 case nir_intrinsic_load_deref:
460 case nir_intrinsic_store_deref:
461 case nir_intrinsic_deref_atomic_add:
462 case nir_intrinsic_deref_atomic_imin:
463 case nir_intrinsic_deref_atomic_umin:
464 case nir_intrinsic_deref_atomic_imax:
465 case nir_intrinsic_deref_atomic_umax:
466 case nir_intrinsic_deref_atomic_and:
467 case nir_intrinsic_deref_atomic_or:
468 case nir_intrinsic_deref_atomic_xor:
469 case nir_intrinsic_deref_atomic_exchange:
470 case nir_intrinsic_deref_atomic_comp_swap:
471 case nir_intrinsic_deref_atomic_fadd:
472 case nir_intrinsic_deref_atomic_fmin:
473 case nir_intrinsic_deref_atomic_fmax:
474 case nir_intrinsic_deref_atomic_fcomp_swap:
475 /* We can lower the io for this nir intrinsic */
476 break;
477 case nir_intrinsic_interp_deref_at_centroid:
478 case nir_intrinsic_interp_deref_at_sample:
479 case nir_intrinsic_interp_deref_at_offset:
480 /* We can optionally lower these to load_interpolated_input */
481 if (options->use_interpolated_input_intrinsics)
482 break;
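/* Otherwise fall through and skip it. */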
483 default:
484 /* We can't lower the io for this nir intrinsic, so skip it */
485 continue;
486 }
487
488 nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
489
490 nir_variable_mode mode = deref->mode;
491
492 if ((state->modes & mode) == 0)
493 continue;
494
495 if (mode != nir_var_shader_in &&
496 mode != nir_var_shader_out &&
497 mode != nir_var_mem_shared &&
498 mode != nir_var_uniform)
499 continue;
500
501 nir_variable *var = nir_deref_instr_get_variable(deref);
502
503 b->cursor = nir_before_instr(instr);
504
505 const bool per_vertex = nir_is_per_vertex_io(var, b->shader->info.stage);
506
507 nir_ssa_def *offset;
508 nir_ssa_def *vertex_index = NULL;
509 unsigned component_offset = var->data.location_frac;
510 bool bindless_type_size = mode == nir_var_shader_in ||
511 mode == nir_var_shader_out ||
512 var->data.bindless;
513
514 offset = get_io_offset(b, deref, per_vertex ? &vertex_index : NULL,
515 state->type_size, &component_offset,
516 bindless_type_size);
517
518 nir_ssa_def *replacement = NULL;
519
520 switch (intrin->intrinsic) {
521 case nir_intrinsic_load_deref:
522 replacement = lower_load(intrin, state, vertex_index, var, offset,
523 component_offset, deref->type);
524 break;
525
526 case nir_intrinsic_store_deref:
527 lower_store(intrin, state, vertex_index, var, offset,
528 component_offset, deref->type);
529 break;
530
531 case nir_intrinsic_deref_atomic_add:
532 case nir_intrinsic_deref_atomic_imin:
533 case nir_intrinsic_deref_atomic_umin:
534 case nir_intrinsic_deref_atomic_imax:
535 case nir_intrinsic_deref_atomic_umax:
536 case nir_intrinsic_deref_atomic_and:
537 case nir_intrinsic_deref_atomic_or:
538 case nir_intrinsic_deref_atomic_xor:
539 case nir_intrinsic_deref_atomic_exchange:
540 case nir_intrinsic_deref_atomic_comp_swap:
541 case nir_intrinsic_deref_atomic_fadd:
542 case nir_intrinsic_deref_atomic_fmin:
543 case nir_intrinsic_deref_atomic_fmax:
544 case nir_intrinsic_deref_atomic_fcomp_swap:
545 assert(vertex_index == NULL);
546 replacement = lower_atomic(intrin, state, var, offset);
547 break;
548
549 case nir_intrinsic_interp_deref_at_centroid:
550 case nir_intrinsic_interp_deref_at_sample:
551 case nir_intrinsic_interp_deref_at_offset:
552 assert(vertex_index == NULL);
553 replacement = lower_interpolate_at(intrin, state, var, offset,
554 component_offset, deref->type);
555 break;
556
557 default:
558 continue;
559 }
560
561 if (replacement) {
562 nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
563 nir_src_for_ssa(replacement));
564 }
565 nir_instr_remove(&intrin->instr);
566 progress = true;
567 }
568
569 return progress;
570 }
571
572 static bool
573 nir_lower_io_impl(nir_function_impl *impl,
574 nir_variable_mode modes,
575 int (*type_size)(const struct glsl_type *, bool),
576 nir_lower_io_options options)
577 {
578 struct lower_io_state state;
579 bool progress = false;
580
581 nir_builder_init(&state.builder, impl);
582 state.dead_ctx = ralloc_context(NULL);
583 state.modes = modes;
584 state.type_size = type_size;
585 state.options = options;
586
587 nir_foreach_block(block, impl) {
588 progress |= nir_lower_io_block(block, &state);
589 }
590
591 ralloc_free(state.dead_ctx);
592
593 nir_metadata_preserve(impl, nir_metadata_block_index |
594 nir_metadata_dominance);
595 return progress;
596 }
597
598 bool
599 nir_lower_io(nir_shader *shader, nir_variable_mode modes,
600 int (*type_size)(const struct glsl_type *, bool),
601 nir_lower_io_options options)
602 {
603 bool progress = false;
604
605 nir_foreach_function(function, shader) {
606 if (function->impl) {
607 progress |= nir_lower_io_impl(function->impl, modes,
608 type_size, options);
609 }
610 }
611
612 return progress;
613 }
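/* Illustrative sketch (not part of this file): a typical caller lowers
 * shader inputs and outputs with its slot-counting callback, e.g. the
 * hypothetical type_size_vec4 from above:
 *
 *    nir_lower_io(nir, nir_var_shader_in | nir_var_shader_out,
 *                 type_size_vec4, (nir_lower_io_options)0);
 *
 * Passing nir_lower_io_force_sample_interpolation in the options instead
 * forces all interpolated fragment-shader inputs to per-sample interpolation.
 */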
614
615 static unsigned
616 type_scalar_size_bytes(const struct glsl_type *type)
617 {
618 assert(glsl_type_is_vector_or_scalar(type) ||
619 glsl_type_is_matrix(type));
620 return glsl_type_is_boolean(type) ? 4 : glsl_get_bit_size(type) / 8;
621 }
622
623 static nir_ssa_def *
624 build_addr_iadd(nir_builder *b, nir_ssa_def *addr,
625 nir_address_format addr_format, nir_ssa_def *offset)
626 {
627 assert(offset->num_components == 1);
628 assert(addr->bit_size == offset->bit_size);
629
630 switch (addr_format) {
631 case nir_address_format_32bit_global:
632 case nir_address_format_64bit_global:
633 case nir_address_format_32bit_offset:
634 assert(addr->num_components == 1);
635 return nir_iadd(b, addr, offset);
636
637 case nir_address_format_64bit_bounded_global:
638 assert(addr->num_components == 4);
639 return nir_vec4(b, nir_channel(b, addr, 0),
640 nir_channel(b, addr, 1),
641 nir_channel(b, addr, 2),
642 nir_iadd(b, nir_channel(b, addr, 3), offset));
643
644 case nir_address_format_32bit_index_offset:
645 assert(addr->num_components == 2);
646 return nir_vec2(b, nir_channel(b, addr, 0),
647 nir_iadd(b, nir_channel(b, addr, 1), offset));
648 case nir_address_format_logical:
649 unreachable("Unsupported address format");
650 }
651 unreachable("Invalid address format");
652 }
653
654 static nir_ssa_def *
655 build_addr_iadd_imm(nir_builder *b, nir_ssa_def *addr,
656 nir_address_format addr_format, int64_t offset)
657 {
658 return build_addr_iadd(b, addr, addr_format,
659 nir_imm_intN_t(b, offset, addr->bit_size));
660 }
661
662 static nir_ssa_def *
663 addr_to_index(nir_builder *b, nir_ssa_def *addr,
664 nir_address_format addr_format)
665 {
666 assert(addr_format == nir_address_format_32bit_index_offset);
667 assert(addr->num_components == 2);
668 return nir_channel(b, addr, 0);
669 }
670
671 static nir_ssa_def *
672 addr_to_offset(nir_builder *b, nir_ssa_def *addr,
673 nir_address_format addr_format)
674 {
675 assert(addr_format == nir_address_format_32bit_index_offset);
676 assert(addr->num_components == 2);
677 return nir_channel(b, addr, 1);
678 }
679
680 /** Returns true if the given address format resolves to a global address */
681 static bool
682 addr_format_is_global(nir_address_format addr_format)
683 {
684 return addr_format == nir_address_format_32bit_global ||
685 addr_format == nir_address_format_64bit_global ||
686 addr_format == nir_address_format_64bit_bounded_global;
687 }
688
689 static nir_ssa_def *
690 addr_to_global(nir_builder *b, nir_ssa_def *addr,
691 nir_address_format addr_format)
692 {
693 switch (addr_format) {
694 case nir_address_format_32bit_global:
695 case nir_address_format_64bit_global:
696 assert(addr->num_components == 1);
697 return addr;
698
699 case nir_address_format_64bit_bounded_global:
700 assert(addr->num_components == 4);
701 return nir_iadd(b, nir_pack_64_2x32(b, nir_channels(b, addr, 0x3)),
702 nir_u2u64(b, nir_channel(b, addr, 3)));
703
704 case nir_address_format_32bit_index_offset:
705 case nir_address_format_32bit_offset:
706 case nir_address_format_logical:
707 unreachable("Cannot get a 64-bit address with this address format");
708 }
709
710 unreachable("Invalid address format");
711 }
712
713 static bool
714 addr_format_needs_bounds_check(nir_address_format addr_format)
715 {
716 return addr_format == nir_address_format_64bit_bounded_global;
717 }
718
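/* For nir_address_format_64bit_bounded_global an address is a uvec4:
 * .xy hold the 64-bit base pointer as two 32-bit words, .z holds the size
 * of the bound range, and .w holds the 32-bit offset from the base.  An
 * access of the given size is in bounds when offset + size <= range size.
 */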
719 static nir_ssa_def *
720 addr_is_in_bounds(nir_builder *b, nir_ssa_def *addr,
721 nir_address_format addr_format, unsigned size)
722 {
723 assert(addr_format == nir_address_format_64bit_bounded_global);
724 assert(addr->num_components == 4);
725 return nir_ige(b, nir_channel(b, addr, 2),
726 nir_iadd_imm(b, nir_channel(b, addr, 3), size));
727 }
728
729 static nir_ssa_def *
730 build_explicit_io_load(nir_builder *b, nir_intrinsic_instr *intrin,
731 nir_ssa_def *addr, nir_address_format addr_format,
732 unsigned num_components)
733 {
734 nir_variable_mode mode = nir_src_as_deref(intrin->src[0])->mode;
735
736 nir_intrinsic_op op;
737 switch (mode) {
738 case nir_var_mem_ubo:
739 op = nir_intrinsic_load_ubo;
740 break;
741 case nir_var_mem_ssbo:
742 if (addr_format_is_global(addr_format))
743 op = nir_intrinsic_load_global;
744 else
745 op = nir_intrinsic_load_ssbo;
746 break;
747 case nir_var_mem_global:
748 assert(addr_format_is_global(addr_format));
749 op = nir_intrinsic_load_global;
750 break;
751 case nir_var_shader_in:
752 assert(addr_format_is_global(addr_format));
753 op = nir_intrinsic_load_kernel_input;
754 break;
755 default:
756 unreachable("Unsupported explicit IO variable mode");
757 }
758
759 nir_intrinsic_instr *load = nir_intrinsic_instr_create(b->shader, op);
760
761 if (addr_format_is_global(addr_format)) {
762 load->src[0] = nir_src_for_ssa(addr_to_global(b, addr, addr_format));
763 } else {
764 load->src[0] = nir_src_for_ssa(addr_to_index(b, addr, addr_format));
765 load->src[1] = nir_src_for_ssa(addr_to_offset(b, addr, addr_format));
766 }
767
768 if (mode != nir_var_mem_ubo && mode != nir_var_shader_in)
769 nir_intrinsic_set_access(load, nir_intrinsic_access(intrin));
770
771 /* TODO: We should try and provide a better alignment. For OpenCL, we need
772 * to plumb the alignment through from SPIR-V when we have one.
773 */
774 nir_intrinsic_set_align(load, intrin->dest.ssa.bit_size / 8, 0);
775
776 assert(intrin->dest.is_ssa);
777 load->num_components = num_components;
778 nir_ssa_dest_init(&load->instr, &load->dest, num_components,
779 intrin->dest.ssa.bit_size, intrin->dest.ssa.name);
780
781 assert(load->dest.ssa.bit_size % 8 == 0);
782
783 if (addr_format_needs_bounds_check(addr_format)) {
784 /* The Vulkan spec for robustBufferAccess gives us quite a few options
785 * as to what we can do with an OOB read. Unfortunately, returning
786 * undefined values isn't one of them, so we return an actual zero.
787 */
788 nir_ssa_def *zero = nir_imm_zero(b, load->num_components,
789 load->dest.ssa.bit_size);
790
791 const unsigned load_size =
792 (load->dest.ssa.bit_size / 8) * load->num_components;
793 nir_push_if(b, addr_is_in_bounds(b, addr, addr_format, load_size));
794
795 nir_builder_instr_insert(b, &load->instr);
796
797 nir_pop_if(b, NULL);
798
799 return nir_if_phi(b, &load->dest.ssa, zero);
800 } else {
801 nir_builder_instr_insert(b, &load->instr);
802 return &load->dest.ssa;
803 }
804 }
805
806 static void
807 build_explicit_io_store(nir_builder *b, nir_intrinsic_instr *intrin,
808 nir_ssa_def *addr, nir_address_format addr_format,
809 nir_ssa_def *value, nir_component_mask_t write_mask)
810 {
811 nir_variable_mode mode = nir_src_as_deref(intrin->src[0])->mode;
812
813 nir_intrinsic_op op;
814 switch (mode) {
815 case nir_var_mem_ssbo:
816 if (addr_format_is_global(addr_format))
817 op = nir_intrinsic_store_global;
818 else
819 op = nir_intrinsic_store_ssbo;
820 break;
821 case nir_var_mem_global:
822 assert(addr_format_is_global(addr_format));
823 op = nir_intrinsic_store_global;
824 break;
825 default:
826 unreachable("Unsupported explicit IO variable mode");
827 }
828
829 nir_intrinsic_instr *store = nir_intrinsic_instr_create(b->shader, op);
830
831 store->src[0] = nir_src_for_ssa(value);
832 if (addr_format_is_global(addr_format)) {
833 store->src[1] = nir_src_for_ssa(addr_to_global(b, addr, addr_format));
834 } else {
835 store->src[1] = nir_src_for_ssa(addr_to_index(b, addr, addr_format));
836 store->src[2] = nir_src_for_ssa(addr_to_offset(b, addr, addr_format));
837 }
838
839 nir_intrinsic_set_write_mask(store, write_mask);
840
841 nir_intrinsic_set_access(store, nir_intrinsic_access(intrin));
842
843 /* TODO: We should try and provide a better alignment. For OpenCL, we need
844 * to plumb the alignment through from SPIR-V when we have one.
845 */
846 nir_intrinsic_set_align(store, value->bit_size / 8, 0);
847
848 assert(value->num_components == 1 ||
849 value->num_components == intrin->num_components);
850 store->num_components = value->num_components;
851
852 assert(value->bit_size % 8 == 0);
853
854 if (addr_format_needs_bounds_check(addr_format)) {
855 const unsigned store_size = (value->bit_size / 8) * store->num_components;
856 nir_push_if(b, addr_is_in_bounds(b, addr, addr_format, store_size));
857
858 nir_builder_instr_insert(b, &store->instr);
859
860 nir_pop_if(b, NULL);
861 } else {
862 nir_builder_instr_insert(b, &store->instr);
863 }
864 }
865
866 static nir_ssa_def *
867 build_explicit_io_atomic(nir_builder *b, nir_intrinsic_instr *intrin,
868 nir_ssa_def *addr, nir_address_format addr_format)
869 {
870 nir_variable_mode mode = nir_src_as_deref(intrin->src[0])->mode;
871 const unsigned num_data_srcs =
872 nir_intrinsic_infos[intrin->intrinsic].num_srcs - 1;
873
874 nir_intrinsic_op op;
875 switch (mode) {
876 case nir_var_mem_ssbo:
877 if (addr_format_is_global(addr_format))
878 op = global_atomic_for_deref(intrin->intrinsic);
879 else
880 op = ssbo_atomic_for_deref(intrin->intrinsic);
881 break;
882 case nir_var_mem_global:
883 assert(addr_format_is_global(addr_format));
884 op = global_atomic_for_deref(intrin->intrinsic);
885 break;
886 default:
887 unreachable("Unsupported explicit IO variable mode");
888 }
889
890 nir_intrinsic_instr *atomic = nir_intrinsic_instr_create(b->shader, op);
891
892 unsigned src = 0;
893 if (addr_format_is_global(addr_format)) {
894 atomic->src[src++] = nir_src_for_ssa(addr_to_global(b, addr, addr_format));
895 } else {
896 atomic->src[src++] = nir_src_for_ssa(addr_to_index(b, addr, addr_format));
897 atomic->src[src++] = nir_src_for_ssa(addr_to_offset(b, addr, addr_format));
898 }
899 for (unsigned i = 0; i < num_data_srcs; i++) {
900 atomic->src[src++] = nir_src_for_ssa(intrin->src[1 + i].ssa);
901 }
902
903 /* Global atomics don't have access flags because they assume that the
904 * address may be non-uniform.
905 */
906 if (!addr_format_is_global(addr_format))
907 nir_intrinsic_set_access(atomic, nir_intrinsic_access(intrin));
908
909 assert(intrin->dest.ssa.num_components == 1);
910 nir_ssa_dest_init(&atomic->instr, &atomic->dest,
911 1, intrin->dest.ssa.bit_size, intrin->dest.ssa.name);
912
913 assert(atomic->dest.ssa.bit_size % 8 == 0);
914
915 if (addr_format_needs_bounds_check(addr_format)) {
916 const unsigned atomic_size = atomic->dest.ssa.bit_size / 8;
917 nir_push_if(b, addr_is_in_bounds(b, addr, addr_format, atomic_size));
918
919 nir_builder_instr_insert(b, &atomic->instr);
920
921 nir_pop_if(b, NULL);
922 return nir_if_phi(b, &atomic->dest.ssa,
923 nir_ssa_undef(b, 1, atomic->dest.ssa.bit_size));
924 } else {
925 nir_builder_instr_insert(b, &atomic->instr);
926 return &atomic->dest.ssa;
927 }
928 }
929
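/* Build the address of a deref in the given address format.  base_addr is
 * the already-computed address of the parent deref; for nir_deref_type_var
 * the variable's driver_location is used instead and base_addr is ignored.
 */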
930 nir_ssa_def *
931 nir_explicit_io_address_from_deref(nir_builder *b, nir_deref_instr *deref,
932 nir_ssa_def *base_addr,
933 nir_address_format addr_format)
934 {
935 assert(deref->dest.is_ssa);
936 switch (deref->deref_type) {
937 case nir_deref_type_var:
938 assert(deref->mode == nir_var_shader_in);
939 return nir_imm_intN_t(b, deref->var->data.driver_location,
940 deref->dest.ssa.bit_size);
941
942 case nir_deref_type_array: {
943 nir_deref_instr *parent = nir_deref_instr_parent(deref);
944
945 unsigned stride = glsl_get_explicit_stride(parent->type);
946 if ((glsl_type_is_matrix(parent->type) &&
947 glsl_matrix_type_is_row_major(parent->type)) ||
948 (glsl_type_is_vector(parent->type) && stride == 0))
949 stride = type_scalar_size_bytes(parent->type);
950
951 assert(stride > 0);
952
953 nir_ssa_def *index = nir_ssa_for_src(b, deref->arr.index, 1);
954 index = nir_i2i(b, index, base_addr->bit_size);
955 return build_addr_iadd(b, base_addr, addr_format,
956 nir_imul_imm(b, index, stride));
957 }
958
959 case nir_deref_type_ptr_as_array: {
960 nir_ssa_def *index = nir_ssa_for_src(b, deref->arr.index, 1);
961 index = nir_i2i(b, index, base_addr->bit_size);
962 unsigned stride = nir_deref_instr_ptr_as_array_stride(deref);
963 return build_addr_iadd(b, base_addr, addr_format,
964 nir_imul_imm(b, index, stride));
965 }
966
967 case nir_deref_type_array_wildcard:
968 unreachable("Wildcards should be lowered by now");
969 break;
970
971 case nir_deref_type_struct: {
972 nir_deref_instr *parent = nir_deref_instr_parent(deref);
973 int offset = glsl_get_struct_field_offset(parent->type,
974 deref->strct.index);
975 assert(offset >= 0);
976 return build_addr_iadd_imm(b, base_addr, addr_format, offset);
977 }
978
979 case nir_deref_type_cast:
980 /* Nothing to do here */
981 return base_addr;
982 }
983
984 unreachable("Invalid NIR deref type");
985 }
986
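/* Lower a single load/store/atomic deref intrinsic to explicit-I/O form
 * using the precomputed address.  Vectors with an explicit stride larger
 * than the scalar size are split into per-component loads/stores.
 */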
987 void
988 nir_lower_explicit_io_instr(nir_builder *b,
989 nir_intrinsic_instr *intrin,
990 nir_ssa_def *addr,
991 nir_address_format addr_format)
992 {
993 b->cursor = nir_after_instr(&intrin->instr);
994
995 nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
996 unsigned vec_stride = glsl_get_explicit_stride(deref->type);
997 unsigned scalar_size = type_scalar_size_bytes(deref->type);
998 assert(vec_stride == 0 || glsl_type_is_vector(deref->type));
999 assert(vec_stride == 0 || vec_stride >= scalar_size);
1000
1001 if (intrin->intrinsic == nir_intrinsic_load_deref) {
1002 nir_ssa_def *value;
1003 if (vec_stride > scalar_size) {
1004 nir_ssa_def *comps[4] = { NULL, };
1005 for (unsigned i = 0; i < intrin->num_components; i++) {
1006 nir_ssa_def *comp_addr = build_addr_iadd_imm(b, addr, addr_format,
1007 vec_stride * i);
1008 comps[i] = build_explicit_io_load(b, intrin, comp_addr,
1009 addr_format, 1);
1010 }
1011 value = nir_vec(b, comps, intrin->num_components);
1012 } else {
1013 value = build_explicit_io_load(b, intrin, addr, addr_format,
1014 intrin->num_components);
1015 }
1016 nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(value));
1017 } else if (intrin->intrinsic == nir_intrinsic_store_deref) {
1018 assert(intrin->src[1].is_ssa);
1019 nir_ssa_def *value = intrin->src[1].ssa;
1020 nir_component_mask_t write_mask = nir_intrinsic_write_mask(intrin);
1021 if (vec_stride > scalar_size) {
1022 for (unsigned i = 0; i < intrin->num_components; i++) {
1023 if (!(write_mask & (1 << i)))
1024 continue;
1025
1026 nir_ssa_def *comp_addr = build_addr_iadd_imm(b, addr, addr_format,
1027 vec_stride * i);
1028 build_explicit_io_store(b, intrin, comp_addr, addr_format,
1029 nir_channel(b, value, i), 1);
1030 }
1031 } else {
1032 build_explicit_io_store(b, intrin, addr, addr_format,
1033 value, write_mask);
1034 }
1035 } else {
1036 nir_ssa_def *value =
1037 build_explicit_io_atomic(b, intrin, addr, addr_format);
1038 nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(value));
1039 }
1040
1041 nir_instr_remove(&intrin->instr);
1042 }
1043
1044 static void
1045 lower_explicit_io_deref(nir_builder *b, nir_deref_instr *deref,
1046 nir_address_format addr_format)
1047 {
1048 /* Just delete the deref if it's not used. We can't use
1049 * nir_deref_instr_remove_if_unused here because it may remove more than
1050 * one deref, which could break our list walking since we walk the list
1051 * backwards.
1052 */
1053 assert(list_empty(&deref->dest.ssa.if_uses));
1054 if (list_empty(&deref->dest.ssa.uses)) {
1055 nir_instr_remove(&deref->instr);
1056 return;
1057 }
1058
1059 b->cursor = nir_after_instr(&deref->instr);
1060
1061 nir_ssa_def *base_addr = NULL;
1062 if (deref->deref_type != nir_deref_type_var) {
1063 assert(deref->parent.is_ssa);
1064 base_addr = deref->parent.ssa;
1065 }
1066
1067 nir_ssa_def *addr = nir_explicit_io_address_from_deref(b, deref, base_addr,
1068 addr_format);
1069
1070 nir_instr_remove(&deref->instr);
1071 nir_ssa_def_rewrite_uses(&deref->dest.ssa, nir_src_for_ssa(addr));
1072 }
1073
1074 static void
1075 lower_explicit_io_access(nir_builder *b, nir_intrinsic_instr *intrin,
1076 nir_address_format addr_format)
1077 {
1078 assert(intrin->src[0].is_ssa);
1079 nir_lower_explicit_io_instr(b, intrin, intrin->src[0].ssa, addr_format);
1080 }
1081
1082 static void
1083 lower_explicit_io_array_length(nir_builder *b, nir_intrinsic_instr *intrin,
1084 nir_address_format addr_format)
1085 {
1086 b->cursor = nir_after_instr(&intrin->instr);
1087
1088 nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
1089
1090 assert(glsl_type_is_array(deref->type));
1091 assert(glsl_get_length(deref->type) == 0);
1092 unsigned stride = glsl_get_explicit_stride(deref->type);
1093 assert(stride > 0);
1094
1095 assert(addr_format == nir_address_format_32bit_index_offset);
1096 nir_ssa_def *addr = &deref->dest.ssa;
1097 nir_ssa_def *index = addr_to_index(b, addr, addr_format);
1098 nir_ssa_def *offset = addr_to_offset(b, addr, addr_format);
1099
1100 nir_intrinsic_instr *bsize =
1101 nir_intrinsic_instr_create(b->shader, nir_intrinsic_get_buffer_size);
1102 bsize->src[0] = nir_src_for_ssa(index);
1103 nir_ssa_dest_init(&bsize->instr, &bsize->dest, 1, 32, NULL);
1104 nir_builder_instr_insert(b, &bsize->instr);
1105
1106 nir_ssa_def *arr_size =
1107 nir_idiv(b, nir_isub(b, &bsize->dest.ssa, offset),
1108 nir_imm_int(b, stride));
1109
1110 nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(arr_size));
1111 nir_instr_remove(&intrin->instr);
1112 }
1113
1114 static bool
1115 nir_lower_explicit_io_impl(nir_function_impl *impl, nir_variable_mode modes,
1116 nir_address_format addr_format)
1117 {
1118 bool progress = false;
1119
1120 nir_builder b;
1121 nir_builder_init(&b, impl);
1122
1123 /* Walk in reverse order so that we can see the full deref chain when we
1124 * lower the access operations. We lower them assuming that the derefs
1125 * will be turned into address calculations later.
1126 */
1127 nir_foreach_block_reverse(block, impl) {
1128 nir_foreach_instr_reverse_safe(instr, block) {
1129 switch (instr->type) {
1130 case nir_instr_type_deref: {
1131 nir_deref_instr *deref = nir_instr_as_deref(instr);
1132 if (deref->mode & modes) {
1133 lower_explicit_io_deref(&b, deref, addr_format);
1134 progress = true;
1135 }
1136 break;
1137 }
1138
1139 case nir_instr_type_intrinsic: {
1140 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
1141 switch (intrin->intrinsic) {
1142 case nir_intrinsic_load_deref:
1143 case nir_intrinsic_store_deref:
1144 case nir_intrinsic_deref_atomic_add:
1145 case nir_intrinsic_deref_atomic_imin:
1146 case nir_intrinsic_deref_atomic_umin:
1147 case nir_intrinsic_deref_atomic_imax:
1148 case nir_intrinsic_deref_atomic_umax:
1149 case nir_intrinsic_deref_atomic_and:
1150 case nir_intrinsic_deref_atomic_or:
1151 case nir_intrinsic_deref_atomic_xor:
1152 case nir_intrinsic_deref_atomic_exchange:
1153 case nir_intrinsic_deref_atomic_comp_swap:
1154 case nir_intrinsic_deref_atomic_fadd:
1155 case nir_intrinsic_deref_atomic_fmin:
1156 case nir_intrinsic_deref_atomic_fmax:
1157 case nir_intrinsic_deref_atomic_fcomp_swap: {
1158 nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
1159 if (deref->mode & modes) {
1160 lower_explicit_io_access(&b, intrin, addr_format);
1161 progress = true;
1162 }
1163 break;
1164 }
1165
1166 case nir_intrinsic_deref_buffer_array_length: {
1167 nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
1168 if (deref->mode & modes) {
1169 lower_explicit_io_array_length(&b, intrin, addr_format);
1170 progress = true;
1171 }
1172 break;
1173 }
1174
1175 default:
1176 break;
1177 }
1178 break;
1179 }
1180
1181 default:
1182 /* Nothing to do */
1183 break;
1184 }
1185 }
1186 }
1187
1188 if (progress) {
1189 nir_metadata_preserve(impl, nir_metadata_block_index |
1190 nir_metadata_dominance);
1191 }
1192
1193 return progress;
1194 }
1195
1196 bool
1197 nir_lower_explicit_io(nir_shader *shader, nir_variable_mode modes,
1198 nir_address_format addr_format)
1199 {
1200 bool progress = false;
1201
1202 nir_foreach_function(function, shader) {
1203 if (function->impl &&
1204 nir_lower_explicit_io_impl(function->impl, modes, addr_format))
1205 progress = true;
1206 }
1207
1208 return progress;
1209 }
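/* Illustrative sketch (not part of this file): a Vulkan-style driver might
 * lower UBO/SSBO access to index/offset form, while an OpenCL-style driver
 * would lower global memory to 64-bit addresses:
 *
 *    nir_lower_explicit_io(nir, nir_var_mem_ubo | nir_var_mem_ssbo,
 *                          nir_address_format_32bit_index_offset);
 *
 *    nir_lower_explicit_io(nir, nir_var_mem_global,
 *                          nir_address_format_64bit_global);
 */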
1210
1211 /**
1212 * Return the offset source for a load/store intrinsic.
1213 */
1214 nir_src *
1215 nir_get_io_offset_src(nir_intrinsic_instr *instr)
1216 {
1217 switch (instr->intrinsic) {
1218 case nir_intrinsic_load_input:
1219 case nir_intrinsic_load_output:
1220 case nir_intrinsic_load_shared:
1221 case nir_intrinsic_load_uniform:
1222 case nir_intrinsic_load_global:
1223 case nir_intrinsic_load_scratch:
1224 case nir_intrinsic_load_fs_input_interp_deltas:
1225 return &instr->src[0];
1226 case nir_intrinsic_load_ubo:
1227 case nir_intrinsic_load_ssbo:
1228 case nir_intrinsic_load_per_vertex_input:
1229 case nir_intrinsic_load_per_vertex_output:
1230 case nir_intrinsic_load_interpolated_input:
1231 case nir_intrinsic_store_output:
1232 case nir_intrinsic_store_shared:
1233 case nir_intrinsic_store_global:
1234 case nir_intrinsic_store_scratch:
1235 return &instr->src[1];
1236 case nir_intrinsic_store_ssbo:
1237 case nir_intrinsic_store_per_vertex_output:
1238 return &instr->src[2];
1239 default:
1240 return NULL;
1241 }
1242 }
1243
1244 /**
1245 * Return the vertex index source for a load/store per_vertex intrinsic.
1246 */
1247 nir_src *
1248 nir_get_io_vertex_index_src(nir_intrinsic_instr *instr)
1249 {
1250 switch (instr->intrinsic) {
1251 case nir_intrinsic_load_per_vertex_input:
1252 case nir_intrinsic_load_per_vertex_output:
1253 return &instr->src[0];
1254 case nir_intrinsic_store_per_vertex_output:
1255 return &instr->src[1];
1256 default:
1257 return NULL;
1258 }
1259 }
1260
1261 /**
1262 * Return the numeric constant that identify a NULL pointer for each address
1263 * format.
1264 */
1265 const nir_const_value *
1266 nir_address_format_null_value(nir_address_format addr_format)
1267 {
1268 static const nir_const_value null_values[][NIR_MAX_VEC_COMPONENTS] = {
1269 [nir_address_format_32bit_global] = {{0}},
1270 [nir_address_format_64bit_global] = {{0}},
1271 [nir_address_format_64bit_bounded_global] = {{0}},
1272 [nir_address_format_32bit_index_offset] = {{.u32 = ~0}, {.u32 = ~0}},
1273 [nir_address_format_32bit_offset] = {{.u32 = ~0}},
1274 [nir_address_format_logical] = {{.u32 = ~0}},
1275 };
1276
1277 assert(addr_format < ARRAY_SIZE(null_values));
1278 return null_values[addr_format];
1279 }
1280
1281 nir_ssa_def *
1282 nir_build_addr_ieq(nir_builder *b, nir_ssa_def *addr0, nir_ssa_def *addr1,
1283 nir_address_format addr_format)
1284 {
1285 switch (addr_format) {
1286 case nir_address_format_32bit_global:
1287 case nir_address_format_64bit_global:
1288 case nir_address_format_64bit_bounded_global:
1289 case nir_address_format_32bit_index_offset:
1290 case nir_address_format_32bit_offset:
1291 return nir_ball_iequal(b, addr0, addr1);
1292
1293 case nir_address_format_logical:
1294 unreachable("Unsupported address format");
1295 }
1296
1297 unreachable("Invalid address format");
1298 }
1299
1300 nir_ssa_def *
1301 nir_build_addr_isub(nir_builder *b, nir_ssa_def *addr0, nir_ssa_def *addr1,
1302 nir_address_format addr_format)
1303 {
1304 switch (addr_format) {
1305 case nir_address_format_32bit_global:
1306 case nir_address_format_64bit_global:
1307 case nir_address_format_32bit_offset:
1308 assert(addr0->num_components == 1);
1309 assert(addr1->num_components == 1);
1310 return nir_isub(b, addr0, addr1);
1311
1312 case nir_address_format_64bit_bounded_global:
1313 return nir_isub(b, addr_to_global(b, addr0, addr_format),
1314 addr_to_global(b, addr1, addr_format));
1315
1316 case nir_address_format_32bit_index_offset:
1317 assert(addr0->num_components == 2);
1318 assert(addr1->num_components == 2);
1319 /* Assume the same buffer index. */
1320 return nir_isub(b, nir_channel(b, addr0, 1), nir_channel(b, addr1, 1));
1321
1322 case nir_address_format_logical:
1323 unreachable("Unsupported address format");
1324 }
1325
1326 unreachable("Invalid address format");
1327 }
1328
1329 static bool
1330 is_input(nir_intrinsic_instr *intrin)
1331 {
1332 return intrin->intrinsic == nir_intrinsic_load_input ||
1333 intrin->intrinsic == nir_intrinsic_load_per_vertex_input ||
1334 intrin->intrinsic == nir_intrinsic_load_interpolated_input ||
1335 intrin->intrinsic == nir_intrinsic_load_fs_input_interp_deltas;
1336 }
1337
1338 static bool
1339 is_output(nir_intrinsic_instr *intrin)
1340 {
1341 return intrin->intrinsic == nir_intrinsic_load_output ||
1342 intrin->intrinsic == nir_intrinsic_load_per_vertex_output ||
1343 intrin->intrinsic == nir_intrinsic_store_output ||
1344 intrin->intrinsic == nir_intrinsic_store_per_vertex_output;
1345 }
1346
1347
1348 /**
1349 * This pass adds constant offsets to instr->const_index[0] for input/output
1350 * intrinsics, and resets the offset source to 0. Non-constant offsets remain
1351 * unchanged - since we don't know what part of a compound variable is
1352 * accessed, we allocate storage for the entire thing. For drivers that use
1353 * nir_lower_io_to_temporaries() before nir_lower_io(), this guarantees that
1354 * the offset source will be 0, so that they don't have to add it in manually.
1355 */
1356
1357 static bool
1358 add_const_offset_to_base_block(nir_block *block, nir_builder *b,
1359 nir_variable_mode mode)
1360 {
1361 bool progress = false;
1362 nir_foreach_instr_safe(instr, block) {
1363 if (instr->type != nir_instr_type_intrinsic)
1364 continue;
1365
1366 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
1367
1368 if ((mode == nir_var_shader_in && is_input(intrin)) ||
1369 (mode == nir_var_shader_out && is_output(intrin))) {
1370 nir_src *offset = nir_get_io_offset_src(intrin);
1371
1372 if (nir_src_is_const(*offset)) {
1373 intrin->const_index[0] += nir_src_as_uint(*offset);
1374 b->cursor = nir_before_instr(&intrin->instr);
1375 nir_instr_rewrite_src(&intrin->instr, offset,
1376 nir_src_for_ssa(nir_imm_int(b, 0)));
1377 progress = true;
1378 }
1379 }
1380 }
1381
1382 return progress;
1383 }
1384
1385 bool
1386 nir_io_add_const_offset_to_base(nir_shader *nir, nir_variable_mode mode)
1387 {
1388 bool progress = false;
1389
1390 nir_foreach_function(f, nir) {
1391 if (f->impl) {
1392 nir_builder b;
1393 nir_builder_init(&b, f->impl);
1394 nir_foreach_block(block, f->impl) {
1395 progress |= add_const_offset_to_base_block(block, &b, mode);
1396 }
1397 }
1398 }
1399
1400 return progress;
1401 }
1402