nir/lower_io: Don't use variable to get deref mode
[mesa.git] / src / compiler / nir / nir_lower_io.c
1 /*
2 * Copyright © 2014 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Connor Abbott (cwabbott0@gmail.com)
25 * Jason Ekstrand (jason@jlekstrand.net)
26 *
27 */
28
29 /*
30 * This lowering pass converts references to input/output variables with
31 * loads/stores to actual input/output intrinsics.
32 */
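/*
 * Roughly speaking (the NIR below is illustrative, not verbatim output), a
 * dereference of an input variable such as
 *
 *    vec4 ssa_2 = intrinsic load_deref (ssa_1) ()        // deref of "in vec4 color"
 *
 * becomes an offset-based intrinsic whose base comes from the variable's
 * driver_location:
 *
 *    vec4 ssa_2 = intrinsic load_input (ssa_0) (base=1, component=0)
 *
 * Drivers typically invoke the pass along the lines of
 *
 *    nir_lower_io(shader, nir_var_shader_in | nir_var_shader_out,
 *                 type_size_cb, (nir_lower_io_options)0);
 *
 * where type_size_cb is a placeholder name for the driver's slot-counting
 * callback.
 */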
33
34 #include "nir.h"
35 #include "nir_builder.h"
36 #include "nir_deref.h"
37
38 struct lower_io_state {
39 void *dead_ctx;
40 nir_builder builder;
41 int (*type_size)(const struct glsl_type *type, bool);
42 nir_variable_mode modes;
43 nir_lower_io_options options;
44 };
45
46 static nir_intrinsic_op
47 ssbo_atomic_for_deref(nir_intrinsic_op deref_op)
48 {
49 switch (deref_op) {
50 #define OP(O) case nir_intrinsic_deref_##O: return nir_intrinsic_ssbo_##O;
51 OP(atomic_exchange)
52 OP(atomic_comp_swap)
53 OP(atomic_add)
54 OP(atomic_imin)
55 OP(atomic_umin)
56 OP(atomic_imax)
57 OP(atomic_umax)
58 OP(atomic_and)
59 OP(atomic_or)
60 OP(atomic_xor)
61 OP(atomic_fadd)
62 OP(atomic_fmin)
63 OP(atomic_fmax)
64 OP(atomic_fcomp_swap)
65 #undef OP
66 default:
67 unreachable("Invalid SSBO atomic");
68 }
69 }
70
71 static nir_intrinsic_op
72 global_atomic_for_deref(nir_intrinsic_op deref_op)
73 {
74 switch (deref_op) {
75 #define OP(O) case nir_intrinsic_deref_##O: return nir_intrinsic_global_##O;
76 OP(atomic_exchange)
77 OP(atomic_comp_swap)
78 OP(atomic_add)
79 OP(atomic_imin)
80 OP(atomic_umin)
81 OP(atomic_imax)
82 OP(atomic_umax)
83 OP(atomic_and)
84 OP(atomic_or)
85 OP(atomic_xor)
86 OP(atomic_fadd)
87 OP(atomic_fmin)
88 OP(atomic_fmax)
89 OP(atomic_fcomp_swap)
90 #undef OP
91 default:
  92     unreachable("Invalid global atomic");
93 }
94 }
95
96 void
97 nir_assign_var_locations(struct exec_list *var_list, unsigned *size,
98 int (*type_size)(const struct glsl_type *, bool))
99 {
100 unsigned location = 0;
101
102 nir_foreach_variable(var, var_list) {
103 /*
 104        * UBOs and SSBOs have their own address spaces, so don't count them
 105        * towards the number of global uniforms.
106 */
107 if (var->data.mode == nir_var_mem_ubo || var->data.mode == nir_var_mem_ssbo)
108 continue;
109
110 var->data.driver_location = location;
111 bool bindless_type_size = var->data.mode == nir_var_shader_in ||
112 var->data.mode == nir_var_shader_out ||
113 var->data.bindless;
114 location += type_size(var->type, bindless_type_size);
115 }
116
117 *size = location;
118 }
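/*
 * A small illustration of the accumulation above, assuming a hypothetical
 * type_size callback that counts vec4 slots (a float, a vec4 and a mat4
 * report 1, 1 and 4 respectively): for the uniform list { vec4 u; mat4 m;
 * float f; }, the variables get driver_location 0, 1 and 5, and *size ends
 * up as 6.
 */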
119
120 /**
 121  * Return true if the given variable is a per-vertex input/output array
 122  * (such as a geometry shader input).
123 */
124 bool
125 nir_is_per_vertex_io(const nir_variable *var, gl_shader_stage stage)
126 {
127 if (var->data.patch || !glsl_type_is_array(var->type))
128 return false;
129
130 if (var->data.mode == nir_var_shader_in)
131 return stage == MESA_SHADER_GEOMETRY ||
132 stage == MESA_SHADER_TESS_CTRL ||
133 stage == MESA_SHADER_TESS_EVAL;
134
135 if (var->data.mode == nir_var_shader_out)
136 return stage == MESA_SHADER_TESS_CTRL;
137
138 return false;
139 }
140
141 static nir_ssa_def *
142 get_io_offset(nir_builder *b, nir_deref_instr *deref,
143 nir_ssa_def **vertex_index,
144 int (*type_size)(const struct glsl_type *, bool),
145 unsigned *component, bool bts)
146 {
147 nir_deref_path path;
148 nir_deref_path_init(&path, deref, NULL);
149
150 assert(path.path[0]->deref_type == nir_deref_type_var);
151 nir_deref_instr **p = &path.path[1];
152
153 /* For per-vertex input arrays (i.e. geometry shader inputs), keep the
154 * outermost array index separate. Process the rest normally.
155 */
156 if (vertex_index != NULL) {
157 assert((*p)->deref_type == nir_deref_type_array);
158 *vertex_index = nir_ssa_for_src(b, (*p)->arr.index, 1);
159 p++;
160 }
161
162 if (path.path[0]->var->data.compact) {
163 assert((*p)->deref_type == nir_deref_type_array);
164 assert(glsl_type_is_scalar((*p)->type));
165
166 /* We always lower indirect dereferences for "compact" array vars. */
167 const unsigned index = nir_src_as_uint((*p)->arr.index);
168 const unsigned total_offset = *component + index;
169 const unsigned slot_offset = total_offset / 4;
170 *component = total_offset % 4;
171 return nir_imm_int(b, type_size(glsl_vec4_type(), bts) * slot_offset);
172 }
173
174 /* Just emit code and let constant-folding go to town */
175 nir_ssa_def *offset = nir_imm_int(b, 0);
176
177 for (; *p; p++) {
178 if ((*p)->deref_type == nir_deref_type_array) {
179 unsigned size = type_size((*p)->type, bts);
180
181 nir_ssa_def *mul =
182 nir_imul_imm(b, nir_ssa_for_src(b, (*p)->arr.index, 1), size);
183
184 offset = nir_iadd(b, offset, mul);
185 } else if ((*p)->deref_type == nir_deref_type_struct) {
186 /* p starts at path[1], so this is safe */
187 nir_deref_instr *parent = *(p - 1);
188
189 unsigned field_offset = 0;
190 for (unsigned i = 0; i < (*p)->strct.index; i++) {
191 field_offset += type_size(glsl_get_struct_field(parent->type, i), bts);
192 }
193 offset = nir_iadd_imm(b, offset, field_offset);
194 } else {
195 unreachable("Unsupported deref type");
196 }
197 }
198
199 nir_deref_path_finish(&path);
200
201 return offset;
202 }
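/*
 * Worked example for the loop above, with a hypothetical type_size callback
 * that counts vec4 slots (so a vec4 reports 1): for a variable declared as
 *
 *    struct { vec4 a; vec4 b[8]; } s;
 *
 * a deref chain for s.b[i] contributes a struct field offset of
 * type_size(vec4) = 1 for skipping s.a plus type_size(vec4) * i = 1 * i for
 * the array step, i.e. offset = iadd(1, imul(i, 1)).  With a constant i,
 * constant folding collapses this to a single immediate.
 */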
203
204 static nir_intrinsic_instr *
205 lower_load(nir_intrinsic_instr *intrin, struct lower_io_state *state,
206 nir_ssa_def *vertex_index, nir_variable *var, nir_ssa_def *offset,
207 unsigned component, const struct glsl_type *type)
208 {
209 const nir_shader *nir = state->builder.shader;
210 nir_variable_mode mode = var->data.mode;
211 nir_ssa_def *barycentric = NULL;
212
213 nir_intrinsic_op op;
214 switch (mode) {
215 case nir_var_shader_in:
216 if (nir->info.stage == MESA_SHADER_FRAGMENT &&
217 nir->options->use_interpolated_input_intrinsics &&
218 var->data.interpolation != INTERP_MODE_FLAT) {
219 assert(vertex_index == NULL);
220
221 nir_intrinsic_op bary_op;
222 if (var->data.sample ||
223 (state->options & nir_lower_io_force_sample_interpolation))
224 bary_op = nir_intrinsic_load_barycentric_sample;
225 else if (var->data.centroid)
226 bary_op = nir_intrinsic_load_barycentric_centroid;
227 else
228 bary_op = nir_intrinsic_load_barycentric_pixel;
229
230 barycentric = nir_load_barycentric(&state->builder, bary_op,
231 var->data.interpolation);
232 op = nir_intrinsic_load_interpolated_input;
233 } else {
234 op = vertex_index ? nir_intrinsic_load_per_vertex_input :
235 nir_intrinsic_load_input;
236 }
237 break;
238 case nir_var_shader_out:
239 op = vertex_index ? nir_intrinsic_load_per_vertex_output :
240 nir_intrinsic_load_output;
241 break;
242 case nir_var_uniform:
243 op = nir_intrinsic_load_uniform;
244 break;
245 case nir_var_mem_shared:
246 op = nir_intrinsic_load_shared;
247 break;
248 default:
249 unreachable("Unknown variable mode");
250 }
251
252 nir_intrinsic_instr *load =
253 nir_intrinsic_instr_create(state->builder.shader, op);
254 load->num_components = intrin->num_components;
255
256 nir_intrinsic_set_base(load, var->data.driver_location);
257 if (mode == nir_var_shader_in || mode == nir_var_shader_out)
258 nir_intrinsic_set_component(load, component);
259
260 if (load->intrinsic == nir_intrinsic_load_uniform)
261 nir_intrinsic_set_range(load,
262 state->type_size(var->type, var->data.bindless));
263
264 if (load->intrinsic == nir_intrinsic_load_input ||
265 load->intrinsic == nir_intrinsic_load_uniform)
266 nir_intrinsic_set_type(load, nir_get_nir_type_for_glsl_type(type));
267
268 if (vertex_index) {
269 load->src[0] = nir_src_for_ssa(vertex_index);
270 load->src[1] = nir_src_for_ssa(offset);
271 } else if (barycentric) {
272 load->src[0] = nir_src_for_ssa(barycentric);
273 load->src[1] = nir_src_for_ssa(offset);
274 } else {
275 load->src[0] = nir_src_for_ssa(offset);
276 }
277
278 return load;
279 }
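/*
 * For a fragment-shader input with use_interpolated_input_intrinsics set,
 * the result is the two-intrinsic form sketched (illustratively) below:
 *
 *    vec2 ssa_b = intrinsic load_barycentric_pixel () (interp_mode)
 *    vec4 ssa_v = intrinsic load_interpolated_input (ssa_b, ssa_off) (base, component)
 *
 * Flat inputs become plain load_input, and per-vertex inputs become
 * load_per_vertex_input with an extra vertex-index source.
 */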
280
281 static nir_intrinsic_instr *
282 lower_store(nir_intrinsic_instr *intrin, struct lower_io_state *state,
283 nir_ssa_def *vertex_index, nir_variable *var, nir_ssa_def *offset,
284 unsigned component, const struct glsl_type *type)
285 {
286 nir_variable_mode mode = var->data.mode;
287
288 nir_intrinsic_op op;
289 if (mode == nir_var_mem_shared) {
290 op = nir_intrinsic_store_shared;
291 } else {
292 assert(mode == nir_var_shader_out);
293 op = vertex_index ? nir_intrinsic_store_per_vertex_output :
294 nir_intrinsic_store_output;
295 }
296
297 nir_intrinsic_instr *store =
298 nir_intrinsic_instr_create(state->builder.shader, op);
299 store->num_components = intrin->num_components;
300
301 nir_src_copy(&store->src[0], &intrin->src[1], store);
302
303 nir_intrinsic_set_base(store, var->data.driver_location);
304
305 if (mode == nir_var_shader_out)
306 nir_intrinsic_set_component(store, component);
307
308 if (store->intrinsic == nir_intrinsic_store_output)
309 nir_intrinsic_set_type(store, nir_get_nir_type_for_glsl_type(type));
310
311 nir_intrinsic_set_write_mask(store, nir_intrinsic_write_mask(intrin));
312
313 if (vertex_index)
314 store->src[1] = nir_src_for_ssa(vertex_index);
315
316 store->src[vertex_index ? 2 : 1] = nir_src_for_ssa(offset);
317
318 return store;
319 }
320
321 static nir_intrinsic_instr *
322 lower_atomic(nir_intrinsic_instr *intrin, struct lower_io_state *state,
323 nir_variable *var, nir_ssa_def *offset)
324 {
325 assert(var->data.mode == nir_var_mem_shared);
326
327 nir_intrinsic_op op;
328 switch (intrin->intrinsic) {
329 #define OP(O) case nir_intrinsic_deref_##O: op = nir_intrinsic_shared_##O; break;
330 OP(atomic_exchange)
331 OP(atomic_comp_swap)
332 OP(atomic_add)
333 OP(atomic_imin)
334 OP(atomic_umin)
335 OP(atomic_imax)
336 OP(atomic_umax)
337 OP(atomic_and)
338 OP(atomic_or)
339 OP(atomic_xor)
340 OP(atomic_fadd)
341 OP(atomic_fmin)
342 OP(atomic_fmax)
343 OP(atomic_fcomp_swap)
344 #undef OP
345 default:
346 unreachable("Invalid atomic");
347 }
348
349 nir_intrinsic_instr *atomic =
350 nir_intrinsic_instr_create(state->builder.shader, op);
351
352 nir_intrinsic_set_base(atomic, var->data.driver_location);
353
354 atomic->src[0] = nir_src_for_ssa(offset);
355 assert(nir_intrinsic_infos[intrin->intrinsic].num_srcs ==
356 nir_intrinsic_infos[op].num_srcs);
357 for (unsigned i = 1; i < nir_intrinsic_infos[op].num_srcs; i++) {
358 nir_src_copy(&atomic->src[i], &intrin->src[i], atomic);
359 }
360
361 return atomic;
362 }
363
364 static nir_intrinsic_instr *
365 lower_interpolate_at(nir_intrinsic_instr *intrin, struct lower_io_state *state,
366 nir_variable *var, nir_ssa_def *offset, unsigned component,
367 const struct glsl_type *type)
368 {
369 assert(var->data.mode == nir_var_shader_in);
370
371 /* Ignore interpolateAt() for flat variables - flat is flat. */
372 if (var->data.interpolation == INTERP_MODE_FLAT)
373 return lower_load(intrin, state, NULL, var, offset, component, type);
374
375 nir_intrinsic_op bary_op;
376 switch (intrin->intrinsic) {
377 case nir_intrinsic_interp_deref_at_centroid:
378 bary_op = (state->options & nir_lower_io_force_sample_interpolation) ?
379 nir_intrinsic_load_barycentric_sample :
380 nir_intrinsic_load_barycentric_centroid;
381 break;
382 case nir_intrinsic_interp_deref_at_sample:
383 bary_op = nir_intrinsic_load_barycentric_at_sample;
384 break;
385 case nir_intrinsic_interp_deref_at_offset:
386 bary_op = nir_intrinsic_load_barycentric_at_offset;
387 break;
388 default:
389 unreachable("Bogus interpolateAt() intrinsic.");
390 }
391
392 nir_intrinsic_instr *bary_setup =
393 nir_intrinsic_instr_create(state->builder.shader, bary_op);
394
395 nir_ssa_dest_init(&bary_setup->instr, &bary_setup->dest, 2, 32, NULL);
396 nir_intrinsic_set_interp_mode(bary_setup, var->data.interpolation);
397
398 if (intrin->intrinsic == nir_intrinsic_interp_deref_at_sample ||
399 intrin->intrinsic == nir_intrinsic_interp_deref_at_offset)
400 nir_src_copy(&bary_setup->src[0], &intrin->src[1], bary_setup);
401
402 nir_builder_instr_insert(&state->builder, &bary_setup->instr);
403
404 nir_intrinsic_instr *load =
405 nir_intrinsic_instr_create(state->builder.shader,
406 nir_intrinsic_load_interpolated_input);
407 load->num_components = intrin->num_components;
408
409 nir_intrinsic_set_base(load, var->data.driver_location);
410 nir_intrinsic_set_component(load, component);
411
412 load->src[0] = nir_src_for_ssa(&bary_setup->dest.ssa);
413 load->src[1] = nir_src_for_ssa(offset);
414
415 return load;
416 }
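/*
 * Illustrative expansion of interpolateAtOffset(v, off) for a non-flat
 * input v (exact NIR spelling omitted): the pass emits
 *
 *    ssa_b  = load_barycentric_at_offset(off)          // interp_mode from v
 *    result = load_interpolated_input(ssa_b, offset)   // base/component from v
 *
 * interpolateAtSample is handled the same way via
 * load_barycentric_at_sample, while interpolateAtCentroid needs no extra
 * source at all.
 */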
417
418 static bool
419 nir_lower_io_block(nir_block *block,
420 struct lower_io_state *state)
421 {
422 nir_builder *b = &state->builder;
423 const nir_shader_compiler_options *options = b->shader->options;
424 bool progress = false;
425
426 nir_foreach_instr_safe(instr, block) {
427 if (instr->type != nir_instr_type_intrinsic)
428 continue;
429
430 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
431
432 switch (intrin->intrinsic) {
433 case nir_intrinsic_load_deref:
434 case nir_intrinsic_store_deref:
435 case nir_intrinsic_deref_atomic_add:
436 case nir_intrinsic_deref_atomic_imin:
437 case nir_intrinsic_deref_atomic_umin:
438 case nir_intrinsic_deref_atomic_imax:
439 case nir_intrinsic_deref_atomic_umax:
440 case nir_intrinsic_deref_atomic_and:
441 case nir_intrinsic_deref_atomic_or:
442 case nir_intrinsic_deref_atomic_xor:
443 case nir_intrinsic_deref_atomic_exchange:
444 case nir_intrinsic_deref_atomic_comp_swap:
445 case nir_intrinsic_deref_atomic_fadd:
446 case nir_intrinsic_deref_atomic_fmin:
447 case nir_intrinsic_deref_atomic_fmax:
448 case nir_intrinsic_deref_atomic_fcomp_swap:
 449          /* We can lower the IO for this NIR intrinsic */
450 break;
451 case nir_intrinsic_interp_deref_at_centroid:
452 case nir_intrinsic_interp_deref_at_sample:
453 case nir_intrinsic_interp_deref_at_offset:
454 /* We can optionally lower these to load_interpolated_input */
455 if (options->use_interpolated_input_intrinsics)
456 break;
457 default:
 458          /* We can't lower the IO for this NIR intrinsic, so skip it */
459 continue;
460 }
461
462 nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
463
464 nir_variable_mode mode = deref->mode;
465
466 if ((state->modes & mode) == 0)
467 continue;
468
469 if (mode != nir_var_shader_in &&
470 mode != nir_var_shader_out &&
471 mode != nir_var_mem_shared &&
472 mode != nir_var_uniform)
473 continue;
474
475 nir_variable *var = nir_deref_instr_get_variable(deref);
476
477 b->cursor = nir_before_instr(instr);
478
479 const bool per_vertex = nir_is_per_vertex_io(var, b->shader->info.stage);
480
481 nir_ssa_def *offset;
482 nir_ssa_def *vertex_index = NULL;
483 unsigned component_offset = var->data.location_frac;
484 bool bindless_type_size = mode == nir_var_shader_in ||
485 mode == nir_var_shader_out ||
486 var->data.bindless;
487
488 offset = get_io_offset(b, deref, per_vertex ? &vertex_index : NULL,
489 state->type_size, &component_offset,
490 bindless_type_size);
491
492 nir_intrinsic_instr *replacement;
493
494 switch (intrin->intrinsic) {
495 case nir_intrinsic_load_deref:
496 replacement = lower_load(intrin, state, vertex_index, var, offset,
497 component_offset, deref->type);
498 break;
499
500 case nir_intrinsic_store_deref:
501 replacement = lower_store(intrin, state, vertex_index, var, offset,
502 component_offset, deref->type);
503 break;
504
505 case nir_intrinsic_deref_atomic_add:
506 case nir_intrinsic_deref_atomic_imin:
507 case nir_intrinsic_deref_atomic_umin:
508 case nir_intrinsic_deref_atomic_imax:
509 case nir_intrinsic_deref_atomic_umax:
510 case nir_intrinsic_deref_atomic_and:
511 case nir_intrinsic_deref_atomic_or:
512 case nir_intrinsic_deref_atomic_xor:
513 case nir_intrinsic_deref_atomic_exchange:
514 case nir_intrinsic_deref_atomic_comp_swap:
515 case nir_intrinsic_deref_atomic_fadd:
516 case nir_intrinsic_deref_atomic_fmin:
517 case nir_intrinsic_deref_atomic_fmax:
518 case nir_intrinsic_deref_atomic_fcomp_swap:
519 assert(vertex_index == NULL);
520 replacement = lower_atomic(intrin, state, var, offset);
521 break;
522
523 case nir_intrinsic_interp_deref_at_centroid:
524 case nir_intrinsic_interp_deref_at_sample:
525 case nir_intrinsic_interp_deref_at_offset:
526 assert(vertex_index == NULL);
527 replacement = lower_interpolate_at(intrin, state, var, offset,
528 component_offset, deref->type);
529 break;
530
531 default:
532 continue;
533 }
534
535 if (nir_intrinsic_infos[intrin->intrinsic].has_dest) {
536 if (intrin->dest.is_ssa) {
537 nir_ssa_dest_init(&replacement->instr, &replacement->dest,
538 intrin->dest.ssa.num_components,
539 intrin->dest.ssa.bit_size, NULL);
540 nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
541 nir_src_for_ssa(&replacement->dest.ssa));
542 } else {
543 nir_dest_copy(&replacement->dest, &intrin->dest, &intrin->instr);
544 }
545 }
546
547 nir_instr_insert_before(&intrin->instr, &replacement->instr);
548 nir_instr_remove(&intrin->instr);
549 progress = true;
550 }
551
552 return progress;
553 }
554
555 static bool
556 nir_lower_io_impl(nir_function_impl *impl,
557 nir_variable_mode modes,
558 int (*type_size)(const struct glsl_type *, bool),
559 nir_lower_io_options options)
560 {
561 struct lower_io_state state;
562 bool progress = false;
563
564 nir_builder_init(&state.builder, impl);
565 state.dead_ctx = ralloc_context(NULL);
566 state.modes = modes;
567 state.type_size = type_size;
568 state.options = options;
569
570 nir_foreach_block(block, impl) {
571 progress |= nir_lower_io_block(block, &state);
572 }
573
574 ralloc_free(state.dead_ctx);
575
576 nir_metadata_preserve(impl, nir_metadata_block_index |
577 nir_metadata_dominance);
578 return progress;
579 }
580
581 bool
582 nir_lower_io(nir_shader *shader, nir_variable_mode modes,
583 int (*type_size)(const struct glsl_type *, bool),
584 nir_lower_io_options options)
585 {
586 bool progress = false;
587
588 nir_foreach_function(function, shader) {
589 if (function->impl) {
590 progress |= nir_lower_io_impl(function->impl, modes,
591 type_size, options);
592 }
593 }
594
595 return progress;
596 }
597
598 static unsigned
599 type_scalar_size_bytes(const struct glsl_type *type)
600 {
601 assert(glsl_type_is_vector_or_scalar(type) ||
602 glsl_type_is_matrix(type));
603 return glsl_type_is_boolean(type) ? 4 : glsl_get_bit_size(type) / 8;
604 }
605
606 static nir_ssa_def *
607 build_addr_iadd(nir_builder *b, nir_ssa_def *addr,
608 nir_address_format addr_format, nir_ssa_def *offset)
609 {
610 assert(offset->num_components == 1);
611 assert(addr->bit_size == offset->bit_size);
612
613 switch (addr_format) {
614 case nir_address_format_32bit_global:
615 case nir_address_format_64bit_global:
616 case nir_address_format_32bit_offset:
617 assert(addr->num_components == 1);
618 return nir_iadd(b, addr, offset);
619
620 case nir_address_format_64bit_bounded_global:
621 assert(addr->num_components == 4);
622 return nir_vec4(b, nir_channel(b, addr, 0),
623 nir_channel(b, addr, 1),
624 nir_channel(b, addr, 2),
625 nir_iadd(b, nir_channel(b, addr, 3), offset));
626
627 case nir_address_format_32bit_index_offset:
628 assert(addr->num_components == 2);
629 return nir_vec2(b, nir_channel(b, addr, 0),
630 nir_iadd(b, nir_channel(b, addr, 1), offset));
631 case nir_address_format_logical:
632 unreachable("Unsupported address format");
633 }
634 unreachable("Invalid address format");
635 }
636
637 static nir_ssa_def *
638 build_addr_iadd_imm(nir_builder *b, nir_ssa_def *addr,
639 nir_address_format addr_format, int64_t offset)
640 {
641 return build_addr_iadd(b, addr, addr_format,
642 nir_imm_intN_t(b, offset, addr->bit_size));
643 }
644
645 static nir_ssa_def *
646 addr_to_index(nir_builder *b, nir_ssa_def *addr,
647 nir_address_format addr_format)
648 {
649 assert(addr_format == nir_address_format_32bit_index_offset);
650 assert(addr->num_components == 2);
651 return nir_channel(b, addr, 0);
652 }
653
654 static nir_ssa_def *
655 addr_to_offset(nir_builder *b, nir_ssa_def *addr,
656 nir_address_format addr_format)
657 {
658 assert(addr_format == nir_address_format_32bit_index_offset);
659 assert(addr->num_components == 2);
660 return nir_channel(b, addr, 1);
661 }
662
663 /** Returns true if the given address format resolves to a global address */
664 static bool
665 addr_format_is_global(nir_address_format addr_format)
666 {
667 return addr_format == nir_address_format_32bit_global ||
668 addr_format == nir_address_format_64bit_global ||
669 addr_format == nir_address_format_64bit_bounded_global;
670 }
671
672 static nir_ssa_def *
673 addr_to_global(nir_builder *b, nir_ssa_def *addr,
674 nir_address_format addr_format)
675 {
676 switch (addr_format) {
677 case nir_address_format_32bit_global:
678 case nir_address_format_64bit_global:
679 assert(addr->num_components == 1);
680 return addr;
681
682 case nir_address_format_64bit_bounded_global:
683 assert(addr->num_components == 4);
684 return nir_iadd(b, nir_pack_64_2x32(b, nir_channels(b, addr, 0x3)),
685 nir_u2u64(b, nir_channel(b, addr, 3)));
686
687 case nir_address_format_32bit_index_offset:
688 case nir_address_format_32bit_offset:
689 case nir_address_format_logical:
690 unreachable("Cannot get a 64-bit address with this address format");
691 }
692
693 unreachable("Invalid address format");
694 }
695
696 static bool
697 addr_format_needs_bounds_check(nir_address_format addr_format)
698 {
699 return addr_format == nir_address_format_64bit_bounded_global;
700 }
701
702 static nir_ssa_def *
703 addr_is_in_bounds(nir_builder *b, nir_ssa_def *addr,
704 nir_address_format addr_format, unsigned size)
705 {
706 assert(addr_format == nir_address_format_64bit_bounded_global);
707 assert(addr->num_components == 4);
708 return nir_ige(b, nir_channel(b, addr, 2),
709 nir_iadd_imm(b, nir_channel(b, addr, 3), size));
710 }
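/*
 * The vector layouts implied by the helpers above (as used throughout this
 * file):
 *
 *    32bit_global / 64bit_global : a single 32- or 64-bit address
 *    32bit_index_offset          : vec2(buffer_index, byte_offset)
 *    64bit_bounded_global        : vec4(addr_lo, addr_hi, buffer_size, byte_offset)
 *    32bit_offset                : a single 32-bit byte offset
 *
 * so the bounds check above is simply buffer_size >= byte_offset + access_size.
 */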
711
712 static nir_ssa_def *
713 build_explicit_io_load(nir_builder *b, nir_intrinsic_instr *intrin,
714 nir_ssa_def *addr, nir_address_format addr_format,
715 unsigned num_components)
716 {
717 nir_variable_mode mode = nir_src_as_deref(intrin->src[0])->mode;
718
719 nir_intrinsic_op op;
720 switch (mode) {
721 case nir_var_mem_ubo:
722 op = nir_intrinsic_load_ubo;
723 break;
724 case nir_var_mem_ssbo:
725 if (addr_format_is_global(addr_format))
726 op = nir_intrinsic_load_global;
727 else
728 op = nir_intrinsic_load_ssbo;
729 break;
730 case nir_var_mem_global:
731 assert(addr_format_is_global(addr_format));
732 op = nir_intrinsic_load_global;
733 break;
734 case nir_var_shader_in:
735 assert(addr_format_is_global(addr_format));
736 op = nir_intrinsic_load_kernel_input;
737 break;
738 default:
739 unreachable("Unsupported explicit IO variable mode");
740 }
741
742 nir_intrinsic_instr *load = nir_intrinsic_instr_create(b->shader, op);
743
744 if (addr_format_is_global(addr_format)) {
745 load->src[0] = nir_src_for_ssa(addr_to_global(b, addr, addr_format));
746 } else {
747 load->src[0] = nir_src_for_ssa(addr_to_index(b, addr, addr_format));
748 load->src[1] = nir_src_for_ssa(addr_to_offset(b, addr, addr_format));
749 }
750
751 if (mode != nir_var_mem_ubo && mode != nir_var_shader_in)
752 nir_intrinsic_set_access(load, nir_intrinsic_access(intrin));
753
 754    /* TODO: We should try to provide a better alignment. For OpenCL, we need
755 * to plumb the alignment through from SPIR-V when we have one.
756 */
757 nir_intrinsic_set_align(load, intrin->dest.ssa.bit_size / 8, 0);
758
759 assert(intrin->dest.is_ssa);
760 load->num_components = num_components;
761 nir_ssa_dest_init(&load->instr, &load->dest, num_components,
762 intrin->dest.ssa.bit_size, intrin->dest.ssa.name);
763
764 assert(load->dest.ssa.bit_size % 8 == 0);
765
766 if (addr_format_needs_bounds_check(addr_format)) {
767 /* The Vulkan spec for robustBufferAccess gives us quite a few options
768 * as to what we can do with an OOB read. Unfortunately, returning
769 * undefined values isn't one of them so we return an actual zero.
770 */
771 nir_ssa_def *zero = nir_imm_zero(b, load->num_components,
772 load->dest.ssa.bit_size);
773
774 const unsigned load_size =
775 (load->dest.ssa.bit_size / 8) * load->num_components;
776 nir_push_if(b, addr_is_in_bounds(b, addr, addr_format, load_size));
777
778 nir_builder_instr_insert(b, &load->instr);
779
780 nir_pop_if(b, NULL);
781
782 return nir_if_phi(b, &load->dest.ssa, zero);
783 } else {
784 nir_builder_instr_insert(b, &load->instr);
785 return &load->dest.ssa;
786 }
787 }
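/*
 * For the bounded format, the generated code is morally equivalent to the
 * following C-level sketch (the real output is NIR control flow plus a phi):
 *
 *    result = 0;
 *    if (buffer_size >= byte_offset + load_size)
 *       result = load(addr);
 *
 * i.e. out-of-bounds reads return zero rather than an undefined value.
 */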
788
789 static void
790 build_explicit_io_store(nir_builder *b, nir_intrinsic_instr *intrin,
791 nir_ssa_def *addr, nir_address_format addr_format,
792 nir_ssa_def *value, nir_component_mask_t write_mask)
793 {
794 nir_variable_mode mode = nir_src_as_deref(intrin->src[0])->mode;
795
796 nir_intrinsic_op op;
797 switch (mode) {
798 case nir_var_mem_ssbo:
799 if (addr_format_is_global(addr_format))
800 op = nir_intrinsic_store_global;
801 else
802 op = nir_intrinsic_store_ssbo;
803 break;
804 case nir_var_mem_global:
805 assert(addr_format_is_global(addr_format));
806 op = nir_intrinsic_store_global;
807 break;
808 default:
809 unreachable("Unsupported explicit IO variable mode");
810 }
811
812 nir_intrinsic_instr *store = nir_intrinsic_instr_create(b->shader, op);
813
814 store->src[0] = nir_src_for_ssa(value);
815 if (addr_format_is_global(addr_format)) {
816 store->src[1] = nir_src_for_ssa(addr_to_global(b, addr, addr_format));
817 } else {
818 store->src[1] = nir_src_for_ssa(addr_to_index(b, addr, addr_format));
819 store->src[2] = nir_src_for_ssa(addr_to_offset(b, addr, addr_format));
820 }
821
822 nir_intrinsic_set_write_mask(store, write_mask);
823
824 nir_intrinsic_set_access(store, nir_intrinsic_access(intrin));
825
 826    /* TODO: We should try to provide a better alignment. For OpenCL, we need
827 * to plumb the alignment through from SPIR-V when we have one.
828 */
829 nir_intrinsic_set_align(store, value->bit_size / 8, 0);
830
831 assert(value->num_components == 1 ||
832 value->num_components == intrin->num_components);
833 store->num_components = value->num_components;
834
835 assert(value->bit_size % 8 == 0);
836
837 if (addr_format_needs_bounds_check(addr_format)) {
838 const unsigned store_size = (value->bit_size / 8) * store->num_components;
839 nir_push_if(b, addr_is_in_bounds(b, addr, addr_format, store_size));
840
841 nir_builder_instr_insert(b, &store->instr);
842
843 nir_pop_if(b, NULL);
844 } else {
845 nir_builder_instr_insert(b, &store->instr);
846 }
847 }
848
849 static nir_ssa_def *
850 build_explicit_io_atomic(nir_builder *b, nir_intrinsic_instr *intrin,
851 nir_ssa_def *addr, nir_address_format addr_format)
852 {
853 nir_variable_mode mode = nir_src_as_deref(intrin->src[0])->mode;
854 const unsigned num_data_srcs =
855 nir_intrinsic_infos[intrin->intrinsic].num_srcs - 1;
856
857 nir_intrinsic_op op;
858 switch (mode) {
859 case nir_var_mem_ssbo:
860 if (addr_format_is_global(addr_format))
861 op = global_atomic_for_deref(intrin->intrinsic);
862 else
863 op = ssbo_atomic_for_deref(intrin->intrinsic);
864 break;
865 case nir_var_mem_global:
866 assert(addr_format_is_global(addr_format));
867 op = global_atomic_for_deref(intrin->intrinsic);
868 break;
869 default:
870 unreachable("Unsupported explicit IO variable mode");
871 }
872
873 nir_intrinsic_instr *atomic = nir_intrinsic_instr_create(b->shader, op);
874
875 unsigned src = 0;
876 if (addr_format_is_global(addr_format)) {
877 atomic->src[src++] = nir_src_for_ssa(addr_to_global(b, addr, addr_format));
878 } else {
879 atomic->src[src++] = nir_src_for_ssa(addr_to_index(b, addr, addr_format));
880 atomic->src[src++] = nir_src_for_ssa(addr_to_offset(b, addr, addr_format));
881 }
882 for (unsigned i = 0; i < num_data_srcs; i++) {
883 atomic->src[src++] = nir_src_for_ssa(intrin->src[1 + i].ssa);
884 }
885
886 /* Global atomics don't have access flags because they assume that the
887 * address may be non-uniform.
888 */
889 if (!addr_format_is_global(addr_format))
890 nir_intrinsic_set_access(atomic, nir_intrinsic_access(intrin));
891
892 assert(intrin->dest.ssa.num_components == 1);
893 nir_ssa_dest_init(&atomic->instr, &atomic->dest,
894 1, intrin->dest.ssa.bit_size, intrin->dest.ssa.name);
895
896 assert(atomic->dest.ssa.bit_size % 8 == 0);
897
898 if (addr_format_needs_bounds_check(addr_format)) {
899 const unsigned atomic_size = atomic->dest.ssa.bit_size / 8;
900 nir_push_if(b, addr_is_in_bounds(b, addr, addr_format, atomic_size));
901
902 nir_builder_instr_insert(b, &atomic->instr);
903
904 nir_pop_if(b, NULL);
905 return nir_if_phi(b, &atomic->dest.ssa,
906 nir_ssa_undef(b, 1, atomic->dest.ssa.bit_size));
907 } else {
908 nir_builder_instr_insert(b, &atomic->instr);
909 return &atomic->dest.ssa;
910 }
911 }
912
913 nir_ssa_def *
914 nir_explicit_io_address_from_deref(nir_builder *b, nir_deref_instr *deref,
915 nir_ssa_def *base_addr,
916 nir_address_format addr_format)
917 {
918 assert(deref->dest.is_ssa);
919 switch (deref->deref_type) {
920 case nir_deref_type_var:
921 assert(deref->mode == nir_var_shader_in);
922 return nir_imm_intN_t(b, deref->var->data.driver_location,
923 deref->dest.ssa.bit_size);
924
925 case nir_deref_type_array: {
926 nir_deref_instr *parent = nir_deref_instr_parent(deref);
927
928 unsigned stride = glsl_get_explicit_stride(parent->type);
929 if ((glsl_type_is_matrix(parent->type) &&
930 glsl_matrix_type_is_row_major(parent->type)) ||
931 (glsl_type_is_vector(parent->type) && stride == 0))
932 stride = type_scalar_size_bytes(parent->type);
933
934 assert(stride > 0);
935
936 nir_ssa_def *index = nir_ssa_for_src(b, deref->arr.index, 1);
937 index = nir_i2i(b, index, base_addr->bit_size);
938 return build_addr_iadd(b, base_addr, addr_format,
939 nir_imul_imm(b, index, stride));
940 }
941
942 case nir_deref_type_ptr_as_array: {
943 nir_ssa_def *index = nir_ssa_for_src(b, deref->arr.index, 1);
944 index = nir_i2i(b, index, base_addr->bit_size);
945 unsigned stride = nir_deref_instr_ptr_as_array_stride(deref);
946 return build_addr_iadd(b, base_addr, addr_format,
947 nir_imul_imm(b, index, stride));
948 }
949
950 case nir_deref_type_array_wildcard:
951 unreachable("Wildcards should be lowered by now");
952 break;
953
954 case nir_deref_type_struct: {
955 nir_deref_instr *parent = nir_deref_instr_parent(deref);
956 int offset = glsl_get_struct_field_offset(parent->type,
957 deref->strct.index);
958 assert(offset >= 0);
959 return build_addr_iadd_imm(b, base_addr, addr_format, offset);
960 }
961
962 case nir_deref_type_cast:
963 /* Nothing to do here */
964 return base_addr;
965 }
966
967 unreachable("Invalid NIR deref type");
968 }
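/*
 * Illustrative address computation (std430-style explicit layouts assumed):
 * for a deref chain s[i].f where the array stride is 16 bytes and field f
 * sits at byte 8 within the struct, the cases above combine to
 *
 *    addr = base_addr + i * 16 + 8
 *
 * expressed via build_addr_iadd()/build_addr_iadd_imm() so the same logic
 * works for any of the supported address formats.
 */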
969
970 void
971 nir_lower_explicit_io_instr(nir_builder *b,
972 nir_intrinsic_instr *intrin,
973 nir_ssa_def *addr,
974 nir_address_format addr_format)
975 {
976 b->cursor = nir_after_instr(&intrin->instr);
977
978 nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
979 unsigned vec_stride = glsl_get_explicit_stride(deref->type);
980 unsigned scalar_size = type_scalar_size_bytes(deref->type);
981 assert(vec_stride == 0 || glsl_type_is_vector(deref->type));
982 assert(vec_stride == 0 || vec_stride >= scalar_size);
983
984 if (intrin->intrinsic == nir_intrinsic_load_deref) {
985 nir_ssa_def *value;
986 if (vec_stride > scalar_size) {
987 nir_ssa_def *comps[4] = { NULL, };
988 for (unsigned i = 0; i < intrin->num_components; i++) {
989 nir_ssa_def *comp_addr = build_addr_iadd_imm(b, addr, addr_format,
990 vec_stride * i);
991 comps[i] = build_explicit_io_load(b, intrin, comp_addr,
992 addr_format, 1);
993 }
994 value = nir_vec(b, comps, intrin->num_components);
995 } else {
996 value = build_explicit_io_load(b, intrin, addr, addr_format,
997 intrin->num_components);
998 }
999 nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(value));
1000 } else if (intrin->intrinsic == nir_intrinsic_store_deref) {
1001 assert(intrin->src[1].is_ssa);
1002 nir_ssa_def *value = intrin->src[1].ssa;
1003 nir_component_mask_t write_mask = nir_intrinsic_write_mask(intrin);
1004 if (vec_stride > scalar_size) {
1005 for (unsigned i = 0; i < intrin->num_components; i++) {
1006 if (!(write_mask & (1 << i)))
1007 continue;
1008
1009 nir_ssa_def *comp_addr = build_addr_iadd_imm(b, addr, addr_format,
1010 vec_stride * i);
1011 build_explicit_io_store(b, intrin, comp_addr, addr_format,
1012 nir_channel(b, value, i), 1);
1013 }
1014 } else {
1015 build_explicit_io_store(b, intrin, addr, addr_format,
1016 value, write_mask);
1017 }
1018 } else {
1019 nir_ssa_def *value =
1020 build_explicit_io_atomic(b, intrin, addr, addr_format);
1021 nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(value));
1022 }
1023
1024 nir_instr_remove(&intrin->instr);
1025 }
1026
1027 static void
1028 lower_explicit_io_deref(nir_builder *b, nir_deref_instr *deref,
1029 nir_address_format addr_format)
1030 {
1031 /* Just delete the deref if it's not used. We can't use
1032 * nir_deref_instr_remove_if_unused here because it may remove more than
1033 * one deref which could break our list walking since we walk the list
1034 * backwards.
1035 */
1036 assert(list_empty(&deref->dest.ssa.if_uses));
1037 if (list_empty(&deref->dest.ssa.uses)) {
1038 nir_instr_remove(&deref->instr);
1039 return;
1040 }
1041
1042 b->cursor = nir_after_instr(&deref->instr);
1043
1044 nir_ssa_def *base_addr = NULL;
1045 if (deref->deref_type != nir_deref_type_var) {
1046 assert(deref->parent.is_ssa);
1047 base_addr = deref->parent.ssa;
1048 }
1049
1050 nir_ssa_def *addr = nir_explicit_io_address_from_deref(b, deref, base_addr,
1051 addr_format);
1052
1053 nir_instr_remove(&deref->instr);
1054 nir_ssa_def_rewrite_uses(&deref->dest.ssa, nir_src_for_ssa(addr));
1055 }
1056
1057 static void
1058 lower_explicit_io_access(nir_builder *b, nir_intrinsic_instr *intrin,
1059 nir_address_format addr_format)
1060 {
1061 assert(intrin->src[0].is_ssa);
1062 nir_lower_explicit_io_instr(b, intrin, intrin->src[0].ssa, addr_format);
1063 }
1064
1065 static void
1066 lower_explicit_io_array_length(nir_builder *b, nir_intrinsic_instr *intrin,
1067 nir_address_format addr_format)
1068 {
1069 b->cursor = nir_after_instr(&intrin->instr);
1070
1071 nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
1072
1073 assert(glsl_type_is_array(deref->type));
1074 assert(glsl_get_length(deref->type) == 0);
1075 unsigned stride = glsl_get_explicit_stride(deref->type);
1076 assert(stride > 0);
1077
1078 assert(addr_format == nir_address_format_32bit_index_offset);
1079 nir_ssa_def *addr = &deref->dest.ssa;
1080 nir_ssa_def *index = addr_to_index(b, addr, addr_format);
1081 nir_ssa_def *offset = addr_to_offset(b, addr, addr_format);
1082
1083 nir_intrinsic_instr *bsize =
1084 nir_intrinsic_instr_create(b->shader, nir_intrinsic_get_buffer_size);
1085 bsize->src[0] = nir_src_for_ssa(index);
1086 nir_ssa_dest_init(&bsize->instr, &bsize->dest, 1, 32, NULL);
1087 nir_builder_instr_insert(b, &bsize->instr);
1088
1089 nir_ssa_def *arr_size =
1090 nir_idiv(b, nir_isub(b, &bsize->dest.ssa, offset),
1091 nir_imm_int(b, stride));
1092
1093 nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(arr_size));
1094 nir_instr_remove(&intrin->instr);
1095 }
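/*
 * Example of the arithmetic above: for an unsized array with a 16-byte
 * explicit stride living at byte offset 32 of a 256-byte buffer,
 * get_buffer_size returns 256 and the reported length is
 * (256 - 32) / 16 = 14 elements.
 */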
1096
1097 static bool
1098 nir_lower_explicit_io_impl(nir_function_impl *impl, nir_variable_mode modes,
1099 nir_address_format addr_format)
1100 {
1101 bool progress = false;
1102
1103 nir_builder b;
1104 nir_builder_init(&b, impl);
1105
1106 /* Walk in reverse order so that we can see the full deref chain when we
1107 * lower the access operations. We lower them assuming that the derefs
1108 * will be turned into address calculations later.
1109 */
1110 nir_foreach_block_reverse(block, impl) {
1111 nir_foreach_instr_reverse_safe(instr, block) {
1112 switch (instr->type) {
1113 case nir_instr_type_deref: {
1114 nir_deref_instr *deref = nir_instr_as_deref(instr);
1115 if (deref->mode & modes) {
1116 lower_explicit_io_deref(&b, deref, addr_format);
1117 progress = true;
1118 }
1119 break;
1120 }
1121
1122 case nir_instr_type_intrinsic: {
1123 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
1124 switch (intrin->intrinsic) {
1125 case nir_intrinsic_load_deref:
1126 case nir_intrinsic_store_deref:
1127 case nir_intrinsic_deref_atomic_add:
1128 case nir_intrinsic_deref_atomic_imin:
1129 case nir_intrinsic_deref_atomic_umin:
1130 case nir_intrinsic_deref_atomic_imax:
1131 case nir_intrinsic_deref_atomic_umax:
1132 case nir_intrinsic_deref_atomic_and:
1133 case nir_intrinsic_deref_atomic_or:
1134 case nir_intrinsic_deref_atomic_xor:
1135 case nir_intrinsic_deref_atomic_exchange:
1136 case nir_intrinsic_deref_atomic_comp_swap:
1137 case nir_intrinsic_deref_atomic_fadd:
1138 case nir_intrinsic_deref_atomic_fmin:
1139 case nir_intrinsic_deref_atomic_fmax:
1140 case nir_intrinsic_deref_atomic_fcomp_swap: {
1141 nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
1142 if (deref->mode & modes) {
1143 lower_explicit_io_access(&b, intrin, addr_format);
1144 progress = true;
1145 }
1146 break;
1147 }
1148
1149 case nir_intrinsic_deref_buffer_array_length: {
1150 nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
1151 if (deref->mode & modes) {
1152 lower_explicit_io_array_length(&b, intrin, addr_format);
1153 progress = true;
1154 }
1155 break;
1156 }
1157
1158 default:
1159 break;
1160 }
1161 break;
1162 }
1163
1164 default:
1165 /* Nothing to do */
1166 break;
1167 }
1168 }
1169 }
1170
1171 if (progress) {
1172 nir_metadata_preserve(impl, nir_metadata_block_index |
1173 nir_metadata_dominance);
1174 }
1175
1176 return progress;
1177 }
1178
1179 bool
1180 nir_lower_explicit_io(nir_shader *shader, nir_variable_mode modes,
1181 nir_address_format addr_format)
1182 {
1183 bool progress = false;
1184
1185 nir_foreach_function(function, shader) {
1186 if (function->impl &&
1187 nir_lower_explicit_io_impl(function->impl, modes, addr_format))
1188 progress = true;
1189 }
1190
1191 return progress;
1192 }
1193
1194 /**
1195 * Return the offset source for a load/store intrinsic.
1196 */
1197 nir_src *
1198 nir_get_io_offset_src(nir_intrinsic_instr *instr)
1199 {
1200 switch (instr->intrinsic) {
1201 case nir_intrinsic_load_input:
1202 case nir_intrinsic_load_output:
1203 case nir_intrinsic_load_shared:
1204 case nir_intrinsic_load_uniform:
1205 case nir_intrinsic_load_global:
1206 case nir_intrinsic_load_scratch:
1207 case nir_intrinsic_load_fs_input_interp_deltas:
1208 return &instr->src[0];
1209 case nir_intrinsic_load_ubo:
1210 case nir_intrinsic_load_ssbo:
1211 case nir_intrinsic_load_per_vertex_input:
1212 case nir_intrinsic_load_per_vertex_output:
1213 case nir_intrinsic_load_interpolated_input:
1214 case nir_intrinsic_store_output:
1215 case nir_intrinsic_store_shared:
1216 case nir_intrinsic_store_global:
1217 case nir_intrinsic_store_scratch:
1218 return &instr->src[1];
1219 case nir_intrinsic_store_ssbo:
1220 case nir_intrinsic_store_per_vertex_output:
1221 return &instr->src[2];
1222 default:
1223 return NULL;
1224 }
1225 }
1226
1227 /**
1228 * Return the vertex index source for a load/store per_vertex intrinsic.
1229 */
1230 nir_src *
1231 nir_get_io_vertex_index_src(nir_intrinsic_instr *instr)
1232 {
1233 switch (instr->intrinsic) {
1234 case nir_intrinsic_load_per_vertex_input:
1235 case nir_intrinsic_load_per_vertex_output:
1236 return &instr->src[0];
1237 case nir_intrinsic_store_per_vertex_output:
1238 return &instr->src[1];
1239 default:
1240 return NULL;
1241 }
1242 }
1243
1244 /**
 1245  * Return the numeric constant that identifies a NULL pointer for each
 1246  * address format.
1247 */
1248 const nir_const_value *
1249 nir_address_format_null_value(nir_address_format addr_format)
1250 {
1251 const static nir_const_value null_values[][NIR_MAX_VEC_COMPONENTS] = {
1252 [nir_address_format_32bit_global] = {{0}},
1253 [nir_address_format_64bit_global] = {{0}},
1254 [nir_address_format_64bit_bounded_global] = {{0}},
1255 [nir_address_format_32bit_index_offset] = {{.u32 = ~0}, {.u32 = ~0}},
1256 [nir_address_format_32bit_offset] = {{.u32 = ~0}},
1257 [nir_address_format_logical] = {{.u32 = ~0}},
1258 };
1259
1260 assert(addr_format < ARRAY_SIZE(null_values));
1261 return null_values[addr_format];
1262 }
1263
1264 nir_ssa_def *
1265 nir_build_addr_ieq(nir_builder *b, nir_ssa_def *addr0, nir_ssa_def *addr1,
1266 nir_address_format addr_format)
1267 {
1268 switch (addr_format) {
1269 case nir_address_format_32bit_global:
1270 case nir_address_format_64bit_global:
1271 case nir_address_format_64bit_bounded_global:
1272 case nir_address_format_32bit_index_offset:
1273 case nir_address_format_32bit_offset:
1274 return nir_ball_iequal(b, addr0, addr1);
1275
1276 case nir_address_format_logical:
1277 unreachable("Unsupported address format");
1278 }
1279
1280 unreachable("Invalid address format");
1281 }
1282
1283 nir_ssa_def *
1284 nir_build_addr_isub(nir_builder *b, nir_ssa_def *addr0, nir_ssa_def *addr1,
1285 nir_address_format addr_format)
1286 {
1287 switch (addr_format) {
1288 case nir_address_format_32bit_global:
1289 case nir_address_format_64bit_global:
1290 case nir_address_format_32bit_offset:
1291 assert(addr0->num_components == 1);
1292 assert(addr1->num_components == 1);
1293 return nir_isub(b, addr0, addr1);
1294
1295 case nir_address_format_64bit_bounded_global:
1296 return nir_isub(b, addr_to_global(b, addr0, addr_format),
1297 addr_to_global(b, addr1, addr_format));
1298
1299 case nir_address_format_32bit_index_offset:
1300 assert(addr0->num_components == 2);
1301 assert(addr1->num_components == 2);
1302 /* Assume the same buffer index. */
1303 return nir_isub(b, nir_channel(b, addr0, 1), nir_channel(b, addr1, 1));
1304
1305 case nir_address_format_logical:
1306 unreachable("Unsupported address format");
1307 }
1308
1309 unreachable("Invalid address format");
1310 }
1311
1312 static bool
1313 is_input(nir_intrinsic_instr *intrin)
1314 {
1315 return intrin->intrinsic == nir_intrinsic_load_input ||
1316 intrin->intrinsic == nir_intrinsic_load_per_vertex_input ||
1317 intrin->intrinsic == nir_intrinsic_load_interpolated_input ||
1318 intrin->intrinsic == nir_intrinsic_load_fs_input_interp_deltas;
1319 }
1320
1321 static bool
1322 is_output(nir_intrinsic_instr *intrin)
1323 {
1324 return intrin->intrinsic == nir_intrinsic_load_output ||
1325 intrin->intrinsic == nir_intrinsic_load_per_vertex_output ||
1326 intrin->intrinsic == nir_intrinsic_store_output ||
1327 intrin->intrinsic == nir_intrinsic_store_per_vertex_output;
1328 }
1329
1330
1331 /**
1332 * This pass adds constant offsets to instr->const_index[0] for input/output
1333 * intrinsics, and resets the offset source to 0. Non-constant offsets remain
 1334  * unchanged; since we don't know what part of a compound variable is
1335 * accessed, we allocate storage for the entire thing. For drivers that use
1336 * nir_lower_io_to_temporaries() before nir_lower_io(), this guarantees that
1337 * the offset source will be 0, so that they don't have to add it in manually.
1338 */
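/*
 * Concretely (illustrative NIR, names hypothetical): an intrinsic like
 *
 *    load_input (ssa_const_5) (base=2, ...)
 *
 * becomes
 *
 *    load_input (ssa_zero) (base=7, ...)
 *
 * while anything with a non-constant offset source is left untouched.
 */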
1339
1340 static bool
1341 add_const_offset_to_base_block(nir_block *block, nir_builder *b,
1342 nir_variable_mode mode)
1343 {
1344 bool progress = false;
1345 nir_foreach_instr_safe(instr, block) {
1346 if (instr->type != nir_instr_type_intrinsic)
1347 continue;
1348
1349 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
1350
1351 if ((mode == nir_var_shader_in && is_input(intrin)) ||
1352 (mode == nir_var_shader_out && is_output(intrin))) {
1353 nir_src *offset = nir_get_io_offset_src(intrin);
1354
1355 if (nir_src_is_const(*offset)) {
1356 intrin->const_index[0] += nir_src_as_uint(*offset);
1357 b->cursor = nir_before_instr(&intrin->instr);
1358 nir_instr_rewrite_src(&intrin->instr, offset,
1359 nir_src_for_ssa(nir_imm_int(b, 0)));
1360 progress = true;
1361 }
1362 }
1363 }
1364
1365 return progress;
1366 }
1367
1368 bool
1369 nir_io_add_const_offset_to_base(nir_shader *nir, nir_variable_mode mode)
1370 {
1371 bool progress = false;
1372
1373 nir_foreach_function(f, nir) {
1374 if (f->impl) {
1375 nir_builder b;
1376 nir_builder_init(&b, f->impl);
1377 nir_foreach_block(block, f->impl) {
1378 progress |= add_const_offset_to_base_block(block, &b, mode);
1379 }
1380 }
1381 }
1382
1383 return progress;
1384 }
1385