nir/lower_io: Add an option to lower 64-bit varyings
[mesa.git] src/compiler/nir/nir_lower_io.c
1 /*
2 * Copyright © 2014 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Connor Abbott (cwabbott0@gmail.com)
25 * Jason Ekstrand (jason@jlekstrand.net)
26 *
27 */
28
29 /*
30 * This lowering pass converts load/store references to input/output
31 * variables into the corresponding input/output intrinsics.
32 */
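/*
 * Editor's illustrative sketch (not part of the original documentation; the
 * exact IR depends on the driver's type_size callback and options): a
 * fragment-shader input load such as
 *
 *    vec1 32 ssa_1 = deref_var &color (shader_in vec4)
 *    vec4 32 ssa_2 = intrinsic load_deref (ssa_1) ()
 *
 * is rewritten by this pass into something like
 *
 *    vec2 32 ssa_1 = intrinsic load_barycentric_pixel () (interp_mode)
 *    vec4 32 ssa_2 = intrinsic load_interpolated_input (ssa_1, offset) (base, component)
 */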
33
34 #include "nir.h"
35 #include "nir_builder.h"
36 #include "nir_deref.h"
37
38 struct lower_io_state {
39 void *dead_ctx;
40 nir_builder builder;
41 int (*type_size)(const struct glsl_type *type, bool);
42 nir_variable_mode modes;
43 nir_lower_io_options options;
44 };
45
46 static nir_intrinsic_op
47 ssbo_atomic_for_deref(nir_intrinsic_op deref_op)
48 {
49 switch (deref_op) {
50 #define OP(O) case nir_intrinsic_deref_##O: return nir_intrinsic_ssbo_##O;
51 OP(atomic_exchange)
52 OP(atomic_comp_swap)
53 OP(atomic_add)
54 OP(atomic_imin)
55 OP(atomic_umin)
56 OP(atomic_imax)
57 OP(atomic_umax)
58 OP(atomic_and)
59 OP(atomic_or)
60 OP(atomic_xor)
61 OP(atomic_fadd)
62 OP(atomic_fmin)
63 OP(atomic_fmax)
64 OP(atomic_fcomp_swap)
65 #undef OP
66 default:
67 unreachable("Invalid SSBO atomic");
68 }
69 }
70
71 static nir_intrinsic_op
72 global_atomic_for_deref(nir_intrinsic_op deref_op)
73 {
74 switch (deref_op) {
75 #define OP(O) case nir_intrinsic_deref_##O: return nir_intrinsic_global_##O;
76 OP(atomic_exchange)
77 OP(atomic_comp_swap)
78 OP(atomic_add)
79 OP(atomic_imin)
80 OP(atomic_umin)
81 OP(atomic_imax)
82 OP(atomic_umax)
83 OP(atomic_and)
84 OP(atomic_or)
85 OP(atomic_xor)
86 OP(atomic_fadd)
87 OP(atomic_fmin)
88 OP(atomic_fmax)
89 OP(atomic_fcomp_swap)
90 #undef OP
91 default:
92 unreachable("Invalid global atomic");
93 }
94 }
95
96 void
97 nir_assign_var_locations(struct exec_list *var_list, unsigned *size,
98 int (*type_size)(const struct glsl_type *, bool))
99 {
100 unsigned location = 0;
101
102 nir_foreach_variable(var, var_list) {
103 /*
104 * UBOs and SSBOs have their own address spaces, so don't count them
105 * towards the number of global uniforms.
106 */
107 if (var->data.mode == nir_var_mem_ubo || var->data.mode == nir_var_mem_ssbo)
108 continue;
109
110 var->data.driver_location = location;
111 bool bindless_type_size = var->data.mode == nir_var_shader_in ||
112 var->data.mode == nir_var_shader_out ||
113 var->data.bindless;
114 location += type_size(var->type, bindless_type_size);
115 }
116
117 *size = location;
118 }
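/*
 * Illustrative example (editor's note, assuming a vec4-slot-counting
 * type_size callback, which this file does not provide): for uniforms
 * "vec4 v; mat4 m;", v is assigned driver_location 0, m is assigned
 * driver_location 1, and *size ends up as 5 slots.
 */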
119
120 /**
121 * Return true if the given variable is a per-vertex input/output array
122 * (such as geometry shader inputs).
123 */
124 bool
125 nir_is_per_vertex_io(const nir_variable *var, gl_shader_stage stage)
126 {
127 if (var->data.patch || !glsl_type_is_array(var->type))
128 return false;
129
130 if (var->data.mode == nir_var_shader_in)
131 return stage == MESA_SHADER_GEOMETRY ||
132 stage == MESA_SHADER_TESS_CTRL ||
133 stage == MESA_SHADER_TESS_EVAL;
134
135 if (var->data.mode == nir_var_shader_out)
136 return stage == MESA_SHADER_TESS_CTRL;
137
138 return false;
139 }
140
141 static nir_ssa_def *
142 get_io_offset(nir_builder *b, nir_deref_instr *deref,
143 nir_ssa_def **vertex_index,
144 int (*type_size)(const struct glsl_type *, bool),
145 unsigned *component, bool bts)
146 {
147 nir_deref_path path;
148 nir_deref_path_init(&path, deref, NULL);
149
150 assert(path.path[0]->deref_type == nir_deref_type_var);
151 nir_deref_instr **p = &path.path[1];
152
153 /* For per-vertex input arrays (i.e. geometry shader inputs), keep the
154 * outermost array index separate. Process the rest normally.
155 */
156 if (vertex_index != NULL) {
157 assert((*p)->deref_type == nir_deref_type_array);
158 *vertex_index = nir_ssa_for_src(b, (*p)->arr.index, 1);
159 p++;
160 }
161
162 if (path.path[0]->var->data.compact) {
163 assert((*p)->deref_type == nir_deref_type_array);
164 assert(glsl_type_is_scalar((*p)->type));
165
166 /* We always lower indirect dereferences for "compact" array vars. */
167 const unsigned index = nir_src_as_uint((*p)->arr.index);
168 const unsigned total_offset = *component + index;
169 const unsigned slot_offset = total_offset / 4;
170 *component = total_offset % 4;
171 return nir_imm_int(b, type_size(glsl_vec4_type(), bts) * slot_offset);
172 }
173
174 /* Just emit code and let constant-folding go to town */
175 nir_ssa_def *offset = nir_imm_int(b, 0);
176
177 for (; *p; p++) {
178 if ((*p)->deref_type == nir_deref_type_array) {
179 unsigned size = type_size((*p)->type, bts);
180
181 nir_ssa_def *mul =
182 nir_imul_imm(b, nir_ssa_for_src(b, (*p)->arr.index, 1), size);
183
184 offset = nir_iadd(b, offset, mul);
185 } else if ((*p)->deref_type == nir_deref_type_struct) {
186 /* p starts at path[1], so this is safe */
187 nir_deref_instr *parent = *(p - 1);
188
189 unsigned field_offset = 0;
190 for (unsigned i = 0; i < (*p)->strct.index; i++) {
191 field_offset += type_size(glsl_get_struct_field(parent->type, i), bts);
192 }
193 offset = nir_iadd_imm(b, offset, field_offset);
194 } else {
195 unreachable("Unsupported deref type");
196 }
197 }
198
199 nir_deref_path_finish(&path);
200
201 return offset;
202 }
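/*
 * Example (editor's sketch, assuming a vec4-slot-counting type_size): for a
 * geometry shader input "in Block { vec4 a; vec4 b[2]; } blk[];", the deref
 * chain blk[v].b[i] yields vertex_index = v and offset = 1 + i (one slot for
 * the preceding field "a" plus i slots of array stride), built as imul/iadd
 * instructions that constant folding can simplify.
 */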
203
204 static nir_ssa_def *
205 emit_load(struct lower_io_state *state,
206 nir_ssa_def *vertex_index, nir_variable *var, nir_ssa_def *offset,
207 unsigned component, unsigned num_components, unsigned bit_size,
208 nir_alu_type type)
209 {
210 nir_builder *b = &state->builder;
211 const nir_shader *nir = b->shader;
212 nir_variable_mode mode = var->data.mode;
213 nir_ssa_def *barycentric = NULL;
214
215 nir_intrinsic_op op;
216 switch (mode) {
217 case nir_var_shader_in:
218 if (nir->info.stage == MESA_SHADER_FRAGMENT &&
219 nir->options->use_interpolated_input_intrinsics &&
220 var->data.interpolation != INTERP_MODE_FLAT) {
221 assert(vertex_index == NULL);
222
223 nir_intrinsic_op bary_op;
224 if (var->data.sample ||
225 (state->options & nir_lower_io_force_sample_interpolation))
226 bary_op = nir_intrinsic_load_barycentric_sample;
227 else if (var->data.centroid)
228 bary_op = nir_intrinsic_load_barycentric_centroid;
229 else
230 bary_op = nir_intrinsic_load_barycentric_pixel;
231
232 barycentric = nir_load_barycentric(&state->builder, bary_op,
233 var->data.interpolation);
234 op = nir_intrinsic_load_interpolated_input;
235 } else {
236 op = vertex_index ? nir_intrinsic_load_per_vertex_input :
237 nir_intrinsic_load_input;
238 }
239 break;
240 case nir_var_shader_out:
241 op = vertex_index ? nir_intrinsic_load_per_vertex_output :
242 nir_intrinsic_load_output;
243 break;
244 case nir_var_uniform:
245 op = nir_intrinsic_load_uniform;
246 break;
247 case nir_var_mem_shared:
248 op = nir_intrinsic_load_shared;
249 break;
250 default:
251 unreachable("Unknown variable mode");
252 }
253
254 nir_intrinsic_instr *load =
255 nir_intrinsic_instr_create(state->builder.shader, op);
256 load->num_components = num_components;
257
258 nir_intrinsic_set_base(load, var->data.driver_location);
259 if (mode == nir_var_shader_in || mode == nir_var_shader_out)
260 nir_intrinsic_set_component(load, component);
261
262 if (load->intrinsic == nir_intrinsic_load_uniform)
263 nir_intrinsic_set_range(load,
264 state->type_size(var->type, var->data.bindless));
265
266 if (load->intrinsic == nir_intrinsic_load_input ||
267 load->intrinsic == nir_intrinsic_load_uniform)
268 nir_intrinsic_set_type(load, type);
269
270 if (vertex_index) {
271 load->src[0] = nir_src_for_ssa(vertex_index);
272 load->src[1] = nir_src_for_ssa(offset);
273 } else if (barycentric) {
274 load->src[0] = nir_src_for_ssa(barycentric);
275 load->src[1] = nir_src_for_ssa(offset);
276 } else {
277 load->src[0] = nir_src_for_ssa(offset);
278 }
279
280 nir_ssa_dest_init(&load->instr, &load->dest,
281 num_components, bit_size, NULL);
282 nir_builder_instr_insert(b, &load->instr);
283
284 return &load->dest.ssa;
285 }
286
287 static nir_ssa_def *
288 lower_load(nir_intrinsic_instr *intrin, struct lower_io_state *state,
289 nir_ssa_def *vertex_index, nir_variable *var, nir_ssa_def *offset,
290 unsigned component, const struct glsl_type *type)
291 {
292 assert(intrin->dest.is_ssa);
293 if (intrin->dest.ssa.bit_size == 64 &&
294 (state->options & nir_lower_io_lower_64bit_to_32)) {
295 nir_builder *b = &state->builder;
296
297 const unsigned slot_size = state->type_size(glsl_dvec_type(2), false);
298
299 nir_ssa_def *comp64[4];
300 assert(component == 0 || component == 2);
301 unsigned dest_comp = 0;
302 while (dest_comp < intrin->dest.ssa.num_components) {
303 const unsigned num_comps =
304 MIN2(intrin->dest.ssa.num_components - dest_comp,
305 (4 - component) / 2);
306
307 nir_ssa_def *data32 =
308 emit_load(state, vertex_index, var, offset, component,
309 num_comps * 2, 32, nir_type_uint32);
310 for (unsigned i = 0; i < num_comps; i++) {
311 comp64[dest_comp + i] =
312 nir_pack_64_2x32(b, nir_channels(b, data32, 3 << (i * 2)));
313 }
314
315 /* Only the first load has a component offset */
316 component = 0;
317 dest_comp += num_comps;
318 offset = nir_iadd_imm(b, offset, slot_size);
319 }
320
321 return nir_vec(b, comp64, intrin->dest.ssa.num_components);
322 } else {
323 return emit_load(state, vertex_index, var, offset, component,
324 intrin->dest.ssa.num_components,
325 intrin->dest.ssa.bit_size,
326 nir_get_nir_type_for_glsl_type(type));
327 }
328 }
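/*
 * Example of the nir_lower_io_lower_64bit_to_32 path above (editor's
 * sketch): loading a dvec3 varying at component 0 emits a 4-component
 * 32-bit load covering the first slot (two doubles) and a 2-component
 * 32-bit load from the following slot, then recombines the dword pairs
 * with pack_64_2x32 into the original dvec3 value.
 */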
329
330 static void
331 emit_store(struct lower_io_state *state, nir_ssa_def *data,
332 nir_ssa_def *vertex_index, nir_variable *var, nir_ssa_def *offset,
333 unsigned component, unsigned num_components,
334 nir_component_mask_t write_mask, nir_alu_type type)
335 {
336 nir_builder *b = &state->builder;
337 nir_variable_mode mode = var->data.mode;
338
339 nir_intrinsic_op op;
340 if (mode == nir_var_mem_shared) {
341 op = nir_intrinsic_store_shared;
342 } else {
343 assert(mode == nir_var_shader_out);
344 op = vertex_index ? nir_intrinsic_store_per_vertex_output :
345 nir_intrinsic_store_output;
346 }
347
348 nir_intrinsic_instr *store =
349 nir_intrinsic_instr_create(state->builder.shader, op);
350 store->num_components = num_components;
351
352 store->src[0] = nir_src_for_ssa(data);
353
354 nir_intrinsic_set_base(store, var->data.driver_location);
355
356 if (mode == nir_var_shader_out)
357 nir_intrinsic_set_component(store, component);
358
359 if (store->intrinsic == nir_intrinsic_store_output)
360 nir_intrinsic_set_type(store, type);
361
362 nir_intrinsic_set_write_mask(store, write_mask);
363
364 if (vertex_index)
365 store->src[1] = nir_src_for_ssa(vertex_index);
366
367 store->src[vertex_index ? 2 : 1] = nir_src_for_ssa(offset);
368
369 nir_builder_instr_insert(b, &store->instr);
370 }
371
372 static void
373 lower_store(nir_intrinsic_instr *intrin, struct lower_io_state *state,
374 nir_ssa_def *vertex_index, nir_variable *var, nir_ssa_def *offset,
375 unsigned component, const struct glsl_type *type)
376 {
377 assert(intrin->src[1].is_ssa);
378 if (intrin->src[1].ssa->bit_size == 64 &&
379 (state->options & nir_lower_io_lower_64bit_to_32)) {
380 nir_builder *b = &state->builder;
381
382 const unsigned slot_size = state->type_size(glsl_dvec_type(2), false);
383
384 assert(component == 0 || component == 2);
385 unsigned src_comp = 0;
386 nir_component_mask_t write_mask = nir_intrinsic_write_mask(intrin);
387 while (src_comp < intrin->num_components) {
388 const unsigned num_comps =
389 MIN2(intrin->num_components - src_comp,
390 (4 - component) / 2);
391
392 if (write_mask & BITFIELD_MASK(num_comps)) {
393 nir_ssa_def *data =
394 nir_channels(b, intrin->src[1].ssa,
395 BITFIELD_RANGE(src_comp, num_comps));
396 nir_ssa_def *data32 = nir_bitcast_vector(b, data, 32);
397
398 nir_component_mask_t write_mask32 = 0;
399 for (unsigned i = 0; i < num_comps; i++) {
400 if (write_mask & BITFIELD_MASK(num_comps) & (1 << i))
401 write_mask32 |= 3 << (i * 2);
402 }
403
404 emit_store(state, data32, vertex_index, var, offset,
405 component, data32->num_components, write_mask32,
406 nir_type_uint32);
407 }
408
409 /* Only the first store has a component offset */
410 component = 0;
411 src_comp += num_comps;
412 write_mask >>= num_comps;
413 offset = nir_iadd_imm(b, offset, slot_size);
414 }
415 } else {
416 emit_store(state, intrin->src[1].ssa, vertex_index, var, offset,
417 component, intrin->num_components,
418 nir_intrinsic_write_mask(intrin),
419 nir_get_nir_type_for_glsl_type(type));
420 }
421 }
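/*
 * The store path mirrors the load path.  For example (editor's sketch),
 * storing a dvec2 with write mask 0x2 emits a single 4-component 32-bit
 * store with write mask 0xc, i.e. only the two dwords that hold the second
 * double are actually written.
 */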
422
423 static nir_ssa_def *
424 lower_atomic(nir_intrinsic_instr *intrin, struct lower_io_state *state,
425 nir_variable *var, nir_ssa_def *offset)
426 {
427 nir_builder *b = &state->builder;
428 assert(var->data.mode == nir_var_mem_shared);
429
430 nir_intrinsic_op op;
431 switch (intrin->intrinsic) {
432 #define OP(O) case nir_intrinsic_deref_##O: op = nir_intrinsic_shared_##O; break;
433 OP(atomic_exchange)
434 OP(atomic_comp_swap)
435 OP(atomic_add)
436 OP(atomic_imin)
437 OP(atomic_umin)
438 OP(atomic_imax)
439 OP(atomic_umax)
440 OP(atomic_and)
441 OP(atomic_or)
442 OP(atomic_xor)
443 OP(atomic_fadd)
444 OP(atomic_fmin)
445 OP(atomic_fmax)
446 OP(atomic_fcomp_swap)
447 #undef OP
448 default:
449 unreachable("Invalid atomic");
450 }
451
452 nir_intrinsic_instr *atomic =
453 nir_intrinsic_instr_create(state->builder.shader, op);
454
455 nir_intrinsic_set_base(atomic, var->data.driver_location);
456
457 atomic->src[0] = nir_src_for_ssa(offset);
458 assert(nir_intrinsic_infos[intrin->intrinsic].num_srcs ==
459 nir_intrinsic_infos[op].num_srcs);
460 for (unsigned i = 1; i < nir_intrinsic_infos[op].num_srcs; i++) {
461 nir_src_copy(&atomic->src[i], &intrin->src[i], atomic);
462 }
463
464 if (nir_intrinsic_infos[op].has_dest) {
465 assert(intrin->dest.is_ssa);
466 assert(nir_intrinsic_infos[intrin->intrinsic].has_dest);
467 nir_ssa_dest_init(&atomic->instr, &atomic->dest,
468 intrin->dest.ssa.num_components,
469 intrin->dest.ssa.bit_size, NULL);
470 }
471
472 nir_builder_instr_insert(b, &atomic->instr);
473
474 return nir_intrinsic_infos[op].has_dest ? &atomic->dest.ssa : NULL;
475 }
476
477 static nir_ssa_def *
478 lower_interpolate_at(nir_intrinsic_instr *intrin, struct lower_io_state *state,
479 nir_variable *var, nir_ssa_def *offset, unsigned component,
480 const struct glsl_type *type)
481 {
482 nir_builder *b = &state->builder;
483 assert(var->data.mode == nir_var_shader_in);
484
485 /* Ignore interpolateAt() for flat variables - flat is flat. */
486 if (var->data.interpolation == INTERP_MODE_FLAT)
487 return lower_load(intrin, state, NULL, var, offset, component, type);
488
489 /* None of the supported APIs allow interpolation on 64-bit things */
490 assert(intrin->dest.is_ssa && intrin->dest.ssa.bit_size <= 32);
491
492 nir_intrinsic_op bary_op;
493 switch (intrin->intrinsic) {
494 case nir_intrinsic_interp_deref_at_centroid:
495 bary_op = (state->options & nir_lower_io_force_sample_interpolation) ?
496 nir_intrinsic_load_barycentric_sample :
497 nir_intrinsic_load_barycentric_centroid;
498 break;
499 case nir_intrinsic_interp_deref_at_sample:
500 bary_op = nir_intrinsic_load_barycentric_at_sample;
501 break;
502 case nir_intrinsic_interp_deref_at_offset:
503 bary_op = nir_intrinsic_load_barycentric_at_offset;
504 break;
505 default:
506 unreachable("Bogus interpolateAt() intrinsic.");
507 }
508
509 nir_intrinsic_instr *bary_setup =
510 nir_intrinsic_instr_create(state->builder.shader, bary_op);
511
512 nir_ssa_dest_init(&bary_setup->instr, &bary_setup->dest, 2, 32, NULL);
513 nir_intrinsic_set_interp_mode(bary_setup, var->data.interpolation);
514
515 if (intrin->intrinsic == nir_intrinsic_interp_deref_at_sample ||
516 intrin->intrinsic == nir_intrinsic_interp_deref_at_offset)
517 nir_src_copy(&bary_setup->src[0], &intrin->src[1], bary_setup);
518
519 nir_builder_instr_insert(b, &bary_setup->instr);
520
521 nir_intrinsic_instr *load =
522 nir_intrinsic_instr_create(state->builder.shader,
523 nir_intrinsic_load_interpolated_input);
524 load->num_components = intrin->num_components;
525
526 nir_intrinsic_set_base(load, var->data.driver_location);
527 nir_intrinsic_set_component(load, component);
528
529 load->src[0] = nir_src_for_ssa(&bary_setup->dest.ssa);
530 load->src[1] = nir_src_for_ssa(offset);
531
532 assert(intrin->dest.is_ssa);
533 nir_ssa_dest_init(&load->instr, &load->dest,
534 intrin->dest.ssa.num_components,
535 intrin->dest.ssa.bit_size, NULL);
536 nir_builder_instr_insert(b, &load->instr);
537
538 return &load->dest.ssa;
539 }
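/*
 * Example (editor's sketch): interpolateAtOffset() on a smooth fragment
 * input becomes a load_barycentric_at_offset intrinsic that consumes the
 * offset operand, followed by a load_interpolated_input that consumes the
 * resulting barycentrics, matching the regular input-load path above.
 */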
540
541 static bool
542 nir_lower_io_block(nir_block *block,
543 struct lower_io_state *state)
544 {
545 nir_builder *b = &state->builder;
546 const nir_shader_compiler_options *options = b->shader->options;
547 bool progress = false;
548
549 nir_foreach_instr_safe(instr, block) {
550 if (instr->type != nir_instr_type_intrinsic)
551 continue;
552
553 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
554
555 switch (intrin->intrinsic) {
556 case nir_intrinsic_load_deref:
557 case nir_intrinsic_store_deref:
558 case nir_intrinsic_deref_atomic_add:
559 case nir_intrinsic_deref_atomic_imin:
560 case nir_intrinsic_deref_atomic_umin:
561 case nir_intrinsic_deref_atomic_imax:
562 case nir_intrinsic_deref_atomic_umax:
563 case nir_intrinsic_deref_atomic_and:
564 case nir_intrinsic_deref_atomic_or:
565 case nir_intrinsic_deref_atomic_xor:
566 case nir_intrinsic_deref_atomic_exchange:
567 case nir_intrinsic_deref_atomic_comp_swap:
568 case nir_intrinsic_deref_atomic_fadd:
569 case nir_intrinsic_deref_atomic_fmin:
570 case nir_intrinsic_deref_atomic_fmax:
571 case nir_intrinsic_deref_atomic_fcomp_swap:
572 /* We can lower the io for this nir intrinsic */
573 break;
574 case nir_intrinsic_interp_deref_at_centroid:
575 case nir_intrinsic_interp_deref_at_sample:
576 case nir_intrinsic_interp_deref_at_offset:
577 /* We can optionally lower these to load_interpolated_input */
578 if (options->use_interpolated_input_intrinsics)
579 break;
580 default:
581 /* We can't lower the io for this nir intrinsic, so skip it */
582 continue;
583 }
584
585 nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
586
587 nir_variable_mode mode = deref->mode;
588
589 if ((state->modes & mode) == 0)
590 continue;
591
592 if (mode != nir_var_shader_in &&
593 mode != nir_var_shader_out &&
594 mode != nir_var_mem_shared &&
595 mode != nir_var_uniform)
596 continue;
597
598 nir_variable *var = nir_deref_instr_get_variable(deref);
599
600 b->cursor = nir_before_instr(instr);
601
602 const bool per_vertex = nir_is_per_vertex_io(var, b->shader->info.stage);
603
604 nir_ssa_def *offset;
605 nir_ssa_def *vertex_index = NULL;
606 unsigned component_offset = var->data.location_frac;
607 bool bindless_type_size = mode == nir_var_shader_in ||
608 mode == nir_var_shader_out ||
609 var->data.bindless;
610
611 offset = get_io_offset(b, deref, per_vertex ? &vertex_index : NULL,
612 state->type_size, &component_offset,
613 bindless_type_size);
614
615 nir_ssa_def *replacement = NULL;
616
617 switch (intrin->intrinsic) {
618 case nir_intrinsic_load_deref:
619 replacement = lower_load(intrin, state, vertex_index, var, offset,
620 component_offset, deref->type);
621 break;
622
623 case nir_intrinsic_store_deref:
624 lower_store(intrin, state, vertex_index, var, offset,
625 component_offset, deref->type);
626 break;
627
628 case nir_intrinsic_deref_atomic_add:
629 case nir_intrinsic_deref_atomic_imin:
630 case nir_intrinsic_deref_atomic_umin:
631 case nir_intrinsic_deref_atomic_imax:
632 case nir_intrinsic_deref_atomic_umax:
633 case nir_intrinsic_deref_atomic_and:
634 case nir_intrinsic_deref_atomic_or:
635 case nir_intrinsic_deref_atomic_xor:
636 case nir_intrinsic_deref_atomic_exchange:
637 case nir_intrinsic_deref_atomic_comp_swap:
638 case nir_intrinsic_deref_atomic_fadd:
639 case nir_intrinsic_deref_atomic_fmin:
640 case nir_intrinsic_deref_atomic_fmax:
641 case nir_intrinsic_deref_atomic_fcomp_swap:
642 assert(vertex_index == NULL);
643 replacement = lower_atomic(intrin, state, var, offset);
644 break;
645
646 case nir_intrinsic_interp_deref_at_centroid:
647 case nir_intrinsic_interp_deref_at_sample:
648 case nir_intrinsic_interp_deref_at_offset:
649 assert(vertex_index == NULL);
650 replacement = lower_interpolate_at(intrin, state, var, offset,
651 component_offset, deref->type);
652 break;
653
654 default:
655 continue;
656 }
657
658 if (replacement) {
659 nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
660 nir_src_for_ssa(replacement));
661 }
662 nir_instr_remove(&intrin->instr);
663 progress = true;
664 }
665
666 return progress;
667 }
668
669 static bool
670 nir_lower_io_impl(nir_function_impl *impl,
671 nir_variable_mode modes,
672 int (*type_size)(const struct glsl_type *, bool),
673 nir_lower_io_options options)
674 {
675 struct lower_io_state state;
676 bool progress = false;
677
678 nir_builder_init(&state.builder, impl);
679 state.dead_ctx = ralloc_context(NULL);
680 state.modes = modes;
681 state.type_size = type_size;
682 state.options = options;
683
684 nir_foreach_block(block, impl) {
685 progress |= nir_lower_io_block(block, &state);
686 }
687
688 ralloc_free(state.dead_ctx);
689
690 nir_metadata_preserve(impl, nir_metadata_block_index |
691 nir_metadata_dominance);
692 return progress;
693 }
694
695 bool
696 nir_lower_io(nir_shader *shader, nir_variable_mode modes,
697 int (*type_size)(const struct glsl_type *, bool),
698 nir_lower_io_options options)
699 {
700 bool progress = false;
701
702 nir_foreach_function(function, shader) {
703 if (function->impl) {
704 progress |= nir_lower_io_impl(function->impl, modes,
705 type_size, options);
706 }
707 }
708
709 return progress;
710 }
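/*
 * Typical call site (editor's sketch; "driver_type_size_vec4" stands in for
 * whatever slot-counting callback the driver provides and is not defined in
 * this file):
 *
 *    NIR_PASS_V(nir, nir_lower_io,
 *               nir_var_shader_in | nir_var_shader_out,
 *               driver_type_size_vec4,
 *               nir_lower_io_lower_64bit_to_32);
 */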
711
712 static unsigned
713 type_scalar_size_bytes(const struct glsl_type *type)
714 {
715 assert(glsl_type_is_vector_or_scalar(type) ||
716 glsl_type_is_matrix(type));
717 return glsl_type_is_boolean(type) ? 4 : glsl_get_bit_size(type) / 8;
718 }
719
720 static nir_ssa_def *
721 build_addr_iadd(nir_builder *b, nir_ssa_def *addr,
722 nir_address_format addr_format, nir_ssa_def *offset)
723 {
724 assert(offset->num_components == 1);
725 assert(addr->bit_size == offset->bit_size);
726
727 switch (addr_format) {
728 case nir_address_format_32bit_global:
729 case nir_address_format_64bit_global:
730 case nir_address_format_32bit_offset:
731 assert(addr->num_components == 1);
732 return nir_iadd(b, addr, offset);
733
734 case nir_address_format_64bit_bounded_global:
735 assert(addr->num_components == 4);
736 return nir_vec4(b, nir_channel(b, addr, 0),
737 nir_channel(b, addr, 1),
738 nir_channel(b, addr, 2),
739 nir_iadd(b, nir_channel(b, addr, 3), offset));
740
741 case nir_address_format_32bit_index_offset:
742 assert(addr->num_components == 2);
743 return nir_vec2(b, nir_channel(b, addr, 0),
744 nir_iadd(b, nir_channel(b, addr, 1), offset));
745 case nir_address_format_logical:
746 unreachable("Unsupported address format");
747 }
748 unreachable("Invalid address format");
749 }
750
751 static nir_ssa_def *
752 build_addr_iadd_imm(nir_builder *b, nir_ssa_def *addr,
753 nir_address_format addr_format, int64_t offset)
754 {
755 return build_addr_iadd(b, addr, addr_format,
756 nir_imm_intN_t(b, offset, addr->bit_size));
757 }
758
759 static nir_ssa_def *
760 addr_to_index(nir_builder *b, nir_ssa_def *addr,
761 nir_address_format addr_format)
762 {
763 assert(addr_format == nir_address_format_32bit_index_offset);
764 assert(addr->num_components == 2);
765 return nir_channel(b, addr, 0);
766 }
767
768 static nir_ssa_def *
769 addr_to_offset(nir_builder *b, nir_ssa_def *addr,
770 nir_address_format addr_format)
771 {
772 assert(addr_format == nir_address_format_32bit_index_offset);
773 assert(addr->num_components == 2);
774 return nir_channel(b, addr, 1);
775 }
776
777 /** Returns true if the given address format resolves to a global address */
778 static bool
779 addr_format_is_global(nir_address_format addr_format)
780 {
781 return addr_format == nir_address_format_32bit_global ||
782 addr_format == nir_address_format_64bit_global ||
783 addr_format == nir_address_format_64bit_bounded_global;
784 }
785
786 static nir_ssa_def *
787 addr_to_global(nir_builder *b, nir_ssa_def *addr,
788 nir_address_format addr_format)
789 {
790 switch (addr_format) {
791 case nir_address_format_32bit_global:
792 case nir_address_format_64bit_global:
793 assert(addr->num_components == 1);
794 return addr;
795
796 case nir_address_format_64bit_bounded_global:
797 assert(addr->num_components == 4);
798 return nir_iadd(b, nir_pack_64_2x32(b, nir_channels(b, addr, 0x3)),
799 nir_u2u64(b, nir_channel(b, addr, 3)));
800
801 case nir_address_format_32bit_index_offset:
802 case nir_address_format_32bit_offset:
803 case nir_address_format_logical:
804 unreachable("Cannot get a 64-bit address with this address format");
805 }
806
807 unreachable("Invalid address format");
808 }
809
810 static bool
811 addr_format_needs_bounds_check(nir_address_format addr_format)
812 {
813 return addr_format == nir_address_format_64bit_bounded_global;
814 }
815
816 static nir_ssa_def *
817 addr_is_in_bounds(nir_builder *b, nir_ssa_def *addr,
818 nir_address_format addr_format, unsigned size)
819 {
820 assert(addr_format == nir_address_format_64bit_bounded_global);
821 assert(addr->num_components == 4);
822 return nir_ige(b, nir_channel(b, addr, 2),
823 nir_iadd_imm(b, nir_channel(b, addr, 3), size));
824 }
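/*
 * Summary of the address layouts assumed by the helpers above (editor's
 * note): 32bit_global, 64bit_global and 32bit_offset addresses are a single
 * scalar; 32bit_index_offset is vec2(buffer_index, byte_offset); and
 * 64bit_bounded_global is vec4(base_lo, base_hi, buffer_size, byte_offset),
 * so accesses can be bounds-checked against the third component.
 */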
825
826 static nir_ssa_def *
827 build_explicit_io_load(nir_builder *b, nir_intrinsic_instr *intrin,
828 nir_ssa_def *addr, nir_address_format addr_format,
829 unsigned num_components)
830 {
831 nir_variable_mode mode = nir_src_as_deref(intrin->src[0])->mode;
832
833 nir_intrinsic_op op;
834 switch (mode) {
835 case nir_var_mem_ubo:
836 op = nir_intrinsic_load_ubo;
837 break;
838 case nir_var_mem_ssbo:
839 if (addr_format_is_global(addr_format))
840 op = nir_intrinsic_load_global;
841 else
842 op = nir_intrinsic_load_ssbo;
843 break;
844 case nir_var_mem_global:
845 assert(addr_format_is_global(addr_format));
846 op = nir_intrinsic_load_global;
847 break;
848 case nir_var_shader_in:
849 assert(addr_format_is_global(addr_format));
850 op = nir_intrinsic_load_kernel_input;
851 break;
852 default:
853 unreachable("Unsupported explicit IO variable mode");
854 }
855
856 nir_intrinsic_instr *load = nir_intrinsic_instr_create(b->shader, op);
857
858 if (addr_format_is_global(addr_format)) {
859 load->src[0] = nir_src_for_ssa(addr_to_global(b, addr, addr_format));
860 } else {
861 load->src[0] = nir_src_for_ssa(addr_to_index(b, addr, addr_format));
862 load->src[1] = nir_src_for_ssa(addr_to_offset(b, addr, addr_format));
863 }
864
865 if (mode != nir_var_mem_ubo && mode != nir_var_shader_in)
866 nir_intrinsic_set_access(load, nir_intrinsic_access(intrin));
867
868 /* TODO: We should try to provide a better alignment. For OpenCL, we need
869 * to plumb the alignment through from SPIR-V when we have one.
870 */
871 nir_intrinsic_set_align(load, intrin->dest.ssa.bit_size / 8, 0);
872
873 assert(intrin->dest.is_ssa);
874 load->num_components = num_components;
875 nir_ssa_dest_init(&load->instr, &load->dest, num_components,
876 intrin->dest.ssa.bit_size, intrin->dest.ssa.name);
877
878 assert(load->dest.ssa.bit_size % 8 == 0);
879
880 if (addr_format_needs_bounds_check(addr_format)) {
881 /* The Vulkan spec for robustBufferAccess gives us quite a few options
882 * as to what we can do with an OOB read. Unfortunately, returning
883 * undefined values isn't one of them, so we return an actual zero.
884 */
885 nir_ssa_def *zero = nir_imm_zero(b, load->num_components,
886 load->dest.ssa.bit_size);
887
888 const unsigned load_size =
889 (load->dest.ssa.bit_size / 8) * load->num_components;
890 nir_push_if(b, addr_is_in_bounds(b, addr, addr_format, load_size));
891
892 nir_builder_instr_insert(b, &load->instr);
893
894 nir_pop_if(b, NULL);
895
896 return nir_if_phi(b, &load->dest.ssa, zero);
897 } else {
898 nir_builder_instr_insert(b, &load->instr);
899 return &load->dest.ssa;
900 }
901 }
902
903 static void
904 build_explicit_io_store(nir_builder *b, nir_intrinsic_instr *intrin,
905 nir_ssa_def *addr, nir_address_format addr_format,
906 nir_ssa_def *value, nir_component_mask_t write_mask)
907 {
908 nir_variable_mode mode = nir_src_as_deref(intrin->src[0])->mode;
909
910 nir_intrinsic_op op;
911 switch (mode) {
912 case nir_var_mem_ssbo:
913 if (addr_format_is_global(addr_format))
914 op = nir_intrinsic_store_global;
915 else
916 op = nir_intrinsic_store_ssbo;
917 break;
918 case nir_var_mem_global:
919 assert(addr_format_is_global(addr_format));
920 op = nir_intrinsic_store_global;
921 break;
922 default:
923 unreachable("Unsupported explicit IO variable mode");
924 }
925
926 nir_intrinsic_instr *store = nir_intrinsic_instr_create(b->shader, op);
927
928 store->src[0] = nir_src_for_ssa(value);
929 if (addr_format_is_global(addr_format)) {
930 store->src[1] = nir_src_for_ssa(addr_to_global(b, addr, addr_format));
931 } else {
932 store->src[1] = nir_src_for_ssa(addr_to_index(b, addr, addr_format));
933 store->src[2] = nir_src_for_ssa(addr_to_offset(b, addr, addr_format));
934 }
935
936 nir_intrinsic_set_write_mask(store, write_mask);
937
938 nir_intrinsic_set_access(store, nir_intrinsic_access(intrin));
939
940 /* TODO: We should try to provide a better alignment. For OpenCL, we need
941 * to plumb the alignment through from SPIR-V when we have one.
942 */
943 nir_intrinsic_set_align(store, value->bit_size / 8, 0);
944
945 assert(value->num_components == 1 ||
946 value->num_components == intrin->num_components);
947 store->num_components = value->num_components;
948
949 assert(value->bit_size % 8 == 0);
950
951 if (addr_format_needs_bounds_check(addr_format)) {
952 const unsigned store_size = (value->bit_size / 8) * store->num_components;
953 nir_push_if(b, addr_is_in_bounds(b, addr, addr_format, store_size));
954
955 nir_builder_instr_insert(b, &store->instr);
956
957 nir_pop_if(b, NULL);
958 } else {
959 nir_builder_instr_insert(b, &store->instr);
960 }
961 }
962
963 static nir_ssa_def *
964 build_explicit_io_atomic(nir_builder *b, nir_intrinsic_instr *intrin,
965 nir_ssa_def *addr, nir_address_format addr_format)
966 {
967 nir_variable_mode mode = nir_src_as_deref(intrin->src[0])->mode;
968 const unsigned num_data_srcs =
969 nir_intrinsic_infos[intrin->intrinsic].num_srcs - 1;
970
971 nir_intrinsic_op op;
972 switch (mode) {
973 case nir_var_mem_ssbo:
974 if (addr_format_is_global(addr_format))
975 op = global_atomic_for_deref(intrin->intrinsic);
976 else
977 op = ssbo_atomic_for_deref(intrin->intrinsic);
978 break;
979 case nir_var_mem_global:
980 assert(addr_format_is_global(addr_format));
981 op = global_atomic_for_deref(intrin->intrinsic);
982 break;
983 default:
984 unreachable("Unsupported explicit IO variable mode");
985 }
986
987 nir_intrinsic_instr *atomic = nir_intrinsic_instr_create(b->shader, op);
988
989 unsigned src = 0;
990 if (addr_format_is_global(addr_format)) {
991 atomic->src[src++] = nir_src_for_ssa(addr_to_global(b, addr, addr_format));
992 } else {
993 atomic->src[src++] = nir_src_for_ssa(addr_to_index(b, addr, addr_format));
994 atomic->src[src++] = nir_src_for_ssa(addr_to_offset(b, addr, addr_format));
995 }
996 for (unsigned i = 0; i < num_data_srcs; i++) {
997 atomic->src[src++] = nir_src_for_ssa(intrin->src[1 + i].ssa);
998 }
999
1000 /* Global atomics don't have access flags because they assume that the
1001 * address may be non-uniform.
1002 */
1003 if (!addr_format_is_global(addr_format))
1004 nir_intrinsic_set_access(atomic, nir_intrinsic_access(intrin));
1005
1006 assert(intrin->dest.ssa.num_components == 1);
1007 nir_ssa_dest_init(&atomic->instr, &atomic->dest,
1008 1, intrin->dest.ssa.bit_size, intrin->dest.ssa.name);
1009
1010 assert(atomic->dest.ssa.bit_size % 8 == 0);
1011
1012 if (addr_format_needs_bounds_check(addr_format)) {
1013 const unsigned atomic_size = atomic->dest.ssa.bit_size / 8;
1014 nir_push_if(b, addr_is_in_bounds(b, addr, addr_format, atomic_size));
1015
1016 nir_builder_instr_insert(b, &atomic->instr);
1017
1018 nir_pop_if(b, NULL);
1019 return nir_if_phi(b, &atomic->dest.ssa,
1020 nir_ssa_undef(b, 1, atomic->dest.ssa.bit_size));
1021 } else {
1022 nir_builder_instr_insert(b, &atomic->instr);
1023 return &atomic->dest.ssa;
1024 }
1025 }
1026
1027 nir_ssa_def *
1028 nir_explicit_io_address_from_deref(nir_builder *b, nir_deref_instr *deref,
1029 nir_ssa_def *base_addr,
1030 nir_address_format addr_format)
1031 {
1032 assert(deref->dest.is_ssa);
1033 switch (deref->deref_type) {
1034 case nir_deref_type_var:
1035 assert(deref->mode == nir_var_shader_in);
1036 return nir_imm_intN_t(b, deref->var->data.driver_location,
1037 deref->dest.ssa.bit_size);
1038
1039 case nir_deref_type_array: {
1040 nir_deref_instr *parent = nir_deref_instr_parent(deref);
1041
1042 unsigned stride = glsl_get_explicit_stride(parent->type);
1043 if ((glsl_type_is_matrix(parent->type) &&
1044 glsl_matrix_type_is_row_major(parent->type)) ||
1045 (glsl_type_is_vector(parent->type) && stride == 0))
1046 stride = type_scalar_size_bytes(parent->type);
1047
1048 assert(stride > 0);
1049
1050 nir_ssa_def *index = nir_ssa_for_src(b, deref->arr.index, 1);
1051 index = nir_i2i(b, index, base_addr->bit_size);
1052 return build_addr_iadd(b, base_addr, addr_format,
1053 nir_imul_imm(b, index, stride));
1054 }
1055
1056 case nir_deref_type_ptr_as_array: {
1057 nir_ssa_def *index = nir_ssa_for_src(b, deref->arr.index, 1);
1058 index = nir_i2i(b, index, base_addr->bit_size);
1059 unsigned stride = nir_deref_instr_ptr_as_array_stride(deref);
1060 return build_addr_iadd(b, base_addr, addr_format,
1061 nir_imul_imm(b, index, stride));
1062 }
1063
1064 case nir_deref_type_array_wildcard:
1065 unreachable("Wildcards should be lowered by now");
1066 break;
1067
1068 case nir_deref_type_struct: {
1069 nir_deref_instr *parent = nir_deref_instr_parent(deref);
1070 int offset = glsl_get_struct_field_offset(parent->type,
1071 deref->strct.index);
1072 assert(offset >= 0);
1073 return build_addr_iadd_imm(b, base_addr, addr_format, offset);
1074 }
1075
1076 case nir_deref_type_cast:
1077 /* Nothing to do here */
1078 return base_addr;
1079 }
1080
1081 unreachable("Invalid NIR deref type");
1082 }
1083
1084 void
1085 nir_lower_explicit_io_instr(nir_builder *b,
1086 nir_intrinsic_instr *intrin,
1087 nir_ssa_def *addr,
1088 nir_address_format addr_format)
1089 {
1090 b->cursor = nir_after_instr(&intrin->instr);
1091
1092 nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
1093 unsigned vec_stride = glsl_get_explicit_stride(deref->type);
1094 unsigned scalar_size = type_scalar_size_bytes(deref->type);
1095 assert(vec_stride == 0 || glsl_type_is_vector(deref->type));
1096 assert(vec_stride == 0 || vec_stride >= scalar_size);
1097
1098 if (intrin->intrinsic == nir_intrinsic_load_deref) {
1099 nir_ssa_def *value;
1100 if (vec_stride > scalar_size) {
1101 nir_ssa_def *comps[4] = { NULL, };
1102 for (unsigned i = 0; i < intrin->num_components; i++) {
1103 nir_ssa_def *comp_addr = build_addr_iadd_imm(b, addr, addr_format,
1104 vec_stride * i);
1105 comps[i] = build_explicit_io_load(b, intrin, comp_addr,
1106 addr_format, 1);
1107 }
1108 value = nir_vec(b, comps, intrin->num_components);
1109 } else {
1110 value = build_explicit_io_load(b, intrin, addr, addr_format,
1111 intrin->num_components);
1112 }
1113 nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(value));
1114 } else if (intrin->intrinsic == nir_intrinsic_store_deref) {
1115 assert(intrin->src[1].is_ssa);
1116 nir_ssa_def *value = intrin->src[1].ssa;
1117 nir_component_mask_t write_mask = nir_intrinsic_write_mask(intrin);
1118 if (vec_stride > scalar_size) {
1119 for (unsigned i = 0; i < intrin->num_components; i++) {
1120 if (!(write_mask & (1 << i)))
1121 continue;
1122
1123 nir_ssa_def *comp_addr = build_addr_iadd_imm(b, addr, addr_format,
1124 vec_stride * i);
1125 build_explicit_io_store(b, intrin, comp_addr, addr_format,
1126 nir_channel(b, value, i), 1);
1127 }
1128 } else {
1129 build_explicit_io_store(b, intrin, addr, addr_format,
1130 value, write_mask);
1131 }
1132 } else {
1133 nir_ssa_def *value =
1134 build_explicit_io_atomic(b, intrin, addr, addr_format);
1135 nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(value));
1136 }
1137
1138 nir_instr_remove(&intrin->instr);
1139 }
1140
1141 static void
1142 lower_explicit_io_deref(nir_builder *b, nir_deref_instr *deref,
1143 nir_address_format addr_format)
1144 {
1145 /* Just delete the deref if it's not used. We can't use
1146 * nir_deref_instr_remove_if_unused here because it may remove more than
1147 * one deref, which could break our list walking since we walk the list
1148 * backwards.
1149 */
1150 assert(list_empty(&deref->dest.ssa.if_uses));
1151 if (list_empty(&deref->dest.ssa.uses)) {
1152 nir_instr_remove(&deref->instr);
1153 return;
1154 }
1155
1156 b->cursor = nir_after_instr(&deref->instr);
1157
1158 nir_ssa_def *base_addr = NULL;
1159 if (deref->deref_type != nir_deref_type_var) {
1160 assert(deref->parent.is_ssa);
1161 base_addr = deref->parent.ssa;
1162 }
1163
1164 nir_ssa_def *addr = nir_explicit_io_address_from_deref(b, deref, base_addr,
1165 addr_format);
1166
1167 nir_instr_remove(&deref->instr);
1168 nir_ssa_def_rewrite_uses(&deref->dest.ssa, nir_src_for_ssa(addr));
1169 }
1170
1171 static void
1172 lower_explicit_io_access(nir_builder *b, nir_intrinsic_instr *intrin,
1173 nir_address_format addr_format)
1174 {
1175 assert(intrin->src[0].is_ssa);
1176 nir_lower_explicit_io_instr(b, intrin, intrin->src[0].ssa, addr_format);
1177 }
1178
1179 static void
1180 lower_explicit_io_array_length(nir_builder *b, nir_intrinsic_instr *intrin,
1181 nir_address_format addr_format)
1182 {
1183 b->cursor = nir_after_instr(&intrin->instr);
1184
1185 nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
1186
1187 assert(glsl_type_is_array(deref->type));
1188 assert(glsl_get_length(deref->type) == 0);
1189 unsigned stride = glsl_get_explicit_stride(deref->type);
1190 assert(stride > 0);
1191
1192 assert(addr_format == nir_address_format_32bit_index_offset);
1193 nir_ssa_def *addr = &deref->dest.ssa;
1194 nir_ssa_def *index = addr_to_index(b, addr, addr_format);
1195 nir_ssa_def *offset = addr_to_offset(b, addr, addr_format);
1196
1197 nir_intrinsic_instr *bsize =
1198 nir_intrinsic_instr_create(b->shader, nir_intrinsic_get_buffer_size);
1199 bsize->src[0] = nir_src_for_ssa(index);
1200 nir_ssa_dest_init(&bsize->instr, &bsize->dest, 1, 32, NULL);
1201 nir_builder_instr_insert(b, &bsize->instr);
1202
1203 nir_ssa_def *arr_size =
1204 nir_idiv(b, nir_isub(b, &bsize->dest.ssa, offset),
1205 nir_imm_int(b, stride));
1206
1207 nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(arr_size));
1208 nir_instr_remove(&intrin->instr);
1209 }
1210
1211 static bool
1212 nir_lower_explicit_io_impl(nir_function_impl *impl, nir_variable_mode modes,
1213 nir_address_format addr_format)
1214 {
1215 bool progress = false;
1216
1217 nir_builder b;
1218 nir_builder_init(&b, impl);
1219
1220 /* Walk in reverse order so that we can see the full deref chain when we
1221 * lower the access operations. We lower them assuming that the derefs
1222 * will be turned into address calculations later.
1223 */
1224 nir_foreach_block_reverse(block, impl) {
1225 nir_foreach_instr_reverse_safe(instr, block) {
1226 switch (instr->type) {
1227 case nir_instr_type_deref: {
1228 nir_deref_instr *deref = nir_instr_as_deref(instr);
1229 if (deref->mode & modes) {
1230 lower_explicit_io_deref(&b, deref, addr_format);
1231 progress = true;
1232 }
1233 break;
1234 }
1235
1236 case nir_instr_type_intrinsic: {
1237 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
1238 switch (intrin->intrinsic) {
1239 case nir_intrinsic_load_deref:
1240 case nir_intrinsic_store_deref:
1241 case nir_intrinsic_deref_atomic_add:
1242 case nir_intrinsic_deref_atomic_imin:
1243 case nir_intrinsic_deref_atomic_umin:
1244 case nir_intrinsic_deref_atomic_imax:
1245 case nir_intrinsic_deref_atomic_umax:
1246 case nir_intrinsic_deref_atomic_and:
1247 case nir_intrinsic_deref_atomic_or:
1248 case nir_intrinsic_deref_atomic_xor:
1249 case nir_intrinsic_deref_atomic_exchange:
1250 case nir_intrinsic_deref_atomic_comp_swap:
1251 case nir_intrinsic_deref_atomic_fadd:
1252 case nir_intrinsic_deref_atomic_fmin:
1253 case nir_intrinsic_deref_atomic_fmax:
1254 case nir_intrinsic_deref_atomic_fcomp_swap: {
1255 nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
1256 if (deref->mode & modes) {
1257 lower_explicit_io_access(&b, intrin, addr_format);
1258 progress = true;
1259 }
1260 break;
1261 }
1262
1263 case nir_intrinsic_deref_buffer_array_length: {
1264 nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
1265 if (deref->mode & modes) {
1266 lower_explicit_io_array_length(&b, intrin, addr_format);
1267 progress = true;
1268 }
1269 break;
1270 }
1271
1272 default:
1273 break;
1274 }
1275 break;
1276 }
1277
1278 default:
1279 /* Nothing to do */
1280 break;
1281 }
1282 }
1283 }
1284
1285 if (progress) {
1286 nir_metadata_preserve(impl, nir_metadata_block_index |
1287 nir_metadata_dominance);
1288 }
1289
1290 return progress;
1291 }
1292
1293 bool
1294 nir_lower_explicit_io(nir_shader *shader, nir_variable_mode modes,
1295 nir_address_format addr_format)
1296 {
1297 bool progress = false;
1298
1299 nir_foreach_function(function, shader) {
1300 if (function->impl &&
1301 nir_lower_explicit_io_impl(function->impl, modes, addr_format))
1302 progress = true;
1303 }
1304
1305 return progress;
1306 }
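/*
 * Typical usage (editor's sketch; the address format choice is driver
 * specific): a Vulkan-style driver might lower SSBO access with
 *
 *    NIR_PASS_V(nir, nir_lower_explicit_io, nir_var_mem_ssbo,
 *               nir_address_format_64bit_bounded_global);
 *
 * while an OpenCL-style flow would use nir_var_mem_global together with
 * nir_address_format_64bit_global.
 */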
1307
1308 /**
1309 * Return the offset source for a load/store intrinsic.
1310 */
1311 nir_src *
1312 nir_get_io_offset_src(nir_intrinsic_instr *instr)
1313 {
1314 switch (instr->intrinsic) {
1315 case nir_intrinsic_load_input:
1316 case nir_intrinsic_load_output:
1317 case nir_intrinsic_load_shared:
1318 case nir_intrinsic_load_uniform:
1319 case nir_intrinsic_load_global:
1320 case nir_intrinsic_load_scratch:
1321 case nir_intrinsic_load_fs_input_interp_deltas:
1322 return &instr->src[0];
1323 case nir_intrinsic_load_ubo:
1324 case nir_intrinsic_load_ssbo:
1325 case nir_intrinsic_load_per_vertex_input:
1326 case nir_intrinsic_load_per_vertex_output:
1327 case nir_intrinsic_load_interpolated_input:
1328 case nir_intrinsic_store_output:
1329 case nir_intrinsic_store_shared:
1330 case nir_intrinsic_store_global:
1331 case nir_intrinsic_store_scratch:
1332 return &instr->src[1];
1333 case nir_intrinsic_store_ssbo:
1334 case nir_intrinsic_store_per_vertex_output:
1335 return &instr->src[2];
1336 default:
1337 return NULL;
1338 }
1339 }
1340
1341 /**
1342 * Return the vertex index source for a load/store per_vertex intrinsic.
1343 */
1344 nir_src *
1345 nir_get_io_vertex_index_src(nir_intrinsic_instr *instr)
1346 {
1347 switch (instr->intrinsic) {
1348 case nir_intrinsic_load_per_vertex_input:
1349 case nir_intrinsic_load_per_vertex_output:
1350 return &instr->src[0];
1351 case nir_intrinsic_store_per_vertex_output:
1352 return &instr->src[1];
1353 default:
1354 return NULL;
1355 }
1356 }
1357
1358 /**
1359 * Return the numeric constant that identifies a NULL pointer for each address
1360 * format.
1361 */
1362 const nir_const_value *
1363 nir_address_format_null_value(nir_address_format addr_format)
1364 {
1365 static const nir_const_value null_values[][NIR_MAX_VEC_COMPONENTS] = {
1366 [nir_address_format_32bit_global] = {{0}},
1367 [nir_address_format_64bit_global] = {{0}},
1368 [nir_address_format_64bit_bounded_global] = {{0}},
1369 [nir_address_format_32bit_index_offset] = {{.u32 = ~0}, {.u32 = ~0}},
1370 [nir_address_format_32bit_offset] = {{.u32 = ~0}},
1371 [nir_address_format_logical] = {{.u32 = ~0}},
1372 };
1373
1374 assert(addr_format < ARRAY_SIZE(null_values));
1375 return null_values[addr_format];
1376 }
1377
1378 nir_ssa_def *
1379 nir_build_addr_ieq(nir_builder *b, nir_ssa_def *addr0, nir_ssa_def *addr1,
1380 nir_address_format addr_format)
1381 {
1382 switch (addr_format) {
1383 case nir_address_format_32bit_global:
1384 case nir_address_format_64bit_global:
1385 case nir_address_format_64bit_bounded_global:
1386 case nir_address_format_32bit_index_offset:
1387 case nir_address_format_32bit_offset:
1388 return nir_ball_iequal(b, addr0, addr1);
1389
1390 case nir_address_format_logical:
1391 unreachable("Unsupported address format");
1392 }
1393
1394 unreachable("Invalid address format");
1395 }
1396
1397 nir_ssa_def *
1398 nir_build_addr_isub(nir_builder *b, nir_ssa_def *addr0, nir_ssa_def *addr1,
1399 nir_address_format addr_format)
1400 {
1401 switch (addr_format) {
1402 case nir_address_format_32bit_global:
1403 case nir_address_format_64bit_global:
1404 case nir_address_format_32bit_offset:
1405 assert(addr0->num_components == 1);
1406 assert(addr1->num_components == 1);
1407 return nir_isub(b, addr0, addr1);
1408
1409 case nir_address_format_64bit_bounded_global:
1410 return nir_isub(b, addr_to_global(b, addr0, addr_format),
1411 addr_to_global(b, addr1, addr_format));
1412
1413 case nir_address_format_32bit_index_offset:
1414 assert(addr0->num_components == 2);
1415 assert(addr1->num_components == 2);
1416 /* Assume the same buffer index. */
1417 return nir_isub(b, nir_channel(b, addr0, 1), nir_channel(b, addr1, 1));
1418
1419 case nir_address_format_logical:
1420 unreachable("Unsupported address format");
1421 }
1422
1423 unreachable("Invalid address format");
1424 }
1425
1426 static bool
1427 is_input(nir_intrinsic_instr *intrin)
1428 {
1429 return intrin->intrinsic == nir_intrinsic_load_input ||
1430 intrin->intrinsic == nir_intrinsic_load_per_vertex_input ||
1431 intrin->intrinsic == nir_intrinsic_load_interpolated_input ||
1432 intrin->intrinsic == nir_intrinsic_load_fs_input_interp_deltas;
1433 }
1434
1435 static bool
1436 is_output(nir_intrinsic_instr *intrin)
1437 {
1438 return intrin->intrinsic == nir_intrinsic_load_output ||
1439 intrin->intrinsic == nir_intrinsic_load_per_vertex_output ||
1440 intrin->intrinsic == nir_intrinsic_store_output ||
1441 intrin->intrinsic == nir_intrinsic_store_per_vertex_output;
1442 }
1443
1444
1445 /**
1446 * This pass adds constant offsets to instr->const_index[0] for input/output
1447 * intrinsics, and resets the offset source to 0. Non-constant offsets remain
1448 * unchanged - since we don't know what part of a compound variable is
1449 * accessed, we allocate storage for the entire thing. For drivers that use
1450 * nir_lower_io_to_temporaries() before nir_lower_io(), this guarantees that
1451 * the offset source will be 0, so that they don't have to add it in manually.
1452 */
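/*
 * For example (editor's sketch): load_per_vertex_input with base=1 and a
 * constant offset source of 2 becomes base=3 with an offset source of
 * load_const 0, while a non-constant (indirect) offset is left untouched.
 */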
1453
1454 static bool
1455 add_const_offset_to_base_block(nir_block *block, nir_builder *b,
1456 nir_variable_mode mode)
1457 {
1458 bool progress = false;
1459 nir_foreach_instr_safe(instr, block) {
1460 if (instr->type != nir_instr_type_intrinsic)
1461 continue;
1462
1463 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
1464
1465 if ((mode == nir_var_shader_in && is_input(intrin)) ||
1466 (mode == nir_var_shader_out && is_output(intrin))) {
1467 nir_src *offset = nir_get_io_offset_src(intrin);
1468
1469 if (nir_src_is_const(*offset)) {
1470 intrin->const_index[0] += nir_src_as_uint(*offset);
1471 b->cursor = nir_before_instr(&intrin->instr);
1472 nir_instr_rewrite_src(&intrin->instr, offset,
1473 nir_src_for_ssa(nir_imm_int(b, 0)));
1474 progress = true;
1475 }
1476 }
1477 }
1478
1479 return progress;
1480 }
1481
1482 bool
1483 nir_io_add_const_offset_to_base(nir_shader *nir, nir_variable_mode mode)
1484 {
1485 bool progress = false;
1486
1487 nir_foreach_function(f, nir) {
1488 if (f->impl) {
1489 nir_builder b;
1490 nir_builder_init(&b, f->impl);
1491 nir_foreach_block(block, f->impl) {
1492 progress |= add_const_offset_to_base_block(block, &b, mode);
1493 }
1494 }
1495 }
1496
1497 return progress;
1498 }
1499