nir: Assert that nir_lower_io is only called with allowed modes
[mesa.git] src/compiler/nir/nir_lower_io.c
1 /*
2 * Copyright © 2014 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Connor Abbott (cwabbott0@gmail.com)
25 * Jason Ekstrand (jason@jlekstrand.net)
26 *
27 */
28
29 /*
30 * This lowering pass replaces loads and stores of input/output variables
31 * with the corresponding input/output intrinsics.
32 */
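/*
 * Illustrative sketch (not exact NIR syntax): a load through a variable
 * deref such as
 *
 *    x = load_deref(&color)            // color is a shader_in variable
 *
 * is rewritten into a flat intrinsic addressed by the variable's
 * driver_location plus an offset computed by a driver-supplied type_size
 * callback:
 *
 *    x = load_input(0)                 // base = color->data.driver_location
 */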
33
34 #include "nir.h"
35 #include "nir_builder.h"
36 #include "nir_deref.h"
37
38 #include "util/u_math.h"
39
40 struct lower_io_state {
41 void *dead_ctx;
42 nir_builder builder;
43 int (*type_size)(const struct glsl_type *type, bool);
44 nir_variable_mode modes;
45 nir_lower_io_options options;
46 };
47
48 static nir_intrinsic_op
49 ssbo_atomic_for_deref(nir_intrinsic_op deref_op)
50 {
51 switch (deref_op) {
52 #define OP(O) case nir_intrinsic_deref_##O: return nir_intrinsic_ssbo_##O;
53 OP(atomic_exchange)
54 OP(atomic_comp_swap)
55 OP(atomic_add)
56 OP(atomic_imin)
57 OP(atomic_umin)
58 OP(atomic_imax)
59 OP(atomic_umax)
60 OP(atomic_and)
61 OP(atomic_or)
62 OP(atomic_xor)
63 OP(atomic_fadd)
64 OP(atomic_fmin)
65 OP(atomic_fmax)
66 OP(atomic_fcomp_swap)
67 #undef OP
68 default:
69 unreachable("Invalid SSBO atomic");
70 }
71 }
72
73 static nir_intrinsic_op
74 global_atomic_for_deref(nir_intrinsic_op deref_op)
75 {
76 switch (deref_op) {
77 #define OP(O) case nir_intrinsic_deref_##O: return nir_intrinsic_global_##O;
78 OP(atomic_exchange)
79 OP(atomic_comp_swap)
80 OP(atomic_add)
81 OP(atomic_imin)
82 OP(atomic_umin)
83 OP(atomic_imax)
84 OP(atomic_umax)
85 OP(atomic_and)
86 OP(atomic_or)
87 OP(atomic_xor)
88 OP(atomic_fadd)
89 OP(atomic_fmin)
90 OP(atomic_fmax)
91 OP(atomic_fcomp_swap)
92 #undef OP
93 default:
94 unreachable("Invalid global atomic");
95 }
96 }
97
98 static nir_intrinsic_op
99 shared_atomic_for_deref(nir_intrinsic_op deref_op)
100 {
101 switch (deref_op) {
102 #define OP(O) case nir_intrinsic_deref_##O: return nir_intrinsic_shared_##O;
103 OP(atomic_exchange)
104 OP(atomic_comp_swap)
105 OP(atomic_add)
106 OP(atomic_imin)
107 OP(atomic_umin)
108 OP(atomic_imax)
109 OP(atomic_umax)
110 OP(atomic_and)
111 OP(atomic_or)
112 OP(atomic_xor)
113 OP(atomic_fadd)
114 OP(atomic_fmin)
115 OP(atomic_fmax)
116 OP(atomic_fcomp_swap)
117 #undef OP
118 default:
119 unreachable("Invalid shared atomic");
120 }
121 }
122
123 void
124 nir_assign_var_locations(struct exec_list *var_list, unsigned *size,
125 int (*type_size)(const struct glsl_type *, bool))
126 {
127 unsigned location = 0;
128
129 nir_foreach_variable(var, var_list) {
130 /*
131 * UBOs and SSBOs have their own address spaces, so don't count them
132 * towards the number of global uniforms
133 */
134 if (var->data.mode == nir_var_mem_ubo || var->data.mode == nir_var_mem_ssbo)
135 continue;
136
137 var->data.driver_location = location;
138 bool bindless_type_size = var->data.mode == nir_var_shader_in ||
139 var->data.mode == nir_var_shader_out ||
140 var->data.bindless;
141 location += type_size(var->type, bindless_type_size);
142 }
143
144 *size = location;
145 }
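/* Illustrative use (count_vec4_slots is a hypothetical callback; real drivers
 * supply a type_size function matching their own layout rules):
 *
 *    static int
 *    count_vec4_slots(const struct glsl_type *type, bool bindless)
 *    {
 *       return glsl_count_attribute_slots(type, false);
 *    }
 *
 *    nir_assign_var_locations(&shader->uniforms, &shader->num_uniforms,
 *                             count_vec4_slots);
 */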
146
147 /**
148 * Return true if the given variable is a per-vertex input/output array
149 * (such as geometry shader inputs).
150 */
151 bool
152 nir_is_per_vertex_io(const nir_variable *var, gl_shader_stage stage)
153 {
154 if (var->data.patch || !glsl_type_is_array(var->type))
155 return false;
156
157 if (var->data.mode == nir_var_shader_in)
158 return stage == MESA_SHADER_GEOMETRY ||
159 stage == MESA_SHADER_TESS_CTRL ||
160 stage == MESA_SHADER_TESS_EVAL;
161
162 if (var->data.mode == nir_var_shader_out)
163 return stage == MESA_SHADER_TESS_CTRL;
164
165 return false;
166 }
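/* For example, a geometry shader input declared in GLSL as "in vec4 color[];"
 * is per-vertex (the outer array dimension is indexed by vertex), while the
 * same declaration as a fragment shader input is not.
 */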
167
168 static nir_ssa_def *
169 get_io_offset(nir_builder *b, nir_deref_instr *deref,
170 nir_ssa_def **vertex_index,
171 int (*type_size)(const struct glsl_type *, bool),
172 unsigned *component, bool bts)
173 {
174 nir_deref_path path;
175 nir_deref_path_init(&path, deref, NULL);
176
177 assert(path.path[0]->deref_type == nir_deref_type_var);
178 nir_deref_instr **p = &path.path[1];
179
180 /* For per-vertex input arrays (i.e. geometry shader inputs), keep the
181 * outermost array index separate. Process the rest normally.
182 */
183 if (vertex_index != NULL) {
184 assert((*p)->deref_type == nir_deref_type_array);
185 *vertex_index = nir_ssa_for_src(b, (*p)->arr.index, 1);
186 p++;
187 }
188
189 if (path.path[0]->var->data.compact) {
190 assert((*p)->deref_type == nir_deref_type_array);
191 assert(glsl_type_is_scalar((*p)->type));
192
193 /* We always lower indirect dereferences for "compact" array vars. */
194 const unsigned index = nir_src_as_uint((*p)->arr.index);
195 const unsigned total_offset = *component + index;
196 const unsigned slot_offset = total_offset / 4;
197 *component = total_offset % 4;
198 return nir_imm_int(b, type_size(glsl_vec4_type(), bts) * slot_offset);
199 }
200
201 /* Just emit code and let constant-folding go to town */
202 nir_ssa_def *offset = nir_imm_int(b, 0);
203
204 for (; *p; p++) {
205 if ((*p)->deref_type == nir_deref_type_array) {
206 unsigned size = type_size((*p)->type, bts);
207
208 nir_ssa_def *mul =
209 nir_amul_imm(b, nir_ssa_for_src(b, (*p)->arr.index, 1), size);
210
211 offset = nir_iadd(b, offset, mul);
212 } else if ((*p)->deref_type == nir_deref_type_struct) {
213 /* p starts at path[1], so this is safe */
214 nir_deref_instr *parent = *(p - 1);
215
216 unsigned field_offset = 0;
217 for (unsigned i = 0; i < (*p)->strct.index; i++) {
218 field_offset += type_size(glsl_get_struct_field(parent->type, i), bts);
219 }
220 offset = nir_iadd_imm(b, offset, field_offset);
221 } else {
222 unreachable("Unsupported deref type");
223 }
224 }
225
226 nir_deref_path_finish(&path);
227
228 return offset;
229 }
230
231 static nir_ssa_def *
232 emit_load(struct lower_io_state *state,
233 nir_ssa_def *vertex_index, nir_variable *var, nir_ssa_def *offset,
234 unsigned component, unsigned num_components, unsigned bit_size,
235 nir_alu_type type)
236 {
237 nir_builder *b = &state->builder;
238 const nir_shader *nir = b->shader;
239 nir_variable_mode mode = var->data.mode;
240 nir_ssa_def *barycentric = NULL;
241
242 nir_intrinsic_op op;
243 switch (mode) {
244 case nir_var_shader_in:
245 if (nir->info.stage == MESA_SHADER_FRAGMENT &&
246 nir->options->use_interpolated_input_intrinsics &&
247 var->data.interpolation != INTERP_MODE_FLAT) {
248 if (var->data.interpolation == INTERP_MODE_EXPLICIT) {
249 assert(vertex_index != NULL);
250 op = nir_intrinsic_load_input_vertex;
251 } else {
252 assert(vertex_index == NULL);
253
254 nir_intrinsic_op bary_op;
255 if (var->data.sample ||
256 (state->options & nir_lower_io_force_sample_interpolation))
257 bary_op = nir_intrinsic_load_barycentric_sample;
258 else if (var->data.centroid)
259 bary_op = nir_intrinsic_load_barycentric_centroid;
260 else
261 bary_op = nir_intrinsic_load_barycentric_pixel;
262
263 barycentric = nir_load_barycentric(&state->builder, bary_op,
264 var->data.interpolation);
265 op = nir_intrinsic_load_interpolated_input;
266 }
267 } else {
268 op = vertex_index ? nir_intrinsic_load_per_vertex_input :
269 nir_intrinsic_load_input;
270 }
271 break;
272 case nir_var_shader_out:
273 op = vertex_index ? nir_intrinsic_load_per_vertex_output :
274 nir_intrinsic_load_output;
275 break;
276 case nir_var_uniform:
277 op = nir_intrinsic_load_uniform;
278 break;
279 case nir_var_mem_shared:
280 op = nir_intrinsic_load_shared;
281 break;
282 default:
283 unreachable("Unknown variable mode");
284 }
285
286 nir_intrinsic_instr *load =
287 nir_intrinsic_instr_create(state->builder.shader, op);
288 load->num_components = num_components;
289
290 nir_intrinsic_set_base(load, var->data.driver_location);
291 if (mode == nir_var_shader_in || mode == nir_var_shader_out)
292 nir_intrinsic_set_component(load, component);
293
294 if (load->intrinsic == nir_intrinsic_load_uniform)
295 nir_intrinsic_set_range(load,
296 state->type_size(var->type, var->data.bindless));
297
298 if (load->intrinsic == nir_intrinsic_load_input ||
299 load->intrinsic == nir_intrinsic_load_input_vertex ||
300 load->intrinsic == nir_intrinsic_load_uniform)
301 nir_intrinsic_set_type(load, type);
302
303 if (vertex_index) {
304 load->src[0] = nir_src_for_ssa(vertex_index);
305 load->src[1] = nir_src_for_ssa(offset);
306 } else if (barycentric) {
307 load->src[0] = nir_src_for_ssa(barycentric);
308 load->src[1] = nir_src_for_ssa(offset);
309 } else {
310 load->src[0] = nir_src_for_ssa(offset);
311 }
312
313 nir_ssa_dest_init(&load->instr, &load->dest,
314 num_components, bit_size, NULL);
315 nir_builder_instr_insert(b, &load->instr);
316
317 return &load->dest.ssa;
318 }
319
320 static nir_ssa_def *
321 lower_load(nir_intrinsic_instr *intrin, struct lower_io_state *state,
322 nir_ssa_def *vertex_index, nir_variable *var, nir_ssa_def *offset,
323 unsigned component, const struct glsl_type *type)
324 {
325 assert(intrin->dest.is_ssa);
326 if (intrin->dest.ssa.bit_size == 64 &&
327 (state->options & nir_lower_io_lower_64bit_to_32)) {
328 nir_builder *b = &state->builder;
329
330 const unsigned slot_size = state->type_size(glsl_dvec_type(2), false);
331
332 nir_ssa_def *comp64[4];
333 assert(component == 0 || component == 2);
334 unsigned dest_comp = 0;
335 while (dest_comp < intrin->dest.ssa.num_components) {
336 const unsigned num_comps =
337 MIN2(intrin->dest.ssa.num_components - dest_comp,
338 (4 - component) / 2);
339
340 nir_ssa_def *data32 =
341 emit_load(state, vertex_index, var, offset, component,
342 num_comps * 2, 32, nir_type_uint32);
343 for (unsigned i = 0; i < num_comps; i++) {
344 comp64[dest_comp + i] =
345 nir_pack_64_2x32(b, nir_channels(b, data32, 3 << (i * 2)));
346 }
347
348 /* Only the first load has a component offset */
349 component = 0;
350 dest_comp += num_comps;
351 offset = nir_iadd_imm(b, offset, slot_size);
352 }
353
354 return nir_vec(b, comp64, intrin->dest.ssa.num_components);
355 } else if (intrin->dest.ssa.bit_size == 1) {
356 /* Booleans are 32-bit */
357 assert(glsl_type_is_boolean(type));
358 return nir_b2b1(&state->builder,
359 emit_load(state, vertex_index, var, offset, component,
360 intrin->dest.ssa.num_components, 32,
361 nir_type_bool32));
362 } else {
363 return emit_load(state, vertex_index, var, offset, component,
364 intrin->dest.ssa.num_components,
365 intrin->dest.ssa.bit_size,
366 nir_get_nir_type_for_glsl_type(type));
367 }
368 }
369
370 static void
371 emit_store(struct lower_io_state *state, nir_ssa_def *data,
372 nir_ssa_def *vertex_index, nir_variable *var, nir_ssa_def *offset,
373 unsigned component, unsigned num_components,
374 nir_component_mask_t write_mask, nir_alu_type type)
375 {
376 nir_builder *b = &state->builder;
377 nir_variable_mode mode = var->data.mode;
378
379 nir_intrinsic_op op;
380 if (mode == nir_var_mem_shared) {
381 op = nir_intrinsic_store_shared;
382 } else {
383 assert(mode == nir_var_shader_out);
384 op = vertex_index ? nir_intrinsic_store_per_vertex_output :
385 nir_intrinsic_store_output;
386 }
387
388 nir_intrinsic_instr *store =
389 nir_intrinsic_instr_create(state->builder.shader, op);
390 store->num_components = num_components;
391
392 store->src[0] = nir_src_for_ssa(data);
393
394 nir_intrinsic_set_base(store, var->data.driver_location);
395
396 if (mode == nir_var_shader_out)
397 nir_intrinsic_set_component(store, component);
398
399 if (store->intrinsic == nir_intrinsic_store_output)
400 nir_intrinsic_set_type(store, type);
401
402 nir_intrinsic_set_write_mask(store, write_mask);
403
404 if (vertex_index)
405 store->src[1] = nir_src_for_ssa(vertex_index);
406
407 store->src[vertex_index ? 2 : 1] = nir_src_for_ssa(offset);
408
409 nir_builder_instr_insert(b, &store->instr);
410 }
411
412 static void
413 lower_store(nir_intrinsic_instr *intrin, struct lower_io_state *state,
414 nir_ssa_def *vertex_index, nir_variable *var, nir_ssa_def *offset,
415 unsigned component, const struct glsl_type *type)
416 {
417 assert(intrin->src[1].is_ssa);
418 if (intrin->src[1].ssa->bit_size == 64 &&
419 (state->options & nir_lower_io_lower_64bit_to_32)) {
420 nir_builder *b = &state->builder;
421
422 const unsigned slot_size = state->type_size(glsl_dvec_type(2), false);
423
424 assert(component == 0 || component == 2);
425 unsigned src_comp = 0;
426 nir_component_mask_t write_mask = nir_intrinsic_write_mask(intrin);
427 while (src_comp < intrin->num_components) {
428 const unsigned num_comps =
429 MIN2(intrin->num_components - src_comp,
430 (4 - component) / 2);
431
432 if (write_mask & BITFIELD_MASK(num_comps)) {
433 nir_ssa_def *data =
434 nir_channels(b, intrin->src[1].ssa,
435 BITFIELD_RANGE(src_comp, num_comps));
436 nir_ssa_def *data32 = nir_bitcast_vector(b, data, 32);
437
438 nir_component_mask_t write_mask32 = 0;
439 for (unsigned i = 0; i < num_comps; i++) {
440 if (write_mask & BITFIELD_MASK(num_comps) & (1 << i))
441 write_mask32 |= 3 << (i * 2);
442 }
443
444 emit_store(state, data32, vertex_index, var, offset,
445 component, data32->num_components, write_mask32,
446 nir_type_uint32);
447 }
448
449 /* Only the first store has a component offset */
450 component = 0;
451 src_comp += num_comps;
452 write_mask >>= num_comps;
453 offset = nir_iadd_imm(b, offset, slot_size);
454 }
455 } else if (intrin->src[1].ssa->bit_size == 1) {
456 /* Booleans are 32-bit */
457 assert(glsl_type_is_boolean(type));
458 nir_ssa_def *b32_val = nir_b2b32(&state->builder, intrin->src[1].ssa);
459 emit_store(state, b32_val, vertex_index, var, offset,
460 component, intrin->num_components,
461 nir_intrinsic_write_mask(intrin),
462 nir_type_bool32);
463 } else {
464 emit_store(state, intrin->src[1].ssa, vertex_index, var, offset,
465 component, intrin->num_components,
466 nir_intrinsic_write_mask(intrin),
467 nir_get_nir_type_for_glsl_type(type));
468 }
469 }
470
471 static nir_ssa_def *
472 lower_atomic(nir_intrinsic_instr *intrin, struct lower_io_state *state,
473 nir_variable *var, nir_ssa_def *offset)
474 {
475 nir_builder *b = &state->builder;
476 assert(var->data.mode == nir_var_mem_shared);
477
478 nir_intrinsic_op op = shared_atomic_for_deref(intrin->intrinsic);
479
480 nir_intrinsic_instr *atomic =
481 nir_intrinsic_instr_create(state->builder.shader, op);
482
483 nir_intrinsic_set_base(atomic, var->data.driver_location);
484
485 atomic->src[0] = nir_src_for_ssa(offset);
486 assert(nir_intrinsic_infos[intrin->intrinsic].num_srcs ==
487 nir_intrinsic_infos[op].num_srcs);
488 for (unsigned i = 1; i < nir_intrinsic_infos[op].num_srcs; i++) {
489 nir_src_copy(&atomic->src[i], &intrin->src[i], atomic);
490 }
491
492 if (nir_intrinsic_infos[op].has_dest) {
493 assert(intrin->dest.is_ssa);
494 assert(nir_intrinsic_infos[intrin->intrinsic].has_dest);
495 nir_ssa_dest_init(&atomic->instr, &atomic->dest,
496 intrin->dest.ssa.num_components,
497 intrin->dest.ssa.bit_size, NULL);
498 }
499
500 nir_builder_instr_insert(b, &atomic->instr);
501
502 return nir_intrinsic_infos[op].has_dest ? &atomic->dest.ssa : NULL;
503 }
504
505 static nir_ssa_def *
506 lower_interpolate_at(nir_intrinsic_instr *intrin, struct lower_io_state *state,
507 nir_variable *var, nir_ssa_def *offset, unsigned component,
508 const struct glsl_type *type)
509 {
510 nir_builder *b = &state->builder;
511 assert(var->data.mode == nir_var_shader_in);
512
513 /* Ignore interpolateAt() for flat variables - flat is flat. Lower
514 * interpolateAtVertex() for explicit variables.
515 */
516 if (var->data.interpolation == INTERP_MODE_FLAT ||
517 var->data.interpolation == INTERP_MODE_EXPLICIT) {
518 nir_ssa_def *vertex_index = NULL;
519
520 if (var->data.interpolation == INTERP_MODE_EXPLICIT) {
521 assert(intrin->intrinsic == nir_intrinsic_interp_deref_at_vertex);
522 vertex_index = intrin->src[1].ssa;
523 }
524
525 return lower_load(intrin, state, vertex_index, var, offset, component, type);
526 }
527
528 /* None of the supported APIs allow interpolation on 64-bit things */
529 assert(intrin->dest.is_ssa && intrin->dest.ssa.bit_size <= 32);
530
531 nir_intrinsic_op bary_op;
532 switch (intrin->intrinsic) {
533 case nir_intrinsic_interp_deref_at_centroid:
534 bary_op = (state->options & nir_lower_io_force_sample_interpolation) ?
535 nir_intrinsic_load_barycentric_sample :
536 nir_intrinsic_load_barycentric_centroid;
537 break;
538 case nir_intrinsic_interp_deref_at_sample:
539 bary_op = nir_intrinsic_load_barycentric_at_sample;
540 break;
541 case nir_intrinsic_interp_deref_at_offset:
542 bary_op = nir_intrinsic_load_barycentric_at_offset;
543 break;
544 default:
545 unreachable("Bogus interpolateAt() intrinsic.");
546 }
547
548 nir_intrinsic_instr *bary_setup =
549 nir_intrinsic_instr_create(state->builder.shader, bary_op);
550
551 nir_ssa_dest_init(&bary_setup->instr, &bary_setup->dest, 2, 32, NULL);
552 nir_intrinsic_set_interp_mode(bary_setup, var->data.interpolation);
553
554 if (intrin->intrinsic == nir_intrinsic_interp_deref_at_sample ||
555 intrin->intrinsic == nir_intrinsic_interp_deref_at_offset ||
556 intrin->intrinsic == nir_intrinsic_interp_deref_at_vertex)
557 nir_src_copy(&bary_setup->src[0], &intrin->src[1], bary_setup);
558
559 nir_builder_instr_insert(b, &bary_setup->instr);
560
561 nir_intrinsic_instr *load =
562 nir_intrinsic_instr_create(state->builder.shader,
563 nir_intrinsic_load_interpolated_input);
564 load->num_components = intrin->num_components;
565
566 nir_intrinsic_set_base(load, var->data.driver_location);
567 nir_intrinsic_set_component(load, component);
568
569 load->src[0] = nir_src_for_ssa(&bary_setup->dest.ssa);
570 load->src[1] = nir_src_for_ssa(offset);
571
572 assert(intrin->dest.is_ssa);
573 nir_ssa_dest_init(&load->instr, &load->dest,
574 intrin->dest.ssa.num_components,
575 intrin->dest.ssa.bit_size, NULL);
576 nir_builder_instr_insert(b, &load->instr);
577
578 return &load->dest.ssa;
579 }
580
581 static bool
582 nir_lower_io_block(nir_block *block,
583 struct lower_io_state *state)
584 {
585 nir_builder *b = &state->builder;
586 const nir_shader_compiler_options *options = b->shader->options;
587 bool progress = false;
588
589 nir_foreach_instr_safe(instr, block) {
590 if (instr->type != nir_instr_type_intrinsic)
591 continue;
592
593 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
594
595 switch (intrin->intrinsic) {
596 case nir_intrinsic_load_deref:
597 case nir_intrinsic_store_deref:
598 case nir_intrinsic_deref_atomic_add:
599 case nir_intrinsic_deref_atomic_imin:
600 case nir_intrinsic_deref_atomic_umin:
601 case nir_intrinsic_deref_atomic_imax:
602 case nir_intrinsic_deref_atomic_umax:
603 case nir_intrinsic_deref_atomic_and:
604 case nir_intrinsic_deref_atomic_or:
605 case nir_intrinsic_deref_atomic_xor:
606 case nir_intrinsic_deref_atomic_exchange:
607 case nir_intrinsic_deref_atomic_comp_swap:
608 case nir_intrinsic_deref_atomic_fadd:
609 case nir_intrinsic_deref_atomic_fmin:
610 case nir_intrinsic_deref_atomic_fmax:
611 case nir_intrinsic_deref_atomic_fcomp_swap:
612 /* We can lower the I/O for this NIR intrinsic */
613 break;
614 case nir_intrinsic_interp_deref_at_centroid:
615 case nir_intrinsic_interp_deref_at_sample:
616 case nir_intrinsic_interp_deref_at_offset:
617 case nir_intrinsic_interp_deref_at_vertex:
618 /* We can optionally lower these to load_interpolated_input */
619 if (options->use_interpolated_input_intrinsics)
620 break;
621 default:
622 /* We can't lower the I/O for this NIR intrinsic, so skip it */
623 continue;
624 }
625
626 nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
627
628 nir_variable_mode mode = deref->mode;
629 assert(util_is_power_of_two_nonzero(mode));
630 if ((state->modes & mode) == 0)
631 continue;
632
633 nir_variable *var = nir_deref_instr_get_variable(deref);
634
635 b->cursor = nir_before_instr(instr);
636
637 const bool per_vertex = nir_is_per_vertex_io(var, b->shader->info.stage);
638
639 nir_ssa_def *offset;
640 nir_ssa_def *vertex_index = NULL;
641 unsigned component_offset = var->data.location_frac;
642 bool bindless_type_size = mode == nir_var_shader_in ||
643 mode == nir_var_shader_out ||
644 var->data.bindless;
645
646 offset = get_io_offset(b, deref, per_vertex ? &vertex_index : NULL,
647 state->type_size, &component_offset,
648 bindless_type_size);
649
650 nir_ssa_def *replacement = NULL;
651
652 switch (intrin->intrinsic) {
653 case nir_intrinsic_load_deref:
654 replacement = lower_load(intrin, state, vertex_index, var, offset,
655 component_offset, deref->type);
656 break;
657
658 case nir_intrinsic_store_deref:
659 lower_store(intrin, state, vertex_index, var, offset,
660 component_offset, deref->type);
661 break;
662
663 case nir_intrinsic_deref_atomic_add:
664 case nir_intrinsic_deref_atomic_imin:
665 case nir_intrinsic_deref_atomic_umin:
666 case nir_intrinsic_deref_atomic_imax:
667 case nir_intrinsic_deref_atomic_umax:
668 case nir_intrinsic_deref_atomic_and:
669 case nir_intrinsic_deref_atomic_or:
670 case nir_intrinsic_deref_atomic_xor:
671 case nir_intrinsic_deref_atomic_exchange:
672 case nir_intrinsic_deref_atomic_comp_swap:
673 case nir_intrinsic_deref_atomic_fadd:
674 case nir_intrinsic_deref_atomic_fmin:
675 case nir_intrinsic_deref_atomic_fmax:
676 case nir_intrinsic_deref_atomic_fcomp_swap:
677 assert(vertex_index == NULL);
678 replacement = lower_atomic(intrin, state, var, offset);
679 break;
680
681 case nir_intrinsic_interp_deref_at_centroid:
682 case nir_intrinsic_interp_deref_at_sample:
683 case nir_intrinsic_interp_deref_at_offset:
684 case nir_intrinsic_interp_deref_at_vertex:
685 assert(vertex_index == NULL);
686 replacement = lower_interpolate_at(intrin, state, var, offset,
687 component_offset, deref->type);
688 break;
689
690 default:
691 continue;
692 }
693
694 if (replacement) {
695 nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
696 nir_src_for_ssa(replacement));
697 }
698 nir_instr_remove(&intrin->instr);
699 progress = true;
700 }
701
702 return progress;
703 }
704
705 static bool
706 nir_lower_io_impl(nir_function_impl *impl,
707 nir_variable_mode modes,
708 int (*type_size)(const struct glsl_type *, bool),
709 nir_lower_io_options options)
710 {
711 struct lower_io_state state;
712 bool progress = false;
713
714 nir_builder_init(&state.builder, impl);
715 state.dead_ctx = ralloc_context(NULL);
716 state.modes = modes;
717 state.type_size = type_size;
718 state.options = options;
719
720 ASSERTED nir_variable_mode supported_modes =
721 nir_var_shader_in | nir_var_shader_out |
722 nir_var_mem_shared | nir_var_uniform;
723 assert(!(modes & ~supported_modes));
724
725 nir_foreach_block(block, impl) {
726 progress |= nir_lower_io_block(block, &state);
727 }
728
729 ralloc_free(state.dead_ctx);
730
731 nir_metadata_preserve(impl, nir_metadata_block_index |
732 nir_metadata_dominance);
733 return progress;
734 }
735
736 bool
737 nir_lower_io(nir_shader *shader, nir_variable_mode modes,
738 int (*type_size)(const struct glsl_type *, bool),
739 nir_lower_io_options options)
740 {
741 bool progress = false;
742
743 nir_foreach_function(function, shader) {
744 if (function->impl) {
745 progress |= nir_lower_io_impl(function->impl, modes,
746 type_size, options);
747 }
748 }
749
750 return progress;
751 }
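/* Illustrative call (driver_type_size_vec4 is a hypothetical callback):
 * lower vertex inputs/outputs and split 64-bit slots into 32-bit pairs.
 * Only nir_var_shader_in, nir_var_shader_out, nir_var_uniform and
 * nir_var_mem_shared are accepted here; other modes are handled by
 * nir_lower_explicit_io() below.
 *
 *    nir_lower_io(shader, nir_var_shader_in | nir_var_shader_out,
 *                 driver_type_size_vec4, nir_lower_io_lower_64bit_to_32);
 */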
752
753 static unsigned
754 type_scalar_size_bytes(const struct glsl_type *type)
755 {
756 assert(glsl_type_is_vector_or_scalar(type) ||
757 glsl_type_is_matrix(type));
758 return glsl_type_is_boolean(type) ? 4 : glsl_get_bit_size(type) / 8;
759 }
760
761 static nir_ssa_def *
762 build_addr_iadd(nir_builder *b, nir_ssa_def *addr,
763 nir_address_format addr_format, nir_ssa_def *offset)
764 {
765 assert(offset->num_components == 1);
766 assert(addr->bit_size == offset->bit_size);
767
768 switch (addr_format) {
769 case nir_address_format_32bit_global:
770 case nir_address_format_64bit_global:
771 case nir_address_format_32bit_offset:
772 assert(addr->num_components == 1);
773 return nir_iadd(b, addr, offset);
774
775 case nir_address_format_64bit_bounded_global:
776 assert(addr->num_components == 4);
777 return nir_vec4(b, nir_channel(b, addr, 0),
778 nir_channel(b, addr, 1),
779 nir_channel(b, addr, 2),
780 nir_iadd(b, nir_channel(b, addr, 3), offset));
781
782 case nir_address_format_32bit_index_offset:
783 assert(addr->num_components == 2);
784 return nir_vec2(b, nir_channel(b, addr, 0),
785 nir_iadd(b, nir_channel(b, addr, 1), offset));
786 case nir_address_format_vec2_index_32bit_offset:
787 assert(addr->num_components == 3);
788 return nir_vec3(b, nir_channel(b, addr, 0), nir_channel(b, addr, 1),
789 nir_iadd(b, nir_channel(b, addr, 2), offset));
790 case nir_address_format_logical:
791 unreachable("Unsupported address format");
792 }
793 unreachable("Invalid address format");
794 }
795
796 static nir_ssa_def *
797 build_addr_iadd_imm(nir_builder *b, nir_ssa_def *addr,
798 nir_address_format addr_format, int64_t offset)
799 {
800 return build_addr_iadd(b, addr, addr_format,
801 nir_imm_intN_t(b, offset, addr->bit_size));
802 }
803
804 static nir_ssa_def *
805 addr_to_index(nir_builder *b, nir_ssa_def *addr,
806 nir_address_format addr_format)
807 {
808 if (addr_format == nir_address_format_32bit_index_offset) {
809 assert(addr->num_components == 2);
810 return nir_channel(b, addr, 0);
811 } else if (addr_format == nir_address_format_vec2_index_32bit_offset) {
812 assert(addr->num_components == 3);
813 return nir_channels(b, addr, 0x3);
814 } else {
815 unreachable("bad address format for index");
816 }
817 }
818
819 static nir_ssa_def *
820 addr_to_offset(nir_builder *b, nir_ssa_def *addr,
821 nir_address_format addr_format)
822 {
823 if (addr_format == nir_address_format_32bit_index_offset) {
824 assert(addr->num_components == 2);
825 return nir_channel(b, addr, 1);
826 } else if (addr_format == nir_address_format_vec2_index_32bit_offset) {
827 assert(addr->num_components == 3);
828 return nir_channel(b, addr, 2);
829 } else {
830 unreachable("bad address format for offset");
831 }
832 }
833
834 /** Returns true if the given address format resolves to a global address */
835 static bool
836 addr_format_is_global(nir_address_format addr_format)
837 {
838 return addr_format == nir_address_format_32bit_global ||
839 addr_format == nir_address_format_64bit_global ||
840 addr_format == nir_address_format_64bit_bounded_global;
841 }
842
843 static nir_ssa_def *
844 addr_to_global(nir_builder *b, nir_ssa_def *addr,
845 nir_address_format addr_format)
846 {
847 switch (addr_format) {
848 case nir_address_format_32bit_global:
849 case nir_address_format_64bit_global:
850 assert(addr->num_components == 1);
851 return addr;
852
853 case nir_address_format_64bit_bounded_global:
854 assert(addr->num_components == 4);
855 return nir_iadd(b, nir_pack_64_2x32(b, nir_channels(b, addr, 0x3)),
856 nir_u2u64(b, nir_channel(b, addr, 3)));
857
858 case nir_address_format_32bit_index_offset:
859 case nir_address_format_vec2_index_32bit_offset:
860 case nir_address_format_32bit_offset:
861 case nir_address_format_logical:
862 unreachable("Cannot get a 64-bit address with this address format");
863 }
864
865 unreachable("Invalid address format");
866 }
867
868 static bool
869 addr_format_needs_bounds_check(nir_address_format addr_format)
870 {
871 return addr_format == nir_address_format_64bit_bounded_global;
872 }
873
874 static nir_ssa_def *
875 addr_is_in_bounds(nir_builder *b, nir_ssa_def *addr,
876 nir_address_format addr_format, unsigned size)
877 {
878 assert(addr_format == nir_address_format_64bit_bounded_global);
879 assert(addr->num_components == 4);
880 return nir_ige(b, nir_channel(b, addr, 2),
881 nir_iadd_imm(b, nir_channel(b, addr, 3), size));
882 }
883
884 static nir_ssa_def *
885 build_explicit_io_load(nir_builder *b, nir_intrinsic_instr *intrin,
886 nir_ssa_def *addr, nir_address_format addr_format,
887 unsigned num_components)
888 {
889 nir_variable_mode mode = nir_src_as_deref(intrin->src[0])->mode;
890
891 nir_intrinsic_op op;
892 switch (mode) {
893 case nir_var_mem_ubo:
894 op = nir_intrinsic_load_ubo;
895 break;
896 case nir_var_mem_ssbo:
897 if (addr_format_is_global(addr_format))
898 op = nir_intrinsic_load_global;
899 else
900 op = nir_intrinsic_load_ssbo;
901 break;
902 case nir_var_mem_global:
903 assert(addr_format_is_global(addr_format));
904 op = nir_intrinsic_load_global;
905 break;
906 case nir_var_shader_in:
907 assert(addr_format_is_global(addr_format));
908 op = nir_intrinsic_load_kernel_input;
909 break;
910 case nir_var_mem_shared:
911 assert(addr_format == nir_address_format_32bit_offset);
912 op = nir_intrinsic_load_shared;
913 break;
914 default:
915 unreachable("Unsupported explicit IO variable mode");
916 }
917
918 nir_intrinsic_instr *load = nir_intrinsic_instr_create(b->shader, op);
919
920 if (addr_format_is_global(addr_format)) {
921 load->src[0] = nir_src_for_ssa(addr_to_global(b, addr, addr_format));
922 } else if (addr_format == nir_address_format_32bit_offset) {
923 assert(addr->num_components == 1);
924 load->src[0] = nir_src_for_ssa(addr);
925 } else {
926 load->src[0] = nir_src_for_ssa(addr_to_index(b, addr, addr_format));
927 load->src[1] = nir_src_for_ssa(addr_to_offset(b, addr, addr_format));
928 }
929
930 if (mode != nir_var_shader_in && mode != nir_var_mem_shared)
931 nir_intrinsic_set_access(load, nir_intrinsic_access(intrin));
932
933 unsigned bit_size = intrin->dest.ssa.bit_size;
934 if (bit_size == 1) {
935 /* TODO: Make the native bool bit_size an option. */
936 bit_size = 32;
937 }
938
939 /* TODO: We should try and provide a better alignment. For OpenCL, we need
940 * to plumb the alignment through from SPIR-V when we have one.
941 */
942 nir_intrinsic_set_align(load, bit_size / 8, 0);
943
944 assert(intrin->dest.is_ssa);
945 load->num_components = num_components;
946 nir_ssa_dest_init(&load->instr, &load->dest, num_components,
947 bit_size, intrin->dest.ssa.name);
948
949 assert(bit_size % 8 == 0);
950
951 nir_ssa_def *result;
952 if (addr_format_needs_bounds_check(addr_format)) {
953 /* The Vulkan spec for robustBufferAccess gives us quite a few options
954 * as to what we can do with an OOB read. Unfortunately, returning
955 * undefined values isn't one of them so we return an actual zero.
956 */
957 nir_ssa_def *zero = nir_imm_zero(b, load->num_components, bit_size);
958
959 const unsigned load_size = (bit_size / 8) * load->num_components;
960 nir_push_if(b, addr_is_in_bounds(b, addr, addr_format, load_size));
961
962 nir_builder_instr_insert(b, &load->instr);
963
964 nir_pop_if(b, NULL);
965
966 result = nir_if_phi(b, &load->dest.ssa, zero);
967 } else {
968 nir_builder_instr_insert(b, &load->instr);
969 result = &load->dest.ssa;
970 }
971
972 if (intrin->dest.ssa.bit_size == 1) {
973 /* For shared, we can go ahead and use NIR's and/or the back-end's
974 * standard encoding for booleans rather than forcing a 0/1 boolean.
975 * This should save an instruction or two.
976 */
977 if (mode == nir_var_mem_shared)
978 result = nir_b2b1(b, result);
979 else
980 result = nir_i2b(b, result);
981 }
982
983 return result;
984 }
985
986 static void
987 build_explicit_io_store(nir_builder *b, nir_intrinsic_instr *intrin,
988 nir_ssa_def *addr, nir_address_format addr_format,
989 nir_ssa_def *value, nir_component_mask_t write_mask)
990 {
991 nir_variable_mode mode = nir_src_as_deref(intrin->src[0])->mode;
992
993 nir_intrinsic_op op;
994 switch (mode) {
995 case nir_var_mem_ssbo:
996 if (addr_format_is_global(addr_format))
997 op = nir_intrinsic_store_global;
998 else
999 op = nir_intrinsic_store_ssbo;
1000 break;
1001 case nir_var_mem_global:
1002 assert(addr_format_is_global(addr_format));
1003 op = nir_intrinsic_store_global;
1004 break;
1005 case nir_var_mem_shared:
1006 assert(addr_format == nir_address_format_32bit_offset);
1007 op = nir_intrinsic_store_shared;
1008 break;
1009 default:
1010 unreachable("Unsupported explicit IO variable mode");
1011 }
1012
1013 nir_intrinsic_instr *store = nir_intrinsic_instr_create(b->shader, op);
1014
1015 if (value->bit_size == 1) {
1016 /* For shared, we can go ahead and use NIR's and/or the back-end's
1017 * standard encoding for booleans rather than forcing a 0/1 boolean.
1018 * This should save an instruction or two.
1019 *
1020 * TODO: Make the native bool bit_size an option.
1021 */
1022 if (mode == nir_var_mem_shared)
1023 value = nir_b2b32(b, value);
1024 else
1025 value = nir_b2i(b, value, 32);
1026 }
1027
1028 store->src[0] = nir_src_for_ssa(value);
1029 if (addr_format_is_global(addr_format)) {
1030 store->src[1] = nir_src_for_ssa(addr_to_global(b, addr, addr_format));
1031 } else if (addr_format == nir_address_format_32bit_offset) {
1032 assert(addr->num_components == 1);
1033 store->src[1] = nir_src_for_ssa(addr);
1034 } else {
1035 store->src[1] = nir_src_for_ssa(addr_to_index(b, addr, addr_format));
1036 store->src[2] = nir_src_for_ssa(addr_to_offset(b, addr, addr_format));
1037 }
1038
1039 nir_intrinsic_set_write_mask(store, write_mask);
1040
1041 if (mode != nir_var_mem_shared)
1042 nir_intrinsic_set_access(store, nir_intrinsic_access(intrin));
1043
1044 /* TODO: We should try and provide a better alignment. For OpenCL, we need
1045 * to plumb the alignment through from SPIR-V when we have one.
1046 */
1047 nir_intrinsic_set_align(store, value->bit_size / 8, 0);
1048
1049 assert(value->num_components == 1 ||
1050 value->num_components == intrin->num_components);
1051 store->num_components = value->num_components;
1052
1053 assert(value->bit_size % 8 == 0);
1054
1055 if (addr_format_needs_bounds_check(addr_format)) {
1056 const unsigned store_size = (value->bit_size / 8) * store->num_components;
1057 nir_push_if(b, addr_is_in_bounds(b, addr, addr_format, store_size));
1058
1059 nir_builder_instr_insert(b, &store->instr);
1060
1061 nir_pop_if(b, NULL);
1062 } else {
1063 nir_builder_instr_insert(b, &store->instr);
1064 }
1065 }
1066
1067 static nir_ssa_def *
1068 build_explicit_io_atomic(nir_builder *b, nir_intrinsic_instr *intrin,
1069 nir_ssa_def *addr, nir_address_format addr_format)
1070 {
1071 nir_variable_mode mode = nir_src_as_deref(intrin->src[0])->mode;
1072 const unsigned num_data_srcs =
1073 nir_intrinsic_infos[intrin->intrinsic].num_srcs - 1;
1074
1075 nir_intrinsic_op op;
1076 switch (mode) {
1077 case nir_var_mem_ssbo:
1078 if (addr_format_is_global(addr_format))
1079 op = global_atomic_for_deref(intrin->intrinsic);
1080 else
1081 op = ssbo_atomic_for_deref(intrin->intrinsic);
1082 break;
1083 case nir_var_mem_global:
1084 assert(addr_format_is_global(addr_format));
1085 op = global_atomic_for_deref(intrin->intrinsic);
1086 break;
1087 case nir_var_mem_shared:
1088 assert(addr_format == nir_address_format_32bit_offset);
1089 op = shared_atomic_for_deref(intrin->intrinsic);
1090 break;
1091 default:
1092 unreachable("Unsupported explicit IO variable mode");
1093 }
1094
1095 nir_intrinsic_instr *atomic = nir_intrinsic_instr_create(b->shader, op);
1096
1097 unsigned src = 0;
1098 if (addr_format_is_global(addr_format)) {
1099 atomic->src[src++] = nir_src_for_ssa(addr_to_global(b, addr, addr_format));
1100 } else if (addr_format == nir_address_format_32bit_offset) {
1101 assert(addr->num_components == 1);
1102 atomic->src[src++] = nir_src_for_ssa(addr);
1103 } else {
1104 atomic->src[src++] = nir_src_for_ssa(addr_to_index(b, addr, addr_format));
1105 atomic->src[src++] = nir_src_for_ssa(addr_to_offset(b, addr, addr_format));
1106 }
1107 for (unsigned i = 0; i < num_data_srcs; i++) {
1108 atomic->src[src++] = nir_src_for_ssa(intrin->src[1 + i].ssa);
1109 }
1110
1111 /* Global atomics don't have access flags because they assume that the
1112 * address may be non-uniform.
1113 */
1114 if (!addr_format_is_global(addr_format) && mode != nir_var_mem_shared)
1115 nir_intrinsic_set_access(atomic, nir_intrinsic_access(intrin));
1116
1117 assert(intrin->dest.ssa.num_components == 1);
1118 nir_ssa_dest_init(&atomic->instr, &atomic->dest,
1119 1, intrin->dest.ssa.bit_size, intrin->dest.ssa.name);
1120
1121 assert(atomic->dest.ssa.bit_size % 8 == 0);
1122
1123 if (addr_format_needs_bounds_check(addr_format)) {
1124 const unsigned atomic_size = atomic->dest.ssa.bit_size / 8;
1125 nir_push_if(b, addr_is_in_bounds(b, addr, addr_format, atomic_size));
1126
1127 nir_builder_instr_insert(b, &atomic->instr);
1128
1129 nir_pop_if(b, NULL);
1130 return nir_if_phi(b, &atomic->dest.ssa,
1131 nir_ssa_undef(b, 1, atomic->dest.ssa.bit_size));
1132 } else {
1133 nir_builder_instr_insert(b, &atomic->instr);
1134 return &atomic->dest.ssa;
1135 }
1136 }
1137
1138 nir_ssa_def *
1139 nir_explicit_io_address_from_deref(nir_builder *b, nir_deref_instr *deref,
1140 nir_ssa_def *base_addr,
1141 nir_address_format addr_format)
1142 {
1143 assert(deref->dest.is_ssa);
1144 switch (deref->deref_type) {
1145 case nir_deref_type_var:
1146 assert(deref->mode & (nir_var_shader_in | nir_var_mem_shared));
1147 return nir_imm_intN_t(b, deref->var->data.driver_location,
1148 deref->dest.ssa.bit_size);
1149
1150 case nir_deref_type_array: {
1151 nir_deref_instr *parent = nir_deref_instr_parent(deref);
1152
1153 unsigned stride = glsl_get_explicit_stride(parent->type);
1154 if ((glsl_type_is_matrix(parent->type) &&
1155 glsl_matrix_type_is_row_major(parent->type)) ||
1156 (glsl_type_is_vector(parent->type) && stride == 0))
1157 stride = type_scalar_size_bytes(parent->type);
1158
1159 assert(stride > 0);
1160
1161 nir_ssa_def *index = nir_ssa_for_src(b, deref->arr.index, 1);
1162 index = nir_i2i(b, index, base_addr->bit_size);
1163 return build_addr_iadd(b, base_addr, addr_format,
1164 nir_amul_imm(b, index, stride));
1165 }
1166
1167 case nir_deref_type_ptr_as_array: {
1168 nir_ssa_def *index = nir_ssa_for_src(b, deref->arr.index, 1);
1169 index = nir_i2i(b, index, base_addr->bit_size);
1170 unsigned stride = nir_deref_instr_ptr_as_array_stride(deref);
1171 return build_addr_iadd(b, base_addr, addr_format,
1172 nir_amul_imm(b, index, stride));
1173 }
1174
1175 case nir_deref_type_array_wildcard:
1176 unreachable("Wildcards should be lowered by now");
1177 break;
1178
1179 case nir_deref_type_struct: {
1180 nir_deref_instr *parent = nir_deref_instr_parent(deref);
1181 int offset = glsl_get_struct_field_offset(parent->type,
1182 deref->strct.index);
1183 assert(offset >= 0);
1184 return build_addr_iadd_imm(b, base_addr, addr_format, offset);
1185 }
1186
1187 case nir_deref_type_cast:
1188 /* Nothing to do here */
1189 return base_addr;
1190 }
1191
1192 unreachable("Invalid NIR deref type");
1193 }
1194
1195 void
1196 nir_lower_explicit_io_instr(nir_builder *b,
1197 nir_intrinsic_instr *intrin,
1198 nir_ssa_def *addr,
1199 nir_address_format addr_format)
1200 {
1201 b->cursor = nir_after_instr(&intrin->instr);
1202
1203 nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
1204 unsigned vec_stride = glsl_get_explicit_stride(deref->type);
1205 unsigned scalar_size = type_scalar_size_bytes(deref->type);
1206 assert(vec_stride == 0 || glsl_type_is_vector(deref->type));
1207 assert(vec_stride == 0 || vec_stride >= scalar_size);
1208
1209 if (intrin->intrinsic == nir_intrinsic_load_deref) {
1210 nir_ssa_def *value;
1211 if (vec_stride > scalar_size) {
1212 nir_ssa_def *comps[4] = { NULL, };
1213 for (unsigned i = 0; i < intrin->num_components; i++) {
1214 nir_ssa_def *comp_addr = build_addr_iadd_imm(b, addr, addr_format,
1215 vec_stride * i);
1216 comps[i] = build_explicit_io_load(b, intrin, comp_addr,
1217 addr_format, 1);
1218 }
1219 value = nir_vec(b, comps, intrin->num_components);
1220 } else {
1221 value = build_explicit_io_load(b, intrin, addr, addr_format,
1222 intrin->num_components);
1223 }
1224 nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(value));
1225 } else if (intrin->intrinsic == nir_intrinsic_store_deref) {
1226 assert(intrin->src[1].is_ssa);
1227 nir_ssa_def *value = intrin->src[1].ssa;
1228 nir_component_mask_t write_mask = nir_intrinsic_write_mask(intrin);
1229 if (vec_stride > scalar_size) {
1230 for (unsigned i = 0; i < intrin->num_components; i++) {
1231 if (!(write_mask & (1 << i)))
1232 continue;
1233
1234 nir_ssa_def *comp_addr = build_addr_iadd_imm(b, addr, addr_format,
1235 vec_stride * i);
1236 build_explicit_io_store(b, intrin, comp_addr, addr_format,
1237 nir_channel(b, value, i), 1);
1238 }
1239 } else {
1240 build_explicit_io_store(b, intrin, addr, addr_format,
1241 value, write_mask);
1242 }
1243 } else {
1244 nir_ssa_def *value =
1245 build_explicit_io_atomic(b, intrin, addr, addr_format);
1246 nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(value));
1247 }
1248
1249 nir_instr_remove(&intrin->instr);
1250 }
1251
1252 static void
1253 lower_explicit_io_deref(nir_builder *b, nir_deref_instr *deref,
1254 nir_address_format addr_format)
1255 {
1256 /* Just delete the deref if it's not used. We can't use
1257 * nir_deref_instr_remove_if_unused here because it may remove more than
1258 * one deref which could break our list walking since we walk the list
1259 * backwards.
1260 */
1261 assert(list_is_empty(&deref->dest.ssa.if_uses));
1262 if (list_is_empty(&deref->dest.ssa.uses)) {
1263 nir_instr_remove(&deref->instr);
1264 return;
1265 }
1266
1267 b->cursor = nir_after_instr(&deref->instr);
1268
1269 nir_ssa_def *base_addr = NULL;
1270 if (deref->deref_type != nir_deref_type_var) {
1271 assert(deref->parent.is_ssa);
1272 base_addr = deref->parent.ssa;
1273 }
1274
1275 nir_ssa_def *addr = nir_explicit_io_address_from_deref(b, deref, base_addr,
1276 addr_format);
1277
1278 nir_instr_remove(&deref->instr);
1279 nir_ssa_def_rewrite_uses(&deref->dest.ssa, nir_src_for_ssa(addr));
1280 }
1281
1282 static void
1283 lower_explicit_io_access(nir_builder *b, nir_intrinsic_instr *intrin,
1284 nir_address_format addr_format)
1285 {
1286 assert(intrin->src[0].is_ssa);
1287 nir_lower_explicit_io_instr(b, intrin, intrin->src[0].ssa, addr_format);
1288 }
1289
1290 static void
1291 lower_explicit_io_array_length(nir_builder *b, nir_intrinsic_instr *intrin,
1292 nir_address_format addr_format)
1293 {
1294 b->cursor = nir_after_instr(&intrin->instr);
1295
1296 nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
1297
1298 assert(glsl_type_is_array(deref->type));
1299 assert(glsl_get_length(deref->type) == 0);
1300 unsigned stride = glsl_get_explicit_stride(deref->type);
1301 assert(stride > 0);
1302
1303 assert(addr_format == nir_address_format_32bit_index_offset ||
1304 addr_format == nir_address_format_vec2_index_32bit_offset);
1305 nir_ssa_def *addr = &deref->dest.ssa;
1306 nir_ssa_def *index = addr_to_index(b, addr, addr_format);
1307 nir_ssa_def *offset = addr_to_offset(b, addr, addr_format);
1308
1309 nir_intrinsic_instr *bsize =
1310 nir_intrinsic_instr_create(b->shader, nir_intrinsic_get_buffer_size);
1311 bsize->src[0] = nir_src_for_ssa(index);
1312 nir_ssa_dest_init(&bsize->instr, &bsize->dest, 1, 32, NULL);
1313 nir_builder_instr_insert(b, &bsize->instr);
1314
1315 nir_ssa_def *arr_size =
1316 nir_idiv(b, nir_isub(b, &bsize->dest.ssa, offset),
1317 nir_imm_int(b, stride));
1318
1319 nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(arr_size));
1320 nir_instr_remove(&intrin->instr);
1321 }
1322
1323 static bool
1324 nir_lower_explicit_io_impl(nir_function_impl *impl, nir_variable_mode modes,
1325 nir_address_format addr_format)
1326 {
1327 bool progress = false;
1328
1329 nir_builder b;
1330 nir_builder_init(&b, impl);
1331
1332 /* Walk in reverse order so that we can see the full deref chain when we
1333 * lower the access operations. We lower them assuming that the derefs
1334 * will be turned into address calculations later.
1335 */
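/* Concretely, walking backwards means each access intrinsic is visited
 * before the deref instructions feeding it, so the full chain is still
 * intact when the access is lowered; the derefs themselves are then
 * rewritten into address arithmetic afterwards.
 */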
1336 nir_foreach_block_reverse(block, impl) {
1337 nir_foreach_instr_reverse_safe(instr, block) {
1338 switch (instr->type) {
1339 case nir_instr_type_deref: {
1340 nir_deref_instr *deref = nir_instr_as_deref(instr);
1341 if (deref->mode & modes) {
1342 lower_explicit_io_deref(&b, deref, addr_format);
1343 progress = true;
1344 }
1345 break;
1346 }
1347
1348 case nir_instr_type_intrinsic: {
1349 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
1350 switch (intrin->intrinsic) {
1351 case nir_intrinsic_load_deref:
1352 case nir_intrinsic_store_deref:
1353 case nir_intrinsic_deref_atomic_add:
1354 case nir_intrinsic_deref_atomic_imin:
1355 case nir_intrinsic_deref_atomic_umin:
1356 case nir_intrinsic_deref_atomic_imax:
1357 case nir_intrinsic_deref_atomic_umax:
1358 case nir_intrinsic_deref_atomic_and:
1359 case nir_intrinsic_deref_atomic_or:
1360 case nir_intrinsic_deref_atomic_xor:
1361 case nir_intrinsic_deref_atomic_exchange:
1362 case nir_intrinsic_deref_atomic_comp_swap:
1363 case nir_intrinsic_deref_atomic_fadd:
1364 case nir_intrinsic_deref_atomic_fmin:
1365 case nir_intrinsic_deref_atomic_fmax:
1366 case nir_intrinsic_deref_atomic_fcomp_swap: {
1367 nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
1368 if (deref->mode & modes) {
1369 lower_explicit_io_access(&b, intrin, addr_format);
1370 progress = true;
1371 }
1372 break;
1373 }
1374
1375 case nir_intrinsic_deref_buffer_array_length: {
1376 nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
1377 if (deref->mode & modes) {
1378 lower_explicit_io_array_length(&b, intrin, addr_format);
1379 progress = true;
1380 }
1381 break;
1382 }
1383
1384 default:
1385 break;
1386 }
1387 break;
1388 }
1389
1390 default:
1391 /* Nothing to do */
1392 break;
1393 }
1394 }
1395 }
1396
1397 if (progress) {
1398 nir_metadata_preserve(impl, nir_metadata_block_index |
1399 nir_metadata_dominance);
1400 }
1401
1402 return progress;
1403 }
1404
1405 bool
1406 nir_lower_explicit_io(nir_shader *shader, nir_variable_mode modes,
1407 nir_address_format addr_format)
1408 {
1409 bool progress = false;
1410
1411 nir_foreach_function(function, shader) {
1412 if (function->impl &&
1413 nir_lower_explicit_io_impl(function->impl, modes, addr_format))
1414 progress = true;
1415 }
1416
1417 return progress;
1418 }
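/* Illustrative calls (the address formats shown are common choices, not
 * requirements): a Vulkan-style driver might lower buffer access with an
 * index+offset format and shared memory with plain 32-bit offsets:
 *
 *    nir_lower_explicit_io(shader, nir_var_mem_ubo | nir_var_mem_ssbo,
 *                          nir_address_format_32bit_index_offset);
 *    nir_lower_explicit_io(shader, nir_var_mem_shared,
 *                          nir_address_format_32bit_offset);
 */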
1419
1420 static bool
1421 nir_lower_vars_to_explicit_types_impl(nir_function_impl *impl,
1422 nir_variable_mode modes,
1423 glsl_type_size_align_func type_info)
1424 {
1425 bool progress = false;
1426
1427 nir_foreach_block(block, impl) {
1428 nir_foreach_instr(instr, block) {
1429 if (instr->type != nir_instr_type_deref)
1430 continue;
1431
1432 nir_deref_instr *deref = nir_instr_as_deref(instr);
1433 if (!(deref->mode & modes))
1434 continue;
1435
1436 unsigned size, alignment;
1437 const struct glsl_type *new_type =
1438 glsl_get_explicit_type_for_size_align(deref->type, type_info, &size, &alignment);
1439 if (new_type != deref->type) {
1440 progress = true;
1441 deref->type = new_type;
1442 }
1443 if (deref->deref_type == nir_deref_type_cast) {
1444 /* See also glsl_type::get_explicit_type_for_size_align() */
1445 unsigned new_stride = align(size, alignment);
1446 if (new_stride != deref->cast.ptr_stride) {
1447 deref->cast.ptr_stride = new_stride;
1448 progress = true;
1449 }
1450 }
1451 }
1452 }
1453
1454 if (progress) {
1455 nir_metadata_preserve(impl, nir_metadata_block_index |
1456 nir_metadata_dominance |
1457 nir_metadata_live_ssa_defs |
1458 nir_metadata_loop_analysis);
1459 }
1460
1461 return progress;
1462 }
1463
1464 static bool
1465 lower_vars_to_explicit(nir_shader *shader,
1466 struct exec_list *vars, nir_variable_mode mode,
1467 glsl_type_size_align_func type_info)
1468 {
1469 bool progress = false;
1470 unsigned offset = 0;
1471 nir_foreach_variable(var, vars) {
1472 unsigned size, align;
1473 const struct glsl_type *explicit_type =
1474 glsl_get_explicit_type_for_size_align(var->type, type_info, &size, &align);
1475
1476 if (explicit_type != var->type) {
1477 progress = true;
1478 var->type = explicit_type;
1479 }
1480
1481 var->data.driver_location = ALIGN_POT(offset, align);
1482 offset = var->data.driver_location + size;
1483 }
1484
1485 if (mode == nir_var_mem_shared) {
1486 shader->info.cs.shared_size = offset;
1487 shader->num_shared = offset;
1488 }
1489
1490 return progress;
1491 }
1492
1493 bool
1494 nir_lower_vars_to_explicit_types(nir_shader *shader,
1495 nir_variable_mode modes,
1496 glsl_type_size_align_func type_info)
1497 {
1498 /* TODO: Situations which need to be handled to support more modes:
1499 * - row-major matrices
1500 * - compact shader inputs/outputs
1501 * - interface types
1502 */
1503 ASSERTED nir_variable_mode supported = nir_var_mem_shared |
1504 nir_var_shader_temp | nir_var_function_temp;
1505 assert(!(modes & ~supported) && "unsupported");
1506
1507 bool progress = false;
1508
1509 if (modes & nir_var_mem_shared)
1510 progress |= lower_vars_to_explicit(shader, &shader->shared, nir_var_mem_shared, type_info);
1511 if (modes & nir_var_shader_temp)
1512 progress |= lower_vars_to_explicit(shader, &shader->globals, nir_var_shader_temp, type_info);
1513
1514 nir_foreach_function(function, shader) {
1515 if (function->impl) {
1516 if (modes & nir_var_function_temp)
1517 progress |= lower_vars_to_explicit(shader, &function->impl->locals, nir_var_function_temp, type_info);
1518
1519 progress |= nir_lower_vars_to_explicit_types_impl(function->impl, modes, type_info);
1520 }
1521 }
1522
1523 return progress;
1524 }
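/* Illustrative pipeline for shared memory (callback choice is driver policy;
 * glsl_get_natural_size_align_bytes is one existing option): first assign
 * explicit layouts, then lower the derefs to offsets.
 *
 *    nir_lower_vars_to_explicit_types(shader, nir_var_mem_shared,
 *                                     glsl_get_natural_size_align_bytes);
 *    nir_lower_explicit_io(shader, nir_var_mem_shared,
 *                          nir_address_format_32bit_offset);
 */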
1525
1526 /**
1527 * Return the offset source for a load/store intrinsic.
1528 */
1529 nir_src *
1530 nir_get_io_offset_src(nir_intrinsic_instr *instr)
1531 {
1532 switch (instr->intrinsic) {
1533 case nir_intrinsic_load_input:
1534 case nir_intrinsic_load_output:
1535 case nir_intrinsic_load_shared:
1536 case nir_intrinsic_load_uniform:
1537 case nir_intrinsic_load_global:
1538 case nir_intrinsic_load_scratch:
1539 case nir_intrinsic_load_fs_input_interp_deltas:
1540 return &instr->src[0];
1541 case nir_intrinsic_load_ubo:
1542 case nir_intrinsic_load_ssbo:
1543 case nir_intrinsic_load_per_vertex_input:
1544 case nir_intrinsic_load_per_vertex_output:
1545 case nir_intrinsic_load_interpolated_input:
1546 case nir_intrinsic_store_output:
1547 case nir_intrinsic_store_shared:
1548 case nir_intrinsic_store_global:
1549 case nir_intrinsic_store_scratch:
1550 case nir_intrinsic_ssbo_atomic_add:
1551 case nir_intrinsic_ssbo_atomic_imin:
1552 case nir_intrinsic_ssbo_atomic_umin:
1553 case nir_intrinsic_ssbo_atomic_imax:
1554 case nir_intrinsic_ssbo_atomic_umax:
1555 case nir_intrinsic_ssbo_atomic_and:
1556 case nir_intrinsic_ssbo_atomic_or:
1557 case nir_intrinsic_ssbo_atomic_xor:
1558 case nir_intrinsic_ssbo_atomic_exchange:
1559 case nir_intrinsic_ssbo_atomic_comp_swap:
1560 case nir_intrinsic_ssbo_atomic_fadd:
1561 case nir_intrinsic_ssbo_atomic_fmin:
1562 case nir_intrinsic_ssbo_atomic_fmax:
1563 case nir_intrinsic_ssbo_atomic_fcomp_swap:
1564 return &instr->src[1];
1565 case nir_intrinsic_store_ssbo:
1566 case nir_intrinsic_store_per_vertex_output:
1567 return &instr->src[2];
1568 default:
1569 return NULL;
1570 }
1571 }
1572
1573 /**
1574 * Return the vertex index source for a load/store per_vertex intrinsic.
1575 */
1576 nir_src *
1577 nir_get_io_vertex_index_src(nir_intrinsic_instr *instr)
1578 {
1579 switch (instr->intrinsic) {
1580 case nir_intrinsic_load_per_vertex_input:
1581 case nir_intrinsic_load_per_vertex_output:
1582 return &instr->src[0];
1583 case nir_intrinsic_store_per_vertex_output:
1584 return &instr->src[1];
1585 default:
1586 return NULL;
1587 }
1588 }
1589
1590 /**
1591 * Return the numeric constant that identifies a NULL pointer for each
1592 * address format.
1593 */
1594 const nir_const_value *
1595 nir_address_format_null_value(nir_address_format addr_format)
1596 {
1597 const static nir_const_value null_values[][NIR_MAX_VEC_COMPONENTS] = {
1598 [nir_address_format_32bit_global] = {{0}},
1599 [nir_address_format_64bit_global] = {{0}},
1600 [nir_address_format_64bit_bounded_global] = {{0}},
1601 [nir_address_format_32bit_index_offset] = {{.u32 = ~0}, {.u32 = ~0}},
1602 [nir_address_format_vec2_index_32bit_offset] = {{.u32 = ~0}, {.u32 = ~0}, {.u32 = ~0}},
1603 [nir_address_format_32bit_offset] = {{.u32 = ~0}},
1604 [nir_address_format_logical] = {{.u32 = ~0}},
1605 };
1606
1607 assert(addr_format < ARRAY_SIZE(null_values));
1608 return null_values[addr_format];
1609 }
1610
1611 nir_ssa_def *
1612 nir_build_addr_ieq(nir_builder *b, nir_ssa_def *addr0, nir_ssa_def *addr1,
1613 nir_address_format addr_format)
1614 {
1615 switch (addr_format) {
1616 case nir_address_format_32bit_global:
1617 case nir_address_format_64bit_global:
1618 case nir_address_format_64bit_bounded_global:
1619 case nir_address_format_32bit_index_offset:
1620 case nir_address_format_vec2_index_32bit_offset:
1621 case nir_address_format_32bit_offset:
1622 return nir_ball_iequal(b, addr0, addr1);
1623
1624 case nir_address_format_logical:
1625 unreachable("Unsupported address format");
1626 }
1627
1628 unreachable("Invalid address format");
1629 }
1630
1631 nir_ssa_def *
1632 nir_build_addr_isub(nir_builder *b, nir_ssa_def *addr0, nir_ssa_def *addr1,
1633 nir_address_format addr_format)
1634 {
1635 switch (addr_format) {
1636 case nir_address_format_32bit_global:
1637 case nir_address_format_64bit_global:
1638 case nir_address_format_32bit_offset:
1639 assert(addr0->num_components == 1);
1640 assert(addr1->num_components == 1);
1641 return nir_isub(b, addr0, addr1);
1642
1643 case nir_address_format_64bit_bounded_global:
1644 return nir_isub(b, addr_to_global(b, addr0, addr_format),
1645 addr_to_global(b, addr1, addr_format));
1646
1647 case nir_address_format_32bit_index_offset:
1648 assert(addr0->num_components == 2);
1649 assert(addr1->num_components == 2);
1650 /* Assume the same buffer index. */
1651 return nir_isub(b, nir_channel(b, addr0, 1), nir_channel(b, addr1, 1));
1652
1653 case nir_address_format_vec2_index_32bit_offset:
1654 assert(addr0->num_components == 3);
1655 assert(addr1->num_components == 3);
1656 /* Assume the same buffer index. */
1657 return nir_isub(b, nir_channel(b, addr0, 2), nir_channel(b, addr1, 2));
1658
1659 case nir_address_format_logical:
1660 unreachable("Unsupported address format");
1661 }
1662
1663 unreachable("Invalid address format");
1664 }
1665
1666 static bool
1667 is_input(nir_intrinsic_instr *intrin)
1668 {
1669 return intrin->intrinsic == nir_intrinsic_load_input ||
1670 intrin->intrinsic == nir_intrinsic_load_per_vertex_input ||
1671 intrin->intrinsic == nir_intrinsic_load_interpolated_input ||
1672 intrin->intrinsic == nir_intrinsic_load_fs_input_interp_deltas;
1673 }
1674
1675 static bool
1676 is_output(nir_intrinsic_instr *intrin)
1677 {
1678 return intrin->intrinsic == nir_intrinsic_load_output ||
1679 intrin->intrinsic == nir_intrinsic_load_per_vertex_output ||
1680 intrin->intrinsic == nir_intrinsic_store_output ||
1681 intrin->intrinsic == nir_intrinsic_store_per_vertex_output;
1682 }
1683
1684
1685 /**
1686 * This pass adds constant offsets to instr->const_index[0] for input/output
1687 * intrinsics, and resets the offset source to 0. Non-constant offsets remain
1688 * unchanged - since we don't know what part of a compound variable is
1689 * accessed, we allocate storage for the entire thing. For drivers that use
1690 * nir_lower_io_to_temporaries() before nir_lower_io(), this guarantees that
1691 * the offset source will be 0, so that they don't have to add it in manually.
1692 */
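/* For example (sketch, not exact NIR syntax):
 *
 *    load_input(base=0, offset=const 16)  =>  load_input(base=16, offset=const 0)
 *
 * while a non-constant offset such as load_input(base=0, offset=ssa_5) is
 * left untouched.
 */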
1693
1694 static bool
1695 add_const_offset_to_base_block(nir_block *block, nir_builder *b,
1696 nir_variable_mode mode)
1697 {
1698 bool progress = false;
1699 nir_foreach_instr_safe(instr, block) {
1700 if (instr->type != nir_instr_type_intrinsic)
1701 continue;
1702
1703 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
1704
1705 if ((mode == nir_var_shader_in && is_input(intrin)) ||
1706 (mode == nir_var_shader_out && is_output(intrin))) {
1707 nir_src *offset = nir_get_io_offset_src(intrin);
1708
1709 if (nir_src_is_const(*offset)) {
1710 intrin->const_index[0] += nir_src_as_uint(*offset);
1711 b->cursor = nir_before_instr(&intrin->instr);
1712 nir_instr_rewrite_src(&intrin->instr, offset,
1713 nir_src_for_ssa(nir_imm_int(b, 0)));
1714 progress = true;
1715 }
1716 }
1717 }
1718
1719 return progress;
1720 }
1721
1722 bool
1723 nir_io_add_const_offset_to_base(nir_shader *nir, nir_variable_mode mode)
1724 {
1725 bool progress = false;
1726
1727 nir_foreach_function(f, nir) {
1728 if (f->impl) {
1729 nir_builder b;
1730 nir_builder_init(&b, f->impl);
1731 nir_foreach_block(block, f->impl) {
1732 progress |= add_const_offset_to_base_block(block, &b, mode);
1733 }
1734 }
1735 }
1736
1737 return progress;
1738 }
1739