nir: Use b2b opcodes for shared and constant memory
src/compiler/nir/nir_lower_io.c (mesa.git)
1 /*
2 * Copyright © 2014 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Connor Abbott (cwabbott0@gmail.com)
25 * Jason Ekstrand (jason@jlekstrand.net)
26 *
27 */
28
29 /*
30 * This lowering pass converts loads and stores of input/output variables,
31 * accessed through derefs, into the corresponding input/output intrinsics.
32 */
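
/* As a rough illustration (not exact NIR printer output), a vertex shader
 * input read such as
 *
 *    ssa_1 = deref_var &in_color (shader_in vec4)
 *    ssa_2 = intrinsic load_deref (ssa_1) ()
 *
 * ends up as something along the lines of
 *
 *    ssa_2 = intrinsic load_input (ssa_0) (base=N, component=0)
 *
 * with the constant-zero offset source and the base/component indices
 * computed from the deref chain by get_io_offset() below. The names and
 * numbers here are illustrative placeholders.
 */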
33
34 #include "nir.h"
35 #include "nir_builder.h"
36 #include "nir_deref.h"
37
38 #include "util/u_math.h"
39
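/* State shared by the per-block lowering below: the builder used to emit
 * replacement intrinsics, the driver-provided type_size() callback used to
 * turn GLSL types into location/slot counts, the set of variable modes being
 * lowered, and the nir_lower_io_options flags.
 */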
40 struct lower_io_state {
41 void *dead_ctx;
42 nir_builder builder;
43 int (*type_size)(const struct glsl_type *type, bool);
44 nir_variable_mode modes;
45 nir_lower_io_options options;
46 };
47
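/* The three helpers below translate deref-based atomic intrinsics into their
 * mode-specific counterparts (SSBO, global, and shared). The intrinsic names
 * differ only in their prefix, so the mapping is purely mechanical.
 */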
48 static nir_intrinsic_op
49 ssbo_atomic_for_deref(nir_intrinsic_op deref_op)
50 {
51 switch (deref_op) {
52 #define OP(O) case nir_intrinsic_deref_##O: return nir_intrinsic_ssbo_##O;
53 OP(atomic_exchange)
54 OP(atomic_comp_swap)
55 OP(atomic_add)
56 OP(atomic_imin)
57 OP(atomic_umin)
58 OP(atomic_imax)
59 OP(atomic_umax)
60 OP(atomic_and)
61 OP(atomic_or)
62 OP(atomic_xor)
63 OP(atomic_fadd)
64 OP(atomic_fmin)
65 OP(atomic_fmax)
66 OP(atomic_fcomp_swap)
67 #undef OP
68 default:
69 unreachable("Invalid SSBO atomic");
70 }
71 }
72
73 static nir_intrinsic_op
74 global_atomic_for_deref(nir_intrinsic_op deref_op)
75 {
76 switch (deref_op) {
77 #define OP(O) case nir_intrinsic_deref_##O: return nir_intrinsic_global_##O;
78 OP(atomic_exchange)
79 OP(atomic_comp_swap)
80 OP(atomic_add)
81 OP(atomic_imin)
82 OP(atomic_umin)
83 OP(atomic_imax)
84 OP(atomic_umax)
85 OP(atomic_and)
86 OP(atomic_or)
87 OP(atomic_xor)
88 OP(atomic_fadd)
89 OP(atomic_fmin)
90 OP(atomic_fmax)
91 OP(atomic_fcomp_swap)
92 #undef OP
93 default:
94 unreachable("Invalid global atomic");
95 }
96 }
97
98 static nir_intrinsic_op
99 shared_atomic_for_deref(nir_intrinsic_op deref_op)
100 {
101 switch (deref_op) {
102 #define OP(O) case nir_intrinsic_deref_##O: return nir_intrinsic_shared_##O;
103 OP(atomic_exchange)
104 OP(atomic_comp_swap)
105 OP(atomic_add)
106 OP(atomic_imin)
107 OP(atomic_umin)
108 OP(atomic_imax)
109 OP(atomic_umax)
110 OP(atomic_and)
111 OP(atomic_or)
112 OP(atomic_xor)
113 OP(atomic_fadd)
114 OP(atomic_fmin)
115 OP(atomic_fmax)
116 OP(atomic_fcomp_swap)
117 #undef OP
118 default:
119 unreachable("Invalid shared atomic");
120 }
121 }
122
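/* Assign consecutive driver_locations to every variable in var_list using
 * the driver-provided type_size() callback and return the total amount of
 * space consumed in *size. UBO and SSBO variables are skipped since they
 * live in their own address spaces.
 */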
123 void
124 nir_assign_var_locations(struct exec_list *var_list, unsigned *size,
125 int (*type_size)(const struct glsl_type *, bool))
126 {
127 unsigned location = 0;
128
129 nir_foreach_variable(var, var_list) {
130 /*
131 * UBOs and SSBOs have their own address spaces, so don't count them
132 * towards the number of global uniforms.
133 */
134 if (var->data.mode == nir_var_mem_ubo || var->data.mode == nir_var_mem_ssbo)
135 continue;
136
137 var->data.driver_location = location;
138 bool bindless_type_size = var->data.mode == nir_var_shader_in ||
139 var->data.mode == nir_var_shader_out ||
140 var->data.bindless;
141 location += type_size(var->type, bindless_type_size);
142 }
143
144 *size = location;
145 }
146
147 /**
148 * Return true if the given variable is a per-vertex input/output array
149 * (such as geometry shader inputs).
150 */
151 bool
152 nir_is_per_vertex_io(const nir_variable *var, gl_shader_stage stage)
153 {
154 if (var->data.patch || !glsl_type_is_array(var->type))
155 return false;
156
157 if (var->data.mode == nir_var_shader_in)
158 return stage == MESA_SHADER_GEOMETRY ||
159 stage == MESA_SHADER_TESS_CTRL ||
160 stage == MESA_SHADER_TESS_EVAL;
161
162 if (var->data.mode == nir_var_shader_out)
163 return stage == MESA_SHADER_TESS_CTRL;
164
165 return false;
166 }
167
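/* Build an SSA value holding the offset (in type_size() units) of the given
 * deref from the start of its variable. For per-vertex I/O the outermost
 * array index is returned separately through *vertex_index. *component is
 * updated for "compact" array variables, and "bts" selects whether bindless
 * type sizes are used when calling type_size().
 */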
168 static nir_ssa_def *
169 get_io_offset(nir_builder *b, nir_deref_instr *deref,
170 nir_ssa_def **vertex_index,
171 int (*type_size)(const struct glsl_type *, bool),
172 unsigned *component, bool bts)
173 {
174 nir_deref_path path;
175 nir_deref_path_init(&path, deref, NULL);
176
177 assert(path.path[0]->deref_type == nir_deref_type_var);
178 nir_deref_instr **p = &path.path[1];
179
180 /* For per-vertex input arrays (i.e. geometry shader inputs), keep the
181 * outermost array index separate. Process the rest normally.
182 */
183 if (vertex_index != NULL) {
184 assert((*p)->deref_type == nir_deref_type_array);
185 *vertex_index = nir_ssa_for_src(b, (*p)->arr.index, 1);
186 p++;
187 }
188
189 if (path.path[0]->var->data.compact) {
190 assert((*p)->deref_type == nir_deref_type_array);
191 assert(glsl_type_is_scalar((*p)->type));
192
193 /* We always lower indirect dereferences for "compact" array vars. */
194 const unsigned index = nir_src_as_uint((*p)->arr.index);
195 const unsigned total_offset = *component + index;
196 const unsigned slot_offset = total_offset / 4;
197 *component = total_offset % 4;
198 return nir_imm_int(b, type_size(glsl_vec4_type(), bts) * slot_offset);
199 }
200
201 /* Just emit code and let constant-folding go to town */
202 nir_ssa_def *offset = nir_imm_int(b, 0);
203
204 for (; *p; p++) {
205 if ((*p)->deref_type == nir_deref_type_array) {
206 unsigned size = type_size((*p)->type, bts);
207
208 nir_ssa_def *mul =
209 nir_amul_imm(b, nir_ssa_for_src(b, (*p)->arr.index, 1), size);
210
211 offset = nir_iadd(b, offset, mul);
212 } else if ((*p)->deref_type == nir_deref_type_struct) {
213 /* p starts at path[1], so this is safe */
214 nir_deref_instr *parent = *(p - 1);
215
216 unsigned field_offset = 0;
217 for (unsigned i = 0; i < (*p)->strct.index; i++) {
218 field_offset += type_size(glsl_get_struct_field(parent->type, i), bts);
219 }
220 offset = nir_iadd_imm(b, offset, field_offset);
221 } else {
222 unreachable("Unsupported deref type");
223 }
224 }
225
226 nir_deref_path_finish(&path);
227
228 return offset;
229 }
230
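/* Emit the load intrinsic corresponding to the variable's mode: load_input /
 * load_per_vertex_input / load_interpolated_input / load_input_vertex for
 * shader inputs (depending on stage and interpolation mode), load_output /
 * load_per_vertex_output for outputs, load_uniform for uniforms, and
 * load_shared for shared memory.
 */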
231 static nir_ssa_def *
232 emit_load(struct lower_io_state *state,
233 nir_ssa_def *vertex_index, nir_variable *var, nir_ssa_def *offset,
234 unsigned component, unsigned num_components, unsigned bit_size,
235 nir_alu_type type)
236 {
237 nir_builder *b = &state->builder;
238 const nir_shader *nir = b->shader;
239 nir_variable_mode mode = var->data.mode;
240 nir_ssa_def *barycentric = NULL;
241
242 nir_intrinsic_op op;
243 switch (mode) {
244 case nir_var_shader_in:
245 if (nir->info.stage == MESA_SHADER_FRAGMENT &&
246 nir->options->use_interpolated_input_intrinsics &&
247 var->data.interpolation != INTERP_MODE_FLAT) {
248 if (var->data.interpolation == INTERP_MODE_EXPLICIT) {
249 assert(vertex_index != NULL);
250 op = nir_intrinsic_load_input_vertex;
251 } else {
252 assert(vertex_index == NULL);
253
254 nir_intrinsic_op bary_op;
255 if (var->data.sample ||
256 (state->options & nir_lower_io_force_sample_interpolation))
257 bary_op = nir_intrinsic_load_barycentric_sample;
258 else if (var->data.centroid)
259 bary_op = nir_intrinsic_load_barycentric_centroid;
260 else
261 bary_op = nir_intrinsic_load_barycentric_pixel;
262
263 barycentric = nir_load_barycentric(&state->builder, bary_op,
264 var->data.interpolation);
265 op = nir_intrinsic_load_interpolated_input;
266 }
267 } else {
268 op = vertex_index ? nir_intrinsic_load_per_vertex_input :
269 nir_intrinsic_load_input;
270 }
271 break;
272 case nir_var_shader_out:
273 op = vertex_index ? nir_intrinsic_load_per_vertex_output :
274 nir_intrinsic_load_output;
275 break;
276 case nir_var_uniform:
277 op = nir_intrinsic_load_uniform;
278 break;
279 case nir_var_mem_shared:
280 op = nir_intrinsic_load_shared;
281 break;
282 default:
283 unreachable("Unknown variable mode");
284 }
285
286 nir_intrinsic_instr *load =
287 nir_intrinsic_instr_create(state->builder.shader, op);
288 load->num_components = num_components;
289
290 nir_intrinsic_set_base(load, var->data.driver_location);
291 if (mode == nir_var_shader_in || mode == nir_var_shader_out)
292 nir_intrinsic_set_component(load, component);
293
294 if (load->intrinsic == nir_intrinsic_load_uniform)
295 nir_intrinsic_set_range(load,
296 state->type_size(var->type, var->data.bindless));
297
298 if (load->intrinsic == nir_intrinsic_load_input ||
299 load->intrinsic == nir_intrinsic_load_input_vertex ||
300 load->intrinsic == nir_intrinsic_load_uniform)
301 nir_intrinsic_set_type(load, type);
302
303 if (vertex_index) {
304 load->src[0] = nir_src_for_ssa(vertex_index);
305 load->src[1] = nir_src_for_ssa(offset);
306 } else if (barycentric) {
307 load->src[0] = nir_src_for_ssa(barycentric);
308 load->src[1] = nir_src_for_ssa(offset);
309 } else {
310 load->src[0] = nir_src_for_ssa(offset);
311 }
312
313 nir_ssa_dest_init(&load->instr, &load->dest,
314 num_components, bit_size, NULL);
315 nir_builder_instr_insert(b, &load->instr);
316
317 return &load->dest.ssa;
318 }
319
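/* Lower a load_deref to the appropriate load intrinsic. When
 * nir_lower_io_lower_64bit_to_32 is set, 64-bit loads are split into 32-bit
 * loads of at most a vec4 each and repacked with pack_64_2x32; roughly
 * speaking, a dvec4 load becomes two 32-bit vec4 loads one slot apart.
 */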
320 static nir_ssa_def *
321 lower_load(nir_intrinsic_instr *intrin, struct lower_io_state *state,
322 nir_ssa_def *vertex_index, nir_variable *var, nir_ssa_def *offset,
323 unsigned component, const struct glsl_type *type)
324 {
325 assert(intrin->dest.is_ssa);
326 if (intrin->dest.ssa.bit_size == 64 &&
327 (state->options & nir_lower_io_lower_64bit_to_32)) {
328 nir_builder *b = &state->builder;
329
330 const unsigned slot_size = state->type_size(glsl_dvec_type(2), false);
331
332 nir_ssa_def *comp64[4];
333 assert(component == 0 || component == 2);
334 unsigned dest_comp = 0;
335 while (dest_comp < intrin->dest.ssa.num_components) {
336 const unsigned num_comps =
337 MIN2(intrin->dest.ssa.num_components - dest_comp,
338 (4 - component) / 2);
339
340 nir_ssa_def *data32 =
341 emit_load(state, vertex_index, var, offset, component,
342 num_comps * 2, 32, nir_type_uint32);
343 for (unsigned i = 0; i < num_comps; i++) {
344 comp64[dest_comp + i] =
345 nir_pack_64_2x32(b, nir_channels(b, data32, 3 << (i * 2)));
346 }
347
348 /* Only the first load has a component offset */
349 component = 0;
350 dest_comp += num_comps;
351 offset = nir_iadd_imm(b, offset, slot_size);
352 }
353
354 return nir_vec(b, comp64, intrin->dest.ssa.num_components);
355 } else {
356 return emit_load(state, vertex_index, var, offset, component,
357 intrin->dest.ssa.num_components,
358 intrin->dest.ssa.bit_size,
359 nir_get_nir_type_for_glsl_type(type));
360 }
361 }
362
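/* emit_store()/lower_store() mirror emit_load()/lower_load() above: shared
 * variables become store_shared and outputs become store_output or
 * store_per_vertex_output, with 64-bit stores split into 32-bit ones when
 * nir_lower_io_lower_64bit_to_32 is set.
 */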
363 static void
364 emit_store(struct lower_io_state *state, nir_ssa_def *data,
365 nir_ssa_def *vertex_index, nir_variable *var, nir_ssa_def *offset,
366 unsigned component, unsigned num_components,
367 nir_component_mask_t write_mask, nir_alu_type type)
368 {
369 nir_builder *b = &state->builder;
370 nir_variable_mode mode = var->data.mode;
371
372 nir_intrinsic_op op;
373 if (mode == nir_var_mem_shared) {
374 op = nir_intrinsic_store_shared;
375 } else {
376 assert(mode == nir_var_shader_out);
377 op = vertex_index ? nir_intrinsic_store_per_vertex_output :
378 nir_intrinsic_store_output;
379 }
380
381 nir_intrinsic_instr *store =
382 nir_intrinsic_instr_create(state->builder.shader, op);
383 store->num_components = num_components;
384
385 store->src[0] = nir_src_for_ssa(data);
386
387 nir_intrinsic_set_base(store, var->data.driver_location);
388
389 if (mode == nir_var_shader_out)
390 nir_intrinsic_set_component(store, component);
391
392 if (store->intrinsic == nir_intrinsic_store_output)
393 nir_intrinsic_set_type(store, type);
394
395 nir_intrinsic_set_write_mask(store, write_mask);
396
397 if (vertex_index)
398 store->src[1] = nir_src_for_ssa(vertex_index);
399
400 store->src[vertex_index ? 2 : 1] = nir_src_for_ssa(offset);
401
402 nir_builder_instr_insert(b, &store->instr);
403 }
404
405 static void
406 lower_store(nir_intrinsic_instr *intrin, struct lower_io_state *state,
407 nir_ssa_def *vertex_index, nir_variable *var, nir_ssa_def *offset,
408 unsigned component, const struct glsl_type *type)
409 {
410 assert(intrin->src[1].is_ssa);
411 if (intrin->src[1].ssa->bit_size == 64 &&
412 (state->options & nir_lower_io_lower_64bit_to_32)) {
413 nir_builder *b = &state->builder;
414
415 const unsigned slot_size = state->type_size(glsl_dvec_type(2), false);
416
417 assert(component == 0 || component == 2);
418 unsigned src_comp = 0;
419 nir_component_mask_t write_mask = nir_intrinsic_write_mask(intrin);
420 while (src_comp < intrin->num_components) {
421 const unsigned num_comps =
422 MIN2(intrin->num_components - src_comp,
423 (4 - component) / 2);
424
425 if (write_mask & BITFIELD_MASK(num_comps)) {
426 nir_ssa_def *data =
427 nir_channels(b, intrin->src[1].ssa,
428 BITFIELD_RANGE(src_comp, num_comps));
429 nir_ssa_def *data32 = nir_bitcast_vector(b, data, 32);
430
431 nir_component_mask_t write_mask32 = 0;
432 for (unsigned i = 0; i < num_comps; i++) {
433 if (write_mask & BITFIELD_MASK(num_comps) & (1 << i))
434 write_mask32 |= 3 << (i * 2);
435 }
436
437 emit_store(state, data32, vertex_index, var, offset,
438 component, data32->num_components, write_mask32,
439 nir_type_uint32);
440 }
441
442 /* Only the first store has a component offset */
443 component = 0;
444 src_comp += num_comps;
445 write_mask >>= num_comps;
446 offset = nir_iadd_imm(b, offset, slot_size);
447 }
448 } else {
449 emit_store(state, intrin->src[1].ssa, vertex_index, var, offset,
450 component, intrin->num_components,
451 nir_intrinsic_write_mask(intrin),
452 nir_get_nir_type_for_glsl_type(type));
453 }
454 }
455
456 static nir_ssa_def *
457 lower_atomic(nir_intrinsic_instr *intrin, struct lower_io_state *state,
458 nir_variable *var, nir_ssa_def *offset)
459 {
460 nir_builder *b = &state->builder;
461 assert(var->data.mode == nir_var_mem_shared);
462
463 nir_intrinsic_op op = shared_atomic_for_deref(intrin->intrinsic);
464
465 nir_intrinsic_instr *atomic =
466 nir_intrinsic_instr_create(state->builder.shader, op);
467
468 nir_intrinsic_set_base(atomic, var->data.driver_location);
469
470 atomic->src[0] = nir_src_for_ssa(offset);
471 assert(nir_intrinsic_infos[intrin->intrinsic].num_srcs ==
472 nir_intrinsic_infos[op].num_srcs);
473 for (unsigned i = 1; i < nir_intrinsic_infos[op].num_srcs; i++) {
474 nir_src_copy(&atomic->src[i], &intrin->src[i], atomic);
475 }
476
477 if (nir_intrinsic_infos[op].has_dest) {
478 assert(intrin->dest.is_ssa);
479 assert(nir_intrinsic_infos[intrin->intrinsic].has_dest);
480 nir_ssa_dest_init(&atomic->instr, &atomic->dest,
481 intrin->dest.ssa.num_components,
482 intrin->dest.ssa.bit_size, NULL);
483 }
484
485 nir_builder_instr_insert(b, &atomic->instr);
486
487 return nir_intrinsic_infos[op].has_dest ? &atomic->dest.ssa : NULL;
488 }
489
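/* Lower the interp_deref_at_* intrinsics. Flat and explicit inputs
 * degenerate into ordinary loads; everything else becomes a
 * load_barycentric_* setup intrinsic feeding a load_interpolated_input.
 */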
490 static nir_ssa_def *
491 lower_interpolate_at(nir_intrinsic_instr *intrin, struct lower_io_state *state,
492 nir_variable *var, nir_ssa_def *offset, unsigned component,
493 const struct glsl_type *type)
494 {
495 nir_builder *b = &state->builder;
496 assert(var->data.mode == nir_var_shader_in);
497
498 /* Ignore interpolateAt() for flat variables - flat is flat. Lower
499 * interpolateAtVertex() for explicit variables.
500 */
501 if (var->data.interpolation == INTERP_MODE_FLAT ||
502 var->data.interpolation == INTERP_MODE_EXPLICIT) {
503 nir_ssa_def *vertex_index = NULL;
504
505 if (var->data.interpolation == INTERP_MODE_EXPLICIT) {
506 assert(intrin->intrinsic == nir_intrinsic_interp_deref_at_vertex);
507 vertex_index = intrin->src[1].ssa;
508 }
509
510 return lower_load(intrin, state, vertex_index, var, offset, component, type);
511 }
512
513 /* None of the supported APIs allow interpolation on 64-bit things */
514 assert(intrin->dest.is_ssa && intrin->dest.ssa.bit_size <= 32);
515
516 nir_intrinsic_op bary_op;
517 switch (intrin->intrinsic) {
518 case nir_intrinsic_interp_deref_at_centroid:
519 bary_op = (state->options & nir_lower_io_force_sample_interpolation) ?
520 nir_intrinsic_load_barycentric_sample :
521 nir_intrinsic_load_barycentric_centroid;
522 break;
523 case nir_intrinsic_interp_deref_at_sample:
524 bary_op = nir_intrinsic_load_barycentric_at_sample;
525 break;
526 case nir_intrinsic_interp_deref_at_offset:
527 bary_op = nir_intrinsic_load_barycentric_at_offset;
528 break;
529 default:
530 unreachable("Bogus interpolateAt() intrinsic.");
531 }
532
533 nir_intrinsic_instr *bary_setup =
534 nir_intrinsic_instr_create(state->builder.shader, bary_op);
535
536 nir_ssa_dest_init(&bary_setup->instr, &bary_setup->dest, 2, 32, NULL);
537 nir_intrinsic_set_interp_mode(bary_setup, var->data.interpolation);
538
539 if (intrin->intrinsic == nir_intrinsic_interp_deref_at_sample ||
540 intrin->intrinsic == nir_intrinsic_interp_deref_at_offset ||
541 intrin->intrinsic == nir_intrinsic_interp_deref_at_vertex)
542 nir_src_copy(&bary_setup->src[0], &intrin->src[1], bary_setup);
543
544 nir_builder_instr_insert(b, &bary_setup->instr);
545
546 nir_intrinsic_instr *load =
547 nir_intrinsic_instr_create(state->builder.shader,
548 nir_intrinsic_load_interpolated_input);
549 load->num_components = intrin->num_components;
550
551 nir_intrinsic_set_base(load, var->data.driver_location);
552 nir_intrinsic_set_component(load, component);
553
554 load->src[0] = nir_src_for_ssa(&bary_setup->dest.ssa);
555 load->src[1] = nir_src_for_ssa(offset);
556
557 assert(intrin->dest.is_ssa);
558 nir_ssa_dest_init(&load->instr, &load->dest,
559 intrin->dest.ssa.num_components,
560 intrin->dest.ssa.bit_size, NULL);
561 nir_builder_instr_insert(b, &load->instr);
562
563 return &load->dest.ssa;
564 }
565
566 static bool
567 nir_lower_io_block(nir_block *block,
568 struct lower_io_state *state)
569 {
570 nir_builder *b = &state->builder;
571 const nir_shader_compiler_options *options = b->shader->options;
572 bool progress = false;
573
574 nir_foreach_instr_safe(instr, block) {
575 if (instr->type != nir_instr_type_intrinsic)
576 continue;
577
578 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
579
580 switch (intrin->intrinsic) {
581 case nir_intrinsic_load_deref:
582 case nir_intrinsic_store_deref:
583 case nir_intrinsic_deref_atomic_add:
584 case nir_intrinsic_deref_atomic_imin:
585 case nir_intrinsic_deref_atomic_umin:
586 case nir_intrinsic_deref_atomic_imax:
587 case nir_intrinsic_deref_atomic_umax:
588 case nir_intrinsic_deref_atomic_and:
589 case nir_intrinsic_deref_atomic_or:
590 case nir_intrinsic_deref_atomic_xor:
591 case nir_intrinsic_deref_atomic_exchange:
592 case nir_intrinsic_deref_atomic_comp_swap:
593 case nir_intrinsic_deref_atomic_fadd:
594 case nir_intrinsic_deref_atomic_fmin:
595 case nir_intrinsic_deref_atomic_fmax:
596 case nir_intrinsic_deref_atomic_fcomp_swap:
597 /* We can lower the IO for this NIR intrinsic */
598 break;
599 case nir_intrinsic_interp_deref_at_centroid:
600 case nir_intrinsic_interp_deref_at_sample:
601 case nir_intrinsic_interp_deref_at_offset:
602 case nir_intrinsic_interp_deref_at_vertex:
603 /* We can optionally lower these to load_interpolated_input */
604 if (options->use_interpolated_input_intrinsics)
605 break;
606 default:
607 /* We can't lower the IO for this NIR intrinsic, so skip it */
608 continue;
609 }
610
611 nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
612
613 nir_variable_mode mode = deref->mode;
614
615 if ((state->modes & mode) == 0)
616 continue;
617
618 if (mode != nir_var_shader_in &&
619 mode != nir_var_shader_out &&
620 mode != nir_var_mem_shared &&
621 mode != nir_var_uniform)
622 continue;
623
624 nir_variable *var = nir_deref_instr_get_variable(deref);
625
626 b->cursor = nir_before_instr(instr);
627
628 const bool per_vertex = nir_is_per_vertex_io(var, b->shader->info.stage);
629
630 nir_ssa_def *offset;
631 nir_ssa_def *vertex_index = NULL;
632 unsigned component_offset = var->data.location_frac;
633 bool bindless_type_size = mode == nir_var_shader_in ||
634 mode == nir_var_shader_out ||
635 var->data.bindless;
636
637 offset = get_io_offset(b, deref, per_vertex ? &vertex_index : NULL,
638 state->type_size, &component_offset,
639 bindless_type_size);
640
641 nir_ssa_def *replacement = NULL;
642
643 switch (intrin->intrinsic) {
644 case nir_intrinsic_load_deref:
645 replacement = lower_load(intrin, state, vertex_index, var, offset,
646 component_offset, deref->type);
647 break;
648
649 case nir_intrinsic_store_deref:
650 lower_store(intrin, state, vertex_index, var, offset,
651 component_offset, deref->type);
652 break;
653
654 case nir_intrinsic_deref_atomic_add:
655 case nir_intrinsic_deref_atomic_imin:
656 case nir_intrinsic_deref_atomic_umin:
657 case nir_intrinsic_deref_atomic_imax:
658 case nir_intrinsic_deref_atomic_umax:
659 case nir_intrinsic_deref_atomic_and:
660 case nir_intrinsic_deref_atomic_or:
661 case nir_intrinsic_deref_atomic_xor:
662 case nir_intrinsic_deref_atomic_exchange:
663 case nir_intrinsic_deref_atomic_comp_swap:
664 case nir_intrinsic_deref_atomic_fadd:
665 case nir_intrinsic_deref_atomic_fmin:
666 case nir_intrinsic_deref_atomic_fmax:
667 case nir_intrinsic_deref_atomic_fcomp_swap:
668 assert(vertex_index == NULL);
669 replacement = lower_atomic(intrin, state, var, offset);
670 break;
671
672 case nir_intrinsic_interp_deref_at_centroid:
673 case nir_intrinsic_interp_deref_at_sample:
674 case nir_intrinsic_interp_deref_at_offset:
675 case nir_intrinsic_interp_deref_at_vertex:
676 assert(vertex_index == NULL);
677 replacement = lower_interpolate_at(intrin, state, var, offset,
678 component_offset, deref->type);
679 break;
680
681 default:
682 continue;
683 }
684
685 if (replacement) {
686 nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
687 nir_src_for_ssa(replacement));
688 }
689 nir_instr_remove(&intrin->instr);
690 progress = true;
691 }
692
693 return progress;
694 }
695
696 static bool
697 nir_lower_io_impl(nir_function_impl *impl,
698 nir_variable_mode modes,
699 int (*type_size)(const struct glsl_type *, bool),
700 nir_lower_io_options options)
701 {
702 struct lower_io_state state;
703 bool progress = false;
704
705 nir_builder_init(&state.builder, impl);
706 state.dead_ctx = ralloc_context(NULL);
707 state.modes = modes;
708 state.type_size = type_size;
709 state.options = options;
710
711 nir_foreach_block(block, impl) {
712 progress |= nir_lower_io_block(block, &state);
713 }
714
715 ralloc_free(state.dead_ctx);
716
717 nir_metadata_preserve(impl, nir_metadata_block_index |
718 nir_metadata_dominance);
719 return progress;
720 }
721
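/* Entry point for the deref-based I/O lowering above. Lowers loads, stores,
 * atomics, and interpolateAt*() on variables of the requested modes to
 * offset-based I/O intrinsics, using type_size() to measure types.
 */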
722 bool
723 nir_lower_io(nir_shader *shader, nir_variable_mode modes,
724 int (*type_size)(const struct glsl_type *, bool),
725 nir_lower_io_options options)
726 {
727 bool progress = false;
728
729 nir_foreach_function(function, shader) {
730 if (function->impl) {
731 progress |= nir_lower_io_impl(function->impl, modes,
732 type_size, options);
733 }
734 }
735
736 return progress;
737 }
738
739 static unsigned
740 type_scalar_size_bytes(const struct glsl_type *type)
741 {
742 assert(glsl_type_is_vector_or_scalar(type) ||
743 glsl_type_is_matrix(type));
744 return glsl_type_is_boolean(type) ? 4 : glsl_get_bit_size(type) / 8;
745 }
746
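/* Helpers for building and decomposing explicit addresses. As used by the
 * code below, the address formats look like this:
 *
 *   32bit_global / 64bit_global / 32bit_offset: a single scalar address or
 *      offset.
 *   64bit_bounded_global: a uvec4 of (address_lo, address_hi, bound, offset);
 *      the final 64-bit address is address + offset, and accesses are
 *      predicated on offset + size <= bound.
 *   32bit_index_offset: a uvec2 of (buffer index, byte offset).
 */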
747 static nir_ssa_def *
748 build_addr_iadd(nir_builder *b, nir_ssa_def *addr,
749 nir_address_format addr_format, nir_ssa_def *offset)
750 {
751 assert(offset->num_components == 1);
752 assert(addr->bit_size == offset->bit_size);
753
754 switch (addr_format) {
755 case nir_address_format_32bit_global:
756 case nir_address_format_64bit_global:
757 case nir_address_format_32bit_offset:
758 assert(addr->num_components == 1);
759 return nir_iadd(b, addr, offset);
760
761 case nir_address_format_64bit_bounded_global:
762 assert(addr->num_components == 4);
763 return nir_vec4(b, nir_channel(b, addr, 0),
764 nir_channel(b, addr, 1),
765 nir_channel(b, addr, 2),
766 nir_iadd(b, nir_channel(b, addr, 3), offset));
767
768 case nir_address_format_32bit_index_offset:
769 assert(addr->num_components == 2);
770 return nir_vec2(b, nir_channel(b, addr, 0),
771 nir_iadd(b, nir_channel(b, addr, 1), offset));
772 case nir_address_format_logical:
773 unreachable("Unsupported address format");
774 }
775 unreachable("Invalid address format");
776 }
777
778 static nir_ssa_def *
779 build_addr_iadd_imm(nir_builder *b, nir_ssa_def *addr,
780 nir_address_format addr_format, int64_t offset)
781 {
782 return build_addr_iadd(b, addr, addr_format,
783 nir_imm_intN_t(b, offset, addr->bit_size));
784 }
785
786 static nir_ssa_def *
787 addr_to_index(nir_builder *b, nir_ssa_def *addr,
788 nir_address_format addr_format)
789 {
790 assert(addr_format == nir_address_format_32bit_index_offset);
791 assert(addr->num_components == 2);
792 return nir_channel(b, addr, 0);
793 }
794
795 static nir_ssa_def *
796 addr_to_offset(nir_builder *b, nir_ssa_def *addr,
797 nir_address_format addr_format)
798 {
799 assert(addr_format == nir_address_format_32bit_index_offset);
800 assert(addr->num_components == 2);
801 return nir_channel(b, addr, 1);
802 }
803
804 /** Returns true if the given address format resolves to a global address */
805 static bool
806 addr_format_is_global(nir_address_format addr_format)
807 {
808 return addr_format == nir_address_format_32bit_global ||
809 addr_format == nir_address_format_64bit_global ||
810 addr_format == nir_address_format_64bit_bounded_global;
811 }
812
813 static nir_ssa_def *
814 addr_to_global(nir_builder *b, nir_ssa_def *addr,
815 nir_address_format addr_format)
816 {
817 switch (addr_format) {
818 case nir_address_format_32bit_global:
819 case nir_address_format_64bit_global:
820 assert(addr->num_components == 1);
821 return addr;
822
823 case nir_address_format_64bit_bounded_global:
824 assert(addr->num_components == 4);
825 return nir_iadd(b, nir_pack_64_2x32(b, nir_channels(b, addr, 0x3)),
826 nir_u2u64(b, nir_channel(b, addr, 3)));
827
828 case nir_address_format_32bit_index_offset:
829 case nir_address_format_32bit_offset:
830 case nir_address_format_logical:
831 unreachable("Cannot get a 64-bit address with this address format");
832 }
833
834 unreachable("Invalid address format");
835 }
836
837 static bool
838 addr_format_needs_bounds_check(nir_address_format addr_format)
839 {
840 return addr_format == nir_address_format_64bit_bounded_global;
841 }
842
843 static nir_ssa_def *
844 addr_is_in_bounds(nir_builder *b, nir_ssa_def *addr,
845 nir_address_format addr_format, unsigned size)
846 {
847 assert(addr_format == nir_address_format_64bit_bounded_global);
848 assert(addr->num_components == 4);
849 return nir_ige(b, nir_channel(b, addr, 2),
850 nir_iadd_imm(b, nir_channel(b, addr, 3), size));
851 }
852
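/* Emit the explicit load for a deref that has already been lowered to an
 * address. The opcode is chosen from the variable mode (UBO, SSBO, global,
 * shared, or kernel input); bounds-checked formats wrap the load in an if
 * and phi the result with zero, and 1-bit booleans are loaded as 32-bit
 * values and converted back with b2b1/i2b.
 */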
853 static nir_ssa_def *
854 build_explicit_io_load(nir_builder *b, nir_intrinsic_instr *intrin,
855 nir_ssa_def *addr, nir_address_format addr_format,
856 unsigned num_components)
857 {
858 nir_variable_mode mode = nir_src_as_deref(intrin->src[0])->mode;
859
860 nir_intrinsic_op op;
861 switch (mode) {
862 case nir_var_mem_ubo:
863 op = nir_intrinsic_load_ubo;
864 break;
865 case nir_var_mem_ssbo:
866 if (addr_format_is_global(addr_format))
867 op = nir_intrinsic_load_global;
868 else
869 op = nir_intrinsic_load_ssbo;
870 break;
871 case nir_var_mem_global:
872 assert(addr_format_is_global(addr_format));
873 op = nir_intrinsic_load_global;
874 break;
875 case nir_var_shader_in:
876 assert(addr_format_is_global(addr_format));
877 op = nir_intrinsic_load_kernel_input;
878 break;
879 case nir_var_mem_shared:
880 assert(addr_format == nir_address_format_32bit_offset);
881 op = nir_intrinsic_load_shared;
882 break;
883 default:
884 unreachable("Unsupported explicit IO variable mode");
885 }
886
887 nir_intrinsic_instr *load = nir_intrinsic_instr_create(b->shader, op);
888
889 if (addr_format_is_global(addr_format)) {
890 load->src[0] = nir_src_for_ssa(addr_to_global(b, addr, addr_format));
891 } else if (addr_format == nir_address_format_32bit_offset) {
892 assert(addr->num_components == 1);
893 load->src[0] = nir_src_for_ssa(addr);
894 } else {
895 load->src[0] = nir_src_for_ssa(addr_to_index(b, addr, addr_format));
896 load->src[1] = nir_src_for_ssa(addr_to_offset(b, addr, addr_format));
897 }
898
899 if (mode != nir_var_mem_ubo && mode != nir_var_shader_in && mode != nir_var_mem_shared)
900 nir_intrinsic_set_access(load, nir_intrinsic_access(intrin));
901
902 unsigned bit_size = intrin->dest.ssa.bit_size;
903 if (bit_size == 1) {
904 /* TODO: Make the native bool bit_size an option. */
905 bit_size = 32;
906 }
907
908 /* TODO: We should try and provide a better alignment. For OpenCL, we need
909 * to plumb the alignment through from SPIR-V when we have one.
910 */
911 nir_intrinsic_set_align(load, bit_size / 8, 0);
912
913 assert(intrin->dest.is_ssa);
914 load->num_components = num_components;
915 nir_ssa_dest_init(&load->instr, &load->dest, num_components,
916 bit_size, intrin->dest.ssa.name);
917
918 assert(bit_size % 8 == 0);
919
920 nir_ssa_def *result;
921 if (addr_format_needs_bounds_check(addr_format)) {
922 /* The Vulkan spec for robustBufferAccess gives us quite a few options
923 * as to what we can do with an OOB read. Unfortunately, returning
924 * undefined values isn't one of them so we return an actual zero.
925 */
926 nir_ssa_def *zero = nir_imm_zero(b, load->num_components, bit_size);
927
928 const unsigned load_size = (bit_size / 8) * load->num_components;
929 nir_push_if(b, addr_is_in_bounds(b, addr, addr_format, load_size));
930
931 nir_builder_instr_insert(b, &load->instr);
932
933 nir_pop_if(b, NULL);
934
935 result = nir_if_phi(b, &load->dest.ssa, zero);
936 } else {
937 nir_builder_instr_insert(b, &load->instr);
938 result = &load->dest.ssa;
939 }
940
941 if (intrin->dest.ssa.bit_size == 1) {
942 /* For shared, we can go ahead and use NIR's and/or the back-end's
943 * standard encoding for booleans rather than forcing a 0/1 boolean.
944 * This should save an instruction or two.
945 */
946 if (mode == nir_var_mem_shared)
947 result = nir_b2b1(b, result);
948 else
949 result = nir_i2b(b, result);
950 }
951
952 return result;
953 }
954
955 static void
956 build_explicit_io_store(nir_builder *b, nir_intrinsic_instr *intrin,
957 nir_ssa_def *addr, nir_address_format addr_format,
958 nir_ssa_def *value, nir_component_mask_t write_mask)
959 {
960 nir_variable_mode mode = nir_src_as_deref(intrin->src[0])->mode;
961
962 nir_intrinsic_op op;
963 switch (mode) {
964 case nir_var_mem_ssbo:
965 if (addr_format_is_global(addr_format))
966 op = nir_intrinsic_store_global;
967 else
968 op = nir_intrinsic_store_ssbo;
969 break;
970 case nir_var_mem_global:
971 assert(addr_format_is_global(addr_format));
972 op = nir_intrinsic_store_global;
973 break;
974 case nir_var_mem_shared:
975 assert(addr_format == nir_address_format_32bit_offset);
976 op = nir_intrinsic_store_shared;
977 break;
978 default:
979 unreachable("Unsupported explicit IO variable mode");
980 }
981
982 nir_intrinsic_instr *store = nir_intrinsic_instr_create(b->shader, op);
983
984 if (value->bit_size == 1) {
985 /* For shared, we can go ahead and use NIR's and/or the back-end's
986 * standard encoding for booleans rather than forcing a 0/1 boolean.
987 * This should save an instruction or two.
988 *
989 * TODO: Make the native bool bit_size an option.
990 */
991 if (mode == nir_var_mem_shared)
992 value = nir_b2b32(b, value);
993 else
994 value = nir_b2i(b, value, 32);
995 }
996
997 store->src[0] = nir_src_for_ssa(value);
998 if (addr_format_is_global(addr_format)) {
999 store->src[1] = nir_src_for_ssa(addr_to_global(b, addr, addr_format));
1000 } else if (addr_format == nir_address_format_32bit_offset) {
1001 assert(addr->num_components == 1);
1002 store->src[1] = nir_src_for_ssa(addr);
1003 } else {
1004 store->src[1] = nir_src_for_ssa(addr_to_index(b, addr, addr_format));
1005 store->src[2] = nir_src_for_ssa(addr_to_offset(b, addr, addr_format));
1006 }
1007
1008 nir_intrinsic_set_write_mask(store, write_mask);
1009
1010 if (mode != nir_var_mem_shared)
1011 nir_intrinsic_set_access(store, nir_intrinsic_access(intrin));
1012
1013 /* TODO: We should try and provide a better alignment. For OpenCL, we need
1014 * to plumb the alignment through from SPIR-V when we have one.
1015 */
1016 nir_intrinsic_set_align(store, value->bit_size / 8, 0);
1017
1018 assert(value->num_components == 1 ||
1019 value->num_components == intrin->num_components);
1020 store->num_components = value->num_components;
1021
1022 assert(value->bit_size % 8 == 0);
1023
1024 if (addr_format_needs_bounds_check(addr_format)) {
1025 const unsigned store_size = (value->bit_size / 8) * store->num_components;
1026 nir_push_if(b, addr_is_in_bounds(b, addr, addr_format, store_size));
1027
1028 nir_builder_instr_insert(b, &store->instr);
1029
1030 nir_pop_if(b, NULL);
1031 } else {
1032 nir_builder_instr_insert(b, &store->instr);
1033 }
1034 }
1035
1036 static nir_ssa_def *
1037 build_explicit_io_atomic(nir_builder *b, nir_intrinsic_instr *intrin,
1038 nir_ssa_def *addr, nir_address_format addr_format)
1039 {
1040 nir_variable_mode mode = nir_src_as_deref(intrin->src[0])->mode;
1041 const unsigned num_data_srcs =
1042 nir_intrinsic_infos[intrin->intrinsic].num_srcs - 1;
1043
1044 nir_intrinsic_op op;
1045 switch (mode) {
1046 case nir_var_mem_ssbo:
1047 if (addr_format_is_global(addr_format))
1048 op = global_atomic_for_deref(intrin->intrinsic);
1049 else
1050 op = ssbo_atomic_for_deref(intrin->intrinsic);
1051 break;
1052 case nir_var_mem_global:
1053 assert(addr_format_is_global(addr_format));
1054 op = global_atomic_for_deref(intrin->intrinsic);
1055 break;
1056 case nir_var_mem_shared:
1057 assert(addr_format == nir_address_format_32bit_offset);
1058 op = shared_atomic_for_deref(intrin->intrinsic);
1059 break;
1060 default:
1061 unreachable("Unsupported explicit IO variable mode");
1062 }
1063
1064 nir_intrinsic_instr *atomic = nir_intrinsic_instr_create(b->shader, op);
1065
1066 unsigned src = 0;
1067 if (addr_format_is_global(addr_format)) {
1068 atomic->src[src++] = nir_src_for_ssa(addr_to_global(b, addr, addr_format));
1069 } else if (addr_format == nir_address_format_32bit_offset) {
1070 assert(addr->num_components == 1);
1071 atomic->src[src++] = nir_src_for_ssa(addr);
1072 } else {
1073 atomic->src[src++] = nir_src_for_ssa(addr_to_index(b, addr, addr_format));
1074 atomic->src[src++] = nir_src_for_ssa(addr_to_offset(b, addr, addr_format));
1075 }
1076 for (unsigned i = 0; i < num_data_srcs; i++) {
1077 atomic->src[src++] = nir_src_for_ssa(intrin->src[1 + i].ssa);
1078 }
1079
1080 /* Global atomics don't have access flags because they assume that the
1081 * address may be non-uniform.
1082 */
1083 if (!addr_format_is_global(addr_format) && mode != nir_var_mem_shared)
1084 nir_intrinsic_set_access(atomic, nir_intrinsic_access(intrin));
1085
1086 assert(intrin->dest.ssa.num_components == 1);
1087 nir_ssa_dest_init(&atomic->instr, &atomic->dest,
1088 1, intrin->dest.ssa.bit_size, intrin->dest.ssa.name);
1089
1090 assert(atomic->dest.ssa.bit_size % 8 == 0);
1091
1092 if (addr_format_needs_bounds_check(addr_format)) {
1093 const unsigned atomic_size = atomic->dest.ssa.bit_size / 8;
1094 nir_push_if(b, addr_is_in_bounds(b, addr, addr_format, atomic_size));
1095
1096 nir_builder_instr_insert(b, &atomic->instr);
1097
1098 nir_pop_if(b, NULL);
1099 return nir_if_phi(b, &atomic->dest.ssa,
1100 nir_ssa_undef(b, 1, atomic->dest.ssa.bit_size));
1101 } else {
1102 nir_builder_instr_insert(b, &atomic->instr);
1103 return &atomic->dest.ssa;
1104 }
1105 }
1106
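/* Walk a single deref and apply it to an already-computed base address in
 * the given address format: array and ptr_as_array derefs add index * stride,
 * struct derefs add the field offset, casts pass the address through, and
 * variable derefs start from the variable's driver_location.
 */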
1107 nir_ssa_def *
1108 nir_explicit_io_address_from_deref(nir_builder *b, nir_deref_instr *deref,
1109 nir_ssa_def *base_addr,
1110 nir_address_format addr_format)
1111 {
1112 assert(deref->dest.is_ssa);
1113 switch (deref->deref_type) {
1114 case nir_deref_type_var:
1115 assert(deref->mode & (nir_var_shader_in | nir_var_mem_shared));
1116 return nir_imm_intN_t(b, deref->var->data.driver_location,
1117 deref->dest.ssa.bit_size);
1118
1119 case nir_deref_type_array: {
1120 nir_deref_instr *parent = nir_deref_instr_parent(deref);
1121
1122 unsigned stride = glsl_get_explicit_stride(parent->type);
1123 if ((glsl_type_is_matrix(parent->type) &&
1124 glsl_matrix_type_is_row_major(parent->type)) ||
1125 (glsl_type_is_vector(parent->type) && stride == 0))
1126 stride = type_scalar_size_bytes(parent->type);
1127
1128 assert(stride > 0);
1129
1130 nir_ssa_def *index = nir_ssa_for_src(b, deref->arr.index, 1);
1131 index = nir_i2i(b, index, base_addr->bit_size);
1132 return build_addr_iadd(b, base_addr, addr_format,
1133 nir_amul_imm(b, index, stride));
1134 }
1135
1136 case nir_deref_type_ptr_as_array: {
1137 nir_ssa_def *index = nir_ssa_for_src(b, deref->arr.index, 1);
1138 index = nir_i2i(b, index, base_addr->bit_size);
1139 unsigned stride = nir_deref_instr_ptr_as_array_stride(deref);
1140 return build_addr_iadd(b, base_addr, addr_format,
1141 nir_amul_imm(b, index, stride));
1142 }
1143
1144 case nir_deref_type_array_wildcard:
1145 unreachable("Wildcards should be lowered by now");
1146 break;
1147
1148 case nir_deref_type_struct: {
1149 nir_deref_instr *parent = nir_deref_instr_parent(deref);
1150 int offset = glsl_get_struct_field_offset(parent->type,
1151 deref->strct.index);
1152 assert(offset >= 0);
1153 return build_addr_iadd_imm(b, base_addr, addr_format, offset);
1154 }
1155
1156 case nir_deref_type_cast:
1157 /* Nothing to do here */
1158 return base_addr;
1159 }
1160
1161 unreachable("Invalid NIR deref type");
1162 }
1163
1164 void
1165 nir_lower_explicit_io_instr(nir_builder *b,
1166 nir_intrinsic_instr *intrin,
1167 nir_ssa_def *addr,
1168 nir_address_format addr_format)
1169 {
1170 b->cursor = nir_after_instr(&intrin->instr);
1171
1172 nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
1173 unsigned vec_stride = glsl_get_explicit_stride(deref->type);
1174 unsigned scalar_size = type_scalar_size_bytes(deref->type);
1175 assert(vec_stride == 0 || glsl_type_is_vector(deref->type));
1176 assert(vec_stride == 0 || vec_stride >= scalar_size);
1177
1178 if (intrin->intrinsic == nir_intrinsic_load_deref) {
1179 nir_ssa_def *value;
1180 if (vec_stride > scalar_size) {
1181 nir_ssa_def *comps[4] = { NULL, };
1182 for (unsigned i = 0; i < intrin->num_components; i++) {
1183 nir_ssa_def *comp_addr = build_addr_iadd_imm(b, addr, addr_format,
1184 vec_stride * i);
1185 comps[i] = build_explicit_io_load(b, intrin, comp_addr,
1186 addr_format, 1);
1187 }
1188 value = nir_vec(b, comps, intrin->num_components);
1189 } else {
1190 value = build_explicit_io_load(b, intrin, addr, addr_format,
1191 intrin->num_components);
1192 }
1193 nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(value));
1194 } else if (intrin->intrinsic == nir_intrinsic_store_deref) {
1195 assert(intrin->src[1].is_ssa);
1196 nir_ssa_def *value = intrin->src[1].ssa;
1197 nir_component_mask_t write_mask = nir_intrinsic_write_mask(intrin);
1198 if (vec_stride > scalar_size) {
1199 for (unsigned i = 0; i < intrin->num_components; i++) {
1200 if (!(write_mask & (1 << i)))
1201 continue;
1202
1203 nir_ssa_def *comp_addr = build_addr_iadd_imm(b, addr, addr_format,
1204 vec_stride * i);
1205 build_explicit_io_store(b, intrin, comp_addr, addr_format,
1206 nir_channel(b, value, i), 1);
1207 }
1208 } else {
1209 build_explicit_io_store(b, intrin, addr, addr_format,
1210 value, write_mask);
1211 }
1212 } else {
1213 nir_ssa_def *value =
1214 build_explicit_io_atomic(b, intrin, addr, addr_format);
1215 nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(value));
1216 }
1217
1218 nir_instr_remove(&intrin->instr);
1219 }
1220
1221 static void
1222 lower_explicit_io_deref(nir_builder *b, nir_deref_instr *deref,
1223 nir_address_format addr_format)
1224 {
1225 /* Just delete the deref if it's not used. We can't use
1226 * nir_deref_instr_remove_if_unused here because it may remove more than
1227 * one deref which could break our list walking since we walk the list
1228 * backwards.
1229 */
1230 assert(list_is_empty(&deref->dest.ssa.if_uses));
1231 if (list_is_empty(&deref->dest.ssa.uses)) {
1232 nir_instr_remove(&deref->instr);
1233 return;
1234 }
1235
1236 b->cursor = nir_after_instr(&deref->instr);
1237
1238 nir_ssa_def *base_addr = NULL;
1239 if (deref->deref_type != nir_deref_type_var) {
1240 assert(deref->parent.is_ssa);
1241 base_addr = deref->parent.ssa;
1242 }
1243
1244 nir_ssa_def *addr = nir_explicit_io_address_from_deref(b, deref, base_addr,
1245 addr_format);
1246
1247 nir_instr_remove(&deref->instr);
1248 nir_ssa_def_rewrite_uses(&deref->dest.ssa, nir_src_for_ssa(addr));
1249 }
1250
1251 static void
1252 lower_explicit_io_access(nir_builder *b, nir_intrinsic_instr *intrin,
1253 nir_address_format addr_format)
1254 {
1255 assert(intrin->src[0].is_ssa);
1256 nir_lower_explicit_io_instr(b, intrin, intrin->src[0].ssa, addr_format);
1257 }
1258
1259 static void
1260 lower_explicit_io_array_length(nir_builder *b, nir_intrinsic_instr *intrin,
1261 nir_address_format addr_format)
1262 {
1263 b->cursor = nir_after_instr(&intrin->instr);
1264
1265 nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
1266
1267 assert(glsl_type_is_array(deref->type));
1268 assert(glsl_get_length(deref->type) == 0);
1269 unsigned stride = glsl_get_explicit_stride(deref->type);
1270 assert(stride > 0);
1271
1272 assert(addr_format == nir_address_format_32bit_index_offset);
1273 nir_ssa_def *addr = &deref->dest.ssa;
1274 nir_ssa_def *index = addr_to_index(b, addr, addr_format);
1275 nir_ssa_def *offset = addr_to_offset(b, addr, addr_format);
1276
1277 nir_intrinsic_instr *bsize =
1278 nir_intrinsic_instr_create(b->shader, nir_intrinsic_get_buffer_size);
1279 bsize->src[0] = nir_src_for_ssa(index);
1280 nir_ssa_dest_init(&bsize->instr, &bsize->dest, 1, 32, NULL);
1281 nir_builder_instr_insert(b, &bsize->instr);
1282
1283 nir_ssa_def *arr_size =
1284 nir_idiv(b, nir_isub(b, &bsize->dest.ssa, offset),
1285 nir_imm_int(b, stride));
1286
1287 nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(arr_size));
1288 nir_instr_remove(&intrin->instr);
1289 }
1290
1291 static bool
1292 nir_lower_explicit_io_impl(nir_function_impl *impl, nir_variable_mode modes,
1293 nir_address_format addr_format)
1294 {
1295 bool progress = false;
1296
1297 nir_builder b;
1298 nir_builder_init(&b, impl);
1299
1300 /* Walk in reverse order so that we can see the full deref chain when we
1301 * lower the access operations. We lower them assuming that the derefs
1302 * will be turned into address calculations later.
1303 */
1304 nir_foreach_block_reverse(block, impl) {
1305 nir_foreach_instr_reverse_safe(instr, block) {
1306 switch (instr->type) {
1307 case nir_instr_type_deref: {
1308 nir_deref_instr *deref = nir_instr_as_deref(instr);
1309 if (deref->mode & modes) {
1310 lower_explicit_io_deref(&b, deref, addr_format);
1311 progress = true;
1312 }
1313 break;
1314 }
1315
1316 case nir_instr_type_intrinsic: {
1317 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
1318 switch (intrin->intrinsic) {
1319 case nir_intrinsic_load_deref:
1320 case nir_intrinsic_store_deref:
1321 case nir_intrinsic_deref_atomic_add:
1322 case nir_intrinsic_deref_atomic_imin:
1323 case nir_intrinsic_deref_atomic_umin:
1324 case nir_intrinsic_deref_atomic_imax:
1325 case nir_intrinsic_deref_atomic_umax:
1326 case nir_intrinsic_deref_atomic_and:
1327 case nir_intrinsic_deref_atomic_or:
1328 case nir_intrinsic_deref_atomic_xor:
1329 case nir_intrinsic_deref_atomic_exchange:
1330 case nir_intrinsic_deref_atomic_comp_swap:
1331 case nir_intrinsic_deref_atomic_fadd:
1332 case nir_intrinsic_deref_atomic_fmin:
1333 case nir_intrinsic_deref_atomic_fmax:
1334 case nir_intrinsic_deref_atomic_fcomp_swap: {
1335 nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
1336 if (deref->mode & modes) {
1337 lower_explicit_io_access(&b, intrin, addr_format);
1338 progress = true;
1339 }
1340 break;
1341 }
1342
1343 case nir_intrinsic_deref_buffer_array_length: {
1344 nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
1345 if (deref->mode & modes) {
1346 lower_explicit_io_array_length(&b, intrin, addr_format);
1347 progress = true;
1348 }
1349 break;
1350 }
1351
1352 default:
1353 break;
1354 }
1355 break;
1356 }
1357
1358 default:
1359 /* Nothing to do */
1360 break;
1361 }
1362 }
1363 }
1364
1365 if (progress) {
1366 nir_metadata_preserve(impl, nir_metadata_block_index |
1367 nir_metadata_dominance);
1368 }
1369
1370 return progress;
1371 }
1372
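/* Entry point for explicit I/O lowering: for the given modes, derefs are
 * replaced by address computations in addr_format, and the load/store/atomic
 * and buffer-array-length intrinsics that consume them are rewritten to
 * their explicit (UBO/SSBO/global/shared) forms.
 */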
1373 bool
1374 nir_lower_explicit_io(nir_shader *shader, nir_variable_mode modes,
1375 nir_address_format addr_format)
1376 {
1377 bool progress = false;
1378
1379 nir_foreach_function(function, shader) {
1380 if (function->impl &&
1381 nir_lower_explicit_io_impl(function->impl, modes, addr_format))
1382 progress = true;
1383 }
1384
1385 return progress;
1386 }
1387
1388 static bool
1389 nir_lower_vars_to_explicit_types_impl(nir_function_impl *impl,
1390 nir_variable_mode modes,
1391 glsl_type_size_align_func type_info)
1392 {
1393 bool progress = false;
1394
1395 nir_foreach_block(block, impl) {
1396 nir_foreach_instr(instr, block) {
1397 if (instr->type != nir_instr_type_deref)
1398 continue;
1399
1400 nir_deref_instr *deref = nir_instr_as_deref(instr);
1401 if (!(deref->mode & modes))
1402 continue;
1403
1404 unsigned size, alignment;
1405 const struct glsl_type *new_type =
1406 glsl_get_explicit_type_for_size_align(deref->type, type_info, &size, &alignment);
1407 if (new_type != deref->type) {
1408 progress = true;
1409 deref->type = new_type;
1410 }
1411 if (deref->deref_type == nir_deref_type_cast) {
1412 /* See also glsl_type::get_explicit_type_for_size_align() */
1413 unsigned new_stride = align(size, alignment);
1414 if (new_stride != deref->cast.ptr_stride) {
1415 deref->cast.ptr_stride = new_stride;
1416 progress = true;
1417 }
1418 }
1419 }
1420 }
1421
1422 if (progress) {
1423 nir_metadata_preserve(impl, nir_metadata_block_index |
1424 nir_metadata_dominance |
1425 nir_metadata_live_ssa_defs |
1426 nir_metadata_loop_analysis);
1427 }
1428
1429 return progress;
1430 }
1431
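/* Lay out every variable in the list with explicit sizes and alignments from
 * type_info, assigning byte offsets in var->data.driver_location. For shared
 * memory, the total size is also recorded in the shader info.
 */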
1432 static bool
1433 lower_vars_to_explicit(nir_shader *shader,
1434 struct exec_list *vars, nir_variable_mode mode,
1435 glsl_type_size_align_func type_info)
1436 {
1437 bool progress = false;
1438 unsigned offset = 0;
1439 nir_foreach_variable(var, vars) {
1440 unsigned size, align;
1441 const struct glsl_type *explicit_type =
1442 glsl_get_explicit_type_for_size_align(var->type, type_info, &size, &align);
1443
1444 if (explicit_type != var->type) {
1445 progress = true;
1446 var->type = explicit_type;
1447 }
1448
1449 var->data.driver_location = ALIGN_POT(offset, align);
1450 offset = var->data.driver_location + size;
1451 }
1452
1453 if (mode == nir_var_mem_shared) {
1454 shader->info.cs.shared_size = offset;
1455 shader->num_shared = offset;
1456 }
1457
1458 return progress;
1459 }
1460
1461 bool
1462 nir_lower_vars_to_explicit_types(nir_shader *shader,
1463 nir_variable_mode modes,
1464 glsl_type_size_align_func type_info)
1465 {
1466 /* TODO: Situations which need to be handled to support more modes:
1467 * - row-major matrices
1468 * - compact shader inputs/outputs
1469 * - interface types
1470 */
1471 ASSERTED nir_variable_mode supported = nir_var_mem_shared |
1472 nir_var_shader_temp | nir_var_function_temp;
1473 assert(!(modes & ~supported) && "unsupported");
1474
1475 bool progress = false;
1476
1477 if (modes & nir_var_mem_shared)
1478 progress |= lower_vars_to_explicit(shader, &shader->shared, nir_var_mem_shared, type_info);
1479 if (modes & nir_var_shader_temp)
1480 progress |= lower_vars_to_explicit(shader, &shader->globals, nir_var_shader_temp, type_info);
1481
1482 nir_foreach_function(function, shader) {
1483 if (function->impl) {
1484 if (modes & nir_var_function_temp)
1485 progress |= lower_vars_to_explicit(shader, &function->impl->locals, nir_var_function_temp, type_info);
1486
1487 progress |= nir_lower_vars_to_explicit_types_impl(function->impl, modes, type_info);
1488 }
1489 }
1490
1491 return progress;
1492 }
1493
1494 /**
1495 * Return the offset source for a load/store intrinsic.
1496 */
1497 nir_src *
1498 nir_get_io_offset_src(nir_intrinsic_instr *instr)
1499 {
1500 switch (instr->intrinsic) {
1501 case nir_intrinsic_load_input:
1502 case nir_intrinsic_load_output:
1503 case nir_intrinsic_load_shared:
1504 case nir_intrinsic_load_uniform:
1505 case nir_intrinsic_load_global:
1506 case nir_intrinsic_load_scratch:
1507 case nir_intrinsic_load_fs_input_interp_deltas:
1508 return &instr->src[0];
1509 case nir_intrinsic_load_ubo:
1510 case nir_intrinsic_load_ssbo:
1511 case nir_intrinsic_load_per_vertex_input:
1512 case nir_intrinsic_load_per_vertex_output:
1513 case nir_intrinsic_load_interpolated_input:
1514 case nir_intrinsic_store_output:
1515 case nir_intrinsic_store_shared:
1516 case nir_intrinsic_store_global:
1517 case nir_intrinsic_store_scratch:
1518 case nir_intrinsic_ssbo_atomic_add:
1519 case nir_intrinsic_ssbo_atomic_imin:
1520 case nir_intrinsic_ssbo_atomic_umin:
1521 case nir_intrinsic_ssbo_atomic_imax:
1522 case nir_intrinsic_ssbo_atomic_umax:
1523 case nir_intrinsic_ssbo_atomic_and:
1524 case nir_intrinsic_ssbo_atomic_or:
1525 case nir_intrinsic_ssbo_atomic_xor:
1526 case nir_intrinsic_ssbo_atomic_exchange:
1527 case nir_intrinsic_ssbo_atomic_comp_swap:
1528 case nir_intrinsic_ssbo_atomic_fadd:
1529 case nir_intrinsic_ssbo_atomic_fmin:
1530 case nir_intrinsic_ssbo_atomic_fmax:
1531 case nir_intrinsic_ssbo_atomic_fcomp_swap:
1532 return &instr->src[1];
1533 case nir_intrinsic_store_ssbo:
1534 case nir_intrinsic_store_per_vertex_output:
1535 return &instr->src[2];
1536 default:
1537 return NULL;
1538 }
1539 }
1540
1541 /**
1542 * Return the vertex index source for a load/store per_vertex intrinsic.
1543 */
1544 nir_src *
1545 nir_get_io_vertex_index_src(nir_intrinsic_instr *instr)
1546 {
1547 switch (instr->intrinsic) {
1548 case nir_intrinsic_load_per_vertex_input:
1549 case nir_intrinsic_load_per_vertex_output:
1550 return &instr->src[0];
1551 case nir_intrinsic_store_per_vertex_output:
1552 return &instr->src[1];
1553 default:
1554 return NULL;
1555 }
1556 }
1557
1558 /**
1559 * Return the numeric constant that identifies a NULL pointer for each address
1560 * format.
1561 */
1562 const nir_const_value *
1563 nir_address_format_null_value(nir_address_format addr_format)
1564 {
1565 const static nir_const_value null_values[][NIR_MAX_VEC_COMPONENTS] = {
1566 [nir_address_format_32bit_global] = {{0}},
1567 [nir_address_format_64bit_global] = {{0}},
1568 [nir_address_format_64bit_bounded_global] = {{0}},
1569 [nir_address_format_32bit_index_offset] = {{.u32 = ~0}, {.u32 = ~0}},
1570 [nir_address_format_32bit_offset] = {{.u32 = ~0}},
1571 [nir_address_format_logical] = {{.u32 = ~0}},
1572 };
1573
1574 assert(addr_format < ARRAY_SIZE(null_values));
1575 return null_values[addr_format];
1576 }
1577
1578 nir_ssa_def *
1579 nir_build_addr_ieq(nir_builder *b, nir_ssa_def *addr0, nir_ssa_def *addr1,
1580 nir_address_format addr_format)
1581 {
1582 switch (addr_format) {
1583 case nir_address_format_32bit_global:
1584 case nir_address_format_64bit_global:
1585 case nir_address_format_64bit_bounded_global:
1586 case nir_address_format_32bit_index_offset:
1587 case nir_address_format_32bit_offset:
1588 return nir_ball_iequal(b, addr0, addr1);
1589
1590 case nir_address_format_logical:
1591 unreachable("Unsupported address format");
1592 }
1593
1594 unreachable("Invalid address format");
1595 }
1596
1597 nir_ssa_def *
1598 nir_build_addr_isub(nir_builder *b, nir_ssa_def *addr0, nir_ssa_def *addr1,
1599 nir_address_format addr_format)
1600 {
1601 switch (addr_format) {
1602 case nir_address_format_32bit_global:
1603 case nir_address_format_64bit_global:
1604 case nir_address_format_32bit_offset:
1605 assert(addr0->num_components == 1);
1606 assert(addr1->num_components == 1);
1607 return nir_isub(b, addr0, addr1);
1608
1609 case nir_address_format_64bit_bounded_global:
1610 return nir_isub(b, addr_to_global(b, addr0, addr_format),
1611 addr_to_global(b, addr1, addr_format));
1612
1613 case nir_address_format_32bit_index_offset:
1614 assert(addr0->num_components == 2);
1615 assert(addr1->num_components == 2);
1616 /* Assume the same buffer index. */
1617 return nir_isub(b, nir_channel(b, addr0, 1), nir_channel(b, addr1, 1));
1618
1619 case nir_address_format_logical:
1620 unreachable("Unsupported address format");
1621 }
1622
1623 unreachable("Invalid address format");
1624 }
1625
1626 static bool
1627 is_input(nir_intrinsic_instr *intrin)
1628 {
1629 return intrin->intrinsic == nir_intrinsic_load_input ||
1630 intrin->intrinsic == nir_intrinsic_load_per_vertex_input ||
1631 intrin->intrinsic == nir_intrinsic_load_interpolated_input ||
1632 intrin->intrinsic == nir_intrinsic_load_fs_input_interp_deltas;
1633 }
1634
1635 static bool
1636 is_output(nir_intrinsic_instr *intrin)
1637 {
1638 return intrin->intrinsic == nir_intrinsic_load_output ||
1639 intrin->intrinsic == nir_intrinsic_load_per_vertex_output ||
1640 intrin->intrinsic == nir_intrinsic_store_output ||
1641 intrin->intrinsic == nir_intrinsic_store_per_vertex_output;
1642 }
1643
1644
1645 /**
1646 * This pass adds constant offsets to instr->const_index[0] for input/output
1647 * intrinsics, and resets the offset source to 0. Non-constant offsets remain
1648 * unchanged - since we don't know what part of a compound variable is
1649 * accessed, we allocate storage for the entire thing. For drivers that use
1650 * nir_lower_io_to_temporaries() before nir_lower_io(), this guarantees that
1651 * the offset source will be 0, so that they don't have to add it in manually.
1652 */
1653
1654 static bool
1655 add_const_offset_to_base_block(nir_block *block, nir_builder *b,
1656 nir_variable_mode mode)
1657 {
1658 bool progress = false;
1659 nir_foreach_instr_safe(instr, block) {
1660 if (instr->type != nir_instr_type_intrinsic)
1661 continue;
1662
1663 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
1664
1665 if ((mode == nir_var_shader_in && is_input(intrin)) ||
1666 (mode == nir_var_shader_out && is_output(intrin))) {
1667 nir_src *offset = nir_get_io_offset_src(intrin);
1668
1669 if (nir_src_is_const(*offset)) {
1670 intrin->const_index[0] += nir_src_as_uint(*offset);
1671 b->cursor = nir_before_instr(&intrin->instr);
1672 nir_instr_rewrite_src(&intrin->instr, offset,
1673 nir_src_for_ssa(nir_imm_int(b, 0)));
1674 progress = true;
1675 }
1676 }
1677 }
1678
1679 return progress;
1680 }
1681
1682 bool
1683 nir_io_add_const_offset_to_base(nir_shader *nir, nir_variable_mode mode)
1684 {
1685 bool progress = false;
1686
1687 nir_foreach_function(f, nir) {
1688 if (f->impl) {
1689 nir_builder b;
1690 nir_builder_init(&b, f->impl);
1691 nir_foreach_block(block, f->impl) {
1692 progress |= add_const_offset_to_base_block(block, &b, mode);
1693 }
1694 }
1695 }
1696
1697 return progress;
1698 }
1699