nir: add nir_intrinsic_interp_deref_at_vertex
src/compiler/nir/nir_lower_io.c
1 /*
2 * Copyright © 2014 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Connor Abbott (cwabbott0@gmail.com)
25 * Jason Ekstrand (jason@jlekstrand.net)
26 *
27 */
28
29 /*
30 * This lowering pass converts load/store references to input/output
31 * variables (load_deref/store_deref) into actual input/output intrinsics.
32 */
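/*
 * For example, a load_deref of a nir_var_shader_in variable becomes a
 * load_input (or load_per_vertex_input / load_interpolated_input) intrinsic
 * and a store_deref of a nir_var_shader_out variable becomes a store_output
 * intrinsic, with the base taken from the variable's driver_location and the
 * component from its location_frac.
 */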
33
34 #include "nir.h"
35 #include "nir_builder.h"
36 #include "nir_deref.h"
37
38 #include "util/u_math.h"
39
40 struct lower_io_state {
41 void *dead_ctx;
42 nir_builder builder;
43 int (*type_size)(const struct glsl_type *type, bool);
44 nir_variable_mode modes;
45 nir_lower_io_options options;
46 };
47
48 static nir_intrinsic_op
49 ssbo_atomic_for_deref(nir_intrinsic_op deref_op)
50 {
51 switch (deref_op) {
52 #define OP(O) case nir_intrinsic_deref_##O: return nir_intrinsic_ssbo_##O;
53 OP(atomic_exchange)
54 OP(atomic_comp_swap)
55 OP(atomic_add)
56 OP(atomic_imin)
57 OP(atomic_umin)
58 OP(atomic_imax)
59 OP(atomic_umax)
60 OP(atomic_and)
61 OP(atomic_or)
62 OP(atomic_xor)
63 OP(atomic_fadd)
64 OP(atomic_fmin)
65 OP(atomic_fmax)
66 OP(atomic_fcomp_swap)
67 #undef OP
68 default:
69 unreachable("Invalid SSBO atomic");
70 }
71 }
72
73 static nir_intrinsic_op
74 global_atomic_for_deref(nir_intrinsic_op deref_op)
75 {
76 switch (deref_op) {
77 #define OP(O) case nir_intrinsic_deref_##O: return nir_intrinsic_global_##O;
78 OP(atomic_exchange)
79 OP(atomic_comp_swap)
80 OP(atomic_add)
81 OP(atomic_imin)
82 OP(atomic_umin)
83 OP(atomic_imax)
84 OP(atomic_umax)
85 OP(atomic_and)
86 OP(atomic_or)
87 OP(atomic_xor)
88 OP(atomic_fadd)
89 OP(atomic_fmin)
90 OP(atomic_fmax)
91 OP(atomic_fcomp_swap)
92 #undef OP
93 default:
94 unreachable("Invalid global atomic");
95 }
96 }
97
98 static nir_intrinsic_op
99 shared_atomic_for_deref(nir_intrinsic_op deref_op)
100 {
101 switch (deref_op) {
102 #define OP(O) case nir_intrinsic_deref_##O: return nir_intrinsic_shared_##O;
103 OP(atomic_exchange)
104 OP(atomic_comp_swap)
105 OP(atomic_add)
106 OP(atomic_imin)
107 OP(atomic_umin)
108 OP(atomic_imax)
109 OP(atomic_umax)
110 OP(atomic_and)
111 OP(atomic_or)
112 OP(atomic_xor)
113 OP(atomic_fadd)
114 OP(atomic_fmin)
115 OP(atomic_fmax)
116 OP(atomic_fcomp_swap)
117 #undef OP
118 default:
119 unreachable("Invalid shared atomic");
120 }
121 }
122
123 void
124 nir_assign_var_locations(struct exec_list *var_list, unsigned *size,
125 int (*type_size)(const struct glsl_type *, bool))
126 {
127 unsigned location = 0;
128
129 nir_foreach_variable(var, var_list) {
130 /*
131 * UBOs and SSBOs have their own address spaces, so don't count them
132 * towards the number of global uniforms.
133 */
134 if (var->data.mode == nir_var_mem_ubo || var->data.mode == nir_var_mem_ssbo)
135 continue;
136
137 var->data.driver_location = location;
138 bool bindless_type_size = var->data.mode == nir_var_shader_in ||
139 var->data.mode == nir_var_shader_out ||
140 var->data.bindless;
141 location += type_size(var->type, bindless_type_size);
142 }
143
144 *size = location;
145 }
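/*
 * A minimal usage sketch (illustrative only; "count_vec4_slots" is a
 * hypothetical driver callback, not defined in this file, and
 * glsl_count_attribute_slots() comes from nir_types.h):
 *
 *    static int
 *    count_vec4_slots(const struct glsl_type *type, bool bindless_type_size)
 *    {
 *       return glsl_count_attribute_slots(type, false);
 *    }
 *
 *    nir_assign_var_locations(&shader->inputs, &shader->num_inputs,
 *                             count_vec4_slots);
 */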
146
147 /**
148 * Return true if the given variable is a per-vertex input/output array
149 * (such as geometry shader inputs).
150 */
151 bool
152 nir_is_per_vertex_io(const nir_variable *var, gl_shader_stage stage)
153 {
154 if (var->data.patch || !glsl_type_is_array(var->type))
155 return false;
156
157 if (var->data.mode == nir_var_shader_in)
158 return stage == MESA_SHADER_GEOMETRY ||
159 stage == MESA_SHADER_TESS_CTRL ||
160 stage == MESA_SHADER_TESS_EVAL;
161
162 if (var->data.mode == nir_var_shader_out)
163 return stage == MESA_SHADER_TESS_CTRL;
164
165 return false;
166 }
167
168 static nir_ssa_def *
169 get_io_offset(nir_builder *b, nir_deref_instr *deref,
170 nir_ssa_def **vertex_index,
171 int (*type_size)(const struct glsl_type *, bool),
172 unsigned *component, bool bts)
173 {
174 nir_deref_path path;
175 nir_deref_path_init(&path, deref, NULL);
176
177 assert(path.path[0]->deref_type == nir_deref_type_var);
178 nir_deref_instr **p = &path.path[1];
179
180 /* For per-vertex input arrays (i.e. geometry shader inputs), keep the
181 * outermost array index separate. Process the rest normally.
182 */
183 if (vertex_index != NULL) {
184 assert((*p)->deref_type == nir_deref_type_array);
185 *vertex_index = nir_ssa_for_src(b, (*p)->arr.index, 1);
186 p++;
187 }
188
189 if (path.path[0]->var->data.compact) {
190 assert((*p)->deref_type == nir_deref_type_array);
191 assert(glsl_type_is_scalar((*p)->type));
192
193 /* We always lower indirect dereferences for "compact" array vars. */
194 const unsigned index = nir_src_as_uint((*p)->arr.index);
195 const unsigned total_offset = *component + index;
196 const unsigned slot_offset = total_offset / 4;
197 *component = total_offset % 4;
198 return nir_imm_int(b, type_size(glsl_vec4_type(), bts) * slot_offset);
199 }
200
201 /* Just emit code and let constant-folding go to town */
202 nir_ssa_def *offset = nir_imm_int(b, 0);
203
204 for (; *p; p++) {
205 if ((*p)->deref_type == nir_deref_type_array) {
206 unsigned size = type_size((*p)->type, bts);
207
208 nir_ssa_def *mul =
209 nir_amul_imm(b, nir_ssa_for_src(b, (*p)->arr.index, 1), size);
210
211 offset = nir_iadd(b, offset, mul);
212 } else if ((*p)->deref_type == nir_deref_type_struct) {
213 /* p starts at path[1], so this is safe */
214 nir_deref_instr *parent = *(p - 1);
215
216 unsigned field_offset = 0;
217 for (unsigned i = 0; i < (*p)->strct.index; i++) {
218 field_offset += type_size(glsl_get_struct_field(parent->type, i), bts);
219 }
220 offset = nir_iadd_imm(b, offset, field_offset);
221 } else {
222 unreachable("Unsupported deref type");
223 }
224 }
225
226 nir_deref_path_finish(&path);
227
228 return offset;
229 }
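/*
 * Worked example (illustrative): for a per-vertex input whose per-vertex type
 * is vec4[4], the deref chain var[v][i] yields vertex_index = v and
 * offset = i * type_size(vec4, bts); a struct member deref instead adds the
 * summed type_size of all fields preceding the accessed one.
 */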
230
231 static nir_ssa_def *
232 emit_load(struct lower_io_state *state,
233 nir_ssa_def *vertex_index, nir_variable *var, nir_ssa_def *offset,
234 unsigned component, unsigned num_components, unsigned bit_size,
235 nir_alu_type type)
236 {
237 nir_builder *b = &state->builder;
238 const nir_shader *nir = b->shader;
239 nir_variable_mode mode = var->data.mode;
240 nir_ssa_def *barycentric = NULL;
241
242 nir_intrinsic_op op;
243 switch (mode) {
244 case nir_var_shader_in:
245 if (nir->info.stage == MESA_SHADER_FRAGMENT &&
246 nir->options->use_interpolated_input_intrinsics &&
247 var->data.interpolation != INTERP_MODE_FLAT) {
248 assert(vertex_index == NULL);
249
250 nir_intrinsic_op bary_op;
251 if (var->data.sample ||
252 (state->options & nir_lower_io_force_sample_interpolation))
253 bary_op = nir_intrinsic_load_barycentric_sample;
254 else if (var->data.centroid)
255 bary_op = nir_intrinsic_load_barycentric_centroid;
256 else
257 bary_op = nir_intrinsic_load_barycentric_pixel;
258
259 barycentric = nir_load_barycentric(&state->builder, bary_op,
260 var->data.interpolation);
261 op = nir_intrinsic_load_interpolated_input;
262 } else {
263 op = vertex_index ? nir_intrinsic_load_per_vertex_input :
264 nir_intrinsic_load_input;
265 }
266 break;
267 case nir_var_shader_out:
268 op = vertex_index ? nir_intrinsic_load_per_vertex_output :
269 nir_intrinsic_load_output;
270 break;
271 case nir_var_uniform:
272 op = nir_intrinsic_load_uniform;
273 break;
274 case nir_var_mem_shared:
275 op = nir_intrinsic_load_shared;
276 break;
277 default:
278 unreachable("Unknown variable mode");
279 }
280
281 nir_intrinsic_instr *load =
282 nir_intrinsic_instr_create(state->builder.shader, op);
283 load->num_components = num_components;
284
285 nir_intrinsic_set_base(load, var->data.driver_location);
286 if (mode == nir_var_shader_in || mode == nir_var_shader_out)
287 nir_intrinsic_set_component(load, component);
288
289 if (load->intrinsic == nir_intrinsic_load_uniform)
290 nir_intrinsic_set_range(load,
291 state->type_size(var->type, var->data.bindless));
292
293 if (load->intrinsic == nir_intrinsic_load_input ||
294 load->intrinsic == nir_intrinsic_load_uniform)
295 nir_intrinsic_set_type(load, type);
296
297 if (vertex_index) {
298 load->src[0] = nir_src_for_ssa(vertex_index);
299 load->src[1] = nir_src_for_ssa(offset);
300 } else if (barycentric) {
301 load->src[0] = nir_src_for_ssa(barycentric);
302 load->src[1] = nir_src_for_ssa(offset);
303 } else {
304 load->src[0] = nir_src_for_ssa(offset);
305 }
306
307 nir_ssa_dest_init(&load->instr, &load->dest,
308 num_components, bit_size, NULL);
309 nir_builder_instr_insert(b, &load->instr);
310
311 return &load->dest.ssa;
312 }
313
314 static nir_ssa_def *
315 lower_load(nir_intrinsic_instr *intrin, struct lower_io_state *state,
316 nir_ssa_def *vertex_index, nir_variable *var, nir_ssa_def *offset,
317 unsigned component, const struct glsl_type *type)
318 {
319 assert(intrin->dest.is_ssa);
320 if (intrin->dest.ssa.bit_size == 64 &&
321 (state->options & nir_lower_io_lower_64bit_to_32)) {
322 nir_builder *b = &state->builder;
323
324 const unsigned slot_size = state->type_size(glsl_dvec_type(2), false);
325
326 nir_ssa_def *comp64[4];
327 assert(component == 0 || component == 2);
328 unsigned dest_comp = 0;
329 while (dest_comp < intrin->dest.ssa.num_components) {
330 const unsigned num_comps =
331 MIN2(intrin->dest.ssa.num_components - dest_comp,
332 (4 - component) / 2);
333
334 nir_ssa_def *data32 =
335 emit_load(state, vertex_index, var, offset, component,
336 num_comps * 2, 32, nir_type_uint32);
337 for (unsigned i = 0; i < num_comps; i++) {
338 comp64[dest_comp + i] =
339 nir_pack_64_2x32(b, nir_channels(b, data32, 3 << (i * 2)));
340 }
341
342 /* Only the first load has a component offset */
343 component = 0;
344 dest_comp += num_comps;
345 offset = nir_iadd_imm(b, offset, slot_size);
346 }
347
348 return nir_vec(b, comp64, intrin->dest.ssa.num_components);
349 } else {
350 return emit_load(state, vertex_index, var, offset, component,
351 intrin->dest.ssa.num_components,
352 intrin->dest.ssa.bit_size,
353 nir_get_nir_type_for_glsl_type(type));
354 }
355 }
356
357 static void
358 emit_store(struct lower_io_state *state, nir_ssa_def *data,
359 nir_ssa_def *vertex_index, nir_variable *var, nir_ssa_def *offset,
360 unsigned component, unsigned num_components,
361 nir_component_mask_t write_mask, nir_alu_type type)
362 {
363 nir_builder *b = &state->builder;
364 nir_variable_mode mode = var->data.mode;
365
366 nir_intrinsic_op op;
367 if (mode == nir_var_mem_shared) {
368 op = nir_intrinsic_store_shared;
369 } else {
370 assert(mode == nir_var_shader_out);
371 op = vertex_index ? nir_intrinsic_store_per_vertex_output :
372 nir_intrinsic_store_output;
373 }
374
375 nir_intrinsic_instr *store =
376 nir_intrinsic_instr_create(state->builder.shader, op);
377 store->num_components = num_components;
378
379 store->src[0] = nir_src_for_ssa(data);
380
381 nir_intrinsic_set_base(store, var->data.driver_location);
382
383 if (mode == nir_var_shader_out)
384 nir_intrinsic_set_component(store, component);
385
386 if (store->intrinsic == nir_intrinsic_store_output)
387 nir_intrinsic_set_type(store, type);
388
389 nir_intrinsic_set_write_mask(store, write_mask);
390
391 if (vertex_index)
392 store->src[1] = nir_src_for_ssa(vertex_index);
393
394 store->src[vertex_index ? 2 : 1] = nir_src_for_ssa(offset);
395
396 nir_builder_instr_insert(b, &store->instr);
397 }
398
399 static void
400 lower_store(nir_intrinsic_instr *intrin, struct lower_io_state *state,
401 nir_ssa_def *vertex_index, nir_variable *var, nir_ssa_def *offset,
402 unsigned component, const struct glsl_type *type)
403 {
404 assert(intrin->src[1].is_ssa);
405 if (intrin->src[1].ssa->bit_size == 64 &&
406 (state->options & nir_lower_io_lower_64bit_to_32)) {
407 nir_builder *b = &state->builder;
408
409 const unsigned slot_size = state->type_size(glsl_dvec_type(2), false);
410
411 assert(component == 0 || component == 2);
412 unsigned src_comp = 0;
413 nir_component_mask_t write_mask = nir_intrinsic_write_mask(intrin);
414 while (src_comp < intrin->num_components) {
415 const unsigned num_comps =
416 MIN2(intrin->num_components - src_comp,
417 (4 - component) / 2);
418
419 if (write_mask & BITFIELD_MASK(num_comps)) {
420 nir_ssa_def *data =
421 nir_channels(b, intrin->src[1].ssa,
422 BITFIELD_RANGE(src_comp, num_comps));
423 nir_ssa_def *data32 = nir_bitcast_vector(b, data, 32);
424
425 nir_component_mask_t write_mask32 = 0;
426 for (unsigned i = 0; i < num_comps; i++) {
427 if (write_mask & BITFIELD_MASK(num_comps) & (1 << i))
428 write_mask32 |= 3 << (i * 2);
429 }
430
431 emit_store(state, data32, vertex_index, var, offset,
432 component, data32->num_components, write_mask32,
433 nir_type_uint32);
434 }
435
436 /* Only the first store has a component offset */
437 component = 0;
438 src_comp += num_comps;
439 write_mask >>= num_comps;
440 offset = nir_iadd_imm(b, offset, slot_size);
441 }
442 } else {
443 emit_store(state, intrin->src[1].ssa, vertex_index, var, offset,
444 component, intrin->num_components,
445 nir_intrinsic_write_mask(intrin),
446 nir_get_nir_type_for_glsl_type(type));
447 }
448 }
449
450 static nir_ssa_def *
451 lower_atomic(nir_intrinsic_instr *intrin, struct lower_io_state *state,
452 nir_variable *var, nir_ssa_def *offset)
453 {
454 nir_builder *b = &state->builder;
455 assert(var->data.mode == nir_var_mem_shared);
456
457 nir_intrinsic_op op = shared_atomic_for_deref(intrin->intrinsic);
458
459 nir_intrinsic_instr *atomic =
460 nir_intrinsic_instr_create(state->builder.shader, op);
461
462 nir_intrinsic_set_base(atomic, var->data.driver_location);
463
464 atomic->src[0] = nir_src_for_ssa(offset);
465 assert(nir_intrinsic_infos[intrin->intrinsic].num_srcs ==
466 nir_intrinsic_infos[op].num_srcs);
467 for (unsigned i = 1; i < nir_intrinsic_infos[op].num_srcs; i++) {
468 nir_src_copy(&atomic->src[i], &intrin->src[i], atomic);
469 }
470
471 if (nir_intrinsic_infos[op].has_dest) {
472 assert(intrin->dest.is_ssa);
473 assert(nir_intrinsic_infos[intrin->intrinsic].has_dest);
474 nir_ssa_dest_init(&atomic->instr, &atomic->dest,
475 intrin->dest.ssa.num_components,
476 intrin->dest.ssa.bit_size, NULL);
477 }
478
479 nir_builder_instr_insert(b, &atomic->instr);
480
481 return nir_intrinsic_infos[op].has_dest ? &atomic->dest.ssa : NULL;
482 }
483
484 static nir_ssa_def *
485 lower_interpolate_at(nir_intrinsic_instr *intrin, struct lower_io_state *state,
486 nir_variable *var, nir_ssa_def *offset, unsigned component,
487 const struct glsl_type *type)
488 {
489 nir_builder *b = &state->builder;
490 assert(var->data.mode == nir_var_shader_in);
491
492 /* Ignore interpolateAt() for flat variables - flat is flat. */
493 if (var->data.interpolation == INTERP_MODE_FLAT)
494 return lower_load(intrin, state, NULL, var, offset, component, type);
495
496 /* None of the supported APIs allow interpolation on 64-bit things */
497 assert(intrin->dest.is_ssa && intrin->dest.ssa.bit_size <= 32);
498
499 nir_intrinsic_op bary_op;
500 switch (intrin->intrinsic) {
501 case nir_intrinsic_interp_deref_at_centroid:
502 bary_op = (state->options & nir_lower_io_force_sample_interpolation) ?
503 nir_intrinsic_load_barycentric_sample :
504 nir_intrinsic_load_barycentric_centroid;
505 break;
506 case nir_intrinsic_interp_deref_at_sample:
507 bary_op = nir_intrinsic_load_barycentric_at_sample;
508 break;
509 case nir_intrinsic_interp_deref_at_offset:
510 bary_op = nir_intrinsic_load_barycentric_at_offset;
511 break;
512 default:
513 unreachable("Bogus interpolateAt() intrinsic.");
514 }
515
516 nir_intrinsic_instr *bary_setup =
517 nir_intrinsic_instr_create(state->builder.shader, bary_op);
518
519 nir_ssa_dest_init(&bary_setup->instr, &bary_setup->dest, 2, 32, NULL);
520 nir_intrinsic_set_interp_mode(bary_setup, var->data.interpolation);
521
522 if (intrin->intrinsic == nir_intrinsic_interp_deref_at_sample ||
523 intrin->intrinsic == nir_intrinsic_interp_deref_at_offset ||
524 intrin->intrinsic == nir_intrinsic_interp_deref_at_vertex)
525 nir_src_copy(&bary_setup->src[0], &intrin->src[1], bary_setup);
526
527 nir_builder_instr_insert(b, &bary_setup->instr);
528
529 nir_intrinsic_instr *load =
530 nir_intrinsic_instr_create(state->builder.shader,
531 nir_intrinsic_load_interpolated_input);
532 load->num_components = intrin->num_components;
533
534 nir_intrinsic_set_base(load, var->data.driver_location);
535 nir_intrinsic_set_component(load, component);
536
537 load->src[0] = nir_src_for_ssa(&bary_setup->dest.ssa);
538 load->src[1] = nir_src_for_ssa(offset);
539
540 assert(intrin->dest.is_ssa);
541 nir_ssa_dest_init(&load->instr, &load->dest,
542 intrin->dest.ssa.num_components,
543 intrin->dest.ssa.bit_size, NULL);
544 nir_builder_instr_insert(b, &load->instr);
545
546 return &load->dest.ssa;
547 }
548
549 static bool
550 nir_lower_io_block(nir_block *block,
551 struct lower_io_state *state)
552 {
553 nir_builder *b = &state->builder;
554 const nir_shader_compiler_options *options = b->shader->options;
555 bool progress = false;
556
557 nir_foreach_instr_safe(instr, block) {
558 if (instr->type != nir_instr_type_intrinsic)
559 continue;
560
561 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
562
563 switch (intrin->intrinsic) {
564 case nir_intrinsic_load_deref:
565 case nir_intrinsic_store_deref:
566 case nir_intrinsic_deref_atomic_add:
567 case nir_intrinsic_deref_atomic_imin:
568 case nir_intrinsic_deref_atomic_umin:
569 case nir_intrinsic_deref_atomic_imax:
570 case nir_intrinsic_deref_atomic_umax:
571 case nir_intrinsic_deref_atomic_and:
572 case nir_intrinsic_deref_atomic_or:
573 case nir_intrinsic_deref_atomic_xor:
574 case nir_intrinsic_deref_atomic_exchange:
575 case nir_intrinsic_deref_atomic_comp_swap:
576 case nir_intrinsic_deref_atomic_fadd:
577 case nir_intrinsic_deref_atomic_fmin:
578 case nir_intrinsic_deref_atomic_fmax:
579 case nir_intrinsic_deref_atomic_fcomp_swap:
580 /* We can lower the io for this nir intrinsic */
581 break;
582 case nir_intrinsic_interp_deref_at_centroid:
583 case nir_intrinsic_interp_deref_at_sample:
584 case nir_intrinsic_interp_deref_at_offset:
585 case nir_intrinsic_interp_deref_at_vertex:
586 /* We can optionally lower these to load_interpolated_input */
587 if (options->use_interpolated_input_intrinsics)
588 break;
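      /* Otherwise fall through and skip them like any other unhandled
       * intrinsic.
       */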
589 default:
590 /* We can't lower the io for this nir intrinsic, so skip it */
591 continue;
592 }
593
594 nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
595
596 nir_variable_mode mode = deref->mode;
597
598 if ((state->modes & mode) == 0)
599 continue;
600
601 if (mode != nir_var_shader_in &&
602 mode != nir_var_shader_out &&
603 mode != nir_var_mem_shared &&
604 mode != nir_var_uniform)
605 continue;
606
607 nir_variable *var = nir_deref_instr_get_variable(deref);
608
609 b->cursor = nir_before_instr(instr);
610
611 const bool per_vertex = nir_is_per_vertex_io(var, b->shader->info.stage);
612
613 nir_ssa_def *offset;
614 nir_ssa_def *vertex_index = NULL;
615 unsigned component_offset = var->data.location_frac;
616 bool bindless_type_size = mode == nir_var_shader_in ||
617 mode == nir_var_shader_out ||
618 var->data.bindless;
619
620 offset = get_io_offset(b, deref, per_vertex ? &vertex_index : NULL,
621 state->type_size, &component_offset,
622 bindless_type_size);
623
624 nir_ssa_def *replacement = NULL;
625
626 switch (intrin->intrinsic) {
627 case nir_intrinsic_load_deref:
628 replacement = lower_load(intrin, state, vertex_index, var, offset,
629 component_offset, deref->type);
630 break;
631
632 case nir_intrinsic_store_deref:
633 lower_store(intrin, state, vertex_index, var, offset,
634 component_offset, deref->type);
635 break;
636
637 case nir_intrinsic_deref_atomic_add:
638 case nir_intrinsic_deref_atomic_imin:
639 case nir_intrinsic_deref_atomic_umin:
640 case nir_intrinsic_deref_atomic_imax:
641 case nir_intrinsic_deref_atomic_umax:
642 case nir_intrinsic_deref_atomic_and:
643 case nir_intrinsic_deref_atomic_or:
644 case nir_intrinsic_deref_atomic_xor:
645 case nir_intrinsic_deref_atomic_exchange:
646 case nir_intrinsic_deref_atomic_comp_swap:
647 case nir_intrinsic_deref_atomic_fadd:
648 case nir_intrinsic_deref_atomic_fmin:
649 case nir_intrinsic_deref_atomic_fmax:
650 case nir_intrinsic_deref_atomic_fcomp_swap:
651 assert(vertex_index == NULL);
652 replacement = lower_atomic(intrin, state, var, offset);
653 break;
654
655 case nir_intrinsic_interp_deref_at_centroid:
656 case nir_intrinsic_interp_deref_at_sample:
657 case nir_intrinsic_interp_deref_at_offset:
658 case nir_intrinsic_interp_deref_at_vertex:
659 assert(vertex_index == NULL);
660 replacement = lower_interpolate_at(intrin, state, var, offset,
661 component_offset, deref->type);
662 break;
663
664 default:
665 continue;
666 }
667
668 if (replacement) {
669 nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
670 nir_src_for_ssa(replacement));
671 }
672 nir_instr_remove(&intrin->instr);
673 progress = true;
674 }
675
676 return progress;
677 }
678
679 static bool
680 nir_lower_io_impl(nir_function_impl *impl,
681 nir_variable_mode modes,
682 int (*type_size)(const struct glsl_type *, bool),
683 nir_lower_io_options options)
684 {
685 struct lower_io_state state;
686 bool progress = false;
687
688 nir_builder_init(&state.builder, impl);
689 state.dead_ctx = ralloc_context(NULL);
690 state.modes = modes;
691 state.type_size = type_size;
692 state.options = options;
693
694 nir_foreach_block(block, impl) {
695 progress |= nir_lower_io_block(block, &state);
696 }
697
698 ralloc_free(state.dead_ctx);
699
700 nir_metadata_preserve(impl, nir_metadata_block_index |
701 nir_metadata_dominance);
702 return progress;
703 }
704
705 bool
706 nir_lower_io(nir_shader *shader, nir_variable_mode modes,
707 int (*type_size)(const struct glsl_type *, bool),
708 nir_lower_io_options options)
709 {
710 bool progress = false;
711
712 nir_foreach_function(function, shader) {
713 if (function->impl) {
714 progress |= nir_lower_io_impl(function->impl, modes,
715 type_size, options);
716 }
717 }
718
719 return progress;
720 }
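/*
 * A minimal usage sketch (illustrative only; "type_size_vec4" stands for a
 * driver-provided callback that returns the number of slots a type occupies
 * and is not defined in this file):
 *
 *    nir_lower_io(shader, nir_var_shader_in | nir_var_shader_out,
 *                 type_size_vec4, nir_lower_io_lower_64bit_to_32);
 */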
721
722 static unsigned
723 type_scalar_size_bytes(const struct glsl_type *type)
724 {
725 assert(glsl_type_is_vector_or_scalar(type) ||
726 glsl_type_is_matrix(type));
727 return glsl_type_is_boolean(type) ? 4 : glsl_get_bit_size(type) / 8;
728 }
729
730 static nir_ssa_def *
731 build_addr_iadd(nir_builder *b, nir_ssa_def *addr,
732 nir_address_format addr_format, nir_ssa_def *offset)
733 {
734 assert(offset->num_components == 1);
735 assert(addr->bit_size == offset->bit_size);
736
737 switch (addr_format) {
738 case nir_address_format_32bit_global:
739 case nir_address_format_64bit_global:
740 case nir_address_format_32bit_offset:
741 assert(addr->num_components == 1);
742 return nir_iadd(b, addr, offset);
743
744 case nir_address_format_64bit_bounded_global:
745 assert(addr->num_components == 4);
746 return nir_vec4(b, nir_channel(b, addr, 0),
747 nir_channel(b, addr, 1),
748 nir_channel(b, addr, 2),
749 nir_iadd(b, nir_channel(b, addr, 3), offset));
750
751 case nir_address_format_32bit_index_offset:
752 assert(addr->num_components == 2);
753 return nir_vec2(b, nir_channel(b, addr, 0),
754 nir_iadd(b, nir_channel(b, addr, 1), offset));
755 case nir_address_format_logical:
756 unreachable("Unsupported address format");
757 }
758 unreachable("Invalid address format");
759 }
760
761 static nir_ssa_def *
762 build_addr_iadd_imm(nir_builder *b, nir_ssa_def *addr,
763 nir_address_format addr_format, int64_t offset)
764 {
765 return build_addr_iadd(b, addr, addr_format,
766 nir_imm_intN_t(b, offset, addr->bit_size));
767 }
768
769 static nir_ssa_def *
770 addr_to_index(nir_builder *b, nir_ssa_def *addr,
771 nir_address_format addr_format)
772 {
773 assert(addr_format == nir_address_format_32bit_index_offset);
774 assert(addr->num_components == 2);
775 return nir_channel(b, addr, 0);
776 }
777
778 static nir_ssa_def *
779 addr_to_offset(nir_builder *b, nir_ssa_def *addr,
780 nir_address_format addr_format)
781 {
782 assert(addr_format == nir_address_format_32bit_index_offset);
783 assert(addr->num_components == 2);
784 return nir_channel(b, addr, 1);
785 }
786
787 /** Returns true if the given address format resolves to a global address */
788 static bool
789 addr_format_is_global(nir_address_format addr_format)
790 {
791 return addr_format == nir_address_format_32bit_global ||
792 addr_format == nir_address_format_64bit_global ||
793 addr_format == nir_address_format_64bit_bounded_global;
794 }
795
796 static nir_ssa_def *
797 addr_to_global(nir_builder *b, nir_ssa_def *addr,
798 nir_address_format addr_format)
799 {
800 switch (addr_format) {
801 case nir_address_format_32bit_global:
802 case nir_address_format_64bit_global:
803 assert(addr->num_components == 1);
804 return addr;
805
806 case nir_address_format_64bit_bounded_global:
807 assert(addr->num_components == 4);
808 return nir_iadd(b, nir_pack_64_2x32(b, nir_channels(b, addr, 0x3)),
809 nir_u2u64(b, nir_channel(b, addr, 3)));
810
811 case nir_address_format_32bit_index_offset:
812 case nir_address_format_32bit_offset:
813 case nir_address_format_logical:
814 unreachable("Cannot get a 64-bit address with this address format");
815 }
816
817 unreachable("Invalid address format");
818 }
819
820 static bool
821 addr_format_needs_bounds_check(nir_address_format addr_format)
822 {
823 return addr_format == nir_address_format_64bit_bounded_global;
824 }
825
826 static nir_ssa_def *
827 addr_is_in_bounds(nir_builder *b, nir_ssa_def *addr,
828 nir_address_format addr_format, unsigned size)
829 {
830 assert(addr_format == nir_address_format_64bit_bounded_global);
831 assert(addr->num_components == 4);
832 return nir_ige(b, nir_channel(b, addr, 2),
833 nir_iadd_imm(b, nir_channel(b, addr, 3), size));
834 }
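/*
 * Layout note for nir_address_format_64bit_bounded_global (as used above):
 * the address is a vec4 where components 0 and 1 hold the two 32-bit halves
 * of the 64-bit base pointer, component 2 holds the bound (size), and
 * component 3 holds a 32-bit offset from the base.  addr_to_global() packs
 * components 0-1 and adds component 3, while addr_is_in_bounds() checks
 * component 3 + access size against component 2.
 */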
835
836 static nir_ssa_def *
837 build_explicit_io_load(nir_builder *b, nir_intrinsic_instr *intrin,
838 nir_ssa_def *addr, nir_address_format addr_format,
839 unsigned num_components)
840 {
841 nir_variable_mode mode = nir_src_as_deref(intrin->src[0])->mode;
842
843 nir_intrinsic_op op;
844 switch (mode) {
845 case nir_var_mem_ubo:
846 op = nir_intrinsic_load_ubo;
847 break;
848 case nir_var_mem_ssbo:
849 if (addr_format_is_global(addr_format))
850 op = nir_intrinsic_load_global;
851 else
852 op = nir_intrinsic_load_ssbo;
853 break;
854 case nir_var_mem_global:
855 assert(addr_format_is_global(addr_format));
856 op = nir_intrinsic_load_global;
857 break;
858 case nir_var_shader_in:
859 assert(addr_format_is_global(addr_format));
860 op = nir_intrinsic_load_kernel_input;
861 break;
862 case nir_var_mem_shared:
863 assert(addr_format == nir_address_format_32bit_offset);
864 op = nir_intrinsic_load_shared;
865 break;
866 default:
867 unreachable("Unsupported explicit IO variable mode");
868 }
869
870 nir_intrinsic_instr *load = nir_intrinsic_instr_create(b->shader, op);
871
872 if (addr_format_is_global(addr_format)) {
873 load->src[0] = nir_src_for_ssa(addr_to_global(b, addr, addr_format));
874 } else if (addr_format == nir_address_format_32bit_offset) {
875 assert(addr->num_components == 1);
876 load->src[0] = nir_src_for_ssa(addr);
877 } else {
878 load->src[0] = nir_src_for_ssa(addr_to_index(b, addr, addr_format));
879 load->src[1] = nir_src_for_ssa(addr_to_offset(b, addr, addr_format));
880 }
881
882 if (mode != nir_var_mem_ubo && mode != nir_var_shader_in && mode != nir_var_mem_shared)
883 nir_intrinsic_set_access(load, nir_intrinsic_access(intrin));
884
885 unsigned bit_size = intrin->dest.ssa.bit_size;
886 if (bit_size == 1) {
887 /* TODO: Make the native bool bit_size an option. */
888 bit_size = 32;
889 }
890
891 /* TODO: We should try and provide a better alignment. For OpenCL, we need
892 * to plumb the alignment through from SPIR-V when we have one.
893 */
894 nir_intrinsic_set_align(load, bit_size / 8, 0);
895
896 assert(intrin->dest.is_ssa);
897 load->num_components = num_components;
898 nir_ssa_dest_init(&load->instr, &load->dest, num_components,
899 bit_size, intrin->dest.ssa.name);
900
901 assert(bit_size % 8 == 0);
902
903 nir_ssa_def *result;
904 if (addr_format_needs_bounds_check(addr_format)) {
905 /* The Vulkan spec for robustBufferAccess gives us quite a few options
906 * as to what we can do with an OOB read. Unfortunately, returning
907 * undefined values isn't one of them so we return an actual zero.
908 */
909 nir_ssa_def *zero = nir_imm_zero(b, load->num_components, bit_size);
910
911 const unsigned load_size = (bit_size / 8) * load->num_components;
912 nir_push_if(b, addr_is_in_bounds(b, addr, addr_format, load_size));
913
914 nir_builder_instr_insert(b, &load->instr);
915
916 nir_pop_if(b, NULL);
917
918 result = nir_if_phi(b, &load->dest.ssa, zero);
919 } else {
920 nir_builder_instr_insert(b, &load->instr);
921 result = &load->dest.ssa;
922 }
923
924 if (intrin->dest.ssa.bit_size == 1)
925 result = nir_i2b(b, result);
926
927 return result;
928 }
929
930 static void
931 build_explicit_io_store(nir_builder *b, nir_intrinsic_instr *intrin,
932 nir_ssa_def *addr, nir_address_format addr_format,
933 nir_ssa_def *value, nir_component_mask_t write_mask)
934 {
935 nir_variable_mode mode = nir_src_as_deref(intrin->src[0])->mode;
936
937 nir_intrinsic_op op;
938 switch (mode) {
939 case nir_var_mem_ssbo:
940 if (addr_format_is_global(addr_format))
941 op = nir_intrinsic_store_global;
942 else
943 op = nir_intrinsic_store_ssbo;
944 break;
945 case nir_var_mem_global:
946 assert(addr_format_is_global(addr_format));
947 op = nir_intrinsic_store_global;
948 break;
949 case nir_var_mem_shared:
950 assert(addr_format == nir_address_format_32bit_offset);
951 op = nir_intrinsic_store_shared;
952 break;
953 default:
954 unreachable("Unsupported explicit IO variable mode");
955 }
956
957 nir_intrinsic_instr *store = nir_intrinsic_instr_create(b->shader, op);
958
959 if (value->bit_size == 1) {
960 /* TODO: Make the native bool bit_size an option. */
961 value = nir_b2i(b, value, 32);
962 }
963
964 store->src[0] = nir_src_for_ssa(value);
965 if (addr_format_is_global(addr_format)) {
966 store->src[1] = nir_src_for_ssa(addr_to_global(b, addr, addr_format));
967 } else if (addr_format == nir_address_format_32bit_offset) {
968 assert(addr->num_components == 1);
969 store->src[1] = nir_src_for_ssa(addr);
970 } else {
971 store->src[1] = nir_src_for_ssa(addr_to_index(b, addr, addr_format));
972 store->src[2] = nir_src_for_ssa(addr_to_offset(b, addr, addr_format));
973 }
974
975 nir_intrinsic_set_write_mask(store, write_mask);
976
977 if (mode != nir_var_mem_shared)
978 nir_intrinsic_set_access(store, nir_intrinsic_access(intrin));
979
980 /* TODO: We should try and provide a better alignment. For OpenCL, we need
981 * to plumb the alignment through from SPIR-V when we have one.
982 */
983 nir_intrinsic_set_align(store, value->bit_size / 8, 0);
984
985 assert(value->num_components == 1 ||
986 value->num_components == intrin->num_components);
987 store->num_components = value->num_components;
988
989 assert(value->bit_size % 8 == 0);
990
991 if (addr_format_needs_bounds_check(addr_format)) {
992 const unsigned store_size = (value->bit_size / 8) * store->num_components;
993 nir_push_if(b, addr_is_in_bounds(b, addr, addr_format, store_size));
994
995 nir_builder_instr_insert(b, &store->instr);
996
997 nir_pop_if(b, NULL);
998 } else {
999 nir_builder_instr_insert(b, &store->instr);
1000 }
1001 }
1002
1003 static nir_ssa_def *
1004 build_explicit_io_atomic(nir_builder *b, nir_intrinsic_instr *intrin,
1005 nir_ssa_def *addr, nir_address_format addr_format)
1006 {
1007 nir_variable_mode mode = nir_src_as_deref(intrin->src[0])->mode;
1008 const unsigned num_data_srcs =
1009 nir_intrinsic_infos[intrin->intrinsic].num_srcs - 1;
1010
1011 nir_intrinsic_op op;
1012 switch (mode) {
1013 case nir_var_mem_ssbo:
1014 if (addr_format_is_global(addr_format))
1015 op = global_atomic_for_deref(intrin->intrinsic);
1016 else
1017 op = ssbo_atomic_for_deref(intrin->intrinsic);
1018 break;
1019 case nir_var_mem_global:
1020 assert(addr_format_is_global(addr_format));
1021 op = global_atomic_for_deref(intrin->intrinsic);
1022 break;
1023 case nir_var_mem_shared:
1024 assert(addr_format == nir_address_format_32bit_offset);
1025 op = shared_atomic_for_deref(intrin->intrinsic);
1026 break;
1027 default:
1028 unreachable("Unsupported explicit IO variable mode");
1029 }
1030
1031 nir_intrinsic_instr *atomic = nir_intrinsic_instr_create(b->shader, op);
1032
1033 unsigned src = 0;
1034 if (addr_format_is_global(addr_format)) {
1035 atomic->src[src++] = nir_src_for_ssa(addr_to_global(b, addr, addr_format));
1036 } else if (addr_format == nir_address_format_32bit_offset) {
1037 assert(addr->num_components == 1);
1038 atomic->src[src++] = nir_src_for_ssa(addr);
1039 } else {
1040 atomic->src[src++] = nir_src_for_ssa(addr_to_index(b, addr, addr_format));
1041 atomic->src[src++] = nir_src_for_ssa(addr_to_offset(b, addr, addr_format));
1042 }
1043 for (unsigned i = 0; i < num_data_srcs; i++) {
1044 atomic->src[src++] = nir_src_for_ssa(intrin->src[1 + i].ssa);
1045 }
1046
1047 /* Global atomics don't have access flags because they assume that the
1048 * address may be non-uniform.
1049 */
1050 if (!addr_format_is_global(addr_format) && mode != nir_var_mem_shared)
1051 nir_intrinsic_set_access(atomic, nir_intrinsic_access(intrin));
1052
1053 assert(intrin->dest.ssa.num_components == 1);
1054 nir_ssa_dest_init(&atomic->instr, &atomic->dest,
1055 1, intrin->dest.ssa.bit_size, intrin->dest.ssa.name);
1056
1057 assert(atomic->dest.ssa.bit_size % 8 == 0);
1058
1059 if (addr_format_needs_bounds_check(addr_format)) {
1060 const unsigned atomic_size = atomic->dest.ssa.bit_size / 8;
1061 nir_push_if(b, addr_is_in_bounds(b, addr, addr_format, atomic_size));
1062
1063 nir_builder_instr_insert(b, &atomic->instr);
1064
1065 nir_pop_if(b, NULL);
1066 return nir_if_phi(b, &atomic->dest.ssa,
1067 nir_ssa_undef(b, 1, atomic->dest.ssa.bit_size));
1068 } else {
1069 nir_builder_instr_insert(b, &atomic->instr);
1070 return &atomic->dest.ssa;
1071 }
1072 }
1073
1074 nir_ssa_def *
1075 nir_explicit_io_address_from_deref(nir_builder *b, nir_deref_instr *deref,
1076 nir_ssa_def *base_addr,
1077 nir_address_format addr_format)
1078 {
1079 assert(deref->dest.is_ssa);
1080 switch (deref->deref_type) {
1081 case nir_deref_type_var:
1082 assert(deref->mode & (nir_var_shader_in | nir_var_mem_shared));
1083 return nir_imm_intN_t(b, deref->var->data.driver_location,
1084 deref->dest.ssa.bit_size);
1085
1086 case nir_deref_type_array: {
1087 nir_deref_instr *parent = nir_deref_instr_parent(deref);
1088
1089 unsigned stride = glsl_get_explicit_stride(parent->type);
1090 if ((glsl_type_is_matrix(parent->type) &&
1091 glsl_matrix_type_is_row_major(parent->type)) ||
1092 (glsl_type_is_vector(parent->type) && stride == 0))
1093 stride = type_scalar_size_bytes(parent->type);
1094
1095 assert(stride > 0);
1096
1097 nir_ssa_def *index = nir_ssa_for_src(b, deref->arr.index, 1);
1098 index = nir_i2i(b, index, base_addr->bit_size);
1099 return build_addr_iadd(b, base_addr, addr_format,
1100 nir_amul_imm(b, index, stride));
1101 }
1102
1103 case nir_deref_type_ptr_as_array: {
1104 nir_ssa_def *index = nir_ssa_for_src(b, deref->arr.index, 1);
1105 index = nir_i2i(b, index, base_addr->bit_size);
1106 unsigned stride = nir_deref_instr_ptr_as_array_stride(deref);
1107 return build_addr_iadd(b, base_addr, addr_format,
1108 nir_amul_imm(b, index, stride));
1109 }
1110
1111 case nir_deref_type_array_wildcard:
1112 unreachable("Wildcards should be lowered by now");
1113 break;
1114
1115 case nir_deref_type_struct: {
1116 nir_deref_instr *parent = nir_deref_instr_parent(deref);
1117 int offset = glsl_get_struct_field_offset(parent->type,
1118 deref->strct.index);
1119 assert(offset >= 0);
1120 return build_addr_iadd_imm(b, base_addr, addr_format, offset);
1121 }
1122
1123 case nir_deref_type_cast:
1124 /* Nothing to do here */
1125 return base_addr;
1126 }
1127
1128 unreachable("Invalid NIR deref type");
1129 }
1130
1131 void
1132 nir_lower_explicit_io_instr(nir_builder *b,
1133 nir_intrinsic_instr *intrin,
1134 nir_ssa_def *addr,
1135 nir_address_format addr_format)
1136 {
1137 b->cursor = nir_after_instr(&intrin->instr);
1138
1139 nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
1140 unsigned vec_stride = glsl_get_explicit_stride(deref->type);
1141 unsigned scalar_size = type_scalar_size_bytes(deref->type);
1142 assert(vec_stride == 0 || glsl_type_is_vector(deref->type));
1143 assert(vec_stride == 0 || vec_stride >= scalar_size);
1144
1145 if (intrin->intrinsic == nir_intrinsic_load_deref) {
1146 nir_ssa_def *value;
1147 if (vec_stride > scalar_size) {
1148 nir_ssa_def *comps[4] = { NULL, };
1149 for (unsigned i = 0; i < intrin->num_components; i++) {
1150 nir_ssa_def *comp_addr = build_addr_iadd_imm(b, addr, addr_format,
1151 vec_stride * i);
1152 comps[i] = build_explicit_io_load(b, intrin, comp_addr,
1153 addr_format, 1);
1154 }
1155 value = nir_vec(b, comps, intrin->num_components);
1156 } else {
1157 value = build_explicit_io_load(b, intrin, addr, addr_format,
1158 intrin->num_components);
1159 }
1160 nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(value));
1161 } else if (intrin->intrinsic == nir_intrinsic_store_deref) {
1162 assert(intrin->src[1].is_ssa);
1163 nir_ssa_def *value = intrin->src[1].ssa;
1164 nir_component_mask_t write_mask = nir_intrinsic_write_mask(intrin);
1165 if (vec_stride > scalar_size) {
1166 for (unsigned i = 0; i < intrin->num_components; i++) {
1167 if (!(write_mask & (1 << i)))
1168 continue;
1169
1170 nir_ssa_def *comp_addr = build_addr_iadd_imm(b, addr, addr_format,
1171 vec_stride * i);
1172 build_explicit_io_store(b, intrin, comp_addr, addr_format,
1173 nir_channel(b, value, i), 1);
1174 }
1175 } else {
1176 build_explicit_io_store(b, intrin, addr, addr_format,
1177 value, write_mask);
1178 }
1179 } else {
1180 nir_ssa_def *value =
1181 build_explicit_io_atomic(b, intrin, addr, addr_format);
1182 nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(value));
1183 }
1184
1185 nir_instr_remove(&intrin->instr);
1186 }
1187
1188 static void
1189 lower_explicit_io_deref(nir_builder *b, nir_deref_instr *deref,
1190 nir_address_format addr_format)
1191 {
1192 /* Just delete the deref if it's not used. We can't use
1193 * nir_deref_instr_remove_if_unused here because it may remove more than
1194 * one deref which could break our list walking since we walk the list
1195 * backwards.
1196 */
1197 assert(list_is_empty(&deref->dest.ssa.if_uses));
1198 if (list_is_empty(&deref->dest.ssa.uses)) {
1199 nir_instr_remove(&deref->instr);
1200 return;
1201 }
1202
1203 b->cursor = nir_after_instr(&deref->instr);
1204
1205 nir_ssa_def *base_addr = NULL;
1206 if (deref->deref_type != nir_deref_type_var) {
1207 assert(deref->parent.is_ssa);
1208 base_addr = deref->parent.ssa;
1209 }
1210
1211 nir_ssa_def *addr = nir_explicit_io_address_from_deref(b, deref, base_addr,
1212 addr_format);
1213
1214 nir_instr_remove(&deref->instr);
1215 nir_ssa_def_rewrite_uses(&deref->dest.ssa, nir_src_for_ssa(addr));
1216 }
1217
1218 static void
1219 lower_explicit_io_access(nir_builder *b, nir_intrinsic_instr *intrin,
1220 nir_address_format addr_format)
1221 {
1222 assert(intrin->src[0].is_ssa);
1223 nir_lower_explicit_io_instr(b, intrin, intrin->src[0].ssa, addr_format);
1224 }
1225
1226 static void
1227 lower_explicit_io_array_length(nir_builder *b, nir_intrinsic_instr *intrin,
1228 nir_address_format addr_format)
1229 {
1230 b->cursor = nir_after_instr(&intrin->instr);
1231
1232 nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
1233
1234 assert(glsl_type_is_array(deref->type));
1235 assert(glsl_get_length(deref->type) == 0);
1236 unsigned stride = glsl_get_explicit_stride(deref->type);
1237 assert(stride > 0);
1238
1239 assert(addr_format == nir_address_format_32bit_index_offset);
1240 nir_ssa_def *addr = &deref->dest.ssa;
1241 nir_ssa_def *index = addr_to_index(b, addr, addr_format);
1242 nir_ssa_def *offset = addr_to_offset(b, addr, addr_format);
1243
1244 nir_intrinsic_instr *bsize =
1245 nir_intrinsic_instr_create(b->shader, nir_intrinsic_get_buffer_size);
1246 bsize->src[0] = nir_src_for_ssa(index);
1247 nir_ssa_dest_init(&bsize->instr, &bsize->dest, 1, 32, NULL);
1248 nir_builder_instr_insert(b, &bsize->instr);
1249
1250 nir_ssa_def *arr_size =
1251 nir_idiv(b, nir_isub(b, &bsize->dest.ssa, offset),
1252 nir_imm_int(b, stride));
1253
1254 nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(arr_size));
1255 nir_instr_remove(&intrin->instr);
1256 }
1257
1258 static bool
1259 nir_lower_explicit_io_impl(nir_function_impl *impl, nir_variable_mode modes,
1260 nir_address_format addr_format)
1261 {
1262 bool progress = false;
1263
1264 nir_builder b;
1265 nir_builder_init(&b, impl);
1266
1267 /* Walk in reverse order so that we can see the full deref chain when we
1268 * lower the access operations. We lower them assuming that the derefs
1269 * will be turned into address calculations later.
1270 */
1271 nir_foreach_block_reverse(block, impl) {
1272 nir_foreach_instr_reverse_safe(instr, block) {
1273 switch (instr->type) {
1274 case nir_instr_type_deref: {
1275 nir_deref_instr *deref = nir_instr_as_deref(instr);
1276 if (deref->mode & modes) {
1277 lower_explicit_io_deref(&b, deref, addr_format);
1278 progress = true;
1279 }
1280 break;
1281 }
1282
1283 case nir_instr_type_intrinsic: {
1284 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
1285 switch (intrin->intrinsic) {
1286 case nir_intrinsic_load_deref:
1287 case nir_intrinsic_store_deref:
1288 case nir_intrinsic_deref_atomic_add:
1289 case nir_intrinsic_deref_atomic_imin:
1290 case nir_intrinsic_deref_atomic_umin:
1291 case nir_intrinsic_deref_atomic_imax:
1292 case nir_intrinsic_deref_atomic_umax:
1293 case nir_intrinsic_deref_atomic_and:
1294 case nir_intrinsic_deref_atomic_or:
1295 case nir_intrinsic_deref_atomic_xor:
1296 case nir_intrinsic_deref_atomic_exchange:
1297 case nir_intrinsic_deref_atomic_comp_swap:
1298 case nir_intrinsic_deref_atomic_fadd:
1299 case nir_intrinsic_deref_atomic_fmin:
1300 case nir_intrinsic_deref_atomic_fmax:
1301 case nir_intrinsic_deref_atomic_fcomp_swap: {
1302 nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
1303 if (deref->mode & modes) {
1304 lower_explicit_io_access(&b, intrin, addr_format);
1305 progress = true;
1306 }
1307 break;
1308 }
1309
1310 case nir_intrinsic_deref_buffer_array_length: {
1311 nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
1312 if (deref->mode & modes) {
1313 lower_explicit_io_array_length(&b, intrin, addr_format);
1314 progress = true;
1315 }
1316 break;
1317 }
1318
1319 default:
1320 break;
1321 }
1322 break;
1323 }
1324
1325 default:
1326 /* Nothing to do */
1327 break;
1328 }
1329 }
1330 }
1331
1332 if (progress) {
1333 nir_metadata_preserve(impl, nir_metadata_block_index |
1334 nir_metadata_dominance);
1335 }
1336
1337 return progress;
1338 }
1339
1340 bool
1341 nir_lower_explicit_io(nir_shader *shader, nir_variable_mode modes,
1342 nir_address_format addr_format)
1343 {
1344 bool progress = false;
1345
1346 nir_foreach_function(function, shader) {
1347 if (function->impl &&
1348 nir_lower_explicit_io_impl(function->impl, modes, addr_format))
1349 progress = true;
1350 }
1351
1352 return progress;
1353 }
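/*
 * A minimal usage sketch (illustrative only): a Vulkan-style driver might
 * lower UBO/SSBO derefs to index+offset addressing and shared memory to
 * plain 32-bit offsets, e.g.
 *
 *    nir_lower_explicit_io(shader, nir_var_mem_ubo | nir_var_mem_ssbo,
 *                          nir_address_format_32bit_index_offset);
 *    nir_lower_explicit_io(shader, nir_var_mem_shared,
 *                          nir_address_format_32bit_offset);
 */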
1354
1355 static bool
1356 nir_lower_vars_to_explicit_types_impl(nir_function_impl *impl,
1357 nir_variable_mode modes,
1358 glsl_type_size_align_func type_info)
1359 {
1360 bool progress = false;
1361
1362 nir_foreach_block(block, impl) {
1363 nir_foreach_instr(instr, block) {
1364 if (instr->type != nir_instr_type_deref)
1365 continue;
1366
1367 nir_deref_instr *deref = nir_instr_as_deref(instr);
1368 if (!(deref->mode & modes))
1369 continue;
1370
1371 unsigned size, alignment;
1372 const struct glsl_type *new_type =
1373 glsl_get_explicit_type_for_size_align(deref->type, type_info, &size, &alignment);
1374 if (new_type != deref->type) {
1375 progress = true;
1376 deref->type = new_type;
1377 }
1378 if (deref->deref_type == nir_deref_type_cast) {
1379 /* See also glsl_type::get_explicit_type_for_size_align() */
1380 unsigned new_stride = align(size, alignment);
1381 if (new_stride != deref->cast.ptr_stride) {
1382 deref->cast.ptr_stride = new_stride;
1383 progress = true;
1384 }
1385 }
1386 }
1387 }
1388
1389 if (progress) {
1390 nir_metadata_preserve(impl, nir_metadata_block_index |
1391 nir_metadata_dominance |
1392 nir_metadata_live_ssa_defs |
1393 nir_metadata_loop_analysis);
1394 }
1395
1396 return progress;
1397 }
1398
1399 static bool
1400 lower_vars_to_explicit(nir_shader *shader,
1401 struct exec_list *vars, nir_variable_mode mode,
1402 glsl_type_size_align_func type_info)
1403 {
1404 bool progress = false;
1405 unsigned offset = 0;
1406 nir_foreach_variable(var, vars) {
1407 unsigned size, align;
1408 const struct glsl_type *explicit_type =
1409 glsl_get_explicit_type_for_size_align(var->type, type_info, &size, &align);
1410
1411 if (explicit_type != var->type) {
1412 progress = true;
1413 var->type = explicit_type;
1414 }
1415
1416 var->data.driver_location = ALIGN_POT(offset, align);
1417 offset = var->data.driver_location + size;
1418 }
1419
1420 if (mode == nir_var_mem_shared) {
1421 shader->info.cs.shared_size = offset;
1422 shader->num_shared = offset;
1423 }
1424
1425 return progress;
1426 }
1427
1428 bool
1429 nir_lower_vars_to_explicit_types(nir_shader *shader,
1430 nir_variable_mode modes,
1431 glsl_type_size_align_func type_info)
1432 {
1433 /* TODO: Situations which need to be handled to support more modes:
1434 * - row-major matrices
1435 * - compact shader inputs/outputs
1436 * - interface types
1437 */
1438 ASSERTED nir_variable_mode supported = nir_var_mem_shared |
1439 nir_var_shader_temp | nir_var_function_temp;
1440 assert(!(modes & ~supported) && "unsupported");
1441
1442 bool progress = false;
1443
1444 if (modes & nir_var_mem_shared)
1445 progress |= lower_vars_to_explicit(shader, &shader->shared, nir_var_mem_shared, type_info);
1446 if (modes & nir_var_shader_temp)
1447 progress |= lower_vars_to_explicit(shader, &shader->globals, nir_var_shader_temp, type_info);
1448
1449 nir_foreach_function(function, shader) {
1450 if (function->impl) {
1451 if (modes & nir_var_function_temp)
1452 progress |= lower_vars_to_explicit(shader, &function->impl->locals, nir_var_function_temp, type_info);
1453
1454 progress |= nir_lower_vars_to_explicit_types_impl(function->impl, modes, type_info);
1455 }
1456 }
1457
1458 return progress;
1459 }
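/*
 * A minimal usage sketch (illustrative only; assumes the
 * glsl_get_natural_size_align_bytes() helper from nir_types.h as the
 * glsl_type_size_align_func):
 *
 *    nir_lower_vars_to_explicit_types(shader, nir_var_mem_shared,
 *                                     glsl_get_natural_size_align_bytes);
 */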
1460
1461 /**
1462 * Return the offset source for a load/store intrinsic.
1463 */
1464 nir_src *
1465 nir_get_io_offset_src(nir_intrinsic_instr *instr)
1466 {
1467 switch (instr->intrinsic) {
1468 case nir_intrinsic_load_input:
1469 case nir_intrinsic_load_output:
1470 case nir_intrinsic_load_shared:
1471 case nir_intrinsic_load_uniform:
1472 case nir_intrinsic_load_global:
1473 case nir_intrinsic_load_scratch:
1474 case nir_intrinsic_load_fs_input_interp_deltas:
1475 return &instr->src[0];
1476 case nir_intrinsic_load_ubo:
1477 case nir_intrinsic_load_ssbo:
1478 case nir_intrinsic_load_per_vertex_input:
1479 case nir_intrinsic_load_per_vertex_output:
1480 case nir_intrinsic_load_interpolated_input:
1481 case nir_intrinsic_store_output:
1482 case nir_intrinsic_store_shared:
1483 case nir_intrinsic_store_global:
1484 case nir_intrinsic_store_scratch:
1485 return &instr->src[1];
1486 case nir_intrinsic_store_ssbo:
1487 case nir_intrinsic_store_per_vertex_output:
1488 return &instr->src[2];
1489 default:
1490 return NULL;
1491 }
1492 }
1493
1494 /**
1495 * Return the vertex index source for a load/store per_vertex intrinsic.
1496 */
1497 nir_src *
1498 nir_get_io_vertex_index_src(nir_intrinsic_instr *instr)
1499 {
1500 switch (instr->intrinsic) {
1501 case nir_intrinsic_load_per_vertex_input:
1502 case nir_intrinsic_load_per_vertex_output:
1503 return &instr->src[0];
1504 case nir_intrinsic_store_per_vertex_output:
1505 return &instr->src[1];
1506 default:
1507 return NULL;
1508 }
1509 }
1510
1511 /**
1512 * Return the numeric constant that identify a NULL pointer for each address
1513 * format.
1514 */
1515 const nir_const_value *
1516 nir_address_format_null_value(nir_address_format addr_format)
1517 {
1518 const static nir_const_value null_values[][NIR_MAX_VEC_COMPONENTS] = {
1519 [nir_address_format_32bit_global] = {{0}},
1520 [nir_address_format_64bit_global] = {{0}},
1521 [nir_address_format_64bit_bounded_global] = {{0}},
1522 [nir_address_format_32bit_index_offset] = {{.u32 = ~0}, {.u32 = ~0}},
1523 [nir_address_format_32bit_offset] = {{.u32 = ~0}},
1524 [nir_address_format_logical] = {{.u32 = ~0}},
1525 };
1526
1527 assert(addr_format < ARRAY_SIZE(null_values));
1528 return null_values[addr_format];
1529 }
1530
1531 nir_ssa_def *
1532 nir_build_addr_ieq(nir_builder *b, nir_ssa_def *addr0, nir_ssa_def *addr1,
1533 nir_address_format addr_format)
1534 {
1535 switch (addr_format) {
1536 case nir_address_format_32bit_global:
1537 case nir_address_format_64bit_global:
1538 case nir_address_format_64bit_bounded_global:
1539 case nir_address_format_32bit_index_offset:
1540 case nir_address_format_32bit_offset:
1541 return nir_ball_iequal(b, addr0, addr1);
1542
1543 case nir_address_format_logical:
1544 unreachable("Unsupported address format");
1545 }
1546
1547 unreachable("Invalid address format");
1548 }
1549
1550 nir_ssa_def *
1551 nir_build_addr_isub(nir_builder *b, nir_ssa_def *addr0, nir_ssa_def *addr1,
1552 nir_address_format addr_format)
1553 {
1554 switch (addr_format) {
1555 case nir_address_format_32bit_global:
1556 case nir_address_format_64bit_global:
1557 case nir_address_format_32bit_offset:
1558 assert(addr0->num_components == 1);
1559 assert(addr1->num_components == 1);
1560 return nir_isub(b, addr0, addr1);
1561
1562 case nir_address_format_64bit_bounded_global:
1563 return nir_isub(b, addr_to_global(b, addr0, addr_format),
1564 addr_to_global(b, addr1, addr_format));
1565
1566 case nir_address_format_32bit_index_offset:
1567 assert(addr0->num_components == 2);
1568 assert(addr1->num_components == 2);
1569 /* Assume the same buffer index. */
1570 return nir_isub(b, nir_channel(b, addr0, 1), nir_channel(b, addr1, 1));
1571
1572 case nir_address_format_logical:
1573 unreachable("Unsupported address format");
1574 }
1575
1576 unreachable("Invalid address format");
1577 }
1578
1579 static bool
1580 is_input(nir_intrinsic_instr *intrin)
1581 {
1582 return intrin->intrinsic == nir_intrinsic_load_input ||
1583 intrin->intrinsic == nir_intrinsic_load_per_vertex_input ||
1584 intrin->intrinsic == nir_intrinsic_load_interpolated_input ||
1585 intrin->intrinsic == nir_intrinsic_load_fs_input_interp_deltas;
1586 }
1587
1588 static bool
1589 is_output(nir_intrinsic_instr *intrin)
1590 {
1591 return intrin->intrinsic == nir_intrinsic_load_output ||
1592 intrin->intrinsic == nir_intrinsic_load_per_vertex_output ||
1593 intrin->intrinsic == nir_intrinsic_store_output ||
1594 intrin->intrinsic == nir_intrinsic_store_per_vertex_output;
1595 }
1596
1597
1598 /**
1599 * This pass adds constant offsets to instr->const_index[0] for input/output
1600 * intrinsics, and resets the offset source to 0. Non-constant offsets remain
1601 * unchanged - since we don't know what part of a compound variable is
1602 * accessed, we allocate storage for the entire thing. For drivers that use
1603 * nir_lower_io_to_temporaries() before nir_lower_io(), this guarantees that
1604 * the offset source will be 0, so that they don't have to add it in manually.
1605 */
1606
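/*
 * For example (illustrative): a load_input with base = 4 and a constant
 * offset source of 2 becomes a load_input with base = 6 and an offset
 * source of 0.
 */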
1607 static bool
1608 add_const_offset_to_base_block(nir_block *block, nir_builder *b,
1609 nir_variable_mode mode)
1610 {
1611 bool progress = false;
1612 nir_foreach_instr_safe(instr, block) {
1613 if (instr->type != nir_instr_type_intrinsic)
1614 continue;
1615
1616 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
1617
1618 if ((mode == nir_var_shader_in && is_input(intrin)) ||
1619 (mode == nir_var_shader_out && is_output(intrin))) {
1620 nir_src *offset = nir_get_io_offset_src(intrin);
1621
1622 if (nir_src_is_const(*offset)) {
1623 intrin->const_index[0] += nir_src_as_uint(*offset);
1624 b->cursor = nir_before_instr(&intrin->instr);
1625 nir_instr_rewrite_src(&intrin->instr, offset,
1626 nir_src_for_ssa(nir_imm_int(b, 0)));
1627 progress = true;
1628 }
1629 }
1630 }
1631
1632 return progress;
1633 }
1634
1635 bool
1636 nir_io_add_const_offset_to_base(nir_shader *nir, nir_variable_mode mode)
1637 {
1638 bool progress = false;
1639
1640 nir_foreach_function(f, nir) {
1641 if (f->impl) {
1642 nir_builder b;
1643 nir_builder_init(&b, f->impl);
1644 nir_foreach_block(block, f->impl) {
1645 progress |= add_const_offset_to_base_block(block, &b, mode);
1646 }
1647 }
1648 }
1649
1650 return progress;
1651 }
1652