nir: Allow uniform in nir_lower_vars_to_explicit_types
src/compiler/nir/nir_lower_io.c (mesa.git)
1 /*
2 * Copyright © 2014 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Connor Abbott (cwabbott0@gmail.com)
25 * Jason Ekstrand (jason@jlekstrand.net)
26 *
27 */
28
29 /*
30 * This lowering pass converts loads and stores of input/output variables
31 * into actual input/output intrinsics.
32 */
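
/*
 * Illustrative sketch (not exact NIR syntax; names, SSA numbers and the base
 * value are hypothetical): a read of a shader input such as
 *
 *    vec4 32 ssa_4 = intrinsic load_deref (&color) ()      // color: shader_in
 *
 * becomes an offset-based intrinsic along the lines of
 *
 *    vec4 32 ssa_4 = intrinsic load_input (ssa_0) (base=1, component=0, ...)
 *
 * where "base" is var->data.driver_location and ssa_0 is the remaining offset
 * computed with the driver-provided type_size() callback.
 */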
33
34 #include "nir.h"
35 #include "nir_builder.h"
36 #include "nir_deref.h"
37
38 #include "util/u_math.h"
39
40 struct lower_io_state {
41 void *dead_ctx;
42 nir_builder builder;
43 int (*type_size)(const struct glsl_type *type, bool);
44 nir_variable_mode modes;
45 nir_lower_io_options options;
46 };
47
48 static nir_intrinsic_op
49 ssbo_atomic_for_deref(nir_intrinsic_op deref_op)
50 {
51 switch (deref_op) {
52 #define OP(O) case nir_intrinsic_deref_##O: return nir_intrinsic_ssbo_##O;
53 OP(atomic_exchange)
54 OP(atomic_comp_swap)
55 OP(atomic_add)
56 OP(atomic_imin)
57 OP(atomic_umin)
58 OP(atomic_imax)
59 OP(atomic_umax)
60 OP(atomic_and)
61 OP(atomic_or)
62 OP(atomic_xor)
63 OP(atomic_fadd)
64 OP(atomic_fmin)
65 OP(atomic_fmax)
66 OP(atomic_fcomp_swap)
67 #undef OP
68 default:
69 unreachable("Invalid SSBO atomic");
70 }
71 }
72
73 static nir_intrinsic_op
74 global_atomic_for_deref(nir_intrinsic_op deref_op)
75 {
76 switch (deref_op) {
77 #define OP(O) case nir_intrinsic_deref_##O: return nir_intrinsic_global_##O;
78 OP(atomic_exchange)
79 OP(atomic_comp_swap)
80 OP(atomic_add)
81 OP(atomic_imin)
82 OP(atomic_umin)
83 OP(atomic_imax)
84 OP(atomic_umax)
85 OP(atomic_and)
86 OP(atomic_or)
87 OP(atomic_xor)
88 OP(atomic_fadd)
89 OP(atomic_fmin)
90 OP(atomic_fmax)
91 OP(atomic_fcomp_swap)
92 #undef OP
93 default:
94 unreachable("Invalid global atomic");
95 }
96 }
97
98 static nir_intrinsic_op
99 shared_atomic_for_deref(nir_intrinsic_op deref_op)
100 {
101 switch (deref_op) {
102 #define OP(O) case nir_intrinsic_deref_##O: return nir_intrinsic_shared_##O;
103 OP(atomic_exchange)
104 OP(atomic_comp_swap)
105 OP(atomic_add)
106 OP(atomic_imin)
107 OP(atomic_umin)
108 OP(atomic_imax)
109 OP(atomic_umax)
110 OP(atomic_and)
111 OP(atomic_or)
112 OP(atomic_xor)
113 OP(atomic_fadd)
114 OP(atomic_fmin)
115 OP(atomic_fmax)
116 OP(atomic_fcomp_swap)
117 #undef OP
118 default:
119 unreachable("Invalid shared atomic");
120 }
121 }
122
123 void
124 nir_assign_var_locations(nir_shader *shader, nir_variable_mode mode,
125 unsigned *size,
126 int (*type_size)(const struct glsl_type *, bool))
127 {
128 unsigned location = 0;
129
130 nir_foreach_variable_with_modes(var, shader, mode) {
131 var->data.driver_location = location;
132 bool bindless_type_size = var->data.mode == nir_var_shader_in ||
133 var->data.mode == nir_var_shader_out ||
134 var->data.bindless;
135 location += type_size(var->type, bindless_type_size);
136 }
137
138 *size = location;
139 }
140
141 /**
142 * Return true if the given variable is a per-vertex input/output array
143 * (such as geometry shader inputs).
144 */
145 bool
146 nir_is_per_vertex_io(const nir_variable *var, gl_shader_stage stage)
147 {
148 if (var->data.patch || !glsl_type_is_array(var->type))
149 return false;
150
151 if (var->data.mode == nir_var_shader_in)
152 return stage == MESA_SHADER_GEOMETRY ||
153 stage == MESA_SHADER_TESS_CTRL ||
154 stage == MESA_SHADER_TESS_EVAL;
155
156 if (var->data.mode == nir_var_shader_out)
157 return stage == MESA_SHADER_TESS_CTRL;
158
159 return false;
160 }
161
162 static unsigned get_number_of_slots(struct lower_io_state *state,
163 const nir_variable *var)
164 {
165 const struct glsl_type *type = var->type;
166
167 if (nir_is_per_vertex_io(var, state->builder.shader->info.stage)) {
168 assert(glsl_type_is_array(type));
169 type = glsl_get_array_element(type);
170 }
171
172 return state->type_size(type, var->data.bindless);
173 }
174
175 static nir_ssa_def *
176 get_io_offset(nir_builder *b, nir_deref_instr *deref,
177 nir_ssa_def **vertex_index,
178 int (*type_size)(const struct glsl_type *, bool),
179 unsigned *component, bool bts)
180 {
181 nir_deref_path path;
182 nir_deref_path_init(&path, deref, NULL);
183
184 assert(path.path[0]->deref_type == nir_deref_type_var);
185 nir_deref_instr **p = &path.path[1];
186
187 /* For per-vertex input arrays (i.e. geometry shader inputs), keep the
188 * outermost array index separate. Process the rest normally.
189 */
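   /* e.g. for a hypothetical GS input access "input_arr[v].member", the
    * vertex index "v" ends up in *vertex_index and only the remaining path
    * contributes to the offset computed below.
    */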
190 if (vertex_index != NULL) {
191 assert((*p)->deref_type == nir_deref_type_array);
192 *vertex_index = nir_ssa_for_src(b, (*p)->arr.index, 1);
193 p++;
194 }
195
196 if (path.path[0]->var->data.compact) {
197 assert((*p)->deref_type == nir_deref_type_array);
198 assert(glsl_type_is_scalar((*p)->type));
199
200 /* We always lower indirect dereferences for "compact" array vars. */
201 const unsigned index = nir_src_as_uint((*p)->arr.index);
202 const unsigned total_offset = *component + index;
203 const unsigned slot_offset = total_offset / 4;
204 *component = total_offset % 4;
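      /* Worked example (hypothetical): a compact float[8] such as
       * gl_ClipDistance read at index 5 with *component == 0 yields
       * slot_offset = 1 and a new *component of 1.
       */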
205 return nir_imm_int(b, type_size(glsl_vec4_type(), bts) * slot_offset);
206 }
207
208 /* Just emit code and let constant-folding go to town */
209 nir_ssa_def *offset = nir_imm_int(b, 0);
210
211 for (; *p; p++) {
212 if ((*p)->deref_type == nir_deref_type_array) {
213 unsigned size = type_size((*p)->type, bts);
214
215 nir_ssa_def *mul =
216 nir_amul_imm(b, nir_ssa_for_src(b, (*p)->arr.index, 1), size);
217
218 offset = nir_iadd(b, offset, mul);
219 } else if ((*p)->deref_type == nir_deref_type_struct) {
220 /* p starts at path[1], so this is safe */
221 nir_deref_instr *parent = *(p - 1);
222
223 unsigned field_offset = 0;
224 for (unsigned i = 0; i < (*p)->strct.index; i++) {
225 field_offset += type_size(glsl_get_struct_field(parent->type, i), bts);
226 }
227 offset = nir_iadd_imm(b, offset, field_offset);
228 } else {
229 unreachable("Unsupported deref type");
230 }
231 }
232
233 nir_deref_path_finish(&path);
234
235 return offset;
236 }
237
238 static nir_ssa_def *
239 emit_load(struct lower_io_state *state,
240 nir_ssa_def *vertex_index, nir_variable *var, nir_ssa_def *offset,
241 unsigned component, unsigned num_components, unsigned bit_size,
242 nir_alu_type type)
243 {
244 nir_builder *b = &state->builder;
245 const nir_shader *nir = b->shader;
246 nir_variable_mode mode = var->data.mode;
247 nir_ssa_def *barycentric = NULL;
248
249 nir_intrinsic_op op;
250 switch (mode) {
251 case nir_var_shader_in:
252 if (nir->info.stage == MESA_SHADER_FRAGMENT &&
253 nir->options->use_interpolated_input_intrinsics &&
254 var->data.interpolation != INTERP_MODE_FLAT) {
255 if (var->data.interpolation == INTERP_MODE_EXPLICIT) {
256 assert(vertex_index != NULL);
257 op = nir_intrinsic_load_input_vertex;
258 } else {
259 assert(vertex_index == NULL);
260
261 nir_intrinsic_op bary_op;
262 if (var->data.sample ||
263 (state->options & nir_lower_io_force_sample_interpolation))
264 bary_op = nir_intrinsic_load_barycentric_sample;
265 else if (var->data.centroid)
266 bary_op = nir_intrinsic_load_barycentric_centroid;
267 else
268 bary_op = nir_intrinsic_load_barycentric_pixel;
269
270 barycentric = nir_load_barycentric(&state->builder, bary_op,
271 var->data.interpolation);
272 op = nir_intrinsic_load_interpolated_input;
273 }
274 } else {
275 op = vertex_index ? nir_intrinsic_load_per_vertex_input :
276 nir_intrinsic_load_input;
277 }
278 break;
279 case nir_var_shader_out:
280 op = vertex_index ? nir_intrinsic_load_per_vertex_output :
281 nir_intrinsic_load_output;
282 break;
283 case nir_var_uniform:
284 op = nir_intrinsic_load_uniform;
285 break;
286 default:
287 unreachable("Unknown variable mode");
288 }
289
290 nir_intrinsic_instr *load =
291 nir_intrinsic_instr_create(state->builder.shader, op);
292 load->num_components = num_components;
293
294 nir_intrinsic_set_base(load, var->data.driver_location);
295 if (mode == nir_var_shader_in || mode == nir_var_shader_out)
296 nir_intrinsic_set_component(load, component);
297
298 if (load->intrinsic == nir_intrinsic_load_uniform)
299 nir_intrinsic_set_range(load,
300 state->type_size(var->type, var->data.bindless));
301
302 if (load->intrinsic == nir_intrinsic_load_input ||
303 load->intrinsic == nir_intrinsic_load_input_vertex ||
304 load->intrinsic == nir_intrinsic_load_uniform)
305 nir_intrinsic_set_type(load, type);
306
307 if (load->intrinsic != nir_intrinsic_load_uniform) {
308 nir_io_semantics semantics = {0};
309 semantics.location = var->data.location;
310 semantics.num_slots = get_number_of_slots(state, var);
311 semantics.fb_fetch_output = var->data.fb_fetch_output;
312 nir_intrinsic_set_io_semantics(load, semantics);
313 }
314
315 if (vertex_index) {
316 load->src[0] = nir_src_for_ssa(vertex_index);
317 load->src[1] = nir_src_for_ssa(offset);
318 } else if (barycentric) {
319 load->src[0] = nir_src_for_ssa(barycentric);
320 load->src[1] = nir_src_for_ssa(offset);
321 } else {
322 load->src[0] = nir_src_for_ssa(offset);
323 }
324
325 nir_ssa_dest_init(&load->instr, &load->dest,
326 num_components, bit_size, NULL);
327 nir_builder_instr_insert(b, &load->instr);
328
329 return &load->dest.ssa;
330 }
331
332 static nir_ssa_def *
333 lower_load(nir_intrinsic_instr *intrin, struct lower_io_state *state,
334 nir_ssa_def *vertex_index, nir_variable *var, nir_ssa_def *offset,
335 unsigned component, const struct glsl_type *type)
336 {
337 assert(intrin->dest.is_ssa);
338 if (intrin->dest.ssa.bit_size == 64 &&
339 (state->options & nir_lower_io_lower_64bit_to_32)) {
340 nir_builder *b = &state->builder;
341
342 const unsigned slot_size = state->type_size(glsl_dvec_type(2), false);
343
344 nir_ssa_def *comp64[4];
345 assert(component == 0 || component == 2);
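      /* e.g. a dvec3 input at component 0 is split into one 4x32 load plus
       * one 2x32 load from the following slot (illustrative).
       */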
346 unsigned dest_comp = 0;
347 while (dest_comp < intrin->dest.ssa.num_components) {
348 const unsigned num_comps =
349 MIN2(intrin->dest.ssa.num_components - dest_comp,
350 (4 - component) / 2);
351
352 nir_ssa_def *data32 =
353 emit_load(state, vertex_index, var, offset, component,
354 num_comps * 2, 32, nir_type_uint32);
355 for (unsigned i = 0; i < num_comps; i++) {
356 comp64[dest_comp + i] =
357 nir_pack_64_2x32(b, nir_channels(b, data32, 3 << (i * 2)));
358 }
359
360 /* Only the first load has a component offset */
361 component = 0;
362 dest_comp += num_comps;
363 offset = nir_iadd_imm(b, offset, slot_size);
364 }
365
366 return nir_vec(b, comp64, intrin->dest.ssa.num_components);
367 } else if (intrin->dest.ssa.bit_size == 1) {
368 /* Booleans are 32-bit */
369 assert(glsl_type_is_boolean(type));
370 return nir_b2b1(&state->builder,
371 emit_load(state, vertex_index, var, offset, component,
372 intrin->dest.ssa.num_components, 32,
373 nir_type_bool32));
374 } else {
375 return emit_load(state, vertex_index, var, offset, component,
376 intrin->dest.ssa.num_components,
377 intrin->dest.ssa.bit_size,
378 nir_get_nir_type_for_glsl_type(type));
379 }
380 }
381
382 static void
383 emit_store(struct lower_io_state *state, nir_ssa_def *data,
384 nir_ssa_def *vertex_index, nir_variable *var, nir_ssa_def *offset,
385 unsigned component, unsigned num_components,
386 nir_component_mask_t write_mask, nir_alu_type type)
387 {
388 nir_builder *b = &state->builder;
389 nir_variable_mode mode = var->data.mode;
390
391 assert(mode == nir_var_shader_out);
392 nir_intrinsic_op op;
393 op = vertex_index ? nir_intrinsic_store_per_vertex_output :
394 nir_intrinsic_store_output;
395
396 nir_intrinsic_instr *store =
397 nir_intrinsic_instr_create(state->builder.shader, op);
398 store->num_components = num_components;
399
400 store->src[0] = nir_src_for_ssa(data);
401
402 nir_intrinsic_set_base(store, var->data.driver_location);
403
404 if (mode == nir_var_shader_out)
405 nir_intrinsic_set_component(store, component);
406
407 if (store->intrinsic == nir_intrinsic_store_output)
408 nir_intrinsic_set_type(store, type);
409
410 nir_intrinsic_set_write_mask(store, write_mask);
411
412 if (vertex_index)
413 store->src[1] = nir_src_for_ssa(vertex_index);
414
415 store->src[vertex_index ? 2 : 1] = nir_src_for_ssa(offset);
416
417 unsigned gs_streams = 0;
418 if (state->builder.shader->info.stage == MESA_SHADER_GEOMETRY) {
419 if (var->data.stream & NIR_STREAM_PACKED) {
420 gs_streams = var->data.stream & ~NIR_STREAM_PACKED;
421 } else {
422 assert(var->data.stream < 4);
423 gs_streams = 0;
424 for (unsigned i = 0; i < num_components; ++i)
425 gs_streams |= var->data.stream << (2 * i);
426 }
427 }
428
429 nir_io_semantics semantics = {0};
430 semantics.location = var->data.location;
431 semantics.num_slots = get_number_of_slots(state, var);
432 semantics.dual_source_blend_index = var->data.index;
433 semantics.gs_streams = gs_streams;
434 nir_intrinsic_set_io_semantics(store, semantics);
435
436 nir_builder_instr_insert(b, &store->instr);
437 }
438
439 static void
440 lower_store(nir_intrinsic_instr *intrin, struct lower_io_state *state,
441 nir_ssa_def *vertex_index, nir_variable *var, nir_ssa_def *offset,
442 unsigned component, const struct glsl_type *type)
443 {
444 assert(intrin->src[1].is_ssa);
445 if (intrin->src[1].ssa->bit_size == 64 &&
446 (state->options & nir_lower_io_lower_64bit_to_32)) {
447 nir_builder *b = &state->builder;
448
449 const unsigned slot_size = state->type_size(glsl_dvec_type(2), false);
450
451 assert(component == 0 || component == 2);
452 unsigned src_comp = 0;
453 nir_component_mask_t write_mask = nir_intrinsic_write_mask(intrin);
454 while (src_comp < intrin->num_components) {
455 const unsigned num_comps =
456 MIN2(intrin->num_components - src_comp,
457 (4 - component) / 2);
458
459 if (write_mask & BITFIELD_MASK(num_comps)) {
460 nir_ssa_def *data =
461 nir_channels(b, intrin->src[1].ssa,
462 BITFIELD_RANGE(src_comp, num_comps));
463 nir_ssa_def *data32 = nir_bitcast_vector(b, data, 32);
464
465 nir_component_mask_t write_mask32 = 0;
466 for (unsigned i = 0; i < num_comps; i++) {
467 if (write_mask & BITFIELD_MASK(num_comps) & (1 << i))
468 write_mask32 |= 3 << (i * 2);
469 }
470
471 emit_store(state, data32, vertex_index, var, offset,
472 component, data32->num_components, write_mask32,
473 nir_type_uint32);
474 }
475
476 /* Only the first store has a component offset */
477 component = 0;
478 src_comp += num_comps;
479 write_mask >>= num_comps;
480 offset = nir_iadd_imm(b, offset, slot_size);
481 }
482 } else if (intrin->dest.ssa.bit_size == 1) {
483 /* Booleans are 32-bit */
484 assert(glsl_type_is_boolean(type));
485 nir_ssa_def *b32_val = nir_b2b32(&state->builder, intrin->src[1].ssa);
486 emit_store(state, b32_val, vertex_index, var, offset,
487 component, intrin->num_components,
488 nir_intrinsic_write_mask(intrin),
489 nir_type_bool32);
490 } else {
491 emit_store(state, intrin->src[1].ssa, vertex_index, var, offset,
492 component, intrin->num_components,
493 nir_intrinsic_write_mask(intrin),
494 nir_get_nir_type_for_glsl_type(type));
495 }
496 }
497
498 static nir_ssa_def *
499 lower_interpolate_at(nir_intrinsic_instr *intrin, struct lower_io_state *state,
500 nir_variable *var, nir_ssa_def *offset, unsigned component,
501 const struct glsl_type *type)
502 {
503 nir_builder *b = &state->builder;
504 assert(var->data.mode == nir_var_shader_in);
505
506 /* Ignore interpolateAt() for flat variables - flat is flat. Lower
507 * interpolateAtVertex() for explicit variables.
508 */
509 if (var->data.interpolation == INTERP_MODE_FLAT ||
510 var->data.interpolation == INTERP_MODE_EXPLICIT) {
511 nir_ssa_def *vertex_index = NULL;
512
513 if (var->data.interpolation == INTERP_MODE_EXPLICIT) {
514 assert(intrin->intrinsic == nir_intrinsic_interp_deref_at_vertex);
515 vertex_index = intrin->src[1].ssa;
516 }
517
518 return lower_load(intrin, state, vertex_index, var, offset, component, type);
519 }
520
521 /* None of the supported APIs allow interpolation on 64-bit things */
522 assert(intrin->dest.is_ssa && intrin->dest.ssa.bit_size <= 32);
523
524 nir_intrinsic_op bary_op;
525 switch (intrin->intrinsic) {
526 case nir_intrinsic_interp_deref_at_centroid:
527 bary_op = (state->options & nir_lower_io_force_sample_interpolation) ?
528 nir_intrinsic_load_barycentric_sample :
529 nir_intrinsic_load_barycentric_centroid;
530 break;
531 case nir_intrinsic_interp_deref_at_sample:
532 bary_op = nir_intrinsic_load_barycentric_at_sample;
533 break;
534 case nir_intrinsic_interp_deref_at_offset:
535 bary_op = nir_intrinsic_load_barycentric_at_offset;
536 break;
537 default:
538 unreachable("Bogus interpolateAt() intrinsic.");
539 }
540
541 nir_intrinsic_instr *bary_setup =
542 nir_intrinsic_instr_create(state->builder.shader, bary_op);
543
544 nir_ssa_dest_init(&bary_setup->instr, &bary_setup->dest, 2, 32, NULL);
545 nir_intrinsic_set_interp_mode(bary_setup, var->data.interpolation);
546
547 if (intrin->intrinsic == nir_intrinsic_interp_deref_at_sample ||
548 intrin->intrinsic == nir_intrinsic_interp_deref_at_offset ||
549 intrin->intrinsic == nir_intrinsic_interp_deref_at_vertex)
550 nir_src_copy(&bary_setup->src[0], &intrin->src[1], bary_setup);
551
552 nir_builder_instr_insert(b, &bary_setup->instr);
553
554 nir_intrinsic_instr *load =
555 nir_intrinsic_instr_create(state->builder.shader,
556 nir_intrinsic_load_interpolated_input);
557 load->num_components = intrin->num_components;
558
559 nir_intrinsic_set_base(load, var->data.driver_location);
560 nir_intrinsic_set_component(load, component);
561
562 nir_io_semantics semantics = {0};
563 semantics.location = var->data.location;
564 semantics.num_slots = get_number_of_slots(state, var);
565 nir_intrinsic_set_io_semantics(load, semantics);
566
567 load->src[0] = nir_src_for_ssa(&bary_setup->dest.ssa);
568 load->src[1] = nir_src_for_ssa(offset);
569
570 assert(intrin->dest.is_ssa);
571 nir_ssa_dest_init(&load->instr, &load->dest,
572 intrin->dest.ssa.num_components,
573 intrin->dest.ssa.bit_size, NULL);
574 nir_builder_instr_insert(b, &load->instr);
575
576 return &load->dest.ssa;
577 }
578
579 static bool
580 nir_lower_io_block(nir_block *block,
581 struct lower_io_state *state)
582 {
583 nir_builder *b = &state->builder;
584 const nir_shader_compiler_options *options = b->shader->options;
585 bool progress = false;
586
587 nir_foreach_instr_safe(instr, block) {
588 if (instr->type != nir_instr_type_intrinsic)
589 continue;
590
591 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
592
593 switch (intrin->intrinsic) {
594 case nir_intrinsic_load_deref:
595 case nir_intrinsic_store_deref:
596 /* We can lower the io for this nir intrinsic */
597 break;
598 case nir_intrinsic_interp_deref_at_centroid:
599 case nir_intrinsic_interp_deref_at_sample:
600 case nir_intrinsic_interp_deref_at_offset:
601 case nir_intrinsic_interp_deref_at_vertex:
602 /* We can optionally lower these to load_interpolated_input */
603 if (options->use_interpolated_input_intrinsics)
604 break;
605 default:
606 /* We can't lower the io for this nir intrinsic, so skip it */
607 continue;
608 }
609
610 nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
611
612 nir_variable_mode mode = deref->mode;
613 assert(util_is_power_of_two_nonzero(mode));
614 if ((state->modes & mode) == 0)
615 continue;
616
617 nir_variable *var = nir_deref_instr_get_variable(deref);
618
619 b->cursor = nir_before_instr(instr);
620
621 const bool per_vertex = nir_is_per_vertex_io(var, b->shader->info.stage);
622
623 nir_ssa_def *offset;
624 nir_ssa_def *vertex_index = NULL;
625 unsigned component_offset = var->data.location_frac;
626 bool bindless_type_size = mode == nir_var_shader_in ||
627 mode == nir_var_shader_out ||
628 var->data.bindless;
629
630 offset = get_io_offset(b, deref, per_vertex ? &vertex_index : NULL,
631 state->type_size, &component_offset,
632 bindless_type_size);
633
634 nir_ssa_def *replacement = NULL;
635
636 switch (intrin->intrinsic) {
637 case nir_intrinsic_load_deref:
638 replacement = lower_load(intrin, state, vertex_index, var, offset,
639 component_offset, deref->type);
640 break;
641
642 case nir_intrinsic_store_deref:
643 lower_store(intrin, state, vertex_index, var, offset,
644 component_offset, deref->type);
645 break;
646
647 case nir_intrinsic_interp_deref_at_centroid:
648 case nir_intrinsic_interp_deref_at_sample:
649 case nir_intrinsic_interp_deref_at_offset:
650 case nir_intrinsic_interp_deref_at_vertex:
651 assert(vertex_index == NULL);
652 replacement = lower_interpolate_at(intrin, state, var, offset,
653 component_offset, deref->type);
654 break;
655
656 default:
657 continue;
658 }
659
660 if (replacement) {
661 nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
662 nir_src_for_ssa(replacement));
663 }
664 nir_instr_remove(&intrin->instr);
665 progress = true;
666 }
667
668 return progress;
669 }
670
671 static bool
672 nir_lower_io_impl(nir_function_impl *impl,
673 nir_variable_mode modes,
674 int (*type_size)(const struct glsl_type *, bool),
675 nir_lower_io_options options)
676 {
677 struct lower_io_state state;
678 bool progress = false;
679
680 nir_builder_init(&state.builder, impl);
681 state.dead_ctx = ralloc_context(NULL);
682 state.modes = modes;
683 state.type_size = type_size;
684 state.options = options;
685
686 ASSERTED nir_variable_mode supported_modes =
687 nir_var_shader_in | nir_var_shader_out | nir_var_uniform;
688 assert(!(modes & ~supported_modes));
689
690 nir_foreach_block(block, impl) {
691 progress |= nir_lower_io_block(block, &state);
692 }
693
694 ralloc_free(state.dead_ctx);
695
696 nir_metadata_preserve(impl, nir_metadata_block_index |
697 nir_metadata_dominance);
698 return progress;
699 }
700
701 /** Lower load/store_deref intrinsics on I/O variables to offset-based intrinsics
702 *
703 * This pass is intended to be used for cross-stage shader I/O and driver-
704 * managed uniforms to turn deref-based access into a simpler model using
705 * locations or offsets. For fragment shader inputs, it can optionally turn
706 * load_deref into an explicit interpolation using barycentrics coming from
707 * one of the load_barycentric_* intrinsics. This pass requires that all
708 * deref chains are complete and contain no casts.
709 */
710 bool
711 nir_lower_io(nir_shader *shader, nir_variable_mode modes,
712 int (*type_size)(const struct glsl_type *, bool),
713 nir_lower_io_options options)
714 {
715 bool progress = false;
716
717 nir_foreach_function(function, shader) {
718 if (function->impl) {
719 progress |= nir_lower_io_impl(function->impl, modes,
720 type_size, options);
721 }
722 }
723
724 return progress;
725 }
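
/* A minimal usage sketch (an assumption about typical driver code, not part
 * of this file): assign driver_locations, then run the pass.
 * "type_size_vec4" is a hypothetical driver callback that returns the number
 * of vec4 slots occupied by a glsl_type.
 */
#if 0
static void
example_lower_fs_io(nir_shader *nir,
                    int (*type_size_vec4)(const struct glsl_type *, bool))
{
   unsigned num_inputs = 0, num_outputs = 0;

   /* Give every input/output variable a driver_location in vec4 slots. */
   nir_assign_var_locations(nir, nir_var_shader_in, &num_inputs,
                            type_size_vec4);
   nir_assign_var_locations(nir, nir_var_shader_out, &num_outputs,
                            type_size_vec4);

   /* Replace load/store_deref on those variables with load_input,
    * store_output, load_interpolated_input, etc.
    */
   nir_lower_io(nir, nir_var_shader_in | nir_var_shader_out,
                type_size_vec4, (nir_lower_io_options)0);
}
#endif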
726
727 static unsigned
728 type_scalar_size_bytes(const struct glsl_type *type)
729 {
730 assert(glsl_type_is_vector_or_scalar(type) ||
731 glsl_type_is_matrix(type));
732 return glsl_type_is_boolean(type) ? 4 : glsl_get_bit_size(type) / 8;
733 }
734
735 static nir_ssa_def *
736 build_addr_iadd(nir_builder *b, nir_ssa_def *addr,
737 nir_address_format addr_format, nir_ssa_def *offset)
738 {
739 assert(offset->num_components == 1);
740
741 switch (addr_format) {
742 case nir_address_format_32bit_global:
743 case nir_address_format_64bit_global:
744 case nir_address_format_32bit_offset:
745 assert(addr->bit_size == offset->bit_size);
746 assert(addr->num_components == 1);
747 return nir_iadd(b, addr, offset);
748
749 case nir_address_format_32bit_offset_as_64bit:
750 assert(addr->num_components == 1);
751 assert(offset->bit_size == 32);
752 return nir_u2u64(b, nir_iadd(b, nir_u2u32(b, addr), offset));
753
754 case nir_address_format_64bit_bounded_global:
755 assert(addr->num_components == 4);
756 assert(addr->bit_size == offset->bit_size);
757 return nir_vec4(b, nir_channel(b, addr, 0),
758 nir_channel(b, addr, 1),
759 nir_channel(b, addr, 2),
760 nir_iadd(b, nir_channel(b, addr, 3), offset));
761
762 case nir_address_format_32bit_index_offset:
763 assert(addr->num_components == 2);
764 assert(addr->bit_size == offset->bit_size);
765 return nir_vec2(b, nir_channel(b, addr, 0),
766 nir_iadd(b, nir_channel(b, addr, 1), offset));
767
768 case nir_address_format_32bit_index_offset_pack64:
769 assert(addr->num_components == 1);
770 assert(offset->bit_size == 32);
771 return nir_pack_64_2x32_split(b,
772 nir_iadd(b, nir_unpack_64_2x32_split_x(b, addr), offset),
773 nir_unpack_64_2x32_split_y(b, addr));
774
775 case nir_address_format_vec2_index_32bit_offset:
776 assert(addr->num_components == 3);
777 assert(offset->bit_size == 32);
778 return nir_vec3(b, nir_channel(b, addr, 0), nir_channel(b, addr, 1),
779 nir_iadd(b, nir_channel(b, addr, 2), offset));
780
781 case nir_address_format_logical:
782 unreachable("Unsupported address format");
783 }
784 unreachable("Invalid address format");
785 }
786
787 static unsigned
788 addr_get_offset_bit_size(nir_ssa_def *addr, nir_address_format addr_format)
789 {
790 if (addr_format == nir_address_format_32bit_offset_as_64bit ||
791 addr_format == nir_address_format_32bit_index_offset_pack64)
792 return 32;
793 return addr->bit_size;
794 }
795
796 static nir_ssa_def *
797 build_addr_iadd_imm(nir_builder *b, nir_ssa_def *addr,
798 nir_address_format addr_format, int64_t offset)
799 {
800 return build_addr_iadd(b, addr, addr_format,
801 nir_imm_intN_t(b, offset,
802 addr_get_offset_bit_size(addr, addr_format)));
803 }
804
805 static nir_ssa_def *
806 build_addr_for_var(nir_builder *b, nir_variable *var,
807 nir_address_format addr_format)
808 {
809 assert(var->data.mode & (nir_var_uniform | nir_var_mem_shared |
810 nir_var_shader_temp | nir_var_function_temp |
811 nir_var_mem_constant));
812
813 const unsigned num_comps = nir_address_format_num_components(addr_format);
814 const unsigned bit_size = nir_address_format_bit_size(addr_format);
815
816 switch (addr_format) {
817 case nir_address_format_32bit_global:
818 case nir_address_format_64bit_global: {
819 nir_ssa_def *base_addr;
820 switch (var->data.mode) {
821 case nir_var_shader_temp:
822 base_addr = nir_load_scratch_base_ptr(b, 0, num_comps, bit_size);
823 break;
824
825 case nir_var_function_temp:
826 base_addr = nir_load_scratch_base_ptr(b, 1, num_comps, bit_size);
827 break;
828
829 case nir_var_mem_constant:
830 base_addr = nir_load_constant_base_ptr(b, num_comps, bit_size);
831 break;
832
833 default:
834 unreachable("Unsupported variable mode");
835 }
836
837 return build_addr_iadd_imm(b, base_addr, addr_format,
838 var->data.driver_location);
839 }
840
841 case nir_address_format_32bit_offset:
842 assert(var->data.driver_location <= UINT32_MAX);
843 return nir_imm_int(b, var->data.driver_location);
844
845 case nir_address_format_32bit_offset_as_64bit:
846 assert(var->data.driver_location <= UINT32_MAX);
847 return nir_imm_int64(b, var->data.driver_location);
848
849 default:
850 unreachable("Unsupported address format");
851 }
852 }
853
854 static nir_ssa_def *
855 addr_to_index(nir_builder *b, nir_ssa_def *addr,
856 nir_address_format addr_format)
857 {
858 switch (addr_format) {
859 case nir_address_format_32bit_index_offset:
860 assert(addr->num_components == 2);
861 return nir_channel(b, addr, 0);
862 case nir_address_format_32bit_index_offset_pack64:
863 return nir_unpack_64_2x32_split_y(b, addr);
864 case nir_address_format_vec2_index_32bit_offset:
865 assert(addr->num_components == 3);
866 return nir_channels(b, addr, 0x3);
867 default: unreachable("Invalid address format");
868 }
869 }
870
871 static nir_ssa_def *
872 addr_to_offset(nir_builder *b, nir_ssa_def *addr,
873 nir_address_format addr_format)
874 {
875 switch (addr_format) {
876 case nir_address_format_32bit_index_offset:
877 assert(addr->num_components == 2);
878 return nir_channel(b, addr, 1);
879 case nir_address_format_32bit_index_offset_pack64:
880 return nir_unpack_64_2x32_split_x(b, addr);
881 case nir_address_format_vec2_index_32bit_offset:
882 assert(addr->num_components == 3);
883 return nir_channel(b, addr, 2);
884 case nir_address_format_32bit_offset:
885 return addr;
886 case nir_address_format_32bit_offset_as_64bit:
887 return nir_u2u32(b, addr);
888 default:
889 unreachable("Invalid address format");
890 }
891 }
892
893 /** Returns true if the given address format resolves to a global address */
894 static bool
895 addr_format_is_global(nir_address_format addr_format)
896 {
897 return addr_format == nir_address_format_32bit_global ||
898 addr_format == nir_address_format_64bit_global ||
899 addr_format == nir_address_format_64bit_bounded_global;
900 }
901
902 static bool
903 addr_format_is_offset(nir_address_format addr_format)
904 {
905 return addr_format == nir_address_format_32bit_offset ||
906 addr_format == nir_address_format_32bit_offset_as_64bit;
907 }
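
/* Quick reference, inferred from the helpers in this file: an address is
 *   32bit_index_offset         vec2(buffer_index, byte_offset)
 *   32bit_index_offset_pack64  one 64-bit value, offset in the low 32 bits,
 *                              index in the high 32 bits
 *   vec2_index_32bit_offset    vec3(index.x, index.y, byte_offset)
 *   32bit_offset[_as_64bit]    a single byte offset
 *   32/64bit_global            a single flat address
 *   64bit_bounded_global       uvec4(addr_lo, addr_hi, buffer_size, offset)
 */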
908
909 static nir_ssa_def *
910 addr_to_global(nir_builder *b, nir_ssa_def *addr,
911 nir_address_format addr_format)
912 {
913 switch (addr_format) {
914 case nir_address_format_32bit_global:
915 case nir_address_format_64bit_global:
916 assert(addr->num_components == 1);
917 return addr;
918
919 case nir_address_format_64bit_bounded_global:
920 assert(addr->num_components == 4);
921 return nir_iadd(b, nir_pack_64_2x32(b, nir_channels(b, addr, 0x3)),
922 nir_u2u64(b, nir_channel(b, addr, 3)));
923
924 case nir_address_format_32bit_index_offset:
925 case nir_address_format_32bit_index_offset_pack64:
926 case nir_address_format_vec2_index_32bit_offset:
927 case nir_address_format_32bit_offset:
928 case nir_address_format_32bit_offset_as_64bit:
929 case nir_address_format_logical:
930 unreachable("Cannot get a 64-bit address with this address format");
931 }
932
933 unreachable("Invalid address format");
934 }
935
936 static bool
937 addr_format_needs_bounds_check(nir_address_format addr_format)
938 {
939 return addr_format == nir_address_format_64bit_bounded_global;
940 }
941
942 static nir_ssa_def *
943 addr_is_in_bounds(nir_builder *b, nir_ssa_def *addr,
944 nir_address_format addr_format, unsigned size)
945 {
946 assert(addr_format == nir_address_format_64bit_bounded_global);
947 assert(addr->num_components == 4);
948 return nir_ige(b, nir_channel(b, addr, 2),
949 nir_iadd_imm(b, nir_channel(b, addr, 3), size));
950 }
951
952 static nir_ssa_def *
953 build_explicit_io_load(nir_builder *b, nir_intrinsic_instr *intrin,
954 nir_ssa_def *addr, nir_address_format addr_format,
955 uint32_t align_mul, uint32_t align_offset,
956 unsigned num_components)
957 {
958 nir_variable_mode mode = nir_src_as_deref(intrin->src[0])->mode;
959
960 nir_intrinsic_op op;
961 switch (mode) {
962 case nir_var_mem_ubo:
963 op = nir_intrinsic_load_ubo;
964 break;
965 case nir_var_mem_ssbo:
966 if (addr_format_is_global(addr_format))
967 op = nir_intrinsic_load_global;
968 else
969 op = nir_intrinsic_load_ssbo;
970 break;
971 case nir_var_mem_global:
972 assert(addr_format_is_global(addr_format));
973 op = nir_intrinsic_load_global;
974 break;
975 case nir_var_uniform:
976 assert(addr_format_is_offset(addr_format));
977 assert(b->shader->info.stage == MESA_SHADER_KERNEL);
978 op = nir_intrinsic_load_kernel_input;
979 break;
980 case nir_var_mem_shared:
981 assert(addr_format_is_offset(addr_format));
982 op = nir_intrinsic_load_shared;
983 break;
984 case nir_var_shader_temp:
985 case nir_var_function_temp:
986 if (addr_format_is_offset(addr_format)) {
987 op = nir_intrinsic_load_scratch;
988 } else {
989 assert(addr_format_is_global(addr_format));
990 op = nir_intrinsic_load_global;
991 }
992 break;
993 case nir_var_mem_constant:
994 if (addr_format_is_offset(addr_format)) {
995 op = nir_intrinsic_load_constant;
996 } else {
997 assert(addr_format_is_global(addr_format));
998 op = nir_intrinsic_load_global_constant;
999 }
1000 break;
1001 default:
1002 unreachable("Unsupported explicit IO variable mode");
1003 }
1004
1005 nir_intrinsic_instr *load = nir_intrinsic_instr_create(b->shader, op);
1006
1007 if (addr_format_is_global(addr_format)) {
1008 load->src[0] = nir_src_for_ssa(addr_to_global(b, addr, addr_format));
1009 } else if (addr_format_is_offset(addr_format)) {
1010 assert(addr->num_components == 1);
1011 load->src[0] = nir_src_for_ssa(addr_to_offset(b, addr, addr_format));
1012 } else {
1013 load->src[0] = nir_src_for_ssa(addr_to_index(b, addr, addr_format));
1014 load->src[1] = nir_src_for_ssa(addr_to_offset(b, addr, addr_format));
1015 }
1016
1017 if (nir_intrinsic_has_access(load))
1018 nir_intrinsic_set_access(load, nir_intrinsic_access(intrin));
1019
1020 if (op == nir_intrinsic_load_constant) {
1021 nir_intrinsic_set_base(load, 0);
1022 nir_intrinsic_set_range(load, b->shader->constant_data_size);
1023 }
1024
1025 unsigned bit_size = intrin->dest.ssa.bit_size;
1026 if (bit_size == 1) {
1027 /* TODO: Make the native bool bit_size an option. */
1028 bit_size = 32;
1029 }
1030
1031 nir_intrinsic_set_align(load, align_mul, align_offset);
1032
1033 assert(intrin->dest.is_ssa);
1034 load->num_components = num_components;
1035 nir_ssa_dest_init(&load->instr, &load->dest, num_components,
1036 bit_size, intrin->dest.ssa.name);
1037
1038 assert(bit_size % 8 == 0);
1039
1040 nir_ssa_def *result;
1041 if (addr_format_needs_bounds_check(addr_format)) {
1042 /* The Vulkan spec for robustBufferAccess gives us quite a few options
1043 * as to what we can do with an OOB read. Unfortunately, returning
1044 * undefined values isn't one of them so we return an actual zero.
1045 */
1046 nir_ssa_def *zero = nir_imm_zero(b, load->num_components, bit_size);
1047
1048 const unsigned load_size = (bit_size / 8) * load->num_components;
1049 nir_push_if(b, addr_is_in_bounds(b, addr, addr_format, load_size));
1050
1051 nir_builder_instr_insert(b, &load->instr);
1052
1053 nir_pop_if(b, NULL);
1054
1055 result = nir_if_phi(b, &load->dest.ssa, zero);
1056 } else {
1057 nir_builder_instr_insert(b, &load->instr);
1058 result = &load->dest.ssa;
1059 }
1060
1061 if (intrin->dest.ssa.bit_size == 1) {
1062 /* For shared, we can go ahead and use NIR's and/or the back-end's
1063 * standard encoding for booleans rather than forcing a 0/1 boolean.
1064 * This should save an instruction or two.
1065 */
1066 if (mode == nir_var_mem_shared ||
1067 mode == nir_var_shader_temp ||
1068 mode == nir_var_function_temp)
1069 result = nir_b2b1(b, result);
1070 else
1071 result = nir_i2b(b, result);
1072 }
1073
1074 return result;
1075 }
1076
1077 static void
1078 build_explicit_io_store(nir_builder *b, nir_intrinsic_instr *intrin,
1079 nir_ssa_def *addr, nir_address_format addr_format,
1080 uint32_t align_mul, uint32_t align_offset,
1081 nir_ssa_def *value, nir_component_mask_t write_mask)
1082 {
1083 nir_variable_mode mode = nir_src_as_deref(intrin->src[0])->mode;
1084
1085 nir_intrinsic_op op;
1086 switch (mode) {
1087 case nir_var_mem_ssbo:
1088 if (addr_format_is_global(addr_format))
1089 op = nir_intrinsic_store_global;
1090 else
1091 op = nir_intrinsic_store_ssbo;
1092 break;
1093 case nir_var_mem_global:
1094 assert(addr_format_is_global(addr_format));
1095 op = nir_intrinsic_store_global;
1096 break;
1097 case nir_var_mem_shared:
1098 assert(addr_format_is_offset(addr_format));
1099 op = nir_intrinsic_store_shared;
1100 break;
1101 case nir_var_shader_temp:
1102 case nir_var_function_temp:
1103 if (addr_format_is_offset(addr_format)) {
1104 op = nir_intrinsic_store_scratch;
1105 } else {
1106 assert(addr_format_is_global(addr_format));
1107 op = nir_intrinsic_store_global;
1108 }
1109 break;
1110 default:
1111 unreachable("Unsupported explicit IO variable mode");
1112 }
1113
1114 nir_intrinsic_instr *store = nir_intrinsic_instr_create(b->shader, op);
1115
1116 if (value->bit_size == 1) {
1117 /* For shared, we can go ahead and use NIR's and/or the back-end's
1118 * standard encoding for booleans rather than forcing a 0/1 boolean.
1119 * This should save an instruction or two.
1120 *
1121 * TODO: Make the native bool bit_size an option.
1122 */
1123 if (mode == nir_var_mem_shared ||
1124 mode == nir_var_shader_temp ||
1125 mode == nir_var_function_temp)
1126 value = nir_b2b32(b, value);
1127 else
1128 value = nir_b2i(b, value, 32);
1129 }
1130
1131 store->src[0] = nir_src_for_ssa(value);
1132 if (addr_format_is_global(addr_format)) {
1133 store->src[1] = nir_src_for_ssa(addr_to_global(b, addr, addr_format));
1134 } else if (addr_format_is_offset(addr_format)) {
1135 assert(addr->num_components == 1);
1136 store->src[1] = nir_src_for_ssa(addr_to_offset(b, addr, addr_format));
1137 } else {
1138 store->src[1] = nir_src_for_ssa(addr_to_index(b, addr, addr_format));
1139 store->src[2] = nir_src_for_ssa(addr_to_offset(b, addr, addr_format));
1140 }
1141
1142 nir_intrinsic_set_write_mask(store, write_mask);
1143
1144 if (nir_intrinsic_has_access(store))
1145 nir_intrinsic_set_access(store, nir_intrinsic_access(intrin));
1146
1147 nir_intrinsic_set_align(store, align_mul, align_offset);
1148
1149 assert(value->num_components == 1 ||
1150 value->num_components == intrin->num_components);
1151 store->num_components = value->num_components;
1152
1153 assert(value->bit_size % 8 == 0);
1154
1155 if (addr_format_needs_bounds_check(addr_format)) {
1156 const unsigned store_size = (value->bit_size / 8) * store->num_components;
1157 nir_push_if(b, addr_is_in_bounds(b, addr, addr_format, store_size));
1158
1159 nir_builder_instr_insert(b, &store->instr);
1160
1161 nir_pop_if(b, NULL);
1162 } else {
1163 nir_builder_instr_insert(b, &store->instr);
1164 }
1165 }
1166
1167 static nir_ssa_def *
1168 build_explicit_io_atomic(nir_builder *b, nir_intrinsic_instr *intrin,
1169 nir_ssa_def *addr, nir_address_format addr_format)
1170 {
1171 nir_variable_mode mode = nir_src_as_deref(intrin->src[0])->mode;
1172 const unsigned num_data_srcs =
1173 nir_intrinsic_infos[intrin->intrinsic].num_srcs - 1;
1174
1175 nir_intrinsic_op op;
1176 switch (mode) {
1177 case nir_var_mem_ssbo:
1178 if (addr_format_is_global(addr_format))
1179 op = global_atomic_for_deref(intrin->intrinsic);
1180 else
1181 op = ssbo_atomic_for_deref(intrin->intrinsic);
1182 break;
1183 case nir_var_mem_global:
1184 assert(addr_format_is_global(addr_format));
1185 op = global_atomic_for_deref(intrin->intrinsic);
1186 break;
1187 case nir_var_mem_shared:
1188 assert(addr_format_is_offset(addr_format));
1189 op = shared_atomic_for_deref(intrin->intrinsic);
1190 break;
1191 default:
1192 unreachable("Unsupported explicit IO variable mode");
1193 }
1194
1195 nir_intrinsic_instr *atomic = nir_intrinsic_instr_create(b->shader, op);
1196
1197 unsigned src = 0;
1198 if (addr_format_is_global(addr_format)) {
1199 atomic->src[src++] = nir_src_for_ssa(addr_to_global(b, addr, addr_format));
1200 } else if (addr_format_is_offset(addr_format)) {
1201 assert(addr->num_components == 1);
1202 atomic->src[src++] = nir_src_for_ssa(addr_to_offset(b, addr, addr_format));
1203 } else {
1204 atomic->src[src++] = nir_src_for_ssa(addr_to_index(b, addr, addr_format));
1205 atomic->src[src++] = nir_src_for_ssa(addr_to_offset(b, addr, addr_format));
1206 }
1207 for (unsigned i = 0; i < num_data_srcs; i++) {
1208 atomic->src[src++] = nir_src_for_ssa(intrin->src[1 + i].ssa);
1209 }
1210
1211 /* Global atomics don't have access flags because they assume that the
1212 * address may be non-uniform.
1213 */
1214 if (nir_intrinsic_has_access(atomic))
1215 nir_intrinsic_set_access(atomic, nir_intrinsic_access(intrin));
1216
1217 assert(intrin->dest.ssa.num_components == 1);
1218 nir_ssa_dest_init(&atomic->instr, &atomic->dest,
1219 1, intrin->dest.ssa.bit_size, intrin->dest.ssa.name);
1220
1221 assert(atomic->dest.ssa.bit_size % 8 == 0);
1222
1223 if (addr_format_needs_bounds_check(addr_format)) {
1224 const unsigned atomic_size = atomic->dest.ssa.bit_size / 8;
1225 nir_push_if(b, addr_is_in_bounds(b, addr, addr_format, atomic_size));
1226
1227 nir_builder_instr_insert(b, &atomic->instr);
1228
1229 nir_pop_if(b, NULL);
1230 return nir_if_phi(b, &atomic->dest.ssa,
1231 nir_ssa_undef(b, 1, atomic->dest.ssa.bit_size));
1232 } else {
1233 nir_builder_instr_insert(b, &atomic->instr);
1234 return &atomic->dest.ssa;
1235 }
1236 }
1237
1238 nir_ssa_def *
1239 nir_explicit_io_address_from_deref(nir_builder *b, nir_deref_instr *deref,
1240 nir_ssa_def *base_addr,
1241 nir_address_format addr_format)
1242 {
1243 assert(deref->dest.is_ssa);
1244 switch (deref->deref_type) {
1245 case nir_deref_type_var:
1246 return build_addr_for_var(b, deref->var, addr_format);
1247
1248 case nir_deref_type_array: {
1249 unsigned stride = nir_deref_instr_array_stride(deref);
1250 assert(stride > 0);
1251
1252 nir_ssa_def *index = nir_ssa_for_src(b, deref->arr.index, 1);
1253 index = nir_i2i(b, index, addr_get_offset_bit_size(base_addr, addr_format));
1254 return build_addr_iadd(b, base_addr, addr_format,
1255 nir_amul_imm(b, index, stride));
1256 }
1257
1258 case nir_deref_type_ptr_as_array: {
1259 nir_ssa_def *index = nir_ssa_for_src(b, deref->arr.index, 1);
1260 index = nir_i2i(b, index, addr_get_offset_bit_size(base_addr, addr_format));
1261 unsigned stride = nir_deref_instr_array_stride(deref);
1262 return build_addr_iadd(b, base_addr, addr_format,
1263 nir_amul_imm(b, index, stride));
1264 }
1265
1266 case nir_deref_type_array_wildcard:
1267 unreachable("Wildcards should be lowered by now");
1268 break;
1269
1270 case nir_deref_type_struct: {
1271 nir_deref_instr *parent = nir_deref_instr_parent(deref);
1272 int offset = glsl_get_struct_field_offset(parent->type,
1273 deref->strct.index);
1274 assert(offset >= 0);
1275 return build_addr_iadd_imm(b, base_addr, addr_format, offset);
1276 }
1277
1278 case nir_deref_type_cast:
1279 /* Nothing to do here */
1280 return base_addr;
1281 }
1282
1283 unreachable("Invalid NIR deref type");
1284 }
1285
1286 void
1287 nir_lower_explicit_io_instr(nir_builder *b,
1288 nir_intrinsic_instr *intrin,
1289 nir_ssa_def *addr,
1290 nir_address_format addr_format)
1291 {
1292 b->cursor = nir_after_instr(&intrin->instr);
1293
1294 nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
1295 unsigned vec_stride = glsl_get_explicit_stride(deref->type);
1296 unsigned scalar_size = type_scalar_size_bytes(deref->type);
1297 assert(vec_stride == 0 || glsl_type_is_vector(deref->type));
1298 assert(vec_stride == 0 || vec_stride >= scalar_size);
1299
1300 uint32_t align_mul, align_offset;
1301 if (!nir_get_explicit_deref_align(deref, true, &align_mul, &align_offset)) {
1302 /* If we don't have an alignment from the deref, assume scalar */
1303 align_mul = scalar_size;
1304 align_offset = 0;
1305 }
1306
1307 if (intrin->intrinsic == nir_intrinsic_load_deref) {
1308 nir_ssa_def *value;
1309 if (vec_stride > scalar_size) {
1310 nir_ssa_def *comps[4] = { NULL, };
1311 for (unsigned i = 0; i < intrin->num_components; i++) {
1312 unsigned comp_offset = i * vec_stride;
1313 nir_ssa_def *comp_addr = build_addr_iadd_imm(b, addr, addr_format,
1314 comp_offset);
1315 comps[i] = build_explicit_io_load(b, intrin, comp_addr,
1316 addr_format, align_mul,
1317 (align_offset + comp_offset) %
1318 align_mul,
1319 1);
1320 }
1321 value = nir_vec(b, comps, intrin->num_components);
1322 } else {
1323 value = build_explicit_io_load(b, intrin, addr, addr_format,
1324 align_mul, align_offset,
1325 intrin->num_components);
1326 }
1327 nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(value));
1328 } else if (intrin->intrinsic == nir_intrinsic_store_deref) {
1329 assert(intrin->src[1].is_ssa);
1330 nir_ssa_def *value = intrin->src[1].ssa;
1331 nir_component_mask_t write_mask = nir_intrinsic_write_mask(intrin);
1332 if (vec_stride > scalar_size) {
1333 for (unsigned i = 0; i < intrin->num_components; i++) {
1334 if (!(write_mask & (1 << i)))
1335 continue;
1336
1337 unsigned comp_offset = i * vec_stride;
1338 nir_ssa_def *comp_addr = build_addr_iadd_imm(b, addr, addr_format,
1339 comp_offset);
1340 build_explicit_io_store(b, intrin, comp_addr, addr_format,
1341 align_mul,
1342 (align_offset + comp_offset) % align_mul,
1343 nir_channel(b, value, i), 1);
1344 }
1345 } else {
1346 build_explicit_io_store(b, intrin, addr, addr_format,
1347 align_mul, align_offset,
1348 value, write_mask);
1349 }
1350 } else {
1351 nir_ssa_def *value =
1352 build_explicit_io_atomic(b, intrin, addr, addr_format);
1353 nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(value));
1354 }
1355
1356 nir_instr_remove(&intrin->instr);
1357 }
1358
1359 bool
1360 nir_get_explicit_deref_align(nir_deref_instr *deref,
1361 bool default_to_type_align,
1362 uint32_t *align_mul,
1363 uint32_t *align_offset)
1364 {
1365 if (deref->deref_type == nir_deref_type_var) {
1366 /* If we see a variable, align_mul is effectively infinite because we
1367 * know the offset exactly (up to the offset of the base pointer for the
1368 * given variable mode). We have to pick something so we choose 256B
1369 * as an arbitrary alignment which seems high enough for any reasonable
1370 * wide-load use-case. Back-ends should clamp alignments down if 256B
1371 * is too large for some reason.
1372 */
1373 *align_mul = 256;
1374 *align_offset = deref->var->data.driver_location % 256;
1375 return true;
1376 }
1377
1378 /* If we're a cast deref that has an alignment, use that. */
1379 if (deref->deref_type == nir_deref_type_cast && deref->cast.align_mul > 0) {
1380 *align_mul = deref->cast.align_mul;
1381 *align_offset = deref->cast.align_offset;
1382 return true;
1383 }
1384
1385 /* Otherwise, we need to compute the alignment based on the parent */
1386 nir_deref_instr *parent = nir_deref_instr_parent(deref);
1387 if (parent == NULL) {
1388 assert(deref->deref_type == nir_deref_type_cast);
1389 if (default_to_type_align) {
1390 /* If we don't have a parent, assume the type's alignment, if any. */
1391 unsigned type_align = glsl_get_explicit_alignment(deref->type);
1392 if (type_align == 0)
1393 return false;
1394
1395 *align_mul = type_align;
1396 *align_offset = 0;
1397 return true;
1398 } else {
1399 return false;
1400 }
1401 }
1402
1403 uint32_t parent_mul, parent_offset;
1404 if (!nir_get_explicit_deref_align(parent, default_to_type_align,
1405 &parent_mul, &parent_offset))
1406 return false;
1407
1408 switch (deref->deref_type) {
1409 case nir_deref_type_var:
1410 unreachable("Handled above");
1411
1412 case nir_deref_type_array:
1413 case nir_deref_type_array_wildcard:
1414 case nir_deref_type_ptr_as_array: {
1415 const unsigned stride = nir_deref_instr_array_stride(deref);
1416 if (stride == 0)
1417 return false;
1418
1419 if (deref->deref_type != nir_deref_type_array_wildcard &&
1420 nir_src_is_const(deref->arr.index)) {
1421 unsigned offset = nir_src_as_uint(deref->arr.index) * stride;
1422 *align_mul = parent_mul;
1423 *align_offset = (parent_offset + offset) % parent_mul;
1424 } else {
1425 /* If this is a wildcard or an indirect deref, we have to go with the
1426 * power-of-two gcd.
1427 */
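         /* e.g. (hypothetical numbers) parent_mul = 16, parent_offset = 4,
          * stride = 12 gives MIN3(16, 1 << 2, 1 << 2) = 4.
          */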
1428 *align_mul = MIN3(parent_mul,
1429 1 << (ffs(parent_offset) - 1),
1430 1 << (ffs(stride) - 1));
1431 *align_offset = 0;
1432 }
1433 return true;
1434 }
1435
1436 case nir_deref_type_struct: {
1437 const int offset = glsl_get_struct_field_offset(parent->type,
1438 deref->strct.index);
1439 if (offset < 0)
1440 return false;
1441
1442 *align_mul = parent_mul;
1443 *align_offset = (parent_offset + offset) % parent_mul;
1444 return true;
1445 }
1446
1447 case nir_deref_type_cast:
1448 /* We handled the explicit alignment case above. */
1449 assert(deref->cast.align_mul == 0);
1450 *align_mul = parent_mul;
1451 *align_offset = parent_offset;
1452 return true;
1453 }
1454
1455 unreachable("Invalid deref_instr_type");
1456 }
1457
1458 static void
1459 lower_explicit_io_deref(nir_builder *b, nir_deref_instr *deref,
1460 nir_address_format addr_format)
1461 {
1462 /* Just delete the deref if it's not used. We can't use
1463 * nir_deref_instr_remove_if_unused here because it may remove more than
1464 * one deref which could break our list walking since we walk the list
1465 * backwards.
1466 */
1467 assert(list_is_empty(&deref->dest.ssa.if_uses));
1468 if (list_is_empty(&deref->dest.ssa.uses)) {
1469 nir_instr_remove(&deref->instr);
1470 return;
1471 }
1472
1473 b->cursor = nir_after_instr(&deref->instr);
1474
1475 nir_ssa_def *base_addr = NULL;
1476 if (deref->deref_type != nir_deref_type_var) {
1477 assert(deref->parent.is_ssa);
1478 base_addr = deref->parent.ssa;
1479 }
1480
1481 nir_ssa_def *addr = nir_explicit_io_address_from_deref(b, deref, base_addr,
1482 addr_format);
1483 assert(addr->bit_size == deref->dest.ssa.bit_size);
1484 assert(addr->num_components == deref->dest.ssa.num_components);
1485
1486 nir_instr_remove(&deref->instr);
1487 nir_ssa_def_rewrite_uses(&deref->dest.ssa, nir_src_for_ssa(addr));
1488 }
1489
1490 static void
1491 lower_explicit_io_access(nir_builder *b, nir_intrinsic_instr *intrin,
1492 nir_address_format addr_format)
1493 {
1494 assert(intrin->src[0].is_ssa);
1495 nir_lower_explicit_io_instr(b, intrin, intrin->src[0].ssa, addr_format);
1496 }
1497
1498 static void
1499 lower_explicit_io_array_length(nir_builder *b, nir_intrinsic_instr *intrin,
1500 nir_address_format addr_format)
1501 {
1502 b->cursor = nir_after_instr(&intrin->instr);
1503
1504 nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
1505
1506 assert(glsl_type_is_array(deref->type));
1507 assert(glsl_get_length(deref->type) == 0);
1508 unsigned stride = glsl_get_explicit_stride(deref->type);
1509 assert(stride > 0);
1510
1511 nir_ssa_def *addr = &deref->dest.ssa;
1512 nir_ssa_def *index = addr_to_index(b, addr, addr_format);
1513 nir_ssa_def *offset = addr_to_offset(b, addr, addr_format);
1514
1515 nir_intrinsic_instr *bsize =
1516 nir_intrinsic_instr_create(b->shader, nir_intrinsic_get_buffer_size);
1517 bsize->src[0] = nir_src_for_ssa(index);
1518 nir_ssa_dest_init(&bsize->instr, &bsize->dest, 1, 32, NULL);
1519 nir_builder_instr_insert(b, &bsize->instr);
1520
1521 nir_ssa_def *arr_size =
1522 nir_idiv(b, nir_isub(b, &bsize->dest.ssa, offset),
1523 nir_imm_int(b, stride));
1524
1525 nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(arr_size));
1526 nir_instr_remove(&intrin->instr);
1527 }
1528
1529 static bool
1530 nir_lower_explicit_io_impl(nir_function_impl *impl, nir_variable_mode modes,
1531 nir_address_format addr_format)
1532 {
1533 bool progress = false;
1534
1535 nir_builder b;
1536 nir_builder_init(&b, impl);
1537
1538 /* Walk in reverse order so that we can see the full deref chain when we
1539 * lower the access operations. We lower them assuming that the derefs
1540 * will be turned into address calculations later.
1541 */
1542 nir_foreach_block_reverse(block, impl) {
1543 nir_foreach_instr_reverse_safe(instr, block) {
1544 switch (instr->type) {
1545 case nir_instr_type_deref: {
1546 nir_deref_instr *deref = nir_instr_as_deref(instr);
1547 if (deref->mode & modes) {
1548 lower_explicit_io_deref(&b, deref, addr_format);
1549 progress = true;
1550 }
1551 break;
1552 }
1553
1554 case nir_instr_type_intrinsic: {
1555 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
1556 switch (intrin->intrinsic) {
1557 case nir_intrinsic_load_deref:
1558 case nir_intrinsic_store_deref:
1559 case nir_intrinsic_deref_atomic_add:
1560 case nir_intrinsic_deref_atomic_imin:
1561 case nir_intrinsic_deref_atomic_umin:
1562 case nir_intrinsic_deref_atomic_imax:
1563 case nir_intrinsic_deref_atomic_umax:
1564 case nir_intrinsic_deref_atomic_and:
1565 case nir_intrinsic_deref_atomic_or:
1566 case nir_intrinsic_deref_atomic_xor:
1567 case nir_intrinsic_deref_atomic_exchange:
1568 case nir_intrinsic_deref_atomic_comp_swap:
1569 case nir_intrinsic_deref_atomic_fadd:
1570 case nir_intrinsic_deref_atomic_fmin:
1571 case nir_intrinsic_deref_atomic_fmax:
1572 case nir_intrinsic_deref_atomic_fcomp_swap: {
1573 nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
1574 if (deref->mode & modes) {
1575 lower_explicit_io_access(&b, intrin, addr_format);
1576 progress = true;
1577 }
1578 break;
1579 }
1580
1581 case nir_intrinsic_deref_buffer_array_length: {
1582 nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
1583 if (deref->mode & modes) {
1584 lower_explicit_io_array_length(&b, intrin, addr_format);
1585 progress = true;
1586 }
1587 break;
1588 }
1589
1590 default:
1591 break;
1592 }
1593 break;
1594 }
1595
1596 default:
1597 /* Nothing to do */
1598 break;
1599 }
1600 }
1601 }
1602
1603 if (progress) {
1604 nir_metadata_preserve(impl, nir_metadata_block_index |
1605 nir_metadata_dominance);
1606 }
1607
1608 return progress;
1609 }
1610
1611 /** Lower explicitly laid out I/O access to byte offset/address intrinsics
1612 *
1613 * This pass is intended to be used for any I/O which touches memory external
1614 * to the shader or which is directly visible to the client. It requires that
1615 * all data types in the given modes have explicit stride/offset decorations
1616 * to tell it exactly how to calculate the offset/address for the given load,
1617 * store, or atomic operation. If the offset/stride information does not come
1618 * from the client explicitly (as with shared variables in GL or Vulkan),
1619 * nir_lower_vars_to_explicit_types() can be used to add them.
1620 *
1621 * Unlike nir_lower_io, this pass is fully capable of handling incomplete
1622 * pointer chains which may contain cast derefs. It does so by walking the
1623 * deref chain backwards and simply replacing each deref, one at a time, with
1624 * the appropriate address calculation. The pass takes a nir_address_format
1625 * parameter which describes how the offset or address is to be represented
1626 * during calculations. By ensuring that the address is always in a
1627 * consistent format, pointers can safely be conjured from thin air by the
1628 * driver, stored to variables, passed through phis, etc.
1629 *
1630 * The one exception to the simple algorithm described above is for handling
1631 * row-major matrices, in which case we may look down one additional level of
1632 * the deref chain.
1633 */
1634 bool
1635 nir_lower_explicit_io(nir_shader *shader, nir_variable_mode modes,
1636 nir_address_format addr_format)
1637 {
1638 bool progress = false;
1639
1640 nir_foreach_function(function, shader) {
1641 if (function->impl &&
1642 nir_lower_explicit_io_impl(function->impl, modes, addr_format))
1643 progress = true;
1644 }
1645
1646 return progress;
1647 }
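
/* A minimal usage sketch (an assumption about typical Vulkan-style driver
 * code; the address formats shown are just one common choice).
 */
#if 0
static void
example_lower_buffer_access(nir_shader *nir)
{
   /* UBO/SSBO types coming from SPIR-V already carry explicit std140/std430
    * layouts, so they can be lowered directly.
    */
   nir_lower_explicit_io(nir, nir_var_mem_ubo | nir_var_mem_ssbo,
                         nir_address_format_32bit_index_offset);

   /* Raw pointer access uses a flat 64-bit address. */
   nir_lower_explicit_io(nir, nir_var_mem_global,
                         nir_address_format_64bit_global);
}
#endif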
1648
1649 static bool
1650 nir_lower_vars_to_explicit_types_impl(nir_function_impl *impl,
1651 nir_variable_mode modes,
1652 glsl_type_size_align_func type_info)
1653 {
1654 bool progress = false;
1655
1656 nir_foreach_block(block, impl) {
1657 nir_foreach_instr(instr, block) {
1658 if (instr->type != nir_instr_type_deref)
1659 continue;
1660
1661 nir_deref_instr *deref = nir_instr_as_deref(instr);
1662 if (!(deref->mode & modes))
1663 continue;
1664
1665 unsigned size, alignment;
1666 const struct glsl_type *new_type =
1667 glsl_get_explicit_type_for_size_align(deref->type, type_info, &size, &alignment);
1668 if (new_type != deref->type) {
1669 progress = true;
1670 deref->type = new_type;
1671 }
1672 if (deref->deref_type == nir_deref_type_cast) {
1673 /* See also glsl_type::get_explicit_type_for_size_align() */
1674 unsigned new_stride = align(size, alignment);
1675 if (new_stride != deref->cast.ptr_stride) {
1676 deref->cast.ptr_stride = new_stride;
1677 progress = true;
1678 }
1679 }
1680 }
1681 }
1682
1683 if (progress) {
1684 nir_metadata_preserve(impl, nir_metadata_block_index |
1685 nir_metadata_dominance |
1686 nir_metadata_live_ssa_defs |
1687 nir_metadata_loop_analysis);
1688 }
1689
1690 return progress;
1691 }
1692
1693 static bool
1694 lower_vars_to_explicit(nir_shader *shader,
1695 struct exec_list *vars, nir_variable_mode mode,
1696 glsl_type_size_align_func type_info)
1697 {
1698 bool progress = false;
1699 unsigned offset;
1700 switch (mode) {
1701 case nir_var_uniform:
1702 assert(shader->info.stage == MESA_SHADER_KERNEL);
1703 offset = 0;
1704 break;
1705 case nir_var_function_temp:
1706 case nir_var_shader_temp:
1707 offset = shader->scratch_size;
1708 break;
1709 case nir_var_mem_shared:
1710 offset = 0;
1711 break;
1712 case nir_var_mem_constant:
1713 offset = shader->constant_data_size;
1714 break;
1715 default:
1716 unreachable("Unsupported mode");
1717 }
1718 nir_foreach_variable_in_list(var, vars) {
1719 if (var->data.mode != mode)
1720 continue;
1721
1722 unsigned size, align;
1723 const struct glsl_type *explicit_type =
1724 glsl_get_explicit_type_for_size_align(var->type, type_info, &size, &align);
1725
1726 if (explicit_type != var->type)
1727 var->type = explicit_type;
1728
1729 var->data.driver_location = ALIGN_POT(offset, align);
1730 offset = var->data.driver_location + size;
1731 progress = true;
1732 }
1733
1734 switch (mode) {
1735 case nir_var_uniform:
1736 assert(shader->info.stage == MESA_SHADER_KERNEL);
1737 shader->num_uniforms = offset;
1738 break;
1739 case nir_var_shader_temp:
1740 case nir_var_function_temp:
1741 shader->scratch_size = offset;
1742 break;
1743 case nir_var_mem_shared:
1744 shader->info.cs.shared_size = offset;
1745 shader->shared_size = offset;
1746 break;
1747 case nir_var_mem_constant:
1748 shader->constant_data_size = offset;
1749 break;
1750 default:
1751 unreachable("Unsupported mode");
1752 }
1753
1754 return progress;
1755 }
1756
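/* Assign explicit layouts to variables and derefs in the requested modes
 * using type_info. This is typically run before nir_lower_explicit_io() for
 * modes whose layout is not provided by the client. For example, a compute
 * driver might lay out shared memory like this (an illustrative sketch; the
 * size/align callback and address format are the caller's choice):
 *
 *    nir_lower_vars_to_explicit_types(shader, nir_var_mem_shared,
 *                                     glsl_get_natural_size_align_bytes);
 *    nir_lower_explicit_io(shader, nir_var_mem_shared,
 *                          nir_address_format_32bit_offset);
 */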
1757 bool
1758 nir_lower_vars_to_explicit_types(nir_shader *shader,
1759 nir_variable_mode modes,
1760 glsl_type_size_align_func type_info)
1761 {
1762 /* TODO: Situations which need to be handled to support more modes:
1763 * - row-major matrices
1764 * - compact shader inputs/outputs
1765 * - interface types
1766 */
1767 ASSERTED nir_variable_mode supported =
1768 nir_var_mem_shared | nir_var_mem_global |
1769 nir_var_shader_temp | nir_var_function_temp | nir_var_uniform;
1770 assert(!(modes & ~supported) && "unsupported");
1771
1772 bool progress = false;
1773
1774 if (modes & nir_var_uniform)
1775 progress |= lower_vars_to_explicit(shader, &shader->variables, nir_var_uniform, type_info);
1776 if (modes & nir_var_mem_shared)
1777 progress |= lower_vars_to_explicit(shader, &shader->variables, nir_var_mem_shared, type_info);
1778 if (modes & nir_var_shader_temp)
1779 progress |= lower_vars_to_explicit(shader, &shader->variables, nir_var_shader_temp, type_info);
1780
1781 nir_foreach_function(function, shader) {
1782 if (function->impl) {
1783 if (modes & nir_var_function_temp)
1784 progress |= lower_vars_to_explicit(shader, &function->impl->locals, nir_var_function_temp, type_info);
1785
1786 progress |= nir_lower_vars_to_explicit_types_impl(function->impl, modes, type_info);
1787 }
1788 }
1789
1790 return progress;
1791 }
1792
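/* Serialize a nir_constant into dst according to the explicit layout (field
 * offsets and array strides) baked into type. Booleans are written as 32-bit
 * 0/~0 values and everything is copied with memcpy since dst may be
 * unaligned.
 */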
1793 static void
1794 write_constant(void *dst, const nir_constant *c, const struct glsl_type *type)
1795 {
1796 if (glsl_type_is_vector_or_scalar(type)) {
1797 const unsigned num_components = glsl_get_vector_elements(type);
1798 const unsigned bit_size = glsl_get_bit_size(type);
1799 if (bit_size == 1) {
1800 /* Booleans are special-cased to be 32-bit
1801 *
1802 * TODO: Make the native bool bit_size an option.
1803 */
1804 for (unsigned i = 0; i < num_components; i++) {
1805 int32_t b32 = -(int)c->values[i].b;
1806 memcpy((char *)dst + i * 4, &b32, 4);
1807 }
1808 } else {
1809 assert(bit_size >= 8 && bit_size % 8 == 0);
1810 const unsigned byte_size = bit_size / 8;
1811 for (unsigned i = 0; i < num_components; i++) {
1812 /* Annoyingly, thanks to packed structs, we can't make any
1813 * assumptions about the alignment of dst. To avoid any strange
1814 * issues with unaligned writes, we always use memcpy.
1815 */
1816 memcpy((char *)dst + i * byte_size, &c->values[i], byte_size);
1817 }
1818 }
1819 } else if (glsl_type_is_array_or_matrix(type)) {
1820 const unsigned array_len = glsl_get_length(type);
1821 const unsigned stride = glsl_get_explicit_stride(type);
1822 assert(stride > 0);
1823 const struct glsl_type *elem_type = glsl_get_array_element(type);
1824 for (unsigned i = 0; i < array_len; i++)
1825 write_constant((char *)dst + i * stride, c->elements[i], elem_type);
1826 } else {
1827 assert(glsl_type_is_struct_or_ifc(type));
1828 const unsigned num_fields = glsl_get_length(type);
1829 for (unsigned i = 0; i < num_fields; i++) {
1830 const int field_offset = glsl_get_struct_field_offset(type, i);
1831 assert(field_offset >= 0);
1832 const struct glsl_type *field_type = glsl_get_struct_field(type, i);
1833 write_constant((char *)dst + field_offset, c->elements[i], field_type);
1834 }
1835 }
1836 }
1837
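/* Lay out all nir_var_mem_constant variables, grow shader->constant_data to
 * hold them, serialize each variable's constant initializer at its
 * driver_location and lower the corresponding derefs to explicit types.
 */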
1838 bool
1839 nir_lower_mem_constant_vars(nir_shader *shader,
1840 glsl_type_size_align_func type_info)
1841 {
1842 bool progress = false;
1843
1844 unsigned old_constant_data_size = shader->constant_data_size;
1845 if (lower_vars_to_explicit(shader, &shader->variables,
1846 nir_var_mem_constant, type_info)) {
1847 assert(shader->constant_data_size > old_constant_data_size);
1848 shader->constant_data = rerzalloc_size(shader, shader->constant_data,
1849 old_constant_data_size,
1850 shader->constant_data_size);
1851
1852 nir_foreach_variable_with_modes(var, shader, nir_var_mem_constant) {
1853 write_constant((char *)shader->constant_data +
1854 var->data.driver_location,
1855 var->constant_initializer, var->type);
1856 }
1857 progress = true;
1858 }
1859
1860 nir_foreach_function(function, shader) {
1861 if (!function->impl)
1862 continue;
1863
1864 if (nir_lower_vars_to_explicit_types_impl(function->impl,
1865 nir_var_mem_constant,
1866 type_info))
1867 progress = true;
1868 }
1869
1870 return progress;
1871 }
1872
1873 /**
1874 * Return the offset source for a load/store intrinsic.
1875 */
1876 nir_src *
1877 nir_get_io_offset_src(nir_intrinsic_instr *instr)
1878 {
1879 switch (instr->intrinsic) {
1880 case nir_intrinsic_load_input:
1881 case nir_intrinsic_load_output:
1882 case nir_intrinsic_load_shared:
1883 case nir_intrinsic_load_uniform:
1884 case nir_intrinsic_load_global:
1885 case nir_intrinsic_load_global_constant:
1886 case nir_intrinsic_load_scratch:
1887 case nir_intrinsic_load_fs_input_interp_deltas:
1888 case nir_intrinsic_shared_atomic_add:
1889 case nir_intrinsic_shared_atomic_and:
1890 case nir_intrinsic_shared_atomic_comp_swap:
1891 case nir_intrinsic_shared_atomic_exchange:
1892 case nir_intrinsic_shared_atomic_fadd:
1893 case nir_intrinsic_shared_atomic_fcomp_swap:
1894 case nir_intrinsic_shared_atomic_fmax:
1895 case nir_intrinsic_shared_atomic_fmin:
1896 case nir_intrinsic_shared_atomic_imax:
1897 case nir_intrinsic_shared_atomic_imin:
1898 case nir_intrinsic_shared_atomic_or:
1899 case nir_intrinsic_shared_atomic_umax:
1900 case nir_intrinsic_shared_atomic_umin:
1901 case nir_intrinsic_shared_atomic_xor:
1902 case nir_intrinsic_global_atomic_add:
1903 case nir_intrinsic_global_atomic_and:
1904 case nir_intrinsic_global_atomic_comp_swap:
1905 case nir_intrinsic_global_atomic_exchange:
1906 case nir_intrinsic_global_atomic_fadd:
1907 case nir_intrinsic_global_atomic_fcomp_swap:
1908 case nir_intrinsic_global_atomic_fmax:
1909 case nir_intrinsic_global_atomic_fmin:
1910 case nir_intrinsic_global_atomic_imax:
1911 case nir_intrinsic_global_atomic_imin:
1912 case nir_intrinsic_global_atomic_or:
1913 case nir_intrinsic_global_atomic_umax:
1914 case nir_intrinsic_global_atomic_umin:
1915 case nir_intrinsic_global_atomic_xor:
1916 return &instr->src[0];
1917 case nir_intrinsic_load_ubo:
1918 case nir_intrinsic_load_ssbo:
1919 case nir_intrinsic_load_input_vertex:
1920 case nir_intrinsic_load_per_vertex_input:
1921 case nir_intrinsic_load_per_vertex_output:
1922 case nir_intrinsic_load_interpolated_input:
1923 case nir_intrinsic_store_output:
1924 case nir_intrinsic_store_shared:
1925 case nir_intrinsic_store_global:
1926 case nir_intrinsic_store_scratch:
1927 case nir_intrinsic_ssbo_atomic_add:
1928 case nir_intrinsic_ssbo_atomic_imin:
1929 case nir_intrinsic_ssbo_atomic_umin:
1930 case nir_intrinsic_ssbo_atomic_imax:
1931 case nir_intrinsic_ssbo_atomic_umax:
1932 case nir_intrinsic_ssbo_atomic_and:
1933 case nir_intrinsic_ssbo_atomic_or:
1934 case nir_intrinsic_ssbo_atomic_xor:
1935 case nir_intrinsic_ssbo_atomic_exchange:
1936 case nir_intrinsic_ssbo_atomic_comp_swap:
1937 case nir_intrinsic_ssbo_atomic_fadd:
1938 case nir_intrinsic_ssbo_atomic_fmin:
1939 case nir_intrinsic_ssbo_atomic_fmax:
1940 case nir_intrinsic_ssbo_atomic_fcomp_swap:
1941 return &instr->src[1];
1942 case nir_intrinsic_store_ssbo:
1943 case nir_intrinsic_store_per_vertex_output:
1944 return &instr->src[2];
1945 default:
1946 return NULL;
1947 }
1948 }
1949
1950 /**
1951 * Return the vertex index source for a load/store per_vertex intrinsic.
1952 */
1953 nir_src *
1954 nir_get_io_vertex_index_src(nir_intrinsic_instr *instr)
1955 {
1956 switch (instr->intrinsic) {
1957 case nir_intrinsic_load_per_vertex_input:
1958 case nir_intrinsic_load_per_vertex_output:
1959 return &instr->src[0];
1960 case nir_intrinsic_store_per_vertex_output:
1961 return &instr->src[1];
1962 default:
1963 return NULL;
1964 }
1965 }
1966
1967 /**
1968 * Return the numeric constant that identifies a NULL pointer for each
1969 * address format.
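 *
 * For example, a driver could materialize a null pointer with the builder
 * roughly like this (a sketch; it assumes the usual NIR helpers for the
 * component count and bit size of an address format):
 *
 *    nir_ssa_def *null_addr =
 *       nir_build_imm(b, nir_address_format_num_components(addr_format),
 *                     nir_address_format_bit_size(addr_format),
 *                     nir_address_format_null_value(addr_format));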
1970 */
1971 const nir_const_value *
1972 nir_address_format_null_value(nir_address_format addr_format)
1973 {
1974 const static nir_const_value null_values[][NIR_MAX_VEC_COMPONENTS] = {
1975 [nir_address_format_32bit_global] = {{0}},
1976 [nir_address_format_64bit_global] = {{0}},
1977 [nir_address_format_64bit_bounded_global] = {{0}},
1978 [nir_address_format_32bit_index_offset] = {{.u32 = ~0}, {.u32 = ~0}},
1979 [nir_address_format_32bit_index_offset_pack64] = {{.u64 = ~0ull}},
1980 [nir_address_format_vec2_index_32bit_offset] = {{.u32 = ~0}, {.u32 = ~0}, {.u32 = ~0}},
1981 [nir_address_format_32bit_offset] = {{.u32 = ~0}},
1982 [nir_address_format_32bit_offset_as_64bit] = {{.u64 = ~0ull}},
1983 [nir_address_format_logical] = {{.u32 = ~0}},
1984 };
1985
1986 assert(addr_format < ARRAY_SIZE(null_values));
1987 return null_values[addr_format];
1988 }
1989
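/* Build a single boolean that is true when addr0 and addr1, both expressed
 * in addr_format, compare equal component-wise.
 */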
1990 nir_ssa_def *
1991 nir_build_addr_ieq(nir_builder *b, nir_ssa_def *addr0, nir_ssa_def *addr1,
1992 nir_address_format addr_format)
1993 {
1994 switch (addr_format) {
1995 case nir_address_format_32bit_global:
1996 case nir_address_format_64bit_global:
1997 case nir_address_format_64bit_bounded_global:
1998 case nir_address_format_32bit_index_offset:
1999 case nir_address_format_vec2_index_32bit_offset:
2000 case nir_address_format_32bit_offset:
2001 return nir_ball_iequal(b, addr0, addr1);
2002
2003 case nir_address_format_32bit_offset_as_64bit:
2004 assert(addr0->num_components == 1 && addr1->num_components == 1);
2005 return nir_ieq(b, nir_u2u32(b, addr0), nir_u2u32(b, addr1));
2006
2007 case nir_address_format_32bit_index_offset_pack64:
2008 assert(addr0->num_components == 1 && addr1->num_components == 1);
2009 return nir_ball_iequal(b, nir_unpack_64_2x32(b, addr0), nir_unpack_64_2x32(b, addr1));
2010
2011 case nir_address_format_logical:
2012 unreachable("Unsupported address format");
2013 }
2014
2015 unreachable("Invalid address format");
2016 }
2017
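/* Compute addr0 - addr1 as a byte offset. For index+offset formats the two
 * addresses are assumed to point into the same buffer, so only the offset
 * components are subtracted.
 */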
2018 nir_ssa_def *
2019 nir_build_addr_isub(nir_builder *b, nir_ssa_def *addr0, nir_ssa_def *addr1,
2020 nir_address_format addr_format)
2021 {
2022 switch (addr_format) {
2023 case nir_address_format_32bit_global:
2024 case nir_address_format_64bit_global:
2025 case nir_address_format_32bit_offset:
2026 case nir_address_format_32bit_index_offset_pack64:
2027 assert(addr0->num_components == 1);
2028 assert(addr1->num_components == 1);
2029 return nir_isub(b, addr0, addr1);
2030
2031 case nir_address_format_32bit_offset_as_64bit:
2032 assert(addr0->num_components == 1);
2033 assert(addr1->num_components == 1);
2034 return nir_u2u64(b, nir_isub(b, nir_u2u32(b, addr0), nir_u2u32(b, addr1)));
2035
2036 case nir_address_format_64bit_bounded_global:
2037 return nir_isub(b, addr_to_global(b, addr0, addr_format),
2038 addr_to_global(b, addr1, addr_format));
2039
2040 case nir_address_format_32bit_index_offset:
2041 assert(addr0->num_components == 2);
2042 assert(addr1->num_components == 2);
2043 /* Assume the same buffer index. */
2044 return nir_isub(b, nir_channel(b, addr0, 1), nir_channel(b, addr1, 1));
2045
2046 case nir_address_format_vec2_index_32bit_offset:
2047 assert(addr0->num_components == 3);
2048 assert(addr1->num_components == 3);
2049 /* Assume the same buffer index. */
2050 return nir_isub(b, nir_channel(b, addr0, 2), nir_channel(b, addr1, 2));
2051
2052 case nir_address_format_logical:
2053 unreachable("Unsupported address format");
2054 }
2055
2056 unreachable("Invalid address format");
2057 }
2058
2059 static bool
2060 is_input(nir_intrinsic_instr *intrin)
2061 {
2062 return intrin->intrinsic == nir_intrinsic_load_input ||
2063 intrin->intrinsic == nir_intrinsic_load_per_vertex_input ||
2064 intrin->intrinsic == nir_intrinsic_load_interpolated_input ||
2065 intrin->intrinsic == nir_intrinsic_load_fs_input_interp_deltas;
2066 }
2067
2068 static bool
2069 is_output(nir_intrinsic_instr *intrin)
2070 {
2071 return intrin->intrinsic == nir_intrinsic_load_output ||
2072 intrin->intrinsic == nir_intrinsic_load_per_vertex_output ||
2073 intrin->intrinsic == nir_intrinsic_store_output ||
2074 intrin->intrinsic == nir_intrinsic_store_per_vertex_output;
2075 }
2076
2077 static bool is_dual_slot(nir_intrinsic_instr *intrin)
2078 {
2079 if (intrin->intrinsic == nir_intrinsic_store_output ||
2080 intrin->intrinsic == nir_intrinsic_store_per_vertex_output) {
2081 return nir_src_bit_size(intrin->src[0]) == 64 &&
2082 nir_src_num_components(intrin->src[0]) >= 3;
2083 }
2084
2085 return nir_dest_bit_size(intrin->dest) == 64 &&
2086 nir_dest_num_components(intrin->dest) >= 3;
2087 }
2088
2089 /**
2090 * This pass adds constant offsets to instr->const_index[0] for input/output
2091 * intrinsics, and resets the offset source to 0. Non-constant offsets remain
2092 * unchanged - since we don't know what part of a compound variable is
2093 * accessed, we allocate storage for the entire thing. For drivers that use
2094 * nir_lower_io_to_temporaries() before nir_lower_io(), this guarantees that
2095 * the offset source will be 0, so that they don't have to add it in manually.
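 *
 * For example (illustrative numbers), a load_input with base=1 and a constant
 * offset source of 2 ends up with base=3, an offset of 0 and its
 * io_semantics.location advanced by 2. The pass is typically invoked once per
 * mode:
 *
 *    nir_io_add_const_offset_to_base(nir, nir_var_shader_in);
 *    nir_io_add_const_offset_to_base(nir, nir_var_shader_out);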
2096 */
2097
2098 static bool
2099 add_const_offset_to_base_block(nir_block *block, nir_builder *b,
2100 nir_variable_mode mode)
2101 {
2102 bool progress = false;
2103 nir_foreach_instr_safe(instr, block) {
2104 if (instr->type != nir_instr_type_intrinsic)
2105 continue;
2106
2107 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
2108
2109 if ((mode == nir_var_shader_in && is_input(intrin)) ||
2110 (mode == nir_var_shader_out && is_output(intrin))) {
2111 nir_src *offset = nir_get_io_offset_src(intrin);
2112
2113 if (nir_src_is_const(*offset)) {
2114 unsigned off = nir_src_as_uint(*offset);
2115
2116 nir_intrinsic_set_base(intrin, nir_intrinsic_base(intrin) + off);
2117
2118 nir_io_semantics sem = nir_intrinsic_io_semantics(intrin);
2119 sem.location += off;
2120 /* non-indirect indexing should reduce num_slots */
2121 sem.num_slots = is_dual_slot(intrin) ? 2 : 1;
2122 nir_intrinsic_set_io_semantics(intrin, sem);
2123
2124 b->cursor = nir_before_instr(&intrin->instr);
2125 nir_instr_rewrite_src(&intrin->instr, offset,
2126 nir_src_for_ssa(nir_imm_int(b, 0)));
2127 progress = true;
2128 }
2129 }
2130 }
2131
2132 return progress;
2133 }
2134
2135 bool
2136 nir_io_add_const_offset_to_base(nir_shader *nir, nir_variable_mode mode)
2137 {
2138 bool progress = false;
2139
2140 nir_foreach_function(f, nir) {
2141 if (f->impl) {
2142 nir_builder b;
2143 nir_builder_init(&b, f->impl);
2144 nir_foreach_block(block, f->impl) {
2145 progress |= add_const_offset_to_base_block(block, &b, mode);
2146 }
2147 }
2148 }
2149
2150 return progress;
2151 }
2152