anv: Set alignments on UBO/SSBO root derefs
[mesa.git] src/intel/vulkan/anv_nir_apply_pipeline_layout.c
1 /*
2 * Copyright © 2015 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include "anv_nir.h"
25 #include "program/prog_parameter.h"
26 #include "nir/nir_builder.h"
27 #include "compiler/brw_nir.h"
28 #include "util/mesa-sha1.h"
29 #include "util/set.h"
30
31 /* Sampler tables don't actually have a maximum size but we pick one just so
32 * that we don't end up emitting too much state on-the-fly.
33 */
34 #define MAX_SAMPLER_TABLE_SIZE 128
35 #define BINDLESS_OFFSET 255
36
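/* Per-shader state for this pass: the pipeline layout being applied, the NIR
 * builder, and, for each descriptor set, per-binding use counts together with
 * the binding-table offsets assigned to its surfaces and samplers.
 */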
37 struct apply_pipeline_layout_state {
38 const struct anv_physical_device *pdevice;
39
40 nir_shader *shader;
41 nir_builder builder;
42
43 const struct anv_pipeline_layout *layout;
44 bool add_bounds_checks;
45 nir_address_format ssbo_addr_format;
46
47 /* Place to flag lowered instructions so we don't lower them twice */
48 struct set *lowered_instrs;
49
50 bool uses_constants;
51 bool has_dynamic_buffers;
52 uint8_t constants_offset;
53 struct {
54 bool desc_buffer_used;
55 uint8_t desc_offset;
56
57 uint8_t *use_count;
58 uint8_t *surface_offsets;
59 uint8_t *sampler_offsets;
60 } set[MAX_SETS];
61 };
62
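/* Record one use of (set, binding).  The use counts gathered here drive the
 * binding-table slot assignment done in anv_nir_apply_pipeline_layout().
 */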
63 static void
64 add_binding(struct apply_pipeline_layout_state *state,
65 uint32_t set, uint32_t binding)
66 {
67 const struct anv_descriptor_set_binding_layout *bind_layout =
68 &state->layout->set[set].layout->binding[binding];
69
70 if (state->set[set].use_count[binding] < UINT8_MAX)
71 state->set[set].use_count[binding]++;
72
73 /* Only flag the descriptor buffer as used if there's actually data for
74 * this binding. This lets us be lazy and call this function constantly
75 * without worrying about unnecessarily enabling the buffer.
76 */
77 if (anv_descriptor_size(bind_layout))
78 state->set[set].desc_buffer_used = true;
79 }
80
81 static void
82 add_deref_src_binding(struct apply_pipeline_layout_state *state, nir_src src)
83 {
84 nir_deref_instr *deref = nir_src_as_deref(src);
85 nir_variable *var = nir_deref_instr_get_variable(deref);
86 add_binding(state, var->data.descriptor_set, var->data.binding);
87 }
88
89 static void
90 add_tex_src_binding(struct apply_pipeline_layout_state *state,
91 nir_tex_instr *tex, nir_tex_src_type deref_src_type)
92 {
93 int deref_src_idx = nir_tex_instr_src_index(tex, deref_src_type);
94 if (deref_src_idx < 0)
95 return;
96
97 add_deref_src_binding(state, tex->src[deref_src_idx].src);
98 }
99
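/* Scan a block and record every (set, binding) referenced by resource index
 * intrinsics, image derefs, and texture/sampler derefs; also note whether the
 * shader uses load_constant.
 */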
100 static void
101 get_used_bindings_block(nir_block *block,
102 struct apply_pipeline_layout_state *state)
103 {
104 nir_foreach_instr_safe(instr, block) {
105 switch (instr->type) {
106 case nir_instr_type_intrinsic: {
107 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
108 switch (intrin->intrinsic) {
109 case nir_intrinsic_vulkan_resource_index:
110 add_binding(state, nir_intrinsic_desc_set(intrin),
111 nir_intrinsic_binding(intrin));
112 break;
113
114 case nir_intrinsic_image_deref_load:
115 case nir_intrinsic_image_deref_store:
116 case nir_intrinsic_image_deref_atomic_add:
117 case nir_intrinsic_image_deref_atomic_imin:
118 case nir_intrinsic_image_deref_atomic_umin:
119 case nir_intrinsic_image_deref_atomic_imax:
120 case nir_intrinsic_image_deref_atomic_umax:
121 case nir_intrinsic_image_deref_atomic_and:
122 case nir_intrinsic_image_deref_atomic_or:
123 case nir_intrinsic_image_deref_atomic_xor:
124 case nir_intrinsic_image_deref_atomic_exchange:
125 case nir_intrinsic_image_deref_atomic_comp_swap:
126 case nir_intrinsic_image_deref_size:
127 case nir_intrinsic_image_deref_samples:
128 case nir_intrinsic_image_deref_load_param_intel:
129 case nir_intrinsic_image_deref_load_raw_intel:
130 case nir_intrinsic_image_deref_store_raw_intel:
131 add_deref_src_binding(state, intrin->src[0]);
132 break;
133
134 case nir_intrinsic_load_constant:
135 state->uses_constants = true;
136 break;
137
138 default:
139 break;
140 }
141 break;
142 }
143 case nir_instr_type_tex: {
144 nir_tex_instr *tex = nir_instr_as_tex(instr);
145 add_tex_src_binding(state, tex, nir_tex_src_texture_deref);
146 add_tex_src_binding(state, tex, nir_tex_src_sampler_deref);
147 break;
148 }
149 default:
150 continue;
151 }
152 }
153 }
154
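/* Walk a chain of vulkan_resource_reindex intrinsics back to the original
 * vulkan_resource_index and report whether that binding was given a real
 * binding-table slot (i.e. it is not bindless) and can use BTI messages.
 */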
155 static bool
156 find_descriptor_for_index_src(nir_src src,
157 struct apply_pipeline_layout_state *state)
158 {
159 nir_intrinsic_instr *intrin = nir_src_as_intrinsic(src);
160
161 while (intrin && intrin->intrinsic == nir_intrinsic_vulkan_resource_reindex)
162 intrin = nir_src_as_intrinsic(intrin->src[0]);
163
164 if (!intrin || intrin->intrinsic != nir_intrinsic_vulkan_resource_index)
165 return false;
166
167 uint32_t set = nir_intrinsic_desc_set(intrin);
168 uint32_t binding = nir_intrinsic_binding(intrin);
169 uint32_t surface_index = state->set[set].surface_offsets[binding];
170
171 /* Only lower to a BTI message if we have a valid binding table index. */
172 return surface_index < MAX_BINDING_TABLE_SIZE;
173 }
174
175 static bool
176 nir_deref_find_descriptor(nir_deref_instr *deref,
177 struct apply_pipeline_layout_state *state)
178 {
179 while (1) {
180 /* Nothing we will use this on has a variable */
181 assert(deref->deref_type != nir_deref_type_var);
182
183 nir_deref_instr *parent = nir_src_as_deref(deref->parent);
184 if (!parent)
185 break;
186
187 deref = parent;
188 }
189 assert(deref->deref_type == nir_deref_type_cast);
190
191 nir_intrinsic_instr *intrin = nir_src_as_intrinsic(deref->parent);
192 if (!intrin || intrin->intrinsic != nir_intrinsic_load_vulkan_descriptor)
193 return false;
194
195 return find_descriptor_for_index_src(intrin->src[0], state);
196 }
197
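/* Build a flat binding-table index for a resource index/reindex chain: the
 * binding's surface offset plus the array index (clamped to the array size
 * when the index is constant or bounds checks are enabled), with any
 * reindex links adding their offset on top.
 */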
198 static nir_ssa_def *
199 build_index_for_res_reindex(nir_intrinsic_instr *intrin,
200 struct apply_pipeline_layout_state *state)
201 {
202 nir_builder *b = &state->builder;
203
204 if (intrin->intrinsic == nir_intrinsic_vulkan_resource_reindex) {
205 nir_ssa_def *bti =
206 build_index_for_res_reindex(nir_src_as_intrinsic(intrin->src[0]), state);
207
208 b->cursor = nir_before_instr(&intrin->instr);
209 return nir_iadd(b, bti, nir_ssa_for_src(b, intrin->src[1], 1));
210 }
211
212 assert(intrin->intrinsic == nir_intrinsic_vulkan_resource_index);
213
214 uint32_t set = nir_intrinsic_desc_set(intrin);
215 uint32_t binding = nir_intrinsic_binding(intrin);
216
217 const struct anv_descriptor_set_binding_layout *bind_layout =
218 &state->layout->set[set].layout->binding[binding];
219
220 uint32_t surface_index = state->set[set].surface_offsets[binding];
221 uint32_t array_size = bind_layout->array_size;
222
223 b->cursor = nir_before_instr(&intrin->instr);
224
225 nir_ssa_def *array_index = nir_ssa_for_src(b, intrin->src[0], 1);
226 if (nir_src_is_const(intrin->src[0]) || state->add_bounds_checks)
227 array_index = nir_umin(b, array_index, nir_imm_int(b, array_size - 1));
228
229 return nir_iadd_imm(b, array_index, surface_index);
230 }
231
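/* Recursively build a 32-bit (binding-table index, offset) address for a
 * deref chain rooted at a load_vulkan_descriptor, using
 * nir_address_format_32bit_index_offset.
 */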
232 static nir_ssa_def *
233 build_index_offset_for_deref(nir_deref_instr *deref,
234 struct apply_pipeline_layout_state *state)
235 {
236 nir_builder *b = &state->builder;
237
238 nir_deref_instr *parent = nir_deref_instr_parent(deref);
239 if (parent) {
240 nir_ssa_def *addr = build_index_offset_for_deref(parent, state);
241
242 b->cursor = nir_before_instr(&deref->instr);
243 return nir_explicit_io_address_from_deref(b, deref, addr,
244 nir_address_format_32bit_index_offset);
245 }
246
247 nir_intrinsic_instr *load_desc = nir_src_as_intrinsic(deref->parent);
248 assert(load_desc->intrinsic == nir_intrinsic_load_vulkan_descriptor);
249
250 nir_ssa_def *index =
251 build_index_for_res_reindex(nir_src_as_intrinsic(load_desc->src[0]), state);
252
253 /* Return a 0 offset which will get picked up by the recursion */
254 b->cursor = nir_before_instr(&deref->instr);
255 return nir_vec2(b, index, nir_imm_int(b, 0));
256 }
257
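/* Try to lower an SSBO deref access to the index+offset (BTI) model.  Bails
 * out for 64-bit atomics, non-uniform access, and descriptors that never got
 * a binding-table slot; returns true when the instruction was lowered.
 */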
258 static bool
259 try_lower_direct_buffer_intrinsic(nir_intrinsic_instr *intrin, bool is_atomic,
260 struct apply_pipeline_layout_state *state)
261 {
262 nir_builder *b = &state->builder;
263
264 nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
265 if (deref->mode != nir_var_mem_ssbo)
266 return false;
267
268 /* 64-bit atomics only support A64 messages so we can't lower them to the
269 * index+offset model.
270 */
271 if (is_atomic && nir_dest_bit_size(intrin->dest) == 64)
272 return false;
273
274 /* Normal binding table-based messages can't handle non-uniform access so
275 * we have to fall back to A64.
276 */
277 if (nir_intrinsic_access(intrin) & ACCESS_NON_UNIFORM)
278 return false;
279
280 if (!nir_deref_find_descriptor(deref, state))
281 return false;
282
283 nir_ssa_def *addr = build_index_offset_for_deref(deref, state);
284
285 b->cursor = nir_before_instr(&intrin->instr);
286 nir_lower_explicit_io_instr(b, intrin, addr,
287 nir_address_format_32bit_index_offset);
288 return true;
289 }
290
291 static void
292 lower_direct_buffer_access(nir_function_impl *impl,
293 struct apply_pipeline_layout_state *state)
294 {
295 nir_foreach_block(block, impl) {
296 nir_foreach_instr_safe(instr, block) {
297 if (instr->type != nir_instr_type_intrinsic)
298 continue;
299
300 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
301 switch (intrin->intrinsic) {
302 case nir_intrinsic_load_deref:
303 case nir_intrinsic_store_deref:
304 try_lower_direct_buffer_intrinsic(intrin, false, state);
305 break;
306 case nir_intrinsic_deref_atomic_add:
307 case nir_intrinsic_deref_atomic_imin:
308 case nir_intrinsic_deref_atomic_umin:
309 case nir_intrinsic_deref_atomic_imax:
310 case nir_intrinsic_deref_atomic_umax:
311 case nir_intrinsic_deref_atomic_and:
312 case nir_intrinsic_deref_atomic_or:
313 case nir_intrinsic_deref_atomic_xor:
314 case nir_intrinsic_deref_atomic_exchange:
315 case nir_intrinsic_deref_atomic_comp_swap:
316 case nir_intrinsic_deref_atomic_fmin:
317 case nir_intrinsic_deref_atomic_fmax:
318 case nir_intrinsic_deref_atomic_fcomp_swap:
319 try_lower_direct_buffer_intrinsic(intrin, true, state);
320 break;
321
322 case nir_intrinsic_get_buffer_size: {
 323          /* The get_buffer_size intrinsic always just takes an
324 * index/reindex intrinsic.
325 */
326 if (!find_descriptor_for_index_src(intrin->src[0], state))
327 break;
328
329 nir_ssa_def *index =
330 build_index_for_res_reindex(nir_src_as_intrinsic(intrin->src[0]),
331 state);
332 nir_instr_rewrite_src(&intrin->instr, &intrin->src[0],
333 nir_src_for_ssa(index));
334 _mesa_set_add(state->lowered_instrs, intrin);
335 }
336
337 default:
338 break;
339 }
340 }
341 }
342 }
343
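/* Address format used for descriptors of the given type: storage buffers
 * follow the device's SSBO address format, everything else uses the 32-bit
 * index+offset format.
 */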
344 static nir_address_format
345 desc_addr_format(VkDescriptorType desc_type,
346 struct apply_pipeline_layout_state *state)
347 {
348 return (desc_type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
349 desc_type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) ?
350 state->ssbo_addr_format : nir_address_format_32bit_index_offset;
351 }
352
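/* Lower vulkan_resource_index to the index representation consumed later by
 * lower_load_vulkan_descriptor(): for SSBOs on hardware with A64 buffer
 * access, a 16.8.8-encoded descriptor offset paired with the array index
 * (a bounded vec4 or a packed 64-bit value); for inline uniform blocks, the
 * descriptor buffer index and byte offset; otherwise a plain
 * (binding table index, 0) pair.
 */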
353 static void
354 lower_res_index_intrinsic(nir_intrinsic_instr *intrin,
355 struct apply_pipeline_layout_state *state)
356 {
357 nir_builder *b = &state->builder;
358
359 b->cursor = nir_before_instr(&intrin->instr);
360
361 uint32_t set = nir_intrinsic_desc_set(intrin);
362 uint32_t binding = nir_intrinsic_binding(intrin);
363 const VkDescriptorType desc_type = nir_intrinsic_desc_type(intrin);
364
365 const struct anv_descriptor_set_binding_layout *bind_layout =
366 &state->layout->set[set].layout->binding[binding];
367
368 uint32_t surface_index = state->set[set].surface_offsets[binding];
369 uint32_t array_size = bind_layout->array_size;
370
371 nir_ssa_def *array_index = nir_ssa_for_src(b, intrin->src[0], 1);
372 if (nir_src_is_const(intrin->src[0]) || state->add_bounds_checks)
373 array_index = nir_umin(b, array_index, nir_imm_int(b, array_size - 1));
374
375 nir_ssa_def *index;
376 if (state->pdevice->has_a64_buffer_access &&
377 (desc_type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
378 desc_type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)) {
379 /* We store the descriptor offset as 16.8.8 where the top 16 bits are
380 * the offset into the descriptor set, the next 8 are the binding table
381 * index of the descriptor buffer, and the bottom 8 bits are the offset
382 * (in bytes) into the dynamic offset table.
383 */
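      /* As a purely hypothetical example: descriptor_offset 0x40, a
       * descriptor buffer at binding-table index 2, and dynamic offset index
       * 5 would be encoded as (0x40 << 16) | (2 << 8) | 5 = 0x00400205.
       */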
384 assert(bind_layout->dynamic_offset_index < MAX_DYNAMIC_BUFFERS);
385 uint32_t dynamic_offset_index = 0xff; /* No dynamic offset */
386 if (bind_layout->dynamic_offset_index >= 0) {
387 dynamic_offset_index =
388 state->layout->set[set].dynamic_offset_start +
389 bind_layout->dynamic_offset_index;
390 }
391
392 const uint32_t desc_offset =
393 bind_layout->descriptor_offset << 16 |
394 (uint32_t)state->set[set].desc_offset << 8 |
395 dynamic_offset_index;
396
397 if (state->add_bounds_checks) {
398 assert(desc_addr_format(desc_type, state) ==
399 nir_address_format_64bit_bounded_global);
400 assert(intrin->dest.ssa.num_components == 4);
401 assert(intrin->dest.ssa.bit_size == 32);
402 index = nir_vec4(b, nir_imm_int(b, desc_offset),
403 nir_ssa_for_src(b, intrin->src[0], 1),
404 nir_imm_int(b, array_size - 1),
405 nir_ssa_undef(b, 1, 32));
406 } else {
407 assert(desc_addr_format(desc_type, state) ==
408 nir_address_format_64bit_global);
409 assert(intrin->dest.ssa.num_components == 1);
410 assert(intrin->dest.ssa.bit_size == 64);
411 index = nir_pack_64_2x32_split(b, nir_imm_int(b, desc_offset),
412 nir_ssa_for_src(b, intrin->src[0], 1));
413 }
414 } else if (bind_layout->data & ANV_DESCRIPTOR_INLINE_UNIFORM) {
415 /* This is an inline uniform block. Just reference the descriptor set
416 * and use the descriptor offset as the base.
417 */
418 assert(desc_addr_format(desc_type, state) ==
419 nir_address_format_32bit_index_offset);
420 assert(intrin->dest.ssa.num_components == 2);
421 assert(intrin->dest.ssa.bit_size == 32);
422 index = nir_imm_ivec2(b, state->set[set].desc_offset,
423 bind_layout->descriptor_offset);
424 } else {
425 assert(desc_addr_format(desc_type, state) ==
426 nir_address_format_32bit_index_offset);
427 assert(intrin->dest.ssa.num_components == 2);
428 assert(intrin->dest.ssa.bit_size == 32);
429 index = nir_vec2(b, nir_iadd_imm(b, array_index, surface_index),
430 nir_imm_int(b, 0));
431 }
432
433 assert(intrin->dest.is_ssa);
434 nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(index));
435 nir_instr_remove(&intrin->instr);
436 }
437
438 static void
439 lower_res_reindex_intrinsic(nir_intrinsic_instr *intrin,
440 struct apply_pipeline_layout_state *state)
441 {
442 nir_builder *b = &state->builder;
443
444 b->cursor = nir_before_instr(&intrin->instr);
445
446 const VkDescriptorType desc_type = nir_intrinsic_desc_type(intrin);
447
448 /* For us, the resource indices are just indices into the binding table and
449 * array elements are sequential. A resource_reindex just turns into an
450 * add of the two indices.
451 */
452 assert(intrin->src[0].is_ssa && intrin->src[1].is_ssa);
453 nir_ssa_def *old_index = intrin->src[0].ssa;
454 nir_ssa_def *offset = intrin->src[1].ssa;
455
456 nir_ssa_def *new_index;
457 switch (desc_addr_format(desc_type, state)) {
458 case nir_address_format_64bit_bounded_global:
459 /* See also lower_res_index_intrinsic() */
460 assert(intrin->dest.ssa.num_components == 4);
461 assert(intrin->dest.ssa.bit_size == 32);
462 new_index = nir_vec4(b, nir_channel(b, old_index, 0),
463 nir_iadd(b, nir_channel(b, old_index, 1),
464 offset),
465 nir_channel(b, old_index, 2),
466 nir_ssa_undef(b, 1, 32));
467 break;
468
469 case nir_address_format_64bit_global: {
470 /* See also lower_res_index_intrinsic() */
471 assert(intrin->dest.ssa.num_components == 1);
472 assert(intrin->dest.ssa.bit_size == 64);
473 nir_ssa_def *base = nir_unpack_64_2x32_split_x(b, old_index);
474 nir_ssa_def *arr_idx = nir_unpack_64_2x32_split_y(b, old_index);
475 new_index = nir_pack_64_2x32_split(b, base, nir_iadd(b, arr_idx, offset));
476 break;
477 }
478
479 case nir_address_format_32bit_index_offset:
480 assert(intrin->dest.ssa.num_components == 2);
481 assert(intrin->dest.ssa.bit_size == 32);
482 new_index = nir_vec2(b, nir_iadd(b, nir_channel(b, old_index, 0), offset),
483 nir_channel(b, old_index, 1));
484 break;
485
486 default:
 487       unreachable("Unhandled address format");
488 }
489
490 assert(intrin->dest.is_ssa);
491 nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(new_index));
492 nir_instr_remove(&intrin->instr);
493 }
494
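/* Load the SSBO descriptor (64-bit address in the first two dwords, size in
 * the third) from the descriptor buffer.  The 16.8.8-encoded offset produced
 * by lower_res_index_intrinsic() is unpacked here to find the descriptor
 * buffer's binding-table index and the byte offset of the array element.
 */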
495 static nir_ssa_def *
496 build_ssbo_descriptor_load(const VkDescriptorType desc_type,
497 nir_ssa_def *index,
498 struct apply_pipeline_layout_state *state)
499 {
500 nir_builder *b = &state->builder;
501
502 nir_ssa_def *desc_offset, *array_index;
503 switch (state->ssbo_addr_format) {
504 case nir_address_format_64bit_bounded_global:
505 /* See also lower_res_index_intrinsic() */
506 desc_offset = nir_channel(b, index, 0);
507 array_index = nir_umin(b, nir_channel(b, index, 1),
508 nir_channel(b, index, 2));
509 break;
510
511 case nir_address_format_64bit_global:
512 /* See also lower_res_index_intrinsic() */
513 desc_offset = nir_unpack_64_2x32_split_x(b, index);
514 array_index = nir_unpack_64_2x32_split_y(b, index);
515 break;
516
517 default:
518 unreachable("Unhandled address format for SSBO");
519 }
520
521 /* The desc_offset is actually 16.8.8 */
522 nir_ssa_def *desc_buffer_index =
523 nir_extract_u8(b, desc_offset, nir_imm_int(b, 1));
524 nir_ssa_def *desc_offset_base =
525 nir_extract_u16(b, desc_offset, nir_imm_int(b, 1));
526
527 /* Compute the actual descriptor offset */
528 const unsigned descriptor_size =
529 anv_descriptor_type_size(state->pdevice, desc_type);
530 desc_offset = nir_iadd(b, desc_offset_base,
531 nir_imul_imm(b, array_index, descriptor_size));
532
533 nir_intrinsic_instr *desc_load =
534 nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_ubo);
535 desc_load->src[0] = nir_src_for_ssa(desc_buffer_index);
536 desc_load->src[1] = nir_src_for_ssa(desc_offset);
537 nir_intrinsic_set_align(desc_load, 8, 0);
538 desc_load->num_components = 4;
539 nir_ssa_dest_init(&desc_load->instr, &desc_load->dest, 4, 32, NULL);
540 nir_builder_instr_insert(b, &desc_load->instr);
541
542 return &desc_load->dest.ssa;
543 }
544
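/* Lower load_vulkan_descriptor.  First, stamp ANV_UBO_ALIGNMENT or
 * ANV_SSBO_ALIGNMENT onto any cast deref fed by this descriptor so later
 * passes know the alignment of the root pointer.  Then turn the index built
 * by lower_res_index_intrinsic() into either a 64-bit A64 pointer (adding
 * any dynamic buffer offset pulled from push constants) or leave it as a
 * 32-bit index+offset pair.
 */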
545 static void
546 lower_load_vulkan_descriptor(nir_intrinsic_instr *intrin,
547 struct apply_pipeline_layout_state *state)
548 {
549 nir_builder *b = &state->builder;
550
551 b->cursor = nir_before_instr(&intrin->instr);
552
553 const VkDescriptorType desc_type = nir_intrinsic_desc_type(intrin);
554
555 assert(intrin->dest.is_ssa);
556 nir_foreach_use(src, &intrin->dest.ssa) {
557 if (src->parent_instr->type != nir_instr_type_deref)
558 continue;
559
560 nir_deref_instr *cast = nir_instr_as_deref(src->parent_instr);
561 assert(cast->deref_type == nir_deref_type_cast);
562 switch (desc_type) {
563 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
564 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
565 cast->cast.align_mul = ANV_UBO_ALIGNMENT;
566 cast->cast.align_offset = 0;
567 break;
568
569 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
570 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
571 cast->cast.align_mul = ANV_SSBO_ALIGNMENT;
572 cast->cast.align_offset = 0;
573 break;
574
575 default:
576 break;
577 }
578 }
579
580 assert(intrin->src[0].is_ssa);
581 nir_ssa_def *index = intrin->src[0].ssa;
582
583 nir_ssa_def *desc;
584 if (state->pdevice->has_a64_buffer_access &&
585 (desc_type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
586 desc_type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)) {
587 desc = build_ssbo_descriptor_load(desc_type, index, state);
588
589 /* We want nir_address_format_64bit_global */
590 if (!state->add_bounds_checks)
591 desc = nir_pack_64_2x32(b, nir_channels(b, desc, 0x3));
592
593 if (state->has_dynamic_buffers) {
594 /* This shader has dynamic offsets and we have no way of knowing
 595           * (apart from the dynamic offset base index) if this buffer has a
596 * dynamic offset.
597 */
598 nir_ssa_def *desc_offset, *array_index;
599 switch (state->ssbo_addr_format) {
600 case nir_address_format_64bit_bounded_global:
601 /* See also lower_res_index_intrinsic() */
602 desc_offset = nir_channel(b, index, 0);
603 array_index = nir_umin(b, nir_channel(b, index, 1),
604 nir_channel(b, index, 2));
605 break;
606
607 case nir_address_format_64bit_global:
608 /* See also lower_res_index_intrinsic() */
609 desc_offset = nir_unpack_64_2x32_split_x(b, index);
610 array_index = nir_unpack_64_2x32_split_y(b, index);
611 break;
612
613 default:
614 unreachable("Unhandled address format for SSBO");
615 }
616
617 nir_ssa_def *dyn_offset_base =
618 nir_extract_u8(b, desc_offset, nir_imm_int(b, 0));
619 nir_ssa_def *dyn_offset_idx =
620 nir_iadd(b, dyn_offset_base, array_index);
621 if (state->add_bounds_checks) {
622 dyn_offset_idx = nir_umin(b, dyn_offset_idx,
623 nir_imm_int(b, MAX_DYNAMIC_BUFFERS));
624 }
625
626 nir_intrinsic_instr *dyn_load =
627 nir_intrinsic_instr_create(b->shader,
628 nir_intrinsic_load_push_constant);
629 nir_intrinsic_set_base(dyn_load, offsetof(struct anv_push_constants,
630 dynamic_offsets));
631 nir_intrinsic_set_range(dyn_load, MAX_DYNAMIC_BUFFERS * 4);
632 dyn_load->src[0] = nir_src_for_ssa(nir_imul_imm(b, dyn_offset_idx, 4));
633 dyn_load->num_components = 1;
634 nir_ssa_dest_init(&dyn_load->instr, &dyn_load->dest, 1, 32, NULL);
635 nir_builder_instr_insert(b, &dyn_load->instr);
636
637 nir_ssa_def *dynamic_offset =
638 nir_bcsel(b, nir_ieq(b, dyn_offset_base, nir_imm_int(b, 0xff)),
639 nir_imm_int(b, 0), &dyn_load->dest.ssa);
640
641 switch (state->ssbo_addr_format) {
642 case nir_address_format_64bit_bounded_global: {
643 /* The dynamic offset gets added to the base pointer so that we
644 * have a sliding window range.
645 */
646 nir_ssa_def *base_ptr =
647 nir_pack_64_2x32(b, nir_channels(b, desc, 0x3));
648 base_ptr = nir_iadd(b, base_ptr, nir_u2u64(b, dynamic_offset));
649 desc = nir_vec4(b, nir_unpack_64_2x32_split_x(b, base_ptr),
650 nir_unpack_64_2x32_split_y(b, base_ptr),
651 nir_channel(b, desc, 2),
652 nir_channel(b, desc, 3));
653 break;
654 }
655
656 case nir_address_format_64bit_global:
657 desc = nir_iadd(b, desc, nir_u2u64(b, dynamic_offset));
658 break;
659
660 default:
661 unreachable("Unhandled address format for SSBO");
662 }
663 }
664 } else {
665 /* We follow the nir_address_format_32bit_index_offset model */
666 desc = index;
667 }
668
669 assert(intrin->dest.is_ssa);
670 nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(desc));
671 nir_instr_remove(&intrin->instr);
672 }
673
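/* Lower get_buffer_size: with A64 buffer access the size is the third dword
 * of the SSBO descriptor; otherwise keep the BTI message and just feed it the
 * scalar binding-table index.
 */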
674 static void
675 lower_get_buffer_size(nir_intrinsic_instr *intrin,
676 struct apply_pipeline_layout_state *state)
677 {
678 if (_mesa_set_search(state->lowered_instrs, intrin))
679 return;
680
681 nir_builder *b = &state->builder;
682
683 b->cursor = nir_before_instr(&intrin->instr);
684
685 const VkDescriptorType desc_type = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
686
687 assert(intrin->src[0].is_ssa);
688 nir_ssa_def *index = intrin->src[0].ssa;
689
690 if (state->pdevice->has_a64_buffer_access) {
691 nir_ssa_def *desc = build_ssbo_descriptor_load(desc_type, index, state);
692 nir_ssa_def *size = nir_channel(b, desc, 2);
693 nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(size));
694 nir_instr_remove(&intrin->instr);
695 } else {
696 /* We're following the nir_address_format_32bit_index_offset model so
697 * the binding table index is the first component of the address. The
698 * back-end wants a scalar binding table index source.
699 */
700 nir_instr_rewrite_src(&intrin->instr, &intrin->src[0],
701 nir_src_for_ssa(nir_channel(b, index, 0)));
702 }
703 }
704
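/* Emit a load_ubo from the set's descriptor buffer at
 * bind_layout->descriptor_offset + offset, plus array_element * descriptor
 * size for array derefs.  Used for bindless handles, image parameters, and
 * texture swizzles.
 */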
705 static nir_ssa_def *
706 build_descriptor_load(nir_deref_instr *deref, unsigned offset,
707 unsigned num_components, unsigned bit_size,
708 struct apply_pipeline_layout_state *state)
709 {
710 nir_variable *var = nir_deref_instr_get_variable(deref);
711
712 unsigned set = var->data.descriptor_set;
713 unsigned binding = var->data.binding;
714 unsigned array_size =
715 state->layout->set[set].layout->binding[binding].array_size;
716
717 const struct anv_descriptor_set_binding_layout *bind_layout =
718 &state->layout->set[set].layout->binding[binding];
719
720 nir_builder *b = &state->builder;
721
722 nir_ssa_def *desc_buffer_index =
723 nir_imm_int(b, state->set[set].desc_offset);
724
725 nir_ssa_def *desc_offset =
726 nir_imm_int(b, bind_layout->descriptor_offset + offset);
727 if (deref->deref_type != nir_deref_type_var) {
728 assert(deref->deref_type == nir_deref_type_array);
729
730 const unsigned descriptor_size = anv_descriptor_size(bind_layout);
731 nir_ssa_def *arr_index = nir_ssa_for_src(b, deref->arr.index, 1);
732 if (state->add_bounds_checks)
733 arr_index = nir_umin(b, arr_index, nir_imm_int(b, array_size - 1));
734
735 desc_offset = nir_iadd(b, desc_offset,
736 nir_imul_imm(b, arr_index, descriptor_size));
737 }
738
739 nir_intrinsic_instr *desc_load =
740 nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_ubo);
741 desc_load->src[0] = nir_src_for_ssa(desc_buffer_index);
742 desc_load->src[1] = nir_src_for_ssa(desc_offset);
743 nir_intrinsic_set_align(desc_load, 8, offset % 8);
744 desc_load->num_components = num_components;
745 nir_ssa_dest_init(&desc_load->instr, &desc_load->dest,
746 num_components, bit_size, NULL);
747 nir_builder_instr_insert(b, &desc_load->instr);
748
749 return &desc_load->dest.ssa;
750 }
751
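/* Lower image deref intrinsics: param loads read the image parameters from
 * the descriptor buffer, bindless images get their handle from the
 * descriptor, and everything else is rewritten to a binding-table index.
 */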
752 static void
753 lower_image_intrinsic(nir_intrinsic_instr *intrin,
754 struct apply_pipeline_layout_state *state)
755 {
756 nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
757 nir_variable *var = nir_deref_instr_get_variable(deref);
758
759 unsigned set = var->data.descriptor_set;
760 unsigned binding = var->data.binding;
761 unsigned binding_offset = state->set[set].surface_offsets[binding];
762
763 nir_builder *b = &state->builder;
764 b->cursor = nir_before_instr(&intrin->instr);
765
766 ASSERTED const bool use_bindless = state->pdevice->has_bindless_images;
767
768 if (intrin->intrinsic == nir_intrinsic_image_deref_load_param_intel) {
769 b->cursor = nir_instr_remove(&intrin->instr);
770
771 assert(!use_bindless); /* Otherwise our offsets would be wrong */
772 const unsigned param = nir_intrinsic_base(intrin);
773
774 nir_ssa_def *desc =
775 build_descriptor_load(deref, param * 16,
776 intrin->dest.ssa.num_components,
777 intrin->dest.ssa.bit_size, state);
778
779 nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(desc));
780 } else if (binding_offset > MAX_BINDING_TABLE_SIZE) {
781 const bool write_only =
782 (var->data.access & ACCESS_NON_READABLE) != 0;
783 nir_ssa_def *desc =
784 build_descriptor_load(deref, 0, 2, 32, state);
785 nir_ssa_def *handle = nir_channel(b, desc, write_only ? 1 : 0);
786 nir_rewrite_image_intrinsic(intrin, handle, true);
787 } else {
788 unsigned array_size =
789 state->layout->set[set].layout->binding[binding].array_size;
790
791 nir_ssa_def *index = NULL;
792 if (deref->deref_type != nir_deref_type_var) {
793 assert(deref->deref_type == nir_deref_type_array);
794 index = nir_ssa_for_src(b, deref->arr.index, 1);
795 if (state->add_bounds_checks)
796 index = nir_umin(b, index, nir_imm_int(b, array_size - 1));
797 } else {
798 index = nir_imm_int(b, 0);
799 }
800
801 index = nir_iadd_imm(b, index, binding_offset);
802 nir_rewrite_image_intrinsic(intrin, index, false);
803 }
804 }
805
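/* Lower load_constant: with softpin, the constant data is addressed directly
 * through a relocated 64-bit base address; otherwise it is fetched as a UBO
 * load through a dedicated shader-constants binding-table entry.
 */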
806 static void
807 lower_load_constant(nir_intrinsic_instr *intrin,
808 struct apply_pipeline_layout_state *state)
809 {
810 nir_builder *b = &state->builder;
811
812 b->cursor = nir_instr_remove(&intrin->instr);
813
814 /* Any constant-offset load_constant instructions should have been removed
815 * by constant folding.
816 */
817 assert(!nir_src_is_const(intrin->src[0]));
818 nir_ssa_def *offset = nir_iadd_imm(b, nir_ssa_for_src(b, intrin->src[0], 1),
819 nir_intrinsic_base(intrin));
820
821 nir_ssa_def *data;
822 if (state->pdevice->use_softpin) {
823 unsigned load_size = intrin->dest.ssa.num_components *
824 intrin->dest.ssa.bit_size / 8;
825 unsigned load_align = intrin->dest.ssa.bit_size / 8;
826
827 assert(load_size < b->shader->constant_data_size);
828 unsigned max_offset = b->shader->constant_data_size - load_size;
829 offset = nir_umin(b, offset, nir_imm_int(b, max_offset));
830
831 nir_ssa_def *const_data_base_addr = nir_pack_64_2x32_split(b,
832 nir_load_reloc_const_intel(b, ANV_SHADER_RELOC_CONST_DATA_ADDR_LOW),
833 nir_load_reloc_const_intel(b, ANV_SHADER_RELOC_CONST_DATA_ADDR_HIGH));
834
835 data = nir_load_global(b, nir_iadd(b, const_data_base_addr,
836 nir_u2u64(b, offset)),
837 load_align,
838 intrin->dest.ssa.num_components,
839 intrin->dest.ssa.bit_size);
840 } else {
841 nir_ssa_def *index = nir_imm_int(b, state->constants_offset);
842
843 nir_intrinsic_instr *load_ubo =
844 nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_ubo);
845 load_ubo->num_components = intrin->num_components;
846 load_ubo->src[0] = nir_src_for_ssa(index);
847 load_ubo->src[1] = nir_src_for_ssa(offset);
848 nir_intrinsic_set_align(load_ubo, intrin->dest.ssa.bit_size / 8, 0);
849 nir_ssa_dest_init(&load_ubo->instr, &load_ubo->dest,
850 intrin->dest.ssa.num_components,
851 intrin->dest.ssa.bit_size, NULL);
852 nir_builder_instr_insert(b, &load_ubo->instr);
853 data = &load_ubo->dest.ssa;
854 }
855
856 nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(data));
857 }
858
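/* Rewrite a texture or sampler deref source into either a bindless handle
 * loaded from the descriptor buffer or a binding-table/sampler-table offset,
 * accounting for multi-plane (YCbCr) bindings.
 */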
859 static void
860 lower_tex_deref(nir_tex_instr *tex, nir_tex_src_type deref_src_type,
861 unsigned *base_index, unsigned plane,
862 struct apply_pipeline_layout_state *state)
863 {
864 int deref_src_idx = nir_tex_instr_src_index(tex, deref_src_type);
865 if (deref_src_idx < 0)
866 return;
867
868 nir_deref_instr *deref = nir_src_as_deref(tex->src[deref_src_idx].src);
869 nir_variable *var = nir_deref_instr_get_variable(deref);
870
871 unsigned set = var->data.descriptor_set;
872 unsigned binding = var->data.binding;
873 unsigned array_size =
874 state->layout->set[set].layout->binding[binding].array_size;
875
876 unsigned binding_offset;
877 if (deref_src_type == nir_tex_src_texture_deref) {
878 binding_offset = state->set[set].surface_offsets[binding];
879 } else {
880 assert(deref_src_type == nir_tex_src_sampler_deref);
881 binding_offset = state->set[set].sampler_offsets[binding];
882 }
883
884 nir_builder *b = &state->builder;
885
886 nir_tex_src_type offset_src_type;
887 nir_ssa_def *index = NULL;
888 if (binding_offset > MAX_BINDING_TABLE_SIZE) {
889 const unsigned plane_offset =
890 plane * sizeof(struct anv_sampled_image_descriptor);
891
892 nir_ssa_def *desc =
893 build_descriptor_load(deref, plane_offset, 2, 32, state);
894
895 if (deref_src_type == nir_tex_src_texture_deref) {
896 offset_src_type = nir_tex_src_texture_handle;
897 index = nir_channel(b, desc, 0);
898 } else {
899 assert(deref_src_type == nir_tex_src_sampler_deref);
900 offset_src_type = nir_tex_src_sampler_handle;
901 index = nir_channel(b, desc, 1);
902 }
903 } else {
904 if (deref_src_type == nir_tex_src_texture_deref) {
905 offset_src_type = nir_tex_src_texture_offset;
906 } else {
907 assert(deref_src_type == nir_tex_src_sampler_deref);
908 offset_src_type = nir_tex_src_sampler_offset;
909 }
910
911 *base_index = binding_offset + plane;
912
913 if (deref->deref_type != nir_deref_type_var) {
914 assert(deref->deref_type == nir_deref_type_array);
915
916 if (nir_src_is_const(deref->arr.index)) {
917 unsigned arr_index = MIN2(nir_src_as_uint(deref->arr.index), array_size - 1);
918 struct anv_sampler **immutable_samplers =
919 state->layout->set[set].layout->binding[binding].immutable_samplers;
920 if (immutable_samplers) {
 921             /* Arrays of YCbCr samplers are tightly packed in the binding
 922              * tables; compute the offset of an element in the array by
923 * adding the number of planes of all preceding elements.
924 */
925 unsigned desc_arr_index = 0;
926 for (int i = 0; i < arr_index; i++)
927 desc_arr_index += immutable_samplers[i]->n_planes;
928 *base_index += desc_arr_index;
929 } else {
930 *base_index += arr_index;
931 }
932 } else {
933 /* From VK_KHR_sampler_ycbcr_conversion:
934 *
935 * If sampler Y’CBCR conversion is enabled, the combined image
936 * sampler must be indexed only by constant integral expressions
937 * when aggregated into arrays in shader code, irrespective of
938 * the shaderSampledImageArrayDynamicIndexing feature.
939 */
940 assert(nir_tex_instr_src_index(tex, nir_tex_src_plane) == -1);
941
942 index = nir_ssa_for_src(b, deref->arr.index, 1);
943
944 if (state->add_bounds_checks)
945 index = nir_umin(b, index, nir_imm_int(b, array_size - 1));
946 }
947 }
948 }
949
950 if (index) {
951 nir_instr_rewrite_src(&tex->instr, &tex->src[deref_src_idx].src,
952 nir_src_for_ssa(index));
953 tex->src[deref_src_idx].src_type = offset_src_type;
954 } else {
955 nir_tex_instr_remove_src(tex, deref_src_idx);
956 }
957 }
958
959 static uint32_t
960 tex_instr_get_and_remove_plane_src(nir_tex_instr *tex)
961 {
962 int plane_src_idx = nir_tex_instr_src_index(tex, nir_tex_src_plane);
963 if (plane_src_idx < 0)
964 return 0;
965
966 unsigned plane = nir_src_as_uint(tex->src[plane_src_idx].src);
967
968 nir_tex_instr_remove_src(tex, plane_src_idx);
969
970 return plane;
971 }
972
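/* Select srcs[idx] from the range [start, end) with a binary tree of bcsel
 * instructions, e.g. for 8 sources the dependency chain is only 3 selects
 * deep instead of 7.
 */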
973 static nir_ssa_def *
974 build_def_array_select(nir_builder *b, nir_ssa_def **srcs, nir_ssa_def *idx,
975 unsigned start, unsigned end)
976 {
977 if (start == end - 1) {
978 return srcs[start];
979 } else {
980 unsigned mid = start + (end - start) / 2;
981 return nir_bcsel(b, nir_ilt(b, idx, nir_imm_int(b, mid)),
982 build_def_array_select(b, srcs, idx, start, mid),
983 build_def_array_select(b, srcs, idx, mid, end));
984 }
985 }
986
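/* On Ivy Bridge and Bay Trail the texture swizzle has to be applied in the
 * shader: load the swizzle from the descriptor buffer and re-shuffle the
 * texture result with a small bcsel tree.
 */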
987 static void
988 lower_gen7_tex_swizzle(nir_tex_instr *tex, unsigned plane,
989 struct apply_pipeline_layout_state *state)
990 {
991 assert(state->pdevice->info.gen == 7 && !state->pdevice->info.is_haswell);
992 if (tex->sampler_dim == GLSL_SAMPLER_DIM_BUF ||
993 nir_tex_instr_is_query(tex) ||
994 tex->op == nir_texop_tg4 || /* We can't swizzle TG4 */
995 (tex->is_shadow && tex->is_new_style_shadow))
996 return;
997
998 int deref_src_idx = nir_tex_instr_src_index(tex, nir_tex_src_texture_deref);
999 assert(deref_src_idx >= 0);
1000
1001 nir_deref_instr *deref = nir_src_as_deref(tex->src[deref_src_idx].src);
1002 nir_variable *var = nir_deref_instr_get_variable(deref);
1003
1004 unsigned set = var->data.descriptor_set;
1005 unsigned binding = var->data.binding;
1006 const struct anv_descriptor_set_binding_layout *bind_layout =
1007 &state->layout->set[set].layout->binding[binding];
1008
1009 if ((bind_layout->data & ANV_DESCRIPTOR_TEXTURE_SWIZZLE) == 0)
1010 return;
1011
1012 nir_builder *b = &state->builder;
1013 b->cursor = nir_before_instr(&tex->instr);
1014
1015 const unsigned plane_offset =
1016 plane * sizeof(struct anv_texture_swizzle_descriptor);
1017 nir_ssa_def *swiz =
1018 build_descriptor_load(deref, plane_offset, 1, 32, state);
1019
1020 b->cursor = nir_after_instr(&tex->instr);
1021
1022 assert(tex->dest.ssa.bit_size == 32);
1023 assert(tex->dest.ssa.num_components == 4);
1024
1025 /* Initializing to undef is ok; nir_opt_undef will clean it up. */
1026 nir_ssa_def *undef = nir_ssa_undef(b, 1, 32);
1027 nir_ssa_def *comps[8];
1028 for (unsigned i = 0; i < ARRAY_SIZE(comps); i++)
1029 comps[i] = undef;
1030
1031 comps[ISL_CHANNEL_SELECT_ZERO] = nir_imm_int(b, 0);
1032 if (nir_alu_type_get_base_type(tex->dest_type) == nir_type_float)
1033 comps[ISL_CHANNEL_SELECT_ONE] = nir_imm_float(b, 1);
1034 else
1035 comps[ISL_CHANNEL_SELECT_ONE] = nir_imm_int(b, 1);
1036 comps[ISL_CHANNEL_SELECT_RED] = nir_channel(b, &tex->dest.ssa, 0);
1037 comps[ISL_CHANNEL_SELECT_GREEN] = nir_channel(b, &tex->dest.ssa, 1);
1038 comps[ISL_CHANNEL_SELECT_BLUE] = nir_channel(b, &tex->dest.ssa, 2);
1039 comps[ISL_CHANNEL_SELECT_ALPHA] = nir_channel(b, &tex->dest.ssa, 3);
1040
1041 nir_ssa_def *swiz_comps[4];
1042 for (unsigned i = 0; i < 4; i++) {
1043 nir_ssa_def *comp_swiz = nir_extract_u8(b, swiz, nir_imm_int(b, i));
1044 swiz_comps[i] = build_def_array_select(b, comps, comp_swiz, 0, 8);
1045 }
1046 nir_ssa_def *swiz_tex_res = nir_vec(b, swiz_comps, 4);
1047
1048 /* Rewrite uses before we insert so we don't rewrite this use */
1049 nir_ssa_def_rewrite_uses_after(&tex->dest.ssa,
1050 nir_src_for_ssa(swiz_tex_res),
1051 swiz_tex_res->parent_instr);
1052 }
1053
1054 static void
1055 lower_tex(nir_tex_instr *tex, struct apply_pipeline_layout_state *state)
1056 {
1057 unsigned plane = tex_instr_get_and_remove_plane_src(tex);
1058
1059 /* On Ivy Bridge and Bay Trail, we have to swizzle in the shader. Do this
1060 * before we lower the derefs away so we can still find the descriptor.
1061 */
1062 if (state->pdevice->info.gen == 7 && !state->pdevice->info.is_haswell)
1063 lower_gen7_tex_swizzle(tex, plane, state);
1064
1065 state->builder.cursor = nir_before_instr(&tex->instr);
1066
1067 lower_tex_deref(tex, nir_tex_src_texture_deref,
1068 &tex->texture_index, plane, state);
1069
1070 lower_tex_deref(tex, nir_tex_src_sampler_deref,
1071 &tex->sampler_index, plane, state);
1072 }
1073
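/* Dispatch each instruction in the block to the appropriate lowering helper
 * above.
 */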
1074 static void
1075 apply_pipeline_layout_block(nir_block *block,
1076 struct apply_pipeline_layout_state *state)
1077 {
1078 nir_foreach_instr_safe(instr, block) {
1079 switch (instr->type) {
1080 case nir_instr_type_intrinsic: {
1081 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
1082 switch (intrin->intrinsic) {
1083 case nir_intrinsic_vulkan_resource_index:
1084 lower_res_index_intrinsic(intrin, state);
1085 break;
1086 case nir_intrinsic_vulkan_resource_reindex:
1087 lower_res_reindex_intrinsic(intrin, state);
1088 break;
1089 case nir_intrinsic_load_vulkan_descriptor:
1090 lower_load_vulkan_descriptor(intrin, state);
1091 break;
1092 case nir_intrinsic_get_buffer_size:
1093 lower_get_buffer_size(intrin, state);
1094 break;
1095 case nir_intrinsic_image_deref_load:
1096 case nir_intrinsic_image_deref_store:
1097 case nir_intrinsic_image_deref_atomic_add:
1098 case nir_intrinsic_image_deref_atomic_imin:
1099 case nir_intrinsic_image_deref_atomic_umin:
1100 case nir_intrinsic_image_deref_atomic_imax:
1101 case nir_intrinsic_image_deref_atomic_umax:
1102 case nir_intrinsic_image_deref_atomic_and:
1103 case nir_intrinsic_image_deref_atomic_or:
1104 case nir_intrinsic_image_deref_atomic_xor:
1105 case nir_intrinsic_image_deref_atomic_exchange:
1106 case nir_intrinsic_image_deref_atomic_comp_swap:
1107 case nir_intrinsic_image_deref_size:
1108 case nir_intrinsic_image_deref_samples:
1109 case nir_intrinsic_image_deref_load_param_intel:
1110 case nir_intrinsic_image_deref_load_raw_intel:
1111 case nir_intrinsic_image_deref_store_raw_intel:
1112 lower_image_intrinsic(intrin, state);
1113 break;
1114 case nir_intrinsic_load_constant:
1115 lower_load_constant(intrin, state);
1116 break;
1117 default:
1118 break;
1119 }
1120 break;
1121 }
1122 case nir_instr_type_tex:
1123 lower_tex(nir_instr_as_tex(instr), state);
1124 break;
1125 default:
1126 continue;
1127 }
1128 }
1129 }
1130
1131 struct binding_info {
1132 uint32_t binding;
1133 uint8_t set;
1134 uint16_t score;
1135 };
1136
1137 static int
1138 compare_binding_infos(const void *_a, const void *_b)
1139 {
1140 const struct binding_info *a = _a, *b = _b;
1141 if (a->score != b->score)
1142 return b->score - a->score;
1143
1144 if (a->set != b->set)
1145 return a->set - b->set;
1146
1147 return a->binding - b->binding;
1148 }
1149
1150 void
1151 anv_nir_apply_pipeline_layout(const struct anv_physical_device *pdevice,
1152 bool robust_buffer_access,
1153 const struct anv_pipeline_layout *layout,
1154 nir_shader *shader,
1155 struct anv_pipeline_bind_map *map)
1156 {
1157 void *mem_ctx = ralloc_context(NULL);
1158
1159 struct apply_pipeline_layout_state state = {
1160 .pdevice = pdevice,
1161 .shader = shader,
1162 .layout = layout,
1163 .add_bounds_checks = robust_buffer_access,
1164 .ssbo_addr_format = anv_nir_ssbo_addr_format(pdevice, robust_buffer_access),
1165 .lowered_instrs = _mesa_pointer_set_create(mem_ctx),
1166 };
1167
1168 for (unsigned s = 0; s < layout->num_sets; s++) {
1169 const unsigned count = layout->set[s].layout->binding_count;
1170 state.set[s].use_count = rzalloc_array(mem_ctx, uint8_t, count);
1171 state.set[s].surface_offsets = rzalloc_array(mem_ctx, uint8_t, count);
1172 state.set[s].sampler_offsets = rzalloc_array(mem_ctx, uint8_t, count);
1173 }
1174
1175 nir_foreach_function(function, shader) {
1176 if (!function->impl)
1177 continue;
1178
1179 nir_foreach_block(block, function->impl)
1180 get_used_bindings_block(block, &state);
1181 }
1182
1183 for (unsigned s = 0; s < layout->num_sets; s++) {
1184 if (state.set[s].desc_buffer_used) {
1185 map->surface_to_descriptor[map->surface_count] =
1186 (struct anv_pipeline_binding) {
1187 .set = ANV_DESCRIPTOR_SET_DESCRIPTORS,
1188 .index = s,
1189 };
1190 state.set[s].desc_offset = map->surface_count;
1191 map->surface_count++;
1192 }
1193 }
1194
1195 if (state.uses_constants && !pdevice->use_softpin) {
1196 state.constants_offset = map->surface_count;
1197 map->surface_to_descriptor[map->surface_count].set =
1198 ANV_DESCRIPTOR_SET_SHADER_CONSTANTS;
1199 map->surface_count++;
1200 }
1201
1202 unsigned used_binding_count = 0;
1203 for (uint32_t set = 0; set < layout->num_sets; set++) {
1204 struct anv_descriptor_set_layout *set_layout = layout->set[set].layout;
1205 for (unsigned b = 0; b < set_layout->binding_count; b++) {
1206 if (state.set[set].use_count[b] == 0)
1207 continue;
1208
1209 used_binding_count++;
1210 }
1211 }
1212
1213 struct binding_info *infos =
1214 rzalloc_array(mem_ctx, struct binding_info, used_binding_count);
1215 used_binding_count = 0;
1216 for (uint32_t set = 0; set < layout->num_sets; set++) {
1217 const struct anv_descriptor_set_layout *set_layout = layout->set[set].layout;
1218 for (unsigned b = 0; b < set_layout->binding_count; b++) {
1219 if (state.set[set].use_count[b] == 0)
1220 continue;
1221
1222 const struct anv_descriptor_set_binding_layout *binding =
1223 &layout->set[set].layout->binding[b];
1224
1225 /* Do a fixed-point calculation to generate a score based on the
1226 * number of uses and the binding array size. We shift by 7 instead
1227 * of 8 because we're going to use the top bit below to make
1228 * everything which does not support bindless super higher priority
1229 * than things which do.
1230 */
1231 uint16_t score = ((uint16_t)state.set[set].use_count[b] << 7) /
1232 binding->array_size;
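         /* As a hypothetical example: a binding used 3 times with array_size
          * 4 scores (3 << 7) / 4 = 96 (0x60); if it cannot be bindless, the
          * bit below raises that to 0x8060.
          */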
1233
1234 /* If the descriptor type doesn't support bindless then put it at the
1235 * beginning so we guarantee it gets a slot.
1236 */
1237 if (!anv_descriptor_supports_bindless(pdevice, binding, true) ||
1238 !anv_descriptor_supports_bindless(pdevice, binding, false))
1239 score |= 1 << 15;
1240
1241 infos[used_binding_count++] = (struct binding_info) {
1242 .set = set,
1243 .binding = b,
1244 .score = score,
1245 };
1246 }
1247 }
1248
1249 /* Order the binding infos based on score with highest scores first. If
1250 * scores are equal we then order by set and binding.
1251 */
1252 qsort(infos, used_binding_count, sizeof(struct binding_info),
1253 compare_binding_infos);
1254
1255 for (unsigned i = 0; i < used_binding_count; i++) {
1256 unsigned set = infos[i].set, b = infos[i].binding;
1257 const struct anv_descriptor_set_binding_layout *binding =
1258 &layout->set[set].layout->binding[b];
1259
1260 const uint32_t array_size = binding->array_size;
1261
1262 if (binding->dynamic_offset_index >= 0)
1263 state.has_dynamic_buffers = true;
1264
1265 if (binding->data & ANV_DESCRIPTOR_SURFACE_STATE) {
1266 if (map->surface_count + array_size > MAX_BINDING_TABLE_SIZE ||
1267 anv_descriptor_requires_bindless(pdevice, binding, false)) {
1268 /* If this descriptor doesn't fit in the binding table or if it
1269 * requires bindless for some reason, flag it as bindless.
1270 */
1271 assert(anv_descriptor_supports_bindless(pdevice, binding, false));
1272 state.set[set].surface_offsets[b] = BINDLESS_OFFSET;
1273 } else {
1274 state.set[set].surface_offsets[b] = map->surface_count;
1275 if (binding->dynamic_offset_index < 0) {
1276 struct anv_sampler **samplers = binding->immutable_samplers;
1277 for (unsigned i = 0; i < binding->array_size; i++) {
1278 uint8_t planes = samplers ? samplers[i]->n_planes : 1;
1279 for (uint8_t p = 0; p < planes; p++) {
1280 map->surface_to_descriptor[map->surface_count++] =
1281 (struct anv_pipeline_binding) {
1282 .set = set,
1283 .index = binding->descriptor_index + i,
1284 .plane = p,
1285 };
1286 }
1287 }
1288 } else {
1289 for (unsigned i = 0; i < binding->array_size; i++) {
1290 map->surface_to_descriptor[map->surface_count++] =
1291 (struct anv_pipeline_binding) {
1292 .set = set,
1293 .index = binding->descriptor_index + i,
1294 .dynamic_offset_index =
1295 layout->set[set].dynamic_offset_start +
1296 binding->dynamic_offset_index + i,
1297 };
1298 }
1299 }
1300 }
1301 assert(map->surface_count <= MAX_BINDING_TABLE_SIZE);
1302 }
1303
1304 if (binding->data & ANV_DESCRIPTOR_SAMPLER_STATE) {
1305 if (map->sampler_count + array_size > MAX_SAMPLER_TABLE_SIZE ||
1306 anv_descriptor_requires_bindless(pdevice, binding, true)) {
1307 /* If this descriptor doesn't fit in the binding table or if it
1308 * requires bindless for some reason, flag it as bindless.
1309 *
1310 * We also make large sampler arrays bindless because we can avoid
1311 * using indirect sends thanks to bindless samplers being packed
1312 * less tightly than the sampler table.
1313 */
1314 assert(anv_descriptor_supports_bindless(pdevice, binding, true));
1315 state.set[set].sampler_offsets[b] = BINDLESS_OFFSET;
1316 } else {
1317 state.set[set].sampler_offsets[b] = map->sampler_count;
1318 struct anv_sampler **samplers = binding->immutable_samplers;
1319 for (unsigned i = 0; i < binding->array_size; i++) {
1320 uint8_t planes = samplers ? samplers[i]->n_planes : 1;
1321 for (uint8_t p = 0; p < planes; p++) {
1322 map->sampler_to_descriptor[map->sampler_count++] =
1323 (struct anv_pipeline_binding) {
1324 .set = set,
1325 .index = binding->descriptor_index + i,
1326 .plane = p,
1327 };
1328 }
1329 }
1330 }
1331 }
1332 }
1333
1334 nir_foreach_uniform_variable(var, shader) {
1335 const struct glsl_type *glsl_type = glsl_without_array(var->type);
1336
1337 if (!glsl_type_is_image(glsl_type))
1338 continue;
1339
1340 enum glsl_sampler_dim dim = glsl_get_sampler_dim(glsl_type);
1341
1342 const uint32_t set = var->data.descriptor_set;
1343 const uint32_t binding = var->data.binding;
1344 const struct anv_descriptor_set_binding_layout *bind_layout =
1345 &layout->set[set].layout->binding[binding];
1346 const uint32_t array_size = bind_layout->array_size;
1347
1348 if (state.set[set].use_count[binding] == 0)
1349 continue;
1350
1351 if (state.set[set].surface_offsets[binding] >= MAX_BINDING_TABLE_SIZE)
1352 continue;
1353
1354 struct anv_pipeline_binding *pipe_binding =
1355 &map->surface_to_descriptor[state.set[set].surface_offsets[binding]];
1356 for (unsigned i = 0; i < array_size; i++) {
1357 assert(pipe_binding[i].set == set);
1358 assert(pipe_binding[i].index == bind_layout->descriptor_index + i);
1359
1360 if (dim == GLSL_SAMPLER_DIM_SUBPASS ||
1361 dim == GLSL_SAMPLER_DIM_SUBPASS_MS)
1362 pipe_binding[i].input_attachment_index = var->data.index + i;
1363
 1364          /* NOTE: This is a uint8_t so we really do need the != 0 here */
1365 pipe_binding[i].write_only =
1366 (var->data.access & ACCESS_NON_READABLE) != 0;
1367 }
1368 }
1369
1370 nir_foreach_function(function, shader) {
1371 if (!function->impl)
1372 continue;
1373
1374 nir_builder_init(&state.builder, function->impl);
1375
1376 /* Before we do the normal lowering, we look for any SSBO operations
1377 * that we can lower to the BTI model and lower them up-front. The BTI
1378 * model can perform better than the A64 model for a couple reasons:
1379 *
1380 * 1. 48-bit address calculations are potentially expensive and using
1381 * the BTI model lets us simply compute 32-bit offsets and the
1382 * hardware adds the 64-bit surface base address.
1383 *
1384 * 2. The BTI messages, because they use surface states, do bounds
1385 * checking for us. With the A64 model, we have to do our own
1386 * bounds checking and this means wider pointers and extra
1387 * calculations and branching in the shader.
1388 *
1389 * The solution to both of these is to convert things to the BTI model
1390 * opportunistically. The reason why we need to do this as a pre-pass
1391 * is for two reasons:
1392 *
1393 * 1. The BTI model requires nir_address_format_32bit_index_offset
1394 * pointers which are not the same type as the pointers needed for
1395 * the A64 model. Because all our derefs are set up for the A64
1396 * model (in case we have variable pointers), we have to crawl all
1397 * the way back to the vulkan_resource_index intrinsic and build a
1398 * completely fresh index+offset calculation.
1399 *
1400 * 2. Because the variable-pointers-capable lowering that we do as part
1401 * of apply_pipeline_layout_block is destructive (It really has to
1402 * be to handle variable pointers properly), we've lost the deref
1403 * information by the time we get to the load/store/atomic
1404 * intrinsics in that pass.
1405 */
1406 lower_direct_buffer_access(function->impl, &state);
1407
1408 nir_foreach_block(block, function->impl)
1409 apply_pipeline_layout_block(block, &state);
1410 nir_metadata_preserve(function->impl, nir_metadata_block_index |
1411 nir_metadata_dominance);
1412 }
1413
1414 ralloc_free(mem_ctx);
1415
1416 /* Now that we're done computing the surface and sampler portions of the
1417 * bind map, hash them. This lets us quickly determine if the actual
1418 * mapping has changed and not just a no-op pipeline change.
1419 */
1420 _mesa_sha1_compute(map->surface_to_descriptor,
1421 map->surface_count * sizeof(struct anv_pipeline_binding),
1422 map->surface_sha1);
1423 _mesa_sha1_compute(map->sampler_to_descriptor,
1424 map->sampler_count * sizeof(struct anv_pipeline_binding),
1425 map->sampler_sha1);
1426 }