anv: Patch constant data pointers into shaders when using softpin
[mesa.git] / src / intel / vulkan / anv_nir_apply_pipeline_layout.c
/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "anv_nir.h"
#include "program/prog_parameter.h"
#include "nir/nir_builder.h"
#include "compiler/brw_nir.h"
#include "util/mesa-sha1.h"
#include "util/set.h"

/* Sampler tables don't actually have a maximum size but we pick one just so
 * that we don't end up emitting too much state on-the-fly.
 */
#define MAX_SAMPLER_TABLE_SIZE 128
#define BINDLESS_OFFSET        255
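/* BINDLESS_OFFSET is a sentinel stored in the per-binding surface/sampler
 * offset tables below; it is larger than any valid binding table index, so
 * the "binding_offset > MAX_BINDING_TABLE_SIZE" checks in the lowering
 * helpers treat such bindings as bindless.
 */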

struct apply_pipeline_layout_state {
   const struct anv_physical_device *pdevice;

   nir_shader *shader;
   nir_builder builder;

   const struct anv_pipeline_layout *layout;
   bool add_bounds_checks;
   nir_address_format ssbo_addr_format;

   /* Place to flag lowered instructions so we don't lower them twice */
   struct set *lowered_instrs;

   bool uses_constants;
   bool has_dynamic_buffers;
   uint8_t constants_offset;
   struct {
      bool desc_buffer_used;
      uint8_t desc_offset;

      uint8_t *use_count;
      uint8_t *surface_offsets;
      uint8_t *sampler_offsets;
   } set[MAX_SETS];
};

static void
add_binding(struct apply_pipeline_layout_state *state,
            uint32_t set, uint32_t binding)
{
   const struct anv_descriptor_set_binding_layout *bind_layout =
      &state->layout->set[set].layout->binding[binding];

   if (state->set[set].use_count[binding] < UINT8_MAX)
      state->set[set].use_count[binding]++;

   /* Only flag the descriptor buffer as used if there's actually data for
    * this binding.  This lets us be lazy and call this function constantly
    * without worrying about unnecessarily enabling the buffer.
    */
   if (anv_descriptor_size(bind_layout))
      state->set[set].desc_buffer_used = true;
}

static void
add_deref_src_binding(struct apply_pipeline_layout_state *state, nir_src src)
{
   nir_deref_instr *deref = nir_src_as_deref(src);
   nir_variable *var = nir_deref_instr_get_variable(deref);
   add_binding(state, var->data.descriptor_set, var->data.binding);
}

static void
add_tex_src_binding(struct apply_pipeline_layout_state *state,
                    nir_tex_instr *tex, nir_tex_src_type deref_src_type)
{
   int deref_src_idx = nir_tex_instr_src_index(tex, deref_src_type);
   if (deref_src_idx < 0)
      return;

   add_deref_src_binding(state, tex->src[deref_src_idx].src);
}

static void
get_used_bindings_block(nir_block *block,
                        struct apply_pipeline_layout_state *state)
{
   nir_foreach_instr_safe(instr, block) {
      switch (instr->type) {
      case nir_instr_type_intrinsic: {
         nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
         switch (intrin->intrinsic) {
         case nir_intrinsic_vulkan_resource_index:
            add_binding(state, nir_intrinsic_desc_set(intrin),
                        nir_intrinsic_binding(intrin));
            break;

         case nir_intrinsic_image_deref_load:
         case nir_intrinsic_image_deref_store:
         case nir_intrinsic_image_deref_atomic_add:
         case nir_intrinsic_image_deref_atomic_imin:
         case nir_intrinsic_image_deref_atomic_umin:
         case nir_intrinsic_image_deref_atomic_imax:
         case nir_intrinsic_image_deref_atomic_umax:
         case nir_intrinsic_image_deref_atomic_and:
         case nir_intrinsic_image_deref_atomic_or:
         case nir_intrinsic_image_deref_atomic_xor:
         case nir_intrinsic_image_deref_atomic_exchange:
         case nir_intrinsic_image_deref_atomic_comp_swap:
         case nir_intrinsic_image_deref_size:
         case nir_intrinsic_image_deref_samples:
         case nir_intrinsic_image_deref_load_param_intel:
         case nir_intrinsic_image_deref_load_raw_intel:
         case nir_intrinsic_image_deref_store_raw_intel:
            add_deref_src_binding(state, intrin->src[0]);
            break;

         case nir_intrinsic_load_constant:
            state->uses_constants = true;
            break;

         default:
            break;
         }
         break;
      }
      case nir_instr_type_tex: {
         nir_tex_instr *tex = nir_instr_as_tex(instr);
         add_tex_src_binding(state, tex, nir_tex_src_texture_deref);
         add_tex_src_binding(state, tex, nir_tex_src_sampler_deref);
         break;
      }
      default:
         continue;
      }
   }
}

static bool
find_descriptor_for_index_src(nir_src src,
                              struct apply_pipeline_layout_state *state)
{
   nir_intrinsic_instr *intrin = nir_src_as_intrinsic(src);

   while (intrin && intrin->intrinsic == nir_intrinsic_vulkan_resource_reindex)
      intrin = nir_src_as_intrinsic(intrin->src[0]);

   if (!intrin || intrin->intrinsic != nir_intrinsic_vulkan_resource_index)
      return false;

   uint32_t set = nir_intrinsic_desc_set(intrin);
   uint32_t binding = nir_intrinsic_binding(intrin);
   uint32_t surface_index = state->set[set].surface_offsets[binding];

   /* Only lower to a BTI message if we have a valid binding table index. */
   return surface_index < MAX_BINDING_TABLE_SIZE;
}

static bool
nir_deref_find_descriptor(nir_deref_instr *deref,
                          struct apply_pipeline_layout_state *state)
{
   while (1) {
      /* Nothing we will use this on has a variable */
      assert(deref->deref_type != nir_deref_type_var);

      nir_deref_instr *parent = nir_src_as_deref(deref->parent);
      if (!parent)
         break;

      deref = parent;
   }
   assert(deref->deref_type == nir_deref_type_cast);

   nir_intrinsic_instr *intrin = nir_src_as_intrinsic(deref->parent);
   if (!intrin || intrin->intrinsic != nir_intrinsic_load_vulkan_descriptor)
      return false;

   return find_descriptor_for_index_src(intrin->src[0], state);
}

static nir_ssa_def *
build_index_for_res_reindex(nir_intrinsic_instr *intrin,
                            struct apply_pipeline_layout_state *state)
{
   nir_builder *b = &state->builder;

   if (intrin->intrinsic == nir_intrinsic_vulkan_resource_reindex) {
      nir_ssa_def *bti =
         build_index_for_res_reindex(nir_src_as_intrinsic(intrin->src[0]), state);

      b->cursor = nir_before_instr(&intrin->instr);
      return nir_iadd(b, bti, nir_ssa_for_src(b, intrin->src[1], 1));
   }

   assert(intrin->intrinsic == nir_intrinsic_vulkan_resource_index);

   uint32_t set = nir_intrinsic_desc_set(intrin);
   uint32_t binding = nir_intrinsic_binding(intrin);

   const struct anv_descriptor_set_binding_layout *bind_layout =
      &state->layout->set[set].layout->binding[binding];

   uint32_t surface_index = state->set[set].surface_offsets[binding];
   uint32_t array_size = bind_layout->array_size;

   b->cursor = nir_before_instr(&intrin->instr);

   nir_ssa_def *array_index = nir_ssa_for_src(b, intrin->src[0], 1);
   if (nir_src_is_const(intrin->src[0]) || state->add_bounds_checks)
      array_index = nir_umin(b, array_index, nir_imm_int(b, array_size - 1));

   return nir_iadd_imm(b, array_index, surface_index);
}

static nir_ssa_def *
build_index_offset_for_deref(nir_deref_instr *deref,
                             struct apply_pipeline_layout_state *state)
{
   nir_builder *b = &state->builder;

   nir_deref_instr *parent = nir_deref_instr_parent(deref);
   if (parent) {
      nir_ssa_def *addr = build_index_offset_for_deref(parent, state);

      b->cursor = nir_before_instr(&deref->instr);
      return nir_explicit_io_address_from_deref(b, deref, addr,
                                                nir_address_format_32bit_index_offset);
   }

   nir_intrinsic_instr *load_desc = nir_src_as_intrinsic(deref->parent);
   assert(load_desc->intrinsic == nir_intrinsic_load_vulkan_descriptor);

   nir_ssa_def *index =
      build_index_for_res_reindex(nir_src_as_intrinsic(load_desc->src[0]), state);

   /* Return a 0 offset which will get picked up by the recursion */
   b->cursor = nir_before_instr(&deref->instr);
   return nir_vec2(b, index, nir_imm_int(b, 0));
}

static bool
try_lower_direct_buffer_intrinsic(nir_intrinsic_instr *intrin, bool is_atomic,
                                  struct apply_pipeline_layout_state *state)
{
   nir_builder *b = &state->builder;

   nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
   if (deref->mode != nir_var_mem_ssbo)
      return false;

   /* 64-bit atomics only support A64 messages so we can't lower them to the
    * index+offset model.
    */
   if (is_atomic && nir_dest_bit_size(intrin->dest) == 64)
      return false;

   /* Normal binding table-based messages can't handle non-uniform access so
    * we have to fall back to A64.
    */
   if (nir_intrinsic_access(intrin) & ACCESS_NON_UNIFORM)
      return false;

   if (!nir_deref_find_descriptor(deref, state))
      return false;

   nir_ssa_def *addr = build_index_offset_for_deref(deref, state);

   b->cursor = nir_before_instr(&intrin->instr);
   nir_lower_explicit_io_instr(b, intrin, addr,
                               nir_address_format_32bit_index_offset);
   return true;
}

static void
lower_direct_buffer_access(nir_function_impl *impl,
                           struct apply_pipeline_layout_state *state)
{
   nir_foreach_block(block, impl) {
      nir_foreach_instr_safe(instr, block) {
         if (instr->type != nir_instr_type_intrinsic)
            continue;

         nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
         switch (intrin->intrinsic) {
         case nir_intrinsic_load_deref:
         case nir_intrinsic_store_deref:
            try_lower_direct_buffer_intrinsic(intrin, false, state);
            break;
         case nir_intrinsic_deref_atomic_add:
         case nir_intrinsic_deref_atomic_imin:
         case nir_intrinsic_deref_atomic_umin:
         case nir_intrinsic_deref_atomic_imax:
         case nir_intrinsic_deref_atomic_umax:
         case nir_intrinsic_deref_atomic_and:
         case nir_intrinsic_deref_atomic_or:
         case nir_intrinsic_deref_atomic_xor:
         case nir_intrinsic_deref_atomic_exchange:
         case nir_intrinsic_deref_atomic_comp_swap:
         case nir_intrinsic_deref_atomic_fmin:
         case nir_intrinsic_deref_atomic_fmax:
         case nir_intrinsic_deref_atomic_fcomp_swap:
            try_lower_direct_buffer_intrinsic(intrin, true, state);
            break;

         case nir_intrinsic_get_buffer_size: {
            /* The get_buffer_size intrinsic always just takes an
             * index/reindex intrinsic.
             */
            if (!find_descriptor_for_index_src(intrin->src[0], state))
               break;

            nir_ssa_def *index =
               build_index_for_res_reindex(nir_src_as_intrinsic(intrin->src[0]),
                                           state);
            nir_instr_rewrite_src(&intrin->instr, &intrin->src[0],
                                  nir_src_for_ssa(index));
            _mesa_set_add(state->lowered_instrs, intrin);
            break;
         }

         default:
            break;
         }
      }
   }
}

static nir_address_format
desc_addr_format(VkDescriptorType desc_type,
                 struct apply_pipeline_layout_state *state)
{
   return (desc_type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
           desc_type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) ?
          state->ssbo_addr_format : nir_address_format_32bit_index_offset;
}

static void
lower_res_index_intrinsic(nir_intrinsic_instr *intrin,
                          struct apply_pipeline_layout_state *state)
{
   nir_builder *b = &state->builder;

   b->cursor = nir_before_instr(&intrin->instr);

   uint32_t set = nir_intrinsic_desc_set(intrin);
   uint32_t binding = nir_intrinsic_binding(intrin);
   const VkDescriptorType desc_type = nir_intrinsic_desc_type(intrin);

   const struct anv_descriptor_set_binding_layout *bind_layout =
      &state->layout->set[set].layout->binding[binding];

   uint32_t surface_index = state->set[set].surface_offsets[binding];
   uint32_t array_size = bind_layout->array_size;

   nir_ssa_def *array_index = nir_ssa_for_src(b, intrin->src[0], 1);
   if (nir_src_is_const(intrin->src[0]) || state->add_bounds_checks)
      array_index = nir_umin(b, array_index, nir_imm_int(b, array_size - 1));

   nir_ssa_def *index;
   if (state->pdevice->has_a64_buffer_access &&
       (desc_type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
        desc_type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)) {
      /* We store the descriptor offset as 16.8.8 where the top 16 bits are
       * the offset into the descriptor set, the next 8 are the binding table
       * index of the descriptor buffer, and the bottom 8 bits are the index
       * into the dynamic offset table.
       */
      assert(bind_layout->dynamic_offset_index < MAX_DYNAMIC_BUFFERS);
      uint32_t dynamic_offset_index = 0xff; /* No dynamic offset */
      if (bind_layout->dynamic_offset_index >= 0) {
         dynamic_offset_index =
            state->layout->set[set].dynamic_offset_start +
            bind_layout->dynamic_offset_index;
      }

      const uint32_t desc_offset =
         bind_layout->descriptor_offset << 16 |
         (uint32_t)state->set[set].desc_offset << 8 |
         dynamic_offset_index;
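      /* Worked example with hypothetical values: a binding at byte offset
       * 0x40 in its set, whose descriptor buffer is at binding table index
       * 3 and whose dynamic offset table index is 2, packs to
       * 0x40 << 16 | 3 << 8 | 2 = 0x00400302.
       */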

      if (state->add_bounds_checks) {
         assert(desc_addr_format(desc_type, state) ==
                nir_address_format_64bit_bounded_global);
         assert(intrin->dest.ssa.num_components == 4);
         assert(intrin->dest.ssa.bit_size == 32);
         index = nir_vec4(b, nir_imm_int(b, desc_offset),
                             nir_ssa_for_src(b, intrin->src[0], 1),
                             nir_imm_int(b, array_size - 1),
                             nir_ssa_undef(b, 1, 32));
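         /* The components of this bounded-global "pointer" are: .x the
          * 16.8.8 desc_offset, .y the array index, and .z the clamp bound
          * (array_size - 1); build_ssbo_descriptor_load() later takes
          * umin(.y, .z) to bound the index.  .w is left undefined.
          */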
      } else {
         assert(desc_addr_format(desc_type, state) ==
                nir_address_format_64bit_global);
         assert(intrin->dest.ssa.num_components == 1);
         assert(intrin->dest.ssa.bit_size == 64);
         index = nir_pack_64_2x32_split(b, nir_imm_int(b, desc_offset),
                                           nir_ssa_for_src(b, intrin->src[0], 1));
      }
   } else if (bind_layout->data & ANV_DESCRIPTOR_INLINE_UNIFORM) {
      /* This is an inline uniform block.  Just reference the descriptor set
       * and use the descriptor offset as the base.
       */
      assert(desc_addr_format(desc_type, state) ==
             nir_address_format_32bit_index_offset);
      assert(intrin->dest.ssa.num_components == 2);
      assert(intrin->dest.ssa.bit_size == 32);
      index = nir_imm_ivec2(b, state->set[set].desc_offset,
                               bind_layout->descriptor_offset);
   } else {
      assert(desc_addr_format(desc_type, state) ==
             nir_address_format_32bit_index_offset);
      assert(intrin->dest.ssa.num_components == 2);
      assert(intrin->dest.ssa.bit_size == 32);
      index = nir_vec2(b, nir_iadd_imm(b, array_index, surface_index),
                          nir_imm_int(b, 0));
   }

   assert(intrin->dest.is_ssa);
   nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(index));
   nir_instr_remove(&intrin->instr);
}

static void
lower_res_reindex_intrinsic(nir_intrinsic_instr *intrin,
                            struct apply_pipeline_layout_state *state)
{
   nir_builder *b = &state->builder;

   b->cursor = nir_before_instr(&intrin->instr);

   const VkDescriptorType desc_type = nir_intrinsic_desc_type(intrin);

   /* For us, the resource indices are just indices into the binding table and
    * array elements are sequential.  A resource_reindex just turns into an
    * add of the two indices.
    */
   assert(intrin->src[0].is_ssa && intrin->src[1].is_ssa);
   nir_ssa_def *old_index = intrin->src[0].ssa;
   nir_ssa_def *offset = intrin->src[1].ssa;

   nir_ssa_def *new_index;
   switch (desc_addr_format(desc_type, state)) {
   case nir_address_format_64bit_bounded_global:
      /* See also lower_res_index_intrinsic() */
      assert(intrin->dest.ssa.num_components == 4);
      assert(intrin->dest.ssa.bit_size == 32);
      new_index = nir_vec4(b, nir_channel(b, old_index, 0),
                              nir_iadd(b, nir_channel(b, old_index, 1),
                                          offset),
                              nir_channel(b, old_index, 2),
                              nir_ssa_undef(b, 1, 32));
      break;

   case nir_address_format_64bit_global: {
      /* See also lower_res_index_intrinsic() */
      assert(intrin->dest.ssa.num_components == 1);
      assert(intrin->dest.ssa.bit_size == 64);
      nir_ssa_def *base = nir_unpack_64_2x32_split_x(b, old_index);
      nir_ssa_def *arr_idx = nir_unpack_64_2x32_split_y(b, old_index);
      new_index = nir_pack_64_2x32_split(b, base, nir_iadd(b, arr_idx, offset));
      break;
   }

   case nir_address_format_32bit_index_offset:
      assert(intrin->dest.ssa.num_components == 2);
      assert(intrin->dest.ssa.bit_size == 32);
      new_index = nir_vec2(b, nir_iadd(b, nir_channel(b, old_index, 0), offset),
                              nir_channel(b, old_index, 1));
      break;

   default:
      unreachable("Unhandled address format");
   }

   assert(intrin->dest.is_ssa);
   nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(new_index));
   nir_instr_remove(&intrin->instr);
}

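/* Loads the anv-packed SSBO descriptor selected by "index" from the
 * descriptor buffer.  The resulting uvec4 holds the 64-bit buffer address
 * in channels 0-1 and the buffer size in channel 2, which is how
 * lower_get_buffer_size() below consumes it.
 */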
static nir_ssa_def *
build_ssbo_descriptor_load(const VkDescriptorType desc_type,
                           nir_ssa_def *index,
                           struct apply_pipeline_layout_state *state)
{
   nir_builder *b = &state->builder;

   nir_ssa_def *desc_offset, *array_index;
   switch (state->ssbo_addr_format) {
   case nir_address_format_64bit_bounded_global:
      /* See also lower_res_index_intrinsic() */
      desc_offset = nir_channel(b, index, 0);
      array_index = nir_umin(b, nir_channel(b, index, 1),
                                nir_channel(b, index, 2));
      break;

   case nir_address_format_64bit_global:
      /* See also lower_res_index_intrinsic() */
      desc_offset = nir_unpack_64_2x32_split_x(b, index);
      array_index = nir_unpack_64_2x32_split_y(b, index);
      break;

   default:
      unreachable("Unhandled address format for SSBO");
   }

   /* The desc_offset is actually 16.8.8 */
   nir_ssa_def *desc_buffer_index =
      nir_extract_u8(b, desc_offset, nir_imm_int(b, 1));
   nir_ssa_def *desc_offset_base =
      nir_extract_u16(b, desc_offset, nir_imm_int(b, 1));
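   /* extract_u8 with byte index 1 pulls out bits 8-15 (the descriptor
    * buffer's binding table index) and extract_u16 with word index 1 pulls
    * out bits 16-31 (the offset of the binding within the set), matching
    * the 16.8.8 packing built in lower_res_index_intrinsic().
    */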

   /* Compute the actual descriptor offset */
   const unsigned descriptor_size =
      anv_descriptor_type_size(state->pdevice, desc_type);
   desc_offset = nir_iadd(b, desc_offset_base,
                             nir_imul_imm(b, array_index, descriptor_size));

   nir_intrinsic_instr *desc_load =
      nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_ubo);
   desc_load->src[0] = nir_src_for_ssa(desc_buffer_index);
   desc_load->src[1] = nir_src_for_ssa(desc_offset);
   nir_intrinsic_set_align(desc_load, 8, 0);
   desc_load->num_components = 4;
   nir_ssa_dest_init(&desc_load->instr, &desc_load->dest, 4, 32, NULL);
   nir_builder_instr_insert(b, &desc_load->instr);

   return &desc_load->dest.ssa;
}

static void
lower_load_vulkan_descriptor(nir_intrinsic_instr *intrin,
                             struct apply_pipeline_layout_state *state)
{
   nir_builder *b = &state->builder;

   b->cursor = nir_before_instr(&intrin->instr);

   const VkDescriptorType desc_type = nir_intrinsic_desc_type(intrin);

   assert(intrin->src[0].is_ssa);
   nir_ssa_def *index = intrin->src[0].ssa;

   nir_ssa_def *desc;
   if (state->pdevice->has_a64_buffer_access &&
       (desc_type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
        desc_type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)) {
      desc = build_ssbo_descriptor_load(desc_type, index, state);

      /* We want nir_address_format_64bit_global */
      if (!state->add_bounds_checks)
         desc = nir_pack_64_2x32(b, nir_channels(b, desc, 0x3));

      if (state->has_dynamic_buffers) {
         /* This shader has dynamic offsets and we have no way of knowing
          * (aside from the dynamic offset base index) if this buffer has a
          * dynamic offset.
          */
         nir_ssa_def *desc_offset, *array_index;
         switch (state->ssbo_addr_format) {
         case nir_address_format_64bit_bounded_global:
            /* See also lower_res_index_intrinsic() */
            desc_offset = nir_channel(b, index, 0);
            array_index = nir_umin(b, nir_channel(b, index, 1),
                                      nir_channel(b, index, 2));
            break;

         case nir_address_format_64bit_global:
            /* See also lower_res_index_intrinsic() */
            desc_offset = nir_unpack_64_2x32_split_x(b, index);
            array_index = nir_unpack_64_2x32_split_y(b, index);
            break;

         default:
            unreachable("Unhandled address format for SSBO");
         }

         nir_ssa_def *dyn_offset_base =
            nir_extract_u8(b, desc_offset, nir_imm_int(b, 0));
         nir_ssa_def *dyn_offset_idx =
            nir_iadd(b, dyn_offset_base, array_index);
         if (state->add_bounds_checks) {
            dyn_offset_idx = nir_umin(b, dyn_offset_idx,
                                         nir_imm_int(b, MAX_DYNAMIC_BUFFERS));
         }

         nir_intrinsic_instr *dyn_load =
            nir_intrinsic_instr_create(b->shader,
                                       nir_intrinsic_load_push_constant);
         nir_intrinsic_set_base(dyn_load, offsetof(struct anv_push_constants,
                                                   dynamic_offsets));
         nir_intrinsic_set_range(dyn_load, MAX_DYNAMIC_BUFFERS * 4);
         dyn_load->src[0] = nir_src_for_ssa(nir_imul_imm(b, dyn_offset_idx, 4));
         dyn_load->num_components = 1;
         nir_ssa_dest_init(&dyn_load->instr, &dyn_load->dest, 1, 32, NULL);
         nir_builder_instr_insert(b, &dyn_load->instr);

         nir_ssa_def *dynamic_offset =
            nir_bcsel(b, nir_ieq(b, dyn_offset_base, nir_imm_int(b, 0xff)),
                         nir_imm_int(b, 0), &dyn_load->dest.ssa);
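         /* The dynamic offsets live in the push constant block as an array
          * of MAX_DYNAMIC_BUFFERS dwords, hence the multiply by 4 above;
          * 0xff is the "no dynamic offset" sentinel written by
          * lower_res_index_intrinsic(), in which case we add zero.
          */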

         switch (state->ssbo_addr_format) {
         case nir_address_format_64bit_bounded_global: {
            /* The dynamic offset gets added to the base pointer so that we
             * have a sliding window range.
             */
            nir_ssa_def *base_ptr =
               nir_pack_64_2x32(b, nir_channels(b, desc, 0x3));
            base_ptr = nir_iadd(b, base_ptr, nir_u2u64(b, dynamic_offset));
            desc = nir_vec4(b, nir_unpack_64_2x32_split_x(b, base_ptr),
                               nir_unpack_64_2x32_split_y(b, base_ptr),
                               nir_channel(b, desc, 2),
                               nir_channel(b, desc, 3));
            break;
         }

         case nir_address_format_64bit_global:
            desc = nir_iadd(b, desc, nir_u2u64(b, dynamic_offset));
            break;

         default:
            unreachable("Unhandled address format for SSBO");
         }
      }
   } else {
      /* We follow the nir_address_format_32bit_index_offset model */
      desc = index;
   }

   assert(intrin->dest.is_ssa);
   nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(desc));
   nir_instr_remove(&intrin->instr);
}

static void
lower_get_buffer_size(nir_intrinsic_instr *intrin,
                      struct apply_pipeline_layout_state *state)
{
   if (_mesa_set_search(state->lowered_instrs, intrin))
      return;

   nir_builder *b = &state->builder;

   b->cursor = nir_before_instr(&intrin->instr);

   const VkDescriptorType desc_type = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;

   assert(intrin->src[0].is_ssa);
   nir_ssa_def *index = intrin->src[0].ssa;

   if (state->pdevice->has_a64_buffer_access) {
      nir_ssa_def *desc = build_ssbo_descriptor_load(desc_type, index, state);
      nir_ssa_def *size = nir_channel(b, desc, 2);
      nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(size));
      nir_instr_remove(&intrin->instr);
   } else {
      /* We're following the nir_address_format_32bit_index_offset model so
       * the binding table index is the first component of the address.  The
       * back-end wants a scalar binding table index source.
       */
      nir_instr_rewrite_src(&intrin->instr, &intrin->src[0],
                            nir_src_for_ssa(nir_channel(b, index, 0)));
   }
}

static nir_ssa_def *
build_descriptor_load(nir_deref_instr *deref, unsigned offset,
                      unsigned num_components, unsigned bit_size,
                      struct apply_pipeline_layout_state *state)
{
   nir_variable *var = nir_deref_instr_get_variable(deref);

   unsigned set = var->data.descriptor_set;
   unsigned binding = var->data.binding;
   unsigned array_size =
      state->layout->set[set].layout->binding[binding].array_size;

   const struct anv_descriptor_set_binding_layout *bind_layout =
      &state->layout->set[set].layout->binding[binding];

   nir_builder *b = &state->builder;

   nir_ssa_def *desc_buffer_index =
      nir_imm_int(b, state->set[set].desc_offset);

   nir_ssa_def *desc_offset =
      nir_imm_int(b, bind_layout->descriptor_offset + offset);
   if (deref->deref_type != nir_deref_type_var) {
      assert(deref->deref_type == nir_deref_type_array);

      const unsigned descriptor_size = anv_descriptor_size(bind_layout);
      nir_ssa_def *arr_index = nir_ssa_for_src(b, deref->arr.index, 1);
      if (state->add_bounds_checks)
         arr_index = nir_umin(b, arr_index, nir_imm_int(b, array_size - 1));

      desc_offset = nir_iadd(b, desc_offset,
                                nir_imul_imm(b, arr_index, descriptor_size));
   }

   nir_intrinsic_instr *desc_load =
      nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_ubo);
   desc_load->src[0] = nir_src_for_ssa(desc_buffer_index);
   desc_load->src[1] = nir_src_for_ssa(desc_offset);
   nir_intrinsic_set_align(desc_load, 8, offset % 8);
   desc_load->num_components = num_components;
   nir_ssa_dest_init(&desc_load->instr, &desc_load->dest,
                     num_components, bit_size, NULL);
   nir_builder_instr_insert(b, &desc_load->instr);

   return &desc_load->dest.ssa;
}

static void
lower_image_intrinsic(nir_intrinsic_instr *intrin,
                      struct apply_pipeline_layout_state *state)
{
   nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
   nir_variable *var = nir_deref_instr_get_variable(deref);

   unsigned set = var->data.descriptor_set;
   unsigned binding = var->data.binding;
   unsigned binding_offset = state->set[set].surface_offsets[binding];

   nir_builder *b = &state->builder;
   b->cursor = nir_before_instr(&intrin->instr);

   ASSERTED const bool use_bindless = state->pdevice->has_bindless_images;

   if (intrin->intrinsic == nir_intrinsic_image_deref_load_param_intel) {
      b->cursor = nir_instr_remove(&intrin->instr);

      assert(!use_bindless); /* Otherwise our offsets would be wrong */
      const unsigned param = nir_intrinsic_base(intrin);

      nir_ssa_def *desc =
         build_descriptor_load(deref, param * 16,
                               intrin->dest.ssa.num_components,
                               intrin->dest.ssa.bit_size, state);

      nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(desc));
   } else if (binding_offset > MAX_BINDING_TABLE_SIZE) {
      const bool write_only =
         (var->data.access & ACCESS_NON_READABLE) != 0;
      nir_ssa_def *desc =
         build_descriptor_load(deref, 0, 2, 32, state);
      nir_ssa_def *handle = nir_channel(b, desc, write_only ? 1 : 0);
      nir_rewrite_image_intrinsic(intrin, handle, true);
   } else {
      unsigned array_size =
         state->layout->set[set].layout->binding[binding].array_size;

      nir_ssa_def *index = NULL;
      if (deref->deref_type != nir_deref_type_var) {
         assert(deref->deref_type == nir_deref_type_array);
         index = nir_ssa_for_src(b, deref->arr.index, 1);
         if (state->add_bounds_checks)
            index = nir_umin(b, index, nir_imm_int(b, array_size - 1));
      } else {
         index = nir_imm_int(b, 0);
      }

      index = nir_iadd_imm(b, index, binding_offset);
      nir_rewrite_image_intrinsic(intrin, index, false);
   }
}

static void
lower_load_constant(nir_intrinsic_instr *intrin,
                    struct apply_pipeline_layout_state *state)
{
   nir_builder *b = &state->builder;

   b->cursor = nir_instr_remove(&intrin->instr);

   /* Any constant-offset load_constant instructions should have been removed
    * by constant folding.
    */
   assert(!nir_src_is_const(intrin->src[0]));
   nir_ssa_def *offset = nir_iadd_imm(b, nir_ssa_for_src(b, intrin->src[0], 1),
                                      nir_intrinsic_base(intrin));

   nir_ssa_def *data;
   if (state->pdevice->use_softpin) {
      unsigned load_size = intrin->dest.ssa.num_components *
                           intrin->dest.ssa.bit_size / 8;
      unsigned load_align = intrin->dest.ssa.bit_size / 8;

      assert(load_size < b->shader->constant_data_size);
      unsigned max_offset = b->shader->constant_data_size - load_size;
      offset = nir_umin(b, offset, nir_imm_int(b, max_offset));

      nir_ssa_def *const_data_base_addr = nir_pack_64_2x32_split(b,
         nir_load_reloc_const_intel(b, ANV_SHADER_RELOC_CONST_DATA_ADDR_LOW),
         nir_load_reloc_const_intel(b, ANV_SHADER_RELOC_CONST_DATA_ADDR_HIGH));
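      /* Per this commit, the two relocation constants above are
       * placeholders that get patched with the low and high halves of the
       * constant data buffer's address when the compiled shader is
       * uploaded, which only works because softpin keeps that address
       * stable.
       */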

      data = nir_load_global(b, nir_iadd(b, const_data_base_addr,
                                            nir_u2u64(b, offset)),
                             load_align,
                             intrin->dest.ssa.num_components,
                             intrin->dest.ssa.bit_size);
   } else {
      nir_ssa_def *index = nir_imm_int(b, state->constants_offset);

      nir_intrinsic_instr *load_ubo =
         nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_ubo);
      load_ubo->num_components = intrin->num_components;
      load_ubo->src[0] = nir_src_for_ssa(index);
      load_ubo->src[1] = nir_src_for_ssa(offset);
      nir_intrinsic_set_align(load_ubo, intrin->dest.ssa.bit_size / 8, 0);
      nir_ssa_dest_init(&load_ubo->instr, &load_ubo->dest,
                        intrin->dest.ssa.num_components,
                        intrin->dest.ssa.bit_size, NULL);
      nir_builder_instr_insert(b, &load_ubo->instr);
      data = &load_ubo->dest.ssa;
   }

   nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(data));
}

static void
lower_tex_deref(nir_tex_instr *tex, nir_tex_src_type deref_src_type,
                unsigned *base_index, unsigned plane,
                struct apply_pipeline_layout_state *state)
{
   int deref_src_idx = nir_tex_instr_src_index(tex, deref_src_type);
   if (deref_src_idx < 0)
      return;

   nir_deref_instr *deref = nir_src_as_deref(tex->src[deref_src_idx].src);
   nir_variable *var = nir_deref_instr_get_variable(deref);

   unsigned set = var->data.descriptor_set;
   unsigned binding = var->data.binding;
   unsigned array_size =
      state->layout->set[set].layout->binding[binding].array_size;

   unsigned binding_offset;
   if (deref_src_type == nir_tex_src_texture_deref) {
      binding_offset = state->set[set].surface_offsets[binding];
   } else {
      assert(deref_src_type == nir_tex_src_sampler_deref);
      binding_offset = state->set[set].sampler_offsets[binding];
   }

   nir_builder *b = &state->builder;

   nir_tex_src_type offset_src_type;
   nir_ssa_def *index = NULL;
   if (binding_offset > MAX_BINDING_TABLE_SIZE) {
      const unsigned plane_offset =
         plane * sizeof(struct anv_sampled_image_descriptor);

      nir_ssa_def *desc =
         build_descriptor_load(deref, plane_offset, 2, 32, state);

      if (deref_src_type == nir_tex_src_texture_deref) {
         offset_src_type = nir_tex_src_texture_handle;
         index = nir_channel(b, desc, 0);
      } else {
         assert(deref_src_type == nir_tex_src_sampler_deref);
         offset_src_type = nir_tex_src_sampler_handle;
         index = nir_channel(b, desc, 1);
      }
   } else {
      if (deref_src_type == nir_tex_src_texture_deref) {
         offset_src_type = nir_tex_src_texture_offset;
      } else {
         assert(deref_src_type == nir_tex_src_sampler_deref);
         offset_src_type = nir_tex_src_sampler_offset;
      }

      *base_index = binding_offset + plane;

      if (deref->deref_type != nir_deref_type_var) {
         assert(deref->deref_type == nir_deref_type_array);

         if (nir_src_is_const(deref->arr.index)) {
            unsigned arr_index = MIN2(nir_src_as_uint(deref->arr.index), array_size - 1);
            struct anv_sampler **immutable_samplers =
               state->layout->set[set].layout->binding[binding].immutable_samplers;
            if (immutable_samplers) {
               /* Arrays of YCbCr samplers are tightly packed in the binding
                * tables; compute the offset of an element in the array by
                * adding the number of planes of all preceding elements.
                */
               unsigned desc_arr_index = 0;
               for (int i = 0; i < arr_index; i++)
                  desc_arr_index += immutable_samplers[i]->n_planes;
               *base_index += desc_arr_index;
            } else {
               *base_index += arr_index;
            }
         } else {
            /* From VK_KHR_sampler_ycbcr_conversion:
             *
             *    If sampler Y’CBCR conversion is enabled, the combined image
             *    sampler must be indexed only by constant integral expressions
             *    when aggregated into arrays in shader code, irrespective of
             *    the shaderSampledImageArrayDynamicIndexing feature.
             */
            assert(nir_tex_instr_src_index(tex, nir_tex_src_plane) == -1);

            index = nir_ssa_for_src(b, deref->arr.index, 1);

            if (state->add_bounds_checks)
               index = nir_umin(b, index, nir_imm_int(b, array_size - 1));
         }
      }
   }

   if (index) {
      nir_instr_rewrite_src(&tex->instr, &tex->src[deref_src_idx].src,
                            nir_src_for_ssa(index));
      tex->src[deref_src_idx].src_type = offset_src_type;
   } else {
      nir_tex_instr_remove_src(tex, deref_src_idx);
   }
}

static uint32_t
tex_instr_get_and_remove_plane_src(nir_tex_instr *tex)
{
   int plane_src_idx = nir_tex_instr_src_index(tex, nir_tex_src_plane);
   if (plane_src_idx < 0)
      return 0;

   unsigned plane = nir_src_as_uint(tex->src[plane_src_idx].src);

   nir_tex_instr_remove_src(tex, plane_src_idx);

   return plane;
}

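/* Builds a binary tree of bcsel instructions selecting srcs[idx] from
 * srcs[start..end); for the 8-entry swizzle table below this gives a
 * three-level select chain instead of a linear seven-level one.
 */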
static nir_ssa_def *
build_def_array_select(nir_builder *b, nir_ssa_def **srcs, nir_ssa_def *idx,
                       unsigned start, unsigned end)
{
   if (start == end - 1) {
      return srcs[start];
   } else {
      unsigned mid = start + (end - start) / 2;
      return nir_bcsel(b, nir_ilt(b, idx, nir_imm_int(b, mid)),
                          build_def_array_select(b, srcs, idx, start, mid),
                          build_def_array_select(b, srcs, idx, mid, end));
   }
}

static void
lower_gen7_tex_swizzle(nir_tex_instr *tex, unsigned plane,
                       struct apply_pipeline_layout_state *state)
{
   assert(state->pdevice->info.gen == 7 && !state->pdevice->info.is_haswell);
   if (tex->sampler_dim == GLSL_SAMPLER_DIM_BUF ||
       nir_tex_instr_is_query(tex) ||
       tex->op == nir_texop_tg4 || /* We can't swizzle TG4 */
       (tex->is_shadow && tex->is_new_style_shadow))
      return;

   int deref_src_idx = nir_tex_instr_src_index(tex, nir_tex_src_texture_deref);
   assert(deref_src_idx >= 0);

   nir_deref_instr *deref = nir_src_as_deref(tex->src[deref_src_idx].src);
   nir_variable *var = nir_deref_instr_get_variable(deref);

   unsigned set = var->data.descriptor_set;
   unsigned binding = var->data.binding;
   const struct anv_descriptor_set_binding_layout *bind_layout =
      &state->layout->set[set].layout->binding[binding];

   if ((bind_layout->data & ANV_DESCRIPTOR_TEXTURE_SWIZZLE) == 0)
      return;

   nir_builder *b = &state->builder;
   b->cursor = nir_before_instr(&tex->instr);

   const unsigned plane_offset =
      plane * sizeof(struct anv_texture_swizzle_descriptor);
   nir_ssa_def *swiz =
      build_descriptor_load(deref, plane_offset, 1, 32, state);

   b->cursor = nir_after_instr(&tex->instr);

   assert(tex->dest.ssa.bit_size == 32);
   assert(tex->dest.ssa.num_components == 4);

   /* Initializing to undef is ok; nir_opt_undef will clean it up. */
   nir_ssa_def *undef = nir_ssa_undef(b, 1, 32);
   nir_ssa_def *comps[8];
   for (unsigned i = 0; i < ARRAY_SIZE(comps); i++)
      comps[i] = undef;

   comps[ISL_CHANNEL_SELECT_ZERO] = nir_imm_int(b, 0);
   if (nir_alu_type_get_base_type(tex->dest_type) == nir_type_float)
      comps[ISL_CHANNEL_SELECT_ONE] = nir_imm_float(b, 1);
   else
      comps[ISL_CHANNEL_SELECT_ONE] = nir_imm_int(b, 1);
   comps[ISL_CHANNEL_SELECT_RED] = nir_channel(b, &tex->dest.ssa, 0);
   comps[ISL_CHANNEL_SELECT_GREEN] = nir_channel(b, &tex->dest.ssa, 1);
   comps[ISL_CHANNEL_SELECT_BLUE] = nir_channel(b, &tex->dest.ssa, 2);
   comps[ISL_CHANNEL_SELECT_ALPHA] = nir_channel(b, &tex->dest.ssa, 3);

   nir_ssa_def *swiz_comps[4];
   for (unsigned i = 0; i < 4; i++) {
      nir_ssa_def *comp_swiz = nir_extract_u8(b, swiz, nir_imm_int(b, i));
      swiz_comps[i] = build_def_array_select(b, comps, comp_swiz, 0, 8);
   }
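   /* Each byte of the loaded swizzle word holds an ISL_CHANNEL_SELECT_*
    * value for one destination component, so the select tree above maps
    * the runtime swizzle onto the comps[] table built from the raw texture
    * result.
    */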
   nir_ssa_def *swiz_tex_res = nir_vec(b, swiz_comps, 4);

   /* Rewrite uses before we insert so we don't rewrite this use */
   nir_ssa_def_rewrite_uses_after(&tex->dest.ssa,
                                  nir_src_for_ssa(swiz_tex_res),
                                  swiz_tex_res->parent_instr);
}

static void
lower_tex(nir_tex_instr *tex, struct apply_pipeline_layout_state *state)
{
   unsigned plane = tex_instr_get_and_remove_plane_src(tex);

   /* On Ivy Bridge and Bay Trail, we have to swizzle in the shader.  Do this
    * before we lower the derefs away so we can still find the descriptor.
    */
   if (state->pdevice->info.gen == 7 && !state->pdevice->info.is_haswell)
      lower_gen7_tex_swizzle(tex, plane, state);

   state->builder.cursor = nir_before_instr(&tex->instr);

   lower_tex_deref(tex, nir_tex_src_texture_deref,
                   &tex->texture_index, plane, state);

   lower_tex_deref(tex, nir_tex_src_sampler_deref,
                   &tex->sampler_index, plane, state);
}

static void
apply_pipeline_layout_block(nir_block *block,
                            struct apply_pipeline_layout_state *state)
{
   nir_foreach_instr_safe(instr, block) {
      switch (instr->type) {
      case nir_instr_type_intrinsic: {
         nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
         switch (intrin->intrinsic) {
         case nir_intrinsic_vulkan_resource_index:
            lower_res_index_intrinsic(intrin, state);
            break;
         case nir_intrinsic_vulkan_resource_reindex:
            lower_res_reindex_intrinsic(intrin, state);
            break;
         case nir_intrinsic_load_vulkan_descriptor:
            lower_load_vulkan_descriptor(intrin, state);
            break;
         case nir_intrinsic_get_buffer_size:
            lower_get_buffer_size(intrin, state);
            break;
         case nir_intrinsic_image_deref_load:
         case nir_intrinsic_image_deref_store:
         case nir_intrinsic_image_deref_atomic_add:
         case nir_intrinsic_image_deref_atomic_imin:
         case nir_intrinsic_image_deref_atomic_umin:
         case nir_intrinsic_image_deref_atomic_imax:
         case nir_intrinsic_image_deref_atomic_umax:
         case nir_intrinsic_image_deref_atomic_and:
         case nir_intrinsic_image_deref_atomic_or:
         case nir_intrinsic_image_deref_atomic_xor:
         case nir_intrinsic_image_deref_atomic_exchange:
         case nir_intrinsic_image_deref_atomic_comp_swap:
         case nir_intrinsic_image_deref_size:
         case nir_intrinsic_image_deref_samples:
         case nir_intrinsic_image_deref_load_param_intel:
         case nir_intrinsic_image_deref_load_raw_intel:
         case nir_intrinsic_image_deref_store_raw_intel:
            lower_image_intrinsic(intrin, state);
            break;
         case nir_intrinsic_load_constant:
            lower_load_constant(intrin, state);
            break;
         default:
            break;
         }
         break;
      }
      case nir_instr_type_tex:
         lower_tex(nir_instr_as_tex(instr), state);
         break;
      default:
         continue;
      }
   }
}

struct binding_info {
   uint32_t binding;
   uint8_t set;
   uint16_t score;
};

static int
compare_binding_infos(const void *_a, const void *_b)
{
   const struct binding_info *a = _a, *b = _b;
   if (a->score != b->score)
      return b->score - a->score;

   if (a->set != b->set)
      return a->set - b->set;

   return a->binding - b->binding;
}

void
anv_nir_apply_pipeline_layout(const struct anv_physical_device *pdevice,
                              bool robust_buffer_access,
                              const struct anv_pipeline_layout *layout,
                              nir_shader *shader,
                              struct anv_pipeline_bind_map *map)
{
   void *mem_ctx = ralloc_context(NULL);

   struct apply_pipeline_layout_state state = {
      .pdevice = pdevice,
      .shader = shader,
      .layout = layout,
      .add_bounds_checks = robust_buffer_access,
      .ssbo_addr_format = anv_nir_ssbo_addr_format(pdevice, robust_buffer_access),
      .lowered_instrs = _mesa_pointer_set_create(mem_ctx),
   };

   for (unsigned s = 0; s < layout->num_sets; s++) {
      const unsigned count = layout->set[s].layout->binding_count;
      state.set[s].use_count = rzalloc_array(mem_ctx, uint8_t, count);
      state.set[s].surface_offsets = rzalloc_array(mem_ctx, uint8_t, count);
      state.set[s].sampler_offsets = rzalloc_array(mem_ctx, uint8_t, count);
   }

   nir_foreach_function(function, shader) {
      if (!function->impl)
         continue;

      nir_foreach_block(block, function->impl)
         get_used_bindings_block(block, &state);
   }

   for (unsigned s = 0; s < layout->num_sets; s++) {
      if (state.set[s].desc_buffer_used) {
         map->surface_to_descriptor[map->surface_count] =
            (struct anv_pipeline_binding) {
               .set = ANV_DESCRIPTOR_SET_DESCRIPTORS,
               .index = s,
            };
         state.set[s].desc_offset = map->surface_count;
         map->surface_count++;
      }
   }

   if (state.uses_constants && !pdevice->use_softpin) {
      state.constants_offset = map->surface_count;
      map->surface_to_descriptor[map->surface_count].set =
         ANV_DESCRIPTOR_SET_SHADER_CONSTANTS;
      map->surface_count++;
   }

   unsigned used_binding_count = 0;
   for (uint32_t set = 0; set < layout->num_sets; set++) {
      struct anv_descriptor_set_layout *set_layout = layout->set[set].layout;
      for (unsigned b = 0; b < set_layout->binding_count; b++) {
         if (state.set[set].use_count[b] == 0)
            continue;

         used_binding_count++;
      }
   }

   struct binding_info *infos =
      rzalloc_array(mem_ctx, struct binding_info, used_binding_count);
   used_binding_count = 0;
   for (uint32_t set = 0; set < layout->num_sets; set++) {
      const struct anv_descriptor_set_layout *set_layout = layout->set[set].layout;
      for (unsigned b = 0; b < set_layout->binding_count; b++) {
         if (state.set[set].use_count[b] == 0)
            continue;

         const struct anv_descriptor_set_binding_layout *binding =
            &layout->set[set].layout->binding[b];

         /* Do a fixed-point calculation to generate a score based on the
          * number of uses and the binding array size.  We shift by 7 instead
          * of 8 because we're going to use the top bit below to give
          * everything which does not support bindless strictly higher
          * priority than things which do.
          */
         uint16_t score = ((uint16_t)state.set[set].use_count[b] << 7) /
                          binding->array_size;
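         /* For example, a binding used 3 times with array_size 4 scores
          * (3 << 7) / 4 = 96; bindings that can't be bindless additionally
          * get the 1 << 15 bit below.
          */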

         /* If the descriptor type doesn't support bindless then put it at the
          * beginning so we guarantee it gets a slot.
          */
         if (!anv_descriptor_supports_bindless(pdevice, binding, true) ||
             !anv_descriptor_supports_bindless(pdevice, binding, false))
            score |= 1 << 15;

         infos[used_binding_count++] = (struct binding_info) {
            .set = set,
            .binding = b,
            .score = score,
         };
      }
   }

   /* Order the binding infos based on score with highest scores first.  If
    * scores are equal we then order by set and binding.
    */
   qsort(infos, used_binding_count, sizeof(struct binding_info),
         compare_binding_infos);

   for (unsigned i = 0; i < used_binding_count; i++) {
      unsigned set = infos[i].set, b = infos[i].binding;
      const struct anv_descriptor_set_binding_layout *binding =
         &layout->set[set].layout->binding[b];

      const uint32_t array_size = binding->array_size;

      if (binding->dynamic_offset_index >= 0)
         state.has_dynamic_buffers = true;

      if (binding->data & ANV_DESCRIPTOR_SURFACE_STATE) {
         if (map->surface_count + array_size > MAX_BINDING_TABLE_SIZE ||
             anv_descriptor_requires_bindless(pdevice, binding, false)) {
            /* If this descriptor doesn't fit in the binding table or if it
             * requires bindless for some reason, flag it as bindless.
             */
            assert(anv_descriptor_supports_bindless(pdevice, binding, false));
            state.set[set].surface_offsets[b] = BINDLESS_OFFSET;
         } else {
            state.set[set].surface_offsets[b] = map->surface_count;
            if (binding->dynamic_offset_index < 0) {
               struct anv_sampler **samplers = binding->immutable_samplers;
               for (unsigned i = 0; i < binding->array_size; i++) {
                  uint8_t planes = samplers ? samplers[i]->n_planes : 1;
                  for (uint8_t p = 0; p < planes; p++) {
                     map->surface_to_descriptor[map->surface_count++] =
                        (struct anv_pipeline_binding) {
                           .set = set,
                           .index = binding->descriptor_index + i,
                           .plane = p,
                        };
                  }
               }
            } else {
               for (unsigned i = 0; i < binding->array_size; i++) {
                  map->surface_to_descriptor[map->surface_count++] =
                     (struct anv_pipeline_binding) {
                        .set = set,
                        .index = binding->descriptor_index + i,
                        .dynamic_offset_index =
                           layout->set[set].dynamic_offset_start +
                           binding->dynamic_offset_index + i,
                     };
               }
            }
         }
         assert(map->surface_count <= MAX_BINDING_TABLE_SIZE);
      }

      if (binding->data & ANV_DESCRIPTOR_SAMPLER_STATE) {
         if (map->sampler_count + array_size > MAX_SAMPLER_TABLE_SIZE ||
             anv_descriptor_requires_bindless(pdevice, binding, true)) {
            /* If this descriptor doesn't fit in the binding table or if it
             * requires bindless for some reason, flag it as bindless.
             *
             * We also make large sampler arrays bindless because we can avoid
             * using indirect sends thanks to bindless samplers being packed
             * less tightly than the sampler table.
             */
            assert(anv_descriptor_supports_bindless(pdevice, binding, true));
            state.set[set].sampler_offsets[b] = BINDLESS_OFFSET;
         } else {
            state.set[set].sampler_offsets[b] = map->sampler_count;
            struct anv_sampler **samplers = binding->immutable_samplers;
            for (unsigned i = 0; i < binding->array_size; i++) {
               uint8_t planes = samplers ? samplers[i]->n_planes : 1;
               for (uint8_t p = 0; p < planes; p++) {
                  map->sampler_to_descriptor[map->sampler_count++] =
                     (struct anv_pipeline_binding) {
                        .set = set,
                        .index = binding->descriptor_index + i,
                        .plane = p,
                     };
               }
            }
         }
      }
   }

   nir_foreach_uniform_variable(var, shader) {
      const struct glsl_type *glsl_type = glsl_without_array(var->type);

      if (!glsl_type_is_image(glsl_type))
         continue;

      enum glsl_sampler_dim dim = glsl_get_sampler_dim(glsl_type);

      const uint32_t set = var->data.descriptor_set;
      const uint32_t binding = var->data.binding;
      const struct anv_descriptor_set_binding_layout *bind_layout =
         &layout->set[set].layout->binding[binding];
      const uint32_t array_size = bind_layout->array_size;

      if (state.set[set].use_count[binding] == 0)
         continue;

      if (state.set[set].surface_offsets[binding] >= MAX_BINDING_TABLE_SIZE)
         continue;

      struct anv_pipeline_binding *pipe_binding =
         &map->surface_to_descriptor[state.set[set].surface_offsets[binding]];
      for (unsigned i = 0; i < array_size; i++) {
         assert(pipe_binding[i].set == set);
         assert(pipe_binding[i].index == bind_layout->descriptor_index + i);

         if (dim == GLSL_SAMPLER_DIM_SUBPASS ||
             dim == GLSL_SAMPLER_DIM_SUBPASS_MS)
            pipe_binding[i].input_attachment_index = var->data.index + i;

         /* NOTE: This is a uint8_t so we really do need the != 0 here */
         pipe_binding[i].write_only =
            (var->data.access & ACCESS_NON_READABLE) != 0;
      }
   }

   nir_foreach_function(function, shader) {
      if (!function->impl)
         continue;

      nir_builder_init(&state.builder, function->impl);

      /* Before we do the normal lowering, we look for any SSBO operations
       * that we can lower to the BTI model and lower them up-front.  The BTI
       * model can perform better than the A64 model for a couple reasons:
       *
       * 1. 48-bit address calculations are potentially expensive and using
       *    the BTI model lets us simply compute 32-bit offsets and the
       *    hardware adds the 64-bit surface base address.
       *
       * 2. The BTI messages, because they use surface states, do bounds
       *    checking for us.  With the A64 model, we have to do our own
       *    bounds checking and this means wider pointers and extra
       *    calculations and branching in the shader.
       *
       * The solution to both of these is to convert things to the BTI model
       * opportunistically.  We need to do this as a pre-pass for two
       * reasons:
       *
       * 1. The BTI model requires nir_address_format_32bit_index_offset
       *    pointers which are not the same type as the pointers needed for
       *    the A64 model.  Because all our derefs are set up for the A64
       *    model (in case we have variable pointers), we have to crawl all
       *    the way back to the vulkan_resource_index intrinsic and build a
       *    completely fresh index+offset calculation.
       *
       * 2. Because the variable-pointers-capable lowering that we do as part
       *    of apply_pipeline_layout_block is destructive (It really has to
       *    be to handle variable pointers properly), we've lost the deref
       *    information by the time we get to the load/store/atomic
       *    intrinsics in that pass.
       */
      lower_direct_buffer_access(function->impl, &state);

      nir_foreach_block(block, function->impl)
         apply_pipeline_layout_block(block, &state);
      nir_metadata_preserve(function->impl, nir_metadata_block_index |
                                            nir_metadata_dominance);
   }

   ralloc_free(mem_ctx);

   /* Now that we're done computing the surface and sampler portions of the
    * bind map, hash them.  This lets us quickly determine if the actual
    * mapping has changed and not just a no-op pipeline change.
    */
   _mesa_sha1_compute(map->surface_to_descriptor,
                      map->surface_count * sizeof(struct anv_pipeline_binding),
                      map->surface_sha1);
   _mesa_sha1_compute(map->sampler_to_descriptor,
                      map->sampler_count * sizeof(struct anv_pipeline_binding),
                      map->sampler_sha1);
}