src/intel/vulkan/anv_nir_apply_pipeline_layout.c
1 /*
2 * Copyright © 2015 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include "anv_nir.h"
25 #include "program/prog_parameter.h"
26 #include "nir/nir_builder.h"
27 #include "compiler/brw_nir.h"
28 #include "util/set.h"
29
30 /* Sampler tables don't actually have a maximum size but we pick one just so
31 * that we don't end up emitting too much state on-the-fly.
32 */
33 #define MAX_SAMPLER_TABLE_SIZE 128
34 #define BINDLESS_OFFSET 255
35
36 struct apply_pipeline_layout_state {
37 const struct anv_physical_device *pdevice;
38
39 nir_shader *shader;
40 nir_builder builder;
41
42 struct anv_pipeline_layout *layout;
43 bool add_bounds_checks;
44 nir_address_format ssbo_addr_format;
45
46 /* Place to flag lowered instructions so we don't lower them twice */
47 struct set *lowered_instrs;
48
49 int dynamic_offset_uniform_start;
50
51 bool uses_constants;
52 uint8_t constants_offset;
53 struct {
54 bool desc_buffer_used;
55 uint8_t desc_offset;
56
57 uint8_t *use_count;
58 uint8_t *surface_offsets;
59 uint8_t *sampler_offsets;
60 } set[MAX_SETS];
61 };
62
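/* Record a use of (set, binding), saturating the per-binding use count. */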
63 static void
64 add_binding(struct apply_pipeline_layout_state *state,
65 uint32_t set, uint32_t binding)
66 {
67 const struct anv_descriptor_set_binding_layout *bind_layout =
68 &state->layout->set[set].layout->binding[binding];
69
70 if (state->set[set].use_count[binding] < UINT8_MAX)
71 state->set[set].use_count[binding]++;
72
73 /* Only flag the descriptor buffer as used if there's actually data for
74 * this binding. This lets us be lazy and call this function constantly
75 * without worrying about unnecessarily enabling the buffer.
76 */
77 if (anv_descriptor_size(bind_layout))
78 state->set[set].desc_buffer_used = true;
79 }
80
81 static void
82 add_deref_src_binding(struct apply_pipeline_layout_state *state, nir_src src)
83 {
84 nir_deref_instr *deref = nir_src_as_deref(src);
85 nir_variable *var = nir_deref_instr_get_variable(deref);
86 add_binding(state, var->data.descriptor_set, var->data.binding);
87 }
88
89 static void
90 add_tex_src_binding(struct apply_pipeline_layout_state *state,
91 nir_tex_instr *tex, nir_tex_src_type deref_src_type)
92 {
93 int deref_src_idx = nir_tex_instr_src_index(tex, deref_src_type);
94 if (deref_src_idx < 0)
95 return;
96
97 add_deref_src_binding(state, tex->src[deref_src_idx].src);
98 }
99
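/* Walk one block and record every descriptor binding it references: resource
 * index intrinsics, image derefs, texture/sampler derefs, and any use of
 * load_constant (which needs the shader constants surface).
 */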
100 static void
101 get_used_bindings_block(nir_block *block,
102 struct apply_pipeline_layout_state *state)
103 {
104 nir_foreach_instr_safe(instr, block) {
105 switch (instr->type) {
106 case nir_instr_type_intrinsic: {
107 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
108 switch (intrin->intrinsic) {
109 case nir_intrinsic_vulkan_resource_index:
110 add_binding(state, nir_intrinsic_desc_set(intrin),
111 nir_intrinsic_binding(intrin));
112 break;
113
114 case nir_intrinsic_image_deref_load:
115 case nir_intrinsic_image_deref_store:
116 case nir_intrinsic_image_deref_atomic_add:
117 case nir_intrinsic_image_deref_atomic_min:
118 case nir_intrinsic_image_deref_atomic_max:
119 case nir_intrinsic_image_deref_atomic_and:
120 case nir_intrinsic_image_deref_atomic_or:
121 case nir_intrinsic_image_deref_atomic_xor:
122 case nir_intrinsic_image_deref_atomic_exchange:
123 case nir_intrinsic_image_deref_atomic_comp_swap:
124 case nir_intrinsic_image_deref_size:
125 case nir_intrinsic_image_deref_samples:
126 case nir_intrinsic_image_deref_load_param_intel:
127 case nir_intrinsic_image_deref_load_raw_intel:
128 case nir_intrinsic_image_deref_store_raw_intel:
129 add_deref_src_binding(state, intrin->src[0]);
130 break;
131
132 case nir_intrinsic_load_constant:
133 state->uses_constants = true;
134 break;
135
136 default:
137 break;
138 }
139 break;
140 }
141 case nir_instr_type_tex: {
142 nir_tex_instr *tex = nir_instr_as_tex(instr);
143 add_tex_src_binding(state, tex, nir_tex_src_texture_deref);
144 add_tex_src_binding(state, tex, nir_tex_src_sampler_deref);
145 break;
146 }
147 default:
148 continue;
149 }
150 }
151 }
152
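/* Follow a (possibly reindexed) resource index source back to its
 * vulkan_resource_index intrinsic and report whether that binding was given
 * a real binding table slot, i.e. whether it can use a BTI-based message.
 */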
153 static bool
154 find_descriptor_for_index_src(nir_src src,
155 struct apply_pipeline_layout_state *state)
156 {
157 nir_intrinsic_instr *intrin = nir_src_as_intrinsic(src);
158
159 while (intrin && intrin->intrinsic == nir_intrinsic_vulkan_resource_reindex)
160 intrin = nir_src_as_intrinsic(intrin->src[0]);
161
162 if (!intrin || intrin->intrinsic != nir_intrinsic_vulkan_resource_index)
163 return false;
164
165 uint32_t set = nir_intrinsic_desc_set(intrin);
166 uint32_t binding = nir_intrinsic_binding(intrin);
167 uint32_t surface_index = state->set[set].surface_offsets[binding];
168
169 /* Only lower to a BTI message if we have a valid binding table index. */
170 return surface_index < MAX_BINDING_TABLE_SIZE;
171 }
172
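/* Walk up the deref chain to its root cast and check whether it comes from a
 * load_vulkan_descriptor whose resource index maps to a binding table slot.
 */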
173 static bool
174 nir_deref_find_descriptor(nir_deref_instr *deref,
175 struct apply_pipeline_layout_state *state)
176 {
177 while (1) {
178 /* Nothing we will use this on has a variable */
179 assert(deref->deref_type != nir_deref_type_var);
180
181 nir_deref_instr *parent = nir_src_as_deref(deref->parent);
182 if (!parent)
183 break;
184
185 deref = parent;
186 }
187 assert(deref->deref_type == nir_deref_type_cast);
188
189 nir_intrinsic_instr *intrin = nir_src_as_intrinsic(deref->parent);
190 if (!intrin || intrin->intrinsic != nir_intrinsic_load_vulkan_descriptor)
191 return false;
192
193 return find_descriptor_for_index_src(intrin->src[0], state);
194 }
195
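/* Build a flat binding table index for a resource_index/reindex chain: start
 * from the binding's assigned surface offset and add the (optionally
 * clamped) array index plus any reindex offsets.
 */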
196 static nir_ssa_def *
197 build_index_for_res_reindex(nir_intrinsic_instr *intrin,
198 struct apply_pipeline_layout_state *state)
199 {
200 nir_builder *b = &state->builder;
201
202 if (intrin->intrinsic == nir_intrinsic_vulkan_resource_reindex) {
203 nir_ssa_def *bti =
204 build_index_for_res_reindex(nir_src_as_intrinsic(intrin->src[0]), state);
205
206 b->cursor = nir_before_instr(&intrin->instr);
207 return nir_iadd(b, bti, nir_ssa_for_src(b, intrin->src[1], 1));
208 }
209
210 assert(intrin->intrinsic == nir_intrinsic_vulkan_resource_index);
211
212 uint32_t set = nir_intrinsic_desc_set(intrin);
213 uint32_t binding = nir_intrinsic_binding(intrin);
214
215 const struct anv_descriptor_set_binding_layout *bind_layout =
216 &state->layout->set[set].layout->binding[binding];
217
218 uint32_t surface_index = state->set[set].surface_offsets[binding];
219 uint32_t array_size = bind_layout->array_size;
220
221 b->cursor = nir_before_instr(&intrin->instr);
222
223 nir_ssa_def *array_index = nir_ssa_for_src(b, intrin->src[0], 1);
224 if (nir_src_is_const(intrin->src[0]) || state->add_bounds_checks)
225 array_index = nir_umin(b, array_index, nir_imm_int(b, array_size - 1));
226
227 return nir_iadd_imm(b, array_index, surface_index);
228 }
229
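/* Build a (binding table index, offset) vec2 address for an SSBO deref by
 * recursing to the descriptor load at its root and then letting
 * nir_explicit_io_address_from_deref() add the offset for each deref level.
 */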
230 static nir_ssa_def *
231 build_index_offset_for_deref(nir_deref_instr *deref,
232 struct apply_pipeline_layout_state *state)
233 {
234 nir_builder *b = &state->builder;
235
236 nir_deref_instr *parent = nir_deref_instr_parent(deref);
237 if (parent) {
238 nir_ssa_def *addr = build_index_offset_for_deref(parent, state);
239
240 b->cursor = nir_before_instr(&deref->instr);
241 return nir_explicit_io_address_from_deref(b, deref, addr,
242 nir_address_format_32bit_index_offset);
243 }
244
245 nir_intrinsic_instr *load_desc = nir_src_as_intrinsic(deref->parent);
246 assert(load_desc->intrinsic == nir_intrinsic_load_vulkan_descriptor);
247
248 nir_ssa_def *index =
249 build_index_for_res_reindex(nir_src_as_intrinsic(load_desc->src[0]), state);
250
251 /* Return a 0 offset which will get picked up by the recursion */
252 b->cursor = nir_before_instr(&deref->instr);
253 return nir_vec2(b, index, nir_imm_int(b, 0));
254 }
255
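/* Try to lower an SSBO load/store/atomic deref directly to the index+offset
 * (BTI) model.  Returns false, leaving the A64 path in place, for non-SSBO
 * derefs, 64-bit atomics, non-uniform access, or descriptors without a
 * binding table slot.
 */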
256 static bool
257 try_lower_direct_buffer_intrinsic(nir_intrinsic_instr *intrin, bool is_atomic,
258 struct apply_pipeline_layout_state *state)
259 {
260 nir_builder *b = &state->builder;
261
262 nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
263 if (deref->mode != nir_var_mem_ssbo)
264 return false;
265
266 /* 64-bit atomics only support A64 messages so we can't lower them to the
267 * index+offset model.
268 */
269 if (is_atomic && nir_dest_bit_size(intrin->dest) == 64)
270 return false;
271
272 /* Normal binding table-based messages can't handle non-uniform access so
273 * we have to fall back to A64.
274 */
275 if (nir_intrinsic_access(intrin) & ACCESS_NON_UNIFORM)
276 return false;
277
278 if (!nir_deref_find_descriptor(deref, state))
279 return false;
280
281 nir_ssa_def *addr = build_index_offset_for_deref(deref, state);
282
283 b->cursor = nir_before_instr(&intrin->instr);
284 nir_lower_explicit_io_instr(b, intrin, addr,
285 nir_address_format_32bit_index_offset);
286 return true;
287 }
288
289 static void
290 lower_direct_buffer_access(nir_function_impl *impl,
291 struct apply_pipeline_layout_state *state)
292 {
293 nir_foreach_block(block, impl) {
294 nir_foreach_instr_safe(instr, block) {
295 if (instr->type != nir_instr_type_intrinsic)
296 continue;
297
298 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
299 switch (intrin->intrinsic) {
300 case nir_intrinsic_load_deref:
301 case nir_intrinsic_store_deref:
302 try_lower_direct_buffer_intrinsic(intrin, false, state);
303 break;
304 case nir_intrinsic_deref_atomic_add:
305 case nir_intrinsic_deref_atomic_imin:
306 case nir_intrinsic_deref_atomic_umin:
307 case nir_intrinsic_deref_atomic_imax:
308 case nir_intrinsic_deref_atomic_umax:
309 case nir_intrinsic_deref_atomic_and:
310 case nir_intrinsic_deref_atomic_or:
311 case nir_intrinsic_deref_atomic_xor:
312 case nir_intrinsic_deref_atomic_exchange:
313 case nir_intrinsic_deref_atomic_comp_swap:
314 case nir_intrinsic_deref_atomic_fmin:
315 case nir_intrinsic_deref_atomic_fmax:
316 case nir_intrinsic_deref_atomic_fcomp_swap:
317 try_lower_direct_buffer_intrinsic(intrin, true, state);
318 break;
319
320 case nir_intrinsic_get_buffer_size: {
321 /* The get_buffer_size intrinsic always just takes an
322 * index/reindex intrinsic.
323 */
324 if (!find_descriptor_for_index_src(intrin->src[0], state))
325 break;
326
327 nir_ssa_def *index =
328 build_index_for_res_reindex(nir_src_as_intrinsic(intrin->src[0]),
329 state);
330 nir_instr_rewrite_src(&intrin->instr, &intrin->src[0],
331 nir_src_for_ssa(index));
332 _mesa_set_add(state->lowered_instrs, intrin);
    break;
333 }
334
335 default:
336 break;
337 }
338 }
339 }
340 }
341
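/* Storage buffers (including dynamic ones) use the device's SSBO address
 * format; every other descriptor type uses the 32-bit index+offset format.
 */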
342 static nir_address_format
343 desc_addr_format(VkDescriptorType desc_type,
344 struct apply_pipeline_layout_state *state)
345 {
346 return (desc_type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
347 desc_type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) ?
348 state->ssbo_addr_format : nir_address_format_32bit_index_offset;
349 }
350
351 static void
352 lower_res_index_intrinsic(nir_intrinsic_instr *intrin,
353 struct apply_pipeline_layout_state *state)
354 {
355 nir_builder *b = &state->builder;
356
357 b->cursor = nir_before_instr(&intrin->instr);
358
359 uint32_t set = nir_intrinsic_desc_set(intrin);
360 uint32_t binding = nir_intrinsic_binding(intrin);
361 const VkDescriptorType desc_type = nir_intrinsic_desc_type(intrin);
362
363 const struct anv_descriptor_set_binding_layout *bind_layout =
364 &state->layout->set[set].layout->binding[binding];
365
366 uint32_t surface_index = state->set[set].surface_offsets[binding];
367 uint32_t array_size = bind_layout->array_size;
368
369 nir_ssa_def *array_index = nir_ssa_for_src(b, intrin->src[0], 1);
370 if (nir_src_is_const(intrin->src[0]) || state->add_bounds_checks)
371 array_index = nir_umin(b, array_index, nir_imm_int(b, array_size - 1));
372
373 nir_ssa_def *index;
374 if (state->pdevice->has_a64_buffer_access &&
375 (desc_type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
376 desc_type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)) {
377 /* We store the descriptor offset as 16.8.8 where the top 16 bits are
378 * the offset into the descriptor set, the next 8 are the binding table
379 * index of the descriptor buffer, and the bottom 8 bits are the offset
380 * (in bytes) into the dynamic offset table.
381 */
382 assert(bind_layout->dynamic_offset_index < MAX_DYNAMIC_BUFFERS);
383 uint32_t dynamic_offset_index = 0xff; /* No dynamic offset */
384 if (bind_layout->dynamic_offset_index >= 0) {
385 dynamic_offset_index =
386 state->layout->set[set].dynamic_offset_start +
387 bind_layout->dynamic_offset_index;
388 }
389
390 const uint32_t desc_offset =
391 bind_layout->descriptor_offset << 16 |
392 (uint32_t)state->set[set].desc_offset << 8 |
393 dynamic_offset_index;
394
395 if (state->add_bounds_checks) {
396 assert(desc_addr_format(desc_type, state) ==
397 nir_address_format_64bit_bounded_global);
398 assert(intrin->dest.ssa.num_components == 4);
399 assert(intrin->dest.ssa.bit_size == 32);
400 index = nir_vec4(b, nir_imm_int(b, desc_offset),
401 nir_ssa_for_src(b, intrin->src[0], 1),
402 nir_imm_int(b, array_size - 1),
403 nir_ssa_undef(b, 1, 32));
404 } else {
405 assert(desc_addr_format(desc_type, state) ==
406 nir_address_format_64bit_global);
407 assert(intrin->dest.ssa.num_components == 1);
408 assert(intrin->dest.ssa.bit_size == 64);
409 index = nir_pack_64_2x32_split(b, nir_imm_int(b, desc_offset),
410 nir_ssa_for_src(b, intrin->src[0], 1));
411 }
412 } else if (bind_layout->data & ANV_DESCRIPTOR_INLINE_UNIFORM) {
413 /* This is an inline uniform block. Just reference the descriptor set
414 * and use the descriptor offset as the base.
415 */
416 assert(desc_addr_format(desc_type, state) ==
417 nir_address_format_32bit_index_offset);
418 assert(intrin->dest.ssa.num_components == 2);
419 assert(intrin->dest.ssa.bit_size == 32);
420 index = nir_imm_ivec2(b, state->set[set].desc_offset,
421 bind_layout->descriptor_offset);
422 } else {
423 assert(desc_addr_format(desc_type, state) ==
424 nir_address_format_32bit_index_offset);
425 assert(intrin->dest.ssa.num_components == 2);
426 assert(intrin->dest.ssa.bit_size == 32);
427 index = nir_vec2(b, nir_iadd_imm(b, array_index, surface_index),
428 nir_imm_int(b, 0));
429 }
430
431 assert(intrin->dest.is_ssa);
432 nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(index));
433 nir_instr_remove(&intrin->instr);
434 }
435
436 static void
437 lower_res_reindex_intrinsic(nir_intrinsic_instr *intrin,
438 struct apply_pipeline_layout_state *state)
439 {
440 nir_builder *b = &state->builder;
441
442 b->cursor = nir_before_instr(&intrin->instr);
443
444 const VkDescriptorType desc_type = nir_intrinsic_desc_type(intrin);
445
446 /* For us, the resource indices are just indices into the binding table and
447 * array elements are sequential. A resource_reindex just turns into an
448 * add of the two indices.
449 */
450 assert(intrin->src[0].is_ssa && intrin->src[1].is_ssa);
451 nir_ssa_def *old_index = intrin->src[0].ssa;
452 nir_ssa_def *offset = intrin->src[1].ssa;
453
454 nir_ssa_def *new_index;
455 switch (desc_addr_format(desc_type, state)) {
456 case nir_address_format_64bit_bounded_global:
457 /* See also lower_res_index_intrinsic() */
458 assert(intrin->dest.ssa.num_components == 4);
459 assert(intrin->dest.ssa.bit_size == 32);
460 new_index = nir_vec4(b, nir_channel(b, old_index, 0),
461 nir_iadd(b, nir_channel(b, old_index, 1),
462 offset),
463 nir_channel(b, old_index, 2),
464 nir_ssa_undef(b, 1, 32));
465 break;
466
467 case nir_address_format_64bit_global: {
468 /* See also lower_res_index_intrinsic() */
469 assert(intrin->dest.ssa.num_components == 1);
470 assert(intrin->dest.ssa.bit_size == 64);
471 nir_ssa_def *base = nir_unpack_64_2x32_split_x(b, old_index);
472 nir_ssa_def *arr_idx = nir_unpack_64_2x32_split_y(b, old_index);
473 new_index = nir_pack_64_2x32_split(b, base, nir_iadd(b, arr_idx, offset));
474 break;
475 }
476
477 case nir_address_format_32bit_index_offset:
478 assert(intrin->dest.ssa.num_components == 2);
479 assert(intrin->dest.ssa.bit_size == 32);
480 new_index = nir_vec2(b, nir_iadd(b, nir_channel(b, old_index, 0), offset),
481 nir_channel(b, old_index, 1));
482 break;
483
484 default:
485 unreachable("Uhandled address format");
486 }
487
488 assert(intrin->dest.is_ssa);
489 nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(new_index));
490 nir_instr_remove(&intrin->instr);
491 }
492
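/* Load the SSBO's descriptor data from its set's descriptor buffer: decode
 * the 16.8.8 descriptor offset built in lower_res_index_intrinsic(), add
 * array_index times the descriptor size, and fetch four dwords with a UBO
 * load.
 */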
493 static nir_ssa_def *
494 build_ssbo_descriptor_load(const VkDescriptorType desc_type,
495 nir_ssa_def *index,
496 struct apply_pipeline_layout_state *state)
497 {
498 nir_builder *b = &state->builder;
499
500 nir_ssa_def *desc_offset, *array_index;
501 switch (state->ssbo_addr_format) {
502 case nir_address_format_64bit_bounded_global:
503 /* See also lower_res_index_intrinsic() */
504 desc_offset = nir_channel(b, index, 0);
505 array_index = nir_umin(b, nir_channel(b, index, 1),
506 nir_channel(b, index, 2));
507 break;
508
509 case nir_address_format_64bit_global:
510 /* See also lower_res_index_intrinsic() */
511 desc_offset = nir_unpack_64_2x32_split_x(b, index);
512 array_index = nir_unpack_64_2x32_split_y(b, index);
513 break;
514
515 default:
516 unreachable("Unhandled address format for SSBO");
517 }
518
519 /* The desc_offset is actually 16.8.8 */
520 nir_ssa_def *desc_buffer_index =
521 nir_extract_u8(b, desc_offset, nir_imm_int(b, 1));
522 nir_ssa_def *desc_offset_base =
523 nir_extract_u16(b, desc_offset, nir_imm_int(b, 1));
524
525 /* Compute the actual descriptor offset */
526 const unsigned descriptor_size =
527 anv_descriptor_type_size(state->pdevice, desc_type);
528 desc_offset = nir_iadd(b, desc_offset_base,
529 nir_imul_imm(b, array_index, descriptor_size));
530
531 nir_intrinsic_instr *desc_load =
532 nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_ubo);
533 desc_load->src[0] = nir_src_for_ssa(desc_buffer_index);
534 desc_load->src[1] = nir_src_for_ssa(desc_offset);
535 desc_load->num_components = 4;
536 nir_ssa_dest_init(&desc_load->instr, &desc_load->dest, 4, 32, NULL);
537 nir_builder_instr_insert(b, &desc_load->instr);
538
539 return &desc_load->dest.ssa;
540 }
541
542 static void
543 lower_load_vulkan_descriptor(nir_intrinsic_instr *intrin,
544 struct apply_pipeline_layout_state *state)
545 {
546 nir_builder *b = &state->builder;
547
548 b->cursor = nir_before_instr(&intrin->instr);
549
550 const VkDescriptorType desc_type = nir_intrinsic_desc_type(intrin);
551
552 assert(intrin->src[0].is_ssa);
553 nir_ssa_def *index = intrin->src[0].ssa;
554
555 nir_ssa_def *desc;
556 if (state->pdevice->has_a64_buffer_access &&
557 (desc_type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
558 desc_type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)) {
559 desc = build_ssbo_descriptor_load(desc_type, index, state);
560
561 /* We want nir_address_format_64bit_global */
562 if (!state->add_bounds_checks)
563 desc = nir_pack_64_2x32(b, nir_channels(b, desc, 0x3));
564
565 if (state->dynamic_offset_uniform_start >= 0) {
566 /* This shader has dynamic offsets and we have no way of knowing
567 * (aside from the dynamic offset base index) if this buffer has a
568 * dynamic offset.
569 */
570 nir_ssa_def *desc_offset, *array_index;
571 switch (state->ssbo_addr_format) {
572 case nir_address_format_64bit_bounded_global:
573 /* See also lower_res_index_intrinsic() */
574 desc_offset = nir_channel(b, index, 0);
575 array_index = nir_umin(b, nir_channel(b, index, 1),
576 nir_channel(b, index, 2));
577 break;
578
579 case nir_address_format_64bit_global:
580 /* See also lower_res_index_intrinsic() */
581 desc_offset = nir_unpack_64_2x32_split_x(b, index);
582 array_index = nir_unpack_64_2x32_split_y(b, index);
583 break;
584
585 default:
586 unreachable("Unhandled address format for SSBO");
587 }
588
589 nir_ssa_def *dyn_offset_base =
590 nir_extract_u8(b, desc_offset, nir_imm_int(b, 0));
591 nir_ssa_def *dyn_offset_idx =
592 nir_iadd(b, dyn_offset_base, array_index);
593 if (state->add_bounds_checks) {
594 dyn_offset_idx = nir_umin(b, dyn_offset_idx,
595 nir_imm_int(b, MAX_DYNAMIC_BUFFERS));
596 }
597
598 nir_intrinsic_instr *dyn_load =
599 nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_uniform);
600 nir_intrinsic_set_base(dyn_load, state->dynamic_offset_uniform_start);
601 nir_intrinsic_set_range(dyn_load, MAX_DYNAMIC_BUFFERS * 4);
602 dyn_load->src[0] = nir_src_for_ssa(nir_imul_imm(b, dyn_offset_idx, 4));
603 dyn_load->num_components = 1;
604 nir_ssa_dest_init(&dyn_load->instr, &dyn_load->dest, 1, 32, NULL);
605 nir_builder_instr_insert(b, &dyn_load->instr);
606
607 nir_ssa_def *dynamic_offset =
608 nir_bcsel(b, nir_ieq(b, dyn_offset_base, nir_imm_int(b, 0xff)),
609 nir_imm_int(b, 0), &dyn_load->dest.ssa);
610
611 switch (state->ssbo_addr_format) {
612 case nir_address_format_64bit_bounded_global: {
613 /* The dynamic offset gets added to the base pointer so that we
614 * have a sliding window range.
615 */
616 nir_ssa_def *base_ptr =
617 nir_pack_64_2x32(b, nir_channels(b, desc, 0x3));
618 base_ptr = nir_iadd(b, base_ptr, nir_u2u64(b, dynamic_offset));
619 desc = nir_vec4(b, nir_unpack_64_2x32_split_x(b, base_ptr),
620 nir_unpack_64_2x32_split_y(b, base_ptr),
621 nir_channel(b, desc, 2),
622 nir_channel(b, desc, 3));
623 break;
624 }
625
626 case nir_address_format_64bit_global:
627 desc = nir_iadd(b, desc, nir_u2u64(b, dynamic_offset));
628 break;
629
630 default:
631 unreachable("Unhandled address format for SSBO");
632 }
633 }
634 } else {
635 /* We follow the nir_address_format_32bit_index_offset model */
636 desc = index;
637 }
638
639 assert(intrin->dest.is_ssa);
640 nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(desc));
641 nir_instr_remove(&intrin->instr);
642 }
643
644 static void
645 lower_get_buffer_size(nir_intrinsic_instr *intrin,
646 struct apply_pipeline_layout_state *state)
647 {
648 if (_mesa_set_search(state->lowered_instrs, intrin))
649 return;
650
651 nir_builder *b = &state->builder;
652
653 b->cursor = nir_before_instr(&intrin->instr);
654
655 const VkDescriptorType desc_type = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
656
657 assert(intrin->src[0].is_ssa);
658 nir_ssa_def *index = intrin->src[0].ssa;
659
660 if (state->pdevice->has_a64_buffer_access) {
661 nir_ssa_def *desc = build_ssbo_descriptor_load(desc_type, index, state);
662 nir_ssa_def *size = nir_channel(b, desc, 2);
663 nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(size));
664 nir_instr_remove(&intrin->instr);
665 } else {
666 /* We're following the nir_address_format_32bit_index_offset model so
667 * the binding table index is the first component of the address. The
668 * back-end wants a scalar binding table index source.
669 */
670 nir_instr_rewrite_src(&intrin->instr, &intrin->src[0],
671 nir_src_for_ssa(nir_channel(b, index, 0)));
672 }
673 }
674
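/* Load num_components x bit_size of descriptor data for the given variable
 * deref from its set's descriptor buffer, starting 'offset' bytes into the
 * (possibly array-indexed) descriptor.
 */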
675 static nir_ssa_def *
676 build_descriptor_load(nir_deref_instr *deref, unsigned offset,
677 unsigned num_components, unsigned bit_size,
678 struct apply_pipeline_layout_state *state)
679 {
680 nir_variable *var = nir_deref_instr_get_variable(deref);
681
682 unsigned set = var->data.descriptor_set;
683 unsigned binding = var->data.binding;
684 unsigned array_size =
685 state->layout->set[set].layout->binding[binding].array_size;
686
687 const struct anv_descriptor_set_binding_layout *bind_layout =
688 &state->layout->set[set].layout->binding[binding];
689
690 nir_builder *b = &state->builder;
691
692 nir_ssa_def *desc_buffer_index =
693 nir_imm_int(b, state->set[set].desc_offset);
694
695 nir_ssa_def *desc_offset =
696 nir_imm_int(b, bind_layout->descriptor_offset + offset);
697 if (deref->deref_type != nir_deref_type_var) {
698 assert(deref->deref_type == nir_deref_type_array);
699
700 const unsigned descriptor_size = anv_descriptor_size(bind_layout);
701 nir_ssa_def *arr_index = nir_ssa_for_src(b, deref->arr.index, 1);
702 if (state->add_bounds_checks)
703 arr_index = nir_umin(b, arr_index, nir_imm_int(b, array_size - 1));
704
705 desc_offset = nir_iadd(b, desc_offset,
706 nir_imul_imm(b, arr_index, descriptor_size));
707 }
708
709 nir_intrinsic_instr *desc_load =
710 nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_ubo);
711 desc_load->src[0] = nir_src_for_ssa(desc_buffer_index);
712 desc_load->src[1] = nir_src_for_ssa(desc_offset);
713 desc_load->num_components = num_components;
714 nir_ssa_dest_init(&desc_load->instr, &desc_load->dest,
715 num_components, bit_size, NULL);
716 nir_builder_instr_insert(b, &desc_load->instr);
717
718 return &desc_load->dest.ssa;
719 }
720
721 static void
722 lower_image_intrinsic(nir_intrinsic_instr *intrin,
723 struct apply_pipeline_layout_state *state)
724 {
725 nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
726 nir_variable *var = nir_deref_instr_get_variable(deref);
727
728 unsigned set = var->data.descriptor_set;
729 unsigned binding = var->data.binding;
730 unsigned binding_offset = state->set[set].surface_offsets[binding];
731
732 nir_builder *b = &state->builder;
733 b->cursor = nir_before_instr(&intrin->instr);
734
735 const bool use_bindless = state->pdevice->has_bindless_images;
736
737 if (intrin->intrinsic == nir_intrinsic_image_deref_load_param_intel) {
738 b->cursor = nir_instr_remove(&intrin->instr);
739
740 assert(!use_bindless); /* Otherwise our offsets would be wrong */
741 const unsigned param = nir_intrinsic_base(intrin);
742
743 nir_ssa_def *desc =
744 build_descriptor_load(deref, param * 16,
745 intrin->dest.ssa.num_components,
746 intrin->dest.ssa.bit_size, state);
747
748 nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(desc));
749 } else if (binding_offset > MAX_BINDING_TABLE_SIZE) {
750 const bool write_only =
751 (var->data.image.access & ACCESS_NON_READABLE) != 0;
752 nir_ssa_def *desc =
753 build_descriptor_load(deref, 0, 2, 32, state);
754 nir_ssa_def *handle = nir_channel(b, desc, write_only ? 1 : 0);
755 nir_rewrite_image_intrinsic(intrin, handle, true);
756 } else {
757 unsigned array_size =
758 state->layout->set[set].layout->binding[binding].array_size;
759
760 nir_ssa_def *index = NULL;
761 if (deref->deref_type != nir_deref_type_var) {
762 assert(deref->deref_type == nir_deref_type_array);
763 index = nir_ssa_for_src(b, deref->arr.index, 1);
764 if (state->add_bounds_checks)
765 index = nir_umin(b, index, nir_imm_int(b, array_size - 1));
766 } else {
767 index = nir_imm_int(b, 0);
768 }
769
770 index = nir_iadd_imm(b, index, binding_offset);
771 nir_rewrite_image_intrinsic(intrin, index, false);
772 }
773 }
774
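/* Turn load_constant into a UBO load from the shader constants surface,
 * using the intrinsic's base as an offset into that buffer.
 */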
775 static void
776 lower_load_constant(nir_intrinsic_instr *intrin,
777 struct apply_pipeline_layout_state *state)
778 {
779 nir_builder *b = &state->builder;
780
781 b->cursor = nir_before_instr(&intrin->instr);
782
783 nir_ssa_def *index = nir_imm_int(b, state->constants_offset);
784 nir_ssa_def *offset = nir_iadd(b, nir_ssa_for_src(b, intrin->src[0], 1),
785 nir_imm_int(b, nir_intrinsic_base(intrin)));
786
787 nir_intrinsic_instr *load_ubo =
788 nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_ubo);
789 load_ubo->num_components = intrin->num_components;
790 load_ubo->src[0] = nir_src_for_ssa(index);
791 load_ubo->src[1] = nir_src_for_ssa(offset);
792 nir_ssa_dest_init(&load_ubo->instr, &load_ubo->dest,
793 intrin->dest.ssa.num_components,
794 intrin->dest.ssa.bit_size, NULL);
795 nir_builder_instr_insert(b, &load_ubo->instr);
796
797 nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
798 nir_src_for_ssa(&load_ubo->dest.ssa));
799 nir_instr_remove(&intrin->instr);
800 }
801
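/* Rewrite one texture or sampler deref source.  Bindless bindings load a
 * handle from the descriptor buffer; binding table entries become a base
 * texture/sampler index plus, for indirect array access, an (optionally
 * clamped) offset source.
 */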
802 static void
803 lower_tex_deref(nir_tex_instr *tex, nir_tex_src_type deref_src_type,
804 unsigned *base_index, unsigned plane,
805 struct apply_pipeline_layout_state *state)
806 {
807 int deref_src_idx = nir_tex_instr_src_index(tex, deref_src_type);
808 if (deref_src_idx < 0)
809 return;
810
811 nir_deref_instr *deref = nir_src_as_deref(tex->src[deref_src_idx].src);
812 nir_variable *var = nir_deref_instr_get_variable(deref);
813
814 unsigned set = var->data.descriptor_set;
815 unsigned binding = var->data.binding;
816 unsigned array_size =
817 state->layout->set[set].layout->binding[binding].array_size;
818
819 unsigned binding_offset;
820 if (deref_src_type == nir_tex_src_texture_deref) {
821 binding_offset = state->set[set].surface_offsets[binding];
822 } else {
823 assert(deref_src_type == nir_tex_src_sampler_deref);
824 binding_offset = state->set[set].sampler_offsets[binding];
825 }
826
827 nir_builder *b = &state->builder;
828
829 nir_tex_src_type offset_src_type;
830 nir_ssa_def *index = NULL;
831 if (binding_offset > MAX_BINDING_TABLE_SIZE) {
832 const unsigned plane_offset =
833 plane * sizeof(struct anv_sampled_image_descriptor);
834
835 nir_ssa_def *desc =
836 build_descriptor_load(deref, plane_offset, 2, 32, state);
837
838 if (deref_src_type == nir_tex_src_texture_deref) {
839 offset_src_type = nir_tex_src_texture_handle;
840 index = nir_channel(b, desc, 0);
841 } else {
842 assert(deref_src_type == nir_tex_src_sampler_deref);
843 offset_src_type = nir_tex_src_sampler_handle;
844 index = nir_channel(b, desc, 1);
845 }
846 } else {
847 if (deref_src_type == nir_tex_src_texture_deref) {
848 offset_src_type = nir_tex_src_texture_offset;
849 } else {
850 assert(deref_src_type == nir_tex_src_sampler_deref);
851 offset_src_type = nir_tex_src_sampler_offset;
852 }
853
854 *base_index = binding_offset + plane;
855
856 if (deref->deref_type != nir_deref_type_var) {
857 assert(deref->deref_type == nir_deref_type_array);
858
859 if (nir_src_is_const(deref->arr.index)) {
860 unsigned arr_index = nir_src_as_uint(deref->arr.index);
861 *base_index += MIN2(arr_index, array_size - 1);
862 } else {
863 /* From VK_KHR_sampler_ycbcr_conversion:
864 *
865 * If sampler Y’CBCR conversion is enabled, the combined image
866 * sampler must be indexed only by constant integral expressions
867 * when aggregated into arrays in shader code, irrespective of
868 * the shaderSampledImageArrayDynamicIndexing feature.
869 */
870 assert(nir_tex_instr_src_index(tex, nir_tex_src_plane) == -1);
871
872 index = nir_ssa_for_src(b, deref->arr.index, 1);
873
874 if (state->add_bounds_checks)
875 index = nir_umin(b, index, nir_imm_int(b, array_size - 1));
876 }
877 }
878 }
879
880 if (index) {
881 nir_instr_rewrite_src(&tex->instr, &tex->src[deref_src_idx].src,
882 nir_src_for_ssa(index));
883 tex->src[deref_src_idx].src_type = offset_src_type;
884 } else {
885 nir_tex_instr_remove_src(tex, deref_src_idx);
886 }
887 }
888
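/* Return the constant plane index used for multi-planar (YCbCr) sampling,
 * or 0 if there is none, and strip the plane source from the instruction.
 */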
889 static uint32_t
890 tex_instr_get_and_remove_plane_src(nir_tex_instr *tex)
891 {
892 int plane_src_idx = nir_tex_instr_src_index(tex, nir_tex_src_plane);
893 if (plane_src_idx < 0)
894 return 0;
895
896 unsigned plane = nir_src_as_uint(tex->src[plane_src_idx].src);
897
898 nir_tex_instr_remove_src(tex, plane_src_idx);
899
900 return plane;
901 }
902
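/* Select srcs[idx] out of srcs[start..end-1] by building a balanced tree of
 * bcsel instructions (a binary search on idx).
 */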
903 static nir_ssa_def *
904 build_def_array_select(nir_builder *b, nir_ssa_def **srcs, nir_ssa_def *idx,
905 unsigned start, unsigned end)
906 {
907 if (start == end - 1) {
908 return srcs[start];
909 } else {
910 unsigned mid = start + (end - start) / 2;
911 return nir_bcsel(b, nir_ilt(b, idx, nir_imm_int(b, mid)),
912 build_def_array_select(b, srcs, idx, start, mid),
913 build_def_array_select(b, srcs, idx, mid, end));
914 }
915 }
916
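/* Apply the descriptor-supplied texture swizzle in the shader on Ivy Bridge
 * and Bay Trail: load the ISL channel selects from the descriptor buffer and
 * rebuild the texture result from the selected components.
 */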
917 static void
918 lower_gen7_tex_swizzle(nir_tex_instr *tex, unsigned plane,
919 struct apply_pipeline_layout_state *state)
920 {
921 assert(state->pdevice->info.gen == 7 && !state->pdevice->info.is_haswell);
922 if (tex->sampler_dim == GLSL_SAMPLER_DIM_BUF ||
923 nir_tex_instr_is_query(tex) ||
924 tex->op == nir_texop_tg4 || /* We can't swizzle TG4 */
925 (tex->is_shadow && tex->is_new_style_shadow))
926 return;
927
928 int deref_src_idx = nir_tex_instr_src_index(tex, nir_tex_src_texture_deref);
929 assert(deref_src_idx >= 0);
930
931 nir_deref_instr *deref = nir_src_as_deref(tex->src[deref_src_idx].src);
932 nir_variable *var = nir_deref_instr_get_variable(deref);
933
934 unsigned set = var->data.descriptor_set;
935 unsigned binding = var->data.binding;
936 const struct anv_descriptor_set_binding_layout *bind_layout =
937 &state->layout->set[set].layout->binding[binding];
938
939 if ((bind_layout->data & ANV_DESCRIPTOR_TEXTURE_SWIZZLE) == 0)
940 return;
941
942 nir_builder *b = &state->builder;
943 b->cursor = nir_before_instr(&tex->instr);
944
945 const unsigned plane_offset =
946 plane * sizeof(struct anv_texture_swizzle_descriptor);
947 nir_ssa_def *swiz =
948 build_descriptor_load(deref, plane_offset, 1, 32, state);
949
950 b->cursor = nir_after_instr(&tex->instr);
951
952 assert(tex->dest.ssa.bit_size == 32);
953 assert(tex->dest.ssa.num_components == 4);
954
955 /* Initializing to undef is ok; nir_opt_undef will clean it up. */
956 nir_ssa_def *undef = nir_ssa_undef(b, 1, 32);
957 nir_ssa_def *comps[8];
958 for (unsigned i = 0; i < ARRAY_SIZE(comps); i++)
959 comps[i] = undef;
960
961 comps[ISL_CHANNEL_SELECT_ZERO] = nir_imm_int(b, 0);
962 if (nir_alu_type_get_base_type(tex->dest_type) == nir_type_float)
963 comps[ISL_CHANNEL_SELECT_ONE] = nir_imm_float(b, 1);
964 else
965 comps[ISL_CHANNEL_SELECT_ONE] = nir_imm_int(b, 1);
966 comps[ISL_CHANNEL_SELECT_RED] = nir_channel(b, &tex->dest.ssa, 0);
967 comps[ISL_CHANNEL_SELECT_GREEN] = nir_channel(b, &tex->dest.ssa, 1);
968 comps[ISL_CHANNEL_SELECT_BLUE] = nir_channel(b, &tex->dest.ssa, 2);
969 comps[ISL_CHANNEL_SELECT_ALPHA] = nir_channel(b, &tex->dest.ssa, 3);
970
971 nir_ssa_def *swiz_comps[4];
972 for (unsigned i = 0; i < 4; i++) {
973 nir_ssa_def *comp_swiz = nir_extract_u8(b, swiz, nir_imm_int(b, i));
974 swiz_comps[i] = build_def_array_select(b, comps, comp_swiz, 0, 8);
975 }
976 nir_ssa_def *swiz_tex_res = nir_vec(b, swiz_comps, 4);
977
978 /* Rewrite uses before we insert so we don't rewrite this use */
979 nir_ssa_def_rewrite_uses_after(&tex->dest.ssa,
980 nir_src_for_ssa(swiz_tex_res),
981 swiz_tex_res->parent_instr);
982 }
983
984 static void
985 lower_tex(nir_tex_instr *tex, struct apply_pipeline_layout_state *state)
986 {
987 unsigned plane = tex_instr_get_and_remove_plane_src(tex);
988
989 /* On Ivy Bridge and Bay Trail, we have to swizzle in the shader. Do this
990 * before we lower the derefs away so we can still find the descriptor.
991 */
992 if (state->pdevice->info.gen == 7 && !state->pdevice->info.is_haswell)
993 lower_gen7_tex_swizzle(tex, plane, state);
994
995 state->builder.cursor = nir_before_instr(&tex->instr);
996
997 lower_tex_deref(tex, nir_tex_src_texture_deref,
998 &tex->texture_index, plane, state);
999
1000 lower_tex_deref(tex, nir_tex_src_sampler_deref,
1001 &tex->sampler_index, plane, state);
1002
1003 /* The backend only ever uses this to mark used surfaces. We don't care
1004 * about that little optimization so it just needs to be non-zero.
1005 */
1006 tex->texture_array_size = 1;
1007 }
1008
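/* Per-block lowering pass: rewrite every descriptor-related intrinsic and
 * texture instruction using the binding table and sampler offsets assigned
 * in anv_nir_apply_pipeline_layout().
 */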
1009 static void
1010 apply_pipeline_layout_block(nir_block *block,
1011 struct apply_pipeline_layout_state *state)
1012 {
1013 nir_foreach_instr_safe(instr, block) {
1014 switch (instr->type) {
1015 case nir_instr_type_intrinsic: {
1016 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
1017 switch (intrin->intrinsic) {
1018 case nir_intrinsic_vulkan_resource_index:
1019 lower_res_index_intrinsic(intrin, state);
1020 break;
1021 case nir_intrinsic_vulkan_resource_reindex:
1022 lower_res_reindex_intrinsic(intrin, state);
1023 break;
1024 case nir_intrinsic_load_vulkan_descriptor:
1025 lower_load_vulkan_descriptor(intrin, state);
1026 break;
1027 case nir_intrinsic_get_buffer_size:
1028 lower_get_buffer_size(intrin, state);
1029 break;
1030 case nir_intrinsic_image_deref_load:
1031 case nir_intrinsic_image_deref_store:
1032 case nir_intrinsic_image_deref_atomic_add:
1033 case nir_intrinsic_image_deref_atomic_min:
1034 case nir_intrinsic_image_deref_atomic_max:
1035 case nir_intrinsic_image_deref_atomic_and:
1036 case nir_intrinsic_image_deref_atomic_or:
1037 case nir_intrinsic_image_deref_atomic_xor:
1038 case nir_intrinsic_image_deref_atomic_exchange:
1039 case nir_intrinsic_image_deref_atomic_comp_swap:
1040 case nir_intrinsic_image_deref_size:
1041 case nir_intrinsic_image_deref_samples:
1042 case nir_intrinsic_image_deref_load_param_intel:
1043 case nir_intrinsic_image_deref_load_raw_intel:
1044 case nir_intrinsic_image_deref_store_raw_intel:
1045 lower_image_intrinsic(intrin, state);
1046 break;
1047 case nir_intrinsic_load_constant:
1048 lower_load_constant(intrin, state);
1049 break;
1050 default:
1051 break;
1052 }
1053 break;
1054 }
1055 case nir_instr_type_tex:
1056 lower_tex(nir_instr_as_tex(instr), state);
1057 break;
1058 default:
1059 continue;
1060 }
1061 }
1062 }
1063
1064 struct binding_info {
1065 uint32_t binding;
1066 uint8_t set;
1067 uint16_t score;
1068 };
1069
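/* qsort comparator: sort by descending score, breaking ties by set and then
 * binding so the resulting order is deterministic.
 */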
1070 static int
1071 compare_binding_infos(const void *_a, const void *_b)
1072 {
1073 const struct binding_info *a = _a, *b = _b;
1074 if (a->score != b->score)
1075 return b->score - a->score;
1076
1077 if (a->set != b->set)
1078 return a->set - b->set;
1079
1080 return a->binding - b->binding;
1081 }
1082
1083 void
1084 anv_nir_apply_pipeline_layout(const struct anv_physical_device *pdevice,
1085 bool robust_buffer_access,
1086 struct anv_pipeline_layout *layout,
1087 nir_shader *shader,
1088 struct brw_stage_prog_data *prog_data,
1089 struct anv_pipeline_bind_map *map)
1090 {
1091 void *mem_ctx = ralloc_context(NULL);
1092
1093 struct apply_pipeline_layout_state state = {
1094 .pdevice = pdevice,
1095 .shader = shader,
1096 .layout = layout,
1097 .add_bounds_checks = robust_buffer_access,
1098 .ssbo_addr_format = anv_nir_ssbo_addr_format(pdevice, robust_buffer_access),
1099 .lowered_instrs = _mesa_pointer_set_create(mem_ctx),
1100 .dynamic_offset_uniform_start = -1,
1101 };
1102
1103 for (unsigned s = 0; s < layout->num_sets; s++) {
1104 const unsigned count = layout->set[s].layout->binding_count;
1105 state.set[s].use_count = rzalloc_array(mem_ctx, uint8_t, count);
1106 state.set[s].surface_offsets = rzalloc_array(mem_ctx, uint8_t, count);
1107 state.set[s].sampler_offsets = rzalloc_array(mem_ctx, uint8_t, count);
1108 }
1109
1110 nir_foreach_function(function, shader) {
1111 if (!function->impl)
1112 continue;
1113
1114 nir_foreach_block(block, function->impl)
1115 get_used_bindings_block(block, &state);
1116 }
1117
1118 for (unsigned s = 0; s < layout->num_sets; s++) {
1119 if (state.set[s].desc_buffer_used) {
1120 map->surface_to_descriptor[map->surface_count] =
1121 (struct anv_pipeline_binding) {
1122 .set = ANV_DESCRIPTOR_SET_DESCRIPTORS,
1123 .binding = s,
1124 };
1125 state.set[s].desc_offset = map->surface_count;
1126 map->surface_count++;
1127 }
1128 }
1129
1130 if (state.uses_constants) {
1131 state.constants_offset = map->surface_count;
1132 map->surface_to_descriptor[map->surface_count].set =
1133 ANV_DESCRIPTOR_SET_SHADER_CONSTANTS;
1134 map->surface_count++;
1135 }
1136
1137 unsigned used_binding_count = 0;
1138 for (uint32_t set = 0; set < layout->num_sets; set++) {
1139 struct anv_descriptor_set_layout *set_layout = layout->set[set].layout;
1140 for (unsigned b = 0; b < set_layout->binding_count; b++) {
1141 if (state.set[set].use_count[b] == 0)
1142 continue;
1143
1144 used_binding_count++;
1145 }
1146 }
1147
1148 struct binding_info *infos =
1149 rzalloc_array(mem_ctx, struct binding_info, used_binding_count);
1150 used_binding_count = 0;
1151 for (uint32_t set = 0; set < layout->num_sets; set++) {
1152 struct anv_descriptor_set_layout *set_layout = layout->set[set].layout;
1153 for (unsigned b = 0; b < set_layout->binding_count; b++) {
1154 if (state.set[set].use_count[b] == 0)
1155 continue;
1156
1157 struct anv_descriptor_set_binding_layout *binding =
1158 &layout->set[set].layout->binding[b];
1159
1160 /* Do a fixed-point calculation to generate a score based on the
1161 * number of uses and the binding array size. We shift by 7 instead
1162 * of 8 because we're going to use the top bit below to make
1163 * everything which does not support bindless a much higher priority
1164 * than things which do.
1165 */
1166 uint16_t score = ((uint16_t)state.set[set].use_count[b] << 7) /
1167 binding->array_size;
1168
1169 /* If the descriptor type doesn't support bindless then put it at the
1170 * beginning so we guarantee it gets a slot.
1171 */
1172 if (!anv_descriptor_supports_bindless(pdevice, binding, true) ||
1173 !anv_descriptor_supports_bindless(pdevice, binding, false))
1174 score |= 1 << 15;
1175
1176 infos[used_binding_count++] = (struct binding_info) {
1177 .set = set,
1178 .binding = b,
1179 .score = score,
1180 };
1181 }
1182 }
1183
1184 /* Order the binding infos based on score with highest scores first. If
1185 * scores are equal we then order by set and binding.
1186 */
1187 qsort(infos, used_binding_count, sizeof(struct binding_info),
1188 compare_binding_infos);
1189
1190 bool have_dynamic_buffers = false;
1191
1192 for (unsigned i = 0; i < used_binding_count; i++) {
1193 unsigned set = infos[i].set, b = infos[i].binding;
1194 struct anv_descriptor_set_binding_layout *binding =
1195 &layout->set[set].layout->binding[b];
1196
1197 if (binding->dynamic_offset_index >= 0)
1198 have_dynamic_buffers = true;
1199
1200 const uint32_t array_size = binding->array_size;
1201
1202 if (binding->data & ANV_DESCRIPTOR_SURFACE_STATE) {
1203 if (map->surface_count + array_size > MAX_BINDING_TABLE_SIZE ||
1204 anv_descriptor_requires_bindless(pdevice, binding, false)) {
1205 /* If this descriptor doesn't fit in the binding table or if it
1206 * requires bindless for some reason, flag it as bindless.
1207 */
1208 assert(anv_descriptor_supports_bindless(pdevice, binding, false));
1209 state.set[set].surface_offsets[b] = BINDLESS_OFFSET;
1210 } else {
1211 state.set[set].surface_offsets[b] = map->surface_count;
1212 struct anv_sampler **samplers = binding->immutable_samplers;
1213 for (unsigned i = 0; i < binding->array_size; i++) {
1214 uint8_t planes = samplers ? samplers[i]->n_planes : 1;
1215 for (uint8_t p = 0; p < planes; p++) {
1216 map->surface_to_descriptor[map->surface_count++] =
1217 (struct anv_pipeline_binding) {
1218 .set = set,
1219 .binding = b,
1220 .index = i,
1221 .plane = p,
1222 };
1223 }
1224 }
1225 }
1226 assert(map->surface_count <= MAX_BINDING_TABLE_SIZE);
1227 }
1228
1229 if (binding->data & ANV_DESCRIPTOR_SAMPLER_STATE) {
1230 if (map->sampler_count + array_size > MAX_SAMPLER_TABLE_SIZE ||
1231 anv_descriptor_requires_bindless(pdevice, binding, true)) {
1232 /* If this descriptor doesn't fit in the binding table or if it
1233 * requires bindless for some reason, flag it as bindless.
1234 *
1235 * We also make large sampler arrays bindless because we can avoid
1236 * using indirect sends thanks to bindless samplers being packed
1237 * less tightly than the sampler table.
1238 */
1239 assert(anv_descriptor_supports_bindless(pdevice, binding, true));
1240 state.set[set].sampler_offsets[b] = BINDLESS_OFFSET;
1241 } else {
1242 state.set[set].sampler_offsets[b] = map->sampler_count;
1243 struct anv_sampler **samplers = binding->immutable_samplers;
1244 for (unsigned i = 0; i < binding->array_size; i++) {
1245 uint8_t planes = samplers ? samplers[i]->n_planes : 1;
1246 for (uint8_t p = 0; p < planes; p++) {
1247 map->sampler_to_descriptor[map->sampler_count++] =
1248 (struct anv_pipeline_binding) {
1249 .set = set,
1250 .binding = b,
1251 .index = i,
1252 .plane = p,
1253 };
1254 }
1255 }
1256 }
1257 }
1258 }
1259
1260 if (have_dynamic_buffers) {
1261 state.dynamic_offset_uniform_start = shader->num_uniforms;
1262 uint32_t *param = brw_stage_prog_data_add_params(prog_data,
1263 MAX_DYNAMIC_BUFFERS);
1264 for (unsigned i = 0; i < MAX_DYNAMIC_BUFFERS; i++)
1265 param[i] = ANV_PARAM_DYN_OFFSET(i);
1266 shader->num_uniforms += MAX_DYNAMIC_BUFFERS * 4;
1267 assert(shader->num_uniforms == prog_data->nr_params * 4);
1268 }
1269
1270 nir_foreach_variable(var, &shader->uniforms) {
1271 const struct glsl_type *glsl_type = glsl_without_array(var->type);
1272
1273 if (!glsl_type_is_image(glsl_type))
1274 continue;
1275
1276 enum glsl_sampler_dim dim = glsl_get_sampler_dim(glsl_type);
1277
1278 const uint32_t set = var->data.descriptor_set;
1279 const uint32_t binding = var->data.binding;
1280 const uint32_t array_size =
1281 layout->set[set].layout->binding[binding].array_size;
1282
1283 if (state.set[set].use_count[binding] == 0)
1284 continue;
1285
1286 if (state.set[set].surface_offsets[binding] >= MAX_BINDING_TABLE_SIZE)
1287 continue;
1288
1289 struct anv_pipeline_binding *pipe_binding =
1290 &map->surface_to_descriptor[state.set[set].surface_offsets[binding]];
1291 for (unsigned i = 0; i < array_size; i++) {
1292 assert(pipe_binding[i].set == set);
1293 assert(pipe_binding[i].binding == binding);
1294 assert(pipe_binding[i].index == i);
1295
1296 if (dim == GLSL_SAMPLER_DIM_SUBPASS ||
1297 dim == GLSL_SAMPLER_DIM_SUBPASS_MS)
1298 pipe_binding[i].input_attachment_index = var->data.index + i;
1299
1300 pipe_binding[i].write_only =
1301 (var->data.image.access & ACCESS_NON_READABLE) != 0;
1302 }
1303 }
1304
1305 nir_foreach_function(function, shader) {
1306 if (!function->impl)
1307 continue;
1308
1309 /* Before we do the normal lowering, we look for any SSBO operations
1310 * that we can lower to the BTI model and lower them up-front. The BTI
1311 * model can perform better than the A64 model for a couple reasons:
1312 *
1313 * 1. 48-bit address calculations are potentially expensive and using
1314 * the BTI model lets us simply compute 32-bit offsets and the
1315 * hardware adds the 64-bit surface base address.
1316 *
1317 * 2. The BTI messages, because they use surface states, do bounds
1318 * checking for us. With the A64 model, we have to do our own
1319 * bounds checking and this means wider pointers and extra
1320 * calculations and branching in the shader.
1321 *
1322 * The solution to both of these is to convert things to the BTI model
1323 * opportunistically. We need to do this as a pre-pass for two
1324 * reasons:
1325 *
1326 * 1. The BTI model requires nir_address_format_32bit_index_offset
1327 * pointers which are not the same type as the pointers needed for
1328 * the A64 model. Because all our derefs are set up for the A64
1329 * model (in case we have variable pointers), we have to crawl all
1330 * the way back to the vulkan_resource_index intrinsic and build a
1331 * completely fresh index+offset calculation.
1332 *
1333 * 2. Because the variable-pointers-capable lowering that we do as part
1334 * of apply_pipeline_layout_block is destructive (It really has to
1335 * be to handle variable pointers properly), we've lost the deref
1336 * information by the time we get to the load/store/atomic
1337 * intrinsics in that pass.
1338 */
1339 lower_direct_buffer_access(function->impl, &state);
1340
1341 nir_builder_init(&state.builder, function->impl);
1342 nir_foreach_block(block, function->impl)
1343 apply_pipeline_layout_block(block, &state);
1344 nir_metadata_preserve(function->impl, nir_metadata_block_index |
1345 nir_metadata_dominance);
1346 }
1347
1348 ralloc_free(mem_ctx);
1349 }