anv/pipeline: Sort bindings by most used first
[mesa.git] / src / intel / vulkan / anv_nir_apply_pipeline_layout.c
/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "anv_nir.h"
#include "program/prog_parameter.h"
#include "nir/nir_builder.h"
#include "compiler/brw_nir.h"

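/* State tracked while applying a pipeline layout to a shader. The per-set
 * arrays below are indexed by binding number within that set: use_count
 * records how often a binding is referenced, and surface_offsets /
 * sampler_offsets record the binding table and sampler table slots that are
 * eventually assigned to it. desc_offset is the binding table slot of the
 * set's descriptor buffer, if it is used.
 */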
struct apply_pipeline_layout_state {
   const struct anv_physical_device *pdevice;

   nir_shader *shader;
   nir_builder builder;

   struct anv_pipeline_layout *layout;
   bool add_bounds_checks;

   bool uses_constants;
   uint8_t constants_offset;
   struct {
      bool desc_buffer_used;
      uint8_t desc_offset;

      uint8_t *use_count;
      uint8_t *surface_offsets;
      uint8_t *sampler_offsets;
   } set[MAX_SETS];
};

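/* Record one use of the given (set, binding) pair. The per-binding use
 * counts gathered here are what the scoring and sorting below are based on.
 */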
static void
add_binding(struct apply_pipeline_layout_state *state,
            uint32_t set, uint32_t binding)
{
   const struct anv_descriptor_set_binding_layout *bind_layout =
      &state->layout->set[set].layout->binding[binding];

   if (state->set[set].use_count[binding] < UINT8_MAX)
      state->set[set].use_count[binding]++;

   /* Only flag the descriptor buffer as used if there's actually data for
    * this binding. This lets us be lazy and call this function constantly
    * without worrying about unnecessarily enabling the buffer.
    */
   if (anv_descriptor_size(bind_layout))
      state->set[set].desc_buffer_used = true;
}

static void
add_deref_src_binding(struct apply_pipeline_layout_state *state, nir_src src)
{
   nir_deref_instr *deref = nir_src_as_deref(src);
   nir_variable *var = nir_deref_instr_get_variable(deref);
   add_binding(state, var->data.descriptor_set, var->data.binding);
}

static void
add_tex_src_binding(struct apply_pipeline_layout_state *state,
                    nir_tex_instr *tex, nir_tex_src_type deref_src_type)
{
   int deref_src_idx = nir_tex_instr_src_index(tex, deref_src_type);
   if (deref_src_idx < 0)
      return;

   add_deref_src_binding(state, tex->src[deref_src_idx].src);
}

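/* First pass over a block: bump the use count of every binding referenced by
 * a resource-index intrinsic, an image deref intrinsic, or a texture/sampler
 * deref source, and note whether load_constant is used so a constants
 * surface can be reserved later.
 */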
static void
get_used_bindings_block(nir_block *block,
                        struct apply_pipeline_layout_state *state)
{
   nir_foreach_instr_safe(instr, block) {
      switch (instr->type) {
      case nir_instr_type_intrinsic: {
         nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
         switch (intrin->intrinsic) {
         case nir_intrinsic_vulkan_resource_index:
            add_binding(state, nir_intrinsic_desc_set(intrin),
                        nir_intrinsic_binding(intrin));
            break;

         case nir_intrinsic_image_deref_load:
         case nir_intrinsic_image_deref_store:
         case nir_intrinsic_image_deref_atomic_add:
         case nir_intrinsic_image_deref_atomic_min:
         case nir_intrinsic_image_deref_atomic_max:
         case nir_intrinsic_image_deref_atomic_and:
         case nir_intrinsic_image_deref_atomic_or:
         case nir_intrinsic_image_deref_atomic_xor:
         case nir_intrinsic_image_deref_atomic_exchange:
         case nir_intrinsic_image_deref_atomic_comp_swap:
         case nir_intrinsic_image_deref_size:
         case nir_intrinsic_image_deref_samples:
         case nir_intrinsic_image_deref_load_param_intel:
         case nir_intrinsic_image_deref_load_raw_intel:
         case nir_intrinsic_image_deref_store_raw_intel:
            add_deref_src_binding(state, intrin->src[0]);
            break;

         case nir_intrinsic_load_constant:
            state->uses_constants = true;
            break;

         default:
            break;
         }
         break;
      }
      case nir_instr_type_tex: {
         nir_tex_instr *tex = nir_instr_as_tex(instr);
         add_tex_src_binding(state, tex, nir_tex_src_texture_deref);
         add_tex_src_binding(state, tex, nir_tex_src_sampler_deref);
         break;
      }
      default:
         continue;
      }
   }
}

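/* Lower nir_intrinsic_vulkan_resource_index to a vec2 in the
 * nir_address_format_32bit_index_offset form: the first component is a
 * binding table index and the second a byte offset. Inline uniform blocks
 * instead reference the set's descriptor buffer directly.
 */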
static void
lower_res_index_intrinsic(nir_intrinsic_instr *intrin,
                          struct apply_pipeline_layout_state *state)
{
   nir_builder *b = &state->builder;

   b->cursor = nir_before_instr(&intrin->instr);

   uint32_t set = nir_intrinsic_desc_set(intrin);
   uint32_t binding = nir_intrinsic_binding(intrin);

   const struct anv_descriptor_set_binding_layout *bind_layout =
      &state->layout->set[set].layout->binding[binding];

   uint32_t surface_index = state->set[set].surface_offsets[binding];
   uint32_t array_size = bind_layout->array_size;

   nir_ssa_def *array_index = nir_ssa_for_src(b, intrin->src[0], 1);
   if (nir_src_is_const(intrin->src[0]) || state->add_bounds_checks)
      array_index = nir_umin(b, array_index, nir_imm_int(b, array_size - 1));

   nir_ssa_def *index;
   if (bind_layout->data & ANV_DESCRIPTOR_INLINE_UNIFORM) {
      /* This is an inline uniform block. Just reference the descriptor set
       * and use the descriptor offset as the base.
       */
      index = nir_imm_ivec2(b, state->set[set].desc_offset,
                            bind_layout->descriptor_offset);
   } else {
      /* We're using nir_address_format_32bit_index_offset */
      index = nir_vec2(b, nir_iadd_imm(b, array_index, surface_index),
                          nir_imm_int(b, 0));
   }

   assert(intrin->dest.is_ssa);
   nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(index));
   nir_instr_remove(&intrin->instr);
}

static void
lower_res_reindex_intrinsic(nir_intrinsic_instr *intrin,
                            struct apply_pipeline_layout_state *state)
{
   nir_builder *b = &state->builder;

   b->cursor = nir_before_instr(&intrin->instr);

   /* For us, the resource indices are just indices into the binding table and
    * array elements are sequential. A resource_reindex just turns into an
    * add of the two indices.
    */
   assert(intrin->src[0].is_ssa && intrin->src[1].is_ssa);
   nir_ssa_def *old_index = intrin->src[0].ssa;
   nir_ssa_def *offset = intrin->src[1].ssa;

   nir_ssa_def *new_index =
      nir_vec2(b, nir_iadd(b, nir_channel(b, old_index, 0), offset),
                  nir_channel(b, old_index, 1));

   assert(intrin->dest.is_ssa);
   nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(new_index));
   nir_instr_remove(&intrin->instr);
}

static void
lower_load_vulkan_descriptor(nir_intrinsic_instr *intrin,
                             struct apply_pipeline_layout_state *state)
{
   nir_builder *b = &state->builder;

   b->cursor = nir_before_instr(&intrin->instr);

   /* We follow the nir_address_format_32bit_index_offset model */
   assert(intrin->src[0].is_ssa);
   nir_ssa_def *index = intrin->src[0].ssa;

   assert(intrin->dest.is_ssa);
   nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(index));
   nir_instr_remove(&intrin->instr);
}

static void
lower_get_buffer_size(nir_intrinsic_instr *intrin,
                      struct apply_pipeline_layout_state *state)
{
   nir_builder *b = &state->builder;

   b->cursor = nir_before_instr(&intrin->instr);

   assert(intrin->src[0].is_ssa);
   nir_ssa_def *index = intrin->src[0].ssa;

   /* We're following the nir_address_format_32bit_index_offset model so the
    * binding table index is the first component of the address. The
    * back-end wants a scalar binding table index source.
    */
   nir_instr_rewrite_src(&intrin->instr, &intrin->src[0],
                         nir_src_for_ssa(nir_channel(b, index, 0)));
}

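/* Emit a load_ubo of num_components x bit_size bits from the descriptor
 * buffer of the deref'd variable's set, at the binding's descriptor offset
 * plus the given offset (plus the array element offset for arrayed
 * bindings).
 */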
static nir_ssa_def *
build_descriptor_load(nir_deref_instr *deref, unsigned offset,
                      unsigned num_components, unsigned bit_size,
                      struct apply_pipeline_layout_state *state)
{
   nir_variable *var = nir_deref_instr_get_variable(deref);

   unsigned set = var->data.descriptor_set;
   unsigned binding = var->data.binding;
   unsigned array_size =
      state->layout->set[set].layout->binding[binding].array_size;

   const struct anv_descriptor_set_binding_layout *bind_layout =
      &state->layout->set[set].layout->binding[binding];

   nir_builder *b = &state->builder;

   nir_ssa_def *desc_buffer_index =
      nir_imm_int(b, state->set[set].desc_offset);

   nir_ssa_def *desc_offset =
      nir_imm_int(b, bind_layout->descriptor_offset + offset);
   if (deref->deref_type != nir_deref_type_var) {
      assert(deref->deref_type == nir_deref_type_array);

      const unsigned descriptor_size = anv_descriptor_size(bind_layout);
      nir_ssa_def *arr_index = nir_ssa_for_src(b, deref->arr.index, 1);
      if (state->add_bounds_checks)
         arr_index = nir_umin(b, arr_index, nir_imm_int(b, array_size - 1));

      desc_offset = nir_iadd(b, desc_offset,
                             nir_imul_imm(b, arr_index, descriptor_size));
   }

   nir_intrinsic_instr *desc_load =
      nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_ubo);
   desc_load->src[0] = nir_src_for_ssa(desc_buffer_index);
   desc_load->src[1] = nir_src_for_ssa(desc_offset);
   desc_load->num_components = num_components;
   nir_ssa_dest_init(&desc_load->instr, &desc_load->dest,
                     num_components, bit_size, NULL);
   nir_builder_instr_insert(b, &desc_load->instr);

   return &desc_load->dest.ssa;
}

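/* Image derefs are lowered in one of two ways: load_param_intel reads image
 * parameters straight out of the descriptor buffer, while every other image
 * intrinsic is rewritten to use a binding table index computed from the
 * binding's surface offset plus the array index.
 */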
static void
lower_image_intrinsic(nir_intrinsic_instr *intrin,
                      struct apply_pipeline_layout_state *state)
{
   nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);

   nir_builder *b = &state->builder;
   b->cursor = nir_before_instr(&intrin->instr);

   if (intrin->intrinsic == nir_intrinsic_image_deref_load_param_intel) {
      b->cursor = nir_instr_remove(&intrin->instr);

      const unsigned param = nir_intrinsic_base(intrin);

      nir_ssa_def *desc =
         build_descriptor_load(deref, param * 16,
                               intrin->dest.ssa.num_components,
                               intrin->dest.ssa.bit_size, state);

      nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(desc));
   } else {
      nir_variable *var = nir_deref_instr_get_variable(deref);

      unsigned set = var->data.descriptor_set;
      unsigned binding = var->data.binding;
      unsigned binding_offset = state->set[set].surface_offsets[binding];
      unsigned array_size =
         state->layout->set[set].layout->binding[binding].array_size;

      nir_ssa_def *index = NULL;
      if (deref->deref_type != nir_deref_type_var) {
         assert(deref->deref_type == nir_deref_type_array);
         index = nir_ssa_for_src(b, deref->arr.index, 1);
         if (state->add_bounds_checks)
            index = nir_umin(b, index, nir_imm_int(b, array_size - 1));
      } else {
         index = nir_imm_int(b, 0);
      }

      index = nir_iadd_imm(b, index, binding_offset);
      nir_rewrite_image_intrinsic(intrin, index, false);
   }
}

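/* Shader constants get their own binding table surface, so a load_constant
 * becomes a load_ubo from constants_offset at the constant's base plus the
 * (possibly variable) offset source.
 */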
static void
lower_load_constant(nir_intrinsic_instr *intrin,
                    struct apply_pipeline_layout_state *state)
{
   nir_builder *b = &state->builder;

   b->cursor = nir_before_instr(&intrin->instr);

   nir_ssa_def *index = nir_imm_int(b, state->constants_offset);
   nir_ssa_def *offset = nir_iadd(b, nir_ssa_for_src(b, intrin->src[0], 1),
                                  nir_imm_int(b, nir_intrinsic_base(intrin)));

   nir_intrinsic_instr *load_ubo =
      nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_ubo);
   load_ubo->num_components = intrin->num_components;
   load_ubo->src[0] = nir_src_for_ssa(index);
   load_ubo->src[1] = nir_src_for_ssa(offset);
   nir_ssa_dest_init(&load_ubo->instr, &load_ubo->dest,
                     intrin->dest.ssa.num_components,
                     intrin->dest.ssa.bit_size, NULL);
   nir_builder_instr_insert(b, &load_ubo->instr);

   nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
                            nir_src_for_ssa(&load_ubo->dest.ssa));
   nir_instr_remove(&intrin->instr);
}

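/* Replace a texture or sampler deref source with the binding table or
 * sampler table index it maps to. Constant array indices are folded into
 * *base_index; dynamic indices become a texture/sampler offset source.
 */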
static void
lower_tex_deref(nir_tex_instr *tex, nir_tex_src_type deref_src_type,
                unsigned *base_index,
                struct apply_pipeline_layout_state *state)
{
   int deref_src_idx = nir_tex_instr_src_index(tex, deref_src_type);
   if (deref_src_idx < 0)
      return;

   nir_deref_instr *deref = nir_src_as_deref(tex->src[deref_src_idx].src);
   nir_variable *var = nir_deref_instr_get_variable(deref);

   unsigned set = var->data.descriptor_set;
   unsigned binding = var->data.binding;
   unsigned array_size =
      state->layout->set[set].layout->binding[binding].array_size;

   nir_tex_src_type offset_src_type;
   if (deref_src_type == nir_tex_src_texture_deref) {
      offset_src_type = nir_tex_src_texture_offset;
      *base_index = state->set[set].surface_offsets[binding];
   } else {
      assert(deref_src_type == nir_tex_src_sampler_deref);
      offset_src_type = nir_tex_src_sampler_offset;
      *base_index = state->set[set].sampler_offsets[binding];
   }

   nir_ssa_def *index = NULL;
   if (deref->deref_type != nir_deref_type_var) {
      assert(deref->deref_type == nir_deref_type_array);

      if (nir_src_is_const(deref->arr.index)) {
         unsigned arr_index = nir_src_as_uint(deref->arr.index);
         *base_index += MIN2(arr_index, array_size - 1);
      } else {
         nir_builder *b = &state->builder;

         /* From VK_KHR_sampler_ycbcr_conversion:
          *
          *    If sampler Y'CBCR conversion is enabled, the combined image
          *    sampler must be indexed only by constant integral expressions when
          *    aggregated into arrays in shader code, irrespective of the
          *    shaderSampledImageArrayDynamicIndexing feature.
          */
         assert(nir_tex_instr_src_index(tex, nir_tex_src_plane) == -1);

         index = nir_ssa_for_src(b, deref->arr.index, 1);

         if (state->add_bounds_checks)
            index = nir_umin(b, index, nir_imm_int(b, array_size - 1));
      }
   }

   if (index) {
      nir_instr_rewrite_src(&tex->instr, &tex->src[deref_src_idx].src,
                            nir_src_for_ssa(index));
      tex->src[deref_src_idx].src_type = offset_src_type;
   } else {
      nir_tex_instr_remove_src(tex, deref_src_idx);
   }
}

static uint32_t
tex_instr_get_and_remove_plane_src(nir_tex_instr *tex)
{
   int plane_src_idx = nir_tex_instr_src_index(tex, nir_tex_src_plane);
   if (plane_src_idx < 0)
      return 0;

   unsigned plane = nir_src_as_uint(tex->src[plane_src_idx].src);

   nir_tex_instr_remove_src(tex, plane_src_idx);

   return plane;
}

static void
lower_tex(nir_tex_instr *tex, struct apply_pipeline_layout_state *state)
{
   state->builder.cursor = nir_before_instr(&tex->instr);

   unsigned plane = tex_instr_get_and_remove_plane_src(tex);

   lower_tex_deref(tex, nir_tex_src_texture_deref,
                   &tex->texture_index, state);
   tex->texture_index += plane;

   lower_tex_deref(tex, nir_tex_src_sampler_deref,
                   &tex->sampler_index, state);
   tex->sampler_index += plane;

   /* The backend only ever uses this to mark used surfaces. We don't care
    * about that little optimization so it just needs to be non-zero.
    */
   tex->texture_array_size = 1;
}

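/* Second pass over a block: rewrite every descriptor-referencing intrinsic
 * and texture instruction using the surface and sampler offsets assigned
 * earlier.
 */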
static void
apply_pipeline_layout_block(nir_block *block,
                            struct apply_pipeline_layout_state *state)
{
   nir_foreach_instr_safe(instr, block) {
      switch (instr->type) {
      case nir_instr_type_intrinsic: {
         nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
         switch (intrin->intrinsic) {
         case nir_intrinsic_vulkan_resource_index:
            lower_res_index_intrinsic(intrin, state);
            break;
         case nir_intrinsic_vulkan_resource_reindex:
            lower_res_reindex_intrinsic(intrin, state);
            break;
         case nir_intrinsic_load_vulkan_descriptor:
            lower_load_vulkan_descriptor(intrin, state);
            break;
         case nir_intrinsic_get_buffer_size:
            lower_get_buffer_size(intrin, state);
            break;
         case nir_intrinsic_image_deref_load:
         case nir_intrinsic_image_deref_store:
         case nir_intrinsic_image_deref_atomic_add:
         case nir_intrinsic_image_deref_atomic_min:
         case nir_intrinsic_image_deref_atomic_max:
         case nir_intrinsic_image_deref_atomic_and:
         case nir_intrinsic_image_deref_atomic_or:
         case nir_intrinsic_image_deref_atomic_xor:
         case nir_intrinsic_image_deref_atomic_exchange:
         case nir_intrinsic_image_deref_atomic_comp_swap:
         case nir_intrinsic_image_deref_size:
         case nir_intrinsic_image_deref_samples:
         case nir_intrinsic_image_deref_load_param_intel:
         case nir_intrinsic_image_deref_load_raw_intel:
         case nir_intrinsic_image_deref_store_raw_intel:
            lower_image_intrinsic(intrin, state);
            break;
         case nir_intrinsic_load_constant:
            lower_load_constant(intrin, state);
            break;
         default:
            break;
         }
         break;
      }
      case nir_instr_type_tex:
         lower_tex(nir_instr_as_tex(instr), state);
         break;
      default:
         continue;
      }
   }
}

struct binding_info {
   uint32_t binding;
   uint8_t set;
   uint16_t score;
};

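/* qsort comparator: highest score first; ties are broken by set then binding
 * so the resulting order is deterministic.
 */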
static int
compare_binding_infos(const void *_a, const void *_b)
{
   const struct binding_info *a = _a, *b = _b;
   if (a->score != b->score)
      return b->score - a->score;

   if (a->set != b->set)
      return a->set - b->set;

   return a->binding - b->binding;
}

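/* Apply the pipeline layout to a shader:
 *
 *  1. Walk the shader and count how often each (set, binding) is used.
 *  2. Reserve binding table entries for any used descriptor buffers and,
 *     if needed, the shader constants surface.
 *  3. Score the used bindings, sort them most-used first, and assign
 *     surface and sampler slots in that order so the most heavily used
 *     bindings end up at the lowest binding table indices.
 *  4. Lower all descriptor-referencing instructions to those slots.
 */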
void
anv_nir_apply_pipeline_layout(const struct anv_physical_device *pdevice,
                              bool robust_buffer_access,
                              struct anv_pipeline_layout *layout,
                              nir_shader *shader,
                              struct brw_stage_prog_data *prog_data,
                              struct anv_pipeline_bind_map *map)
{
   struct apply_pipeline_layout_state state = {
      .pdevice = pdevice,
      .shader = shader,
      .layout = layout,
      .add_bounds_checks = robust_buffer_access,
   };

   void *mem_ctx = ralloc_context(NULL);

   for (unsigned s = 0; s < layout->num_sets; s++) {
      const unsigned count = layout->set[s].layout->binding_count;
      state.set[s].use_count = rzalloc_array(mem_ctx, uint8_t, count);
      state.set[s].surface_offsets = rzalloc_array(mem_ctx, uint8_t, count);
      state.set[s].sampler_offsets = rzalloc_array(mem_ctx, uint8_t, count);
   }

   nir_foreach_function(function, shader) {
      if (!function->impl)
         continue;

      nir_foreach_block(block, function->impl)
         get_used_bindings_block(block, &state);
   }

   for (unsigned s = 0; s < layout->num_sets; s++) {
      if (state.set[s].desc_buffer_used) {
         map->surface_to_descriptor[map->surface_count] =
            (struct anv_pipeline_binding) {
               .set = ANV_DESCRIPTOR_SET_DESCRIPTORS,
               .binding = s,
            };
         state.set[s].desc_offset = map->surface_count;
         map->surface_count++;
      }
   }

   if (state.uses_constants) {
      state.constants_offset = map->surface_count;
      map->surface_to_descriptor[map->surface_count].set =
         ANV_DESCRIPTOR_SET_SHADER_CONSTANTS;
      map->surface_count++;
   }

   unsigned used_binding_count = 0;
   for (uint32_t set = 0; set < layout->num_sets; set++) {
      struct anv_descriptor_set_layout *set_layout = layout->set[set].layout;
      for (unsigned b = 0; b < set_layout->binding_count; b++) {
         if (state.set[set].use_count[b] == 0)
            continue;

         used_binding_count++;
      }
   }

   struct binding_info *infos =
      rzalloc_array(mem_ctx, struct binding_info, used_binding_count);
   used_binding_count = 0;
   for (uint32_t set = 0; set < layout->num_sets; set++) {
      struct anv_descriptor_set_layout *set_layout = layout->set[set].layout;
      for (unsigned b = 0; b < set_layout->binding_count; b++) {
         if (state.set[set].use_count[b] == 0)
            continue;

         struct anv_descriptor_set_binding_layout *binding =
            &layout->set[set].layout->binding[b];

         /* Do a fixed-point calculation to generate a score based on the
          * number of uses and the binding array size.
          */
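         /* This works out to roughly 128 * uses / array_size, i.e. the
          * average number of uses per array element in fixed point.
          */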
         uint16_t score = ((uint16_t)state.set[set].use_count[b] << 7) /
                          binding->array_size;

         infos[used_binding_count++] = (struct binding_info) {
            .set = set,
            .binding = b,
            .score = score,
         };
      }
   }

   /* Order the binding infos based on score with highest scores first. If
    * scores are equal we then order by set and binding.
    */
   qsort(infos, used_binding_count, sizeof(struct binding_info),
         compare_binding_infos);

   for (unsigned i = 0; i < used_binding_count; i++) {
      unsigned set = infos[i].set, b = infos[i].binding;
      struct anv_descriptor_set_binding_layout *binding =
         &layout->set[set].layout->binding[b];

      if (binding->data & ANV_DESCRIPTOR_SURFACE_STATE) {
         state.set[set].surface_offsets[b] = map->surface_count;
         struct anv_sampler **samplers = binding->immutable_samplers;
         for (unsigned i = 0; i < binding->array_size; i++) {
            uint8_t planes = samplers ? samplers[i]->n_planes : 1;
            for (uint8_t p = 0; p < planes; p++) {
               map->surface_to_descriptor[map->surface_count++] =
                  (struct anv_pipeline_binding) {
                     .set = set,
                     .binding = b,
                     .index = i,
                     .plane = p,
                  };
            }
         }
      }
      assert(map->surface_count <= MAX_BINDING_TABLE_SIZE);

      if (binding->data & ANV_DESCRIPTOR_SAMPLER_STATE) {
         state.set[set].sampler_offsets[b] = map->sampler_count;
         struct anv_sampler **samplers = binding->immutable_samplers;
         for (unsigned i = 0; i < binding->array_size; i++) {
            uint8_t planes = samplers ? samplers[i]->n_planes : 1;
            for (uint8_t p = 0; p < planes; p++) {
               map->sampler_to_descriptor[map->sampler_count++] =
                  (struct anv_pipeline_binding) {
                     .set = set,
                     .binding = b,
                     .index = i,
                     .plane = p,
                  };
            }
         }
      }
   }

   nir_foreach_variable(var, &shader->uniforms) {
      const struct glsl_type *glsl_type = glsl_without_array(var->type);

      if (!glsl_type_is_image(glsl_type))
         continue;

      enum glsl_sampler_dim dim = glsl_get_sampler_dim(glsl_type);

      const uint32_t set = var->data.descriptor_set;
      const uint32_t binding = var->data.binding;
      const uint32_t array_size =
         layout->set[set].layout->binding[binding].array_size;

      if (state.set[set].use_count[binding] == 0)
         continue;

      struct anv_pipeline_binding *pipe_binding =
         &map->surface_to_descriptor[state.set[set].surface_offsets[binding]];
      for (unsigned i = 0; i < array_size; i++) {
         assert(pipe_binding[i].set == set);
         assert(pipe_binding[i].binding == binding);
         assert(pipe_binding[i].index == i);

         if (dim == GLSL_SAMPLER_DIM_SUBPASS ||
             dim == GLSL_SAMPLER_DIM_SUBPASS_MS)
            pipe_binding[i].input_attachment_index = var->data.index + i;

         pipe_binding[i].write_only =
            (var->data.image.access & ACCESS_NON_READABLE) != 0;
      }
   }

   nir_foreach_function(function, shader) {
      if (!function->impl)
         continue;

      nir_builder_init(&state.builder, function->impl);
      nir_foreach_block(block, function->impl)
         apply_pipeline_layout_block(block, &state);
      nir_metadata_preserve(function->impl, nir_metadata_block_index |
                                            nir_metadata_dominance);
   }

   ralloc_free(mem_ctx);
}