anv: Put image params in the descriptor set buffer on gen8 and earlier
[mesa.git] src/intel/vulkan/anv_nir_apply_pipeline_layout.c
/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "anv_nir.h"
#include "program/prog_parameter.h"
#include "nir/nir_builder.h"
#include "compiler/brw_nir.h"

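/* Per-shader state for this pass: which bindings in each descriptor set the
 * shader actually uses, and the binding-table / sampler-table slots that get
 * assigned to them before lowering.
 */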
struct apply_pipeline_layout_state {
   const struct anv_physical_device *pdevice;

   nir_shader *shader;
   nir_builder builder;

   struct anv_pipeline_layout *layout;
   bool add_bounds_checks;

   bool uses_constants;
   uint8_t constants_offset;
   struct {
      bool desc_buffer_used;
      uint8_t desc_offset;

      BITSET_WORD *used;
      uint8_t *surface_offsets;
      uint8_t *sampler_offsets;
   } set[MAX_SETS];
};

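/* Record that the shader uses the given (set, binding) pair. */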
static void
add_binding(struct apply_pipeline_layout_state *state,
            uint32_t set, uint32_t binding)
{
   const struct anv_descriptor_set_binding_layout *bind_layout =
      &state->layout->set[set].layout->binding[binding];

   BITSET_SET(state->set[set].used, binding);

   /* Only flag the descriptor buffer as used if there's actually data for
    * this binding. This lets us be lazy and call this function constantly
    * without worrying about unnecessarily enabling the buffer.
    */
   if (anv_descriptor_size(bind_layout))
      state->set[set].desc_buffer_used = true;
}

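/* Helpers that walk from an image or texture deref source back to its
 * variable and record the variable's (set, binding) pair.
 */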
static void
add_deref_src_binding(struct apply_pipeline_layout_state *state, nir_src src)
{
   nir_deref_instr *deref = nir_src_as_deref(src);
   nir_variable *var = nir_deref_instr_get_variable(deref);
   add_binding(state, var->data.descriptor_set, var->data.binding);
}

static void
add_tex_src_binding(struct apply_pipeline_layout_state *state,
                    nir_tex_instr *tex, nir_tex_src_type deref_src_type)
{
   int deref_src_idx = nir_tex_instr_src_index(tex, deref_src_type);
   if (deref_src_idx < 0)
      return;

   add_deref_src_binding(state, tex->src[deref_src_idx].src);
}

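/* First pass: scan a block for resource-index, image, texture, and
 * load_constant instructions and record which bindings (and which descriptor
 * buffers) the shader touches.
 */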
static void
get_used_bindings_block(nir_block *block,
                        struct apply_pipeline_layout_state *state)
{
   nir_foreach_instr_safe(instr, block) {
      switch (instr->type) {
      case nir_instr_type_intrinsic: {
         nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
         switch (intrin->intrinsic) {
         case nir_intrinsic_vulkan_resource_index:
            add_binding(state, nir_intrinsic_desc_set(intrin),
                        nir_intrinsic_binding(intrin));
            break;

         case nir_intrinsic_image_deref_load:
         case nir_intrinsic_image_deref_store:
         case nir_intrinsic_image_deref_atomic_add:
         case nir_intrinsic_image_deref_atomic_min:
         case nir_intrinsic_image_deref_atomic_max:
         case nir_intrinsic_image_deref_atomic_and:
         case nir_intrinsic_image_deref_atomic_or:
         case nir_intrinsic_image_deref_atomic_xor:
         case nir_intrinsic_image_deref_atomic_exchange:
         case nir_intrinsic_image_deref_atomic_comp_swap:
         case nir_intrinsic_image_deref_size:
         case nir_intrinsic_image_deref_samples:
         case nir_intrinsic_image_deref_load_param_intel:
         case nir_intrinsic_image_deref_load_raw_intel:
         case nir_intrinsic_image_deref_store_raw_intel:
            add_deref_src_binding(state, intrin->src[0]);
            break;

         case nir_intrinsic_load_constant:
            state->uses_constants = true;
            break;

         default:
            break;
         }
         break;
      }
      case nir_instr_type_tex: {
         nir_tex_instr *tex = nir_instr_as_tex(instr);
         add_tex_src_binding(state, tex, nir_tex_src_texture_deref);
         add_tex_src_binding(state, tex, nir_tex_src_sampler_deref);
         break;
      }
      default:
         continue;
      }
   }
}

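/* Lower vulkan_resource_index to a (binding table index, offset) vec2 in
 * nir_address_format_32bit_index_offset form. Inline uniform blocks instead
 * reference the set's descriptor buffer, with the binding's descriptor
 * offset as the base.
 */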
static void
lower_res_index_intrinsic(nir_intrinsic_instr *intrin,
                          struct apply_pipeline_layout_state *state)
{
   nir_builder *b = &state->builder;

   b->cursor = nir_before_instr(&intrin->instr);

   uint32_t set = nir_intrinsic_desc_set(intrin);
   uint32_t binding = nir_intrinsic_binding(intrin);

   const struct anv_descriptor_set_binding_layout *bind_layout =
      &state->layout->set[set].layout->binding[binding];

   uint32_t surface_index = state->set[set].surface_offsets[binding];
   uint32_t array_size = bind_layout->array_size;

   nir_ssa_def *array_index = nir_ssa_for_src(b, intrin->src[0], 1);
   if (nir_src_is_const(intrin->src[0]) || state->add_bounds_checks)
      array_index = nir_umin(b, array_index, nir_imm_int(b, array_size - 1));

   nir_ssa_def *index;
   if (bind_layout->data & ANV_DESCRIPTOR_INLINE_UNIFORM) {
      /* This is an inline uniform block. Just reference the descriptor set
       * and use the descriptor offset as the base.
       */
      index = nir_imm_ivec2(b, state->set[set].desc_offset,
                            bind_layout->descriptor_offset);
   } else {
      /* We're using nir_address_format_32bit_index_offset */
      index = nir_vec2(b, nir_iadd_imm(b, array_index, surface_index),
                       nir_imm_int(b, 0));
   }

   assert(intrin->dest.is_ssa);
   nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(index));
   nir_instr_remove(&intrin->instr);
}

static void
lower_res_reindex_intrinsic(nir_intrinsic_instr *intrin,
                            struct apply_pipeline_layout_state *state)
{
   nir_builder *b = &state->builder;

   b->cursor = nir_before_instr(&intrin->instr);

   /* For us, the resource indices are just indices into the binding table and
    * array elements are sequential. A resource_reindex just turns into an
    * add of the two indices.
    */
   assert(intrin->src[0].is_ssa && intrin->src[1].is_ssa);
   nir_ssa_def *old_index = intrin->src[0].ssa;
   nir_ssa_def *offset = intrin->src[1].ssa;

   nir_ssa_def *new_index =
      nir_vec2(b, nir_iadd(b, nir_channel(b, old_index, 0), offset),
               nir_channel(b, old_index, 1));

   assert(intrin->dest.is_ssa);
   nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(new_index));
   nir_instr_remove(&intrin->instr);
}

static void
lower_load_vulkan_descriptor(nir_intrinsic_instr *intrin,
                             struct apply_pipeline_layout_state *state)
{
   nir_builder *b = &state->builder;

   b->cursor = nir_before_instr(&intrin->instr);

   /* We follow the nir_address_format_32bit_index_offset model */
   assert(intrin->src[0].is_ssa);
   nir_ssa_def *index = intrin->src[0].ssa;

   assert(intrin->dest.is_ssa);
   nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(index));
   nir_instr_remove(&intrin->instr);
}

static void
lower_get_buffer_size(nir_intrinsic_instr *intrin,
                      struct apply_pipeline_layout_state *state)
{
   nir_builder *b = &state->builder;

   b->cursor = nir_before_instr(&intrin->instr);

   assert(intrin->src[0].is_ssa);
   nir_ssa_def *index = intrin->src[0].ssa;

   /* We're following the nir_address_format_32bit_index_offset model so the
    * binding table index is the first component of the address. The
    * back-end wants a scalar binding table index source.
    */
   nir_instr_rewrite_src(&intrin->instr, &intrin->src[0],
                         nir_src_for_ssa(nir_channel(b, index, 0)));
}

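/* Emit a load_ubo that reads num_components x bit_size bits of descriptor
 * data for the given binding out of the set's descriptor buffer, starting at
 * the binding's descriptor offset plus offset (plus the array element times
 * the descriptor size for array derefs).
 */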
static nir_ssa_def *
build_descriptor_load(nir_deref_instr *deref, unsigned offset,
                      unsigned num_components, unsigned bit_size,
                      struct apply_pipeline_layout_state *state)
{
   nir_variable *var = nir_deref_instr_get_variable(deref);

   unsigned set = var->data.descriptor_set;
   unsigned binding = var->data.binding;
   unsigned array_size =
      state->layout->set[set].layout->binding[binding].array_size;

   const struct anv_descriptor_set_binding_layout *bind_layout =
      &state->layout->set[set].layout->binding[binding];

   nir_builder *b = &state->builder;

   nir_ssa_def *desc_buffer_index =
      nir_imm_int(b, state->set[set].desc_offset);

   nir_ssa_def *desc_offset =
      nir_imm_int(b, bind_layout->descriptor_offset + offset);
   if (deref->deref_type != nir_deref_type_var) {
      assert(deref->deref_type == nir_deref_type_array);

      const unsigned descriptor_size = anv_descriptor_size(bind_layout);
      nir_ssa_def *arr_index = nir_ssa_for_src(b, deref->arr.index, 1);
      if (state->add_bounds_checks)
         arr_index = nir_umin(b, arr_index, nir_imm_int(b, array_size - 1));

      desc_offset = nir_iadd(b, desc_offset,
                             nir_imul_imm(b, arr_index, descriptor_size));
   }

   nir_intrinsic_instr *desc_load =
      nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_ubo);
   desc_load->src[0] = nir_src_for_ssa(desc_buffer_index);
   desc_load->src[1] = nir_src_for_ssa(desc_offset);
   desc_load->num_components = num_components;
   nir_ssa_dest_init(&desc_load->instr, &desc_load->dest,
                     num_components, bit_size, NULL);
   nir_builder_instr_insert(b, &desc_load->instr);

   return &desc_load->dest.ssa;
}

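/* For image_deref_load_param_intel, the image parameters are read straight
 * out of the descriptor set buffer with build_descriptor_load(); each param
 * appears to occupy a 16-byte slot, hence the param * 16 offset below. Every
 * other image intrinsic is rewritten to take a binding-table index rather
 * than a deref.
 */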
static void
lower_image_intrinsic(nir_intrinsic_instr *intrin,
                      struct apply_pipeline_layout_state *state)
{
   nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);

   nir_builder *b = &state->builder;
   b->cursor = nir_before_instr(&intrin->instr);

   if (intrin->intrinsic == nir_intrinsic_image_deref_load_param_intel) {
      b->cursor = nir_instr_remove(&intrin->instr);

      const unsigned param = nir_intrinsic_base(intrin);

      nir_ssa_def *desc =
         build_descriptor_load(deref, param * 16,
                               intrin->dest.ssa.num_components,
                               intrin->dest.ssa.bit_size, state);

      nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(desc));
   } else {
      nir_variable *var = nir_deref_instr_get_variable(deref);

      unsigned set = var->data.descriptor_set;
      unsigned binding = var->data.binding;
      unsigned binding_offset = state->set[set].surface_offsets[binding];
      unsigned array_size =
         state->layout->set[set].layout->binding[binding].array_size;

      nir_ssa_def *index = NULL;
      if (deref->deref_type != nir_deref_type_var) {
         assert(deref->deref_type == nir_deref_type_array);
         index = nir_ssa_for_src(b, deref->arr.index, 1);
         if (state->add_bounds_checks)
            index = nir_umin(b, index, nir_imm_int(b, array_size - 1));
      } else {
         index = nir_imm_int(b, 0);
      }

      index = nir_iadd_imm(b, index, binding_offset);
      nir_rewrite_image_intrinsic(intrin, index, false);
   }
}

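/* load_constant becomes a load_ubo from the shader-constants surface, which
 * gets its own binding table entry (state->constants_offset) when the shader
 * uses constant data.
 */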
static void
lower_load_constant(nir_intrinsic_instr *intrin,
                    struct apply_pipeline_layout_state *state)
{
   nir_builder *b = &state->builder;

   b->cursor = nir_before_instr(&intrin->instr);

   nir_ssa_def *index = nir_imm_int(b, state->constants_offset);
   nir_ssa_def *offset = nir_iadd(b, nir_ssa_for_src(b, intrin->src[0], 1),
                                  nir_imm_int(b, nir_intrinsic_base(intrin)));

   nir_intrinsic_instr *load_ubo =
      nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_ubo);
   load_ubo->num_components = intrin->num_components;
   load_ubo->src[0] = nir_src_for_ssa(index);
   load_ubo->src[1] = nir_src_for_ssa(offset);
   nir_ssa_dest_init(&load_ubo->instr, &load_ubo->dest,
                     intrin->dest.ssa.num_components,
                     intrin->dest.ssa.bit_size, NULL);
   nir_builder_instr_insert(b, &load_ubo->instr);

   nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
                            nir_src_for_ssa(&load_ubo->dest.ssa));
   nir_instr_remove(&intrin->instr);
}

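/* Replace a texture or sampler deref source with a base index into the
 * binding table (or sampler table) plus, for dynamically indexed arrays, a
 * texture/sampler offset source. Constant array indices are folded into the
 * base index.
 */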
static void
lower_tex_deref(nir_tex_instr *tex, nir_tex_src_type deref_src_type,
                unsigned *base_index,
                struct apply_pipeline_layout_state *state)
{
   int deref_src_idx = nir_tex_instr_src_index(tex, deref_src_type);
   if (deref_src_idx < 0)
      return;

   nir_deref_instr *deref = nir_src_as_deref(tex->src[deref_src_idx].src);
   nir_variable *var = nir_deref_instr_get_variable(deref);

   unsigned set = var->data.descriptor_set;
   unsigned binding = var->data.binding;
   unsigned array_size =
      state->layout->set[set].layout->binding[binding].array_size;

   nir_tex_src_type offset_src_type;
   if (deref_src_type == nir_tex_src_texture_deref) {
      offset_src_type = nir_tex_src_texture_offset;
      *base_index = state->set[set].surface_offsets[binding];
   } else {
      assert(deref_src_type == nir_tex_src_sampler_deref);
      offset_src_type = nir_tex_src_sampler_offset;
      *base_index = state->set[set].sampler_offsets[binding];
   }

   nir_ssa_def *index = NULL;
   if (deref->deref_type != nir_deref_type_var) {
      assert(deref->deref_type == nir_deref_type_array);

      if (nir_src_is_const(deref->arr.index)) {
         unsigned arr_index = nir_src_as_uint(deref->arr.index);
         *base_index += MIN2(arr_index, array_size - 1);
      } else {
         nir_builder *b = &state->builder;

         /* From VK_KHR_sampler_ycbcr_conversion:
          *
          *    If sampler Y’CBCR conversion is enabled, the combined image
          *    sampler must be indexed only by constant integral expressions
          *    when aggregated into arrays in shader code, irrespective of
          *    the shaderSampledImageArrayDynamicIndexing feature.
          */
         assert(nir_tex_instr_src_index(tex, nir_tex_src_plane) == -1);

         index = nir_ssa_for_src(b, deref->arr.index, 1);

         if (state->add_bounds_checks)
            index = nir_umin(b, index, nir_imm_int(b, array_size - 1));
      }
   }

   if (index) {
      nir_instr_rewrite_src(&tex->instr, &tex->src[deref_src_idx].src,
                            nir_src_for_ssa(index));
      tex->src[deref_src_idx].src_type = offset_src_type;
   } else {
      nir_tex_instr_remove_src(tex, deref_src_idx);
   }
}

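/* The Y'CbCr lowering adds a constant plane source to the tex instruction;
 * pull it off and return it so it can be folded into the texture and sampler
 * indices.
 */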
static uint32_t
tex_instr_get_and_remove_plane_src(nir_tex_instr *tex)
{
   int plane_src_idx = nir_tex_instr_src_index(tex, nir_tex_src_plane);
   if (plane_src_idx < 0)
      return 0;

   unsigned plane = nir_src_as_uint(tex->src[plane_src_idx].src);

   nir_tex_instr_remove_src(tex, plane_src_idx);

   return plane;
}

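/* Lower a tex instruction: fold the plane source into the texture and
 * sampler indices and rewrite both deref sources to binding-table based
 * indices.
 */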
static void
lower_tex(nir_tex_instr *tex, struct apply_pipeline_layout_state *state)
{
   state->builder.cursor = nir_before_instr(&tex->instr);

   unsigned plane = tex_instr_get_and_remove_plane_src(tex);

   lower_tex_deref(tex, nir_tex_src_texture_deref,
                   &tex->texture_index, state);
   tex->texture_index += plane;

   lower_tex_deref(tex, nir_tex_src_sampler_deref,
                   &tex->sampler_index, state);
   tex->sampler_index += plane;

   /* The backend only ever uses this to mark used surfaces. We don't care
    * about that little optimization so it just needs to be non-zero.
    */
   tex->texture_array_size = 1;
}

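/* Second pass: dispatch each descriptor-related instruction in the block to
 * the matching lowering helper above.
 */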
static void
apply_pipeline_layout_block(nir_block *block,
                            struct apply_pipeline_layout_state *state)
{
   nir_foreach_instr_safe(instr, block) {
      switch (instr->type) {
      case nir_instr_type_intrinsic: {
         nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
         switch (intrin->intrinsic) {
         case nir_intrinsic_vulkan_resource_index:
            lower_res_index_intrinsic(intrin, state);
            break;
         case nir_intrinsic_vulkan_resource_reindex:
            lower_res_reindex_intrinsic(intrin, state);
            break;
         case nir_intrinsic_load_vulkan_descriptor:
            lower_load_vulkan_descriptor(intrin, state);
            break;
         case nir_intrinsic_get_buffer_size:
            lower_get_buffer_size(intrin, state);
            break;
         case nir_intrinsic_image_deref_load:
         case nir_intrinsic_image_deref_store:
         case nir_intrinsic_image_deref_atomic_add:
         case nir_intrinsic_image_deref_atomic_min:
         case nir_intrinsic_image_deref_atomic_max:
         case nir_intrinsic_image_deref_atomic_and:
         case nir_intrinsic_image_deref_atomic_or:
         case nir_intrinsic_image_deref_atomic_xor:
         case nir_intrinsic_image_deref_atomic_exchange:
         case nir_intrinsic_image_deref_atomic_comp_swap:
         case nir_intrinsic_image_deref_size:
         case nir_intrinsic_image_deref_samples:
         case nir_intrinsic_image_deref_load_param_intel:
         case nir_intrinsic_image_deref_load_raw_intel:
         case nir_intrinsic_image_deref_store_raw_intel:
            lower_image_intrinsic(intrin, state);
            break;
         case nir_intrinsic_load_constant:
            lower_load_constant(intrin, state);
            break;
         default:
            break;
         }
         break;
      }
      case nir_instr_type_tex:
         lower_tex(nir_instr_as_tex(instr), state);
         break;
      default:
         continue;
      }
   }
}

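/* Entry point. The pass runs in two phases: first it walks the shader to
 * find the bindings that are actually used, then it lays out the binding
 * table (used descriptor buffers first, then the shader-constants surface,
 * then one surface/sampler slot per used array element and plane) and
 * finally lowers every descriptor-related instruction against that layout.
 */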
void
anv_nir_apply_pipeline_layout(const struct anv_physical_device *pdevice,
                              bool robust_buffer_access,
                              struct anv_pipeline_layout *layout,
                              nir_shader *shader,
                              struct brw_stage_prog_data *prog_data,
                              struct anv_pipeline_bind_map *map)
{
   struct apply_pipeline_layout_state state = {
      .pdevice = pdevice,
      .shader = shader,
      .layout = layout,
      .add_bounds_checks = robust_buffer_access,
   };

   void *mem_ctx = ralloc_context(NULL);

   for (unsigned s = 0; s < layout->num_sets; s++) {
      const unsigned count = layout->set[s].layout->binding_count;
      const unsigned words = BITSET_WORDS(count);
      state.set[s].used = rzalloc_array(mem_ctx, BITSET_WORD, words);
      state.set[s].surface_offsets = rzalloc_array(mem_ctx, uint8_t, count);
      state.set[s].sampler_offsets = rzalloc_array(mem_ctx, uint8_t, count);
   }

   nir_foreach_function(function, shader) {
      if (!function->impl)
         continue;

      nir_foreach_block(block, function->impl)
         get_used_bindings_block(block, &state);
   }

   for (unsigned s = 0; s < layout->num_sets; s++) {
      if (state.set[s].desc_buffer_used) {
         map->surface_to_descriptor[map->surface_count] =
            (struct anv_pipeline_binding) {
               .set = ANV_DESCRIPTOR_SET_DESCRIPTORS,
               .binding = s,
            };
         state.set[s].desc_offset = map->surface_count;
         map->surface_count++;
      }
   }

   if (state.uses_constants) {
      state.constants_offset = map->surface_count;
      map->surface_to_descriptor[map->surface_count].set =
         ANV_DESCRIPTOR_SET_SHADER_CONSTANTS;
      map->surface_count++;
   }

   for (uint32_t set = 0; set < layout->num_sets; set++) {
      struct anv_descriptor_set_layout *set_layout = layout->set[set].layout;

      BITSET_WORD b, _tmp;
      BITSET_FOREACH_SET(b, _tmp, state.set[set].used,
                         set_layout->binding_count) {
         struct anv_descriptor_set_binding_layout *binding =
            &set_layout->binding[b];

         if (binding->array_size == 0)
            continue;

         if (binding->data & ANV_DESCRIPTOR_SURFACE_STATE) {
            state.set[set].surface_offsets[b] = map->surface_count;
            struct anv_sampler **samplers = binding->immutable_samplers;
            for (unsigned i = 0; i < binding->array_size; i++) {
               uint8_t planes = samplers ? samplers[i]->n_planes : 1;
               for (uint8_t p = 0; p < planes; p++) {
                  map->surface_to_descriptor[map->surface_count++] =
                     (struct anv_pipeline_binding) {
                        .set = set,
                        .binding = b,
                        .index = i,
                        .plane = p,
                     };
               }
            }
         }

         if (binding->data & ANV_DESCRIPTOR_SAMPLER_STATE) {
            state.set[set].sampler_offsets[b] = map->sampler_count;
            struct anv_sampler **samplers = binding->immutable_samplers;
            for (unsigned i = 0; i < binding->array_size; i++) {
               uint8_t planes = samplers ? samplers[i]->n_planes : 1;
               for (uint8_t p = 0; p < planes; p++) {
                  map->sampler_to_descriptor[map->sampler_count++] =
                     (struct anv_pipeline_binding) {
                        .set = set,
                        .binding = b,
                        .index = i,
                        .plane = p,
                     };
               }
            }
         }
      }
   }

   nir_foreach_variable(var, &shader->uniforms) {
      const struct glsl_type *glsl_type = glsl_without_array(var->type);

      if (!glsl_type_is_image(glsl_type))
         continue;

      enum glsl_sampler_dim dim = glsl_get_sampler_dim(glsl_type);

      const uint32_t set = var->data.descriptor_set;
      const uint32_t binding = var->data.binding;
      const uint32_t array_size =
         layout->set[set].layout->binding[binding].array_size;

      if (!BITSET_TEST(state.set[set].used, binding))
         continue;

      struct anv_pipeline_binding *pipe_binding =
         &map->surface_to_descriptor[state.set[set].surface_offsets[binding]];
      for (unsigned i = 0; i < array_size; i++) {
         assert(pipe_binding[i].set == set);
         assert(pipe_binding[i].binding == binding);
         assert(pipe_binding[i].index == i);

         if (dim == GLSL_SAMPLER_DIM_SUBPASS ||
             dim == GLSL_SAMPLER_DIM_SUBPASS_MS)
            pipe_binding[i].input_attachment_index = var->data.index + i;

         pipe_binding[i].write_only =
            (var->data.image.access & ACCESS_NON_READABLE) != 0;
      }
   }

   nir_foreach_function(function, shader) {
      if (!function->impl)
         continue;

      nir_builder_init(&state.builder, function->impl);
      nir_foreach_block(block, function->impl)
         apply_pipeline_layout_block(block, &state);
      nir_metadata_preserve(function->impl, nir_metadata_block_index |
                                            nir_metadata_dominance);
   }

   ralloc_free(mem_ctx);
}