src/intel/vulkan/anv_nir_apply_pipeline_layout.c
/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "anv_nir.h"
#include "program/prog_parameter.h"
#include "nir/nir_builder.h"
#include "compiler/brw_nir.h"

struct apply_pipeline_layout_state {
   const struct anv_physical_device *pdevice;

   nir_shader *shader;
   nir_builder builder;

   struct anv_pipeline_layout *layout;
   bool add_bounds_checks;

   unsigned first_image_uniform;

   bool uses_constants;
   uint8_t constants_offset;
   struct {
      bool desc_buffer_used;
      uint8_t desc_offset;

      BITSET_WORD *used;
      uint8_t *surface_offsets;
      uint8_t *sampler_offsets;
      uint8_t *image_offsets;
   } set[MAX_SETS];
};

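/* Record that the given (set, binding) pair is referenced by the shader and,
 * if the binding actually carries descriptor data, that its descriptor
 * buffer is needed.
 */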
static void
add_binding(struct apply_pipeline_layout_state *state,
            uint32_t set, uint32_t binding)
{
   const struct anv_descriptor_set_binding_layout *bind_layout =
      &state->layout->set[set].layout->binding[binding];

   BITSET_SET(state->set[set].used, binding);

   /* Only flag the descriptor buffer as used if there's actually data for
    * this binding. This lets us be lazy and call this function constantly
    * without worrying about unnecessarily enabling the buffer.
    */
   if (anv_descriptor_size(bind_layout))
      state->set[set].desc_buffer_used = true;
}

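/* Resolve the variable behind a descriptor deref (or behind a texture
 * instruction's deref source) and record its (set, binding) as used.
 */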
static void
add_deref_src_binding(struct apply_pipeline_layout_state *state, nir_src src)
{
   nir_deref_instr *deref = nir_src_as_deref(src);
   nir_variable *var = nir_deref_instr_get_variable(deref);
   add_binding(state, var->data.descriptor_set, var->data.binding);
}

static void
add_tex_src_binding(struct apply_pipeline_layout_state *state,
                    nir_tex_instr *tex, nir_tex_src_type deref_src_type)
{
   int deref_src_idx = nir_tex_instr_src_index(tex, deref_src_type);
   if (deref_src_idx < 0)
      return;

   add_deref_src_binding(state, tex->src[deref_src_idx].src);
}

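/* First pass: walk a block and record every descriptor binding referenced by
 * resource-index intrinsics, image intrinsics, and texture/sampler derefs.
 * Also note whether the shader loads from its constant data.
 */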
static void
get_used_bindings_block(nir_block *block,
                        struct apply_pipeline_layout_state *state)
{
   nir_foreach_instr_safe(instr, block) {
      switch (instr->type) {
      case nir_instr_type_intrinsic: {
         nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
         switch (intrin->intrinsic) {
         case nir_intrinsic_vulkan_resource_index:
            add_binding(state, nir_intrinsic_desc_set(intrin),
                        nir_intrinsic_binding(intrin));
            break;

         case nir_intrinsic_image_deref_load:
         case nir_intrinsic_image_deref_store:
         case nir_intrinsic_image_deref_atomic_add:
         case nir_intrinsic_image_deref_atomic_min:
         case nir_intrinsic_image_deref_atomic_max:
         case nir_intrinsic_image_deref_atomic_and:
         case nir_intrinsic_image_deref_atomic_or:
         case nir_intrinsic_image_deref_atomic_xor:
         case nir_intrinsic_image_deref_atomic_exchange:
         case nir_intrinsic_image_deref_atomic_comp_swap:
         case nir_intrinsic_image_deref_size:
         case nir_intrinsic_image_deref_samples:
         case nir_intrinsic_image_deref_load_param_intel:
         case nir_intrinsic_image_deref_load_raw_intel:
         case nir_intrinsic_image_deref_store_raw_intel:
            add_deref_src_binding(state, intrin->src[0]);
            break;

         case nir_intrinsic_load_constant:
            state->uses_constants = true;
            break;

         default:
            break;
         }
         break;
      }
      case nir_instr_type_tex: {
         nir_tex_instr *tex = nir_instr_as_tex(instr);
         add_tex_src_binding(state, tex, nir_tex_src_texture_deref);
         add_tex_src_binding(state, tex, nir_tex_src_sampler_deref);
         break;
      }
      default:
         continue;
      }
   }
}

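/* Lower nir_intrinsic_vulkan_resource_index to a flat binding table index:
 * the binding's surface offset plus the array index, bounds-clamped as
 * needed.
 */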
static void
lower_res_index_intrinsic(nir_intrinsic_instr *intrin,
                          struct apply_pipeline_layout_state *state)
{
   nir_builder *b = &state->builder;

   b->cursor = nir_before_instr(&intrin->instr);

   uint32_t set = nir_intrinsic_desc_set(intrin);
   uint32_t binding = nir_intrinsic_binding(intrin);

   uint32_t surface_index = state->set[set].surface_offsets[binding];
   uint32_t array_size =
      state->layout->set[set].layout->binding[binding].array_size;

   nir_ssa_def *array_index = nir_ssa_for_src(b, intrin->src[0], 1);
   if (nir_src_is_const(intrin->src[0]) || state->add_bounds_checks)
      array_index = nir_umin(b, array_index, nir_imm_int(b, array_size - 1));

   nir_ssa_def *block_index = nir_iadd_imm(b, array_index, surface_index);

   assert(intrin->dest.is_ssa);
   nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(block_index));
   nir_instr_remove(&intrin->instr);
}

static void
lower_res_reindex_intrinsic(nir_intrinsic_instr *intrin,
                            struct apply_pipeline_layout_state *state)
{
   nir_builder *b = &state->builder;

   b->cursor = nir_before_instr(&intrin->instr);

   /* For us, the resource indices are just indices into the binding table and
    * array elements are sequential. A resource_reindex just turns into an
    * add of the two indices.
    */
   assert(intrin->src[0].is_ssa && intrin->src[1].is_ssa);
   nir_ssa_def *new_index = nir_iadd(b, intrin->src[0].ssa,
                                        intrin->src[1].ssa);

   assert(intrin->dest.is_ssa);
   nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(new_index));
   nir_instr_remove(&intrin->instr);
}

static void
lower_load_vulkan_descriptor(nir_intrinsic_instr *intrin,
                             struct apply_pipeline_layout_state *state)
{
   nir_builder *b = &state->builder;

   b->cursor = nir_before_instr(&intrin->instr);

   /* We follow the nir_address_format_vk_index_offset model */
   assert(intrin->src[0].is_ssa);
   nir_ssa_def *vec2 = nir_vec2(b, intrin->src[0].ssa, nir_imm_int(b, 0));

   assert(intrin->dest.is_ssa);
   nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(vec2));
   nir_instr_remove(&intrin->instr);
}

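/* Lower image derefs either to a load_uniform of the brw_image_param data
 * (for image_deref_load_param_intel) or to a flat binding table index via
 * brw_nir_rewrite_image_intrinsic().
 */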
static void
lower_image_intrinsic(nir_intrinsic_instr *intrin,
                      struct apply_pipeline_layout_state *state)
{
   nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
   nir_variable *var = nir_deref_instr_get_variable(deref);

   unsigned set = var->data.descriptor_set;
   unsigned binding = var->data.binding;
   unsigned array_size =
      state->layout->set[set].layout->binding[binding].array_size;

   nir_builder *b = &state->builder;
   b->cursor = nir_before_instr(&intrin->instr);

   nir_ssa_def *index = NULL;
   if (deref->deref_type != nir_deref_type_var) {
      assert(deref->deref_type == nir_deref_type_array);
      index = nir_ssa_for_src(b, deref->arr.index, 1);
      if (state->add_bounds_checks)
         index = nir_umin(b, index, nir_imm_int(b, array_size - 1));
   } else {
      index = nir_imm_int(b, 0);
   }

   if (intrin->intrinsic == nir_intrinsic_image_deref_load_param_intel) {
      b->cursor = nir_instr_remove(&intrin->instr);

      nir_intrinsic_instr *load =
         nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_uniform);

      nir_intrinsic_set_base(load, state->first_image_uniform +
                                   state->set[set].image_offsets[binding] *
                                   BRW_IMAGE_PARAM_SIZE * 4);
      nir_intrinsic_set_range(load, array_size * BRW_IMAGE_PARAM_SIZE * 4);

      const unsigned param = nir_intrinsic_base(intrin);
      nir_ssa_def *offset =
         nir_imul(b, index, nir_imm_int(b, BRW_IMAGE_PARAM_SIZE * 4));
      offset = nir_iadd(b, offset, nir_imm_int(b, param * 16));
      load->src[0] = nir_src_for_ssa(offset);

      load->num_components = intrin->dest.ssa.num_components;
      nir_ssa_dest_init(&load->instr, &load->dest,
                        intrin->dest.ssa.num_components,
                        intrin->dest.ssa.bit_size, NULL);
      nir_builder_instr_insert(b, &load->instr);

      nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
                               nir_src_for_ssa(&load->dest.ssa));
   } else {
      unsigned binding_offset = state->set[set].surface_offsets[binding];
      index = nir_iadd(b, index, nir_imm_int(b, binding_offset));
      brw_nir_rewrite_image_intrinsic(intrin, index);
   }
}

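/* Turn load_constant into a UBO load from the shader constant data surface
 * allocated at constants_offset, adding the intrinsic's base to the offset.
 */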
static void
lower_load_constant(nir_intrinsic_instr *intrin,
                    struct apply_pipeline_layout_state *state)
{
   nir_builder *b = &state->builder;

   b->cursor = nir_before_instr(&intrin->instr);

   nir_ssa_def *index = nir_imm_int(b, state->constants_offset);
   nir_ssa_def *offset = nir_iadd(b, nir_ssa_for_src(b, intrin->src[0], 1),
                                  nir_imm_int(b, nir_intrinsic_base(intrin)));

   nir_intrinsic_instr *load_ubo =
      nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_ubo);
   load_ubo->num_components = intrin->num_components;
   load_ubo->src[0] = nir_src_for_ssa(index);
   load_ubo->src[1] = nir_src_for_ssa(offset);
   nir_ssa_dest_init(&load_ubo->instr, &load_ubo->dest,
                     intrin->dest.ssa.num_components,
                     intrin->dest.ssa.bit_size, NULL);
   nir_builder_instr_insert(b, &load_ubo->instr);

   nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
                            nir_src_for_ssa(&load_ubo->dest.ssa));
   nir_instr_remove(&intrin->instr);
}

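/* Replace a texture or sampler deref source with a binding table base index
 * plus, for dynamically indexed arrays, a texture/sampler offset source.
 * Constant array indices are folded directly into *base_index.
 */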
static void
lower_tex_deref(nir_tex_instr *tex, nir_tex_src_type deref_src_type,
                unsigned *base_index,
                struct apply_pipeline_layout_state *state)
{
   int deref_src_idx = nir_tex_instr_src_index(tex, deref_src_type);
   if (deref_src_idx < 0)
      return;

   nir_deref_instr *deref = nir_src_as_deref(tex->src[deref_src_idx].src);
   nir_variable *var = nir_deref_instr_get_variable(deref);

   unsigned set = var->data.descriptor_set;
   unsigned binding = var->data.binding;
   unsigned array_size =
      state->layout->set[set].layout->binding[binding].array_size;

   nir_tex_src_type offset_src_type;
   if (deref_src_type == nir_tex_src_texture_deref) {
      offset_src_type = nir_tex_src_texture_offset;
      *base_index = state->set[set].surface_offsets[binding];
   } else {
      assert(deref_src_type == nir_tex_src_sampler_deref);
      offset_src_type = nir_tex_src_sampler_offset;
      *base_index = state->set[set].sampler_offsets[binding];
   }

   nir_ssa_def *index = NULL;
   if (deref->deref_type != nir_deref_type_var) {
      assert(deref->deref_type == nir_deref_type_array);

      if (nir_src_is_const(deref->arr.index)) {
         unsigned arr_index = nir_src_as_uint(deref->arr.index);
         *base_index += MIN2(arr_index, array_size - 1);
      } else {
         nir_builder *b = &state->builder;

         /* From VK_KHR_sampler_ycbcr_conversion:
          *
          *    If sampler Y’CBCR conversion is enabled, the combined image
          *    sampler must be indexed only by constant integral expressions
          *    when aggregated into arrays in shader code, irrespective of
          *    the shaderSampledImageArrayDynamicIndexing feature.
          */
         assert(nir_tex_instr_src_index(tex, nir_tex_src_plane) == -1);

         index = nir_ssa_for_src(b, deref->arr.index, 1);

         if (state->add_bounds_checks)
            index = nir_umin(b, index, nir_imm_int(b, array_size - 1));
      }
   }

   if (index) {
      nir_instr_rewrite_src(&tex->instr, &tex->src[deref_src_idx].src,
                            nir_src_for_ssa(index));
      tex->src[deref_src_idx].src_type = offset_src_type;
   } else {
      nir_tex_instr_remove_src(tex, deref_src_idx);
   }
}

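/* Return the constant plane index from a nir_tex_src_plane source, if
 * present, and strip that source from the instruction. Returns 0 when the
 * source is absent.
 */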
static uint32_t
tex_instr_get_and_remove_plane_src(nir_tex_instr *tex)
{
   int plane_src_idx = nir_tex_instr_src_index(tex, nir_tex_src_plane);
   if (plane_src_idx < 0)
      return 0;

   unsigned plane = nir_src_as_uint(tex->src[plane_src_idx].src);

   nir_tex_instr_remove_src(tex, plane_src_idx);

   return plane;
}

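/* Lower the texture and sampler derefs on a tex instruction to binding table
 * indices, applying the multi-plane (YCbCr) offset to both.
 */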
static void
lower_tex(nir_tex_instr *tex, struct apply_pipeline_layout_state *state)
{
   state->builder.cursor = nir_before_instr(&tex->instr);

   unsigned plane = tex_instr_get_and_remove_plane_src(tex);

   lower_tex_deref(tex, nir_tex_src_texture_deref,
                   &tex->texture_index, state);
   tex->texture_index += plane;

   lower_tex_deref(tex, nir_tex_src_sampler_deref,
                   &tex->sampler_index, state);
   tex->sampler_index += plane;

   /* The backend only ever uses this to mark used surfaces. We don't care
    * about that little optimization so it just needs to be non-zero.
    */
   tex->texture_array_size = 1;
}

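/* Second pass: dispatch each descriptor-related instruction in a block to
 * its lowering helper.
 */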
static void
apply_pipeline_layout_block(nir_block *block,
                            struct apply_pipeline_layout_state *state)
{
   nir_foreach_instr_safe(instr, block) {
      switch (instr->type) {
      case nir_instr_type_intrinsic: {
         nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
         switch (intrin->intrinsic) {
         case nir_intrinsic_vulkan_resource_index:
            lower_res_index_intrinsic(intrin, state);
            break;
         case nir_intrinsic_vulkan_resource_reindex:
            lower_res_reindex_intrinsic(intrin, state);
            break;
         case nir_intrinsic_load_vulkan_descriptor:
            lower_load_vulkan_descriptor(intrin, state);
            break;
         case nir_intrinsic_image_deref_load:
         case nir_intrinsic_image_deref_store:
         case nir_intrinsic_image_deref_atomic_add:
         case nir_intrinsic_image_deref_atomic_min:
         case nir_intrinsic_image_deref_atomic_max:
         case nir_intrinsic_image_deref_atomic_and:
         case nir_intrinsic_image_deref_atomic_or:
         case nir_intrinsic_image_deref_atomic_xor:
         case nir_intrinsic_image_deref_atomic_exchange:
         case nir_intrinsic_image_deref_atomic_comp_swap:
         case nir_intrinsic_image_deref_size:
         case nir_intrinsic_image_deref_samples:
         case nir_intrinsic_image_deref_load_param_intel:
         case nir_intrinsic_image_deref_load_raw_intel:
         case nir_intrinsic_image_deref_store_raw_intel:
            lower_image_intrinsic(intrin, state);
            break;
         case nir_intrinsic_load_constant:
            lower_load_constant(intrin, state);
            break;
         default:
            break;
         }
         break;
      }
      case nir_instr_type_tex:
         lower_tex(nir_instr_as_tex(instr), state);
         break;
      default:
         continue;
      }
   }
}

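/* Fill one vec4's worth of push-constant params: the first n entries point
 * at consecutive dwords starting at 'offset' in the push constant block, and
 * the remainder are padded with the built-in zero.
 */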
static void
setup_vec4_uniform_value(uint32_t *params, uint32_t offset, unsigned n)
{
   for (unsigned i = 0; i < n; ++i)
      params[i] = ANV_PARAM_PUSH(offset + i * sizeof(uint32_t));

   for (unsigned i = n; i < 4; ++i)
      params[i] = BRW_PARAM_BUILTIN_ZERO;
}

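/* Apply the pipeline layout to a shader: gather which (set, binding) pairs
 * the shader actually uses, lay out the pipeline bind map (binding table
 * surfaces, samplers, image params) for them, set up push-constant params
 * for any image params, and finally rewrite all descriptor derefs and Vulkan
 * resource intrinsics to the resulting flat indices.
 */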
void
anv_nir_apply_pipeline_layout(const struct anv_physical_device *pdevice,
                              bool robust_buffer_access,
                              struct anv_pipeline_layout *layout,
                              nir_shader *shader,
                              struct brw_stage_prog_data *prog_data,
                              struct anv_pipeline_bind_map *map)
{
   struct apply_pipeline_layout_state state = {
      .pdevice = pdevice,
      .shader = shader,
      .layout = layout,
      .add_bounds_checks = robust_buffer_access,
   };

   void *mem_ctx = ralloc_context(NULL);

   for (unsigned s = 0; s < layout->num_sets; s++) {
      const unsigned count = layout->set[s].layout->binding_count;
      const unsigned words = BITSET_WORDS(count);
      state.set[s].used = rzalloc_array(mem_ctx, BITSET_WORD, words);
      state.set[s].surface_offsets = rzalloc_array(mem_ctx, uint8_t, count);
      state.set[s].sampler_offsets = rzalloc_array(mem_ctx, uint8_t, count);
      state.set[s].image_offsets = rzalloc_array(mem_ctx, uint8_t, count);
   }

   nir_foreach_function(function, shader) {
      if (!function->impl)
         continue;

      nir_foreach_block(block, function->impl)
         get_used_bindings_block(block, &state);
   }

   for (unsigned s = 0; s < layout->num_sets; s++) {
      if (state.set[s].desc_buffer_used) {
         map->surface_to_descriptor[map->surface_count] =
            (struct anv_pipeline_binding) {
               .set = ANV_DESCRIPTOR_SET_DESCRIPTORS,
               .binding = s,
            };
         state.set[s].desc_offset = map->surface_count;
         map->surface_count++;
      }
   }

   if (state.uses_constants) {
      state.constants_offset = map->surface_count;
      map->surface_to_descriptor[map->surface_count].set =
         ANV_DESCRIPTOR_SET_SHADER_CONSTANTS;
      map->surface_count++;
   }

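   /* Assign binding table, sampler, and image param slots for every binding
    * that the shader was found to use above.
    */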
   for (uint32_t set = 0; set < layout->num_sets; set++) {
      struct anv_descriptor_set_layout *set_layout = layout->set[set].layout;

      BITSET_WORD b, _tmp;
      BITSET_FOREACH_SET(b, _tmp, state.set[set].used,
                         set_layout->binding_count) {
         struct anv_descriptor_set_binding_layout *binding =
            &set_layout->binding[b];

         if (binding->array_size == 0)
            continue;

         if (binding->data & ANV_DESCRIPTOR_SURFACE_STATE) {
            state.set[set].surface_offsets[b] = map->surface_count;
            struct anv_sampler **samplers = binding->immutable_samplers;
            for (unsigned i = 0; i < binding->array_size; i++) {
               uint8_t planes = samplers ? samplers[i]->n_planes : 1;
               for (uint8_t p = 0; p < planes; p++) {
                  map->surface_to_descriptor[map->surface_count++] =
                     (struct anv_pipeline_binding) {
                        .set = set,
                        .binding = b,
                        .index = i,
                        .plane = p,
                     };
               }
            }
         }

         if (binding->data & ANV_DESCRIPTOR_SAMPLER_STATE) {
            state.set[set].sampler_offsets[b] = map->sampler_count;
            struct anv_sampler **samplers = binding->immutable_samplers;
            for (unsigned i = 0; i < binding->array_size; i++) {
               uint8_t planes = samplers ? samplers[i]->n_planes : 1;
               for (uint8_t p = 0; p < planes; p++) {
                  map->sampler_to_descriptor[map->sampler_count++] =
                     (struct anv_pipeline_binding) {
                        .set = set,
                        .binding = b,
                        .index = i,
                        .plane = p,
                     };
               }
            }
         }

         if (binding->data & ANV_DESCRIPTOR_IMAGE_PARAM) {
            state.set[set].image_offsets[b] = map->image_param_count;
            map->image_param_count += binding->array_size;
         }
      }
   }

   if (map->image_param_count > 0) {
      assert(map->image_param_count <= MAX_GEN8_IMAGES);
      assert(shader->num_uniforms == prog_data->nr_params * 4);
      state.first_image_uniform = shader->num_uniforms;
      uint32_t *param = brw_stage_prog_data_add_params(prog_data,
                                                       map->image_param_count *
                                                       BRW_IMAGE_PARAM_SIZE);
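      /* Walk a NULL-based anv_push_constants pointer purely to compute the
       * byte offsets of each brw_image_param field within the push constant
       * block; the pointer is never dereferenced, only its member addresses
       * are converted to offsets for ANV_PARAM_PUSH.
       */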
      struct anv_push_constants *null_data = NULL;
      const struct brw_image_param *image_param = null_data->images;
      for (uint32_t i = 0; i < map->image_param_count; i++) {
         setup_vec4_uniform_value(param + BRW_IMAGE_PARAM_OFFSET_OFFSET,
                                  (uintptr_t)image_param->offset, 2);
         setup_vec4_uniform_value(param + BRW_IMAGE_PARAM_SIZE_OFFSET,
                                  (uintptr_t)image_param->size, 3);
         setup_vec4_uniform_value(param + BRW_IMAGE_PARAM_STRIDE_OFFSET,
                                  (uintptr_t)image_param->stride, 4);
         setup_vec4_uniform_value(param + BRW_IMAGE_PARAM_TILING_OFFSET,
                                  (uintptr_t)image_param->tiling, 3);
         setup_vec4_uniform_value(param + BRW_IMAGE_PARAM_SWIZZLING_OFFSET,
                                  (uintptr_t)image_param->swizzling, 2);

         param += BRW_IMAGE_PARAM_SIZE;
         image_param++;
      }
      assert(param == prog_data->param + prog_data->nr_params);

      shader->num_uniforms += map->image_param_count *
                              BRW_IMAGE_PARAM_SIZE * 4;
      assert(shader->num_uniforms == prog_data->nr_params * 4);
   }

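   /* Fill in input attachment indices and write-only flags on the binding
    * table entries that back image uniforms.
    */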
   nir_foreach_variable(var, &shader->uniforms) {
      const struct glsl_type *glsl_type = glsl_without_array(var->type);

      if (!glsl_type_is_image(glsl_type))
         continue;

      enum glsl_sampler_dim dim = glsl_get_sampler_dim(glsl_type);

      const uint32_t set = var->data.descriptor_set;
      const uint32_t binding = var->data.binding;
      const uint32_t array_size =
         layout->set[set].layout->binding[binding].array_size;

      if (!BITSET_TEST(state.set[set].used, binding))
         continue;

      struct anv_pipeline_binding *pipe_binding =
         &map->surface_to_descriptor[state.set[set].surface_offsets[binding]];
      for (unsigned i = 0; i < array_size; i++) {
         assert(pipe_binding[i].set == set);
         assert(pipe_binding[i].binding == binding);
         assert(pipe_binding[i].index == i);

         if (dim == GLSL_SAMPLER_DIM_SUBPASS ||
             dim == GLSL_SAMPLER_DIM_SUBPASS_MS)
            pipe_binding[i].input_attachment_index = var->data.index + i;

         pipe_binding[i].write_only =
            (var->data.image.access & ACCESS_NON_READABLE) != 0;
      }
   }

   nir_foreach_function(function, shader) {
      if (!function->impl)
         continue;

      nir_builder_init(&state.builder, function->impl);
      nir_foreach_block(block, function->impl)
         apply_pipeline_layout_block(block, &state);
      nir_metadata_preserve(function->impl, nir_metadata_block_index |
                                            nir_metadata_dominance);
   }

   ralloc_free(mem_ctx);
}