/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "anv_nir.h"
#include "program/prog_parameter.h"
#include "nir/nir_builder.h"
#include "compiler/brw_nir.h"

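/* This pass lowers Vulkan descriptor-set/binding references in a NIR shader
 * to the flat binding-table and sampler indices the Intel backend expects.
 * It first walks the shader to find which (set, binding) pairs are actually
 * used, then assigns binding-table slots for those bindings, and finally
 * rewrites resource-index, descriptor, image, texture, and load_constant
 * access in place.
 */
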
struct apply_pipeline_layout_state {
   const struct anv_physical_device *pdevice;

   nir_shader *shader;
   nir_builder builder;

   struct anv_pipeline_layout *layout;
   bool add_bounds_checks;

   bool uses_constants;
   uint8_t constants_offset;
   struct {
      bool desc_buffer_used;
      uint8_t desc_offset;

      BITSET_WORD *used;
      uint8_t *surface_offsets;
      uint8_t *sampler_offsets;
   } set[MAX_SETS];
};

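/* For example (illustrative numbers, not from the original source): if set 0
 * has two used single-element bindings that were assigned binding-table
 * slots 4 and 5, then set[0].surface_offsets holds { 4, 5 } and the lowering
 * functions below add any shader array index on top of the per-binding slot.
 */
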
static void
add_binding(struct apply_pipeline_layout_state *state,
            uint32_t set, uint32_t binding)
{
   const struct anv_descriptor_set_binding_layout *bind_layout =
      &state->layout->set[set].layout->binding[binding];

   BITSET_SET(state->set[set].used, binding);

   /* Only flag the descriptor buffer as used if there's actually data for
    * this binding.  This lets us be lazy and call this function constantly
    * without worrying about unnecessarily enabling the buffer.
    */
   if (anv_descriptor_size(bind_layout))
      state->set[set].desc_buffer_used = true;
}

static void
add_deref_src_binding(struct apply_pipeline_layout_state *state, nir_src src)
{
   nir_deref_instr *deref = nir_src_as_deref(src);
   nir_variable *var = nir_deref_instr_get_variable(deref);
   add_binding(state, var->data.descriptor_set, var->data.binding);
}

static void
add_tex_src_binding(struct apply_pipeline_layout_state *state,
                    nir_tex_instr *tex, nir_tex_src_type deref_src_type)
{
   int deref_src_idx = nir_tex_instr_src_index(tex, deref_src_type);
   if (deref_src_idx < 0)
      return;

   add_deref_src_binding(state, tex->src[deref_src_idx].src);
}

static void
get_used_bindings_block(nir_block *block,
                        struct apply_pipeline_layout_state *state)
{
   nir_foreach_instr_safe(instr, block) {
      switch (instr->type) {
      case nir_instr_type_intrinsic: {
         nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
         switch (intrin->intrinsic) {
         case nir_intrinsic_vulkan_resource_index:
            add_binding(state, nir_intrinsic_desc_set(intrin),
                        nir_intrinsic_binding(intrin));
            break;

         case nir_intrinsic_image_deref_load:
         case nir_intrinsic_image_deref_store:
         case nir_intrinsic_image_deref_atomic_add:
         case nir_intrinsic_image_deref_atomic_min:
         case nir_intrinsic_image_deref_atomic_max:
         case nir_intrinsic_image_deref_atomic_and:
         case nir_intrinsic_image_deref_atomic_or:
         case nir_intrinsic_image_deref_atomic_xor:
         case nir_intrinsic_image_deref_atomic_exchange:
         case nir_intrinsic_image_deref_atomic_comp_swap:
         case nir_intrinsic_image_deref_size:
         case nir_intrinsic_image_deref_samples:
         case nir_intrinsic_image_deref_load_param_intel:
         case nir_intrinsic_image_deref_load_raw_intel:
         case nir_intrinsic_image_deref_store_raw_intel:
            add_deref_src_binding(state, intrin->src[0]);
            break;

         case nir_intrinsic_load_constant:
            state->uses_constants = true;
            break;

         default:
            break;
         }
         break;
      }
      case nir_instr_type_tex: {
         nir_tex_instr *tex = nir_instr_as_tex(instr);
         add_tex_src_binding(state, tex, nir_tex_src_texture_deref);
         add_tex_src_binding(state, tex, nir_tex_src_sampler_deref);
         break;
      }
      default:
         continue;
      }
   }
}

static void
lower_res_index_intrinsic(nir_intrinsic_instr *intrin,
                          struct apply_pipeline_layout_state *state)
{
   nir_builder *b = &state->builder;

   b->cursor = nir_before_instr(&intrin->instr);

   uint32_t set = nir_intrinsic_desc_set(intrin);
   uint32_t binding = nir_intrinsic_binding(intrin);

   const struct anv_descriptor_set_binding_layout *bind_layout =
      &state->layout->set[set].layout->binding[binding];

   uint32_t surface_index = state->set[set].surface_offsets[binding];
   uint32_t array_size = bind_layout->array_size;

   nir_ssa_def *array_index = nir_ssa_for_src(b, intrin->src[0], 1);
   if (nir_src_is_const(intrin->src[0]) || state->add_bounds_checks)
      array_index = nir_umin(b, array_index, nir_imm_int(b, array_size - 1));

   nir_ssa_def *index;
   if (bind_layout->data & ANV_DESCRIPTOR_INLINE_UNIFORM) {
      /* This is an inline uniform block.  Just reference the descriptor set
       * and use the descriptor offset as the base.
       */
      index = nir_imm_ivec2(b, state->set[set].desc_offset,
                            bind_layout->descriptor_offset);
   } else {
      /* We're using nir_address_format_32bit_index_offset */
      index = nir_vec2(b, nir_iadd_imm(b, array_index, surface_index),
                       nir_imm_int(b, 0));
   }

   assert(intrin->dest.is_ssa);
   nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(index));
   nir_instr_remove(&intrin->instr);
}

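/* Illustrative sketch (not part of the original source): for a binding whose
 * binding-table slot (surface_index) is 3 and whose array_size is 4, with
 * bounds checks enabled the lowering above turns
 *
 *    vec1 32 ssa_1 = vulkan_resource_index ssa_0 (set=N, binding=M)
 *
 * into the nir_address_format_32bit_index_offset pair
 *
 *    vec2 32 ssa_1 = vec2(3 + umin(ssa_0, 3), 0)
 *
 * i.e. channel 0 is a flat binding-table index and channel 1 a byte offset.
 */
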
static void
lower_res_reindex_intrinsic(nir_intrinsic_instr *intrin,
                            struct apply_pipeline_layout_state *state)
{
   nir_builder *b = &state->builder;

   b->cursor = nir_before_instr(&intrin->instr);

   /* For us, the resource indices are just indices into the binding table and
    * array elements are sequential.  A resource_reindex just turns into an
    * add of the two indices.
    */
   assert(intrin->src[0].is_ssa && intrin->src[1].is_ssa);
   nir_ssa_def *old_index = intrin->src[0].ssa;
   nir_ssa_def *offset = intrin->src[1].ssa;

   nir_ssa_def *new_index =
      nir_vec2(b, nir_iadd(b, nir_channel(b, old_index, 0), offset),
               nir_channel(b, old_index, 1));

   assert(intrin->dest.is_ssa);
   nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(new_index));
   nir_instr_remove(&intrin->instr);
}

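/* Sketch of the rewrite above (illustrative): with old_index = vec2(i, o)
 * and a reindex delta d, the result is vec2(i + d, o).  Array elements
 * occupy consecutive binding-table slots, so reindexing is a plain integer
 * add on channel 0 while the byte offset in channel 1 is carried through.
 */
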
static void
lower_load_vulkan_descriptor(nir_intrinsic_instr *intrin,
                             struct apply_pipeline_layout_state *state)
{
   nir_builder *b = &state->builder;

   b->cursor = nir_before_instr(&intrin->instr);

   /* We follow the nir_address_format_32bit_index_offset model */
   assert(intrin->src[0].is_ssa);
   nir_ssa_def *index = intrin->src[0].ssa;

   assert(intrin->dest.is_ssa);
   nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(index));
   nir_instr_remove(&intrin->instr);
}

static void
lower_get_buffer_size(nir_intrinsic_instr *intrin,
                      struct apply_pipeline_layout_state *state)
{
   nir_builder *b = &state->builder;

   b->cursor = nir_before_instr(&intrin->instr);

   assert(intrin->src[0].is_ssa);
   nir_ssa_def *index = intrin->src[0].ssa;

   /* We're following the nir_address_format_32bit_index_offset model so the
    * binding table index is the first component of the address.  The
    * back-end wants a scalar binding table index source.
    */
   nir_instr_rewrite_src(&intrin->instr, &intrin->src[0],
                         nir_src_for_ssa(nir_channel(b, index, 0)));
}

static nir_ssa_def *
build_descriptor_load(nir_deref_instr *deref, unsigned offset,
                      unsigned num_components, unsigned bit_size,
                      struct apply_pipeline_layout_state *state)
{
   nir_variable *var = nir_deref_instr_get_variable(deref);

   unsigned set = var->data.descriptor_set;
   unsigned binding = var->data.binding;
   unsigned array_size =
      state->layout->set[set].layout->binding[binding].array_size;

   const struct anv_descriptor_set_binding_layout *bind_layout =
      &state->layout->set[set].layout->binding[binding];

   nir_builder *b = &state->builder;

   nir_ssa_def *desc_buffer_index =
      nir_imm_int(b, state->set[set].desc_offset);

   nir_ssa_def *desc_offset =
      nir_imm_int(b, bind_layout->descriptor_offset + offset);
   if (deref->deref_type != nir_deref_type_var) {
      assert(deref->deref_type == nir_deref_type_array);

      const unsigned descriptor_size = anv_descriptor_size(bind_layout);
      nir_ssa_def *arr_index = nir_ssa_for_src(b, deref->arr.index, 1);
      if (state->add_bounds_checks)
         arr_index = nir_umin(b, arr_index, nir_imm_int(b, array_size - 1));

      desc_offset = nir_iadd(b, desc_offset,
                             nir_imul_imm(b, arr_index, descriptor_size));
   }

   nir_intrinsic_instr *desc_load =
      nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_ubo);
   desc_load->src[0] = nir_src_for_ssa(desc_buffer_index);
   desc_load->src[1] = nir_src_for_ssa(desc_offset);
   desc_load->num_components = num_components;
   nir_ssa_dest_init(&desc_load->instr, &desc_load->dest,
                     num_components, bit_size, NULL);
   nir_builder_instr_insert(b, &desc_load->instr);

   return &desc_load->dest.ssa;
}

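/* In effect (illustrative summary, not original text), the descriptor read
 * built above is
 *
 *    load_ubo(set_descriptor_buffer_index,
 *             bind_layout->descriptor_offset + offset
 *             + arr_index * anv_descriptor_size(bind_layout))
 *
 * so descriptor data lives at a fixed offset inside the set's descriptor
 * buffer and array elements are descriptor_size bytes apart.
 */
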
static void
lower_image_intrinsic(nir_intrinsic_instr *intrin,
                      struct apply_pipeline_layout_state *state)
{
   nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);

   nir_builder *b = &state->builder;
   b->cursor = nir_before_instr(&intrin->instr);

   if (intrin->intrinsic == nir_intrinsic_image_deref_load_param_intel) {
      b->cursor = nir_instr_remove(&intrin->instr);

      const unsigned param = nir_intrinsic_base(intrin);

      nir_ssa_def *desc =
         build_descriptor_load(deref, param * 16,
                               intrin->dest.ssa.num_components,
                               intrin->dest.ssa.bit_size, state);

      nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(desc));
   } else {
      nir_variable *var = nir_deref_instr_get_variable(deref);

      unsigned set = var->data.descriptor_set;
      unsigned binding = var->data.binding;
      unsigned binding_offset = state->set[set].surface_offsets[binding];
      unsigned array_size =
         state->layout->set[set].layout->binding[binding].array_size;

      nir_ssa_def *index = NULL;
      if (deref->deref_type != nir_deref_type_var) {
         assert(deref->deref_type == nir_deref_type_array);
         index = nir_ssa_for_src(b, deref->arr.index, 1);
         if (state->add_bounds_checks)
            index = nir_umin(b, index, nir_imm_int(b, array_size - 1));
      } else {
         index = nir_imm_int(b, 0);
      }

      index = nir_iadd_imm(b, index, binding_offset);
      nir_rewrite_image_intrinsic(intrin, index, false);
   }
}

static void
lower_load_constant(nir_intrinsic_instr *intrin,
                    struct apply_pipeline_layout_state *state)
{
   nir_builder *b = &state->builder;

   b->cursor = nir_before_instr(&intrin->instr);

   nir_ssa_def *index = nir_imm_int(b, state->constants_offset);
   nir_ssa_def *offset = nir_iadd(b, nir_ssa_for_src(b, intrin->src[0], 1),
                                  nir_imm_int(b, nir_intrinsic_base(intrin)));

   nir_intrinsic_instr *load_ubo =
      nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_ubo);
   load_ubo->num_components = intrin->num_components;
   load_ubo->src[0] = nir_src_for_ssa(index);
   load_ubo->src[1] = nir_src_for_ssa(offset);
   nir_ssa_dest_init(&load_ubo->instr, &load_ubo->dest,
                     intrin->dest.ssa.num_components,
                     intrin->dest.ssa.bit_size, NULL);
   nir_builder_instr_insert(b, &load_ubo->instr);

   nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
                            nir_src_for_ssa(&load_ubo->dest.ssa));
   nir_instr_remove(&intrin->instr);
}

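/* Illustrative before/after for the lowering above:
 *
 *    vec4 32 ssa_2 = intrinsic load_constant (ssa_1) (base=16)
 *
 * becomes a UBO read from the shader-constants binding-table entry:
 *
 *    vec4 32 ssa_2 = intrinsic load_ubo (constants_offset, 16 + ssa_1)
 *
 * where constants_offset is the surface slot reserved in
 * anv_nir_apply_pipeline_layout() when state.uses_constants is set.
 */
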
static void
lower_tex_deref(nir_tex_instr *tex, nir_tex_src_type deref_src_type,
                unsigned *base_index,
                struct apply_pipeline_layout_state *state)
{
   int deref_src_idx = nir_tex_instr_src_index(tex, deref_src_type);
   if (deref_src_idx < 0)
      return;

   nir_deref_instr *deref = nir_src_as_deref(tex->src[deref_src_idx].src);
   nir_variable *var = nir_deref_instr_get_variable(deref);

   unsigned set = var->data.descriptor_set;
   unsigned binding = var->data.binding;
   unsigned array_size =
      state->layout->set[set].layout->binding[binding].array_size;

   nir_tex_src_type offset_src_type;
   if (deref_src_type == nir_tex_src_texture_deref) {
      offset_src_type = nir_tex_src_texture_offset;
      *base_index = state->set[set].surface_offsets[binding];
   } else {
      assert(deref_src_type == nir_tex_src_sampler_deref);
      offset_src_type = nir_tex_src_sampler_offset;
      *base_index = state->set[set].sampler_offsets[binding];
   }

   nir_ssa_def *index = NULL;
   if (deref->deref_type != nir_deref_type_var) {
      assert(deref->deref_type == nir_deref_type_array);

      if (nir_src_is_const(deref->arr.index)) {
         unsigned arr_index = nir_src_as_uint(deref->arr.index);
         *base_index += MIN2(arr_index, array_size - 1);
      } else {
         nir_builder *b = &state->builder;

         /* From VK_KHR_sampler_ycbcr_conversion:
          *
          *    If sampler Y’CBCR conversion is enabled, the combined image
          *    sampler must be indexed only by constant integral expressions when
          *    aggregated into arrays in shader code, irrespective of the
          *    shaderSampledImageArrayDynamicIndexing feature.
          */
         assert(nir_tex_instr_src_index(tex, nir_tex_src_plane) == -1);

         index = nir_ssa_for_src(b, deref->arr.index, 1);

         if (state->add_bounds_checks)
            index = nir_umin(b, index, nir_imm_int(b, array_size - 1));
      }
   }

   if (index) {
      nir_instr_rewrite_src(&tex->instr, &tex->src[deref_src_idx].src,
                            nir_src_for_ssa(index));
      tex->src[deref_src_idx].src_type = offset_src_type;
   } else {
      nir_tex_instr_remove_src(tex, deref_src_idx);
   }
}

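/* Two outcomes are possible above: a constant array index folds directly
 * into *base_index (the instruction's texture_index or sampler_index), and
 * the deref source is removed; a dynamic index instead survives as a
 * texture/sampler offset source that the backend adds to the base at run
 * time.
 */
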
static unsigned
tex_instr_get_and_remove_plane_src(nir_tex_instr *tex)
{
   int plane_src_idx = nir_tex_instr_src_index(tex, nir_tex_src_plane);
   if (plane_src_idx < 0)
      return 0;

   unsigned plane = nir_src_as_uint(tex->src[plane_src_idx].src);

   nir_tex_instr_remove_src(tex, plane_src_idx);

   return plane;
}

static void
lower_tex(nir_tex_instr *tex, struct apply_pipeline_layout_state *state)
{
   state->builder.cursor = nir_before_instr(&tex->instr);

   unsigned plane = tex_instr_get_and_remove_plane_src(tex);

   lower_tex_deref(tex, nir_tex_src_texture_deref,
                   &tex->texture_index, state);
   tex->texture_index += plane;

   lower_tex_deref(tex, nir_tex_src_sampler_deref,
                   &tex->sampler_index, state);
   tex->sampler_index += plane;

   /* The backend only ever uses this to mark used surfaces.  We don't care
    * about that little optimization so it just needs to be non-zero.
    */
   tex->texture_array_size = 1;
}

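/* For multi-planar (Y’CbCr) formats, each plane occupies its own consecutive
 * binding-table and sampler slot (see the per-plane loops in
 * anv_nir_apply_pipeline_layout below), which is why the constant plane
 * index from nir_tex_src_plane is simply added to both indices above.
 */
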
static void
apply_pipeline_layout_block(nir_block *block,
                            struct apply_pipeline_layout_state *state)
{
   nir_foreach_instr_safe(instr, block) {
      switch (instr->type) {
      case nir_instr_type_intrinsic: {
         nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
         switch (intrin->intrinsic) {
         case nir_intrinsic_vulkan_resource_index:
            lower_res_index_intrinsic(intrin, state);
            break;
         case nir_intrinsic_vulkan_resource_reindex:
            lower_res_reindex_intrinsic(intrin, state);
            break;
         case nir_intrinsic_load_vulkan_descriptor:
            lower_load_vulkan_descriptor(intrin, state);
            break;
         case nir_intrinsic_get_buffer_size:
            lower_get_buffer_size(intrin, state);
            break;
         case nir_intrinsic_image_deref_load:
         case nir_intrinsic_image_deref_store:
         case nir_intrinsic_image_deref_atomic_add:
         case nir_intrinsic_image_deref_atomic_min:
         case nir_intrinsic_image_deref_atomic_max:
         case nir_intrinsic_image_deref_atomic_and:
         case nir_intrinsic_image_deref_atomic_or:
         case nir_intrinsic_image_deref_atomic_xor:
         case nir_intrinsic_image_deref_atomic_exchange:
         case nir_intrinsic_image_deref_atomic_comp_swap:
         case nir_intrinsic_image_deref_size:
         case nir_intrinsic_image_deref_samples:
         case nir_intrinsic_image_deref_load_param_intel:
         case nir_intrinsic_image_deref_load_raw_intel:
         case nir_intrinsic_image_deref_store_raw_intel:
            lower_image_intrinsic(intrin, state);
            break;
         case nir_intrinsic_load_constant:
            lower_load_constant(intrin, state);
            break;
         default:
            break;
         }
         break;
      }
      case nir_instr_type_tex:
         lower_tex(nir_instr_as_tex(instr), state);
         break;
      default:
         continue;
      }
   }
}

void
anv_nir_apply_pipeline_layout(const struct anv_physical_device *pdevice,
                              bool robust_buffer_access,
                              struct anv_pipeline_layout *layout,
                              nir_shader *shader,
                              struct brw_stage_prog_data *prog_data,
                              struct anv_pipeline_bind_map *map)
{
   struct apply_pipeline_layout_state state = {
      .pdevice = pdevice,
      .shader = shader,
      .layout = layout,
      .add_bounds_checks = robust_buffer_access,
   };

   void *mem_ctx = ralloc_context(NULL);

   for (unsigned s = 0; s < layout->num_sets; s++) {
      const unsigned count = layout->set[s].layout->binding_count;
      const unsigned words = BITSET_WORDS(count);
      state.set[s].used = rzalloc_array(mem_ctx, BITSET_WORD, words);
      state.set[s].surface_offsets = rzalloc_array(mem_ctx, uint8_t, count);
      state.set[s].sampler_offsets = rzalloc_array(mem_ctx, uint8_t, count);
   }

   nir_foreach_function(function, shader) {
      if (!function->impl)
         continue;

      nir_foreach_block(block, function->impl)
         get_used_bindings_block(block, &state);
   }

   for (unsigned s = 0; s < layout->num_sets; s++) {
      if (state.set[s].desc_buffer_used) {
         map->surface_to_descriptor[map->surface_count] =
            (struct anv_pipeline_binding) {
               .set = ANV_DESCRIPTOR_SET_DESCRIPTORS,
               .binding = s,
            };
         state.set[s].desc_offset = map->surface_count;
         map->surface_count++;
      }
   }

   if (state.uses_constants) {
      state.constants_offset = map->surface_count;
      map->surface_to_descriptor[map->surface_count].set =
         ANV_DESCRIPTOR_SET_SHADER_CONSTANTS;
      map->surface_count++;
   }

   for (uint32_t set = 0; set < layout->num_sets; set++) {
      struct anv_descriptor_set_layout *set_layout = layout->set[set].layout;

      BITSET_WORD b, _tmp;
      BITSET_FOREACH_SET(b, _tmp, state.set[set].used,
                         set_layout->binding_count) {
         struct anv_descriptor_set_binding_layout *binding =
            &set_layout->binding[b];

         if (binding->array_size == 0)
            continue;

         if (binding->data & ANV_DESCRIPTOR_SURFACE_STATE) {
            state.set[set].surface_offsets[b] = map->surface_count;
            struct anv_sampler **samplers = binding->immutable_samplers;
            for (unsigned i = 0; i < binding->array_size; i++) {
               uint8_t planes = samplers ? samplers[i]->n_planes : 1;
               for (uint8_t p = 0; p < planes; p++) {
                  map->surface_to_descriptor[map->surface_count++] =
                     (struct anv_pipeline_binding) {
                        .set = set,
                        .binding = b,
                        .index = i,
                        .plane = p,
                     };
               }
            }
         }

         if (binding->data & ANV_DESCRIPTOR_SAMPLER_STATE) {
            state.set[set].sampler_offsets[b] = map->sampler_count;
            struct anv_sampler **samplers = binding->immutable_samplers;
            for (unsigned i = 0; i < binding->array_size; i++) {
               uint8_t planes = samplers ? samplers[i]->n_planes : 1;
               for (uint8_t p = 0; p < planes; p++) {
                  map->sampler_to_descriptor[map->sampler_count++] =
                     (struct anv_pipeline_binding) {
                        .set = set,
                        .binding = b,
                        .index = i,
                        .plane = p,
                     };
               }
            }
         }
      }
   }

   nir_foreach_variable(var, &shader->uniforms) {
      const struct glsl_type *glsl_type = glsl_without_array(var->type);

      if (!glsl_type_is_image(glsl_type))
         continue;

      enum glsl_sampler_dim dim = glsl_get_sampler_dim(glsl_type);

      const uint32_t set = var->data.descriptor_set;
      const uint32_t binding = var->data.binding;
      const uint32_t array_size =
         layout->set[set].layout->binding[binding].array_size;

      if (!BITSET_TEST(state.set[set].used, binding))
         continue;

      struct anv_pipeline_binding *pipe_binding =
         &map->surface_to_descriptor[state.set[set].surface_offsets[binding]];
      for (unsigned i = 0; i < array_size; i++) {
         assert(pipe_binding[i].set == set);
         assert(pipe_binding[i].binding == binding);
         assert(pipe_binding[i].index == i);

         if (dim == GLSL_SAMPLER_DIM_SUBPASS ||
             dim == GLSL_SAMPLER_DIM_SUBPASS_MS)
            pipe_binding[i].input_attachment_index = var->data.index + i;

         pipe_binding[i].write_only =
            (var->data.image.access & ACCESS_NON_READABLE) != 0;
      }
   }

   nir_foreach_function(function, shader) {
      if (!function->impl)
         continue;

      nir_builder_init(&state.builder, function->impl);
      nir_foreach_block(block, function->impl)
         apply_pipeline_layout_block(block, &state);
      nir_metadata_preserve(function->impl, nir_metadata_block_index |
                                            nir_metadata_dominance);
   }

   ralloc_free(mem_ctx);
}
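
/* Hypothetical call sketch (the caller names here are assumptions, not part
 * of this file): the pass runs once per shader stage, with
 * map->surface_to_descriptor and map->sampler_to_descriptor pointing at
 * caller-owned arrays:
 *
 *    anv_nir_apply_pipeline_layout(pdevice, device->robust_buffer_access,
 *                                  pipeline->layout, nir, prog_data, &map);
 *
 * Afterwards map.surface_count and map.sampler_count give the binding-table
 * and sampler-table sizes, including the extra surfaces reserved above for
 * descriptor buffers and shader constants.
 */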