/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "anv_nir.h"
#include "program/prog_parameter.h"
#include "nir/nir_builder.h"
#include "compiler/brw_nir.h"
29 struct apply_pipeline_layout_state
{
30 const struct anv_physical_device
*pdevice
;
35 struct anv_pipeline_layout
*layout
;
36 bool add_bounds_checks
;
38 unsigned first_image_uniform
;
41 uint8_t constants_offset
;
43 bool desc_buffer_used
;
47 uint8_t *surface_offsets
;
48 uint8_t *sampler_offsets
;
49 uint8_t *image_offsets
;
54 add_binding(struct apply_pipeline_layout_state
*state
,
55 uint32_t set
, uint32_t binding
)
57 const struct anv_descriptor_set_binding_layout
*bind_layout
=
58 &state
->layout
->set
[set
].layout
->binding
[binding
];
60 BITSET_SET(state
->set
[set
].used
, binding
);
62 /* Only flag the descriptor buffer as used if there's actually data for
63 * this binding. This lets us be lazy and call this function constantly
64 * without worrying about unnecessarily enabling the buffer.
66 if (anv_descriptor_size(bind_layout
))
67 state
->set
[set
].desc_buffer_used
= true;
71 add_deref_src_binding(struct apply_pipeline_layout_state
*state
, nir_src src
)
73 nir_deref_instr
*deref
= nir_src_as_deref(src
);
74 nir_variable
*var
= nir_deref_instr_get_variable(deref
);
75 add_binding(state
, var
->data
.descriptor_set
, var
->data
.binding
);
79 add_tex_src_binding(struct apply_pipeline_layout_state
*state
,
80 nir_tex_instr
*tex
, nir_tex_src_type deref_src_type
)
82 int deref_src_idx
= nir_tex_instr_src_index(tex
, deref_src_type
);
83 if (deref_src_idx
< 0)
86 add_deref_src_binding(state
, tex
->src
[deref_src_idx
].src
);
90 get_used_bindings_block(nir_block
*block
,
91 struct apply_pipeline_layout_state
*state
)
93 nir_foreach_instr_safe(instr
, block
) {
94 switch (instr
->type
) {
95 case nir_instr_type_intrinsic
: {
96 nir_intrinsic_instr
*intrin
= nir_instr_as_intrinsic(instr
);
97 switch (intrin
->intrinsic
) {
98 case nir_intrinsic_vulkan_resource_index
:
99 add_binding(state
, nir_intrinsic_desc_set(intrin
),
100 nir_intrinsic_binding(intrin
));
103 case nir_intrinsic_image_deref_load
:
104 case nir_intrinsic_image_deref_store
:
105 case nir_intrinsic_image_deref_atomic_add
:
106 case nir_intrinsic_image_deref_atomic_min
:
107 case nir_intrinsic_image_deref_atomic_max
:
108 case nir_intrinsic_image_deref_atomic_and
:
109 case nir_intrinsic_image_deref_atomic_or
:
110 case nir_intrinsic_image_deref_atomic_xor
:
111 case nir_intrinsic_image_deref_atomic_exchange
:
112 case nir_intrinsic_image_deref_atomic_comp_swap
:
113 case nir_intrinsic_image_deref_size
:
114 case nir_intrinsic_image_deref_samples
:
115 case nir_intrinsic_image_deref_load_param_intel
:
116 case nir_intrinsic_image_deref_load_raw_intel
:
117 case nir_intrinsic_image_deref_store_raw_intel
:
118 add_deref_src_binding(state
, intrin
->src
[0]);
121 case nir_intrinsic_load_constant
:
122 state
->uses_constants
= true;
130 case nir_instr_type_tex
: {
131 nir_tex_instr
*tex
= nir_instr_as_tex(instr
);
132 add_tex_src_binding(state
, tex
, nir_tex_src_texture_deref
);
133 add_tex_src_binding(state
, tex
, nir_tex_src_sampler_deref
);
143 lower_res_index_intrinsic(nir_intrinsic_instr
*intrin
,
144 struct apply_pipeline_layout_state
*state
)
146 nir_builder
*b
= &state
->builder
;
148 b
->cursor
= nir_before_instr(&intrin
->instr
);
150 uint32_t set
= nir_intrinsic_desc_set(intrin
);
151 uint32_t binding
= nir_intrinsic_binding(intrin
);
153 uint32_t surface_index
= state
->set
[set
].surface_offsets
[binding
];
154 uint32_t array_size
=
155 state
->layout
->set
[set
].layout
->binding
[binding
].array_size
;
157 nir_ssa_def
*array_index
= nir_ssa_for_src(b
, intrin
->src
[0], 1);
158 if (nir_src_is_const(intrin
->src
[0]) || state
->add_bounds_checks
)
159 array_index
= nir_umin(b
, array_index
, nir_imm_int(b
, array_size
- 1));
161 /* We're using nir_address_format_vk_index_offset */
163 nir_vec2(b
, nir_iadd_imm(b
, array_index
, surface_index
),
166 assert(intrin
->dest
.is_ssa
);
167 nir_ssa_def_rewrite_uses(&intrin
->dest
.ssa
, nir_src_for_ssa(index
));
168 nir_instr_remove(&intrin
->instr
);
172 lower_res_reindex_intrinsic(nir_intrinsic_instr
*intrin
,
173 struct apply_pipeline_layout_state
*state
)
175 nir_builder
*b
= &state
->builder
;
177 b
->cursor
= nir_before_instr(&intrin
->instr
);
179 /* For us, the resource indices are just indices into the binding table and
180 * array elements are sequential. A resource_reindex just turns into an
181 * add of the two indices.
183 assert(intrin
->src
[0].is_ssa
&& intrin
->src
[1].is_ssa
);
184 nir_ssa_def
*old_index
= intrin
->src
[0].ssa
;
185 nir_ssa_def
*offset
= intrin
->src
[1].ssa
;
187 nir_ssa_def
*new_index
=
188 nir_vec2(b
, nir_iadd(b
, nir_channel(b
, old_index
, 0), offset
),
189 nir_channel(b
, old_index
, 1));
191 assert(intrin
->dest
.is_ssa
);
192 nir_ssa_def_rewrite_uses(&intrin
->dest
.ssa
, nir_src_for_ssa(new_index
));
193 nir_instr_remove(&intrin
->instr
);
197 lower_load_vulkan_descriptor(nir_intrinsic_instr
*intrin
,
198 struct apply_pipeline_layout_state
*state
)
200 nir_builder
*b
= &state
->builder
;
202 b
->cursor
= nir_before_instr(&intrin
->instr
);
204 /* We follow the nir_address_format_vk_index_offset model */
205 assert(intrin
->src
[0].is_ssa
);
206 nir_ssa_def
*index
= intrin
->src
[0].ssa
;
208 assert(intrin
->dest
.is_ssa
);
209 nir_ssa_def_rewrite_uses(&intrin
->dest
.ssa
, nir_src_for_ssa(index
));
210 nir_instr_remove(&intrin
->instr
);
214 lower_get_buffer_size(nir_intrinsic_instr
*intrin
,
215 struct apply_pipeline_layout_state
*state
)
217 nir_builder
*b
= &state
->builder
;
219 b
->cursor
= nir_before_instr(&intrin
->instr
);
221 assert(intrin
->src
[0].is_ssa
);
222 nir_ssa_def
*index
= intrin
->src
[0].ssa
;
224 /* We're following the nir_address_format_vk_index_offset model so the
225 * binding table index is the first component of the address. The
226 * back-end wants a scalar binding table index source.
228 nir_instr_rewrite_src(&intrin
->instr
, &intrin
->src
[0],
229 nir_src_for_ssa(nir_channel(b
, index
, 0)));
233 lower_image_intrinsic(nir_intrinsic_instr
*intrin
,
234 struct apply_pipeline_layout_state
*state
)
236 nir_deref_instr
*deref
= nir_src_as_deref(intrin
->src
[0]);
237 nir_variable
*var
= nir_deref_instr_get_variable(deref
);
239 unsigned set
= var
->data
.descriptor_set
;
240 unsigned binding
= var
->data
.binding
;
241 unsigned array_size
=
242 state
->layout
->set
[set
].layout
->binding
[binding
].array_size
;
244 nir_builder
*b
= &state
->builder
;
245 b
->cursor
= nir_before_instr(&intrin
->instr
);
247 nir_ssa_def
*index
= NULL
;
248 if (deref
->deref_type
!= nir_deref_type_var
) {
249 assert(deref
->deref_type
== nir_deref_type_array
);
250 index
= nir_ssa_for_src(b
, deref
->arr
.index
, 1);
251 if (state
->add_bounds_checks
)
252 index
= nir_umin(b
, index
, nir_imm_int(b
, array_size
- 1));
254 index
= nir_imm_int(b
, 0);
257 if (intrin
->intrinsic
== nir_intrinsic_image_deref_load_param_intel
) {
258 b
->cursor
= nir_instr_remove(&intrin
->instr
);
260 nir_intrinsic_instr
*load
=
261 nir_intrinsic_instr_create(b
->shader
, nir_intrinsic_load_uniform
);
263 nir_intrinsic_set_base(load
, state
->first_image_uniform
+
264 state
->set
[set
].image_offsets
[binding
] *
265 BRW_IMAGE_PARAM_SIZE
* 4);
266 nir_intrinsic_set_range(load
, array_size
* BRW_IMAGE_PARAM_SIZE
* 4);
268 const unsigned param
= nir_intrinsic_base(intrin
);
269 nir_ssa_def
*offset
=
270 nir_imul(b
, index
, nir_imm_int(b
, BRW_IMAGE_PARAM_SIZE
* 4));
271 offset
= nir_iadd(b
, offset
, nir_imm_int(b
, param
* 16));
272 load
->src
[0] = nir_src_for_ssa(offset
);
274 load
->num_components
= intrin
->dest
.ssa
.num_components
;
275 nir_ssa_dest_init(&load
->instr
, &load
->dest
,
276 intrin
->dest
.ssa
.num_components
,
277 intrin
->dest
.ssa
.bit_size
, NULL
);
278 nir_builder_instr_insert(b
, &load
->instr
);
280 nir_ssa_def_rewrite_uses(&intrin
->dest
.ssa
,
281 nir_src_for_ssa(&load
->dest
.ssa
));
283 unsigned binding_offset
= state
->set
[set
].surface_offsets
[binding
];
284 index
= nir_iadd(b
, index
, nir_imm_int(b
, binding_offset
));
285 brw_nir_rewrite_image_intrinsic(intrin
, index
);
290 lower_load_constant(nir_intrinsic_instr
*intrin
,
291 struct apply_pipeline_layout_state
*state
)
293 nir_builder
*b
= &state
->builder
;
295 b
->cursor
= nir_before_instr(&intrin
->instr
);
297 nir_ssa_def
*index
= nir_imm_int(b
, state
->constants_offset
);
298 nir_ssa_def
*offset
= nir_iadd(b
, nir_ssa_for_src(b
, intrin
->src
[0], 1),
299 nir_imm_int(b
, nir_intrinsic_base(intrin
)));
301 nir_intrinsic_instr
*load_ubo
=
302 nir_intrinsic_instr_create(b
->shader
, nir_intrinsic_load_ubo
);
303 load_ubo
->num_components
= intrin
->num_components
;
304 load_ubo
->src
[0] = nir_src_for_ssa(index
);
305 load_ubo
->src
[1] = nir_src_for_ssa(offset
);
306 nir_ssa_dest_init(&load_ubo
->instr
, &load_ubo
->dest
,
307 intrin
->dest
.ssa
.num_components
,
308 intrin
->dest
.ssa
.bit_size
, NULL
);
309 nir_builder_instr_insert(b
, &load_ubo
->instr
);
311 nir_ssa_def_rewrite_uses(&intrin
->dest
.ssa
,
312 nir_src_for_ssa(&load_ubo
->dest
.ssa
));
313 nir_instr_remove(&intrin
->instr
);
317 lower_tex_deref(nir_tex_instr
*tex
, nir_tex_src_type deref_src_type
,
318 unsigned *base_index
,
319 struct apply_pipeline_layout_state
*state
)
321 int deref_src_idx
= nir_tex_instr_src_index(tex
, deref_src_type
);
322 if (deref_src_idx
< 0)
325 nir_deref_instr
*deref
= nir_src_as_deref(tex
->src
[deref_src_idx
].src
);
326 nir_variable
*var
= nir_deref_instr_get_variable(deref
);
328 unsigned set
= var
->data
.descriptor_set
;
329 unsigned binding
= var
->data
.binding
;
330 unsigned array_size
=
331 state
->layout
->set
[set
].layout
->binding
[binding
].array_size
;
333 nir_tex_src_type offset_src_type
;
334 if (deref_src_type
== nir_tex_src_texture_deref
) {
335 offset_src_type
= nir_tex_src_texture_offset
;
336 *base_index
= state
->set
[set
].surface_offsets
[binding
];
338 assert(deref_src_type
== nir_tex_src_sampler_deref
);
339 offset_src_type
= nir_tex_src_sampler_offset
;
340 *base_index
= state
->set
[set
].sampler_offsets
[binding
];
343 nir_ssa_def
*index
= NULL
;
344 if (deref
->deref_type
!= nir_deref_type_var
) {
345 assert(deref
->deref_type
== nir_deref_type_array
);
347 if (nir_src_is_const(deref
->arr
.index
)) {
348 unsigned arr_index
= nir_src_as_uint(deref
->arr
.index
);
349 *base_index
+= MIN2(arr_index
, array_size
- 1);
351 nir_builder
*b
= &state
->builder
;
353 /* From VK_KHR_sampler_ycbcr_conversion:
355 * If sampler Y’CBCR conversion is enabled, the combined image
356 * sampler must be indexed only by constant integral expressions when
357 * aggregated into arrays in shader code, irrespective of the
358 * shaderSampledImageArrayDynamicIndexing feature.
360 assert(nir_tex_instr_src_index(tex
, nir_tex_src_plane
) == -1);
362 index
= nir_ssa_for_src(b
, deref
->arr
.index
, 1);
364 if (state
->add_bounds_checks
)
365 index
= nir_umin(b
, index
, nir_imm_int(b
, array_size
- 1));
370 nir_instr_rewrite_src(&tex
->instr
, &tex
->src
[deref_src_idx
].src
,
371 nir_src_for_ssa(index
));
372 tex
->src
[deref_src_idx
].src_type
= offset_src_type
;
374 nir_tex_instr_remove_src(tex
, deref_src_idx
);
379 tex_instr_get_and_remove_plane_src(nir_tex_instr
*tex
)
381 int plane_src_idx
= nir_tex_instr_src_index(tex
, nir_tex_src_plane
);
382 if (plane_src_idx
< 0)
385 unsigned plane
= nir_src_as_uint(tex
->src
[plane_src_idx
].src
);
387 nir_tex_instr_remove_src(tex
, plane_src_idx
);
393 lower_tex(nir_tex_instr
*tex
, struct apply_pipeline_layout_state
*state
)
395 state
->builder
.cursor
= nir_before_instr(&tex
->instr
);
397 unsigned plane
= tex_instr_get_and_remove_plane_src(tex
);
399 lower_tex_deref(tex
, nir_tex_src_texture_deref
,
400 &tex
->texture_index
, state
);
401 tex
->texture_index
+= plane
;
403 lower_tex_deref(tex
, nir_tex_src_sampler_deref
,
404 &tex
->sampler_index
, state
);
405 tex
->sampler_index
+= plane
;
407 /* The backend only ever uses this to mark used surfaces. We don't care
408 * about that little optimization so it just needs to be non-zero.
410 tex
->texture_array_size
= 1;
414 apply_pipeline_layout_block(nir_block
*block
,
415 struct apply_pipeline_layout_state
*state
)
417 nir_foreach_instr_safe(instr
, block
) {
418 switch (instr
->type
) {
419 case nir_instr_type_intrinsic
: {
420 nir_intrinsic_instr
*intrin
= nir_instr_as_intrinsic(instr
);
421 switch (intrin
->intrinsic
) {
422 case nir_intrinsic_vulkan_resource_index
:
423 lower_res_index_intrinsic(intrin
, state
);
425 case nir_intrinsic_vulkan_resource_reindex
:
426 lower_res_reindex_intrinsic(intrin
, state
);
428 case nir_intrinsic_load_vulkan_descriptor
:
429 lower_load_vulkan_descriptor(intrin
, state
);
431 case nir_intrinsic_get_buffer_size
:
432 lower_get_buffer_size(intrin
, state
);
434 case nir_intrinsic_image_deref_load
:
435 case nir_intrinsic_image_deref_store
:
436 case nir_intrinsic_image_deref_atomic_add
:
437 case nir_intrinsic_image_deref_atomic_min
:
438 case nir_intrinsic_image_deref_atomic_max
:
439 case nir_intrinsic_image_deref_atomic_and
:
440 case nir_intrinsic_image_deref_atomic_or
:
441 case nir_intrinsic_image_deref_atomic_xor
:
442 case nir_intrinsic_image_deref_atomic_exchange
:
443 case nir_intrinsic_image_deref_atomic_comp_swap
:
444 case nir_intrinsic_image_deref_size
:
445 case nir_intrinsic_image_deref_samples
:
446 case nir_intrinsic_image_deref_load_param_intel
:
447 case nir_intrinsic_image_deref_load_raw_intel
:
448 case nir_intrinsic_image_deref_store_raw_intel
:
449 lower_image_intrinsic(intrin
, state
);
451 case nir_intrinsic_load_constant
:
452 lower_load_constant(intrin
, state
);
459 case nir_instr_type_tex
:
460 lower_tex(nir_instr_as_tex(instr
), state
);
469 setup_vec4_uniform_value(uint32_t *params
, uint32_t offset
, unsigned n
)
471 for (unsigned i
= 0; i
< n
; ++i
)
472 params
[i
] = ANV_PARAM_PUSH(offset
+ i
* sizeof(uint32_t));
474 for (unsigned i
= n
; i
< 4; ++i
)
475 params
[i
] = BRW_PARAM_BUILTIN_ZERO
;
479 anv_nir_apply_pipeline_layout(const struct anv_physical_device
*pdevice
,
480 bool robust_buffer_access
,
481 struct anv_pipeline_layout
*layout
,
483 struct brw_stage_prog_data
*prog_data
,
484 struct anv_pipeline_bind_map
*map
)
486 struct apply_pipeline_layout_state state
= {
490 .add_bounds_checks
= robust_buffer_access
,
493 void *mem_ctx
= ralloc_context(NULL
);
495 for (unsigned s
= 0; s
< layout
->num_sets
; s
++) {
496 const unsigned count
= layout
->set
[s
].layout
->binding_count
;
497 const unsigned words
= BITSET_WORDS(count
);
498 state
.set
[s
].used
= rzalloc_array(mem_ctx
, BITSET_WORD
, words
);
499 state
.set
[s
].surface_offsets
= rzalloc_array(mem_ctx
, uint8_t, count
);
500 state
.set
[s
].sampler_offsets
= rzalloc_array(mem_ctx
, uint8_t, count
);
501 state
.set
[s
].image_offsets
= rzalloc_array(mem_ctx
, uint8_t, count
);
504 nir_foreach_function(function
, shader
) {
508 nir_foreach_block(block
, function
->impl
)
509 get_used_bindings_block(block
, &state
);
512 for (unsigned s
= 0; s
< layout
->num_sets
; s
++) {
513 if (state
.set
[s
].desc_buffer_used
) {
514 map
->surface_to_descriptor
[map
->surface_count
] =
515 (struct anv_pipeline_binding
) {
516 .set
= ANV_DESCRIPTOR_SET_DESCRIPTORS
,
519 state
.set
[s
].desc_offset
= map
->surface_count
;
520 map
->surface_count
++;
524 if (state
.uses_constants
) {
525 state
.constants_offset
= map
->surface_count
;
526 map
->surface_to_descriptor
[map
->surface_count
].set
=
527 ANV_DESCRIPTOR_SET_SHADER_CONSTANTS
;
528 map
->surface_count
++;
531 for (uint32_t set
= 0; set
< layout
->num_sets
; set
++) {
532 struct anv_descriptor_set_layout
*set_layout
= layout
->set
[set
].layout
;
535 BITSET_FOREACH_SET(b
, _tmp
, state
.set
[set
].used
,
536 set_layout
->binding_count
) {
537 struct anv_descriptor_set_binding_layout
*binding
=
538 &set_layout
->binding
[b
];
540 if (binding
->array_size
== 0)
543 if (binding
->data
& ANV_DESCRIPTOR_SURFACE_STATE
) {
544 state
.set
[set
].surface_offsets
[b
] = map
->surface_count
;
545 struct anv_sampler
**samplers
= binding
->immutable_samplers
;
546 for (unsigned i
= 0; i
< binding
->array_size
; i
++) {
547 uint8_t planes
= samplers
? samplers
[i
]->n_planes
: 1;
548 for (uint8_t p
= 0; p
< planes
; p
++) {
549 map
->surface_to_descriptor
[map
->surface_count
++] =
550 (struct anv_pipeline_binding
) {
560 if (binding
->data
& ANV_DESCRIPTOR_SAMPLER_STATE
) {
561 state
.set
[set
].sampler_offsets
[b
] = map
->sampler_count
;
562 struct anv_sampler
**samplers
= binding
->immutable_samplers
;
563 for (unsigned i
= 0; i
< binding
->array_size
; i
++) {
564 uint8_t planes
= samplers
? samplers
[i
]->n_planes
: 1;
565 for (uint8_t p
= 0; p
< planes
; p
++) {
566 map
->sampler_to_descriptor
[map
->sampler_count
++] =
567 (struct anv_pipeline_binding
) {
577 if (binding
->data
& ANV_DESCRIPTOR_IMAGE_PARAM
) {
578 state
.set
[set
].image_offsets
[b
] = map
->image_param_count
;
579 map
->image_param_count
+= binding
->array_size
;
584 if (map
->image_param_count
> 0) {
585 assert(map
->image_param_count
<= MAX_GEN8_IMAGES
);
586 assert(shader
->num_uniforms
== prog_data
->nr_params
* 4);
587 state
.first_image_uniform
= shader
->num_uniforms
;
588 uint32_t *param
= brw_stage_prog_data_add_params(prog_data
,
589 map
->image_param_count
*
590 BRW_IMAGE_PARAM_SIZE
);
591 struct anv_push_constants
*null_data
= NULL
;
592 const struct brw_image_param
*image_param
= null_data
->images
;
593 for (uint32_t i
= 0; i
< map
->image_param_count
; i
++) {
594 setup_vec4_uniform_value(param
+ BRW_IMAGE_PARAM_OFFSET_OFFSET
,
595 (uintptr_t)image_param
->offset
, 2);
596 setup_vec4_uniform_value(param
+ BRW_IMAGE_PARAM_SIZE_OFFSET
,
597 (uintptr_t)image_param
->size
, 3);
598 setup_vec4_uniform_value(param
+ BRW_IMAGE_PARAM_STRIDE_OFFSET
,
599 (uintptr_t)image_param
->stride
, 4);
600 setup_vec4_uniform_value(param
+ BRW_IMAGE_PARAM_TILING_OFFSET
,
601 (uintptr_t)image_param
->tiling
, 3);
602 setup_vec4_uniform_value(param
+ BRW_IMAGE_PARAM_SWIZZLING_OFFSET
,
603 (uintptr_t)image_param
->swizzling
, 2);
605 param
+= BRW_IMAGE_PARAM_SIZE
;
608 assert(param
== prog_data
->param
+ prog_data
->nr_params
);
610 shader
->num_uniforms
+= map
->image_param_count
*
611 BRW_IMAGE_PARAM_SIZE
* 4;
612 assert(shader
->num_uniforms
== prog_data
->nr_params
* 4);
615 nir_foreach_variable(var
, &shader
->uniforms
) {
616 const struct glsl_type
*glsl_type
= glsl_without_array(var
->type
);
618 if (!glsl_type_is_image(glsl_type
))
621 enum glsl_sampler_dim dim
= glsl_get_sampler_dim(glsl_type
);
623 const uint32_t set
= var
->data
.descriptor_set
;
624 const uint32_t binding
= var
->data
.binding
;
625 const uint32_t array_size
=
626 layout
->set
[set
].layout
->binding
[binding
].array_size
;
628 if (!BITSET_TEST(state
.set
[set
].used
, binding
))
631 struct anv_pipeline_binding
*pipe_binding
=
632 &map
->surface_to_descriptor
[state
.set
[set
].surface_offsets
[binding
]];
633 for (unsigned i
= 0; i
< array_size
; i
++) {
634 assert(pipe_binding
[i
].set
== set
);
635 assert(pipe_binding
[i
].binding
== binding
);
636 assert(pipe_binding
[i
].index
== i
);
638 if (dim
== GLSL_SAMPLER_DIM_SUBPASS
||
639 dim
== GLSL_SAMPLER_DIM_SUBPASS_MS
)
640 pipe_binding
[i
].input_attachment_index
= var
->data
.index
+ i
;
642 pipe_binding
[i
].write_only
=
643 (var
->data
.image
.access
& ACCESS_NON_READABLE
) != 0;
647 nir_foreach_function(function
, shader
) {
651 nir_builder_init(&state
.builder
, function
->impl
);
652 nir_foreach_block(block
, function
->impl
)
653 apply_pipeline_layout_block(block
, &state
);
654 nir_metadata_preserve(function
->impl
, nir_metadata_block_index
|
655 nir_metadata_dominance
);
658 ralloc_free(mem_ctx
);