/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "anv_nir.h"
#include "program/prog_parameter.h"
#include "nir/nir_builder.h"
#include "compiler/brw_nir.h"

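/* This pass lowers Vulkan descriptor references (set, binding) in a NIR
 * shader to flat Intel binding-table and sampler-table indices, recording
 * the resulting slot assignments in the anv_pipeline_bind_map.
 */
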
struct apply_pipeline_layout_state {
   const struct anv_physical_device *pdevice;

   nir_shader *shader;
   nir_builder builder;

   struct anv_pipeline_layout *layout;
   bool add_bounds_checks;

   bool uses_constants;
   uint8_t constants_offset;

   struct {
      bool desc_buffer_used;
      uint8_t desc_offset;

      uint8_t *use_count;
      uint8_t *surface_offsets;
      uint8_t *sampler_offsets;
   } set[MAX_SETS];
};

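/* Record one use of (set, binding).  The use count saturates at UINT8_MAX
 * rather than wrapping, so a heavily used binding can never appear unused.
 */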
static void
add_binding(struct apply_pipeline_layout_state *state,
            uint32_t set, uint32_t binding)
{
   const struct anv_descriptor_set_binding_layout *bind_layout =
      &state->layout->set[set].layout->binding[binding];

   if (state->set[set].use_count[binding] < UINT8_MAX)
      state->set[set].use_count[binding]++;

   /* Only flag the descriptor buffer as used if there's actually data for
    * this binding.  This lets us be lazy and call this function constantly
    * without worrying about unnecessarily enabling the buffer.
    */
   if (anv_descriptor_size(bind_layout))
      state->set[set].desc_buffer_used = true;
}

static void
add_deref_src_binding(struct apply_pipeline_layout_state *state, nir_src src)
{
   nir_deref_instr *deref = nir_src_as_deref(src);
   nir_variable *var = nir_deref_instr_get_variable(deref);
   add_binding(state, var->data.descriptor_set, var->data.binding);
}

static void
add_tex_src_binding(struct apply_pipeline_layout_state *state,
                    nir_tex_instr *tex, nir_tex_src_type deref_src_type)
{
   int deref_src_idx = nir_tex_instr_src_index(tex, deref_src_type);
   if (deref_src_idx < 0)
      return;

   add_deref_src_binding(state, tex->src[deref_src_idx].src);
}

static void
get_used_bindings_block(nir_block *block,
                        struct apply_pipeline_layout_state *state)
{
   nir_foreach_instr_safe(instr, block) {
      switch (instr->type) {
      case nir_instr_type_intrinsic: {
         nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
         switch (intrin->intrinsic) {
         case nir_intrinsic_vulkan_resource_index:
            add_binding(state, nir_intrinsic_desc_set(intrin),
                        nir_intrinsic_binding(intrin));
            break;

         case nir_intrinsic_image_deref_load:
         case nir_intrinsic_image_deref_store:
         case nir_intrinsic_image_deref_atomic_add:
         case nir_intrinsic_image_deref_atomic_min:
         case nir_intrinsic_image_deref_atomic_max:
         case nir_intrinsic_image_deref_atomic_and:
         case nir_intrinsic_image_deref_atomic_or:
         case nir_intrinsic_image_deref_atomic_xor:
         case nir_intrinsic_image_deref_atomic_exchange:
         case nir_intrinsic_image_deref_atomic_comp_swap:
         case nir_intrinsic_image_deref_size:
         case nir_intrinsic_image_deref_samples:
         case nir_intrinsic_image_deref_load_param_intel:
         case nir_intrinsic_image_deref_load_raw_intel:
         case nir_intrinsic_image_deref_store_raw_intel:
            add_deref_src_binding(state, intrin->src[0]);
            break;

         case nir_intrinsic_load_constant:
            state->uses_constants = true;
            break;

         default:
            break;
         }
         break;
      }
      case nir_instr_type_tex: {
         nir_tex_instr *tex = nir_instr_as_tex(instr);
         add_tex_src_binding(state, tex, nir_tex_src_texture_deref);
         add_tex_src_binding(state, tex, nir_tex_src_sampler_deref);
         break;
      }
      default:
         continue;
      }
   }
}

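/* Lower nir_intrinsic_vulkan_resource_index to the
 * nir_address_format_32bit_index_offset model: a vec2 whose first component
 * is a binding-table index and whose second is a byte offset.  Roughly, as
 * an illustrative sketch (not exact NIR output):
 *
 *    ssa_1 = vulkan_resource_index ssa_0 (desc_set=S, binding=B)
 *  becomes
 *    ssa_1 = vec2(surface_offsets[B] + ssa_0, 0)
 *
 * Inline uniform blocks instead reference the descriptor-set buffer itself,
 * using the binding's descriptor_offset as the offset component.
 */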
static void
lower_res_index_intrinsic(nir_intrinsic_instr *intrin,
                          struct apply_pipeline_layout_state *state)
{
   nir_builder *b = &state->builder;

   b->cursor = nir_before_instr(&intrin->instr);

   uint32_t set = nir_intrinsic_desc_set(intrin);
   uint32_t binding = nir_intrinsic_binding(intrin);

   const struct anv_descriptor_set_binding_layout *bind_layout =
      &state->layout->set[set].layout->binding[binding];

   uint32_t surface_index = state->set[set].surface_offsets[binding];
   uint32_t array_size = bind_layout->array_size;

   nir_ssa_def *array_index = nir_ssa_for_src(b, intrin->src[0], 1);
   if (nir_src_is_const(intrin->src[0]) || state->add_bounds_checks)
      array_index = nir_umin(b, array_index, nir_imm_int(b, array_size - 1));

   nir_ssa_def *index;
   if (bind_layout->data & ANV_DESCRIPTOR_INLINE_UNIFORM) {
      /* This is an inline uniform block.  Just reference the descriptor set
       * and use the descriptor offset as the base.
       */
      index = nir_imm_ivec2(b, state->set[set].desc_offset,
                            bind_layout->descriptor_offset);
   } else {
      /* We're using nir_address_format_32bit_index_offset */
      index = nir_vec2(b, nir_iadd_imm(b, array_index, surface_index),
                       nir_imm_int(b, 0));
   }

   assert(intrin->dest.is_ssa);
   nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(index));
   nir_instr_remove(&intrin->instr);
}

static void
lower_res_reindex_intrinsic(nir_intrinsic_instr *intrin,
                            struct apply_pipeline_layout_state *state)
{
   nir_builder *b = &state->builder;

   b->cursor = nir_before_instr(&intrin->instr);

   /* For us, the resource indices are just indices into the binding table
    * and array elements are sequential.  A resource_reindex just turns into
    * an add of the two indices.
    */
   assert(intrin->src[0].is_ssa && intrin->src[1].is_ssa);
   nir_ssa_def *old_index = intrin->src[0].ssa;
   nir_ssa_def *offset = intrin->src[1].ssa;

   nir_ssa_def *new_index =
      nir_vec2(b, nir_iadd(b, nir_channel(b, old_index, 0), offset),
               nir_channel(b, old_index, 1));

   assert(intrin->dest.is_ssa);
   nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(new_index));
   nir_instr_remove(&intrin->instr);
}

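/* In the 32bit_index_offset model the vec2 produced by resource_index or
 * resource_reindex already is the descriptor value, so loading the
 * descriptor reduces to forwarding its source.
 */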
static void
lower_load_vulkan_descriptor(nir_intrinsic_instr *intrin,
                             struct apply_pipeline_layout_state *state)
{
   nir_builder *b = &state->builder;

   b->cursor = nir_before_instr(&intrin->instr);

   /* We follow the nir_address_format_32bit_index_offset model */
   assert(intrin->src[0].is_ssa);
   nir_ssa_def *index = intrin->src[0].ssa;

   assert(intrin->dest.is_ssa);
   nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(index));
   nir_instr_remove(&intrin->instr);
}

static void
lower_get_buffer_size(nir_intrinsic_instr *intrin,
                      struct apply_pipeline_layout_state *state)
{
   nir_builder *b = &state->builder;

   b->cursor = nir_before_instr(&intrin->instr);

   assert(intrin->src[0].is_ssa);
   nir_ssa_def *index = intrin->src[0].ssa;

   /* We're following the nir_address_format_32bit_index_offset model so the
    * binding table index is the first component of the address.  The
    * back-end wants a scalar binding table index source.
    */
   nir_instr_rewrite_src(&intrin->instr, &intrin->src[0],
                         nir_src_for_ssa(nir_channel(b, index, 0)));
}

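/* Load descriptor data out of the descriptor-set buffer with a load_ubo:
 * the UBO index is the set's binding-table slot (desc_offset) and the UBO
 * offset is the binding's descriptor_offset plus the caller's byte offset,
 * plus array_index * descriptor_size for arrayed bindings.
 */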
static nir_ssa_def *
build_descriptor_load(nir_deref_instr *deref, unsigned offset,
                      unsigned num_components, unsigned bit_size,
                      struct apply_pipeline_layout_state *state)
{
   nir_variable *var = nir_deref_instr_get_variable(deref);

   unsigned set = var->data.descriptor_set;
   unsigned binding = var->data.binding;
   unsigned array_size =
      state->layout->set[set].layout->binding[binding].array_size;

   const struct anv_descriptor_set_binding_layout *bind_layout =
      &state->layout->set[set].layout->binding[binding];

   nir_builder *b = &state->builder;

   nir_ssa_def *desc_buffer_index =
      nir_imm_int(b, state->set[set].desc_offset);

   nir_ssa_def *desc_offset =
      nir_imm_int(b, bind_layout->descriptor_offset + offset);
   if (deref->deref_type != nir_deref_type_var) {
      assert(deref->deref_type == nir_deref_type_array);

      const unsigned descriptor_size = anv_descriptor_size(bind_layout);
      nir_ssa_def *arr_index = nir_ssa_for_src(b, deref->arr.index, 1);
      if (state->add_bounds_checks)
         arr_index = nir_umin(b, arr_index, nir_imm_int(b, array_size - 1));

      desc_offset = nir_iadd(b, desc_offset,
                             nir_imul_imm(b, arr_index, descriptor_size));
   }

   nir_intrinsic_instr *desc_load =
      nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_ubo);
   desc_load->src[0] = nir_src_for_ssa(desc_buffer_index);
   desc_load->src[1] = nir_src_for_ssa(desc_offset);
   desc_load->num_components = num_components;
   nir_ssa_dest_init(&desc_load->instr, &desc_load->dest,
                     num_components, bit_size, NULL);
   nir_builder_instr_insert(b, &desc_load->instr);

   return &desc_load->dest.ssa;
}

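/* image_deref_load_param_intel reads the image parameter block that lives
 * in the descriptor buffer, so it becomes a descriptor load.  All other
 * image intrinsics are rewritten from a deref source to a flat
 * binding-table index via nir_rewrite_image_intrinsic().
 */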
static void
lower_image_intrinsic(nir_intrinsic_instr *intrin,
                      struct apply_pipeline_layout_state *state)
{
   nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);

   nir_builder *b = &state->builder;
   b->cursor = nir_before_instr(&intrin->instr);

   if (intrin->intrinsic == nir_intrinsic_image_deref_load_param_intel) {
      b->cursor = nir_instr_remove(&intrin->instr);

      const unsigned param = nir_intrinsic_base(intrin);

      nir_ssa_def *desc =
         build_descriptor_load(deref, param * 16,
                               intrin->dest.ssa.num_components,
                               intrin->dest.ssa.bit_size, state);

      nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(desc));
   } else {
      nir_variable *var = nir_deref_instr_get_variable(deref);

      unsigned set = var->data.descriptor_set;
      unsigned binding = var->data.binding;
      unsigned binding_offset = state->set[set].surface_offsets[binding];
      unsigned array_size =
         state->layout->set[set].layout->binding[binding].array_size;

      nir_ssa_def *index = NULL;
      if (deref->deref_type != nir_deref_type_var) {
         assert(deref->deref_type == nir_deref_type_array);
         index = nir_ssa_for_src(b, deref->arr.index, 1);
         if (state->add_bounds_checks)
            index = nir_umin(b, index, nir_imm_int(b, array_size - 1));
      } else {
         index = nir_imm_int(b, 0);
      }

      index = nir_iadd_imm(b, index, binding_offset);
      nir_rewrite_image_intrinsic(intrin, index, false);
   }
}

static void
lower_load_constant(nir_intrinsic_instr *intrin,
                    struct apply_pipeline_layout_state *state)
{
   nir_builder *b = &state->builder;

   b->cursor = nir_before_instr(&intrin->instr);

   nir_ssa_def *index = nir_imm_int(b, state->constants_offset);
   nir_ssa_def *offset = nir_iadd(b, nir_ssa_for_src(b, intrin->src[0], 1),
                                  nir_imm_int(b, nir_intrinsic_base(intrin)));

   nir_intrinsic_instr *load_ubo =
      nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_ubo);
   load_ubo->num_components = intrin->num_components;
   load_ubo->src[0] = nir_src_for_ssa(index);
   load_ubo->src[1] = nir_src_for_ssa(offset);
   nir_ssa_dest_init(&load_ubo->instr, &load_ubo->dest,
                     intrin->dest.ssa.num_components,
                     intrin->dest.ssa.bit_size, NULL);
   nir_builder_instr_insert(b, &load_ubo->instr);

   nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
                            nir_src_for_ssa(&load_ubo->dest.ssa));
   nir_instr_remove(&intrin->instr);
}

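/* Turn a texture/sampler deref source into a (base index, dynamic offset)
 * pair: a constant array index is folded into *base_index and the deref
 * source is removed, while a dynamic index becomes a
 * nir_tex_src_{texture,sampler}_offset source.
 */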
static void
lower_tex_deref(nir_tex_instr *tex, nir_tex_src_type deref_src_type,
                unsigned *base_index,
                struct apply_pipeline_layout_state *state)
{
   int deref_src_idx = nir_tex_instr_src_index(tex, deref_src_type);
   if (deref_src_idx < 0)
      return;

   nir_deref_instr *deref = nir_src_as_deref(tex->src[deref_src_idx].src);
   nir_variable *var = nir_deref_instr_get_variable(deref);

   unsigned set = var->data.descriptor_set;
   unsigned binding = var->data.binding;
   unsigned array_size =
      state->layout->set[set].layout->binding[binding].array_size;

   nir_tex_src_type offset_src_type;
   if (deref_src_type == nir_tex_src_texture_deref) {
      offset_src_type = nir_tex_src_texture_offset;
      *base_index = state->set[set].surface_offsets[binding];
   } else {
      assert(deref_src_type == nir_tex_src_sampler_deref);
      offset_src_type = nir_tex_src_sampler_offset;
      *base_index = state->set[set].sampler_offsets[binding];
   }

   nir_ssa_def *index = NULL;
   if (deref->deref_type != nir_deref_type_var) {
      assert(deref->deref_type == nir_deref_type_array);

      if (nir_src_is_const(deref->arr.index)) {
         unsigned arr_index = nir_src_as_uint(deref->arr.index);
         *base_index += MIN2(arr_index, array_size - 1);
      } else {
         nir_builder *b = &state->builder;

         /* From VK_KHR_sampler_ycbcr_conversion:
          *
          *    If sampler Y’CBCR conversion is enabled, the combined image
          *    sampler must be indexed only by constant integral expressions
          *    when aggregated into arrays in shader code, irrespective of
          *    the shaderSampledImageArrayDynamicIndexing feature.
          */
         assert(nir_tex_instr_src_index(tex, nir_tex_src_plane) == -1);

         index = nir_ssa_for_src(b, deref->arr.index, 1);

         if (state->add_bounds_checks)
            index = nir_umin(b, index, nir_imm_int(b, array_size - 1));
      }
   }

   if (index) {
      nir_instr_rewrite_src(&tex->instr, &tex->src[deref_src_idx].src,
                            nir_src_for_ssa(index));
      tex->src[deref_src_idx].src_type = offset_src_type;
   } else {
      nir_tex_instr_remove_src(tex, deref_src_idx);
   }
}

static uint32_t
tex_instr_get_and_remove_plane_src(nir_tex_instr *tex)
{
   int plane_src_idx = nir_tex_instr_src_index(tex, nir_tex_src_plane);
   if (plane_src_idx < 0)
      return 0;

   unsigned plane = nir_src_as_uint(tex->src[plane_src_idx].src);

   nir_tex_instr_remove_src(tex, plane_src_idx);

   return plane;
}

static void
lower_tex(nir_tex_instr *tex, struct apply_pipeline_layout_state *state)
{
   state->builder.cursor = nir_before_instr(&tex->instr);

   unsigned plane = tex_instr_get_and_remove_plane_src(tex);

   lower_tex_deref(tex, nir_tex_src_texture_deref,
                   &tex->texture_index, state);
   tex->texture_index += plane;

   lower_tex_deref(tex, nir_tex_src_sampler_deref,
                   &tex->sampler_index, state);
   tex->sampler_index += plane;

   /* The backend only ever uses this to mark used surfaces.  We don't care
    * about that little optimization so it just needs to be non-zero.
    */
   tex->texture_array_size = 1;
}

static void
apply_pipeline_layout_block(nir_block *block,
                            struct apply_pipeline_layout_state *state)
{
   nir_foreach_instr_safe(instr, block) {
      switch (instr->type) {
      case nir_instr_type_intrinsic: {
         nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
         switch (intrin->intrinsic) {
         case nir_intrinsic_vulkan_resource_index:
            lower_res_index_intrinsic(intrin, state);
            break;
         case nir_intrinsic_vulkan_resource_reindex:
            lower_res_reindex_intrinsic(intrin, state);
            break;
         case nir_intrinsic_load_vulkan_descriptor:
            lower_load_vulkan_descriptor(intrin, state);
            break;
         case nir_intrinsic_get_buffer_size:
            lower_get_buffer_size(intrin, state);
            break;
         case nir_intrinsic_image_deref_load:
         case nir_intrinsic_image_deref_store:
         case nir_intrinsic_image_deref_atomic_add:
         case nir_intrinsic_image_deref_atomic_min:
         case nir_intrinsic_image_deref_atomic_max:
         case nir_intrinsic_image_deref_atomic_and:
         case nir_intrinsic_image_deref_atomic_or:
         case nir_intrinsic_image_deref_atomic_xor:
         case nir_intrinsic_image_deref_atomic_exchange:
         case nir_intrinsic_image_deref_atomic_comp_swap:
         case nir_intrinsic_image_deref_size:
         case nir_intrinsic_image_deref_samples:
         case nir_intrinsic_image_deref_load_param_intel:
         case nir_intrinsic_image_deref_load_raw_intel:
         case nir_intrinsic_image_deref_store_raw_intel:
            lower_image_intrinsic(intrin, state);
            break;
         case nir_intrinsic_load_constant:
            lower_load_constant(intrin, state);
            break;
         default:
            break;
         }
         break;
      }
      case nir_instr_type_tex:
         lower_tex(nir_instr_as_tex(instr), state);
         break;
      default:
         continue;
      }
   }
}

struct binding_info {
   uint32_t binding;
   uint8_t set;
   uint16_t score;
};

static int
compare_binding_infos(const void *_a, const void *_b)
{
   const struct binding_info *a = _a, *b = _b;
   if (a->score != b->score)
      return b->score - a->score;

   if (a->set != b->set)
      return a->set - b->set;

   return a->binding - b->binding;
}

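/* Driver entry point.  First walk the shader to count binding uses, then
 * assign binding-table and sampler-table slots (most heavily used bindings
 * first), and finally rewrite every descriptor-referencing instruction to
 * use those slots.
 */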
void
anv_nir_apply_pipeline_layout(const struct anv_physical_device *pdevice,
                              bool robust_buffer_access,
                              struct anv_pipeline_layout *layout,
                              nir_shader *shader,
                              struct brw_stage_prog_data *prog_data,
                              struct anv_pipeline_bind_map *map)
{
   struct apply_pipeline_layout_state state = {
      .pdevice = pdevice,
      .shader = shader,
      .layout = layout,
      .add_bounds_checks = robust_buffer_access,
   };

   void *mem_ctx = ralloc_context(NULL);

   for (unsigned s = 0; s < layout->num_sets; s++) {
      const unsigned count = layout->set[s].layout->binding_count;
      state.set[s].use_count = rzalloc_array(mem_ctx, uint8_t, count);
      state.set[s].surface_offsets = rzalloc_array(mem_ctx, uint8_t, count);
      state.set[s].sampler_offsets = rzalloc_array(mem_ctx, uint8_t, count);
   }

   nir_foreach_function(function, shader) {
      if (!function->impl)
         continue;

      nir_foreach_block(block, function->impl)
         get_used_bindings_block(block, &state);
   }

   for (unsigned s = 0; s < layout->num_sets; s++) {
      if (state.set[s].desc_buffer_used) {
         map->surface_to_descriptor[map->surface_count] =
            (struct anv_pipeline_binding) {
               .set = ANV_DESCRIPTOR_SET_DESCRIPTORS,
               .binding = s,
            };
         state.set[s].desc_offset = map->surface_count;
         map->surface_count++;
      }
   }

   if (state.uses_constants) {
      state.constants_offset = map->surface_count;
      map->surface_to_descriptor[map->surface_count].set =
         ANV_DESCRIPTOR_SET_SHADER_CONSTANTS;
      map->surface_count++;
   }

   unsigned used_binding_count = 0;
   for (uint32_t set = 0; set < layout->num_sets; set++) {
      struct anv_descriptor_set_layout *set_layout = layout->set[set].layout;
      for (unsigned b = 0; b < set_layout->binding_count; b++) {
         if (state.set[set].use_count[b] == 0)
            continue;

         used_binding_count++;
      }
   }

   struct binding_info *infos =
      rzalloc_array(mem_ctx, struct binding_info, used_binding_count);
   used_binding_count = 0;
   for (uint32_t set = 0; set < layout->num_sets; set++) {
      struct anv_descriptor_set_layout *set_layout = layout->set[set].layout;
      for (unsigned b = 0; b < set_layout->binding_count; b++) {
         if (state.set[set].use_count[b] == 0)
            continue;

         struct anv_descriptor_set_binding_layout *binding =
               &layout->set[set].layout->binding[b];

         /* Do a fixed-point calculation to generate a score based on the
          * number of uses and the binding array size.
          */
         uint16_t score = ((uint16_t)state.set[set].use_count[b] << 7) /
                          binding->array_size;

         infos[used_binding_count++] = (struct binding_info) {
            .set = set,
            .binding = b,
            .score = score,
         };
      }
   }

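   /* As an illustrative example of the fixed-point score above: a binding
    * used 3 times with array_size 4 scores (3 << 7) / 4 = 96, while one used
    * 3 times with array_size 1 scores 384, so densely used bindings sort to
    * the front of the table.
    */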
   /* Order the binding infos based on score with highest scores first.  If
    * scores are equal we then order by set and binding.
    */
   qsort(infos, used_binding_count, sizeof(struct binding_info),
         compare_binding_infos);

   for (unsigned i = 0; i < used_binding_count; i++) {
      unsigned set = infos[i].set, b = infos[i].binding;
      struct anv_descriptor_set_binding_layout *binding =
            &layout->set[set].layout->binding[b];

      if (binding->data & ANV_DESCRIPTOR_SURFACE_STATE) {
         state.set[set].surface_offsets[b] = map->surface_count;
         struct anv_sampler **samplers = binding->immutable_samplers;
         for (unsigned i = 0; i < binding->array_size; i++) {
            uint8_t planes = samplers ? samplers[i]->n_planes : 1;
            for (uint8_t p = 0; p < planes; p++) {
               map->surface_to_descriptor[map->surface_count++] =
                  (struct anv_pipeline_binding) {
                     .set = set,
                     .binding = b,
                     .index = i,
                     .plane = p,
                  };
            }
         }
      }
      assert(map->surface_count <= MAX_BINDING_TABLE_SIZE);

      if (binding->data & ANV_DESCRIPTOR_SAMPLER_STATE) {
         state.set[set].sampler_offsets[b] = map->sampler_count;
         struct anv_sampler **samplers = binding->immutable_samplers;
         for (unsigned i = 0; i < binding->array_size; i++) {
            uint8_t planes = samplers ? samplers[i]->n_planes : 1;
            for (uint8_t p = 0; p < planes; p++) {
               map->sampler_to_descriptor[map->sampler_count++] =
                  (struct anv_pipeline_binding) {
                     .set = set,
                     .binding = b,
                     .index = i,
                     .plane = p,
                  };
            }
         }
      }
   }

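   /* Now that binding-table slots are assigned, annotate image bindings
    * with input-attachment indices and write-only flags so the backend can
    * emit the right surface state.
    */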
   nir_foreach_variable(var, &shader->uniforms) {
      const struct glsl_type *glsl_type = glsl_without_array(var->type);

      if (!glsl_type_is_image(glsl_type))
         continue;

      enum glsl_sampler_dim dim = glsl_get_sampler_dim(glsl_type);

      const uint32_t set = var->data.descriptor_set;
      const uint32_t binding = var->data.binding;
      const uint32_t array_size =
         layout->set[set].layout->binding[binding].array_size;

      if (state.set[set].use_count[binding] == 0)
         continue;

      struct anv_pipeline_binding *pipe_binding =
         &map->surface_to_descriptor[state.set[set].surface_offsets[binding]];
      for (unsigned i = 0; i < array_size; i++) {
         assert(pipe_binding[i].set == set);
         assert(pipe_binding[i].binding == binding);
         assert(pipe_binding[i].index == i);

         if (dim == GLSL_SAMPLER_DIM_SUBPASS ||
             dim == GLSL_SAMPLER_DIM_SUBPASS_MS)
            pipe_binding[i].input_attachment_index = var->data.index + i;

         pipe_binding[i].write_only =
            (var->data.image.access & ACCESS_NON_READABLE) != 0;
      }
   }

   nir_foreach_function(function, shader) {
      if (!function->impl)
         continue;

      nir_builder_init(&state.builder, function->impl);
      nir_foreach_block(block, function->impl)
         apply_pipeline_layout_block(block, &state);
      nir_metadata_preserve(function->impl, nir_metadata_block_index |
                                            nir_metadata_dominance);
   }

   ralloc_free(mem_ctx);
}