/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "anv_nir.h"
#include "program/prog_parameter.h"
#include "nir/nir_builder.h"
#include "compiler/brw_nir.h"
/* Sampler tables don't actually have a maximum size but we pick one just so
 * that we don't end up emitting too much state on-the-fly.
 */
#define MAX_SAMPLER_TABLE_SIZE 128
#define BINDLESS_OFFSET        255
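
/* Bookkeeping shared by all of the lowering helpers below.  One instance of
 * this struct is built up per shader and threaded through every pass.
 */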
struct apply_pipeline_layout_state {
   const struct anv_physical_device *pdevice;

   nir_shader *shader;
   nir_builder builder;

   struct anv_pipeline_layout *layout;
   bool add_bounds_checks;

   bool uses_constants;
   uint8_t constants_offset;

   struct {
      bool desc_buffer_used;
      uint8_t desc_offset;

      uint8_t *use_count;
      uint8_t *surface_offsets;
      uint8_t *sampler_offsets;
   } set[MAX_SETS];
};
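
/* Records one use of a (set, binding) pair, saturating the use count so the
 * fixed-point scoring below never overflows a uint8_t.
 */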
static void
add_binding(struct apply_pipeline_layout_state *state,
            uint32_t set, uint32_t binding)
{
   const struct anv_descriptor_set_binding_layout *bind_layout =
      &state->layout->set[set].layout->binding[binding];

   if (state->set[set].use_count[binding] < UINT8_MAX)
      state->set[set].use_count[binding]++;

   /* Only flag the descriptor buffer as used if there's actually data for
    * this binding.  This lets us be lazy and call this function constantly
    * without worrying about unnecessarily enabling the buffer.
    */
   if (anv_descriptor_size(bind_layout))
      state->set[set].desc_buffer_used = true;
}
static void
add_deref_src_binding(struct apply_pipeline_layout_state *state, nir_src src)
{
   nir_deref_instr *deref = nir_src_as_deref(src);
   nir_variable *var = nir_deref_instr_get_variable(deref);
   add_binding(state, var->data.descriptor_set, var->data.binding);
}
static void
add_tex_src_binding(struct apply_pipeline_layout_state *state,
                    nir_tex_instr *tex, nir_tex_src_type deref_src_type)
{
   int deref_src_idx = nir_tex_instr_src_index(tex, deref_src_type);
   if (deref_src_idx < 0)
      return;

   add_deref_src_binding(state, tex->src[deref_src_idx].src);
}
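
/* First pass: walk a block and record every (set, binding) pair the shader
 * actually references so that unused bindings never take up table slots.
 */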
static void
get_used_bindings_block(nir_block *block,
                        struct apply_pipeline_layout_state *state)
{
   nir_foreach_instr_safe(instr, block) {
      switch (instr->type) {
      case nir_instr_type_intrinsic: {
         nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
         switch (intrin->intrinsic) {
         case nir_intrinsic_vulkan_resource_index:
            add_binding(state, nir_intrinsic_desc_set(intrin),
                        nir_intrinsic_binding(intrin));
            break;

         case nir_intrinsic_image_deref_load:
         case nir_intrinsic_image_deref_store:
         case nir_intrinsic_image_deref_atomic_add:
         case nir_intrinsic_image_deref_atomic_min:
         case nir_intrinsic_image_deref_atomic_max:
         case nir_intrinsic_image_deref_atomic_and:
         case nir_intrinsic_image_deref_atomic_or:
         case nir_intrinsic_image_deref_atomic_xor:
         case nir_intrinsic_image_deref_atomic_exchange:
         case nir_intrinsic_image_deref_atomic_comp_swap:
         case nir_intrinsic_image_deref_size:
         case nir_intrinsic_image_deref_samples:
         case nir_intrinsic_image_deref_load_param_intel:
         case nir_intrinsic_image_deref_load_raw_intel:
         case nir_intrinsic_image_deref_store_raw_intel:
            add_deref_src_binding(state, intrin->src[0]);
            break;

         case nir_intrinsic_load_constant:
            state->uses_constants = true;
            break;

         default:
            break;
         }
         break;
      }
      case nir_instr_type_tex: {
         nir_tex_instr *tex = nir_instr_as_tex(instr);
         add_tex_src_binding(state, tex, nir_tex_src_texture_deref);
         add_tex_src_binding(state, tex, nir_tex_src_sampler_deref);
         break;
      }
      default:
         continue;
      }
   }
}
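
/* Turns vulkan_resource_index into an actual binding table index.  The
 * result is a vec2 following nir_address_format_32bit_index_offset: the
 * first component is the binding table entry and the second is a byte
 * offset within it.
 */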
static void
lower_res_index_intrinsic(nir_intrinsic_instr *intrin,
                          struct apply_pipeline_layout_state *state)
{
   nir_builder *b = &state->builder;

   b->cursor = nir_before_instr(&intrin->instr);

   uint32_t set = nir_intrinsic_desc_set(intrin);
   uint32_t binding = nir_intrinsic_binding(intrin);

   const struct anv_descriptor_set_binding_layout *bind_layout =
      &state->layout->set[set].layout->binding[binding];

   uint32_t surface_index = state->set[set].surface_offsets[binding];
   uint32_t array_size = bind_layout->array_size;

   nir_ssa_def *array_index = nir_ssa_for_src(b, intrin->src[0], 1);
   if (nir_src_is_const(intrin->src[0]) || state->add_bounds_checks)
      array_index = nir_umin(b, array_index, nir_imm_int(b, array_size - 1));

   nir_ssa_def *index;
   if (bind_layout->data & ANV_DESCRIPTOR_INLINE_UNIFORM) {
      /* This is an inline uniform block.  Just reference the descriptor set
       * and use the descriptor offset as the base.
       */
      index = nir_imm_ivec2(b, state->set[set].desc_offset,
                            bind_layout->descriptor_offset);
   } else {
      /* We're using nir_address_format_32bit_index_offset */
      index = nir_vec2(b, nir_iadd_imm(b, array_index, surface_index),
                       nir_imm_int(b, 0));
   }

   assert(intrin->dest.is_ssa);
   nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(index));
   nir_instr_remove(&intrin->instr);
}
static void
lower_res_reindex_intrinsic(nir_intrinsic_instr *intrin,
                            struct apply_pipeline_layout_state *state)
{
   nir_builder *b = &state->builder;

   b->cursor = nir_before_instr(&intrin->instr);

   /* For us, the resource indices are just indices into the binding table and
    * array elements are sequential.  A resource_reindex just turns into an
    * add of the two indices.
    */
   assert(intrin->src[0].is_ssa && intrin->src[1].is_ssa);
   nir_ssa_def *old_index = intrin->src[0].ssa;
   nir_ssa_def *offset = intrin->src[1].ssa;

   nir_ssa_def *new_index =
      nir_vec2(b, nir_iadd(b, nir_channel(b, old_index, 0), offset),
               nir_channel(b, old_index, 1));

   assert(intrin->dest.is_ssa);
   nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(new_index));
   nir_instr_remove(&intrin->instr);
}
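
/* Because the index/offset pair built above already is the final descriptor
 * "address", loading the descriptor is a no-op: just forward the index.
 */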
static void
lower_load_vulkan_descriptor(nir_intrinsic_instr *intrin,
                             struct apply_pipeline_layout_state *state)
{
   nir_builder *b = &state->builder;

   b->cursor = nir_before_instr(&intrin->instr);

   /* We follow the nir_address_format_32bit_index_offset model */
   assert(intrin->src[0].is_ssa);
   nir_ssa_def *index = intrin->src[0].ssa;

   assert(intrin->dest.is_ssa);
   nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(index));
   nir_instr_remove(&intrin->instr);
}
static void
lower_get_buffer_size(nir_intrinsic_instr *intrin,
                      struct apply_pipeline_layout_state *state)
{
   nir_builder *b = &state->builder;

   b->cursor = nir_before_instr(&intrin->instr);

   assert(intrin->src[0].is_ssa);
   nir_ssa_def *index = intrin->src[0].ssa;

   /* We're following the nir_address_format_32bit_index_offset model so the
    * binding table index is the first component of the address.  The
    * back-end wants a scalar binding table index source.
    */
   nir_instr_rewrite_src(&intrin->instr, &intrin->src[0],
                         nir_src_for_ssa(nir_channel(b, index, 0)));
}
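
/* Emits a UBO load from the descriptor buffer for the given set.  Used for
 * anything (such as image params) that lives in the descriptor data itself.
 */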
static nir_ssa_def *
build_descriptor_load(nir_deref_instr *deref, unsigned offset,
                      unsigned num_components, unsigned bit_size,
                      struct apply_pipeline_layout_state *state)
{
   nir_variable *var = nir_deref_instr_get_variable(deref);

   unsigned set = var->data.descriptor_set;
   unsigned binding = var->data.binding;
   unsigned array_size =
      state->layout->set[set].layout->binding[binding].array_size;

   const struct anv_descriptor_set_binding_layout *bind_layout =
      &state->layout->set[set].layout->binding[binding];

   nir_builder *b = &state->builder;

   nir_ssa_def *desc_buffer_index =
      nir_imm_int(b, state->set[set].desc_offset);

   nir_ssa_def *desc_offset =
      nir_imm_int(b, bind_layout->descriptor_offset + offset);
   if (deref->deref_type != nir_deref_type_var) {
      assert(deref->deref_type == nir_deref_type_array);

      const unsigned descriptor_size = anv_descriptor_size(bind_layout);
      nir_ssa_def *arr_index = nir_ssa_for_src(b, deref->arr.index, 1);
      if (state->add_bounds_checks)
         arr_index = nir_umin(b, arr_index, nir_imm_int(b, array_size - 1));

      desc_offset = nir_iadd(b, desc_offset,
                             nir_imul_imm(b, arr_index, descriptor_size));
   }

   nir_intrinsic_instr *desc_load =
      nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_ubo);
   desc_load->src[0] = nir_src_for_ssa(desc_buffer_index);
   desc_load->src[1] = nir_src_for_ssa(desc_offset);
   desc_load->num_components = num_components;
   nir_ssa_dest_init(&desc_load->instr, &desc_load->dest,
                     num_components, bit_size, NULL);
   nir_builder_instr_insert(b, &desc_load->instr);

   return &desc_load->dest.ssa;
}
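
/* Image intrinsics either need their implicit params loaded from the
 * descriptor buffer (load_param_intel) or their deref source replaced with
 * a flat binding table index.
 */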
static void
lower_image_intrinsic(nir_intrinsic_instr *intrin,
                      struct apply_pipeline_layout_state *state)
{
   nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);

   nir_builder *b = &state->builder;
   b->cursor = nir_before_instr(&intrin->instr);

   if (intrin->intrinsic == nir_intrinsic_image_deref_load_param_intel) {
      b->cursor = nir_instr_remove(&intrin->instr);

      const unsigned param = nir_intrinsic_base(intrin);

      nir_ssa_def *desc =
         build_descriptor_load(deref, param * 16,
                               intrin->dest.ssa.num_components,
                               intrin->dest.ssa.bit_size, state);

      nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(desc));
   } else {
      nir_variable *var = nir_deref_instr_get_variable(deref);

      unsigned set = var->data.descriptor_set;
      unsigned binding = var->data.binding;
      unsigned binding_offset = state->set[set].surface_offsets[binding];
      unsigned array_size =
         state->layout->set[set].layout->binding[binding].array_size;

      nir_ssa_def *index = NULL;
      if (deref->deref_type != nir_deref_type_var) {
         assert(deref->deref_type == nir_deref_type_array);
         index = nir_ssa_for_src(b, deref->arr.index, 1);
         if (state->add_bounds_checks)
            index = nir_umin(b, index, nir_imm_int(b, array_size - 1));
      } else {
         index = nir_imm_int(b, 0);
      }

      index = nir_iadd_imm(b, index, binding_offset);
      nir_rewrite_image_intrinsic(intrin, index, false);
   }
}
static void
lower_load_constant(nir_intrinsic_instr *intrin,
                    struct apply_pipeline_layout_state *state)
{
   nir_builder *b = &state->builder;

   b->cursor = nir_before_instr(&intrin->instr);

   nir_ssa_def *index = nir_imm_int(b, state->constants_offset);
   nir_ssa_def *offset = nir_iadd(b, nir_ssa_for_src(b, intrin->src[0], 1),
                                  nir_imm_int(b, nir_intrinsic_base(intrin)));

   nir_intrinsic_instr *load_ubo =
      nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_ubo);
   load_ubo->num_components = intrin->num_components;
   load_ubo->src[0] = nir_src_for_ssa(index);
   load_ubo->src[1] = nir_src_for_ssa(offset);
   nir_ssa_dest_init(&load_ubo->instr, &load_ubo->dest,
                     intrin->dest.ssa.num_components,
                     intrin->dest.ssa.bit_size, NULL);
   nir_builder_instr_insert(b, &load_ubo->instr);

   nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
                            nir_src_for_ssa(&load_ubo->dest.ssa));
   nir_instr_remove(&intrin->instr);
}
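
/* Rewrites a texture/sampler deref source into a texture or sampler offset.
 * Constant array indices are folded straight into the base index; dynamic
 * ones become an explicit offset source on the instruction.
 */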
static void
lower_tex_deref(nir_tex_instr *tex, nir_tex_src_type deref_src_type,
                unsigned *base_index,
                struct apply_pipeline_layout_state *state)
{
   int deref_src_idx = nir_tex_instr_src_index(tex, deref_src_type);
   if (deref_src_idx < 0)
      return;

   nir_deref_instr *deref = nir_src_as_deref(tex->src[deref_src_idx].src);
   nir_variable *var = nir_deref_instr_get_variable(deref);

   unsigned set = var->data.descriptor_set;
   unsigned binding = var->data.binding;
   unsigned array_size =
      state->layout->set[set].layout->binding[binding].array_size;

   nir_tex_src_type offset_src_type;
   if (deref_src_type == nir_tex_src_texture_deref) {
      offset_src_type = nir_tex_src_texture_offset;
      *base_index = state->set[set].surface_offsets[binding];
   } else {
      assert(deref_src_type == nir_tex_src_sampler_deref);
      offset_src_type = nir_tex_src_sampler_offset;
      *base_index = state->set[set].sampler_offsets[binding];
   }

   nir_ssa_def *index = NULL;
   if (deref->deref_type != nir_deref_type_var) {
      assert(deref->deref_type == nir_deref_type_array);

      if (nir_src_is_const(deref->arr.index)) {
         unsigned arr_index = nir_src_as_uint(deref->arr.index);
         *base_index += MIN2(arr_index, array_size - 1);
      } else {
         nir_builder *b = &state->builder;

         /* From VK_KHR_sampler_ycbcr_conversion:
          *
          *    If sampler Y’CBCR conversion is enabled, the combined image
          *    sampler must be indexed only by constant integral expressions
          *    when aggregated into arrays in shader code, irrespective of
          *    the shaderSampledImageArrayDynamicIndexing feature.
          */
         assert(nir_tex_instr_src_index(tex, nir_tex_src_plane) == -1);

         index = nir_ssa_for_src(b, deref->arr.index, 1);

         if (state->add_bounds_checks)
            index = nir_umin(b, index, nir_imm_int(b, array_size - 1));
      }
   }

   if (index) {
      nir_instr_rewrite_src(&tex->instr, &tex->src[deref_src_idx].src,
                            nir_src_for_ssa(index));
      tex->src[deref_src_idx].src_type = offset_src_type;
   } else {
      nir_tex_instr_remove_src(tex, deref_src_idx);
   }
}
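
/* Pulls the constant plane index off a texture instruction (if any) so the
 * caller can bake it into the texture and sampler indices.
 */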
static uint32_t
tex_instr_get_and_remove_plane_src(nir_tex_instr *tex)
{
   int plane_src_idx = nir_tex_instr_src_index(tex, nir_tex_src_plane);
   if (plane_src_idx < 0)
      return 0;

   unsigned plane = nir_src_as_uint(tex->src[plane_src_idx].src);

   nir_tex_instr_remove_src(tex, plane_src_idx);

   return plane;
}
static void
lower_tex(nir_tex_instr *tex, struct apply_pipeline_layout_state *state)
{
   state->builder.cursor = nir_before_instr(&tex->instr);

   unsigned plane = tex_instr_get_and_remove_plane_src(tex);

   lower_tex_deref(tex, nir_tex_src_texture_deref,
                   &tex->texture_index, state);
   tex->texture_index += plane;

   lower_tex_deref(tex, nir_tex_src_sampler_deref,
                   &tex->sampler_index, state);
   tex->sampler_index += plane;

   /* The backend only ever uses this to mark used surfaces.  We don't care
    * about that little optimization so it just needs to be non-zero.
    */
   tex->texture_array_size = 1;
}
static void
apply_pipeline_layout_block(nir_block *block,
                            struct apply_pipeline_layout_state *state)
{
   nir_foreach_instr_safe(instr, block) {
      switch (instr->type) {
      case nir_instr_type_intrinsic: {
         nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
         switch (intrin->intrinsic) {
         case nir_intrinsic_vulkan_resource_index:
            lower_res_index_intrinsic(intrin, state);
            break;
         case nir_intrinsic_vulkan_resource_reindex:
            lower_res_reindex_intrinsic(intrin, state);
            break;
         case nir_intrinsic_load_vulkan_descriptor:
            lower_load_vulkan_descriptor(intrin, state);
            break;
         case nir_intrinsic_get_buffer_size:
            lower_get_buffer_size(intrin, state);
            break;
         case nir_intrinsic_image_deref_load:
         case nir_intrinsic_image_deref_store:
         case nir_intrinsic_image_deref_atomic_add:
         case nir_intrinsic_image_deref_atomic_min:
         case nir_intrinsic_image_deref_atomic_max:
         case nir_intrinsic_image_deref_atomic_and:
         case nir_intrinsic_image_deref_atomic_or:
         case nir_intrinsic_image_deref_atomic_xor:
         case nir_intrinsic_image_deref_atomic_exchange:
         case nir_intrinsic_image_deref_atomic_comp_swap:
         case nir_intrinsic_image_deref_size:
         case nir_intrinsic_image_deref_samples:
         case nir_intrinsic_image_deref_load_param_intel:
         case nir_intrinsic_image_deref_load_raw_intel:
         case nir_intrinsic_image_deref_store_raw_intel:
            lower_image_intrinsic(intrin, state);
            break;
         case nir_intrinsic_load_constant:
            lower_load_constant(intrin, state);
            break;
         default:
            break;
         }
         break;
      }
      case nir_instr_type_tex:
         lower_tex(nir_instr_as_tex(instr), state);
         break;
      default:
         continue;
      }
   }
}
struct binding_info {
   uint32_t binding;
   uint8_t set;
   uint16_t score;
};
static int
compare_binding_infos(const void *_a, const void *_b)
{
   const struct binding_info *a = _a, *b = _b;
   if (a->score != b->score)
      return b->score - a->score;

   if (a->set != b->set)
      return a->set - b->set;

   return a->binding - b->binding;
}
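
/* Entry point for the pass.  This runs in three phases: count how often each
 * binding is actually used, assign binding table slots in priority order
 * (falling back to bindless when a binding does not fit), and finally
 * rewrite every descriptor-related instruction to use those slots.
 */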
void
anv_nir_apply_pipeline_layout(const struct anv_physical_device *pdevice,
                              bool robust_buffer_access,
                              struct anv_pipeline_layout *layout,
                              nir_shader *shader,
                              struct brw_stage_prog_data *prog_data,
                              struct anv_pipeline_bind_map *map)
{
   struct apply_pipeline_layout_state state = {
      .pdevice = pdevice,
      .shader = shader,
      .layout = layout,
      .add_bounds_checks = robust_buffer_access,
   };

   void *mem_ctx = ralloc_context(NULL);
   for (unsigned s = 0; s < layout->num_sets; s++) {
      const unsigned count = layout->set[s].layout->binding_count;
      state.set[s].use_count = rzalloc_array(mem_ctx, uint8_t, count);
      state.set[s].surface_offsets = rzalloc_array(mem_ctx, uint8_t, count);
      state.set[s].sampler_offsets = rzalloc_array(mem_ctx, uint8_t, count);
   }

   nir_foreach_function(function, shader) {
      if (!function->impl)
         continue;

      nir_foreach_block(block, function->impl)
         get_used_bindings_block(block, &state);
   }
   for (unsigned s = 0; s < layout->num_sets; s++) {
      if (state.set[s].desc_buffer_used) {
         map->surface_to_descriptor[map->surface_count] =
            (struct anv_pipeline_binding) {
               .set = ANV_DESCRIPTOR_SET_DESCRIPTORS,
               .binding = s,
            };
         state.set[s].desc_offset = map->surface_count;
         map->surface_count++;
      }
   }
   if (state.uses_constants) {
      state.constants_offset = map->surface_count;
      map->surface_to_descriptor[map->surface_count].set =
         ANV_DESCRIPTOR_SET_SHADER_CONSTANTS;
      map->surface_count++;
   }
   unsigned used_binding_count = 0;
   for (uint32_t set = 0; set < layout->num_sets; set++) {
      struct anv_descriptor_set_layout *set_layout = layout->set[set].layout;
      for (unsigned b = 0; b < set_layout->binding_count; b++) {
         if (state.set[set].use_count[b] == 0)
            continue;

         used_binding_count++;
      }
   }
   struct binding_info *infos =
      rzalloc_array(mem_ctx, struct binding_info, used_binding_count);
   used_binding_count = 0;
   for (uint32_t set = 0; set < layout->num_sets; set++) {
      struct anv_descriptor_set_layout *set_layout = layout->set[set].layout;
      for (unsigned b = 0; b < set_layout->binding_count; b++) {
         if (state.set[set].use_count[b] == 0)
            continue;

         struct anv_descriptor_set_binding_layout *binding =
               &layout->set[set].layout->binding[b];

         /* Do a fixed-point calculation to generate a score based on the
          * number of uses and the binding array size.  We shift by 7 instead
          * of 8 because we're going to use the top bit below to make
          * everything which does not support bindless much higher priority
          * than things which do.
          */
         uint16_t score = ((uint16_t)state.set[set].use_count[b] << 7) /
                          binding->array_size;

         /* If the descriptor type doesn't support bindless then put it at the
          * beginning so we guarantee it gets a slot.
          */
         if (!anv_descriptor_supports_bindless(pdevice, binding, true) ||
             !anv_descriptor_supports_bindless(pdevice, binding, false))
            score |= 1 << 15;

         infos[used_binding_count++] = (struct binding_info) {
            .set = set,
            .binding = b,
            .score = score,
         };
      }
   }
   /* Order the binding infos based on score with highest scores first.  If
    * scores are equal we then order by set and binding.
    */
   qsort(infos, used_binding_count, sizeof(struct binding_info),
         compare_binding_infos);
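
   /* Assign binding table slots in priority order.  Bindings that don't fit
    * (or that require bindless access) get BINDLESS_OFFSET instead of a
    * real slot.
    */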
   for (unsigned i = 0; i < used_binding_count; i++) {
      unsigned set = infos[i].set, b = infos[i].binding;
      struct anv_descriptor_set_binding_layout *binding =
            &layout->set[set].layout->binding[b];

      const uint32_t array_size = binding->array_size;

      if (binding->data & ANV_DESCRIPTOR_SURFACE_STATE) {
         if (map->surface_count + array_size > MAX_BINDING_TABLE_SIZE ||
             anv_descriptor_requires_bindless(pdevice, binding, false)) {
            /* If this descriptor doesn't fit in the binding table or if it
             * requires bindless for some reason, flag it as bindless.
             */
            assert(anv_descriptor_supports_bindless(pdevice, binding, false));
            state.set[set].surface_offsets[b] = BINDLESS_OFFSET;
         } else {
            state.set[set].surface_offsets[b] = map->surface_count;
            struct anv_sampler **samplers = binding->immutable_samplers;
            for (unsigned i = 0; i < binding->array_size; i++) {
               uint8_t planes = samplers ? samplers[i]->n_planes : 1;
               for (uint8_t p = 0; p < planes; p++) {
                  map->surface_to_descriptor[map->surface_count++] =
                     (struct anv_pipeline_binding) {
                        .set = set,
                        .binding = b,
                        .index = i,
                        .plane = p,
                     };
               }
            }
         }
         assert(map->surface_count <= MAX_BINDING_TABLE_SIZE);
      }

      if (binding->data & ANV_DESCRIPTOR_SAMPLER_STATE) {
         if (map->sampler_count + array_size > MAX_SAMPLER_TABLE_SIZE ||
             anv_descriptor_requires_bindless(pdevice, binding, true)) {
            /* If this descriptor doesn't fit in the binding table or if it
             * requires bindless for some reason, flag it as bindless.
             */
            assert(anv_descriptor_supports_bindless(pdevice, binding, true));
            state.set[set].sampler_offsets[b] = BINDLESS_OFFSET;
         } else {
            state.set[set].sampler_offsets[b] = map->sampler_count;
            struct anv_sampler **samplers = binding->immutable_samplers;
            for (unsigned i = 0; i < binding->array_size; i++) {
               uint8_t planes = samplers ? samplers[i]->n_planes : 1;
               for (uint8_t p = 0; p < planes; p++) {
                  map->sampler_to_descriptor[map->sampler_count++] =
                     (struct anv_pipeline_binding) {
                        .set = set,
                        .binding = b,
                        .index = i,
                        .plane = p,
                     };
               }
            }
         }
      }
   }
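
   /* Now that every used image surface has a binding table entry, fill in
    * the per-element metadata (input attachment indices and write-only
    * flags) that the back-end needs.
    */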
   nir_foreach_variable(var, &shader->uniforms) {
      const struct glsl_type *glsl_type = glsl_without_array(var->type);

      if (!glsl_type_is_image(glsl_type))
         continue;

      enum glsl_sampler_dim dim = glsl_get_sampler_dim(glsl_type);

      const uint32_t set = var->data.descriptor_set;
      const uint32_t binding = var->data.binding;
      const uint32_t array_size =
         layout->set[set].layout->binding[binding].array_size;

      if (state.set[set].use_count[binding] == 0)
         continue;

      if (state.set[set].surface_offsets[binding] >= MAX_BINDING_TABLE_SIZE)
         continue;

      struct anv_pipeline_binding *pipe_binding =
         &map->surface_to_descriptor[state.set[set].surface_offsets[binding]];
      for (unsigned i = 0; i < array_size; i++) {
         assert(pipe_binding[i].set == set);
         assert(pipe_binding[i].binding == binding);
         assert(pipe_binding[i].index == i);

         if (dim == GLSL_SAMPLER_DIM_SUBPASS ||
             dim == GLSL_SAMPLER_DIM_SUBPASS_MS)
            pipe_binding[i].input_attachment_index = var->data.index + i;

         pipe_binding[i].write_only =
            (var->data.image.access & ACCESS_NON_READABLE) != 0;
      }
   }
   nir_foreach_function(function, shader) {
      if (!function->impl)
         continue;

      nir_builder_init(&state.builder, function->impl);
      nir_foreach_block(block, function->impl)
         apply_pipeline_layout_block(block, &state);
      nir_metadata_preserve(function->impl, nir_metadata_block_index |
                                            nir_metadata_dominance);
   }

   ralloc_free(mem_ctx);
}