/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "anv_nir.h"
#include "program/prog_parameter.h"
#include "nir/nir_builder.h"

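/* This pass resolves Vulkan descriptor set/binding references in a NIR
 * shader to the flat binding-table and sampler indices used by the Intel
 * backend.  The state below tracks, per descriptor set, which bindings the
 * shader actually uses and where each used binding lands in the surface,
 * sampler, and image tables.
 */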
struct apply_pipeline_layout_state {
   nir_shader *shader;
   nir_builder builder;

   struct anv_pipeline_layout *layout;
   bool add_bounds_checks;

   struct {
      BITSET_WORD *used;
      uint8_t *surface_offsets;
      uint8_t *sampler_offsets;
      uint8_t *image_offsets;
   } set[MAX_SETS];
};

static void
add_binding(struct apply_pipeline_layout_state *state,
            uint32_t set, uint32_t binding)
{
   BITSET_SET(state->set[set].used, binding);
}

static void
add_var_binding(struct apply_pipeline_layout_state *state, nir_variable *var)
{
   add_binding(state, var->data.descriptor_set, var->data.binding);
}

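/* First pass: walk a block and mark every set/binding pair referenced by a
 * resource-index intrinsic, an image intrinsic, or a texture instruction,
 * so that unused bindings take no space in the binding table.
 */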
static void
get_used_bindings_block(nir_block *block,
                        struct apply_pipeline_layout_state *state)
{
   nir_foreach_instr_safe(instr, block) {
      switch (instr->type) {
      case nir_instr_type_intrinsic: {
         nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
         switch (intrin->intrinsic) {
         case nir_intrinsic_vulkan_resource_index:
            add_binding(state, nir_intrinsic_desc_set(intrin),
                        nir_intrinsic_binding(intrin));
            break;

         case nir_intrinsic_image_load:
         case nir_intrinsic_image_store:
         case nir_intrinsic_image_atomic_add:
         case nir_intrinsic_image_atomic_min:
         case nir_intrinsic_image_atomic_max:
         case nir_intrinsic_image_atomic_and:
         case nir_intrinsic_image_atomic_or:
         case nir_intrinsic_image_atomic_xor:
         case nir_intrinsic_image_atomic_exchange:
         case nir_intrinsic_image_atomic_comp_swap:
         case nir_intrinsic_image_size:
         case nir_intrinsic_image_samples:
            add_var_binding(state, intrin->variables[0]->var);
            break;

         default:
            break;
         }
         break;
      }
      case nir_instr_type_tex: {
         nir_tex_instr *tex = nir_instr_as_tex(instr);
         assert(tex->texture);
         add_var_binding(state, tex->texture->var);
         if (tex->sampler)
            add_var_binding(state, tex->sampler->var);
         break;
      }
      default:
         continue;
      }
   }
}

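/* Turn a vulkan_resource_index intrinsic into the actual surface-table
 * index: the binding's base surface offset plus the (optionally
 * bounds-clamped) array index supplied by the shader.
 */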
static void
lower_res_index_intrinsic(nir_intrinsic_instr *intrin,
                          struct apply_pipeline_layout_state *state)
{
   nir_builder *b = &state->builder;

   b->cursor = nir_before_instr(&intrin->instr);

   uint32_t set = nir_intrinsic_desc_set(intrin);
   uint32_t binding = nir_intrinsic_binding(intrin);

   uint32_t surface_index = state->set[set].surface_offsets[binding];
   uint32_t array_size =
      state->layout->set[set].layout->binding[binding].array_size;

   nir_ssa_def *block_index = nir_ssa_for_src(b, intrin->src[0], 1);

   if (state->add_bounds_checks)
      block_index = nir_umin(b, block_index, nir_imm_int(b, array_size - 1));

   block_index = nir_iadd(b, nir_imm_int(b, surface_index), block_index);

   assert(intrin->dest.is_ssa);
   nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(block_index));
   nir_instr_remove(&intrin->instr);
}

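/* Lower an array deref on a texture or sampler variable.  A constant index
 * is folded directly into *const_index; an indirect index becomes a new
 * texture_offset/sampler_offset source on the texture instruction.
 */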
static void
lower_tex_deref(nir_tex_instr *tex, nir_deref_var *deref,
                unsigned *const_index, unsigned array_size,
                nir_tex_src_type src_type, bool allow_indirect,
                struct apply_pipeline_layout_state *state)
{
   nir_builder *b = &state->builder;

   if (deref->deref.child) {
      assert(deref->deref.child->deref_type == nir_deref_type_array);
      nir_deref_array *deref_array = nir_deref_as_array(deref->deref.child);

      if (deref_array->deref_array_type == nir_deref_array_type_indirect) {
         /* From VK_KHR_sampler_ycbcr_conversion:
          *
          *    If sampler Y’CBCR conversion is enabled, the combined image
          *    sampler must be indexed only by constant integral expressions
          *    when aggregated into arrays in shader code, irrespective of
          *    the shaderSampledImageArrayDynamicIndexing feature.
          */
         assert(allow_indirect);

         nir_ssa_def *index =
            nir_iadd(b, nir_imm_int(b, deref_array->base_offset),
                        nir_ssa_for_src(b, deref_array->indirect, 1));

         if (state->add_bounds_checks)
            index = nir_umin(b, index, nir_imm_int(b, array_size - 1));

         /* Re-allocate the texture source array with room for one more
          * source at the end.
          */
         nir_tex_src *new_srcs = rzalloc_array(tex, nir_tex_src,
                                               tex->num_srcs + 1);

         for (unsigned i = 0; i < tex->num_srcs; i++) {
            new_srcs[i].src_type = tex->src[i].src_type;
            nir_instr_move_src(&tex->instr, &new_srcs[i].src,
                               &tex->src[i].src);
         }

         ralloc_free(tex->src);
         tex->src = new_srcs;

         /* Now we can go ahead and move the source over to being a
          * first-class texture source.
          */
         tex->src[tex->num_srcs].src_type = src_type;
         nir_instr_rewrite_src(&tex->instr, &tex->src[tex->num_srcs].src,
                               nir_src_for_ssa(index));
         tex->num_srcs++;
      } else {
         *const_index += MIN2(deref_array->base_offset, array_size - 1);
      }
   }
}

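/* Drop the indirect source reference left behind on an array deref once
 * lower_tex_deref() has consumed it.
 */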
static void
cleanup_tex_deref(nir_tex_instr *tex, nir_deref_var *deref)
{
   if (deref->deref.child == NULL)
      return;

   nir_deref_array *deref_array = nir_deref_as_array(deref->deref.child);

   if (deref_array->deref_array_type != nir_deref_array_type_indirect)
      return;

   nir_instr_rewrite_src(&tex->instr, &deref_array->indirect, NIR_SRC_INIT);
}

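/* Check whether the texture instruction carries a plane source, which our
 * Y'CbCr conversion lowering adds for multi-plane formats.
 */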
static bool
has_tex_src_plane(nir_tex_instr *tex)
{
   for (unsigned i = 0; i < tex->num_srcs; i++) {
      if (tex->src[i].src_type == nir_tex_src_plane)
         return true;
   }

   return false;
}

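/* Remove the constant plane source from the instruction and return its
 * value so it can be folded into the texture and sampler indices.
 */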
static uint32_t
extract_tex_src_plane(nir_tex_instr *tex)
{
   unsigned plane = 0;

   nir_tex_src *new_srcs = rzalloc_array(tex, nir_tex_src, tex->num_srcs - 1);

   for (unsigned i = 0, w = 0; i < tex->num_srcs; i++) {
      if (tex->src[i].src_type == nir_tex_src_plane) {
         nir_const_value *const_plane =
            nir_src_as_const_value(tex->src[i].src);

         /* Our color conversion lowering pass should only ever insert
          * constant plane indices.
          */
         assert(const_plane);
         plane = const_plane->u32[0];

         /* Remove the source from the instruction */
         nir_instr_rewrite_src(&tex->instr, &tex->src[i].src, NIR_SRC_INIT);
      } else {
         new_srcs[w].src_type = tex->src[i].src_type;
         nir_instr_move_src(&tex->instr, &new_srcs[w].src, &tex->src[i].src);
         w++;
      }
   }

   ralloc_free(tex->src);
   tex->src = new_srcs;
   tex->num_srcs--;

   return plane;
}

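/* Lower a texture instruction: resolve its texture and sampler derefs to
 * flat indices, offset both by the Y'CbCr plane if one was requested, and
 * clean up the consumed derefs.
 */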
static void
lower_tex(nir_tex_instr *tex, struct apply_pipeline_layout_state *state)
{
   /* No one should have come by and lowered it already */
   assert(tex->texture);

   state->builder.cursor = nir_before_instr(&tex->instr);

   unsigned set = tex->texture->var->data.descriptor_set;
   unsigned binding = tex->texture->var->data.binding;
   unsigned array_size =
      state->layout->set[set].layout->binding[binding].array_size;
   bool has_plane = has_tex_src_plane(tex);
   unsigned plane = has_plane ? extract_tex_src_plane(tex) : 0;

   tex->texture_index = state->set[set].surface_offsets[binding];
   lower_tex_deref(tex, tex->texture, &tex->texture_index, array_size,
                   nir_tex_src_texture_offset, !has_plane, state);
   tex->texture_index += plane;

   if (tex->sampler) {
      unsigned set = tex->sampler->var->data.descriptor_set;
      unsigned binding = tex->sampler->var->data.binding;
      unsigned array_size =
         state->layout->set[set].layout->binding[binding].array_size;
      tex->sampler_index = state->set[set].sampler_offsets[binding];
      lower_tex_deref(tex, tex->sampler, &tex->sampler_index, array_size,
                      nir_tex_src_sampler_offset, !has_plane, state);
      tex->sampler_index += plane;
   }

   /* The backend only ever uses this to mark used surfaces.  We don't care
    * about that little optimization so it just needs to be non-zero.
    */
   tex->texture_array_size = 1;

   cleanup_tex_deref(tex, tex->texture);
   if (tex->sampler)
      cleanup_tex_deref(tex, tex->sampler);
}

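/* Second pass: rewrite every resource-index intrinsic and texture
 * instruction in a block using the offsets computed from the layout.
 */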
static void
apply_pipeline_layout_block(nir_block *block,
                            struct apply_pipeline_layout_state *state)
{
   nir_foreach_instr_safe(instr, block) {
      switch (instr->type) {
      case nir_instr_type_intrinsic: {
         nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
         if (intrin->intrinsic == nir_intrinsic_vulkan_resource_index) {
            lower_res_index_intrinsic(intrin, state);
         }
         break;
      }
      case nir_instr_type_tex:
         lower_tex(nir_instr_as_tex(instr), state);
         break;
      default:
         continue;
      }
   }
}

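/* Point n consecutive uniform slots at the given values and pad the rest
 * of the vec4 with zeros.
 */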
static void
setup_vec4_uniform_value(const union gl_constant_value **params,
                         const union gl_constant_value *values,
                         unsigned n)
{
   static const gl_constant_value zero = { 0 };

   for (unsigned i = 0; i < n; ++i)
      params[i] = &values[i];

   for (unsigned i = n; i < 4; ++i)
      params[i] = &zero;
}

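/* Entry point.  Gathers the bindings used by the shader, assigns each used
 * binding its slots in the bind map, rewrites the shader to use those
 * slots, and appends image push-constant parameters for storage images.
 */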
void
anv_nir_apply_pipeline_layout(struct anv_pipeline *pipeline,
                              nir_shader *shader,
                              struct brw_stage_prog_data *prog_data,
                              struct anv_pipeline_bind_map *map)
{
   struct anv_pipeline_layout *layout = pipeline->layout;

   struct apply_pipeline_layout_state state = {
      .shader = shader,
      .layout = layout,
      .add_bounds_checks = pipeline->device->robust_buffer_access,
   };

   void *mem_ctx = ralloc_context(NULL);

   for (unsigned s = 0; s < layout->num_sets; s++) {
      const unsigned count = layout->set[s].layout->binding_count;
      const unsigned words = BITSET_WORDS(count);
      state.set[s].used = rzalloc_array(mem_ctx, BITSET_WORD, words);
      state.set[s].surface_offsets = rzalloc_array(mem_ctx, uint8_t, count);
      state.set[s].sampler_offsets = rzalloc_array(mem_ctx, uint8_t, count);
      state.set[s].image_offsets = rzalloc_array(mem_ctx, uint8_t, count);
   }

   nir_foreach_function(function, shader) {
      if (!function->impl)
         continue;

      nir_foreach_block(block, function->impl)
         get_used_bindings_block(block, &state);
   }

   for (uint32_t set = 0; set < layout->num_sets; set++) {
      struct anv_descriptor_set_layout *set_layout = layout->set[set].layout;

      BITSET_WORD b, _tmp;
      BITSET_FOREACH_SET(b, _tmp, state.set[set].used,
                         set_layout->binding_count) {
         if (set_layout->binding[b].stage[shader->stage].surface_index >= 0) {
            map->surface_count +=
               anv_descriptor_set_binding_layout_get_hw_size(&set_layout->binding[b]);
         }
         if (set_layout->binding[b].stage[shader->stage].sampler_index >= 0) {
            map->sampler_count +=
               anv_descriptor_set_binding_layout_get_hw_size(&set_layout->binding[b]);
         }
         if (set_layout->binding[b].stage[shader->stage].image_index >= 0)
            map->image_count += set_layout->binding[b].array_size;
      }
   }

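   /* Assign each used binding its base offset in the surface, sampler, and
    * image tables, and record the reverse mapping in the bind map.  Each
    * array element of a multi-planar binding gets one slot per plane.
    */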
   unsigned image = 0;
   unsigned surface = 0;
   unsigned sampler = 0;
   for (uint32_t set = 0; set < layout->num_sets; set++) {
      struct anv_descriptor_set_layout *set_layout = layout->set[set].layout;

      BITSET_WORD b, _tmp;
      BITSET_FOREACH_SET(b, _tmp, state.set[set].used,
                         set_layout->binding_count) {
         struct anv_descriptor_set_binding_layout *binding =
            &set_layout->binding[b];

         if (binding->stage[shader->stage].surface_index >= 0) {
            state.set[set].surface_offsets[b] = surface;
            struct anv_sampler **samplers = binding->immutable_samplers;
            for (unsigned i = 0; i < binding->array_size; i++) {
               uint8_t planes = samplers ? samplers[i]->n_planes : 1;
               for (uint8_t p = 0; p < planes; p++) {
                  map->surface_to_descriptor[surface].set = set;
                  map->surface_to_descriptor[surface].binding = b;
                  map->surface_to_descriptor[surface].index = i;
                  map->surface_to_descriptor[surface].plane = p;
                  surface++;
               }
            }
         }

         if (binding->stage[shader->stage].sampler_index >= 0) {
            state.set[set].sampler_offsets[b] = sampler;
            struct anv_sampler **samplers = binding->immutable_samplers;
            for (unsigned i = 0; i < binding->array_size; i++) {
               uint8_t planes = samplers ? samplers[i]->n_planes : 1;
               for (uint8_t p = 0; p < planes; p++) {
                  map->sampler_to_descriptor[sampler].set = set;
                  map->sampler_to_descriptor[sampler].binding = b;
                  map->sampler_to_descriptor[sampler].index = i;
                  map->sampler_to_descriptor[sampler].plane = p;
                  sampler++;
               }
            }
         }

         if (binding->stage[shader->stage].image_index >= 0) {
            state.set[set].image_offsets[b] = image;
            image += binding->array_size;
         }
      }
   }

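   /* Fix up input attachment indices and write-only flags for image
    * uniforms now that their surface slots are known.
    */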
   nir_foreach_variable(var, &shader->uniforms) {
      if (!glsl_type_is_image(var->interface_type))
         continue;

      enum glsl_sampler_dim dim = glsl_get_sampler_dim(var->interface_type);

      const uint32_t set = var->data.descriptor_set;
      const uint32_t binding = var->data.binding;
      const uint32_t array_size =
         layout->set[set].layout->binding[binding].array_size;

      if (!BITSET_TEST(state.set[set].used, binding))
         continue;

      struct anv_pipeline_binding *pipe_binding =
         &map->surface_to_descriptor[state.set[set].surface_offsets[binding]];
      for (unsigned i = 0; i < array_size; i++) {
         assert(pipe_binding[i].set == set);
         assert(pipe_binding[i].binding == binding);
         assert(pipe_binding[i].index == i);

         if (dim == GLSL_SAMPLER_DIM_SUBPASS ||
             dim == GLSL_SAMPLER_DIM_SUBPASS_MS)
            pipe_binding[i].input_attachment_index = var->data.index + i;

         pipe_binding[i].write_only = var->data.image.write_only;
      }
   }

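   /* Pass 2: rewrite the shader itself. */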
   nir_foreach_function(function, shader) {
      if (!function->impl)
         continue;

      nir_builder_init(&state.builder, function->impl);
      nir_foreach_block(block, function->impl)
         apply_pipeline_layout_block(block, &state);
      nir_metadata_preserve(function->impl, nir_metadata_block_index |
                                            nir_metadata_dominance);
   }

   if (map->image_count > 0) {
      assert(map->image_count <= MAX_IMAGES);
      nir_foreach_variable(var, &shader->uniforms) {
         if (glsl_type_is_image(var->type) ||
             (glsl_type_is_array(var->type) &&
              glsl_type_is_image(glsl_get_array_element(var->type)))) {
            /* Images are represented as uniform push constants and the actual
             * information required for reading/writing to/from the image is
             * stored in the uniform.
             */
            unsigned set = var->data.descriptor_set;
            unsigned binding = var->data.binding;
            unsigned image_index = state.set[set].image_offsets[binding];

            var->data.driver_location = shader->num_uniforms +
                                        image_index * BRW_IMAGE_PARAM_SIZE * 4;
         }
      }

      struct anv_push_constants *null_data = NULL;
      const gl_constant_value **param =
         prog_data->param + (shader->num_uniforms / 4);
      const struct brw_image_param *image_param = null_data->images;
      for (uint32_t i = 0; i < map->image_count; i++) {
         setup_vec4_uniform_value(param + BRW_IMAGE_PARAM_SURFACE_IDX_OFFSET,
            (const union gl_constant_value *)&image_param->surface_idx, 1);
         setup_vec4_uniform_value(param + BRW_IMAGE_PARAM_OFFSET_OFFSET,
            (const union gl_constant_value *)image_param->offset, 2);
         setup_vec4_uniform_value(param + BRW_IMAGE_PARAM_SIZE_OFFSET,
            (const union gl_constant_value *)image_param->size, 3);
         setup_vec4_uniform_value(param + BRW_IMAGE_PARAM_STRIDE_OFFSET,
            (const union gl_constant_value *)image_param->stride, 4);
         setup_vec4_uniform_value(param + BRW_IMAGE_PARAM_TILING_OFFSET,
            (const union gl_constant_value *)image_param->tiling, 3);
         setup_vec4_uniform_value(param + BRW_IMAGE_PARAM_SWIZZLING_OFFSET,
            (const union gl_constant_value *)image_param->swizzling, 2);

         param += BRW_IMAGE_PARAM_SIZE;
         image_param++;
      }

      shader->num_uniforms += map->image_count * BRW_IMAGE_PARAM_SIZE * 4;
   }

   ralloc_free(mem_ctx);
}