/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "anv_nir.h"
#include "program/prog_parameter.h"
#include "nir/nir_builder.h"
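
/* This pass maps Vulkan descriptor set/binding pairs onto the flat binding
 * table namespace used by the backend.  It runs in two phases: first it
 * walks the shader and records which bindings are actually used, then it
 * rewrites resource-index intrinsics, texture derefs, and image uniforms
 * to the per-stage surface/sampler/image indices recorded in the bind map.
 */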

struct apply_pipeline_layout_state {
   nir_shader *shader;
   nir_builder builder;

   struct {
      BITSET_WORD *used;
      uint8_t *surface_offsets;
      uint8_t *sampler_offsets;
      uint8_t *image_offsets;
   } set[MAX_SETS];
};
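
/* Mark a (set, binding) pair as used.  The per-set used bitsets drive both
 * the slot counting and the slot assignment below.
 */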
static void
add_binding(struct apply_pipeline_layout_state *state,
            uint32_t set, uint32_t binding)
{
   BITSET_SET(state->set[set].used, binding);
}

static void
add_var_binding(struct apply_pipeline_layout_state *state, nir_variable *var)
{
   add_binding(state, var->data.descriptor_set, var->data.binding);
}
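
/* First pass: record every descriptor binding the block actually references,
 * whether through a resource-index intrinsic, an image intrinsic, or a
 * texture instruction.
 */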
static void
get_used_bindings_block(nir_block *block,
                        struct apply_pipeline_layout_state *state)
{
   nir_foreach_instr_safe(instr, block) {
      switch (instr->type) {
      case nir_instr_type_intrinsic: {
         nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
         switch (intrin->intrinsic) {
         case nir_intrinsic_vulkan_resource_index:
            add_binding(state, nir_intrinsic_desc_set(intrin),
                        nir_intrinsic_binding(intrin));
            break;

         case nir_intrinsic_image_load:
         case nir_intrinsic_image_store:
         case nir_intrinsic_image_atomic_add:
         case nir_intrinsic_image_atomic_min:
         case nir_intrinsic_image_atomic_max:
         case nir_intrinsic_image_atomic_and:
         case nir_intrinsic_image_atomic_or:
         case nir_intrinsic_image_atomic_xor:
         case nir_intrinsic_image_atomic_exchange:
         case nir_intrinsic_image_atomic_comp_swap:
         case nir_intrinsic_image_size:
         case nir_intrinsic_image_samples:
            add_var_binding(state, intrin->variables[0]->var);
            break;

         default:
            break;
         }
         break;
      }
      case nir_instr_type_tex: {
         nir_tex_instr *tex = nir_instr_as_tex(instr);
         assert(tex->texture);
         add_var_binding(state, tex->texture->var);
         if (tex->sampler)
            add_var_binding(state, tex->sampler->var);
         break;
      }
      default:
         continue;
      }
   }
}
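
/* Lower nir_intrinsic_vulkan_resource_index to a flat surface index.  A
 * constant array index folds directly into an immediate; a dynamic one
 * becomes an iadd with the binding's surface offset.
 */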
static void
lower_res_index_intrinsic(nir_intrinsic_instr *intrin,
                          struct apply_pipeline_layout_state *state)
{
   nir_builder *b = &state->builder;

   b->cursor = nir_before_instr(&intrin->instr);

   uint32_t set = nir_intrinsic_desc_set(intrin);
   uint32_t binding = nir_intrinsic_binding(intrin);

   uint32_t surface_index = state->set[set].surface_offsets[binding];

   nir_const_value *const_block_idx =
      nir_src_as_const_value(intrin->src[0]);

   nir_ssa_def *block_index;
   if (const_block_idx) {
      block_index = nir_imm_int(b, surface_index + const_block_idx->u32[0]);
   } else {
      block_index = nir_iadd(b, nir_imm_int(b, surface_index),
                             nir_ssa_for_src(b, intrin->src[0], 1));
   }

   assert(intrin->dest.is_ssa);
   nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(block_index));
   nir_instr_remove(&intrin->instr);
}
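
/* Fold a texture/sampler array deref into the instruction itself: the
 * constant part of the array offset accumulates into *const_index, and any
 * indirect part becomes a first-class texture source of type src_type.
 */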
static void
lower_tex_deref(nir_tex_instr *tex, nir_deref_var *deref,
                unsigned *const_index, nir_tex_src_type src_type,
                struct apply_pipeline_layout_state *state)
{
   if (deref->deref.child) {
      assert(deref->deref.child->deref_type == nir_deref_type_array);
      nir_deref_array *deref_array = nir_deref_as_array(deref->deref.child);

      *const_index += deref_array->base_offset;

      if (deref_array->deref_array_type == nir_deref_array_type_indirect) {
         /* Grow the source array by one to make room for the new source. */
         nir_tex_src *new_srcs = rzalloc_array(tex, nir_tex_src,
                                               tex->num_srcs + 1);

         for (unsigned i = 0; i < tex->num_srcs; i++) {
            new_srcs[i].src_type = tex->src[i].src_type;
            nir_instr_move_src(&tex->instr, &new_srcs[i].src,
                               &tex->src[i].src);
         }

         ralloc_free(tex->src);
         tex->src = new_srcs;

         /* Now we can go ahead and move the source over to being a
          * first-class texture source.
          */
         tex->src[tex->num_srcs].src_type = src_type;
         tex->num_srcs++;
         assert(deref_array->indirect.is_ssa);
         nir_instr_rewrite_src(&tex->instr, &tex->src[tex->num_srcs - 1].src,
                               deref_array->indirect);
      }
   }
}
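
/* Remove the indirect source reference from an array deref, if any, so the
 * deref can be dropped without leaving a dangling use behind.
 */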
static void
cleanup_tex_deref(nir_tex_instr *tex, nir_deref_var *deref)
{
   if (deref->deref.child == NULL)
      return;

   nir_deref_array *deref_array = nir_deref_as_array(deref->deref.child);

   if (deref_array->deref_array_type != nir_deref_array_type_indirect)
      return;

   nir_instr_rewrite_src(&tex->instr, &deref_array->indirect, NIR_SRC_INIT);
}
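
/* Rewrite a texture instruction to use the flat texture/sampler indices
 * assigned in the bind map, then detach its variable derefs entirely.
 */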
static void
lower_tex(nir_tex_instr *tex, struct apply_pipeline_layout_state *state)
{
   /* No one should have come by and lowered it already */
   assert(tex->texture);

   unsigned set = tex->texture->var->data.descriptor_set;
   unsigned binding = tex->texture->var->data.binding;
   tex->texture_index = state->set[set].surface_offsets[binding];
   lower_tex_deref(tex, tex->texture, &tex->texture_index,
                   nir_tex_src_texture_offset, state);

   if (tex->sampler) {
      unsigned set = tex->sampler->var->data.descriptor_set;
      unsigned binding = tex->sampler->var->data.binding;
      tex->sampler_index = state->set[set].sampler_offsets[binding];
      lower_tex_deref(tex, tex->sampler, &tex->sampler_index,
                      nir_tex_src_sampler_offset, state);
   }

   /* The backend only ever uses this to mark used surfaces.  We don't care
    * about that little optimization so it just needs to be non-zero.
    */
   tex->texture_array_size = 1;

   cleanup_tex_deref(tex, tex->texture);
   if (tex->sampler)
      cleanup_tex_deref(tex, tex->sampler);

   tex->texture = NULL;
   tex->sampler = NULL;
}
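
/* Second pass: lower each instruction that still refers to descriptor sets
 * to the flat indices computed above.
 */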
static void
apply_pipeline_layout_block(nir_block *block,
                            struct apply_pipeline_layout_state *state)
{
   nir_foreach_instr_safe(instr, block) {
      switch (instr->type) {
      case nir_instr_type_intrinsic: {
         nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
         if (intrin->intrinsic == nir_intrinsic_vulkan_resource_index) {
            lower_res_index_intrinsic(intrin, state);
         }
         break;
      }
      case nir_instr_type_tex:
         lower_tex(nir_instr_as_tex(instr), state);
         break;
      default:
         continue;
      }
   }
}
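
/* Point up to n uniform slots at the given values and pad the remainder of
 * the vec4 with a pointer to a static zero, since params are consumed in
 * vec4-sized groups.
 */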
static void
setup_vec4_uniform_value(const union gl_constant_value **params,
                         const union gl_constant_value *values,
                         unsigned n)
{
   static const gl_constant_value zero = { 0 };

   for (unsigned i = 0; i < n; ++i)
      params[i] = &values[i];

   for (unsigned i = n; i < 4; ++i)
      params[i] = &zero;
}
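
/* Apply the pipeline layout to a shader: find the used bindings, assign
 * them flat surface/sampler/image slots in the bind map, lower descriptor
 * references to those slots, and set up the image push-constant uniforms
 * the backend expects.
 */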
void
anv_nir_apply_pipeline_layout(struct anv_pipeline *pipeline,
                              nir_shader *shader,
                              struct brw_stage_prog_data *prog_data,
                              struct anv_pipeline_bind_map *map)
{
   struct anv_pipeline_layout *layout = pipeline->layout;

   struct apply_pipeline_layout_state state = {
      .shader = shader,
   };

   void *mem_ctx = ralloc_context(NULL);
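
   /* Allocate the per-set used bitsets and offset arrays, sized from each
    * descriptor set layout.
    */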
   for (unsigned s = 0; s < layout->num_sets; s++) {
      const unsigned count = layout->set[s].layout->binding_count;
      const unsigned words = BITSET_WORDS(count);
      state.set[s].used = rzalloc_array(mem_ctx, BITSET_WORD, words);
      state.set[s].surface_offsets = rzalloc_array(mem_ctx, uint8_t, count);
      state.set[s].sampler_offsets = rzalloc_array(mem_ctx, uint8_t, count);
      state.set[s].image_offsets = rzalloc_array(mem_ctx, uint8_t, count);
   }

   nir_foreach_function(function, shader) {
      if (!function->impl)
         continue;

      nir_foreach_block(block, function->impl)
         get_used_bindings_block(block, &state);
   }
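
   /* Count how many surface, sampler, and image slots the used bindings
    * need in this stage.
    */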
   for (uint32_t set = 0; set < layout->num_sets; set++) {
      struct anv_descriptor_set_layout *set_layout = layout->set[set].layout;

      BITSET_WORD b, _tmp;
      BITSET_FOREACH_SET(b, _tmp, state.set[set].used,
                         set_layout->binding_count) {
         if (set_layout->binding[b].stage[shader->stage].surface_index >= 0)
            map->surface_count += set_layout->binding[b].array_size;
         if (set_layout->binding[b].stage[shader->stage].sampler_index >= 0)
            map->sampler_count += set_layout->binding[b].array_size;
         if (set_layout->binding[b].stage[shader->stage].image_index >= 0)
            map->image_count += set_layout->binding[b].array_size;
      }
   }
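
   /* Assign flat indices to every used binding and record the reverse
    * index -> (set, descriptor) mapping in the bind map.
    */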
   unsigned surface = 0;
   unsigned sampler = 0;
   unsigned image = 0;
   for (uint32_t set = 0; set < layout->num_sets; set++) {
      struct anv_descriptor_set_layout *set_layout = layout->set[set].layout;

      BITSET_WORD b, _tmp;
      BITSET_FOREACH_SET(b, _tmp, state.set[set].used,
                         set_layout->binding_count) {
         unsigned array_size = set_layout->binding[b].array_size;
         unsigned set_offset = set_layout->binding[b].descriptor_index;

         if (set_layout->binding[b].stage[shader->stage].surface_index >= 0) {
            state.set[set].surface_offsets[b] = surface;
            for (unsigned i = 0; i < array_size; i++) {
               map->surface_to_descriptor[surface + i].set = set;
               map->surface_to_descriptor[surface + i].offset = set_offset + i;
            }
            surface += array_size;
         }

         if (set_layout->binding[b].stage[shader->stage].sampler_index >= 0) {
            state.set[set].sampler_offsets[b] = sampler;
            for (unsigned i = 0; i < array_size; i++) {
               map->sampler_to_descriptor[sampler + i].set = set;
               map->sampler_to_descriptor[sampler + i].offset = set_offset + i;
            }
            sampler += array_size;
         }

         if (set_layout->binding[b].stage[shader->stage].image_index >= 0) {
            state.set[set].image_offsets[b] = image;
            image += array_size;
         }
      }
   }
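
   /* With all indices assigned, rewrite the instructions themselves. */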
   nir_foreach_function(function, shader) {
      if (!function->impl)
         continue;

      nir_builder_init(&state.builder, function->impl);
      nir_foreach_block(block, function->impl)
         apply_pipeline_layout_block(block, &state);
      nir_metadata_preserve(function->impl, nir_metadata_block_index |
                                            nir_metadata_dominance);
   }

   if (map->image_count > 0) {
      assert(map->image_count <= MAX_IMAGES);
      nir_foreach_variable(var, &shader->uniforms) {
         if (glsl_type_is_image(var->type) ||
             (glsl_type_is_array(var->type) &&
              glsl_type_is_image(glsl_get_array_element(var->type)))) {
            /* Images are represented as uniform push constants and the actual
             * information required for reading/writing to/from the image is
             * stored in the uniform.
             */
            unsigned set = var->data.descriptor_set;
            unsigned binding = var->data.binding;
            unsigned image_index = state.set[set].image_offsets[binding];

            var->data.driver_location = shader->num_uniforms +
                                        image_index * BRW_IMAGE_PARAM_SIZE * 4;
         }
      }
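
      /* Walking members off a NULL anv_push_constants pointer yields the
       * locations of the image params within the push-constant block; the
       * resulting param "pointers" encode those locations and are never
       * dereferenced here.
       */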
      struct anv_push_constants *null_data = NULL;
      const gl_constant_value **param =
         prog_data->param + (shader->num_uniforms / 4);
      const struct brw_image_param *image_param = null_data->images;
      for (uint32_t i = 0; i < map->image_count; i++) {
         setup_vec4_uniform_value(param + BRW_IMAGE_PARAM_SURFACE_IDX_OFFSET,
            (const union gl_constant_value *)&image_param->surface_idx, 1);
         setup_vec4_uniform_value(param + BRW_IMAGE_PARAM_OFFSET_OFFSET,
            (const union gl_constant_value *)image_param->offset, 2);
         setup_vec4_uniform_value(param + BRW_IMAGE_PARAM_SIZE_OFFSET,
            (const union gl_constant_value *)image_param->size, 3);
         setup_vec4_uniform_value(param + BRW_IMAGE_PARAM_STRIDE_OFFSET,
            (const union gl_constant_value *)image_param->stride, 4);
         setup_vec4_uniform_value(param + BRW_IMAGE_PARAM_TILING_OFFSET,
            (const union gl_constant_value *)image_param->tiling, 3);
         setup_vec4_uniform_value(param + BRW_IMAGE_PARAM_SWIZZLING_OFFSET,
            (const union gl_constant_value *)image_param->swizzling, 2);

         param += BRW_IMAGE_PARAM_SIZE;
         image_param++;
      }

      shader->num_uniforms += map->image_count * BRW_IMAGE_PARAM_SIZE * 4;
   }

   ralloc_free(mem_ctx);
}