/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

25 #include "program/prog_parameter.h"
26 #include "nir/nir_builder.h"
struct apply_pipeline_layout_state {
   nir_shader *shader;
   nir_builder builder;

   struct {
      BITSET_WORD *used;
      uint8_t *surface_offsets;
      uint8_t *sampler_offsets;
      uint8_t *image_offsets;
   } set[MAX_SETS];
};
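
/* Mark a (set, binding) pair as used by this shader. */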
static void
add_binding(struct apply_pipeline_layout_state *state,
            uint32_t set, uint32_t binding)
{
   BITSET_SET(state->set[set].used, binding);
}

static void
add_var_binding(struct apply_pipeline_layout_state *state, nir_variable *var)
{
   add_binding(state, var->data.descriptor_set, var->data.binding);
}
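
/* Block callback for the collection phase: records every descriptor binding
 * referenced by resource-index intrinsics, image intrinsics, and texture
 * instructions in this block.
 */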
static bool
get_used_bindings_block(nir_block *block, void *void_state)
{
   struct apply_pipeline_layout_state *state = void_state;

   nir_foreach_instr_safe(block, instr) {
      switch (instr->type) {
      case nir_instr_type_intrinsic: {
         nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
         switch (intrin->intrinsic) {
         case nir_intrinsic_vulkan_resource_index:
            add_binding(state, nir_intrinsic_desc_set(intrin),
                        nir_intrinsic_binding(intrin));
            break;

         case nir_intrinsic_image_load:
         case nir_intrinsic_image_store:
         case nir_intrinsic_image_atomic_add:
         case nir_intrinsic_image_atomic_min:
         case nir_intrinsic_image_atomic_max:
         case nir_intrinsic_image_atomic_and:
         case nir_intrinsic_image_atomic_or:
         case nir_intrinsic_image_atomic_xor:
         case nir_intrinsic_image_atomic_exchange:
         case nir_intrinsic_image_atomic_comp_swap:
         case nir_intrinsic_image_size:
         case nir_intrinsic_image_samples:
            add_var_binding(state, intrin->variables[0]->var);
            break;

         default:
            break;
         }
         break;
      }
      case nir_instr_type_tex: {
         nir_tex_instr *tex = nir_instr_as_tex(instr);
         add_var_binding(state, tex->texture->var);
         if (tex->sampler)
            add_var_binding(state, tex->sampler->var);
         break;
      }
      default:
         continue;
      }
   }

   return true;
}
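
/* Rewrites a vulkan_resource_index intrinsic into an actual binding-table
 * index.  For a constant array index the whole thing folds into an
 * immediate; otherwise we emit an iadd of the base surface index and the
 * dynamic index.
 */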
static void
lower_res_index_intrinsic(nir_intrinsic_instr *intrin,
                          struct apply_pipeline_layout_state *state)
{
   nir_builder *b = &state->builder;

   b->cursor = nir_before_instr(&intrin->instr);

   uint32_t set = nir_intrinsic_desc_set(intrin);
   uint32_t binding = nir_intrinsic_binding(intrin);

   uint32_t surface_index = state->set[set].surface_offsets[binding];

   nir_const_value *const_block_idx =
      nir_src_as_const_value(intrin->src[0]);

   nir_ssa_def *block_index;
   if (const_block_idx) {
      block_index = nir_imm_int(b, surface_index + const_block_idx->u32[0]);
   } else {
      block_index = nir_iadd(b, nir_imm_int(b, surface_index),
                             nir_ssa_for_src(b, intrin->src[0], 1));
   }

   assert(intrin->dest.is_ssa);
   nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(block_index));
   nir_instr_remove(&intrin->instr);
}
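
/* Folds the constant portion of a texture/sampler array deref into
 * *const_index and, if the deref has an indirect portion, turns it into an
 * extra texture source of the given type.
 */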
static void
lower_tex_deref(nir_tex_instr *tex, nir_deref_var *deref,
                unsigned *const_index, nir_tex_src_type src_type,
                struct apply_pipeline_layout_state *state)
{
   if (deref->deref.child) {
      assert(deref->deref.child->deref_type == nir_deref_type_array);
      nir_deref_array *deref_array = nir_deref_as_array(deref->deref.child);

      *const_index += deref_array->base_offset;

      if (deref_array->deref_array_type == nir_deref_array_type_indirect) {
         /* Grow the source array by one and copy the existing sources over
          * so there is room for the indirect index.
          */
         nir_tex_src *new_srcs = rzalloc_array(tex, nir_tex_src,
                                               tex->num_srcs + 1);

         for (unsigned i = 0; i < tex->num_srcs; i++) {
            new_srcs[i].src_type = tex->src[i].src_type;
            nir_instr_move_src(&tex->instr, &new_srcs[i].src, &tex->src[i].src);
         }

         ralloc_free(tex->src);
         tex->src = new_srcs;

         /* Now we can go ahead and move the source over to being a
          * first-class texture source.
          */
         tex->src[tex->num_srcs].src_type = src_type;
         tex->num_srcs++;
         assert(deref_array->indirect.is_ssa);
         nir_instr_rewrite_src(&tex->instr, &tex->src[tex->num_srcs - 1].src,
                               deref_array->indirect);
      }
   }
}
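
/* Removes the instruction's reference to the indirect deref source, if any,
 * so the deref can be safely discarded.
 */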
static void
cleanup_tex_deref(nir_tex_instr *tex, nir_deref_var *deref)
{
   if (deref->deref.child == NULL)
      return;

   nir_deref_array *deref_array = nir_deref_as_array(deref->deref.child);

   if (deref_array->deref_array_type != nir_deref_array_type_indirect)
      return;

   nir_instr_rewrite_src(&tex->instr, &deref_array->indirect, NIR_SRC_INIT);
}
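
/* Rewrites a texture instruction to use flat texture/sampler indices
 * instead of variable derefs.
 */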
static void
lower_tex(nir_tex_instr *tex, struct apply_pipeline_layout_state *state)
{
   /* No one should have come by and lowered it already */
   assert(tex->texture);

   unsigned set = tex->texture->var->data.descriptor_set;
   unsigned binding = tex->texture->var->data.binding;
   tex->texture_index = state->set[set].surface_offsets[binding];
   lower_tex_deref(tex, tex->texture, &tex->texture_index,
                   nir_tex_src_texture_offset, state);

   if (tex->sampler) {
      unsigned set = tex->sampler->var->data.descriptor_set;
      unsigned binding = tex->sampler->var->data.binding;
      tex->sampler_index = state->set[set].sampler_offsets[binding];
      lower_tex_deref(tex, tex->sampler, &tex->sampler_index,
                      nir_tex_src_sampler_offset, state);
   }

   /* The backend only ever uses this to mark used surfaces.  We don't care
    * about that little optimization so it just needs to be non-zero.
    */
   tex->texture_array_size = 1;

   cleanup_tex_deref(tex, tex->texture);
   if (tex->sampler)
      cleanup_tex_deref(tex, tex->sampler);
   tex->texture = NULL;
   tex->sampler = NULL;
}
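
/* Block callback for the final phase: applies the computed offsets to every
 * instruction that needs lowering.
 */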
static bool
apply_pipeline_layout_block(nir_block *block, void *void_state)
{
   struct apply_pipeline_layout_state *state = void_state;

   nir_foreach_instr_safe(block, instr) {
      switch (instr->type) {
      case nir_instr_type_intrinsic: {
         nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
         if (intrin->intrinsic == nir_intrinsic_vulkan_resource_index)
            lower_res_index_intrinsic(intrin, state);
         break;
      }
      case nir_instr_type_tex:
         lower_tex(nir_instr_as_tex(instr), state);
         break;
      default:
         continue;
      }
   }

   return true;
}
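
/* Points n consecutive uniform slots at the given values and pads the
 * remainder of the vec4 with zero.
 */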
static void
setup_vec4_uniform_value(const union gl_constant_value **params,
                         const union gl_constant_value *values,
                         unsigned n)
{
   static const gl_constant_value zero = { 0 };

   for (unsigned i = 0; i < n; ++i)
      params[i] = &values[i];

   for (unsigned i = n; i < 4; ++i)
      params[i] = &zero;
}
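
/* Entry point: lowers all descriptor references in the shader against the
 * pipeline layout and fills out the bind map for this stage.
 */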
void
anv_nir_apply_pipeline_layout(struct anv_pipeline *pipeline,
                              nir_shader *shader,
                              struct brw_stage_prog_data *prog_data,
                              struct anv_pipeline_bind_map *map)
{
   struct anv_pipeline_layout *layout = pipeline->layout;

   struct apply_pipeline_layout_state state = {
      .shader = shader,
   };

   void *mem_ctx = ralloc_context(NULL);
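
   /* Allocate per-set scratch space: a bitset of used bindings plus the
    * surface/sampler/image offset tables filled out below.
    */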
   for (unsigned s = 0; s < layout->num_sets; s++) {
      const unsigned count = layout->set[s].layout->binding_count;
      const unsigned words = BITSET_WORDS(count);
      state.set[s].used = rzalloc_array(mem_ctx, BITSET_WORD, words);
      state.set[s].surface_offsets = rzalloc_array(mem_ctx, uint8_t, count);
      state.set[s].sampler_offsets = rzalloc_array(mem_ctx, uint8_t, count);
      state.set[s].image_offsets = rzalloc_array(mem_ctx, uint8_t, count);
   }
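
   /* First pass: find out which bindings this shader actually uses. */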
   nir_foreach_function(shader, function) {
      if (function->impl)
         nir_foreach_block(function->impl, get_used_bindings_block, &state);
   }
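
   /* Count the binding-table entries of each kind that this stage needs. */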
   for (uint32_t set = 0; set < layout->num_sets; set++) {
      struct anv_descriptor_set_layout *set_layout = layout->set[set].layout;

      BITSET_WORD b, _tmp;
      BITSET_FOREACH_SET(b, _tmp, state.set[set].used,
                         set_layout->binding_count) {
         if (set_layout->binding[b].stage[shader->stage].surface_index >= 0)
            map->surface_count += set_layout->binding[b].array_size;
         if (set_layout->binding[b].stage[shader->stage].sampler_index >= 0)
            map->sampler_count += set_layout->binding[b].array_size;
         if (set_layout->binding[b].stage[shader->stage].image_index >= 0)
            map->image_count += set_layout->binding[b].array_size;
      }
   }
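
   /* Second pass: hand out contiguous surface/sampler/image offsets for
    * each used binding and record the reverse mapping in the bind map.
    */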
   unsigned surface = 0;
   unsigned sampler = 0;
   unsigned image = 0;
   for (uint32_t set = 0; set < layout->num_sets; set++) {
      struct anv_descriptor_set_layout *set_layout = layout->set[set].layout;

      BITSET_WORD b, _tmp;
      BITSET_FOREACH_SET(b, _tmp, state.set[set].used,
                         set_layout->binding_count) {
         unsigned array_size = set_layout->binding[b].array_size;
         unsigned set_offset = set_layout->binding[b].descriptor_index;

         if (set_layout->binding[b].stage[shader->stage].surface_index >= 0) {
            state.set[set].surface_offsets[b] = surface;
            for (unsigned i = 0; i < array_size; i++) {
               map->surface_to_descriptor[surface + i].set = set;
               map->surface_to_descriptor[surface + i].offset = set_offset + i;
            }
            surface += array_size;
         }

         if (set_layout->binding[b].stage[shader->stage].sampler_index >= 0) {
            state.set[set].sampler_offsets[b] = sampler;
            for (unsigned i = 0; i < array_size; i++) {
               map->sampler_to_descriptor[sampler + i].set = set;
               map->sampler_to_descriptor[sampler + i].offset = set_offset + i;
            }
            sampler += array_size;
         }

         if (set_layout->binding[b].stage[shader->stage].image_index >= 0) {
            state.set[set].image_offsets[b] = image;
            image += array_size;
         }
      }
   }
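
   /* With all offsets assigned, rewrite the actual instructions. */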
   nir_foreach_function(shader, function) {
      if (function->impl) {
         nir_builder_init(&state.builder, function->impl);
         nir_foreach_block(function->impl, apply_pipeline_layout_block, &state);
         nir_metadata_preserve(function->impl, nir_metadata_block_index |
                                               nir_metadata_dominance);
      }
   }
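
   /* Images need their brw_image_param data pushed as uniforms; point each
    * image variable's driver_location at its slice of that uniform space and
    * set up the corresponding uniform params.
    */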
   if (map->image_count > 0) {
      assert(map->image_count <= MAX_IMAGES);
      nir_foreach_variable(var, &shader->uniforms) {
         if (glsl_type_is_image(var->type) ||
             (glsl_type_is_array(var->type) &&
              glsl_type_is_image(glsl_get_array_element(var->type)))) {
            /* Images are represented as uniform push constants and the actual
             * information required for reading/writing to/from the image is
             * stored in the uniform.
             */
            unsigned set = var->data.descriptor_set;
            unsigned binding = var->data.binding;
            unsigned image_index = state.set[set].image_offsets[binding];

            var->data.driver_location = shader->num_uniforms +
                                        image_index * BRW_IMAGE_PARAM_SIZE * 4;
         }
      }

      /* null_data is only used to compute the field offsets of the image
       * params within anv_push_constants; it is never dereferenced as an
       * actual object.
       */
      struct anv_push_constants *null_data = NULL;
      const gl_constant_value **param =
         prog_data->param + (shader->num_uniforms / 4);
      const struct brw_image_param *image_param = null_data->images;
      for (uint32_t i = 0; i < map->image_count; i++) {
         setup_vec4_uniform_value(param + BRW_IMAGE_PARAM_SURFACE_IDX_OFFSET,
            (const union gl_constant_value *)&image_param->surface_idx, 1);
         setup_vec4_uniform_value(param + BRW_IMAGE_PARAM_OFFSET_OFFSET,
            (const union gl_constant_value *)image_param->offset, 2);
         setup_vec4_uniform_value(param + BRW_IMAGE_PARAM_SIZE_OFFSET,
            (const union gl_constant_value *)image_param->size, 3);
         setup_vec4_uniform_value(param + BRW_IMAGE_PARAM_STRIDE_OFFSET,
            (const union gl_constant_value *)image_param->stride, 4);
         setup_vec4_uniform_value(param + BRW_IMAGE_PARAM_TILING_OFFSET,
            (const union gl_constant_value *)image_param->tiling, 3);
         setup_vec4_uniform_value(param + BRW_IMAGE_PARAM_SWIZZLING_OFFSET,
            (const union gl_constant_value *)image_param->swizzling, 2);

         param += BRW_IMAGE_PARAM_SIZE;
         image_param++;
      }

      shader->num_uniforms += map->image_count * BRW_IMAGE_PARAM_SIZE * 4;
   }

   ralloc_free(mem_ctx);
}