/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
25 #include "program/prog_parameter.h"
26 #include "nir/nir_builder.h"
28 struct apply_pipeline_layout_state
{
32 const struct anv_pipeline_layout
*layout
;
38 get_surface_index(unsigned set
, unsigned binding
,
39 struct apply_pipeline_layout_state
*state
)
41 assert(set
< state
->layout
->num_sets
);
42 struct anv_descriptor_set_layout
*set_layout
=
43 state
->layout
->set
[set
].layout
;
45 gl_shader_stage stage
= state
->shader
->stage
;
47 assert(binding
< set_layout
->binding_count
);
49 assert(set_layout
->binding
[binding
].stage
[stage
].surface_index
>= 0);
51 uint32_t surface_index
=
52 state
->layout
->set
[set
].stage
[stage
].surface_start
+
53 set_layout
->binding
[binding
].stage
[stage
].surface_index
;
55 assert(surface_index
< state
->layout
->stage
[stage
].surface_count
);
61 get_sampler_index(unsigned set
, unsigned binding
,
62 struct apply_pipeline_layout_state
*state
)
64 assert(set
< state
->layout
->num_sets
);
65 struct anv_descriptor_set_layout
*set_layout
=
66 state
->layout
->set
[set
].layout
;
68 gl_shader_stage stage
= state
->shader
->stage
;
70 assert(binding
< set_layout
->binding_count
);
72 assert(set_layout
->binding
[binding
].stage
[stage
].sampler_index
>= 0);
74 uint32_t sampler_index
=
75 state
->layout
->set
[set
].stage
[stage
].sampler_start
+
76 set_layout
->binding
[binding
].stage
[stage
].sampler_index
;
78 assert(sampler_index
< state
->layout
->stage
[stage
].sampler_count
);
84 get_image_index(unsigned set
, unsigned binding
,
85 struct apply_pipeline_layout_state
*state
)
87 assert(set
< state
->layout
->num_sets
);
88 struct anv_descriptor_set_layout
*set_layout
=
89 state
->layout
->set
[set
].layout
;
91 assert(binding
< set_layout
->binding_count
);
93 gl_shader_stage stage
= state
->shader
->stage
;
95 assert(set_layout
->binding
[binding
].stage
[stage
].image_index
>= 0);
97 uint32_t image_index
=
98 state
->layout
->set
[set
].stage
[stage
].image_start
+
99 set_layout
->binding
[binding
].stage
[stage
].image_index
;
101 assert(image_index
< state
->layout
->stage
[stage
].image_count
);
107 lower_res_index_intrinsic(nir_intrinsic_instr
*intrin
,
108 struct apply_pipeline_layout_state
*state
)
110 nir_builder
*b
= &state
->builder
;
112 b
->cursor
= nir_before_instr(&intrin
->instr
);
114 uint32_t set
= nir_intrinsic_desc_set(intrin
);
115 uint32_t binding
= nir_intrinsic_binding(intrin
);
117 uint32_t surface_index
= get_surface_index(set
, binding
, state
);
119 nir_const_value
*const_block_idx
=
120 nir_src_as_const_value(intrin
->src
[0]);
122 nir_ssa_def
*block_index
;
123 if (const_block_idx
) {
124 block_index
= nir_imm_int(b
, surface_index
+ const_block_idx
->u
[0]);
126 block_index
= nir_iadd(b
, nir_imm_int(b
, surface_index
),
127 nir_ssa_for_src(b
, intrin
->src
[0], 1));
130 assert(intrin
->dest
.is_ssa
);
131 nir_ssa_def_rewrite_uses(&intrin
->dest
.ssa
, nir_src_for_ssa(block_index
));
132 nir_instr_remove(&intrin
->instr
);
136 lower_tex_deref(nir_tex_instr
*tex
, nir_deref_var
*deref
,
137 unsigned *const_index
, nir_tex_src_type src_type
,
138 struct apply_pipeline_layout_state
*state
)
140 if (deref
->deref
.child
) {
141 assert(deref
->deref
.child
->deref_type
== nir_deref_type_array
);
142 nir_deref_array
*deref_array
= nir_deref_as_array(deref
->deref
.child
);
144 *const_index
+= deref_array
->base_offset
;
146 if (deref_array
->deref_array_type
== nir_deref_array_type_indirect
) {
147 nir_tex_src
*new_srcs
= rzalloc_array(tex
, nir_tex_src
,
150 for (unsigned i
= 0; i
< tex
->num_srcs
; i
++) {
151 new_srcs
[i
].src_type
= tex
->src
[i
].src_type
;
152 nir_instr_move_src(&tex
->instr
, &new_srcs
[i
].src
, &tex
->src
[i
].src
);
155 ralloc_free(tex
->src
);
158 /* Now we can go ahead and move the source over to being a
159 * first-class texture source.
161 tex
->src
[tex
->num_srcs
].src_type
= src_type
;
163 assert(deref_array
->indirect
.is_ssa
);
164 nir_instr_rewrite_src(&tex
->instr
, &tex
->src
[tex
->num_srcs
- 1].src
,
165 deref_array
->indirect
);
171 cleanup_tex_deref(nir_tex_instr
*tex
, nir_deref_var
*deref
)
173 if (deref
->deref
.child
== NULL
)
176 nir_deref_array
*deref_array
= nir_deref_as_array(deref
->deref
.child
);
178 if (deref_array
->deref_array_type
!= nir_deref_array_type_indirect
)
181 nir_instr_rewrite_src(&tex
->instr
, &deref_array
->indirect
, NIR_SRC_INIT
);
185 lower_tex(nir_tex_instr
*tex
, struct apply_pipeline_layout_state
*state
)
187 /* No one should have come by and lowered it already */
188 assert(tex
->texture
);
191 get_surface_index(tex
->texture
->var
->data
.descriptor_set
,
192 tex
->texture
->var
->data
.binding
, state
);
193 lower_tex_deref(tex
, tex
->texture
, &tex
->texture_index
,
194 nir_tex_src_texture_offset
, state
);
198 get_sampler_index(tex
->sampler
->var
->data
.descriptor_set
,
199 tex
->sampler
->var
->data
.binding
, state
);
200 lower_tex_deref(tex
, tex
->sampler
, &tex
->sampler_index
,
201 nir_tex_src_sampler_offset
, state
);
204 /* The backend only ever uses this to mark used surfaces. We don't care
205 * about that little optimization so it just needs to be non-zero.
207 tex
->texture_array_size
= 1;
209 cleanup_tex_deref(tex
, tex
->texture
);
211 cleanup_tex_deref(tex
, tex
->sampler
);
217 apply_pipeline_layout_block(nir_block
*block
, void *void_state
)
219 struct apply_pipeline_layout_state
*state
= void_state
;
221 nir_foreach_instr_safe(block
, instr
) {
222 switch (instr
->type
) {
223 case nir_instr_type_intrinsic
: {
224 nir_intrinsic_instr
*intrin
= nir_instr_as_intrinsic(instr
);
225 if (intrin
->intrinsic
== nir_intrinsic_vulkan_resource_index
) {
226 lower_res_index_intrinsic(intrin
, state
);
227 state
->progress
= true;
231 case nir_instr_type_tex
:
232 lower_tex(nir_instr_as_tex(instr
), state
);
233 /* All texture instructions need lowering */
234 state
->progress
= true;
245 setup_vec4_uniform_value(const union gl_constant_value
**params
,
246 const union gl_constant_value
*values
,
249 static const gl_constant_value zero
= { 0 };
251 for (unsigned i
= 0; i
< n
; ++i
)
252 params
[i
] = &values
[i
];
254 for (unsigned i
= n
; i
< 4; ++i
)
259 anv_nir_apply_pipeline_layout(nir_shader
*shader
,
260 struct brw_stage_prog_data
*prog_data
,
261 const struct anv_pipeline_layout
*layout
)
263 struct apply_pipeline_layout_state state
= {
268 nir_foreach_function(shader
, function
) {
269 if (function
->impl
) {
270 nir_builder_init(&state
.builder
, function
->impl
);
271 nir_foreach_block(function
->impl
, apply_pipeline_layout_block
, &state
);
272 nir_metadata_preserve(function
->impl
, nir_metadata_block_index
|
273 nir_metadata_dominance
);
277 if (layout
->stage
[shader
->stage
].image_count
> 0) {
278 nir_foreach_variable(var
, &shader
->uniforms
) {
279 if (glsl_type_is_image(var
->type
) ||
280 (glsl_type_is_array(var
->type
) &&
281 glsl_type_is_image(glsl_get_array_element(var
->type
)))) {
282 /* Images are represented as uniform push constants and the actual
283 * information required for reading/writing to/from the image is
284 * storred in the uniform.
286 unsigned image_index
= get_image_index(var
->data
.descriptor_set
,
287 var
->data
.binding
, &state
);
289 var
->data
.driver_location
= shader
->num_uniforms
+
290 image_index
* BRW_IMAGE_PARAM_SIZE
* 4;
294 struct anv_push_constants
*null_data
= NULL
;
295 const gl_constant_value
**param
= prog_data
->param
+ shader
->num_uniforms
;
296 const struct brw_image_param
*image_param
= null_data
->images
;
297 for (uint32_t i
= 0; i
< layout
->stage
[shader
->stage
].image_count
; i
++) {
298 setup_vec4_uniform_value(param
+ BRW_IMAGE_PARAM_SURFACE_IDX_OFFSET
,
299 (const union gl_constant_value
*)&image_param
->surface_idx
, 1);
300 setup_vec4_uniform_value(param
+ BRW_IMAGE_PARAM_OFFSET_OFFSET
,
301 (const union gl_constant_value
*)image_param
->offset
, 2);
302 setup_vec4_uniform_value(param
+ BRW_IMAGE_PARAM_SIZE_OFFSET
,
303 (const union gl_constant_value
*)image_param
->size
, 3);
304 setup_vec4_uniform_value(param
+ BRW_IMAGE_PARAM_STRIDE_OFFSET
,
305 (const union gl_constant_value
*)image_param
->stride
, 4);
306 setup_vec4_uniform_value(param
+ BRW_IMAGE_PARAM_TILING_OFFSET
,
307 (const union gl_constant_value
*)image_param
->tiling
, 3);
308 setup_vec4_uniform_value(param
+ BRW_IMAGE_PARAM_SWIZZLING_OFFSET
,
309 (const union gl_constant_value
*)image_param
->swizzling
, 2);
311 param
+= BRW_IMAGE_PARAM_SIZE
;
315 shader
->num_uniforms
+= layout
->stage
[shader
->stage
].image_count
*
316 BRW_IMAGE_PARAM_SIZE
* 4;
319 return state
.progress
;