/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
25 #include "program/prog_parameter.h"
26 #include "glsl/nir/nir_builder.h"
28 struct apply_pipeline_layout_state
{
32 const struct anv_pipeline_layout
*layout
;
38 get_surface_index(unsigned set
, unsigned binding
,
39 struct apply_pipeline_layout_state
*state
)
41 assert(set
< state
->layout
->num_sets
);
42 struct anv_descriptor_set_layout
*set_layout
=
43 state
->layout
->set
[set
].layout
;
45 gl_shader_stage stage
= state
->shader
->stage
;
47 assert(binding
< set_layout
->binding_count
);
49 assert(set_layout
->binding
[binding
].stage
[stage
].surface_index
>= 0);
51 uint32_t surface_index
=
52 state
->layout
->set
[set
].stage
[stage
].surface_start
+
53 set_layout
->binding
[binding
].stage
[stage
].surface_index
;
55 assert(surface_index
< state
->layout
->stage
[stage
].surface_count
);
61 get_sampler_index(unsigned set
, unsigned binding
, nir_texop tex_op
,
62 struct apply_pipeline_layout_state
*state
)
64 assert(set
< state
->layout
->num_sets
);
65 struct anv_descriptor_set_layout
*set_layout
=
66 state
->layout
->set
[set
].layout
;
68 assert(binding
< set_layout
->binding_count
);
70 gl_shader_stage stage
= state
->shader
->stage
;
72 if (set_layout
->binding
[binding
].stage
[stage
].sampler_index
< 0) {
73 assert(tex_op
== nir_texop_txf
);
77 uint32_t sampler_index
=
78 state
->layout
->set
[set
].stage
[stage
].sampler_start
+
79 set_layout
->binding
[binding
].stage
[stage
].sampler_index
;
81 assert(sampler_index
< state
->layout
->stage
[stage
].sampler_count
);
87 get_image_index(unsigned set
, unsigned binding
,
88 struct apply_pipeline_layout_state
*state
)
90 assert(set
< state
->layout
->num_sets
);
91 struct anv_descriptor_set_layout
*set_layout
=
92 state
->layout
->set
[set
].layout
;
94 assert(binding
< set_layout
->binding_count
);
96 gl_shader_stage stage
= state
->shader
->stage
;
98 assert(set_layout
->binding
[binding
].stage
[stage
].image_index
>= 0);
100 uint32_t image_index
=
101 state
->layout
->set
[set
].stage
[stage
].image_start
+
102 set_layout
->binding
[binding
].stage
[stage
].image_index
;
104 assert(image_index
< state
->layout
->stage
[stage
].image_count
);
110 lower_res_index_intrinsic(nir_intrinsic_instr
*intrin
,
111 struct apply_pipeline_layout_state
*state
)
113 nir_builder
*b
= &state
->builder
;
115 b
->cursor
= nir_before_instr(&intrin
->instr
);
117 uint32_t set
= intrin
->const_index
[0];
118 uint32_t binding
= intrin
->const_index
[1];
120 uint32_t surface_index
= get_surface_index(set
, binding
, state
);
122 nir_const_value
*const_block_idx
=
123 nir_src_as_const_value(intrin
->src
[0]);
125 nir_ssa_def
*block_index
;
126 if (const_block_idx
) {
127 block_index
= nir_imm_int(b
, surface_index
+ const_block_idx
->u
[0]);
129 block_index
= nir_iadd(b
, nir_imm_int(b
, surface_index
),
130 nir_ssa_for_src(b
, intrin
->src
[0], 1));
133 assert(intrin
->dest
.is_ssa
);
134 nir_ssa_def_rewrite_uses(&intrin
->dest
.ssa
, nir_src_for_ssa(block_index
));
135 nir_instr_remove(&intrin
->instr
);
139 lower_tex_deref(nir_tex_instr
*tex
, nir_deref_var
*deref
,
140 unsigned *const_index
, nir_tex_src_type src_type
,
141 struct apply_pipeline_layout_state
*state
)
143 if (deref
->deref
.child
) {
144 assert(deref
->deref
.child
->deref_type
== nir_deref_type_array
);
145 nir_deref_array
*deref_array
= nir_deref_as_array(deref
->deref
.child
);
147 *const_index
+= deref_array
->base_offset
;
149 if (deref_array
->deref_array_type
== nir_deref_array_type_indirect
) {
150 nir_tex_src
*new_srcs
= rzalloc_array(tex
, nir_tex_src
,
153 for (unsigned i
= 0; i
< tex
->num_srcs
; i
++) {
154 new_srcs
[i
].src_type
= tex
->src
[i
].src_type
;
155 nir_instr_move_src(&tex
->instr
, &new_srcs
[i
].src
, &tex
->src
[i
].src
);
158 ralloc_free(tex
->src
);
161 /* Now we can go ahead and move the source over to being a
162 * first-class texture source.
164 tex
->src
[tex
->num_srcs
].src_type
= src_type
;
166 assert(deref_array
->indirect
.is_ssa
);
167 nir_instr_rewrite_src(&tex
->instr
, &tex
->src
[tex
->num_srcs
- 1].src
,
168 deref_array
->indirect
);
174 cleanup_tex_deref(nir_tex_instr
*tex
, nir_deref_var
*deref
)
176 if (deref
->deref
.child
== NULL
)
179 nir_deref_array
*deref_array
= nir_deref_as_array(deref
->deref
.child
);
181 if (deref_array
->deref_array_type
!= nir_deref_array_type_indirect
)
184 nir_instr_rewrite_src(&tex
->instr
, &deref_array
->indirect
, NIR_SRC_INIT
);
188 lower_tex(nir_tex_instr
*tex
, struct apply_pipeline_layout_state
*state
)
190 /* No one should have come by and lowered it already */
191 assert(tex
->sampler
);
193 nir_deref_var
*tex_deref
= tex
->texture
? tex
->texture
: tex
->sampler
;
195 get_surface_index(tex_deref
->var
->data
.descriptor_set
,
196 tex_deref
->var
->data
.binding
, state
);
197 lower_tex_deref(tex
, tex_deref
, &tex
->texture_index
,
198 nir_tex_src_texture_offset
, state
);
201 get_sampler_index(tex
->sampler
->var
->data
.descriptor_set
,
202 tex
->sampler
->var
->data
.binding
, tex
->op
, state
);
203 lower_tex_deref(tex
, tex
->sampler
, &tex
->sampler_index
,
204 nir_tex_src_sampler_offset
, state
);
207 cleanup_tex_deref(tex
, tex
->texture
);
208 cleanup_tex_deref(tex
, tex
->sampler
);
214 apply_pipeline_layout_block(nir_block
*block
, void *void_state
)
216 struct apply_pipeline_layout_state
*state
= void_state
;
218 nir_foreach_instr_safe(block
, instr
) {
219 switch (instr
->type
) {
220 case nir_instr_type_intrinsic
: {
221 nir_intrinsic_instr
*intrin
= nir_instr_as_intrinsic(instr
);
222 if (intrin
->intrinsic
== nir_intrinsic_vulkan_resource_index
) {
223 lower_res_index_intrinsic(intrin
, state
);
224 state
->progress
= true;
228 case nir_instr_type_tex
:
229 lower_tex(nir_instr_as_tex(instr
), state
);
230 /* All texture instructions need lowering */
231 state
->progress
= true;
242 setup_vec4_uniform_value(const union gl_constant_value
**params
,
243 const union gl_constant_value
*values
,
246 static const gl_constant_value zero
= { 0 };
248 for (unsigned i
= 0; i
< n
; ++i
)
249 params
[i
] = &values
[i
];
251 for (unsigned i
= n
; i
< 4; ++i
)
256 anv_nir_apply_pipeline_layout(nir_shader
*shader
,
257 struct brw_stage_prog_data
*prog_data
,
258 const struct anv_pipeline_layout
*layout
)
260 struct apply_pipeline_layout_state state
= {
265 nir_foreach_function(shader
, function
) {
266 if (function
->impl
) {
267 nir_builder_init(&state
.builder
, function
->impl
);
268 nir_foreach_block(function
->impl
, apply_pipeline_layout_block
, &state
);
269 nir_metadata_preserve(function
->impl
, nir_metadata_block_index
|
270 nir_metadata_dominance
);
274 if (layout
->stage
[shader
->stage
].image_count
> 0) {
275 nir_foreach_variable(var
, &shader
->uniforms
) {
276 if (glsl_type_is_image(var
->type
) ||
277 (glsl_type_is_array(var
->type
) &&
278 glsl_type_is_image(glsl_get_array_element(var
->type
)))) {
279 /* Images are represented as uniform push constants and the actual
280 * information required for reading/writing to/from the image is
281 * storred in the uniform.
283 unsigned image_index
= get_image_index(var
->data
.descriptor_set
,
284 var
->data
.binding
, &state
);
286 var
->data
.driver_location
= shader
->num_uniforms
+
287 image_index
* BRW_IMAGE_PARAM_SIZE
* 4;
291 struct anv_push_constants
*null_data
= NULL
;
292 const gl_constant_value
**param
= prog_data
->param
+ shader
->num_uniforms
;
293 const struct brw_image_param
*image_param
= null_data
->images
;
294 for (uint32_t i
= 0; i
< layout
->stage
[shader
->stage
].image_count
; i
++) {
295 setup_vec4_uniform_value(param
+ BRW_IMAGE_PARAM_SURFACE_IDX_OFFSET
,
296 (const union gl_constant_value
*)&image_param
->surface_idx
, 1);
297 setup_vec4_uniform_value(param
+ BRW_IMAGE_PARAM_OFFSET_OFFSET
,
298 (const union gl_constant_value
*)image_param
->offset
, 2);
299 setup_vec4_uniform_value(param
+ BRW_IMAGE_PARAM_SIZE_OFFSET
,
300 (const union gl_constant_value
*)image_param
->size
, 3);
301 setup_vec4_uniform_value(param
+ BRW_IMAGE_PARAM_STRIDE_OFFSET
,
302 (const union gl_constant_value
*)image_param
->stride
, 4);
303 setup_vec4_uniform_value(param
+ BRW_IMAGE_PARAM_TILING_OFFSET
,
304 (const union gl_constant_value
*)image_param
->tiling
, 3);
305 setup_vec4_uniform_value(param
+ BRW_IMAGE_PARAM_SWIZZLING_OFFSET
,
306 (const union gl_constant_value
*)image_param
->swizzling
, 2);
308 param
+= BRW_IMAGE_PARAM_SIZE
;
312 shader
->num_uniforms
+= layout
->stage
[shader
->stage
].image_count
*
313 BRW_IMAGE_PARAM_SIZE
* 4;
316 return state
.progress
;