/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
25 #include "glsl/nir/nir_builder.h"
27 struct apply_dynamic_offsets_state
{
32 struct anv_pipeline_layout
*layout
;
34 uint32_t indices_start
;
38 apply_dynamic_offsets_block(nir_block
*block
, void *void_state
)
40 struct apply_dynamic_offsets_state
*state
= void_state
;
41 struct anv_descriptor_set_layout
*set_layout
;
43 nir_builder
*b
= &state
->builder
;
45 nir_foreach_instr_safe(block
, instr
) {
46 if (instr
->type
!= nir_instr_type_intrinsic
)
49 nir_intrinsic_instr
*intrin
= nir_instr_as_intrinsic(instr
);
51 unsigned block_idx_src
;
52 switch (intrin
->intrinsic
) {
53 case nir_intrinsic_load_ubo
:
54 case nir_intrinsic_load_ubo_indirect
:
55 case nir_intrinsic_load_ssbo
:
56 case nir_intrinsic_load_ssbo_indirect
:
59 case nir_intrinsic_store_ssbo
:
60 case nir_intrinsic_store_ssbo_indirect
:
64 continue; /* the loop */
67 nir_instr
*res_instr
= intrin
->src
[block_idx_src
].ssa
->parent_instr
;
68 assert(res_instr
->type
== nir_instr_type_intrinsic
);
69 nir_intrinsic_instr
*res_intrin
= nir_instr_as_intrinsic(res_instr
);
70 assert(res_intrin
->intrinsic
== nir_intrinsic_vulkan_resource_index
);
72 unsigned set
= res_intrin
->const_index
[0];
73 unsigned binding
= res_intrin
->const_index
[1];
75 set_layout
= state
->layout
->set
[set
].layout
;
76 if (set_layout
->binding
[binding
].dynamic_offset_index
< 0)
79 b
->cursor
= nir_before_instr(&intrin
->instr
);
82 switch (intrin
->intrinsic
) {
83 case nir_intrinsic_load_ubo_indirect
:
84 case nir_intrinsic_load_ssbo_indirect
:
87 case nir_intrinsic_store_ssbo_indirect
:
95 /* First, we need to generate the uniform load for the buffer offset */
96 uint32_t index
= state
->layout
->set
[set
].dynamic_offset_start
+
97 set_layout
->binding
[binding
].dynamic_offset_index
;
99 nir_const_value
*const_arr_idx
=
100 nir_src_as_const_value(res_intrin
->src
[0]);
102 nir_intrinsic_op offset_load_op
;
104 offset_load_op
= nir_intrinsic_load_uniform
;
106 offset_load_op
= nir_intrinsic_load_uniform_indirect
;
108 nir_intrinsic_instr
*offset_load
=
109 nir_intrinsic_instr_create(state
->shader
, offset_load_op
);
110 offset_load
->num_components
= 2;
111 offset_load
->const_index
[0] = state
->indices_start
+ index
* 2;
114 offset_load
->const_index
[1] = const_arr_idx
->u
[0] * 2;
116 offset_load
->const_index
[1] = 0;
117 offset_load
->src
[0] = nir_src_for_ssa(
118 nir_imul(b
, nir_ssa_for_src(b
, res_intrin
->src
[0], 1),
122 nir_ssa_dest_init(&offset_load
->instr
, &offset_load
->dest
, 2, NULL
);
123 nir_builder_instr_insert(b
, &offset_load
->instr
);
125 /* We calculate the full offset and don't bother with the base
126 * offset. We need the full offset for the predicate anyway.
128 nir_ssa_def
*rel_offset
= nir_imm_int(b
, intrin
->const_index
[0]);
129 if (indirect_src
>= 0) {
130 assert(intrin
->src
[indirect_src
].is_ssa
);
131 rel_offset
= nir_iadd(b
, intrin
->src
[indirect_src
].ssa
, rel_offset
);
133 nir_ssa_def
*global_offset
= nir_iadd(b
, rel_offset
,
134 &offset_load
->dest
.ssa
);
136 /* Now we replace the load/store intrinsic */
138 nir_intrinsic_op indirect_op
;
139 switch (intrin
->intrinsic
) {
140 case nir_intrinsic_load_ubo
:
141 indirect_op
= nir_intrinsic_load_ubo_indirect
;
143 case nir_intrinsic_load_ssbo
:
144 indirect_op
= nir_intrinsic_load_ssbo_indirect
;
146 case nir_intrinsic_store_ssbo
:
147 indirect_op
= nir_intrinsic_store_ssbo_indirect
;
150 unreachable("Invalid direct load/store intrinsic");
153 nir_intrinsic_instr
*copy
=
154 nir_intrinsic_instr_create(state
->shader
, indirect_op
);
155 copy
->num_components
= intrin
->num_components
;
157 /* The indirect is always the last source */
158 indirect_src
= nir_intrinsic_infos
[indirect_op
].num_srcs
- 1;
160 for (unsigned i
= 0; i
< (unsigned)indirect_src
; i
++)
161 nir_src_copy(©
->src
[i
], &intrin
->src
[i
], ©
->instr
);
163 copy
->src
[indirect_src
] = nir_src_for_ssa(global_offset
);
164 nir_ssa_dest_init(©
->instr
, ©
->dest
,
165 intrin
->dest
.ssa
.num_components
,
166 intrin
->dest
.ssa
.name
);
168 /* In order to avoid out-of-bounds access, we predicate */
169 nir_ssa_def
*pred
= nir_fge(b
, nir_channel(b
, &offset_load
->dest
.ssa
, 1),
171 nir_if
*if_stmt
= nir_if_create(b
->shader
);
172 if_stmt
->condition
= nir_src_for_ssa(pred
);
173 nir_cf_node_insert(b
->cursor
, &if_stmt
->cf_node
);
175 nir_instr_insert_after_cf_list(&if_stmt
->then_list
, ©
->instr
);
177 if (indirect_op
!= nir_intrinsic_store_ssbo
) {
178 /* It's a load, we need a phi node */
179 nir_phi_instr
*phi
= nir_phi_instr_create(b
->shader
);
180 nir_ssa_dest_init(&phi
->instr
, &phi
->dest
,
181 intrin
->num_components
, NULL
);
183 nir_phi_src
*src1
= ralloc(phi
, nir_phi_src
);
184 struct exec_node
*tnode
= exec_list_get_tail(&if_stmt
->then_list
);
185 src1
->pred
= exec_node_data(nir_block
, tnode
, cf_node
.node
);
186 src1
->src
= nir_src_for_ssa(©
->dest
.ssa
);
187 exec_list_push_tail(&phi
->srcs
, &src1
->node
);
189 b
->cursor
= nir_after_cf_list(&if_stmt
->else_list
);
190 nir_ssa_def
*zero
= nir_build_imm(b
, intrin
->num_components
,
191 (nir_const_value
) { .u
= { 0, 0, 0, 0 } });
193 nir_phi_src
*src2
= ralloc(phi
, nir_phi_src
);
194 struct exec_node
*enode
= exec_list_get_tail(&if_stmt
->else_list
);
195 src2
->pred
= exec_node_data(nir_block
, enode
, cf_node
.node
);
196 src2
->src
= nir_src_for_ssa(zero
);
197 exec_list_push_tail(&phi
->srcs
, &src2
->node
);
199 nir_instr_insert_after_cf(&if_stmt
->cf_node
, &phi
->instr
);
201 assert(intrin
->dest
.is_ssa
);
202 nir_ssa_def_rewrite_uses(&intrin
->dest
.ssa
,
203 nir_src_for_ssa(&phi
->dest
.ssa
));
206 nir_instr_remove(&intrin
->instr
);
213 anv_nir_apply_dynamic_offsets(struct anv_pipeline
*pipeline
,
215 struct brw_stage_prog_data
*prog_data
)
217 struct apply_dynamic_offsets_state state
= {
219 .stage
= anv_vk_shader_stage_for_mesa_stage(shader
->stage
),
220 .layout
= pipeline
->layout
,
221 .indices_start
= shader
->num_uniforms
,
224 if (!state
.layout
|| !state
.layout
->stage
[state
.stage
].has_dynamic_offsets
)
227 nir_foreach_overload(shader
, overload
) {
228 if (overload
->impl
) {
229 nir_builder_init(&state
.builder
, overload
->impl
);
230 nir_foreach_block(overload
->impl
, apply_dynamic_offsets_block
, &state
);
231 nir_metadata_preserve(overload
->impl
, nir_metadata_block_index
|
232 nir_metadata_dominance
);
236 struct anv_push_constants
*null_data
= NULL
;
237 for (unsigned i
= 0; i
< MAX_DYNAMIC_BUFFERS
; i
++) {
238 prog_data
->param
[i
* 2 + shader
->num_uniforms
] =
239 (const gl_constant_value
*)&null_data
->dynamic
[i
].offset
;
240 prog_data
->param
[i
* 2 + 1 + shader
->num_uniforms
] =
241 (const gl_constant_value
*)&null_data
->dynamic
[i
].range
;
244 shader
->num_uniforms
+= MAX_DYNAMIC_BUFFERS
* 2;