/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "anv_nir.h"
#include "nir/nir_builder.h"
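/*
 * This pass lowers buffer accesses that go through descriptors with dynamic
 * offsets (the *_BUFFER_DYNAMIC descriptor types).  For each UBO/SSBO load or
 * store whose buffer index comes from such a binding, it loads the
 * (offset, range) pair that the driver provides as uniforms, adds the dynamic
 * offset to the access offset, and predicates the access on the offset being
 * within range so that out-of-bounds accesses are skipped (predicated-off
 * loads return zero).
 */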
struct apply_dynamic_offsets_state {
   nir_shader *shader;
   nir_builder builder;

   const struct anv_pipeline_layout *layout;

   /* Offset into the uniform space (set from shader->num_uniforms) at which
    * the dynamic (offset, range) pairs start.
    */
   uint32_t indices_start;
};
static bool
apply_dynamic_offsets_block(nir_block *block, void *void_state)
{
   struct apply_dynamic_offsets_state *state = void_state;
   struct anv_descriptor_set_layout *set_layout;

   nir_builder *b = &state->builder;

   nir_foreach_instr_safe(block, instr) {
      if (instr->type != nir_instr_type_intrinsic)
         continue;

      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);

      unsigned block_idx_src;
      switch (intrin->intrinsic) {
      case nir_intrinsic_load_ubo:
      case nir_intrinsic_load_ssbo:
         block_idx_src = 0;
         break;
      case nir_intrinsic_store_ssbo:
         block_idx_src = 1;
         break;
      default:
         continue; /* the loop */
      }
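      /* block_idx_src is the intrinsic source that carries the buffer (block)
       * index: src[0] for UBO/SSBO loads, src[1] for SSBO stores (whose
       * src[0] is the value being written).
       */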
      nir_instr *res_instr = intrin->src[block_idx_src].ssa->parent_instr;
      assert(res_instr->type == nir_instr_type_intrinsic);
      nir_intrinsic_instr *res_intrin = nir_instr_as_intrinsic(res_instr);
      assert(res_intrin->intrinsic == nir_intrinsic_vulkan_resource_index);

      unsigned set = res_intrin->const_index[0];
      unsigned binding = res_intrin->const_index[1];

      set_layout = state->layout->set[set].layout;
      if (set_layout->binding[binding].dynamic_offset_index < 0)
         continue;
      b->cursor = nir_before_instr(&intrin->instr);

      /* First, we need to generate the uniform load for the buffer offset */
      uint32_t index = state->layout->set[set].dynamic_offset_start +
                       set_layout->binding[binding].dynamic_offset_index;

      nir_intrinsic_instr *offset_load =
         nir_intrinsic_instr_create(state->shader, nir_intrinsic_load_uniform);
      offset_load->num_components = 2;
      offset_load->const_index[0] = state->indices_start + index * 8;
      offset_load->src[0] = nir_src_for_ssa(nir_imul(b, res_intrin->src[0].ssa,
                                                     nir_imm_int(b, 8)));

      nir_ssa_dest_init(&offset_load->instr, &offset_load->dest, 2, NULL);
      nir_builder_instr_insert(b, &offset_load->instr);
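      /* offset_load now holds a two-component uniform for this binding:
       * component 0 is the dynamic offset, which gets added to the
       * intrinsic's offset source below, and component 1 is the buffer range
       * used for the bounds predicate.
       */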
      nir_src *offset_src = nir_get_io_offset_src(intrin);
      nir_ssa_def *new_offset = nir_iadd(b, offset_src->ssa,
                                         &offset_load->dest.ssa);
      /* In order to avoid out-of-bounds access, we predicate */
      nir_ssa_def *pred = nir_uge(b, nir_channel(b, &offset_load->dest.ssa, 1),
                                  offset_src->ssa);
      nir_if *if_stmt = nir_if_create(b->shader);
      if_stmt->condition = nir_src_for_ssa(pred);
      nir_cf_node_insert(b->cursor, &if_stmt->cf_node);

      nir_instr_remove(&intrin->instr);
      *offset_src = nir_src_for_ssa(new_offset);
      nir_instr_insert_after_cf_list(&if_stmt->then_list, &intrin->instr);
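      /* Stores are done at this point: if the predicate is false, the store
       * in the then-block simply never executes.  Loads additionally need a
       * value on the not-taken path, which the phi below supplies as zero.
       */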
      if (intrin->intrinsic != nir_intrinsic_store_ssbo) {
         /* It's a load, we need a phi node */
         nir_phi_instr *phi = nir_phi_instr_create(b->shader);
         nir_ssa_dest_init(&phi->instr, &phi->dest,
                           intrin->num_components, NULL);

         nir_phi_src *src1 = ralloc(phi, nir_phi_src);
         struct exec_node *tnode = exec_list_get_tail(&if_stmt->then_list);
         src1->pred = exec_node_data(nir_block, tnode, cf_node.node);
         src1->src = nir_src_for_ssa(&intrin->dest.ssa);
         exec_list_push_tail(&phi->srcs, &src1->node);
         b->cursor = nir_after_cf_list(&if_stmt->else_list);
         nir_ssa_def *zero = nir_build_imm(b, intrin->num_components,
            (nir_const_value) { .u = { 0, 0, 0, 0 } });
         nir_phi_src *src2 = ralloc(phi, nir_phi_src);
         struct exec_node *enode = exec_list_get_tail(&if_stmt->else_list);
         src2->pred = exec_node_data(nir_block, enode, cf_node.node);
         src2->src = nir_src_for_ssa(zero);
         exec_list_push_tail(&phi->srcs, &src2->node);
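         /* The phi picks the loaded value from the then-block and zero from
          * the else-block; rewrite all users of the original load to use it.
          */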
         assert(intrin->dest.is_ssa);
         nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
                                  nir_src_for_ssa(&phi->dest.ssa));

         nir_instr_insert_after_cf(&if_stmt->cf_node, &phi->instr);
      }
   }

   return true;
}
void
anv_nir_apply_dynamic_offsets(struct anv_pipeline *pipeline,
                              nir_shader *shader,
                              struct brw_stage_prog_data *prog_data)
{
   struct apply_dynamic_offsets_state state = {
      .shader = shader,
      .layout = pipeline->layout,
      .indices_start = shader->num_uniforms,
   };
   if (!state.layout || !state.layout->stage[shader->stage].has_dynamic_offsets)
      return;
   nir_foreach_function(shader, function) {
      if (function->impl) {
         nir_builder_init(&state.builder, function->impl);
         nir_foreach_block(function->impl, apply_dynamic_offsets_block, &state);
         nir_metadata_preserve(function->impl, nir_metadata_block_index |
                                               nir_metadata_dominance);
      }
   }
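   /* Record where each dynamic (offset, range) value lives.  null_data is a
    * NULL anv_push_constants pointer used only to compute member offsets, so
    * the param entries effectively hold offsets into the push-constant block
    * (presumably resolved against the real push-constant data when uniforms
    * are uploaded).
    */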
   struct anv_push_constants *null_data = NULL;
   for (unsigned i = 0; i < MAX_DYNAMIC_BUFFERS; i++) {
      prog_data->param[i * 2 + shader->num_uniforms / 4] =
         (const union gl_constant_value *)&null_data->dynamic[i].offset;
      prog_data->param[i * 2 + 1 + shader->num_uniforms / 4] =
         (const union gl_constant_value *)&null_data->dynamic[i].range;
   }

   shader->num_uniforms += MAX_DYNAMIC_BUFFERS * 8;
}