06fe8aafd301643433cb641b286a8eeb4944dee6
[mesa.git] / src / intel / vulkan / anv_nir_apply_dynamic_offsets.c
1 /*
2 * Copyright © 2015 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include "anv_nir.h"
25 #include "nir/nir_builder.h"
26
/* Shared state for the per-block NIR walk that lowers dynamic-offset
 * UBO/SSBO accesses.
 */
struct apply_dynamic_offsets_state {
   nir_shader *shader;
   nir_builder builder;

   /* Pipeline layout used to map (set, binding) to a dynamic offset index */
   const struct anv_pipeline_layout *layout;

   /* Start of the reserved dynamic-offset area in uniform space; set to
    * shader->num_uniforms at pass entry (same units as num_uniforms).
    */
   uint32_t indices_start;
};
35
/* Rewrite every UBO/SSBO access in `block` whose binding uses a dynamic
 * offset.
 *
 * For each such access we:
 *  1. emit a 2-component load_uniform pulling (offset, range) for the
 *     descriptor out of the area reserved by anv_nir_apply_dynamic_offsets(),
 *  2. add the loaded dynamic offset to the intrinsic's offset source, and
 *  3. wrap the access in an if-statement predicated on the range so an
 *     out-of-range access is skipped; a skipped load yields zero via a phi.
 *
 * Always returns true so the nir_foreach_block walk continues.
 */
static bool
apply_dynamic_offsets_block(nir_block *block, void *void_state)
{
   struct apply_dynamic_offsets_state *state = void_state;
   struct anv_descriptor_set_layout *set_layout;

   nir_builder *b = &state->builder;

   /* _safe variant: intrinsics are removed and re-inserted mid-iteration */
   nir_foreach_instr_safe(block, instr) {
      if (instr->type != nir_instr_type_intrinsic)
         continue;

      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);

      /* Which source of the intrinsic holds the block (resource) index */
      unsigned block_idx_src;
      switch (intrin->intrinsic) {
      case nir_intrinsic_load_ubo:
      case nir_intrinsic_load_ssbo:
         block_idx_src = 0;
         break;
      case nir_intrinsic_store_ssbo:
         block_idx_src = 1;
         break;
      default:
         continue; /* the loop */
      }

      /* The block index must come straight from a vulkan_resource_index
       * intrinsic, which carries the descriptor set and binding as
       * const_index[0]/[1].
       */
      nir_instr *res_instr = intrin->src[block_idx_src].ssa->parent_instr;
      assert(res_instr->type == nir_instr_type_intrinsic);
      nir_intrinsic_instr *res_intrin = nir_instr_as_intrinsic(res_instr);
      assert(res_intrin->intrinsic == nir_intrinsic_vulkan_resource_index);

      unsigned set = res_intrin->const_index[0];
      unsigned binding = res_intrin->const_index[1];

      /* Bindings without a dynamic offset are left untouched */
      set_layout = state->layout->set[set].layout;
      if (set_layout->binding[binding].dynamic_offset_index < 0)
         continue;

      b->cursor = nir_before_instr(&intrin->instr);

      /* First, we need to generate the uniform load for the buffer offset */
      uint32_t index = state->layout->set[set].dynamic_offset_start +
                       set_layout->binding[binding].dynamic_offset_index;
      uint32_t array_size = set_layout->binding[binding].array_size;

      /* Load a vec2 of (offset, range) — 8 uniform units per dynamic
       * buffer (base index * 8, range array_size * 8); the indirect
       * source scales the resource index by 8 to index the binding's
       * array.
       */
      nir_intrinsic_instr *offset_load =
         nir_intrinsic_instr_create(state->shader, nir_intrinsic_load_uniform);
      offset_load->num_components = 2;
      nir_intrinsic_set_base(offset_load, state->indices_start + index * 8);
      nir_intrinsic_set_range(offset_load, array_size * 8);
      offset_load->src[0] = nir_src_for_ssa(nir_imul(b, res_intrin->src[0].ssa,
                                                     nir_imm_int(b, 8)));

      nir_ssa_dest_init(&offset_load->instr, &offset_load->dest, 2, 32, NULL);
      nir_builder_instr_insert(b, &offset_load->instr);

      nir_src *offset_src = nir_get_io_offset_src(intrin);
      nir_ssa_def *new_offset = nir_iadd(b, offset_src->ssa,
                                         &offset_load->dest.ssa);

      /* In order to avoid out-of-bounds access, we predicate */
      /* NOTE(review): the predicate compares the range (component 1)
       * against the original, pre-add offset rather than the adjusted
       * one — confirm this matches the intended bounds-check semantics.
       */
      nir_ssa_def *pred = nir_uge(b, nir_channel(b, &offset_load->dest.ssa, 1),
                                  offset_src->ssa);
      nir_if *if_stmt = nir_if_create(b->shader);
      if_stmt->condition = nir_src_for_ssa(pred);
      nir_cf_node_insert(b->cursor, &if_stmt->cf_node);

      /* Move the access into the then-branch with the adjusted offset */
      nir_instr_remove(&intrin->instr);
      *offset_src = nir_src_for_ssa(new_offset);
      nir_instr_insert_after_cf_list(&if_stmt->then_list, &intrin->instr);

      if (intrin->intrinsic != nir_intrinsic_store_ssbo) {
         /* It's a load, we need a phi node */
         nir_phi_instr *phi = nir_phi_instr_create(b->shader);
         nir_ssa_dest_init(&phi->instr, &phi->dest,
                           intrin->num_components,
                           intrin->dest.ssa.bit_size, NULL);

         /* then-branch source: the actual load result */
         nir_phi_src *src1 = ralloc(phi, nir_phi_src);
         struct exec_node *tnode = exec_list_get_tail(&if_stmt->then_list);
         src1->pred = exec_node_data(nir_block, tnode, cf_node.node);
         src1->src = nir_src_for_ssa(&intrin->dest.ssa);
         exec_list_push_tail(&phi->srcs, &src1->node);

         /* else-branch source: zeros for the skipped (out-of-range) load */
         b->cursor = nir_after_cf_list(&if_stmt->else_list);
         nir_ssa_def *zero = nir_build_imm(b, intrin->num_components,
            (nir_const_value) { .u32 = { 0, 0, 0, 0 } });

         nir_phi_src *src2 = ralloc(phi, nir_phi_src);
         struct exec_node *enode = exec_list_get_tail(&if_stmt->else_list);
         src2->pred = exec_node_data(nir_block, enode, cf_node.node);
         src2->src = nir_src_for_ssa(zero);
         exec_list_push_tail(&phi->srcs, &src2->node);

         /* Redirect every user of the original load to the phi */
         assert(intrin->dest.is_ssa);
         nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
                                  nir_src_for_ssa(&phi->dest.ssa));

         nir_instr_insert_after_cf(&if_stmt->cf_node, &phi->instr);
      }
   }

   return true;
}
141
/* Lower dynamic UBO/SSBO descriptor offsets for `shader`.
 *
 * Reserves MAX_DYNAMIC_BUFFERS (offset, range) pairs at the end of the
 * shader's uniform space, rewrites every dynamically-offset buffer access
 * to add the offset loaded from there (see apply_dynamic_offsets_block),
 * and registers the matching push-constant params in `prog_data`.
 * No-op when the pipeline layout has no dynamic offsets for this stage.
 */
void
anv_nir_apply_dynamic_offsets(struct anv_pipeline *pipeline,
                              nir_shader *shader,
                              struct brw_stage_prog_data *prog_data)
{
   struct apply_dynamic_offsets_state state = {
      .shader = shader,
      .layout = pipeline->layout,
      /* The dynamic-offset area starts right after the existing uniforms */
      .indices_start = shader->num_uniforms,
   };

   if (!state.layout || !state.layout->stage[shader->stage].has_dynamic_offsets)
      return;

   nir_foreach_function(shader, function) {
      if (function->impl) {
         nir_builder_init(&state.builder, function->impl);
         nir_foreach_block(function->impl, apply_dynamic_offsets_block, &state);
         /* NOTE(review): the walk inserts if-statements, which normally
          * invalidates block-index/dominance metadata — confirm
          * nir_cf_node_insert dirties metadata itself so preserving
          * these here is safe.
          */
         nir_metadata_preserve(function->impl, nir_metadata_block_index |
                               nir_metadata_dominance);
      }
   }

   /* Tell the backend where each dynamic (offset, range) pair lives in
    * the push constants.  The NULL anv_push_constants pointer is the
    * offsetof trick: the "addresses" stored in param[] are really byte
    * offsets into the push-constant block.  Param slots are indexed at
    * num_uniforms / 4, i.e. presumably one slot per 4 bytes — verify
    * against the backend's param handling.
    */
   struct anv_push_constants *null_data = NULL;
   for (unsigned i = 0; i < MAX_DYNAMIC_BUFFERS; i++) {
      prog_data->param[i * 2 + shader->num_uniforms / 4] =
         (const union gl_constant_value *)&null_data->dynamic[i].offset;
      prog_data->param[i * 2 + 1 + shader->num_uniforms / 4] =
         (const union gl_constant_value *)&null_data->dynamic[i].range;
   }

   /* 8 bytes per dynamic buffer: one 32-bit offset + one 32-bit range */
   shader->num_uniforms += MAX_DYNAMIC_BUFFERS * 8;
}