[mesa.git] / src/compiler/nir/nir_lower_indirect_derefs.c
/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "nir.h"
#include "nir_builder.h"

static void
emit_load_store(nir_builder *b, nir_intrinsic_instr *orig_instr,
                nir_deref_var *deref, nir_deref *tail,
                nir_ssa_def **dest, nir_ssa_def *src);

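/* Emits a binary if-ladder for one indirect array deref.
 *
 * The range [start, end) of base-relative array indices is split at its
 * midpoint: an if compares the indirect index against the midpoint and each
 * branch recurses on its half.  Once the range narrows to a single element,
 * a direct deref for that element is temporarily spliced into the chain and
 * the actual load/store is emitted by emit_load_store().  For loads, the
 * values produced by the two branches are merged with a phi.
 */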
static void
emit_indirect_load_store(nir_builder *b, nir_intrinsic_instr *orig_instr,
                         nir_deref_var *deref, nir_deref *arr_parent,
                         int start, int end,
                         nir_ssa_def **dest, nir_ssa_def *src)
{
   assert(arr_parent->child &&
          arr_parent->child->deref_type == nir_deref_type_array);
   nir_deref_array *arr = nir_deref_as_array(arr_parent->child);
   assert(arr->deref_array_type == nir_deref_array_type_indirect);
   assert(arr->indirect.is_ssa);

   assert(start < end);
   if (start == end - 1) {
      /* Base case. Just emit the load/store op */
      nir_deref_array direct = *arr;
      direct.deref_array_type = nir_deref_array_type_direct;
      direct.base_offset += start;
      direct.indirect = NIR_SRC_INIT;

      arr_parent->child = &direct.deref;
      emit_load_store(b, orig_instr, deref, &arr->deref, dest, src);
      arr_parent->child = &arr->deref;
   } else {
      int mid = start + (end - start) / 2;

      nir_ssa_def *then_dest, *else_dest;

      nir_if *if_stmt = nir_if_create(b->shader);
      if_stmt->condition = nir_src_for_ssa(nir_ilt(b, arr->indirect.ssa,
                                                   nir_imm_int(b, mid)));
      nir_cf_node_insert(b->cursor, &if_stmt->cf_node);

      b->cursor = nir_after_cf_list(&if_stmt->then_list);
      emit_indirect_load_store(b, orig_instr, deref, arr_parent,
                               start, mid, &then_dest, src);

      b->cursor = nir_after_cf_list(&if_stmt->else_list);
      emit_indirect_load_store(b, orig_instr, deref, arr_parent,
                               mid, end, &else_dest, src);

      b->cursor = nir_after_cf_node(&if_stmt->cf_node);

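      /* Loads produce an SSA value, so the results of the then and else
       * branches have to be merged with a phi.  Stores produce no value, so
       * nothing more is needed once both branches have been emitted.
       */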
      if (src == NULL) {
         /* We're a load. We need to insert a phi node */
         nir_phi_instr *phi = nir_phi_instr_create(b->shader);
         nir_ssa_dest_init(&phi->instr, &phi->dest,
                           then_dest->num_components, NULL);

         nir_phi_src *src0 = ralloc(phi, nir_phi_src);
         src0->pred = nir_cf_node_as_block(nir_if_last_then_node(if_stmt));
         src0->src = nir_src_for_ssa(then_dest);
         exec_list_push_tail(&phi->srcs, &src0->node);

         nir_phi_src *src1 = ralloc(phi, nir_phi_src);
         src1->pred = nir_cf_node_as_block(nir_if_last_else_node(if_stmt));
         src1->src = nir_src_for_ssa(else_dest);
         exec_list_push_tail(&phi->srcs, &src1->node);

         nir_builder_instr_insert(b, &phi->instr);
         *dest = &phi->dest.ssa;
      }
   }
}

static void
emit_load_store(nir_builder *b, nir_intrinsic_instr *orig_instr,
                nir_deref_var *deref, nir_deref *tail,
                nir_ssa_def **dest, nir_ssa_def *src)
{
   for (; tail->child; tail = tail->child) {
      if (tail->child->deref_type != nir_deref_type_array)
         continue;

      nir_deref_array *arr = nir_deref_as_array(tail->child);
      if (arr->deref_array_type != nir_deref_array_type_indirect)
         continue;

      int length = glsl_get_length(tail->type);

      emit_indirect_load_store(b, orig_instr, deref, tail, -arr->base_offset,
                               length - arr->base_offset, dest, src);
      return;
   }

   assert(tail && tail->child == NULL);

   /* We reached the end of the deref chain. Emit the instruction */

   if (src == NULL) {
      /* This is a load instruction */
      nir_intrinsic_instr *load =
         nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_var);
      load->num_components = orig_instr->num_components;
      load->variables[0] =
         nir_deref_as_var(nir_copy_deref(load, &deref->deref));
      nir_ssa_dest_init(&load->instr, &load->dest,
                        load->num_components, NULL);
      nir_builder_instr_insert(b, &load->instr);
      *dest = &load->dest.ssa;
   } else {
      /* This is a store instruction */
      nir_intrinsic_instr *store =
         nir_intrinsic_instr_create(b->shader, nir_intrinsic_store_var);
      store->num_components = orig_instr->num_components;
      store->const_index[0] = orig_instr->const_index[0]; /* writemask */
      store->variables[0] =
         nir_deref_as_var(nir_copy_deref(store, &deref->deref));
      store->src[0] = nir_src_for_ssa(src);
      nir_builder_instr_insert(b, &store->instr);
   }
}

static bool
deref_has_indirect(nir_deref_var *deref)
{
   for (nir_deref *tail = deref->deref.child; tail; tail = tail->child) {
      if (tail->deref_type != nir_deref_type_array)
         continue;

      nir_deref_array *arr = nir_deref_as_array(tail);
      if (arr->deref_array_type == nir_deref_array_type_indirect)
         return true;
   }

   return false;
}

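/* State threaded through the nir_foreach_block() callback below.  mode_mask
 * is a bitmask of (1 << nir_variable_mode) values selecting which variable
 * modes get lowered; progress records whether anything was rewritten.
 */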
struct lower_indirect_state {
   nir_builder builder;
   uint32_t mode_mask;
   bool progress;
};

static bool
lower_indirect_block(nir_block *block, void *void_state)
{
   struct lower_indirect_state *state = void_state;

   nir_foreach_instr_safe(block, instr) {
      if (instr->type != nir_instr_type_intrinsic)
         continue;

      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
      if (intrin->intrinsic != nir_intrinsic_load_var &&
          intrin->intrinsic != nir_intrinsic_store_var)
         continue;

      if (!deref_has_indirect(intrin->variables[0]))
         continue;

      /* Only lower variables whose mode is in the mask */
      if (!(state->mode_mask & (1 << intrin->variables[0]->var->data.mode)))
         continue;

      state->builder.cursor = nir_before_instr(&intrin->instr);

      if (intrin->intrinsic == nir_intrinsic_load_var) {
         nir_ssa_def *result;
         emit_load_store(&state->builder, intrin, intrin->variables[0],
                         &intrin->variables[0]->deref, &result, NULL);
         nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(result));
      } else {
         assert(intrin->src[0].is_ssa);
         emit_load_store(&state->builder, intrin, intrin->variables[0],
                         &intrin->variables[0]->deref, NULL, intrin->src[0].ssa);
      }
      nir_instr_remove(&intrin->instr);
      state->progress = true;
   }

   return true;
}

static bool
lower_indirects_impl(nir_function_impl *impl, uint32_t mode_mask)
{
   struct lower_indirect_state state;

   state.progress = false;
   state.mode_mask = mode_mask;
   nir_builder_init(&state.builder, impl);

   nir_foreach_block(impl, lower_indirect_block, &state);

   if (state.progress)
      nir_metadata_preserve(impl, nir_metadata_none);

   return state.progress;
}

/** Lowers indirect variable loads/stores to direct loads/stores.
 *
 * The pass works by replacing any indirect load or store with an if-ladder
 * that does a binary search on the array index.
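 *
 * For example (illustrative GLSL-style pseudocode), a load from a
 * four-element array
 *
 *    x = arr[i];
 *
 * is turned into roughly
 *
 *    if (i < 2) {
 *       if (i < 1) x = arr[0]; else x = arr[1];
 *    } else {
 *       if (i < 3) x = arr[2]; else x = arr[3];
 *    }
 *
 * with the real output using direct load_var/store_var intrinsics and phis
 * to merge the loaded values.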
 */
bool
nir_lower_indirect_derefs(nir_shader *shader, uint32_t mode_mask)
{
   bool progress = false;

   nir_foreach_function(shader, function) {
      if (function->impl)
         progress = lower_indirects_impl(function->impl, mode_mask) || progress;
   }

   return progress;
}
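
/* Example usage (illustrative; the caller and mode choice are hypothetical):
 * a driver that cannot handle indirect addressing of inputs, outputs, or
 * temporaries might run
 *
 *    nir_lower_indirect_derefs(shader, (1 << nir_var_shader_in) |
 *                                      (1 << nir_var_shader_out) |
 *                                      (1 << nir_var_local));
 *
 * after which every load_var/store_var on those modes uses only direct
 * array derefs.
 */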