7826729db85537e6f8ac151bf5dce346ce6486d7
[mesa.git] / src / mesa / drivers / dri / i965 / brw_nir.c
/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "brw_nir.h"
#include "brw_shader.h"
#include "glsl/glsl_parser_extras.h"
#include "glsl/nir/glsl_to_nir.h"
#include "program/prog_to_nir.h"

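/* nir_foreach_block() callback used by brw_nir_lower_inputs() for the scalar
 * VS backend.  Rewrites each load_input intrinsic's const_index[0] from a
 * VERT_ATTRIB_* value to the first component of its packed attribute slot
 * (4 * slot).
 */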
static bool
remap_vs_attrs(nir_block *block, void *closure)
{
   GLbitfield64 inputs_read = *((GLbitfield64 *) closure);

   nir_foreach_instr(block, instr) {
      if (instr->type != nir_instr_type_intrinsic)
         continue;

      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);

      /* We set EmitNoIndirect for VS inputs, so there are no indirects. */
      assert(intrin->intrinsic != nir_intrinsic_load_input_indirect);

      if (intrin->intrinsic == nir_intrinsic_load_input) {
         /* Attributes come in a contiguous block, ordered by their
          * gl_vert_attrib value.  That means we can compute the slot
          * number for an attribute by masking out the enabled attributes
          * before it and counting the bits.
          */
         int attr = intrin->const_index[0];
         int slot = _mesa_bitcount_64(inputs_read & BITFIELD64_MASK(attr));
         intrin->const_index[0] = 4 * slot;
      }
   }
   return true;
}

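/* Assign driver_location for every shader input, using a per-stage strategy.
 * For the scalar VS and GS backends this also lowers input variable access
 * to load_input intrinsics via nir_lower_io(); the remaining cases are
 * lowered later by the nir_lower_io() call in brw_create_nir().
 */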
static void
brw_nir_lower_inputs(nir_shader *nir,
                     const struct brw_device_info *devinfo,
                     bool is_scalar)
{
   switch (nir->stage) {
   case MESA_SHADER_VERTEX:
      /* For now, leave the vec4 backend doing the old method. */
      if (!is_scalar) {
         nir_assign_var_locations(&nir->inputs, &nir->num_inputs,
                                  type_size_vec4);
         break;
      }

      /* Start with the location of the variable's base. */
      foreach_list_typed(nir_variable, var, node, &nir->inputs) {
         var->data.driver_location = var->data.location;
      }

      /* Now use nir_lower_io to walk dereference chains.  Attribute arrays
       * are loaded as one vec4 per element (or matrix column), so we use
       * type_size_vec4 here.
       */
      nir_lower_io(nir, nir_var_shader_in, type_size_vec4);

      /* Finally, translate VERT_ATTRIB_* values into the actual registers.
       *
       * Note that we can use nir->info.inputs_read instead of
       * key->inputs_read since the two are identical aside from Gen4-5
       * edge flag differences.
       */
      GLbitfield64 inputs_read = nir->info.inputs_read;
      nir_foreach_overload(nir, overload) {
         if (overload->impl) {
            nir_foreach_block(overload->impl, remap_vs_attrs, &inputs_read);
         }
      }
      break;
   case MESA_SHADER_GEOMETRY: {
      if (!is_scalar) {
         foreach_list_typed(nir_variable, var, node, &nir->inputs) {
            var->data.driver_location = var->data.location;
         }
      } else {
         /* The GLSL linker will have already matched up GS inputs and
          * the outputs of prior stages.  The driver does extend VS outputs
          * in some cases, but only for legacy OpenGL or Gen4-5 hardware,
          * neither of which offer geometry shader support.  So we can
          * safely ignore that.
          *
          * For SSO pipelines, we use a fixed VUE map layout based on
          * variable locations, so we can rely on rendezvous-by-location
          * to make this work.
          *
          * However, we need to ignore VARYING_SLOT_PRIMITIVE_ID, as it's
          * not written by previous stages and shows up via payload magic.
          */
         struct brw_vue_map input_vue_map;
         GLbitfield64 inputs_read =
            nir->info.inputs_read & ~VARYING_BIT_PRIMITIVE_ID;
         brw_compute_vue_map(devinfo, &input_vue_map, inputs_read,
                             nir->info.separate_shader);

         /* Start with the slot for the variable's base. */
         foreach_list_typed(nir_variable, var, node, &nir->inputs) {
            assert(input_vue_map.varying_to_slot[var->data.location] != -1);
            var->data.driver_location =
               input_vue_map.varying_to_slot[var->data.location];
         }

         /* Inputs are stored in vec4 slots, so use type_size_vec4(). */
         nir_lower_io(nir, nir_var_shader_in, type_size_vec4);
      }
      break;
   }
   case MESA_SHADER_FRAGMENT:
      assert(is_scalar);
      nir_assign_var_locations(&nir->inputs, &nir->num_inputs,
                               type_size_scalar);
      break;
   case MESA_SHADER_COMPUTE:
      /* Compute shaders have no inputs. */
      assert(exec_list_is_empty(&nir->inputs));
      break;
   default:
      unreachable("unsupported shader stage");
   }
}

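/* Assign driver_location for every shader output.  Scalar VS/GS outputs are
 * also lowered to store_output intrinsics here; the other stages keep their
 * output variables until the later nir_lower_io() call in brw_create_nir().
 */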
static void
brw_nir_lower_outputs(nir_shader *nir, bool is_scalar)
{
   switch (nir->stage) {
   case MESA_SHADER_VERTEX:
   case MESA_SHADER_GEOMETRY:
      if (is_scalar) {
         nir_assign_var_locations(&nir->outputs, &nir->num_outputs,
                                  type_size_vec4_times_4);
         nir_lower_io(nir, nir_var_shader_out, type_size_vec4_times_4);
      } else {
         nir_foreach_variable(var, &nir->outputs)
            var->data.driver_location = var->data.location;
      }
      break;
   case MESA_SHADER_FRAGMENT:
      nir_assign_var_locations(&nir->outputs, &nir->num_outputs,
                               type_size_scalar);
      break;
   case MESA_SHADER_COMPUTE:
      /* Compute shaders have no outputs. */
      assert(exec_list_is_empty(&nir->outputs));
      break;
   default:
      unreachable("unsupported shader stage");
   }
}

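/* Helpers for running NIR passes.  _OPT() runs a pass and then validates the
 * shader.  OPT() is for passes that return a progress boolean: it records
 * progress in the local 'progress' flag and verifies that pass metadata was
 * handled correctly.  OPT_V() is for passes that return void.  All three
 * expect a nir_shader pointer named 'nir' to be in scope.
 */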
#define _OPT(do_pass) (({     \
   bool this_progress = true; \
   do_pass                    \
   nir_validate_shader(nir);  \
   this_progress;             \
}))

#define OPT(pass, ...) _OPT(                   \
   nir_metadata_set_validation_flag(nir);      \
   this_progress = pass(nir, ##__VA_ARGS__);   \
   if (this_progress) {                        \
      progress = true;                         \
      nir_metadata_check_validation_flag(nir); \
   }                                           \
)

#define OPT_V(pass, ...) _OPT( \
   pass(nir, ##__VA_ARGS__);   \
)

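/* Repeatedly run the optimization passes below until none of them reports
 * further progress.  The ALU and phi scalarizing passes only run for the
 * scalar backend.
 */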
static void
nir_optimize(nir_shader *nir, bool is_scalar)
{
   bool progress;
   do {
      progress = false;
      OPT_V(nir_lower_vars_to_ssa);

      if (is_scalar) {
         OPT_V(nir_lower_alu_to_scalar);
      }

      OPT(nir_copy_prop);

      if (is_scalar) {
         OPT_V(nir_lower_phis_to_scalar);
      }

      OPT(nir_copy_prop);
      OPT(nir_opt_dce);
      OPT(nir_opt_cse);
      OPT(nir_opt_peephole_select);
      OPT(nir_opt_algebraic);
      OPT(nir_opt_constant_folding);
      OPT(nir_opt_dead_cf);
      OPT(nir_opt_remove_phis);
      OPT(nir_opt_undef);
   } while (progress);
}

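/* Build the NIR for one shader stage: convert the GLSL IR or Mesa IR to NIR,
 * run the lowering and optimization passes above, and return the shader
 * ready for the scalar or vec4 backend.
 */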
nir_shader *
brw_create_nir(struct brw_context *brw,
               const struct gl_shader_program *shader_prog,
               const struct gl_program *prog,
               gl_shader_stage stage,
               bool is_scalar)
{
   struct gl_context *ctx = &brw->ctx;
   const struct brw_device_info *devinfo = brw->intelScreen->devinfo;
   const nir_shader_compiler_options *options =
      ctx->Const.ShaderCompilerOptions[stage].NirOptions;
   static const nir_lower_tex_options tex_options = {
      .lower_txp = ~0,
   };
   bool debug_enabled = INTEL_DEBUG & intel_debug_flag_for_shader_stage(stage);
   bool progress = false;
   nir_shader *nir;

   /* First, lower the GLSL IR or Mesa IR to NIR */
   if (shader_prog) {
      nir = glsl_to_nir(shader_prog, stage, options);
   } else {
      nir = prog_to_nir(prog, options);
      OPT_V(nir_convert_to_ssa); /* turn registers into SSA */
   }
   nir_validate_shader(nir);

   if (stage == MESA_SHADER_GEOMETRY) {
      OPT(nir_lower_gs_intrinsics);
   }

   OPT(nir_lower_global_vars_to_local);

   OPT_V(nir_lower_tex, &tex_options);

   OPT(nir_normalize_cubemap_coords);

   OPT(nir_split_var_copies);

   nir_optimize(nir, is_scalar);

   /* Lower a bunch of stuff */
   OPT_V(nir_lower_var_copies);

   /* Get rid of split copies */
   nir_optimize(nir, is_scalar);

   OPT_V(brw_nir_lower_inputs, devinfo, is_scalar);
   OPT_V(brw_nir_lower_outputs, is_scalar);
   nir_assign_var_locations(&nir->uniforms,
                            &nir->num_uniforms,
                            is_scalar ? type_size_scalar : type_size_vec4);
   OPT_V(nir_lower_io, -1, is_scalar ? type_size_scalar : type_size_vec4);

   OPT(nir_remove_dead_variables);

   if (shader_prog) {
      OPT_V(nir_lower_samplers, shader_prog);
   }

   OPT(nir_lower_system_values);

   if (shader_prog) {
      OPT_V(nir_lower_atomics, shader_prog);
   }

   nir_optimize(nir, is_scalar);

   if (brw->gen >= 6) {
      /* Try and fuse multiply-adds */
      OPT(brw_nir_opt_peephole_ffma);
   }

   OPT(nir_opt_algebraic_late);

   OPT(nir_lower_locals_to_regs);

   OPT_V(nir_lower_to_source_mods);
   OPT(nir_copy_prop);
   OPT(nir_opt_dce);

   if (unlikely(debug_enabled)) {
      /* Re-index SSA defs so we print more sensible numbers. */
      nir_foreach_overload(nir, overload) {
         if (overload->impl)
            nir_index_ssa_defs(overload->impl);
      }

      fprintf(stderr, "NIR (SSA form) for %s shader:\n",
              _mesa_shader_stage_to_string(stage));
      nir_print_shader(nir, stderr);
   }

   OPT_V(nir_convert_from_ssa, true);

   if (!is_scalar) {
      OPT_V(nir_move_vec_src_uses_to_dest);
      OPT(nir_lower_vec_to_movs);
   }

   /* Needed only so that OPT and OPT_V can set it */
   (void)progress;

   /* This is the last pass we run before we start emitting stuff.  It
    * determines when we need to insert boolean resolves on Gen <= 5.  We
    * run it last because it stashes data in instr->pass_flags and we don't
    * want that to be squashed by other NIR passes.
    */
   if (brw->gen <= 5)
      brw_nir_analyze_boolean_resolves(nir);

   nir_sweep(nir);

   if (unlikely(debug_enabled)) {
      fprintf(stderr, "NIR (final form) for %s shader:\n",
              _mesa_shader_stage_to_string(stage));
      nir_print_shader(nir, stderr);
   }

   return nir;
}

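/* Map a nir_alu_type onto the corresponding Gen register type used by the
 * backend IR.
 */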
enum brw_reg_type
brw_type_for_nir_type(nir_alu_type type)
{
   switch (type) {
   case nir_type_unsigned:
      return BRW_REGISTER_TYPE_UD;
   case nir_type_bool:
   case nir_type_int:
      return BRW_REGISTER_TYPE_D;
   case nir_type_float:
      return BRW_REGISTER_TYPE_F;
   default:
      unreachable("unknown type");
   }

   return BRW_REGISTER_TYPE_F;
}

/* Returns the glsl_base_type corresponding to a nir_alu_type.
 * This is used by both brw_vec4_nir and brw_fs_nir.
 */
enum glsl_base_type
brw_glsl_base_type_for_nir_type(nir_alu_type type)
{
   switch (type) {
   case nir_type_float:
      return GLSL_TYPE_FLOAT;

   case nir_type_int:
      return GLSL_TYPE_INT;

   case nir_type_unsigned:
      return GLSL_TYPE_UINT;

   default:
      unreachable("bad type");
   }
}