src/mesa/drivers/dri/i965/brw_nir.c
/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "brw_nir.h"
#include "brw_shader.h"
#include "glsl/glsl_parser_extras.h"
#include "glsl/nir/glsl_to_nir.h"
#include "program/prog_to_nir.h"

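/* Per-block callback for nir_foreach_block(): rewrites each load_input's
 * const_index[0] from a VERT_ATTRIB_* value to an offset based on the
 * attribute's packed slot (4 components per slot).
 */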
static bool
remap_vs_attrs(nir_block *block, void *closure)
{
   GLbitfield64 inputs_read = *((GLbitfield64 *) closure);

   nir_foreach_instr(block, instr) {
      if (instr->type != nir_instr_type_intrinsic)
         continue;

      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);

      /* We set EmitNoIndirect for VS inputs, so there are no indirects. */
      assert(intrin->intrinsic != nir_intrinsic_load_input_indirect);

      if (intrin->intrinsic == nir_intrinsic_load_input) {
         /* Attributes come in a contiguous block, ordered by their
          * gl_vert_attrib value.  That means we can compute the slot
          * number for an attribute by masking out the enabled attributes
          * before it and counting the bits.
          */
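         /* For example (made-up bitfield): if inputs_read has bits 3, 4,
          * and 9 set, then for attr == 9 the mask keeps bits 3 and 4,
          * _mesa_bitcount_64() returns 2, so slot == 2 and the intrinsic's
          * const_index[0] becomes 8.
          */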
         int attr = intrin->const_index[0];
         int slot = _mesa_bitcount_64(inputs_read & BITFIELD64_MASK(attr));
         intrin->const_index[0] = 4 * slot;
      }
   }
   return true;
}

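/* Assign driver_location for every shader input and, where needed, lower
 * input variable access to load_input intrinsics with the slot layout the
 * stage's backend expects.
 */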
static void
brw_nir_lower_inputs(nir_shader *nir,
                     const struct brw_device_info *devinfo,
                     bool is_scalar)
{
   switch (nir->stage) {
   case MESA_SHADER_VERTEX:
      /* For now, leave the vec4 backend doing the old method. */
      if (!is_scalar) {
         nir_assign_var_locations(&nir->inputs, &nir->num_inputs,
                                  type_size_vec4);
         break;
      }

      /* Start with the location of the variable's base. */
      foreach_list_typed(nir_variable, var, node, &nir->inputs) {
         var->data.driver_location = var->data.location;
      }

      /* Now use nir_lower_io to walk dereference chains.  Attribute arrays
       * are loaded as one vec4 per element (or matrix column), so we use
       * type_size_vec4 here.
       */
      nir_lower_io(nir, nir_var_shader_in, type_size_vec4);

      /* Finally, translate VERT_ATTRIB_* values into the actual registers.
       *
       * Note that we can use nir->info.inputs_read instead of
       * key->inputs_read since the two are identical aside from Gen4-5
       * edge flag differences.
       */
      GLbitfield64 inputs_read = nir->info.inputs_read;
      nir_foreach_overload(nir, overload) {
         if (overload->impl) {
            nir_foreach_block(overload->impl, remap_vs_attrs, &inputs_read);
         }
      }
      break;
   case MESA_SHADER_GEOMETRY: {
      if (!is_scalar) {
         foreach_list_typed(nir_variable, var, node, &nir->inputs) {
            var->data.driver_location = var->data.location;
         }
      } else {
         /* The GLSL linker will have already matched up GS inputs and
          * the outputs of prior stages.  The driver does extend VS outputs
          * in some cases, but only for legacy OpenGL or Gen4-5 hardware,
          * neither of which offers geometry shader support.  So we can
          * safely ignore that.
          *
          * For SSO pipelines, we use a fixed VUE map layout based on
          * variable locations, so we can rely on rendezvous-by-location
          * to make this work.
          *
          * However, we need to ignore VARYING_SLOT_PRIMITIVE_ID, as it's
          * not written by previous stages and shows up via payload magic.
          */
         struct brw_vue_map input_vue_map;
         GLbitfield64 inputs_read =
            nir->info.inputs_read & ~VARYING_BIT_PRIMITIVE_ID;
         brw_compute_vue_map(devinfo, &input_vue_map, inputs_read,
                             nir->info.separate_shader);

         /* Start with the slot for the variable's base. */
         foreach_list_typed(nir_variable, var, node, &nir->inputs) {
            assert(input_vue_map.varying_to_slot[var->data.location] != -1);
            var->data.driver_location =
               input_vue_map.varying_to_slot[var->data.location];
         }

         /* Inputs are stored in vec4 slots, so use type_size_vec4(). */
         nir_lower_io(nir, nir_var_shader_in, type_size_vec4);
      }
      break;
   }
   case MESA_SHADER_FRAGMENT:
      assert(is_scalar);
      nir_assign_var_locations(&nir->inputs, &nir->num_inputs,
                               type_size_scalar);
      break;
   case MESA_SHADER_COMPUTE:
      /* Compute shaders have no inputs. */
      assert(exec_list_is_empty(&nir->inputs));
      break;
   default:
      unreachable("unsupported shader stage");
   }
}

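/* Assign driver_location for shader outputs; for the scalar VS/GS path the
 * outputs are additionally lowered to store_output intrinsics via
 * nir_lower_io().
 */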
static void
brw_nir_lower_outputs(nir_shader *nir, bool is_scalar)
{
   switch (nir->stage) {
   case MESA_SHADER_VERTEX:
   case MESA_SHADER_GEOMETRY:
      if (is_scalar) {
         nir_assign_var_locations(&nir->outputs, &nir->num_outputs,
                                  type_size_vec4_times_4);
         nir_lower_io(nir, nir_var_shader_out, type_size_vec4_times_4);
      } else {
         nir_foreach_variable(var, &nir->outputs)
            var->data.driver_location = var->data.location;
      }
      break;
   case MESA_SHADER_FRAGMENT:
      nir_assign_var_locations(&nir->outputs, &nir->num_outputs,
                               type_size_scalar);
      break;
   case MESA_SHADER_COMPUTE:
      /* Compute shaders have no outputs. */
      assert(exec_list_is_empty(&nir->outputs));
      break;
   default:
      unreachable("unsupported shader stage");
   }
}

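/* NIR_TEST_CLONE is a debugging aid: when the environment variable is set,
 * every pass run through the OPT macros below is followed by a
 * clone-and-free cycle to exercise nir_shader_clone().
 */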
static bool
should_clone_nir()
{
   /* Cache the environment lookup: -1 means "not checked yet". */
   static int should_clone = -1;
   if (should_clone < 0)
      should_clone = brw_env_var_as_boolean("NIR_TEST_CLONE", false);

   return should_clone;
}

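/* _OPT() runs a pass body, validates the shader, and optionally clones and
 * re-frees it (see should_clone_nir() above).  OPT() is for passes that
 * return a progress boolean; OPT_V() is for passes with no return value.
 * Both expect a local `nir`, and OPT() additionally expects a `progress`
 * flag in the calling scope.
 */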
#define _OPT(do_pass) (({                                            \
   bool this_progress = true;                                        \
   do_pass                                                           \
   nir_validate_shader(nir);                                         \
   if (should_clone_nir()) {                                         \
      nir_shader *clone = nir_shader_clone(ralloc_parent(nir), nir); \
      ralloc_free(nir);                                              \
      nir = clone;                                                   \
   }                                                                 \
   this_progress;                                                    \
}))

#define OPT(pass, ...) _OPT(                   \
   nir_metadata_set_validation_flag(nir);      \
   this_progress = pass(nir, ##__VA_ARGS__);   \
   if (this_progress) {                        \
      progress = true;                         \
      nir_metadata_check_validation_flag(nir); \
   }                                           \
)

#define OPT_V(pass, ...) _OPT( \
   pass(nir, ##__VA_ARGS__);   \
)

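/* Core optimization loop: run the pass list repeatedly until no pass
 * reports progress.  ALU operations and phis are scalarized first when
 * targeting the scalar backend.
 */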
static nir_shader *
nir_optimize(nir_shader *nir, bool is_scalar)
{
   bool progress;
   do {
      progress = false;
      OPT_V(nir_lower_vars_to_ssa);

      if (is_scalar) {
         OPT_V(nir_lower_alu_to_scalar);
      }

      OPT(nir_copy_prop);

      if (is_scalar) {
         OPT_V(nir_lower_phis_to_scalar);
      }

      OPT(nir_copy_prop);
      OPT(nir_opt_dce);
      OPT(nir_opt_cse);
      OPT(nir_opt_peephole_select);
      OPT(nir_opt_algebraic);
      OPT(nir_opt_constant_folding);
      OPT(nir_opt_dead_cf);
      OPT(nir_opt_remove_phis);
      OPT(nir_opt_undef);
   } while (progress);

   return nir;
}

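/* Entry point: translate a GLSL shader program (or a Mesa IR program) into
 * NIR and run the common lowering and optimization pipeline shared by the
 * i965 scalar and vec4 backends.
 */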
nir_shader *
brw_create_nir(struct brw_context *brw,
               const struct gl_shader_program *shader_prog,
               const struct gl_program *prog,
               gl_shader_stage stage,
               bool is_scalar)
{
   struct gl_context *ctx = &brw->ctx;
   const struct brw_device_info *devinfo = brw->intelScreen->devinfo;
   const nir_shader_compiler_options *options =
      ctx->Const.ShaderCompilerOptions[stage].NirOptions;
   static const nir_lower_tex_options tex_options = {
      .lower_txp = ~0,
   };
   bool debug_enabled = INTEL_DEBUG & intel_debug_flag_for_shader_stage(stage);
   bool progress = false;
   nir_shader *nir;

   /* First, lower the GLSL IR or Mesa IR to NIR */
   if (shader_prog) {
      nir = glsl_to_nir(shader_prog, stage, options);
   } else {
      nir = prog_to_nir(prog, options);
      OPT_V(nir_convert_to_ssa); /* turn registers into SSA */
   }
   nir_validate_shader(nir);

   if (stage == MESA_SHADER_GEOMETRY) {
      OPT(nir_lower_gs_intrinsics);
   }

   OPT(nir_lower_global_vars_to_local);

   OPT_V(nir_lower_tex, &tex_options);

   OPT(nir_normalize_cubemap_coords);

   OPT(nir_split_var_copies);

   nir = nir_optimize(nir, is_scalar);

   /* Lower a bunch of stuff */
   OPT_V(nir_lower_var_copies);

   /* Get rid of split copies */
   nir = nir_optimize(nir, is_scalar);

   OPT_V(brw_nir_lower_inputs, devinfo, is_scalar);
   OPT_V(brw_nir_lower_outputs, is_scalar);
   nir_assign_var_locations(&nir->uniforms,
                            &nir->num_uniforms,
                            is_scalar ? type_size_scalar : type_size_vec4);
   OPT_V(nir_lower_io, nir_var_all,
         is_scalar ? type_size_scalar : type_size_vec4);

   OPT(nir_remove_dead_variables);

   if (shader_prog) {
      OPT_V(nir_lower_samplers, shader_prog);
   }

   OPT(nir_lower_system_values);

   if (shader_prog) {
      OPT_V(nir_lower_atomics, shader_prog);
   }

   nir = nir_optimize(nir, is_scalar);

   if (brw->gen >= 6) {
      /* Try and fuse multiply-adds */
      OPT(brw_nir_opt_peephole_ffma);
   }

   OPT(nir_opt_algebraic_late);

   OPT(nir_lower_locals_to_regs);

   OPT_V(nir_lower_to_source_mods);
   OPT(nir_copy_prop);
   OPT(nir_opt_dce);

   if (unlikely(debug_enabled)) {
      /* Re-index SSA defs so we print more sensible numbers. */
      nir_foreach_overload(nir, overload) {
         if (overload->impl)
            nir_index_ssa_defs(overload->impl);
      }

      fprintf(stderr, "NIR (SSA form) for %s shader:\n",
              _mesa_shader_stage_to_string(stage));
      nir_print_shader(nir, stderr);
   }

   OPT_V(nir_convert_from_ssa, true);

   if (!is_scalar) {
      OPT_V(nir_move_vec_src_uses_to_dest);
      OPT(nir_lower_vec_to_movs);
   }

   /* Needed only so that OPT and OPT_V can set it */
   (void)progress;

   /* This is the last pass we run before we start emitting stuff.  It
    * determines when we need to insert boolean resolves on Gen <= 5.  We
    * run it last because it stashes data in instr->pass_flags and we
    * don't want that to be squashed by other NIR passes.
    */
   if (brw->gen <= 5)
      brw_nir_analyze_boolean_resolves(nir);

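   /* Reclaim ralloc memory that is no longer referenced by the shader. */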
   nir_sweep(nir);

   if (unlikely(debug_enabled)) {
      fprintf(stderr, "NIR (final form) for %s shader:\n",
              _mesa_shader_stage_to_string(stage));
      nir_print_shader(nir, stderr);
   }

   return nir;
}

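/* Map a NIR ALU type onto the corresponding i965 hardware register type. */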
enum brw_reg_type
brw_type_for_nir_type(nir_alu_type type)
{
   switch (type) {
   case nir_type_unsigned:
      return BRW_REGISTER_TYPE_UD;
   case nir_type_bool:
   case nir_type_int:
      return BRW_REGISTER_TYPE_D;
   case nir_type_float:
      return BRW_REGISTER_TYPE_F;
   default:
      unreachable("unknown type");
   }

   return BRW_REGISTER_TYPE_F;
}

/* Returns the glsl_base_type corresponding to a nir_alu_type.
 * This is used by both brw_vec4_nir and brw_fs_nir.
 */
enum glsl_base_type
brw_glsl_base_type_for_nir_type(nir_alu_type type)
{
   switch (type) {
   case nir_type_float:
      return GLSL_TYPE_FLOAT;

   case nir_type_int:
      return GLSL_TYPE_INT;

   case nir_type_unsigned:
      return GLSL_TYPE_UINT;

   default:
      unreachable("bad type");
   }
}