nir/nir_opt_peephole_ffma: Move this lowering pass to the i965 driver
[mesa.git] / src/mesa/drivers/dri/i965/brw_nir.c
/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "brw_nir.h"
#include "brw_shader.h"
#include "glsl/glsl_parser_extras.h"
#include "glsl/nir/glsl_to_nir.h"
#include "program/prog_to_nir.h"

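/* nir_foreach_block() callback used by the scalar VS path: rewrite the
 * constant index of each load_input intrinsic from a VERT_ATTRIB_*
 * location to a register offset, at 4 scalar components per attribute
 * slot.
 */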
static bool
remap_vs_attrs(nir_block *block, void *closure)
{
   GLbitfield64 inputs_read = *((GLbitfield64 *) closure);

   nir_foreach_instr(block, instr) {
      if (instr->type != nir_instr_type_intrinsic)
         continue;

      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);

      /* We set EmitNoIndirect for VS inputs, so there are no indirects. */
      assert(intrin->intrinsic != nir_intrinsic_load_input_indirect);

      if (intrin->intrinsic == nir_intrinsic_load_input) {
         /* Attributes come in a contiguous block, ordered by their
          * gl_vert_attrib value.  That means we can compute the slot
          * number for an attribute by masking out the enabled attributes
          * before it and counting the bits.
          */
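         /* Worked example (hypothetical values): if inputs_read has bits
          * 0, 3, and 4 set, then for attr == 4 the mask keeps attributes
          * 0 and 3, so slot == 2 and const_index[0] becomes 8.
          */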
         int attr = intrin->const_index[0];
         int slot = _mesa_bitcount_64(inputs_read & BITFIELD64_MASK(attr));
         intrin->const_index[0] = 4 * slot;
      }
   }
   return true;
}

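/* Assign driver locations to shader input variables and, for the scalar
 * VS and GS paths, lower input loads immediately so their indices can be
 * remapped to the backend's layout.  The other cases only assign
 * locations here; they are lowered later by the catch-all nir_lower_io()
 * call in brw_create_nir().
 */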
static void
brw_nir_lower_inputs(const struct brw_device_info *devinfo,
                     nir_shader *nir, bool is_scalar)
{
   switch (nir->stage) {
   case MESA_SHADER_VERTEX:
      /* For now, leave the vec4 backend doing the old method. */
      if (!is_scalar) {
         nir_assign_var_locations(&nir->inputs, &nir->num_inputs,
                                  type_size_vec4);
         break;
      }

      /* Start with the location of the variable's base. */
      foreach_list_typed(nir_variable, var, node, &nir->inputs) {
         var->data.driver_location = var->data.location;
      }

      /* Now use nir_lower_io to walk dereference chains.  Attribute arrays
       * are loaded as one vec4 per element (or matrix column), so we use
       * type_size_vec4 here.
       */
      nir_lower_io(nir, nir_var_shader_in, type_size_vec4);

      /* Finally, translate VERT_ATTRIB_* values into the actual registers.
       *
       * Note that we can use nir->info.inputs_read instead of
       * key->inputs_read since the two are identical aside from Gen4-5
       * edge flag differences.
       */
      GLbitfield64 inputs_read = nir->info.inputs_read;
      nir_foreach_overload(nir, overload) {
         if (overload->impl) {
            nir_foreach_block(overload->impl, remap_vs_attrs, &inputs_read);
         }
      }
      break;
   case MESA_SHADER_GEOMETRY: {
      if (!is_scalar) {
         foreach_list_typed(nir_variable, var, node, &nir->inputs) {
            var->data.driver_location = var->data.location;
         }
      } else {
         /* The GLSL linker will have already matched up GS inputs and
          * the outputs of prior stages.  The driver does extend VS outputs
          * in some cases, but only for legacy OpenGL or Gen4-5 hardware,
          * neither of which offers geometry shader support.  So we can
          * safely ignore that.
          *
          * For SSO pipelines, we use a fixed VUE map layout based on
          * variable locations, so we can rely on rendezvous-by-location
          * to make this work.
          *
          * However, we need to ignore VARYING_SLOT_PRIMITIVE_ID, as it's
          * not written by previous stages and shows up via payload magic.
          */
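         /* The VUE map assigns each enabled varying a slot in the vertex
          * URB entry.  Because the layout is derived from the enabled-slot
          * bitfield (plus the separate_shader flag), the producing stage
          * arrives at the same layout, which is what makes
          * rendezvous-by-location work.
          */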
         struct brw_vue_map input_vue_map;
         GLbitfield64 inputs_read =
            nir->info.inputs_read & ~VARYING_BIT_PRIMITIVE_ID;
         brw_compute_vue_map(devinfo, &input_vue_map, inputs_read,
                             nir->info.separate_shader);

         /* Start with the slot for the variable's base. */
         foreach_list_typed(nir_variable, var, node, &nir->inputs) {
            assert(input_vue_map.varying_to_slot[var->data.location] != -1);
            var->data.driver_location =
               input_vue_map.varying_to_slot[var->data.location];
         }

         /* Inputs are stored in vec4 slots, so use type_size_vec4(). */
         nir_lower_io(nir, nir_var_shader_in, type_size_vec4);
      }
      break;
   }
   case MESA_SHADER_FRAGMENT:
      assert(is_scalar);
      nir_assign_var_locations(&nir->inputs, &nir->num_inputs,
                               type_size_scalar);
      break;
   case MESA_SHADER_COMPUTE:
      /* Compute shaders have no inputs. */
      assert(exec_list_is_empty(&nir->inputs));
      break;
   default:
      unreachable("unsupported shader stage");
   }
}

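/* Assign driver locations to shader output variables.  The scalar
 * backend keeps outputs grouped in vec4 slots but addresses them in
 * scalar components (hence type_size_vec4_times_4); the vec4 backend
 * reuses the varying location directly.
 */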
static void
brw_nir_lower_outputs(nir_shader *nir, bool is_scalar)
{
   switch (nir->stage) {
   case MESA_SHADER_VERTEX:
   case MESA_SHADER_GEOMETRY:
      if (is_scalar) {
         nir_assign_var_locations(&nir->outputs, &nir->num_outputs,
                                  type_size_vec4_times_4);
         nir_lower_io(nir, nir_var_shader_out, type_size_vec4_times_4);
      } else {
         nir_foreach_variable(var, &nir->outputs)
            var->data.driver_location = var->data.location;
      }
      break;
   case MESA_SHADER_FRAGMENT:
      nir_assign_var_locations(&nir->outputs, &nir->num_outputs,
                               type_size_scalar);
      break;
   case MESA_SHADER_COMPUTE:
      /* Compute shaders have no outputs. */
      assert(exec_list_is_empty(&nir->outputs));
      break;
   default:
      unreachable("unsupported shader stage");
   }
}

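/* Run the NIR optimization loop until it reaches a fixed point, i.e.
 * until no pass reports further progress.  The lowering passes mixed in
 * (vars-to-ssa and, for the scalar backend, ALU/phi scalarization) don't
 * report progress, so they are simply re-run on each iteration to keep
 * the IR in the form the later passes expect.
 */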
static void
nir_optimize(nir_shader *nir, bool is_scalar)
{
   bool progress;
   do {
      progress = false;
      nir_lower_vars_to_ssa(nir);
      nir_validate_shader(nir);

      if (is_scalar) {
         nir_lower_alu_to_scalar(nir);
         nir_validate_shader(nir);
      }

      progress |= nir_copy_prop(nir);
      nir_validate_shader(nir);

      if (is_scalar) {
         nir_lower_phis_to_scalar(nir);
         nir_validate_shader(nir);
      }

      progress |= nir_copy_prop(nir);
      nir_validate_shader(nir);
      progress |= nir_opt_dce(nir);
      nir_validate_shader(nir);
      progress |= nir_opt_cse(nir);
      nir_validate_shader(nir);
      progress |= nir_opt_peephole_select(nir);
      nir_validate_shader(nir);
      progress |= nir_opt_algebraic(nir);
      nir_validate_shader(nir);
      progress |= nir_opt_constant_folding(nir);
      nir_validate_shader(nir);
      progress |= nir_opt_dead_cf(nir);
      nir_validate_shader(nir);
      progress |= nir_opt_remove_phis(nir);
      nir_validate_shader(nir);
      progress |= nir_opt_undef(nir);
      nir_validate_shader(nir);
   } while (progress);
}

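/* Create a NIR shader from either a GLSL shader program or a Mesa IR
 * program, then run the lowering and optimization passes the i965
 * backends expect.  is_scalar selects between the scalar and vec4
 * backend conventions.
 */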
nir_shader *
brw_create_nir(struct brw_context *brw,
               const struct gl_shader_program *shader_prog,
               const struct gl_program *prog,
               gl_shader_stage stage,
               bool is_scalar)
{
   struct gl_context *ctx = &brw->ctx;
   const struct brw_device_info *devinfo = brw->intelScreen->devinfo;
   const nir_shader_compiler_options *options =
      ctx->Const.ShaderCompilerOptions[stage].NirOptions;
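   /* lower_txp is a bitmask with one bit per sampler dimensionality; ~0
    * asks nir_lower_tex to lower every projective texture operation into
    * a coordinate divide followed by an ordinary (non-projective) texture
    * instruction.
    */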
   static const nir_lower_tex_options tex_options = {
      .lower_txp = ~0,
   };
   bool debug_enabled = INTEL_DEBUG & intel_debug_flag_for_shader_stage(stage);
   nir_shader *nir;

   /* First, lower the GLSL IR or Mesa IR to NIR */
   if (shader_prog) {
      nir = glsl_to_nir(shader_prog, stage, options);
   } else {
      nir = prog_to_nir(prog, options);
      nir_convert_to_ssa(nir); /* turn registers into SSA */
   }
   nir_validate_shader(nir);

   if (stage == MESA_SHADER_GEOMETRY) {
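      /* nir_lower_gs_intrinsics rewrites emit_vertex/end_primitive into
       * counter-based variants, so the backend can track how many vertices
       * have been emitted and guard against overflowing the output.
       */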
      nir_lower_gs_intrinsics(nir);
      nir_validate_shader(nir);
   }

   nir_lower_global_vars_to_local(nir);
   nir_validate_shader(nir);

   nir_lower_tex(nir, &tex_options);
   nir_validate_shader(nir);

   nir_normalize_cubemap_coords(nir);
   nir_validate_shader(nir);

   nir_split_var_copies(nir);
   nir_validate_shader(nir);

   nir_optimize(nir, is_scalar);

   /* Lower variable copies into individual load/store operations */
   nir_lower_var_copies(nir);
   nir_validate_shader(nir);

   /* Get rid of split copies */
   nir_optimize(nir, is_scalar);

   brw_nir_lower_inputs(devinfo, nir, is_scalar);
   brw_nir_lower_outputs(nir, is_scalar);
   nir_assign_var_locations(&nir->uniforms,
                            &nir->num_uniforms,
                            is_scalar ? type_size_scalar : type_size_vec4);
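   /* A mode of -1 lowers loads and stores for every remaining variable
    * mode; inputs that were already lowered above are unaffected, since
    * their variable dereferences are gone.
    */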
   nir_lower_io(nir, -1, is_scalar ? type_size_scalar : type_size_vec4);
   nir_validate_shader(nir);

   nir_remove_dead_variables(nir);
   nir_validate_shader(nir);

   if (shader_prog) {
      nir_lower_samplers(nir, shader_prog);
      nir_validate_shader(nir);
   }

   nir_lower_system_values(nir);
   nir_validate_shader(nir);

   if (shader_prog) {
      nir_lower_atomics(nir, shader_prog);
      nir_validate_shader(nir);
   }

   nir_optimize(nir, is_scalar);

   if (brw->gen >= 6) {
      /* Try to fuse multiply-adds; the hardware's 3-source MAD instruction
       * only exists on Gen6 and later.
       */
      brw_nir_opt_peephole_ffma(nir);
      nir_validate_shader(nir);
   }

   nir_opt_algebraic_late(nir);
   nir_validate_shader(nir);

   nir_lower_locals_to_regs(nir);
   nir_validate_shader(nir);

   nir_lower_to_source_mods(nir);
   nir_validate_shader(nir);
   nir_copy_prop(nir);
   nir_validate_shader(nir);
   nir_opt_dce(nir);
   nir_validate_shader(nir);

   if (unlikely(debug_enabled)) {
      /* Re-index SSA defs so we print more sensible numbers. */
      nir_foreach_overload(nir, overload) {
         if (overload->impl)
            nir_index_ssa_defs(overload->impl);
      }

      fprintf(stderr, "NIR (SSA form) for %s shader:\n",
              _mesa_shader_stage_to_string(stage));
      nir_print_shader(nir, stderr);
   }

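   /* Passing true for phi_webs_only lowers only phi webs to registers;
    * everything else stays in SSA form, which the backends consume
    * directly.
    */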
   nir_convert_from_ssa(nir, true);
   nir_validate_shader(nir);

   if (!is_scalar) {
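      /* The vec4 backend has no vecN instructions.  First rewrite uses of
       * a vec's sources to reference its destination where possible, then
       * lower the remaining vecN operations into write-masked MOVs.
       */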
      nir_move_vec_src_uses_to_dest(nir);
      nir_validate_shader(nir);

      nir_lower_vec_to_movs(nir);
      nir_validate_shader(nir);
   }

   /* This is the last pass we run before we start emitting stuff.  It
    * determines when we need to insert boolean resolves on Gen <= 5.  We
    * run it last because it stashes data in instr->pass_flags and we don't
    * want that to be squashed by other NIR passes.
    */
   if (brw->gen <= 5)
      brw_nir_analyze_boolean_resolves(nir);

   nir_sweep(nir);

   if (unlikely(debug_enabled)) {
      fprintf(stderr, "NIR (final form) for %s shader:\n",
              _mesa_shader_stage_to_string(stage));
      nir_print_shader(nir, stderr);
   }

   return nir;
}

enum brw_reg_type
brw_type_for_nir_type(nir_alu_type type)
{
   switch (type) {
   case nir_type_unsigned:
      return BRW_REGISTER_TYPE_UD;
   case nir_type_bool:
   case nir_type_int:
      return BRW_REGISTER_TYPE_D;
   case nir_type_float:
      return BRW_REGISTER_TYPE_F;
   default:
      unreachable("unknown type");
   }

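   /* Not reached; this keeps compilers that can't see that unreachable()
    * never returns from warning about a missing return value.
    */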
   return BRW_REGISTER_TYPE_F;
}

/* Returns the glsl_base_type corresponding to a nir_alu_type.
 * This is used by both brw_vec4_nir and brw_fs_nir.
 */
enum glsl_base_type
brw_glsl_base_type_for_nir_type(nir_alu_type type)
{
   switch (type) {
   case nir_type_float:
      return GLSL_TYPE_FLOAT;

   case nir_type_int:
      return GLSL_TYPE_INT;

   case nir_type_unsigned:
      return GLSL_TYPE_UINT;

   default:
      unreachable("bad type");
   }
}