/*
 * Copyright (C) 2015 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include "util/debug.h"
#include "util/u_math.h"

#include "ir3_nir.h"
#include "ir3_compiler.h"
#include "ir3_shader.h"

static void ir3_setup_const_state(struct ir3_shader *shader, nir_shader *nir);

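/* NIR compiler options for pre-a6xx generations: each .lower_* flag asks
 * NIR to lower the corresponding operation into something the ir3 backend
 * handles natively.
 */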
static const nir_shader_compiler_options options = {
	.lower_uadd_carry = true,
	.lower_usub_borrow = true,
	.lower_mul_high = true,
	.lower_mul_2x32_64 = true,
	.vertex_id_zero_based = true,
	.lower_extract_byte = true,
	.lower_extract_word = true,
	.lower_all_io_to_elements = true,
	.lower_helper_invocation = true,
	.lower_bitfield_insert_to_shifts = true,
	.lower_bitfield_extract_to_shifts = true,
	.use_interpolated_input_intrinsics = true,
	.lower_to_scalar = true,
};

/* we don't want to lower vertex_id to _zero_based on newer gpus: */
static const nir_shader_compiler_options options_a6xx = {
	.lower_uadd_carry = true,
	.lower_usub_borrow = true,
	.lower_mul_high = true,
	.lower_mul_2x32_64 = true,
	.vertex_id_zero_based = false,
	.lower_extract_byte = true,
	.lower_extract_word = true,
	.lower_all_io_to_elements = true,
	.lower_helper_invocation = true,
	.lower_bitfield_insert_to_shifts = true,
	.lower_bitfield_extract_to_shifts = true,
	.use_interpolated_input_intrinsics = true,
	.lower_to_scalar = true,
};

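/* Select the options table for the target generation, so a6xx and later
 * keep the hardware vertex_id semantics while older gpus get it lowered
 * to zero-based.
 */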
const nir_shader_compiler_options *
ir3_get_compiler_options(struct ir3_compiler *compiler)
{
	if (compiler->gpu_id >= 600)
		return &options_a6xx;
	return &options;
}

/* for given shader key, are any steps handled in nir? */
bool
ir3_key_lowers_nir(const struct ir3_shader_key *key)
{
	return key->fsaturate_s | key->fsaturate_t | key->fsaturate_r |
			key->vsaturate_s | key->vsaturate_t | key->vsaturate_r |
			key->ucp_enables | key->color_two_side |
			key->fclamp_color | key->vclamp_color |
			key->tessellation | key->has_gs;
}

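/* OPT() wraps NIR_PASS() in a statement expression that evaluates to
 * whether the pass made progress, so callers can accumulate progress
 * across a pipeline of passes:
 *
 *    progress |= OPT(s, nir_opt_dce);
 *
 * OPT_V() is the variant for passes run unconditionally, where progress
 * is not tracked.
 */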
#define OPT(nir, pass, ...) ({ \
	bool this_progress = false; \
	NIR_PASS(this_progress, nir, pass, ##__VA_ARGS__); \
	this_progress; \
})

#define OPT_V(nir, pass, ...) NIR_PASS_V(nir, pass, ##__VA_ARGS__)

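/* The core optimization loop: re-run the pass list until no pass reports
 * progress.
 */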
static void
ir3_optimize_loop(nir_shader *s)
{
	bool progress;
	unsigned lower_flrp =
		(s->options->lower_flrp16 ? 16 : 0) |
		(s->options->lower_flrp32 ? 32 : 0) |
		(s->options->lower_flrp64 ? 64 : 0);

	do {
		progress = false;

		OPT_V(s, nir_lower_vars_to_ssa);
		progress |= OPT(s, nir_opt_copy_prop_vars);
		progress |= OPT(s, nir_opt_dead_write_vars);
		progress |= OPT(s, nir_lower_alu_to_scalar, NULL, NULL);
		progress |= OPT(s, nir_lower_phis_to_scalar);

		progress |= OPT(s, nir_copy_prop);
		progress |= OPT(s, nir_opt_dce);
		progress |= OPT(s, nir_opt_cse);

		/* global code motion is opt-in via the GCM env var: */
		static int gcm = -1;
		if (gcm == -1)
			gcm = env_var_as_unsigned("GCM", 0);
		if (gcm == 1)
			progress |= OPT(s, nir_opt_gcm, true);
		else if (gcm == 2)
			progress |= OPT(s, nir_opt_gcm, false);

		progress |= OPT(s, nir_opt_peephole_select, 16, true, true);
		progress |= OPT(s, nir_opt_intrinsics);
		progress |= OPT(s, nir_opt_algebraic);
		progress |= OPT(s, nir_lower_alu);
		progress |= OPT(s, nir_opt_constant_folding);

		if (lower_flrp != 0) {
			if (OPT(s, nir_lower_flrp,
					lower_flrp,
					false /* always_precise */,
					s->options->lower_ffma)) {
				OPT(s, nir_opt_constant_folding);
			}

			/* Nothing should rematerialize any flrps, so we only
			 * need to do this lowering once.
			 */
			lower_flrp = 0;
		}

		progress |= OPT(s, nir_opt_dead_cf);
		if (OPT(s, nir_opt_trivial_continues)) {
			progress |= true;
			/* If nir_opt_trivial_continues makes progress, then we need to clean
			 * things up if we want any hope of nir_opt_if or nir_opt_loop_unroll
			 * to make progress.
			 */
			OPT(s, nir_copy_prop);
			OPT(s, nir_opt_dce);
		}
		progress |= OPT(s, nir_opt_if, false);
		progress |= OPT(s, nir_opt_remove_phis);
		progress |= OPT(s, nir_opt_undef);
	} while (progress);
}

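/* First-time and per-variant NIR lowering entry point.  Called once with
 * key == NULL for the one-time lowering work, and potentially again per
 * shader variant when the key requires nir-level lowering (see
 * ir3_key_lowers_nir()).
 */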
void
ir3_optimize_nir(struct ir3_shader *shader, nir_shader *s,
		const struct ir3_shader_key *key)
{
	struct nir_lower_tex_options tex_options = {
			.lower_tg4_offsets = true,
	};

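	/* When the pipeline contains tessellation or geometry stages, stage
	 * I/O is first lowered to explicit offsets, keyed on the shader stage:
	 */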
	if (key && (key->has_gs || key->tessellation)) {
		switch (shader->type) {
		case MESA_SHADER_VERTEX:
			NIR_PASS_V(s, ir3_nir_lower_to_explicit_io, shader,
					key->tessellation);
			break;
		case MESA_SHADER_TESS_CTRL:
			NIR_PASS_V(s, ir3_nir_lower_tess_ctrl, shader, key->tessellation);
			break;
		case MESA_SHADER_TESS_EVAL:
			NIR_PASS_V(s, ir3_nir_lower_tess_eval, key->tessellation);
			if (key->has_gs)
				NIR_PASS_V(s, ir3_nir_lower_to_explicit_io, shader,
						key->tessellation);
			break;
		case MESA_SHADER_GEOMETRY:
			NIR_PASS_V(s, ir3_nir_lower_gs, shader);
			break;
		default:
			break;
		}
	}

	if (key) {
		switch (shader->type) {
		case MESA_SHADER_FRAGMENT:
			tex_options.saturate_s = key->fsaturate_s;
			tex_options.saturate_t = key->fsaturate_t;
			tex_options.saturate_r = key->fsaturate_r;
			break;
		case MESA_SHADER_VERTEX:
			tex_options.saturate_s = key->vsaturate_s;
			tex_options.saturate_t = key->vsaturate_t;
			tex_options.saturate_r = key->vsaturate_r;
			break;
		default:
			break;
		}
	}

	if (shader->compiler->gpu_id >= 400) {
		/* a4xx seems to have *no* sam.p */
		tex_options.lower_txp = ~0;  /* lower all txp */
	} else {
		/* a3xx just needs to avoid sam.p for 3d tex */
		tex_options.lower_txp = (1 << GLSL_SAMPLER_DIM_3D);
	}

	if (ir3_shader_debug & IR3_DBG_DISASM) {
		debug_printf("----------------------\n");
		nir_print_shader(s, stdout);
		debug_printf("----------------------\n");
	}

	OPT_V(s, nir_lower_regs_to_ssa);
	OPT_V(s, ir3_nir_lower_io_offsets);

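	/* The lowering below is variant-specific (driven by the key); the
	 * key == NULL path instead runs the one-time lowerings that must not
	 * be repeated on a 2nd variant lowering pass:
	 */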
	if (key) {
		if (s->info.stage == MESA_SHADER_VERTEX) {
			OPT_V(s, nir_lower_clip_vs, key->ucp_enables, false, false, NULL);
			if (key->vclamp_color)
				OPT_V(s, nir_lower_clamp_color_outputs);
		} else if (s->info.stage == MESA_SHADER_FRAGMENT) {
			OPT_V(s, nir_lower_clip_fs, key->ucp_enables, false);
			if (key->fclamp_color)
				OPT_V(s, nir_lower_clamp_color_outputs);
		}
		if (key->color_two_side) {
			OPT_V(s, nir_lower_two_sided_color);
		}
	} else {
		/* only want to do this the first time (when key is null)
		 * and not again on any potential 2nd variant lowering pass:
		 */
		OPT_V(s, ir3_nir_apply_trig_workarounds);

		/* This wouldn't hurt to run multiple times, but there is
		 * no need to:
		 */
		if (shader->type == MESA_SHADER_FRAGMENT)
			OPT_V(s, nir_lower_fb_read);
	}

	OPT_V(s, nir_lower_tex, &tex_options);
	OPT_V(s, nir_lower_load_const_to_scalar);
	if (shader->compiler->gpu_id < 500)
		OPT_V(s, ir3_nir_lower_tg4_to_tex);

	ir3_optimize_loop(s);

	/* do ubo load and idiv lowering after first opt loop to get a chance to
	 * propagate constants for divide by immed power-of-two and constant ubo
	 * block/offsets:
	 *
	 * NOTE that UBO analysis pass should only be done once, before variants
	 */
	const bool ubo_progress = !key && OPT(s, ir3_nir_analyze_ubo_ranges, shader);
	const bool idiv_progress = OPT(s, nir_lower_idiv, nir_lower_idiv_fast);
	if (ubo_progress || idiv_progress)
		ir3_optimize_loop(s);

	/* Do late algebraic optimization to turn add(a, neg(b)) back into
	 * subs, then the mandatory cleanup after algebraic.  Note that it may
	 * produce fnegs, and if so then we need to keep running to squash
	 * fneg(fneg(a)).
	 */
	bool more_late_algebraic = true;
	while (more_late_algebraic) {
		more_late_algebraic = OPT(s, nir_opt_algebraic_late);
		OPT_V(s, nir_opt_constant_folding);
		OPT_V(s, nir_copy_prop);
		OPT_V(s, nir_opt_dce);
		OPT_V(s, nir_opt_cse);
	}

	OPT_V(s, nir_remove_dead_variables, nir_var_function_temp);

	OPT_V(s, nir_opt_sink, nir_move_const_undef);

	if (ir3_shader_debug & IR3_DBG_DISASM) {
		debug_printf("----------------------\n");
		nir_print_shader(s, stdout);
		debug_printf("----------------------\n");
	}

	/* The first time thru, when not creating variant, do the one-time
	 * const_state layout setup.  This should be done after ubo range
	 * analysis.
	 */
	if (!key) {
		ir3_setup_const_state(shader, s);
	}
}

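/* Scan the shader for intrinsics that consume driver-internal constants
 * (SSBO sizes, image dimensions, UBO pointers, driver params), recording
 * in the layout how many of each will be needed.
 */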
static void
ir3_nir_scan_driver_consts(nir_shader *shader,
		struct ir3_const_state *layout)
{
	nir_foreach_function(function, shader) {
		if (!function->impl)
			continue;

		nir_foreach_block(block, function->impl) {
			nir_foreach_instr(instr, block) {
				if (instr->type != nir_instr_type_intrinsic)
					continue;

				nir_intrinsic_instr *intr =
					nir_instr_as_intrinsic(instr);
				unsigned idx;

				switch (intr->intrinsic) {
				case nir_intrinsic_get_buffer_size:
					idx = nir_src_as_uint(intr->src[0]);
					if (layout->ssbo_size.mask & (1 << idx))
						break;
					layout->ssbo_size.mask |= (1 << idx);
					layout->ssbo_size.off[idx] =
						layout->ssbo_size.count;
					layout->ssbo_size.count += 1; /* one const per */
					break;
				case nir_intrinsic_image_deref_atomic_add:
				case nir_intrinsic_image_deref_atomic_imin:
				case nir_intrinsic_image_deref_atomic_umin:
				case nir_intrinsic_image_deref_atomic_imax:
				case nir_intrinsic_image_deref_atomic_umax:
				case nir_intrinsic_image_deref_atomic_and:
				case nir_intrinsic_image_deref_atomic_or:
				case nir_intrinsic_image_deref_atomic_xor:
				case nir_intrinsic_image_deref_atomic_exchange:
				case nir_intrinsic_image_deref_atomic_comp_swap:
				case nir_intrinsic_image_deref_store:
				case nir_intrinsic_image_deref_size:
					idx = nir_intrinsic_get_var(intr, 0)->data.driver_location;
					if (layout->image_dims.mask & (1 << idx))
						break;
					layout->image_dims.mask |= (1 << idx);
					layout->image_dims.off[idx] =
						layout->image_dims.count;
					layout->image_dims.count += 3; /* three const per */
					break;
				case nir_intrinsic_load_ubo:
					if (nir_src_is_const(intr->src[0])) {
						layout->num_ubos = MAX2(layout->num_ubos,
								nir_src_as_uint(intr->src[0]) + 1);
					} else {
						layout->num_ubos = shader->info.num_ubos;
					}
					break;
				case nir_intrinsic_load_base_vertex:
				case nir_intrinsic_load_first_vertex:
					layout->num_driver_params =
						MAX2(layout->num_driver_params, IR3_DP_VTXID_BASE + 1);
					break;
				case nir_intrinsic_load_user_clip_plane:
					layout->num_driver_params =
						MAX2(layout->num_driver_params, IR3_DP_UCP7_W + 1);
					break;
				case nir_intrinsic_load_num_work_groups:
					layout->num_driver_params =
						MAX2(layout->num_driver_params, IR3_DP_NUM_WORK_GROUPS_Z + 1);
					break;
				case nir_intrinsic_load_local_group_size:
					layout->num_driver_params =
						MAX2(layout->num_driver_params, IR3_DP_LOCAL_GROUP_SIZE_Z + 1);
					break;
				default:
					break;
				}
			}
		}
	}
}

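/* Lay out the driver constant space: after the lowered UBO ranges come
 * (in vec4 units) UBO pointers, SSBO sizes, image dimensions, driver
 * params, streamout buffer pointers, and per-stage primitive param/map,
 * with immediates last.
 */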
static void
ir3_setup_const_state(struct ir3_shader *shader, nir_shader *nir)
{
	struct ir3_compiler *compiler = shader->compiler;
	struct ir3_const_state *const_state = &shader->const_state;

	memset(&const_state->offsets, ~0, sizeof(const_state->offsets));

	ir3_nir_scan_driver_consts(nir, const_state);

	if ((compiler->gpu_id < 500) &&
			(shader->stream_output.num_outputs > 0)) {
		const_state->num_driver_params =
			MAX2(const_state->num_driver_params, IR3_DP_VTXCNT_MAX + 1);
	}

	/* num_driver_params is scalar, align to vec4: */
	const_state->num_driver_params = align(const_state->num_driver_params, 4);

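	/* From here on, constoff is an offset in vec4 const slots.  ubo_state
	 * holds the lowered UBO ranges in bytes, hence the /16 conversion.
	 */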
	debug_assert((shader->ubo_state.size % 16) == 0);
	unsigned constoff = align(shader->ubo_state.size / 16, 8);
	unsigned ptrsz = ir3_pointer_size(compiler);

	if (const_state->num_ubos > 0) {
		const_state->offsets.ubo = constoff;
		constoff += align(nir->info.num_ubos * ptrsz, 4) / 4;
	}

	if (const_state->ssbo_size.count > 0) {
		unsigned cnt = const_state->ssbo_size.count;
		const_state->offsets.ssbo_sizes = constoff;
		constoff += align(cnt, 4) / 4;
	}

	if (const_state->image_dims.count > 0) {
		unsigned cnt = const_state->image_dims.count;
		const_state->offsets.image_dims = constoff;
		constoff += align(cnt, 4) / 4;
	}

	if (const_state->num_driver_params > 0)
		const_state->offsets.driver_param = constoff;
	constoff += const_state->num_driver_params / 4;

	if ((shader->type == MESA_SHADER_VERTEX) &&
			(compiler->gpu_id < 500) &&
			shader->stream_output.num_outputs > 0) {
		const_state->offsets.tfbo = constoff;
		constoff += align(IR3_MAX_SO_BUFFERS * ptrsz, 4) / 4;
	}

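	/* Reserve the per-stage primitive param/map consts: tess stages use
	 * 5 vec4 of primitive params followed by the primitive map, geometry
	 * uses 1 vec4 plus the map, and vertex just the params:
	 */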
	switch (shader->type) {
	case MESA_SHADER_VERTEX:
		const_state->offsets.primitive_param = constoff;
		constoff += 1;
		break;
	case MESA_SHADER_TESS_CTRL:
	case MESA_SHADER_TESS_EVAL:
		constoff = align(constoff - 1, 4) + 3;
		const_state->offsets.primitive_param = constoff;
		const_state->offsets.primitive_map = constoff + 5;
		constoff += 5 + DIV_ROUND_UP(nir->num_inputs, 4);
		break;
	case MESA_SHADER_GEOMETRY:
		const_state->offsets.primitive_param = constoff;
		const_state->offsets.primitive_map = constoff + 1;
		constoff += 1 + DIV_ROUND_UP(nir->num_inputs, 4);
		break;
	default:
		break;
	}

	const_state->offsets.immediate = constoff;
}