/*
 * Copyright (C) 2015 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */
#include "util/debug.h"
#include "util/u_math.h"

#include "ir3_compiler.h"
#include "ir3_nir.h"
#include "ir3_shader.h"
static const nir_shader_compiler_options options = {
   .lower_uadd_carry = true,
   .lower_usub_borrow = true,
   .lower_mul_high = true,
   .lower_mul_2x32_64 = true,
   .vertex_id_zero_based = true,
   .lower_extract_byte = true,
   .lower_extract_word = true,
   .lower_all_io_to_elements = true,
   .lower_helper_invocation = true,
   .lower_bitfield_insert_to_shifts = true,
   .lower_bitfield_extract_to_shifts = true,
   .lower_pack_half_2x16 = true,
   .lower_pack_snorm_4x8 = true,
   .lower_pack_snorm_2x16 = true,
   .lower_pack_unorm_4x8 = true,
   .lower_pack_unorm_2x16 = true,
   .lower_unpack_half_2x16 = true,
   .lower_unpack_snorm_4x8 = true,
   .lower_unpack_snorm_2x16 = true,
   .lower_unpack_unorm_4x8 = true,
   .lower_unpack_unorm_2x16 = true,
   .lower_pack_split = true,
   .use_interpolated_input_intrinsics = true,
   .lower_to_scalar = true,
   .lower_wpos_pntc = true,
};
/* we don't want to lower vertex_id to _zero_based on newer gpus: */
static const nir_shader_compiler_options options_a6xx = {
   .lower_uadd_carry = true,
   .lower_usub_borrow = true,
   .lower_mul_high = true,
   .lower_mul_2x32_64 = true,
   .vertex_id_zero_based = false,
   .lower_extract_byte = true,
   .lower_extract_word = true,
   .lower_all_io_to_elements = true,
   .lower_helper_invocation = true,
   .lower_bitfield_insert_to_shifts = true,
   .lower_bitfield_extract_to_shifts = true,
   .lower_pack_half_2x16 = true,
   .lower_pack_snorm_4x8 = true,
   .lower_pack_snorm_2x16 = true,
   .lower_pack_unorm_4x8 = true,
   .lower_pack_unorm_2x16 = true,
   .lower_unpack_half_2x16 = true,
   .lower_unpack_snorm_4x8 = true,
   .lower_unpack_snorm_2x16 = true,
   .lower_unpack_unorm_4x8 = true,
   .lower_unpack_unorm_2x16 = true,
   .lower_pack_split = true,
   .use_interpolated_input_intrinsics = true,
   .lower_rotate = true,
   .vectorize_io = true,
   .lower_to_scalar = true,
   .max_unroll_iterations = 32,
   .lower_wpos_pntc = true,
};
const nir_shader_compiler_options *
ir3_get_compiler_options(struct ir3_compiler *compiler)
{
   if (compiler->gpu_id >= 600)
      return &options_a6xx;
   return &options;
}
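
/* OPT() wraps a NIR pass in a statement-expression that evaluates to
 * whether the pass made progress, so results can be accumulated with |=.
 * OPT_V() is for passes where we don't care about the progress result.
 */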
#define OPT(nir, pass, ...) ({ \
   bool this_progress = false; \
   NIR_PASS(this_progress, nir, pass, ##__VA_ARGS__); \
   this_progress; \
})

#define OPT_V(nir, pass, ...) NIR_PASS_V(nir, pass, ##__VA_ARGS__)
void
ir3_optimize_loop(nir_shader *s)
{
   bool progress;
   unsigned lower_flrp =
      (s->options->lower_flrp16 ? 16 : 0) |
      (s->options->lower_flrp32 ? 32 : 0) |
      (s->options->lower_flrp64 ? 64 : 0);

   do {
      progress = false;
      OPT_V(s, nir_lower_vars_to_ssa);
      progress |= OPT(s, nir_opt_copy_prop_vars);
      progress |= OPT(s, nir_opt_dead_write_vars);
      progress |= OPT(s, nir_lower_alu_to_scalar, NULL, NULL);
      progress |= OPT(s, nir_lower_phis_to_scalar);

      progress |= OPT(s, nir_copy_prop);
      progress |= OPT(s, nir_opt_dce);
      progress |= OPT(s, nir_opt_cse);
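
      /* Global code motion is not run by default; the GCM env var enables
       * it for debugging (1 == with value-numbering, 2 == without):
       */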
      static int gcm = -1;
      if (gcm == -1)
         gcm = env_var_as_unsigned("GCM", 0);
      if (gcm == 1)
         progress |= OPT(s, nir_opt_gcm, true);
      else if (gcm == 2)
         progress |= OPT(s, nir_opt_gcm, false);
      progress |= OPT(s, nir_opt_peephole_select, 16, true, true);
      progress |= OPT(s, nir_opt_intrinsics);
      progress |= OPT(s, nir_opt_algebraic);
      progress |= OPT(s, nir_lower_alu);
      progress |= OPT(s, nir_lower_pack);
      progress |= OPT(s, nir_opt_constant_folding);

      if (lower_flrp != 0) {
         if (OPT(s, nir_lower_flrp,
               lower_flrp,
               false /* always_precise */,
               s->options->lower_ffma)) {
            OPT(s, nir_opt_constant_folding);
         }

         /* Nothing should rematerialize any flrps, so we only
          * need to do this lowering once.
          */
         lower_flrp = 0;
      }

      progress |= OPT(s, nir_opt_dead_cf);
      if (OPT(s, nir_opt_trivial_continues)) {
         progress |= true;
         /* If nir_opt_trivial_continues makes progress, then we need to clean
          * things up if we want any hope of nir_opt_if or nir_opt_loop_unroll
          * to make progress.
          */
         OPT(s, nir_copy_prop);
      }
      progress |= OPT(s, nir_opt_if, false);
      progress |= OPT(s, nir_opt_remove_phis);
      progress |= OPT(s, nir_opt_undef);
   } while (progress);
}
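
/* Used by nir_lower_wrmasks below: for these store intrinsics, a store
 * whose write-mask has gaps is split into multiple stores, since the
 * backend expects the written components of an SSBO/shared/global store
 * to be contiguous.
 */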
static bool
should_split_wrmask(const nir_instr *instr, const void *data)
{
   nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);

   switch (intr->intrinsic) {
   case nir_intrinsic_store_ssbo:
   case nir_intrinsic_store_shared:
   case nir_intrinsic_store_global:
      return true;
   default:
      return false;
   }
}
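
/* Variant-independent lowering and optimization, run once per shader;
 * the per-variant parts are handled later in ir3_nir_lower_variant():
 */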
void
ir3_optimize_nir(struct ir3_shader *shader, nir_shader *s)
{
   struct nir_lower_tex_options tex_options = {
      .lower_tg4_offsets = true,
   };

   if (shader->compiler->gpu_id >= 400) {
      /* a4xx seems to have *no* sam.p */
      tex_options.lower_txp = ~0;  /* lower all txp */
   } else {
      /* a3xx just needs to avoid sam.p for 3d tex */
      tex_options.lower_txp = (1 << GLSL_SAMPLER_DIM_3D);
   }

   if (ir3_shader_debug & IR3_DBG_DISASM) {
      debug_printf("----------------------\n");
      nir_print_shader(s, stdout);
      debug_printf("----------------------\n");
   }

   OPT_V(s, nir_lower_regs_to_ssa);
   OPT_V(s, nir_lower_wrmasks, should_split_wrmask, s);

   OPT_V(s, ir3_nir_apply_trig_workarounds);

   if (shader->type == MESA_SHADER_FRAGMENT)
      OPT_V(s, nir_lower_fb_read);

   OPT_V(s, nir_lower_tex, &tex_options);
   OPT_V(s, nir_lower_load_const_to_scalar);
   if (shader->compiler->gpu_id < 500)
      OPT_V(s, ir3_nir_lower_tg4_to_tex);

   ir3_optimize_loop(s);

   /* do idiv lowering after first opt loop to get a chance to propagate
    * constants for divide by immed power-of-two:
    */
   const bool idiv_progress = OPT(s, nir_lower_idiv, nir_lower_idiv_fast);

   if (idiv_progress)
      ir3_optimize_loop(s);

   OPT_V(s, nir_remove_dead_variables, nir_var_function_temp, NULL);

   if (ir3_shader_debug & IR3_DBG_DISASM) {
      debug_printf("----------------------\n");
      nir_print_shader(s, stdout);
      debug_printf("----------------------\n");
   }
}
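
/* Per-variant lowering: applies the parts of the shader key that change
 * the NIR (tess/gs plumbing, clip planes, color clamping, two-sided color,
 * tex coordinate saturation, UBO handling), then re-runs the optimization
 * loop:
 */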
void
ir3_nir_lower_variant(struct ir3_shader_variant *so, nir_shader *s)
{
   if (ir3_shader_debug & IR3_DBG_DISASM) {
      debug_printf("----------------------\n");
      nir_print_shader(s, stdout);
      debug_printf("----------------------\n");
   }

   bool progress = false;

   if (so->key.has_gs || so->key.tessellation) {
      switch (so->shader->type) {
      case MESA_SHADER_VERTEX:
         NIR_PASS_V(s, ir3_nir_lower_to_explicit_output, so,
               so->key.tessellation);
         progress = true;
         break;
      case MESA_SHADER_TESS_CTRL:
         NIR_PASS_V(s, ir3_nir_lower_tess_ctrl, so, so->key.tessellation);
         NIR_PASS_V(s, ir3_nir_lower_to_explicit_input);
         progress = true;
         break;
      case MESA_SHADER_TESS_EVAL:
         NIR_PASS_V(s, ir3_nir_lower_tess_eval, so->key.tessellation);
         if (so->key.has_gs)
            NIR_PASS_V(s, ir3_nir_lower_to_explicit_output, so,
                  so->key.tessellation);
         progress = true;
         break;
      case MESA_SHADER_GEOMETRY:
         NIR_PASS_V(s, ir3_nir_lower_to_explicit_input);
         progress = true;
         break;
      default:
         break;
      }
   }

   if (s->info.stage == MESA_SHADER_VERTEX) {
      if (so->key.ucp_enables)
         progress |= OPT(s, nir_lower_clip_vs, so->key.ucp_enables,
               false, false, NULL);
      if (so->key.vclamp_color)
         progress |= OPT(s, nir_lower_clamp_color_outputs);
   } else if (s->info.stage == MESA_SHADER_FRAGMENT) {
      if (so->key.ucp_enables)
         progress |= OPT(s, nir_lower_clip_fs, so->key.ucp_enables, false);
      if (so->key.fclamp_color)
         progress |= OPT(s, nir_lower_clamp_color_outputs);
   }
   if (so->key.color_two_side) {
      OPT_V(s, nir_lower_two_sided_color);
      progress = true;
   }

   struct nir_lower_tex_options tex_options = { };

   switch (so->shader->type) {
   case MESA_SHADER_FRAGMENT:
      tex_options.saturate_s = so->key.fsaturate_s;
      tex_options.saturate_t = so->key.fsaturate_t;
      tex_options.saturate_r = so->key.fsaturate_r;
      break;
   case MESA_SHADER_VERTEX:
      tex_options.saturate_s = so->key.vsaturate_s;
      tex_options.saturate_t = so->key.vsaturate_t;
      tex_options.saturate_r = so->key.vsaturate_r;
      break;
   default:
      break;
   }

   if (tex_options.saturate_s || tex_options.saturate_t ||
         tex_options.saturate_r) {
      progress |= OPT(s, nir_lower_tex, &tex_options);
   }

   if (!so->binning_pass)
      OPT_V(s, ir3_nir_analyze_ubo_ranges, so);

   progress |= OPT(s, ir3_nir_lower_ubo_loads, so);

   /* UBO offset lowering has to come after we've decided what will
    * be left as load_ubo
    */
   OPT_V(s, ir3_nir_lower_io_offsets, so->shader->compiler->gpu_id);

   if (progress)
      ir3_optimize_loop(s);

   /* Do late algebraic optimization to turn add(a, neg(b)) back into
    * subs, then the mandatory cleanup after algebraic.  Note that it may
    * produce fnegs, and if so then we need to keep running to squash
    * fneg(fneg(a)).
    */
   bool more_late_algebraic = true;
   while (more_late_algebraic) {
      more_late_algebraic = OPT(s, nir_opt_algebraic_late);
      OPT_V(s, nir_opt_constant_folding);
      OPT_V(s, nir_copy_prop);
      OPT_V(s, nir_opt_dce);
      OPT_V(s, nir_opt_cse);
   }

   OPT_V(s, nir_opt_sink, nir_move_const_undef);

   if (ir3_shader_debug & IR3_DBG_DISASM) {
      debug_printf("----------------------\n");
      nir_print_shader(s, stdout);
      debug_printf("----------------------\n");
   }

   /* Binning pass variants re-use the const_state of the corresponding
    * draw pass shader, so that same const emit can be re-used for both
    * binning and draw passes:
    */
   if (!so->binning_pass)
      ir3_setup_const_state(s, so, ir3_const_state(so));
}
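
/* Scan for intrinsics that consume driver-supplied constants (SSBO sizes,
 * image dimensions, and the various driver params), so that
 * ir3_setup_const_state() can reserve space for them in the const layout:
 */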
static void
ir3_nir_scan_driver_consts(nir_shader *shader,
      struct ir3_const_state *layout)
{
   nir_foreach_function (function, shader) {
      if (!function->impl)
         continue;

      nir_foreach_block (block, function->impl) {
         nir_foreach_instr (instr, block) {
            if (instr->type != nir_instr_type_intrinsic)
               continue;

            nir_intrinsic_instr *intr =
               nir_instr_as_intrinsic(instr);
            unsigned idx;

            switch (intr->intrinsic) {
            case nir_intrinsic_get_buffer_size:
               idx = nir_src_as_uint(intr->src[0]);
               if (layout->ssbo_size.mask & (1 << idx))
                  break;
               layout->ssbo_size.mask |= (1 << idx);
               layout->ssbo_size.off[idx] =
                  layout->ssbo_size.count;
               layout->ssbo_size.count += 1; /* one const per */
               break;
            case nir_intrinsic_image_atomic_add:
            case nir_intrinsic_image_atomic_imin:
            case nir_intrinsic_image_atomic_umin:
            case nir_intrinsic_image_atomic_imax:
            case nir_intrinsic_image_atomic_umax:
            case nir_intrinsic_image_atomic_and:
            case nir_intrinsic_image_atomic_or:
            case nir_intrinsic_image_atomic_xor:
            case nir_intrinsic_image_atomic_exchange:
            case nir_intrinsic_image_atomic_comp_swap:
            case nir_intrinsic_image_store:
            case nir_intrinsic_image_size:
               idx = nir_src_as_uint(intr->src[0]);
               if (layout->image_dims.mask & (1 << idx))
                  break;
               layout->image_dims.mask |= (1 << idx);
               layout->image_dims.off[idx] =
                  layout->image_dims.count;
               layout->image_dims.count += 3; /* three const per */
               break;
            case nir_intrinsic_load_base_vertex:
            case nir_intrinsic_load_first_vertex:
               layout->num_driver_params =
                  MAX2(layout->num_driver_params, IR3_DP_VTXID_BASE + 1);
               break;
            case nir_intrinsic_load_base_instance:
               layout->num_driver_params =
                  MAX2(layout->num_driver_params, IR3_DP_INSTID_BASE + 1);
               break;
            case nir_intrinsic_load_user_clip_plane:
               layout->num_driver_params =
                  MAX2(layout->num_driver_params, IR3_DP_UCP7_W + 1);
               break;
            case nir_intrinsic_load_num_work_groups:
               layout->num_driver_params =
                  MAX2(layout->num_driver_params, IR3_DP_NUM_WORK_GROUPS_Z + 1);
               break;
            case nir_intrinsic_load_local_group_size:
               layout->num_driver_params =
                  MAX2(layout->num_driver_params, IR3_DP_LOCAL_GROUP_SIZE_Z + 1);
               break;
            default:
               break;
            }
         }
      }
   }
}

/* Sets up the variant-dependent constant state for the ir3_shader.  Note
 * that it is also used from ir3_nir_analyze_ubo_ranges() to figure out the
 * maximum number of driver params that would eventually be used, to leave
 * space for this function to allocate the driver params.
 */
void
ir3_setup_const_state(nir_shader *nir, struct ir3_shader_variant *v,
      struct ir3_const_state *const_state)
{
   struct ir3_compiler *compiler = v->shader->compiler;

   memset(&const_state->offsets, ~0, sizeof(const_state->offsets));

   ir3_nir_scan_driver_consts(nir, const_state);

   if ((compiler->gpu_id < 500) &&
         (v->shader->stream_output.num_outputs > 0)) {
      const_state->num_driver_params =
         MAX2(const_state->num_driver_params, IR3_DP_VTXCNT_MAX + 1);
   }

   const_state->num_ubos = nir->info.num_ubos;

   /* num_driver_params is scalar, align to vec4: */
   const_state->num_driver_params = align(const_state->num_driver_params, 4);

   debug_assert((const_state->ubo_state.size % 16) == 0);
   unsigned constoff = const_state->ubo_state.size / 16;
   unsigned ptrsz = ir3_pointer_size(compiler);
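
   /* From here on, constoff allocates the const file in vec4 units, one
    * section at a time, in a fixed order: UBO pointers, SSBO sizes, image
    * dims, driver params, streamout (tfbo) pointers, primitive param/map,
    * and finally the immediates.
    */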
   if (const_state->num_ubos > 0) {
      const_state->offsets.ubo = constoff;
      constoff += align(const_state->num_ubos * ptrsz, 4) / 4;
   }

   if (const_state->ssbo_size.count > 0) {
      unsigned cnt = const_state->ssbo_size.count;
      const_state->offsets.ssbo_sizes = constoff;
      constoff += align(cnt, 4) / 4;
   }

   if (const_state->image_dims.count > 0) {
      unsigned cnt = const_state->image_dims.count;
      const_state->offsets.image_dims = constoff;
      constoff += align(cnt, 4) / 4;
   }

   if (const_state->num_driver_params > 0) {
      /* offset cannot be 0 for vs params loaded by CP_DRAW_INDIRECT_MULTI */
      if (v->type == MESA_SHADER_VERTEX && compiler->gpu_id >= 600)
         constoff = MAX2(constoff, 1);
      const_state->offsets.driver_param = constoff;
   }
   constoff += const_state->num_driver_params / 4;

   if ((v->type == MESA_SHADER_VERTEX) &&
         (compiler->gpu_id < 500) &&
         v->shader->stream_output.num_outputs > 0) {
      const_state->offsets.tfbo = constoff;
      constoff += align(IR3_MAX_SO_BUFFERS * ptrsz, 4) / 4;
   }
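
   /* Geometry-pipeline stages additionally get primitive_param and
    * primitive_map sections.  Note that align(constoff - 1, 4) + 3 below
    * bumps constoff to the next value that is 3 mod 4, which (as far as I
    * can tell) is where the tess param loading expects these to land.
    */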
   switch (v->type) {
   case MESA_SHADER_VERTEX:
      const_state->offsets.primitive_param = constoff;
      constoff += 1;
      break;
   case MESA_SHADER_TESS_CTRL:
   case MESA_SHADER_TESS_EVAL:
      constoff = align(constoff - 1, 4) + 3;
      const_state->offsets.primitive_param = constoff;
      const_state->offsets.primitive_map = constoff + 5;
      constoff += 5 + DIV_ROUND_UP(nir->num_inputs, 4);
      break;
   case MESA_SHADER_GEOMETRY:
      const_state->offsets.primitive_param = constoff;
      const_state->offsets.primitive_map = constoff + 1;
      constoff += 1 + DIV_ROUND_UP(nir->num_inputs, 4);
      break;
   default:
      break;
   }

   const_state->offsets.immediate = constoff;

   assert(constoff <= ir3_max_const(v));
}