/*
 * Copyright © 2019 Google, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "ir3_nir.h"
#include "ir3_compiler.h"
#include "compiler/nir/nir_builder.h"
struct state {
   uint32_t topology;

   struct primitive_map {
      unsigned loc[32];
      unsigned size[32];
      unsigned stride;
   } map;

   nir_ssa_def *header;

   nir_variable *vertex_count_var;
   nir_variable *emitted_vertex_var;
   nir_variable *vertex_flags_out;

   struct exec_list old_outputs;
   struct exec_list emit_outputs;

   nir_ssa_def *outer_levels[4];
   nir_ssa_def *inner_levels[2];
};
static nir_ssa_def *
bitfield_extract(nir_builder *b, nir_ssa_def *v, uint32_t start, uint32_t mask)
{
   return nir_iand(b, nir_ushr(b, v, nir_imm_int(b, start)),
         nir_imm_int(b, mask));
}
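
/* The driver hands TCS/GS waves a packed header dword; the extracts below
 * imply the layout this file assumes:
 *
 *   bits  0..5  : local primitive id (mask 63 = 6 bits)
 *   bits  6..10 : vertex id          (mask 31 = 5 bits)
 *   bits 11..15 : invocation id      (mask 31 = 5 bits)
 *
 * (local_thread_id() further down additionally extracts bits 16..25 of the
 * GS header.)
 */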
static nir_ssa_def *
build_invocation_id(nir_builder *b, struct state *state)
{
   return bitfield_extract(b, state->header, 11, 31);
}

static nir_ssa_def *
build_vertex_id(nir_builder *b, struct state *state)
{
   return bitfield_extract(b, state->header, 6, 31);
}

static nir_ssa_def *
build_local_primitive_id(nir_builder *b, struct state *state)
{
   return bitfield_extract(b, state->header, 0, 63);
}
static nir_variable *
get_var(struct exec_list *list, int driver_location)
{
   nir_foreach_variable (v, list) {
      if (v->data.driver_location == driver_location) {
         return v;
      }
   }

   return NULL;
}
static nir_ssa_def *
build_local_offset(nir_builder *b, struct state *state,
      nir_ssa_def *vertex, uint32_t base, nir_ssa_def *offset)
{
   nir_ssa_def *primitive_stride = nir_load_vs_primitive_stride_ir3(b);
   nir_ssa_def *primitive_offset =
      nir_imul24(b, build_local_primitive_id(b, state), primitive_stride);
   nir_ssa_def *attr_offset;
   nir_ssa_def *vertex_stride;

   switch (b->shader->info.stage) {
   case MESA_SHADER_VERTEX:
   case MESA_SHADER_TESS_EVAL:
      vertex_stride = nir_imm_int(b, state->map.stride * 4);
      attr_offset = nir_imm_int(b, state->map.loc[base] * 4);
      break;
   case MESA_SHADER_TESS_CTRL:
   case MESA_SHADER_GEOMETRY:
      vertex_stride = nir_load_vs_vertex_stride_ir3(b);
      attr_offset = nir_load_primitive_location_ir3(b, base);
      break;
   default:
      unreachable("bad shader stage");
   }

   nir_ssa_def *vertex_offset = nir_imul24(b, vertex, vertex_stride);

   return nir_iadd(b, nir_iadd(b, primitive_offset, vertex_offset),
         nir_iadd(b, attr_offset, offset));
}
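
/* So the LDS address of an attribute component works out to:
 *
 *   primitive_id * primitive_stride + vertex * vertex_stride
 *       + attr_offset + offset
 *
 * i.e. all vertices of one input primitive are laid out contiguously,
 * vertex-major, with each attribute at a fixed offset within the vertex.
 */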
static nir_intrinsic_instr *
replace_intrinsic(nir_builder *b, nir_intrinsic_instr *intr,
      nir_intrinsic_op op, nir_ssa_def *src0, nir_ssa_def *src1, nir_ssa_def *src2)
{
   nir_intrinsic_instr *new_intr =
      nir_intrinsic_instr_create(b->shader, op);

   new_intr->src[0] = nir_src_for_ssa(src0);
   if (src1)
      new_intr->src[1] = nir_src_for_ssa(src1);
   if (src2)
      new_intr->src[2] = nir_src_for_ssa(src2);

   new_intr->num_components = intr->num_components;

   if (nir_intrinsic_infos[op].has_dest)
      nir_ssa_dest_init(&new_intr->instr, &new_intr->dest,
            intr->num_components, 32, NULL);

   nir_builder_instr_insert(b, &new_intr->instr);

   if (nir_intrinsic_infos[op].has_dest)
      nir_ssa_def_rewrite_uses(&intr->dest.ssa, nir_src_for_ssa(&new_intr->dest.ssa));

   nir_instr_remove(&intr->instr);

   return new_intr;
}
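
/* Callers pass NULL for unused sources, e.g.:
 *
 *   replace_intrinsic(b, intr, nir_intrinsic_load_global_ir3,
 *                     address, offset, NULL);
 */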
static void
build_primitive_map(nir_shader *shader, struct primitive_map *map, struct exec_list *list)
{
   nir_foreach_variable (var, list) {
      switch (var->data.location) {
      case VARYING_SLOT_TESS_LEVEL_OUTER:
      case VARYING_SLOT_TESS_LEVEL_INNER:
         continue;
      }

      unsigned size = glsl_count_attribute_slots(var->type, false) * 4;

      assert(var->data.driver_location < ARRAY_SIZE(map->size));
      map->size[var->data.driver_location] =
         MAX2(map->size[var->data.driver_location], size);
   }

   unsigned loc = 0;
   for (uint32_t i = 0; i < ARRAY_SIZE(map->size); i++) {
      if (map->size[i] == 0)
         continue;

      nir_variable *var = get_var(list, i);
      map->loc[i] = loc;
      loc += map->size[i];

      if (var->data.patch)
         map->size[i] = 0;
      else
         map->size[i] = map->size[i] / glsl_get_length(var->type);
   }

   map->stride = loc;
}
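
/* After this, map->loc[] holds each driver_location's starting dword offset
 * within a vertex (or patch), map->size[] the per-array-element dword size
 * of per-vertex slots (0 for per-patch slots), and map->stride the sum of
 * all sizes, used as the vertex (or patch) stride.
 */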
static void
lower_block_to_explicit_output(nir_block *block, nir_builder *b, struct state *state)
{
   nir_foreach_instr_safe (instr, block) {
      if (instr->type != nir_instr_type_intrinsic)
         continue;

      nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);

      switch (intr->intrinsic) {
      case nir_intrinsic_store_output: {
         // src[] = { value, offset }.

         /* nir_lower_io_to_temporaries replaces all access to output
          * variables with temp variables and then emits a nir_copy_var at
          * the end of the shader. Thus, we should always get a full wrmask
          * here.
          */
         assert(util_is_power_of_two_nonzero(nir_intrinsic_write_mask(intr) + 1));

         b->cursor = nir_instr_remove(&intr->instr);

         nir_ssa_def *vertex_id = build_vertex_id(b, state);
         nir_ssa_def *offset = build_local_offset(b, state, vertex_id, nir_intrinsic_base(intr),
               intr->src[1].ssa);
         nir_intrinsic_instr *store =
            nir_intrinsic_instr_create(b->shader, nir_intrinsic_store_shared_ir3);

         store->src[0] = nir_src_for_ssa(intr->src[0].ssa);
         store->src[1] = nir_src_for_ssa(offset);
         store->num_components = intr->num_components;

         nir_builder_instr_insert(b, &store->instr);
         break;
      }

      default:
         break;
      }
   }
}
static nir_ssa_def *
local_thread_id(nir_builder *b)
{
   return bitfield_extract(b, nir_load_gs_header_ir3(b), 16, 1023);
}
void
ir3_nir_lower_to_explicit_output(nir_shader *shader, struct ir3_shader *s, unsigned topology)
{
   struct state state = { };

   build_primitive_map(shader, &state.map, &shader->outputs);
   memcpy(s->output_loc, state.map.loc, sizeof(s->output_loc));

   nir_function_impl *impl = nir_shader_get_entrypoint(shader);
   assert(impl);

   nir_builder b;
   nir_builder_init(&b, impl);
   b.cursor = nir_before_cf_list(&impl->body);

   if (s->type == MESA_SHADER_VERTEX && topology != IR3_TESS_NONE)
      state.header = nir_load_tcs_header_ir3(&b);
   else
      state.header = nir_load_gs_header_ir3(&b);

   nir_foreach_block_safe (block, impl)
      lower_block_to_explicit_output(block, &b, &state);

   nir_metadata_preserve(impl, nir_metadata_block_index |
         nir_metadata_dominance);

   s->output_size = state.map.stride;
}
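
/* At this point every store_output in the stage has become a
 * store_shared_ir3 at the LDS offset computed by build_local_offset(), and
 * s->output_size records the per-vertex dword stride the driver can use to
 * size the shared storage.
 */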
static void
lower_block_to_explicit_input(nir_block *block, nir_builder *b, struct state *state)
{
   nir_foreach_instr_safe (instr, block) {
      if (instr->type != nir_instr_type_intrinsic)
         continue;

      nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);

      switch (intr->intrinsic) {
      case nir_intrinsic_load_per_vertex_input: {
         // src[] = { vertex, offset }.

         b->cursor = nir_before_instr(&intr->instr);

         nir_ssa_def *offset = build_local_offset(b, state,
               intr->src[0].ssa, // this is typically gl_InvocationID
               nir_intrinsic_base(intr),
               intr->src[1].ssa);

         replace_intrinsic(b, intr, nir_intrinsic_load_shared_ir3, offset, NULL, NULL);
         break;
      }

      case nir_intrinsic_load_invocation_id: {
         b->cursor = nir_before_instr(&intr->instr);

         nir_ssa_def *iid = build_invocation_id(b, state);
         nir_ssa_def_rewrite_uses(&intr->dest.ssa, nir_src_for_ssa(iid));
         nir_instr_remove(&intr->instr);
         break;
      }

      default:
         break;
      }
   }
}
void
ir3_nir_lower_to_explicit_input(nir_shader *shader)
{
   struct state state = { };

   nir_function_impl *impl = nir_shader_get_entrypoint(shader);
   assert(impl);

   nir_builder b;
   nir_builder_init(&b, impl);
   b.cursor = nir_before_cf_list(&impl->body);

   if (shader->info.stage == MESA_SHADER_GEOMETRY)
      state.header = nir_load_gs_header_ir3(&b);
   else
      state.header = nir_load_tcs_header_ir3(&b);

   nir_foreach_block_safe (block, impl)
      lower_block_to_explicit_input(block, &b, &state);
}
static nir_ssa_def *
build_per_vertex_offset(nir_builder *b, struct state *state,
      nir_ssa_def *vertex, nir_ssa_def *offset, nir_variable *var)
{
   nir_ssa_def *primitive_id = nir_load_primitive_id(b);
   nir_ssa_def *patch_stride = nir_load_hs_patch_stride_ir3(b);
   nir_ssa_def *patch_offset = nir_imul24(b, primitive_id, patch_stride);
   nir_ssa_def *attr_offset;
   int loc = var->data.driver_location;

   switch (b->shader->info.stage) {
   case MESA_SHADER_TESS_CTRL:
      attr_offset = nir_imm_int(b, state->map.loc[loc]);
      break;
   case MESA_SHADER_TESS_EVAL:
      attr_offset = nir_load_primitive_location_ir3(b, loc);
      break;
   default:
      unreachable("bad shader stage");
   }

   nir_ssa_def *attr_stride = nir_imm_int(b, state->map.size[loc]);
   nir_ssa_def *vertex_offset = nir_imul24(b, vertex, attr_stride);

   return nir_iadd(b, nir_iadd(b, patch_offset, attr_offset),
         nir_iadd(b, vertex_offset, nir_ishl(b, offset, nir_imm_int(b, 2))));
}
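
/* So a per-vertex slot in the tess param BO lives at:
 *
 *   primitive_id * patch_stride + attr_offset
 *       + vertex * attr_stride + (offset << 2)
 *
 * patch-major, then attribute, then vertex; the indirect offset src is
 * shifted up by 2, apparently converting its units to dwords.
 */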
static nir_ssa_def *
build_patch_offset(nir_builder *b, struct state *state, nir_ssa_def *offset, nir_variable *var)
{
   debug_assert(var && var->data.patch);

   return build_per_vertex_offset(b, state, nir_imm_int(b, 0), offset, var);
}
static nir_ssa_def *
build_tessfactor_base(nir_builder *b, gl_varying_slot slot, struct state *state)
{
   uint32_t inner_levels, outer_levels;
   switch (state->topology) {
   case IR3_TESS_TRIANGLES:
      inner_levels = 1;
      outer_levels = 3;
      break;
   case IR3_TESS_QUADS:
      inner_levels = 2;
      outer_levels = 4;
      break;
   case IR3_TESS_ISOLINES:
      inner_levels = 0;
      outer_levels = 2;
      break;
   default:
      unreachable("bad");
   }

   const uint32_t patch_stride = 1 + inner_levels + outer_levels;

   nir_ssa_def *primitive_id = nir_load_primitive_id(b);

   nir_ssa_def *patch_offset = nir_imul24(b, primitive_id, nir_imm_int(b, patch_stride));

   uint32_t offset;
   switch (slot) {
   case VARYING_SLOT_TESS_LEVEL_OUTER:
      /* There's some kind of header dword, tess levels start at index 1. */
      offset = 1;
      break;
   case VARYING_SLOT_TESS_LEVEL_INNER:
      offset = 1 + outer_levels;
      break;
   default:
      unreachable("bad");
   }

   return nir_iadd(b, patch_offset, nir_imm_int(b, offset));
}
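
/* With the level counts above, the tess factor BO uses a fixed per-patch
 * stride:
 *
 *   triangles: 1 + 1 + 3 = 5 dwords
 *   quads:     1 + 2 + 4 = 7 dwords
 *   isolines:  1 + 0 + 2 = 3 dwords
 *
 * each patch record starting with the header dword noted above.
 */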
static void
lower_tess_ctrl_block(nir_block *block, nir_builder *b, struct state *state)
{
   nir_foreach_instr_safe (instr, block) {
      if (instr->type != nir_instr_type_intrinsic)
         continue;

      nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);

      switch (intr->intrinsic) {
      case nir_intrinsic_control_barrier:
      case nir_intrinsic_memory_barrier_tcs_patch:
         /* Hull shaders dispatch 32 wide so an entire patch will always
          * fit in a single warp and execute in lock-step. Consequently,
          * we don't need to do anything for TCS barriers so just remove
          * the intrinsic. Otherwise we'll emit an actual barrier
          * instruction, which will deadlock.
          */
         nir_instr_remove(&intr->instr);
         break;

      case nir_intrinsic_load_per_vertex_output: {
         // src[] = { vertex, offset }.

         b->cursor = nir_before_instr(&intr->instr);

         nir_ssa_def *address = nir_load_tess_param_base_ir3(b);
         nir_variable *var = get_var(&b->shader->outputs, nir_intrinsic_base(intr));
         nir_ssa_def *offset = build_per_vertex_offset(b, state,
               intr->src[0].ssa, intr->src[1].ssa, var);

         replace_intrinsic(b, intr, nir_intrinsic_load_global_ir3, address, offset, NULL);
         break;
      }

      case nir_intrinsic_store_per_vertex_output: {
         // src[] = { value, vertex, offset }.

         b->cursor = nir_before_instr(&intr->instr);

         /* nir_lower_io_to_temporaries replaces all access to output
          * variables with temp variables and then emits a nir_copy_var at
          * the end of the shader. Thus, we should always get a full wrmask
          * here.
          */
         assert(util_is_power_of_two_nonzero(nir_intrinsic_write_mask(intr) + 1));

         nir_ssa_def *value = intr->src[0].ssa;
         nir_ssa_def *address = nir_load_tess_param_base_ir3(b);
         nir_variable *var = get_var(&b->shader->outputs, nir_intrinsic_base(intr));
         nir_ssa_def *offset = build_per_vertex_offset(b, state,
               intr->src[1].ssa, intr->src[2].ssa, var);

         replace_intrinsic(b, intr, nir_intrinsic_store_global_ir3, value, address,
               nir_iadd(b, offset, nir_imm_int(b, nir_intrinsic_component(intr))));
         break;
      }

      case nir_intrinsic_load_tess_level_inner:
      case nir_intrinsic_load_tess_level_outer: {
         b->cursor = nir_before_instr(&intr->instr);

         gl_varying_slot slot;
         if (intr->intrinsic == nir_intrinsic_load_tess_level_inner)
            slot = VARYING_SLOT_TESS_LEVEL_INNER;
         else
            slot = VARYING_SLOT_TESS_LEVEL_OUTER;

         nir_ssa_def *address = nir_load_tess_factor_base_ir3(b);
         nir_ssa_def *offset = build_tessfactor_base(b, slot, state);

         replace_intrinsic(b, intr, nir_intrinsic_load_global_ir3, address, offset, NULL);
         break;
      }

      case nir_intrinsic_load_output: {
         // src[] = { offset }.

         nir_variable *var = get_var(&b->shader->outputs, nir_intrinsic_base(intr));

         b->cursor = nir_before_instr(&intr->instr);

         nir_ssa_def *address = nir_load_tess_param_base_ir3(b);
         nir_ssa_def *offset = build_patch_offset(b, state, intr->src[0].ssa, var);

         replace_intrinsic(b, intr, nir_intrinsic_load_global_ir3, address, offset, NULL);
         break;
      }

      case nir_intrinsic_store_output: {
         // src[] = { value, offset }.

         /* write patch output to bo */

         nir_variable *var = get_var(&b->shader->outputs, nir_intrinsic_base(intr));

         nir_ssa_def **levels = NULL;
         if (var->data.location == VARYING_SLOT_TESS_LEVEL_OUTER)
            levels = state->outer_levels;
         else if (var->data.location == VARYING_SLOT_TESS_LEVEL_INNER)
            levels = state->inner_levels;

         b->cursor = nir_before_instr(&intr->instr);

         if (levels) {
            for (int i = 0; i < 4; i++)
               if (nir_intrinsic_write_mask(intr) & (1 << i))
                  levels[i] = nir_channel(b, intr->src[0].ssa, i);
            nir_instr_remove(&intr->instr);
         } else {
            nir_ssa_def *address = nir_load_tess_param_base_ir3(b);
            nir_ssa_def *offset = build_patch_offset(b, state, intr->src[1].ssa, var);

            debug_assert(nir_intrinsic_component(intr) == 0);

            /* nir_lower_io_to_temporaries replaces all access to output
             * variables with temp variables and then emits a nir_copy_var at
             * the end of the shader. Thus, we should always get a full wrmask
             * here.
             */
            assert(util_is_power_of_two_nonzero(nir_intrinsic_write_mask(intr) + 1));

            replace_intrinsic(b, intr, nir_intrinsic_store_global_ir3,
                  intr->src[0].ssa, address, offset);
         }
         break;
      }

      default:
         break;
      }
   }
}
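
/* Note the split: per-vertex and per-patch TCS outputs go straight to the
 * tess param BO, while tess level writes are only latched into
 * state->outer_levels/inner_levels here and written out later by the
 * epilogue.
 */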
static void
emit_tess_epilogue(nir_builder *b, struct state *state)
{
   nir_ssa_def *tessfactor_address = nir_load_tess_factor_base_ir3(b);
   nir_ssa_def *levels[2];

   if (!state->outer_levels[0])
      return;

   /* Then emit the epilogue that actually writes out the tessellation levels
    * to the tess factor BO.
    */
   switch (state->topology) {
   case IR3_TESS_TRIANGLES:
      levels[0] = nir_vec4(b, state->outer_levels[0], state->outer_levels[1],
            state->outer_levels[2], state->inner_levels[0]);
      levels[1] = NULL;
      break;
   case IR3_TESS_QUADS:
      levels[0] = nir_vec4(b, state->outer_levels[0], state->outer_levels[1],
            state->outer_levels[2], state->outer_levels[3]);
      levels[1] = nir_vec2(b, state->inner_levels[0], state->inner_levels[1]);
      break;
   case IR3_TESS_ISOLINES:
      levels[0] = nir_vec2(b, state->outer_levels[0], state->outer_levels[1]);
      levels[1] = NULL;
      break;
   default:
      unreachable("bad");
   }

   nir_ssa_def *offset = build_tessfactor_base(b, VARYING_SLOT_TESS_LEVEL_OUTER, state);

   nir_intrinsic_instr *store =
      nir_intrinsic_instr_create(b->shader, nir_intrinsic_store_global_ir3);

   store->src[0] = nir_src_for_ssa(levels[0]);
   store->src[1] = nir_src_for_ssa(tessfactor_address);
   store->src[2] = nir_src_for_ssa(offset);
   nir_builder_instr_insert(b, &store->instr);
   store->num_components = levels[0]->num_components;

   if (levels[1]) {
      store = nir_intrinsic_instr_create(b->shader, nir_intrinsic_store_global_ir3);
      offset = nir_iadd(b, offset, nir_imm_int(b, levels[0]->num_components));

      store->src[0] = nir_src_for_ssa(levels[1]);
      store->src[1] = nir_src_for_ssa(tessfactor_address);
      store->src[2] = nir_src_for_ssa(offset);
      nir_builder_instr_insert(b, &store->instr);
      store->num_components = levels[1]->num_components;
   }

   /* Finally, insert the endpatch instruction.
    *
    * TODO we should re-work this to use normal flow control.
    */

   nir_intrinsic_instr *end_patch =
      nir_intrinsic_instr_create(b->shader, nir_intrinsic_end_patch_ir3);
   nir_builder_instr_insert(b, &end_patch->instr);
}
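
/* The writes above land where build_tessfactor_base() expects them: outer
 * levels at dword 1 of the patch record, inner levels immediately after
 * (dword 1 + outer_levels).
 */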
void
ir3_nir_lower_tess_ctrl(nir_shader *shader, struct ir3_shader *s, unsigned topology)
{
   struct state state = { .topology = topology };

   if (shader_debug_enabled(shader->info.stage)) {
      fprintf(stderr, "NIR (before tess lowering) for %s shader:\n",
            _mesa_shader_stage_to_string(shader->info.stage));
      nir_print_shader(shader, stderr);
   }

   build_primitive_map(shader, &state.map, &shader->outputs);
   memcpy(s->output_loc, state.map.loc, sizeof(s->output_loc));
   s->output_size = state.map.stride;

   nir_function_impl *impl = nir_shader_get_entrypoint(shader);
   assert(impl);

   nir_builder b;
   nir_builder_init(&b, impl);
   b.cursor = nir_before_cf_list(&impl->body);

   state.header = nir_load_tcs_header_ir3(&b);

   nir_foreach_block_safe (block, impl)
      lower_tess_ctrl_block(block, &b, &state);

   /* Now move the body of the TCS into a conditional:
    *
    *   if (gl_InvocationID < num_vertices)
    *     // body
    *
    */

   nir_cf_list body;
   nir_cf_extract(&body, nir_before_cf_list(&impl->body),
         nir_after_cf_list(&impl->body));

   b.cursor = nir_after_cf_list(&impl->body);

   /* Re-emit the header, since the old one got moved into the if branch */
   state.header = nir_load_tcs_header_ir3(&b);
   nir_ssa_def *iid = build_invocation_id(&b, &state);

   const uint32_t nvertices = shader->info.tess.tcs_vertices_out;
   nir_ssa_def *cond = nir_ult(&b, iid, nir_imm_int(&b, nvertices));

   nir_if *nif = nir_push_if(&b, cond);

   nir_cf_reinsert(&body, b.cursor);

   b.cursor = nir_after_cf_list(&nif->then_list);

   /* Insert conditional exit for threads with invocation id != 0 */
   nir_ssa_def *iid0_cond = nir_ieq(&b, iid, nir_imm_int(&b, 0));
   nir_intrinsic_instr *cond_end =
      nir_intrinsic_instr_create(shader, nir_intrinsic_cond_end_ir3);
   cond_end->src[0] = nir_src_for_ssa(iid0_cond);
   nir_builder_instr_insert(&b, &cond_end->instr);

   emit_tess_epilogue(&b, &state);

   nir_pop_if(&b, nif);

   nir_metadata_preserve(impl, 0);
}
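
/* After this pass the TCS has roughly the shape:
 *
 *    if (gl_InvocationID < tcs_vertices_out) {
 *       ... lowered body ...
 *       cond_end_ir3(gl_InvocationID == 0);  // only invocation 0 continues
 *       ... tess level stores + end_patch_ir3 ...
 *    }
 */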
static void
lower_tess_eval_block(nir_block *block, nir_builder *b, struct state *state)
{
   nir_foreach_instr_safe (instr, block) {
      if (instr->type != nir_instr_type_intrinsic)
         continue;

      nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);

      switch (intr->intrinsic) {
      case nir_intrinsic_load_tess_coord: {
         b->cursor = nir_after_instr(&intr->instr);
         nir_ssa_def *x = nir_channel(b, &intr->dest.ssa, 0);
         nir_ssa_def *y = nir_channel(b, &intr->dest.ssa, 1);
         nir_ssa_def *z;

         if (state->topology == IR3_TESS_TRIANGLES)
            z = nir_fsub(b, nir_fsub(b, nir_imm_float(b, 1.0f), y), x);
         else
            z = nir_imm_float(b, 0.0f);

         nir_ssa_def *coord = nir_vec3(b, x, y, z);

         nir_ssa_def_rewrite_uses_after(&intr->dest.ssa,
               nir_src_for_ssa(coord),
               b->cursor.instr);
         break;
      }

      case nir_intrinsic_load_per_vertex_input: {
         // src[] = { vertex, offset }.

         b->cursor = nir_before_instr(&intr->instr);

         nir_ssa_def *address = nir_load_tess_param_base_ir3(b);
         nir_variable *var = get_var(&b->shader->inputs, nir_intrinsic_base(intr));
         nir_ssa_def *offset = build_per_vertex_offset(b, state,
               intr->src[0].ssa, intr->src[1].ssa, var);

         replace_intrinsic(b, intr, nir_intrinsic_load_global_ir3, address, offset, NULL);
         break;
      }

      case nir_intrinsic_load_tess_level_inner:
      case nir_intrinsic_load_tess_level_outer: {
         unsigned dest_comp = nir_intrinsic_dest_components(intr);
         b->cursor = nir_before_instr(&intr->instr);

         gl_varying_slot slot;
         if (intr->intrinsic == nir_intrinsic_load_tess_level_inner)
            slot = VARYING_SLOT_TESS_LEVEL_INNER;
         else
            slot = VARYING_SLOT_TESS_LEVEL_OUTER;

         nir_ssa_def *address = nir_load_tess_factor_base_ir3(b);
         nir_ssa_def *offset = build_tessfactor_base(b, slot, state);

         /* Loading across a vec4 (16 byte) memory boundary is problematic
          * if we don't use components from the second vec4. The tess
          * levels aren't guaranteed to be vec4 aligned and we don't
          * know which levels are actually used, so we load each
          * component individually.
          */
         nir_ssa_def *levels[4];
         for (unsigned i = 0; i < dest_comp; i++) {
            nir_intrinsic_instr *new_intr =
               nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_global_ir3);

            new_intr->src[0] = nir_src_for_ssa(address);
            new_intr->src[1] = nir_src_for_ssa(nir_iadd(b, offset, nir_imm_int(b, i)));
            new_intr->num_components = 1;
            nir_ssa_dest_init(&new_intr->instr, &new_intr->dest, 1, 32, NULL);
            nir_builder_instr_insert(b, &new_intr->instr);
            levels[i] = &new_intr->dest.ssa;
         }

         nir_ssa_def *v = nir_vec(b, levels, dest_comp);

         nir_ssa_def_rewrite_uses(&intr->dest.ssa, nir_src_for_ssa(v));

         nir_instr_remove(&intr->instr);
         break;
      }

      case nir_intrinsic_load_input: {
         // src[] = { offset }.

         nir_variable *var = get_var(&b->shader->inputs, nir_intrinsic_base(intr));

         debug_assert(var->data.patch);

         b->cursor = nir_before_instr(&intr->instr);

         nir_ssa_def *address = nir_load_tess_param_base_ir3(b);
         nir_ssa_def *offset = build_patch_offset(b, state, intr->src[0].ssa, var);

         replace_intrinsic(b, intr, nir_intrinsic_load_global_ir3, address, offset, NULL);
         break;
      }

      default:
         break;
      }
   }
}
void
ir3_nir_lower_tess_eval(nir_shader *shader, unsigned topology)
{
   struct state state = { .topology = topology };

   if (shader_debug_enabled(shader->info.stage)) {
      fprintf(stderr, "NIR (before tess lowering) for %s shader:\n",
            _mesa_shader_stage_to_string(shader->info.stage));
      nir_print_shader(shader, stderr);
   }

   /* Build map of inputs so we have the sizes. */
   build_primitive_map(shader, &state.map, &shader->inputs);

   nir_function_impl *impl = nir_shader_get_entrypoint(shader);
   assert(impl);

   nir_builder b;
   nir_builder_init(&b, impl);

   nir_foreach_block_safe (block, impl)
      lower_tess_eval_block(block, &b, &state);

   nir_metadata_preserve(impl, 0);
}
static void
lower_gs_block(nir_block *block, nir_builder *b, struct state *state)
{
   nir_foreach_instr_safe (instr, block) {
      if (instr->type != nir_instr_type_intrinsic)
         continue;

      nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);

      switch (intr->intrinsic) {
      case nir_intrinsic_end_primitive: {
         b->cursor = nir_before_instr(&intr->instr);
         nir_store_var(b, state->vertex_flags_out, nir_imm_int(b, 4), 0x1);
         nir_instr_remove(&intr->instr);
         break;
      }

      case nir_intrinsic_emit_vertex: {
         /* Load the vertex count */
         b->cursor = nir_before_instr(&intr->instr);
         nir_ssa_def *count = nir_load_var(b, state->vertex_count_var);

         nir_push_if(b, nir_ieq(b, count, local_thread_id(b)));

         foreach_two_lists(dest_node, &state->emit_outputs, src_node, &state->old_outputs) {
            nir_variable *dest = exec_node_data(nir_variable, dest_node, node);
            nir_variable *src = exec_node_data(nir_variable, src_node, node);
            nir_copy_var(b, dest, src);
         }

         nir_instr_remove(&intr->instr);

         nir_store_var(b, state->emitted_vertex_var,
               nir_iadd(b, nir_load_var(b, state->emitted_vertex_var), nir_imm_int(b, 1)), 0x1);

         nir_pop_if(b, NULL);

         /* Increment the vertex count by 1 */
         nir_store_var(b, state->vertex_count_var,
               nir_iadd(b, count, nir_imm_int(b, 1)), 0x1); /* .x */
         nir_store_var(b, state->vertex_flags_out, nir_imm_int(b, 0), 0x1);

         break;
      }

      default:
         break;
      }
   }
}
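
/* So each EmitVertex() becomes roughly:
 *
 *    if (vertex_count == local_thread_id()) {
 *       emit_outputs = outputs;   // this thread latches its vertex
 *       emitted_vertex++;
 *    }
 *    vertex_count++;
 *    vertex_flags_out = 0;
 *
 * i.e. every invocation counts vertices, but only the thread whose id
 * matches captures the emitted vertex.
 */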
void
ir3_nir_lower_gs(nir_shader *shader)
{
   struct state state = { };

   if (shader_debug_enabled(shader->info.stage)) {
      fprintf(stderr, "NIR (before gs lowering):\n");
      nir_print_shader(shader, stderr);
   }

   build_primitive_map(shader, &state.map, &shader->inputs);

   /* Create an output var for vertex_flags. This will be shadowed below,
    * same way regular outputs get shadowed, and this variable will become a
    * regular output.
    */
   state.vertex_flags_out = nir_variable_create(shader, nir_var_shader_out,
         glsl_uint_type(), "vertex_flags");
   state.vertex_flags_out->data.driver_location = shader->num_outputs++;
   state.vertex_flags_out->data.location = VARYING_SLOT_GS_VERTEX_FLAGS_IR3;
   state.vertex_flags_out->data.interpolation = INTERP_MODE_NONE;

   nir_function_impl *impl = nir_shader_get_entrypoint(shader);
   assert(impl);

   nir_builder b;
   nir_builder_init(&b, impl);
   b.cursor = nir_before_cf_list(&impl->body);

   state.header = nir_load_gs_header_ir3(&b);

   /* Generate two sets of shadow vars for the output variables. The first
    * set replaces the real outputs and the second set (emit_outputs) we'll
    * assign in the emit_vertex conditionals. Then at the end of the shader
    * we copy the emit_outputs to the real outputs, so that we get
    * store_output in uniform control flow.
    */
   exec_list_move_nodes_to(&shader->outputs, &state.old_outputs);
   exec_list_make_empty(&state.emit_outputs);
   nir_foreach_variable(var, &state.old_outputs) {
      /* Create a new output var by cloning the original output var and
       * stealing the name.
       */
      nir_variable *output = nir_variable_clone(var, shader);
      exec_list_push_tail(&shader->outputs, &output->node);

      /* Rewrite the original output to be a shadow variable. */
      var->name = ralloc_asprintf(var, "%s@gs-temp", output->name);
      var->data.mode = nir_var_shader_temp;

      /* Clone the shadow variable to create the emit shadow variable that
       * we'll assign in the emit conditionals.
       */
      nir_variable *emit_output = nir_variable_clone(var, shader);
      emit_output->name = ralloc_asprintf(var, "%s@emit-temp", output->name);
      exec_list_push_tail(&state.emit_outputs, &emit_output->node);
   }

   /* During the shader we'll keep track of which vertex we're currently
    * emitting for the EmitVertex test and how many vertices we emitted so we
    * know to discard if we didn't emit any. In most simple shaders, this can
    * all be statically determined and gets optimized away.
    */
   state.vertex_count_var =
      nir_local_variable_create(impl, glsl_uint_type(), "vertex_count");
   state.emitted_vertex_var =
      nir_local_variable_create(impl, glsl_uint_type(), "emitted_vertex");

   /* Initialize to 0. */
   b.cursor = nir_before_cf_list(&impl->body);
   nir_store_var(&b, state.vertex_count_var, nir_imm_int(&b, 0), 0x1);
   nir_store_var(&b, state.emitted_vertex_var, nir_imm_int(&b, 0), 0x1);
   nir_store_var(&b, state.vertex_flags_out, nir_imm_int(&b, 4), 0x1);

   nir_foreach_block_safe (block, impl)
      lower_gs_block(block, &b, &state);

   set_foreach(impl->end_block->predecessors, block_entry) {
      struct nir_block *block = (void *)block_entry->key;
      b.cursor = nir_after_block_before_jump(block);

      nir_intrinsic_instr *discard_if =
         nir_intrinsic_instr_create(b.shader, nir_intrinsic_discard_if);

      nir_ssa_def *cond = nir_ieq(&b, nir_load_var(&b, state.emitted_vertex_var),
            nir_imm_int(&b, 0));

      discard_if->src[0] = nir_src_for_ssa(cond);

      nir_builder_instr_insert(&b, &discard_if->instr);

      foreach_two_lists(dest_node, &shader->outputs, src_node, &state.emit_outputs) {
         nir_variable *dest = exec_node_data(nir_variable, dest_node, node);
         nir_variable *src = exec_node_data(nir_variable, src_node, node);
         nir_copy_var(&b, dest, src);
      }
   }

   exec_list_append(&shader->globals, &state.old_outputs);
   exec_list_append(&shader->globals, &state.emit_outputs);

   nir_metadata_preserve(impl, 0);

   nir_lower_global_vars_to_local(shader);
   nir_split_var_copies(shader);
   nir_lower_var_copies(shader);

   nir_fixup_deref_modes(shader);

   if (shader_debug_enabled(shader->info.stage)) {
      fprintf(stderr, "NIR (after gs lowering):\n");
      nir_print_shader(shader, stderr);
   }
}
uint32_t
ir3_link_geometry_stages(const struct ir3_shader_variant *producer,
      const struct ir3_shader_variant *consumer,
      uint32_t *locs)
{
   uint32_t num_loc = 0, factor;

   switch (consumer->type) {
   case MESA_SHADER_TESS_CTRL:
   case MESA_SHADER_GEOMETRY:
      /* These stages load with ldlw, which expects byte offsets. */
      factor = 4;
      break;
   case MESA_SHADER_TESS_EVAL:
      /* The tess eval shader uses ldg, which takes dword offsets. */
      factor = 1;
      break;
   default:
      unreachable("bad shader stage");
   }

   nir_foreach_variable(in_var, &consumer->shader->nir->inputs) {
      nir_foreach_variable(out_var, &producer->shader->nir->outputs) {
         if (in_var->data.location == out_var->data.location) {
            locs[in_var->data.driver_location] =
               producer->shader->output_loc[out_var->data.driver_location] * factor;

            debug_assert(num_loc <= in_var->data.driver_location + 1);
            num_loc = in_var->data.driver_location + 1;
         }
      }
   }

   return num_loc;
}