3 * Copyright (c) 2019 Collabora LTD
5 * Author: Gert Wollny <gert.wollny@collabora.com>
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the "Software"),
9 * to deal in the Software without restriction, including without limitation
10 * on the rights to use, copy, modify, merge, publish, distribute, sub
11 * license, and/or sell copies of the Software, and to permit persons to whom
12 * the Software is furnished to do so, subject to the following conditions:
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
28 #include "nir_builder.h"
30 #include "../r600_pipe.h"
31 #include "../r600_shader.h"
33 #include "sfn_instruction_tex.h"
35 #include "sfn_shader_vertex.h"
36 #include "sfn_shader_fragment.h"
37 #include "sfn_shader_geometry.h"
38 #include "sfn_shader_compute.h"
39 #include "sfn_nir_lower_fs_out_to_vector.h"
40 #include "sfn_ir_to_assembly.h"
// Constructor: starts with no NIR shader attached; the stage-specific
// implementation (impl) is created later in lower().
// NOTE(review): the remainder of the member-initializer list and the
// constructor body are not visible in this chunk (lines dropped by
// extraction) -- confirm against the original file.
ShaderFromNir::ShaderFromNir():sh(nullptr),
54 bool ShaderFromNir::lower(const nir_shader
*shader
, r600_pipe_shader
*pipe_shader
,
55 r600_pipe_shader_selector
*sel
, r600_shader_key
& key
,
56 struct r600_shader
* gs_shader
)
61 switch (shader
->info
.stage
) {
62 case MESA_SHADER_VERTEX
:
63 impl
.reset(new VertexShaderFromNir(pipe_shader
, *sel
, key
, gs_shader
));
66 case MESA_SHADER_GEOMETRY
:
67 sfn_log
<< SfnLog::trans
<< "Start GS\n";
68 impl
.reset(new GeometryShaderFromNir(pipe_shader
, *sel
, key
));
70 case MESA_SHADER_FRAGMENT
:
71 sfn_log
<< SfnLog::trans
<< "Start FS\n";
72 impl
.reset(new FragmentShaderFromNir(*shader
, pipe_shader
->shader
, *sel
, key
));
74 case MESA_SHADER_COMPUTE
:
75 sfn_log
<< SfnLog::trans
<< "Start CS\n";
76 impl
.reset(new ComputeShaderFromNir(pipe_shader
, *sel
, key
));
82 sfn_log
<< SfnLog::trans
<< "Process declarations\n";
83 if (!process_declaration())
86 // at this point all functions should be inlined
87 const nir_function
*func
= reinterpret_cast<const nir_function
*>(exec_list_get_head_const(&sh
->functions
));
89 sfn_log
<< SfnLog::trans
<< "Scan shader\n";
90 nir_foreach_block(block
, func
->impl
) {
91 nir_foreach_instr(instr
, block
) {
92 if (!impl
->scan_instruction(instr
)) {
93 fprintf(stderr
, "Unhandled sysvalue access ");
94 nir_print_instr(instr
, stderr
);
95 fprintf(stderr
, "\n");
101 sfn_log
<< SfnLog::trans
<< "Reserve registers\n";
102 if (!impl
->allocate_reserved_registers()) {
106 ValuePool::array_list arrays
;
107 sfn_log
<< SfnLog::trans
<< "Allocate local registers\n";
108 foreach_list_typed(nir_register
, reg
, node
, &func
->impl
->registers
) {
109 impl
->allocate_local_register(*reg
, arrays
);
112 sfn_log
<< SfnLog::trans
<< "Emit shader start\n";
113 impl
->allocate_arrays(arrays
);
115 impl
->emit_shader_start();
117 sfn_log
<< SfnLog::trans
<< "Process shader \n";
118 foreach_list_typed(nir_cf_node
, node
, node
, &func
->impl
->body
) {
119 if (!process_cf_node(node
))
123 // Add optimizations here
124 sfn_log
<< SfnLog::trans
<< "Finalize\n";
127 if (!sfn_log
.has_debug_flag(SfnLog::nomerge
)) {
128 sfn_log
<< SfnLog::trans
<< "Merge registers\n";
129 impl
->remap_registers();
131 sfn_log
<< SfnLog::trans
<< "Finished translating to R600 IR\n";
135 Shader
ShaderFromNir::shader() const
137 return Shader
{impl
->m_output
, impl
->get_temp_registers()};
141 bool ShaderFromNir::process_cf_node(nir_cf_node
*node
)
143 SFN_TRACE_FUNC(SfnLog::flow
, "CF");
144 switch (node
->type
) {
145 case nir_cf_node_block
:
146 return process_block(nir_cf_node_as_block(node
));
148 return process_if(nir_cf_node_as_if(node
));
149 case nir_cf_node_loop
:
150 return process_loop(nir_cf_node_as_loop(node
));
156 bool ShaderFromNir::process_if(nir_if
*if_stmt
)
158 SFN_TRACE_FUNC(SfnLog::flow
, "IF");
160 if (!impl
->emit_if_start(m_current_if_id
, if_stmt
))
163 int if_id
= m_current_if_id
++;
164 m_if_stack
.push(if_id
);
166 foreach_list_typed(nir_cf_node
, n
, node
, &if_stmt
->then_list
)
167 if (!process_cf_node(n
)) return false;
169 if (!if_stmt
->then_list
.is_empty()) {
170 if (!impl
->emit_else_start(if_id
))
173 foreach_list_typed(nir_cf_node
, n
, node
, &if_stmt
->else_list
)
174 if (!process_cf_node(n
)) return false;
177 if (!impl
->emit_ifelse_end(if_id
))
184 bool ShaderFromNir::process_loop(nir_loop
*node
)
186 SFN_TRACE_FUNC(SfnLog::flow
, "LOOP");
187 int loop_id
= m_current_loop_id
++;
189 if (!impl
->emit_loop_start(loop_id
))
192 foreach_list_typed(nir_cf_node
, n
, node
, &node
->body
)
193 if (!process_cf_node(n
)) return false;
195 if (!impl
->emit_loop_end(loop_id
))
201 bool ShaderFromNir::process_block(nir_block
*block
)
203 SFN_TRACE_FUNC(SfnLog::flow
, "BLOCK");
204 nir_foreach_instr(instr
, block
) {
205 int r
= emit_instruction(instr
);
207 sfn_log
<< SfnLog::err
<< "R600: Unsupported instruction: "
216 ShaderFromNir::~ShaderFromNir()
220 pipe_shader_type
ShaderFromNir::processor_type() const
222 return impl
->m_processor_type
;
226 bool ShaderFromNir::emit_instruction(nir_instr
*instr
)
230 sfn_log
<< SfnLog::instr
<< "Read instruction " << *instr
<< "\n";
232 switch (instr
->type
) {
233 case nir_instr_type_alu
:
234 return impl
->emit_alu_instruction(instr
);
235 case nir_instr_type_deref
:
236 return impl
->emit_deref_instruction(nir_instr_as_deref(instr
));
237 case nir_instr_type_intrinsic
:
238 return impl
->emit_intrinsic_instruction(nir_instr_as_intrinsic(instr
));
239 case nir_instr_type_load_const
:
240 return impl
->set_literal_constant(nir_instr_as_load_const(instr
));
241 case nir_instr_type_tex
:
242 return impl
->emit_tex_instruction(instr
);
243 case nir_instr_type_jump
:
244 return impl
->emit_jump_instruction(nir_instr_as_jump(instr
));
246 fprintf(stderr
, "R600: %s: ShaderFromNir Unsupported instruction: type %d:'", __func__
, instr
->type
);
247 nir_print_instr(instr
, stderr
);
248 fprintf(stderr
, "'\n");
250 case nir_instr_type_ssa_undef
:
251 return impl
->create_undef(nir_instr_as_ssa_undef(instr
));
256 bool ShaderFromNir::process_declaration()
259 nir_foreach_variable(variable
, &sh
->inputs
) {
260 if (!impl
->process_inputs(variable
)) {
261 fprintf(stderr
, "R600: error parsing input varible %s\n", variable
->name
);
267 nir_foreach_variable(variable
, &sh
->outputs
) {
268 if (!impl
->process_outputs(variable
)) {
269 fprintf(stderr
, "R600: error parsing outputs varible %s\n", variable
->name
);
275 nir_foreach_variable(variable
, &sh
->uniforms
) {
276 if (!impl
->process_uniforms(variable
)) {
277 fprintf(stderr
, "R600: error parsing outputs varible %s\n", variable
->name
);
285 const std::vector
<InstructionBlock
>& ShaderFromNir::shader_ir() const
288 return impl
->m_output
;
292 AssemblyFromShader::~AssemblyFromShader()
/// Public entry point that lowers the translated r600 IR blocks to assembly.
/// NOTE(review): the function body was dropped by extraction; only the
/// signature is visible here. Presumably it forwards to the virtual
/// do_lower() of the concrete backend -- confirm against the original file.
bool AssemblyFromShader::lower(const std::vector<InstructionBlock>& ir)
302 r600_nir_lower_pack_unpack_2x16_impl(nir_builder
*b
, nir_instr
*instr
, void *_options
)
304 nir_alu_instr
*alu
= nir_instr_as_alu(instr
);
307 case nir_op_unpack_half_2x16
: {
308 nir_ssa_def
*packed
= nir_ssa_for_alu_src(b
, alu
, 0);
309 return nir_vec2(b
, nir_unpack_half_2x16_split_x(b
, packed
),
310 nir_unpack_half_2x16_split_y(b
, packed
));
313 case nir_op_pack_half_2x16
: {
314 nir_ssa_def
*src_vec2
= nir_ssa_for_alu_src(b
, alu
, 0);
315 return nir_pack_half_2x16_split(b
, nir_channel(b
, src_vec2
, 0),
316 nir_channel(b
, src_vec2
, 1));
323 bool r600_nir_lower_pack_unpack_2x16_filter(const nir_instr
*instr
, const void *_options
)
325 return instr
->type
== nir_instr_type_alu
;
328 bool r600_nir_lower_pack_unpack_2x16(nir_shader
*shader
)
330 return nir_shader_lower_instructions(shader
,
331 r600_nir_lower_pack_unpack_2x16_filter
,
332 r600_nir_lower_pack_unpack_2x16_impl
,
337 r600_nir_lower_scratch_address_impl(nir_builder
*b
, nir_intrinsic_instr
*instr
)
339 b
->cursor
= nir_before_instr(&instr
->instr
);
341 int address_index
= 0;
344 if (instr
->intrinsic
== nir_intrinsic_store_scratch
) {
345 align
= instr
->src
[0].ssa
->num_components
;
348 align
= instr
->dest
.ssa
.num_components
;
351 nir_ssa_def
*address
= instr
->src
[address_index
].ssa
;
352 nir_ssa_def
*new_address
= nir_ishr(b
, address
, nir_imm_int(b
, 4 * align
));
354 nir_instr_rewrite_src(&instr
->instr
, &instr
->src
[address_index
],
355 nir_src_for_ssa(new_address
));
358 bool r600_lower_scratch_addresses(nir_shader
*shader
)
360 bool progress
= false;
361 nir_foreach_function(function
, shader
) {
363 nir_builder_init(&build
, function
->impl
);
365 nir_foreach_block(block
, function
->impl
) {
366 nir_foreach_instr(instr
, block
) {
367 if (instr
->type
!= nir_instr_type_intrinsic
)
369 nir_intrinsic_instr
*op
= nir_instr_as_intrinsic(instr
);
370 if (op
->intrinsic
!= nir_intrinsic_load_scratch
&&
371 op
->intrinsic
!= nir_intrinsic_store_scratch
)
373 r600_nir_lower_scratch_address_impl(&build
, op
);
382 r600_lower_ubo_to_align16_impl(nir_builder
*b
, nir_instr
*instr
, void *_options
)
384 b
->cursor
= nir_before_instr(instr
);
386 nir_intrinsic_instr
*op
= nir_instr_as_intrinsic(instr
);
387 assert(op
->intrinsic
== nir_intrinsic_load_ubo
);
389 bool const_address
= (nir_src_is_const(op
->src
[1]) && nir_src_is_const(op
->src
[0]));
391 nir_ssa_def
*offset
= op
->src
[1].ssa
;
393 /* This is ugly: With const addressing we can actually set a proper fetch target mask,
394 * but for this we need the component encoded, we don't shift and do de decoding in the
395 * backend. Otherwise we shift by four and resolve the component here
396 * (TODO: encode the start component in the intrinsic when the offset base is non-constant
397 * but a multiple of 16 */
399 nir_ssa_def
*new_offset
= offset
;
401 new_offset
= nir_ishr(b
, offset
, nir_imm_int(b
, 4));
403 nir_intrinsic_instr
*load
= nir_intrinsic_instr_create(b
->shader
, nir_intrinsic_load_ubo_r600
);
404 load
->num_components
= const_address
? op
->num_components
: 4;
405 load
->src
[0] = op
->src
[0];
406 load
->src
[1] = nir_src_for_ssa(new_offset
);
407 nir_intrinsic_set_align(load
, nir_intrinsic_align_mul(op
), nir_intrinsic_align_offset(op
));
409 nir_ssa_dest_init(&load
->instr
, &load
->dest
, load
->num_components
, 32, NULL
);
410 nir_builder_instr_insert(b
, &load
->instr
);
412 /* when four components are loaded or both the offset and the location
413 * are constant, then the backend can deal with it better */
414 if (op
->num_components
== 4 || const_address
)
415 return &load
->dest
.ssa
;
417 /* What comes below is a performance disaster when the offset is not constant
418 * because then we have to assume that any component can be the first one and we
419 * have to pick the result manually. */
420 nir_ssa_def
*first_comp
= nir_iand(b
, nir_ishr(b
, offset
, nir_imm_int(b
, 2)),
423 const unsigned swz_000
[4] = {0, 0, 0};
424 nir_ssa_def
*component_select
= nir_ieq(b
, r600_imm_ivec3(b
, 0, 1, 2),
425 nir_swizzle(b
, first_comp
, swz_000
, 3));
427 if (op
->num_components
== 1) {
428 nir_ssa_def
*check0
= nir_bcsel(b
, nir_channel(b
, component_select
, 0),
429 nir_channel(b
, &load
->dest
.ssa
, 0),
430 nir_channel(b
, &load
->dest
.ssa
, 3));
431 nir_ssa_def
*check1
= nir_bcsel(b
, nir_channel(b
, component_select
, 1),
432 nir_channel(b
, &load
->dest
.ssa
, 1),
434 return nir_bcsel(b
, nir_channel(b
, component_select
, 2),
435 nir_channel(b
, &load
->dest
.ssa
, 2),
437 } else if (op
->num_components
== 2) {
438 const unsigned szw_01
[2] = {0, 1};
439 const unsigned szw_12
[2] = {1, 2};
440 const unsigned szw_23
[2] = {2, 3};
442 nir_ssa_def
*check0
= nir_bcsel(b
, nir_channel(b
, component_select
, 0),
443 nir_swizzle(b
, &load
->dest
.ssa
, szw_01
, 2),
444 nir_swizzle(b
, &load
->dest
.ssa
, szw_23
, 2));
445 return nir_bcsel(b
, nir_channel(b
, component_select
, 1),
446 nir_swizzle(b
, &load
->dest
.ssa
, szw_12
, 2),
449 const unsigned szw_012
[3] = {0, 1, 2};
450 const unsigned szw_123
[3] = {1, 2, 3};
451 return nir_bcsel(b
, nir_channel(b
, component_select
, 0),
452 nir_swizzle(b
, &load
->dest
.ssa
, szw_012
, 3),
453 nir_swizzle(b
, &load
->dest
.ssa
, szw_123
, 3));
457 bool r600_lower_ubo_to_align16_filter(const nir_instr
*instr
, const void *_options
)
459 if (instr
->type
!= nir_instr_type_intrinsic
)
462 nir_intrinsic_instr
*op
= nir_instr_as_intrinsic(instr
);
463 return op
->intrinsic
== nir_intrinsic_load_ubo
;
467 bool r600_lower_ubo_to_align16(nir_shader
*shader
)
469 return nir_shader_lower_instructions(shader
,
470 r600_lower_ubo_to_align16_filter
,
471 r600_lower_ubo_to_align16_impl
,
477 using r600::r600_nir_lower_int_tg4
;
478 using r600::r600_nir_lower_pack_unpack_2x16
;
479 using r600::r600_lower_scratch_addresses
;
480 using r600::r600_lower_fs_out_to_vector
;
481 using r600::r600_lower_ubo_to_align16
;
484 r600_glsl_type_size(const struct glsl_type
*type
, bool is_bindless
)
486 return glsl_count_vec4_slots(type
, false, is_bindless
);
490 r600_get_natural_size_align_bytes(const struct glsl_type
*type
,
491 unsigned *size
, unsigned *align
)
493 if (type
->base_type
!= GLSL_TYPE_ARRAY
) {
497 unsigned elem_size
, elem_align
;
498 glsl_get_natural_size_align_bytes(type
->fields
.array
,
499 &elem_size
, &elem_align
);
501 *size
= type
->length
;
506 optimize_once(nir_shader
*shader
)
508 bool progress
= false;
509 NIR_PASS(progress
, shader
, nir_copy_prop
);
510 NIR_PASS(progress
, shader
, nir_opt_dce
);
511 NIR_PASS(progress
, shader
, nir_opt_algebraic
);
512 NIR_PASS(progress
, shader
, nir_opt_constant_folding
);
513 NIR_PASS(progress
, shader
, nir_opt_copy_prop_vars
);
514 NIR_PASS(progress
, shader
, nir_opt_vectorize
);
516 NIR_PASS(progress
, shader
, nir_opt_remove_phis
);
518 if (nir_opt_trivial_continues(shader
)) {
520 NIR_PASS(progress
, shader
, nir_copy_prop
);
521 NIR_PASS(progress
, shader
, nir_opt_dce
);
524 NIR_PASS(progress
, shader
, nir_opt_if
, false);
525 NIR_PASS(progress
, shader
, nir_opt_dead_cf
);
526 NIR_PASS(progress
, shader
, nir_opt_cse
);
527 NIR_PASS(progress
, shader
, nir_opt_peephole_select
, 200, true, true);
529 NIR_PASS(progress
, shader
, nir_opt_conditional_discard
);
530 NIR_PASS(progress
, shader
, nir_opt_dce
);
531 NIR_PASS(progress
, shader
, nir_opt_undef
);
535 bool has_saturate(const nir_function
*func
)
537 nir_foreach_block(block
, func
->impl
) {
538 nir_foreach_instr(instr
, block
) {
539 if (instr
->type
== nir_instr_type_alu
) {
540 auto alu
= nir_instr_as_alu(instr
);
541 if (alu
->dest
.saturate
)
549 int r600_shader_from_nir(struct r600_context
*rctx
,
550 struct r600_pipe_shader
*pipeshader
,
551 r600_shader_key
*key
)
554 struct r600_pipe_shader_selector
*sel
= pipeshader
->selector
;
556 r600::ShaderFromNir convert
;
558 if (rctx
->screen
->b
.debug_flags
& DBG_PREOPT_IR
) {
559 fprintf(stderr
, "PRE-OPT-NIR-----------.------------------------------\n");
560 nir_print_shader(sel
->nir
, stderr
);
561 fprintf(stderr
, "END PRE-OPT-NIR--------------------------------------\n\n");
564 NIR_PASS_V(sel
->nir
, nir_lower_vars_to_ssa
);
565 NIR_PASS_V(sel
->nir
, nir_lower_regs_to_ssa
);
566 NIR_PASS_V(sel
->nir
, nir_lower_phis_to_scalar
);
568 static const struct nir_lower_tex_options lower_tex_options
= {
571 NIR_PASS_V(sel
->nir
, nir_lower_tex
, &lower_tex_options
);
573 NIR_PASS_V(sel
->nir
, r600::r600_nir_lower_txl_txf_array_or_cube
);
575 NIR_PASS_V(sel
->nir
, r600_nir_lower_int_tg4
);
576 NIR_PASS_V(sel
->nir
, r600_nir_lower_pack_unpack_2x16
);
578 NIR_PASS_V(sel
->nir
, nir_lower_io
, nir_var_uniform
, r600_glsl_type_size
,
579 nir_lower_io_lower_64bit_to_32
);
581 if (sel
->nir
->info
.stage
== MESA_SHADER_VERTEX
)
582 NIR_PASS_V(sel
->nir
, r600_vectorize_vs_inputs
);
584 if (sel
->nir
->info
.stage
== MESA_SHADER_FRAGMENT
)
585 NIR_PASS_V(sel
->nir
, r600_lower_fs_out_to_vector
);
587 if (sel
->nir
->info
.stage
== MESA_SHADER_TESS_CTRL
||
588 sel
->nir
->info
.stage
== MESA_SHADER_TESS_EVAL
)
589 NIR_PASS_V(sel
->nir
, nir_lower_io
, nir_var_shader_in
, r600_glsl_type_size
,
590 nir_lower_io_lower_64bit_to_32
);
592 if (sel
->nir
->info
.stage
== MESA_SHADER_TESS_CTRL
)
593 NIR_PASS_V(sel
->nir
, nir_lower_io
, nir_var_shader_out
, r600_glsl_type_size
,
594 nir_lower_io_lower_64bit_to_32
);
596 const nir_function
*func
= reinterpret_cast<const nir_function
*>(exec_list_get_head_const(&sel
->nir
->functions
));
597 bool optimize
= func
->impl
->registers
.length() == 0 && !has_saturate(func
);
600 optimize_once(sel
->nir
);
601 NIR_PASS_V(sel
->nir
, r600_lower_ubo_to_align16
);
603 /* It seems the output of this optimization is cached somewhere, and
604 * when there are registers, then we can no longer copy propagate, so
605 * skip the optimization then. (There is probably a better way, but yeah)
608 while(optimize_once(sel
->nir
));
610 NIR_PASS_V(sel
->nir
, nir_remove_dead_variables
, nir_var_shader_in
);
611 NIR_PASS_V(sel
->nir
, nir_remove_dead_variables
, nir_var_shader_out
);
614 NIR_PASS_V(sel
->nir
, nir_lower_vars_to_scratch
,
615 nir_var_function_temp
,
617 r600_get_natural_size_align_bytes
);
619 while (optimize
&& optimize_once(sel
->nir
));
621 NIR_PASS_V(sel
->nir
, nir_lower_locals_to_regs
);
622 //NIR_PASS_V(sel->nir, nir_opt_algebraic);
623 //NIR_PASS_V(sel->nir, nir_copy_prop);
624 NIR_PASS_V(sel
->nir
, nir_lower_to_source_mods
, nir_lower_float_source_mods
);
625 NIR_PASS_V(sel
->nir
, nir_convert_from_ssa
, true);
626 NIR_PASS_V(sel
->nir
, nir_opt_dce
);
628 if ((rctx
->screen
->b
.debug_flags
& DBG_NIR
) &&
629 (rctx
->screen
->b
.debug_flags
& DBG_ALL_SHADERS
)) {
630 fprintf(stderr
, "-- NIR --------------------------------------------------------\n");
631 struct nir_function
*func
= (struct nir_function
*)exec_list_get_head(&sel
->nir
->functions
);
632 nir_index_ssa_defs(func
->impl
);
633 nir_print_shader(sel
->nir
, stderr
);
634 fprintf(stderr
, "-- END --------------------------------------------------------\n");
637 memset(&pipeshader
->shader
, 0, sizeof(r600_shader
));
638 pipeshader
->scratch_space_needed
= sel
->nir
->scratch_size
;
640 if (sel
->nir
->info
.stage
== MESA_SHADER_TESS_EVAL
||
641 sel
->nir
->info
.stage
== MESA_SHADER_VERTEX
||
642 sel
->nir
->info
.stage
== MESA_SHADER_GEOMETRY
) {
643 pipeshader
->shader
.clip_dist_write
|= ((1 << sel
->nir
->info
.clip_distance_array_size
) - 1);
644 pipeshader
->shader
.cull_dist_write
= ((1 << sel
->nir
->info
.cull_distance_array_size
) - 1)
645 << sel
->nir
->info
.clip_distance_array_size
;
646 pipeshader
->shader
.cc_dist_mask
= (1 << (sel
->nir
->info
.cull_distance_array_size
+
647 sel
->nir
->info
.clip_distance_array_size
)) - 1;
650 struct r600_shader
* gs_shader
= nullptr;
652 gs_shader
= &rctx
->gs_shader
->current
->shader
;
654 bool r
= convert
.lower(sel
->nir
, pipeshader
, sel
, *key
, gs_shader
);
656 if (!r
|| rctx
->screen
->b
.debug_flags
& DBG_ALL_SHADERS
) {
659 snprintf(filename
, 4000, "nir-%s_%d.inc", sel
->nir
->info
.name
, shnr
++);
661 if (access(filename
, F_OK
) == -1) {
662 FILE *f
= fopen(filename
, "w");
665 fprintf(f
, "const char *shader_blob_%s = {\nR\"(", sel
->nir
->info
.name
);
666 nir_print_shader(sel
->nir
, f
);
667 fprintf(f
, ")\";\n");
675 auto shader
= convert
.shader();
677 r600_screen
*rscreen
= rctx
->screen
;
678 r600_bytecode_init(&pipeshader
->shader
.bc
, rscreen
->b
.chip_class
, rscreen
->b
.family
,
679 rscreen
->has_compressed_msaa_texturing
);
681 r600::sfn_log
<< r600::SfnLog::shader_info
682 << "pipeshader->shader.processor_type = "
683 << pipeshader
->shader
.processor_type
<< "\n";
685 pipeshader
->shader
.bc
.type
= pipeshader
->shader
.processor_type
;
686 pipeshader
->shader
.bc
.isa
= rctx
->isa
;
688 r600::AssemblyFromShaderLegacy
afs(&pipeshader
->shader
, key
);
689 if (!afs
.lower(shader
.m_ir
)) {
690 R600_ERR("%s: Lowering to assembly failed\n", __func__
);
694 if (sel
->nir
->info
.stage
== MESA_SHADER_GEOMETRY
) {
695 r600::sfn_log
<< r600::SfnLog::shader_info
<< "Geometry shader, create copy shader\n";
696 generate_gs_copy_shader(rctx
, pipeshader
, &sel
->so
);
697 assert(pipeshader
->gs_copy_shader
);
699 r600::sfn_log
<< r600::SfnLog::shader_info
<< "This is not a Geometry shader\n";