3 * Copyright (c) 2018 Collabora LTD
5 * Author: Gert Wollny <gert.wollny@collabora.com>
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the "Software"),
9 * to deal in the Software without restriction, including without limitation
10 * on the rights to use, copy, modify, merge, publish, distribute, sub
11 * license, and/or sell copies of the Software, and to permit persons to whom
12 * the Software is furnished to do so, subject to the following conditions:
14 * The above copyright notice and this permission notice (including the next
15 * paragraph) shall be included in all copies or substantial portions of the
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
27 #include "sfn_shader_geometry.h"
28 #include "sfn_instruction_misc.h"
29 #include "sfn_instruction_fetch.h"
30 #include "sfn_shaderio.h"
// Constructor: set up translation of a geometry shader from NIR for r600.
// Forwards the pipe shader, selector, NIR shader and scratch-space needs to
// the VertexStage base (the GS is part of the vertex processing pipeline)
// and zero-initializes the per-shader emission/ring-offset bookkeeping.
// NOTE(review): this extraction is missing several original lines here
// (additional member initializers and the body braces) — confirm against
// the full file before editing.
34 GeometryShaderFromNir::GeometryShaderFromNir(r600_pipe_shader
*sh
,
35 r600_pipe_shader_selector
&sel
,
36 const r600_shader_key
&key
,
37 enum chip_class chip_class
):
38 VertexStage(PIPE_SHADER_GEOMETRY
, sel
, sh
->shader
,
39 sh
->scratch_space_needed
, chip_class
),
// No vertex emitted yet; the input ring offset starts at zero and the
// triangle-strip-adjacency workaround stays off until the key requests it.
42 m_first_vertex_emitted(false),
44 m_next_input_ring_offset(0),
48 m_gs_tri_strip_adj_fix(false)
// Propagate the first atomic-counter index from the shader key into the
// shader info structure consumed by the rest of the backend.
50 sh_info().atomic_base
= key
.gs
.first_atomic_counter
;
// GS inputs are read via explicit ring fetches (see emit_load_from_array and
// emit_intrinsic_instruction_override), not through the generic load_deref
// path — both parameters are marked UNUSED here.
// NOTE(review): the function body is missing from this extraction.
53 bool GeometryShaderFromNir::do_emit_load_deref(UNUSED
const nir_variable
*in_var
, UNUSED nir_intrinsic_instr
* instr
)
// Store a GS output: GS outputs go to the GS ring buffer (MEM_RING) rather
// than to exports. Records the written components in sh_info() and emits an
// indexed ring write at 4 dwords per output slot, indexed by m_export_base
// (the running per-vertex export base register).
58 bool GeometryShaderFromNir::do_emit_store_deref(const nir_variable
*out_var
, nir_intrinsic_instr
* instr
)
// Component mask of this store as given by NIR.
60 uint32_t write_mask
= nir_intrinsic_write_mask(instr
);
61 GPRVector::Swizzle swz
= swizzle_from_mask(write_mask
);
// Gather the stored value (src[1] of store_deref) into a GPR vector,
// loading constants into registers where needed.
62 auto out_value
= vec_from_nir_with_fetch_constant(instr
->src
[1], write_mask
, swz
, true);
// Remember which components of this output the shader actually writes.
64 sh_info().output
[out_var
->data
.driver_location
].write_mask
= write_mask
;
// Indexed (mem_write_ind) ring write: base address is 4 dwords per output
// slot, plus the per-vertex index held in m_export_base.
66 auto ir
= new MemRingOutIntruction(cf_mem_ring
, mem_write_ind
, out_value
,
67 4 * out_var
->data
.driver_location
,
68 instr
->num_components
, m_export_base
);
// NOTE(review): the lines that emit `ir` and return are missing from this
// extraction — confirm against the full file.
// The system values a GS needs (primitive id, invocation id, per-vertex
// offsets) are preloaded in do_allocate_reserved_registers(), so no
// per-instruction scanning is required; the instruction is UNUSED.
// NOTE(review): the function body is missing from this extraction.
74 bool GeometryShaderFromNir::scan_sysvalue_access(UNUSED nir_instr
*instr
)
// Record one GS input variable. Only the varyings supported by the ring
// layout are accepted: position, point size, clip vertex/distances, the two
// colors, generic VAR0..VAR31 and TEX0..TEX7. For each accepted input the
// r600 semantic name/sid is stored and a ring offset of one vec4 (16 bytes)
// per driver_location is assigned.
79 bool GeometryShaderFromNir::do_process_inputs(nir_variable
*input
)
82 if (input
->data
.location
== VARYING_SLOT_POS
||
83 input
->data
.location
== VARYING_SLOT_PSIZ
||
84 input
->data
.location
== VARYING_SLOT_CLIP_VERTEX
||
85 input
->data
.location
== VARYING_SLOT_CLIP_DIST0
||
86 input
->data
.location
== VARYING_SLOT_CLIP_DIST1
||
87 input
->data
.location
== VARYING_SLOT_COL0
||
88 input
->data
.location
== VARYING_SLOT_COL1
||
89 (input
->data
.location
>= VARYING_SLOT_VAR0
&&
90 input
->data
.location
<= VARYING_SLOT_VAR31
) ||
91 (input
->data
.location
>= VARYING_SLOT_TEX0
&&
92 input
->data
.location
<= VARYING_SLOT_TEX7
)) {
// Fill in the r600 shader-io slot for this input: semantic name and id
// come from the shared varying-semantic mapping helper.
94 r600_shader_io
& io
= sh_info().input
[input
->data
.driver_location
];
95 auto semantic
= r600_get_varying_semantic(input
->data
.location
);
96 io
.name
= semantic
.first
;
97 io
.sid
= semantic
.second
;
// Each input occupies one vec4 (16 bytes) in the GS input ring.
99 io
.ring_offset
= 16 * input
->data
.driver_location
;
// Track the total ring footprint; used later for ring_item_sizes[0].
101 m_next_input_ring_offset
+= 16;
// Record one GS output variable. Accepts the varyings the later pipeline
// stages can consume (colors, generic VARs, texcoords, back-face colors,
// clip vertex/distances, primitive id, position, point size, layer,
// viewport, fog coordinate), fills in the r600 semantic name/sid, and
// updates clip-distance and viewport state in sh_info().
108 bool GeometryShaderFromNir::do_process_outputs(nir_variable
*output
)
110 if (output
->data
.location
== VARYING_SLOT_COL0
||
111 output
->data
.location
== VARYING_SLOT_COL1
||
112 (output
->data
.location
>= VARYING_SLOT_VAR0
&&
113 output
->data
.location
<= VARYING_SLOT_VAR31
) ||
114 (output
->data
.location
>= VARYING_SLOT_TEX0
&&
115 output
->data
.location
<= VARYING_SLOT_TEX7
) ||
116 output
->data
.location
== VARYING_SLOT_BFC0
||
117 output
->data
.location
== VARYING_SLOT_BFC1
||
118 output
->data
.location
== VARYING_SLOT_CLIP_VERTEX
||
119 output
->data
.location
== VARYING_SLOT_CLIP_DIST0
||
120 output
->data
.location
== VARYING_SLOT_CLIP_DIST1
||
121 output
->data
.location
== VARYING_SLOT_PRIMITIVE_ID
||
122 output
->data
.location
== VARYING_SLOT_POS
||
123 output
->data
.location
== VARYING_SLOT_PSIZ
||
124 output
->data
.location
== VARYING_SLOT_LAYER
||
125 output
->data
.location
== VARYING_SLOT_VIEWPORT
||
126 output
->data
.location
== VARYING_SLOT_FOGC
) {
// Fill in the r600 shader-io slot for this output.
127 r600_shader_io
& io
= sh_info().output
[output
->data
.driver_location
];
129 auto semantic
= r600_get_varying_semantic(output
->data
.location
);
130 io
.name
= semantic
.first
;
131 io
.sid
= semantic
.second
;
// Compute the SPI semantic id used by the hardware interpolation setup.
133 evaluate_spi_sid(io
);
// Each clip-distance slot covers four distances; count them so
// do_finalize() can derive the clip/cull write masks.
136 if (output
->data
.location
== VARYING_SLOT_CLIP_DIST0
||
137 output
->data
.location
== VARYING_SLOT_CLIP_DIST1
) {
138 m_num_clip_dist
+= 4;
// Writing the viewport index requires flagging both vs_out_viewport and
// the misc-vector write in the shader info.
141 if (output
->data
.location
== VARYING_SLOT_VIEWPORT
) {
142 sh_info().vs_out_viewport
= 1;
143 sh_info().vs_out_misc_write
= 1;
// Reserve the registers the hardware preloads for a GS invocation: the six
// per-vertex ring offsets (spread over R0/R1 at the channels below), the
// primitive id in R0.z and the invocation id in R1.w. Also allocates and
// zeroes m_export_base, the running ring-write index, and publishes the
// input ring item size. If the shader key requests the triangle-strip
// adjacency fix, the offsets are additionally rewired (emit_adj_fix).
151 bool GeometryShaderFromNir::do_allocate_reserved_registers()
// Register (sel) and channel tables for the six preloaded vertex offsets.
153 const int sel
[6] = {0, 0 ,0, 1, 1, 1};
154 const int chan
[6] = {0, 1 ,3, 0, 1, 2};
// Reserve R0 and R1, which carry the preloaded system values.
156 increment_reserved_registers();
157 increment_reserved_registers();
159 /* Reserve registers used by the shaders (should check how many
160 * components are actually used */
161 for (int i
= 0; i
< 6; ++i
) {
162 auto reg
= new GPRValue(sel
[i
], chan
[i
]);
164 m_per_vertex_offsets
[i
].reset(reg
);
165 inject_register(sel
[i
], chan
[i
], m_per_vertex_offsets
[i
], false);
// Primitive id is preloaded into R0.z.
167 auto reg
= new GPRValue(0, 2);
169 m_primitive_id
.reset(reg
);
170 inject_register(0, 2, m_primitive_id
, false);
// Invocation id is preloaded into R1.w.
172 reg
= new GPRValue(1, 3);
174 m_invocation_id
.reset(reg
);
175 inject_register(1, 3, m_invocation_id
, false);
// Ring write index starts at zero for the first emitted vertex.
177 m_export_base
= get_temp_register();
178 emit_instruction(new AluInstruction(op1_mov
, m_export_base
, Value::zero
, {alu_write
, alu_last_instr
}));
// Publish the accumulated GS input ring item size (set in
// do_process_inputs).
180 sh_info().ring_item_sizes
[0] = m_next_input_ring_offset
;
// NOTE(review): the body of this conditional (presumably emit_adj_fix())
// and the return are missing from this extraction.
182 if (m_key
.gs
.tri_strip_adj_fix
)
// Workaround for triangle strips with adjacency: on odd primitives the
// hardware-provided per-vertex ring offsets must be rotated. Computes the
// primitive parity (primitive_id & 1) and, for each of the six offsets,
// conditionally selects either the original offset or its rotated
// counterpart (rotate_indices) via CNDE_INT, then replaces
// m_per_vertex_offsets with the selected values.
188 void GeometryShaderFromNir::emit_adj_fix()
// adjhelp0 = primitive_id & 1 — the parity that drives the selection.
190 PValue
adjhelp0(new GPRValue(m_export_base
->sel(), 1));
191 emit_instruction(op2_and_int
, adjhelp0
, {m_primitive_id
, Value::one_i
}, {alu_write
, alu_last_instr
});
193 int help2
= allocate_temp_register();
// Destination channels for the six selected offsets; the last two reuse
// channels 2 and 3 of the m_export_base register.
195 int reg_chanels
[6] = {0, 1, 2, 3, 2, 3};
// Rotation applied when the parity bit is set.
197 int rotate_indices
[6] = {4, 5, 0, 1, 2, 3};
// NOTE(review): the declaration of reg_indices (original line ~198) is
// missing from this extraction — confirm against the full file.
199 reg_indices
[0] = reg_indices
[1] = reg_indices
[2] = reg_indices
[3] = help2
;
200 reg_indices
[4] = reg_indices
[5] = m_export_base
->sel();
202 std::array
<PValue
, 6> adjhelp
;
204 AluInstruction
*ir
= nullptr;
205 for (int i
= 0; i
< 6; i
++) {
206 adjhelp
[i
].reset(new GPRValue(reg_indices
[i
], reg_chanels
[i
]));
// CNDE_INT: if parity == 0 keep offset i, else take the rotated offset.
207 ir
= new AluInstruction(op3_cnde_int
, adjhelp
[i
],
208 {adjhelp0
, m_per_vertex_offsets
[i
],
209 m_per_vertex_offsets
[rotate_indices
[i
]]},
212 ir
->set_flag(alu_last_instr
);
213 emit_instruction(ir
);
// Close the final ALU group.
215 ir
->set_flag(alu_last_instr
);
// From here on, use the fixed-up offsets for all ring fetches.
217 for (int i
= 0; i
< 6; i
++)
218 m_per_vertex_offsets
[i
] = adjhelp
[i
];
// Intercept array derefs into the GS per-vertex input arrays: record the
// underlying variable and the vertex index expression, keyed by the deref's
// SSA index, so that the matching load_deref can later be redirected to a
// ring fetch (see emit_intrinsic_instruction_override).
221 bool GeometryShaderFromNir::emit_deref_instruction_override(nir_deref_instr
* instr
)
223 if (instr
->deref_type
== nir_deref_type_array
) {
224 auto var
= get_deref_location(instr
->parent
);
225 ArrayDeref ad
= {var
, &instr
->arr
.index
};
226 assert(instr
->dest
.is_ssa
);
// Keyed by the deref's SSA index for lookup at the load_deref site.
227 m_in_array_deref
[instr
->dest
.ssa
.index
] = ad
;
229 /* Problem: nir_intrinsice_load_deref tries to lookup the
230 * variable, and will not find it, need to override that too */
// GS-specific intrinsic handling: load_deref of a recorded input-array
// deref becomes a ring fetch; emit_vertex/end_primitive become EMIT/CUT
// instructions; primitive and invocation id reads are served from the
// registers preloaded in do_allocate_reserved_registers().
236 bool GeometryShaderFromNir::emit_intrinsic_instruction_override(nir_intrinsic_instr
* instr
)
238 switch (instr
->intrinsic
) {
239 case nir_intrinsic_load_deref
: {
240 auto& src
= instr
->src
[0];
// Was this deref recorded by emit_deref_instruction_override?
242 auto array
= m_in_array_deref
.find(src
.ssa
->index
);
243 if (array
!= m_in_array_deref
.end())
244 return emit_load_from_array(instr
, array
->second
);
// end_primitive is emit_vertex with the cut flag set.
246 case nir_intrinsic_emit_vertex
:
247 return emit_vertex(instr
, false);
248 case nir_intrinsic_end_primitive
:
249 return emit_vertex(instr
, true);
250 case nir_intrinsic_load_primitive_id
:
251 return load_preloaded_value(instr
->dest
, 0, m_primitive_id
);
252 case nir_intrinsic_load_invocation_id
:
253 return load_preloaded_value(instr
->dest
, 0, m_invocation_id
);
// NOTE(review): the close of the load_deref case and the default/fallback
// path are missing from this extraction — confirm against the full file.
// Emit a vertex on the given stream — or, when `cut` is true, end the
// current primitive. After the EMIT the ring write base is advanced by the
// number of outputs per vertex so the next vertex's stores land in the next
// ring slot.
260 bool GeometryShaderFromNir::emit_vertex(nir_intrinsic_instr
* instr
, bool cut
)
262 int stream
= nir_intrinsic_stream_id(instr
);
265 emit_instruction(new EmitVertex(stream
, cut
));
// Advance m_export_base by noutput: one vec4 ring slot per output.
268 emit_instruction(new AluInstruction(op2_add_int
, m_export_base
, m_export_base
,
269 PValue(new LiteralValue(sh_info().noutput
)),
270 {alu_write
, alu_last_instr
}));
// Translate a load from a GS per-vertex input array into a vertex-cache
// fetch from the GS ring constant buffer. The vertex index must be a
// literal constant (indirect addressing across vertices is not supported);
// the fetch address is the preloaded per-vertex ring offset for that
// vertex, plus 16 bytes per driver_location.
275 bool GeometryShaderFromNir::emit_load_from_array(nir_intrinsic_instr
* instr
,
276 const ArrayDeref
& array_deref
)
278 auto dest
= vec_from_nir(instr
->dest
, instr
->num_components
);
280 const nir_load_const_instr
* literal_index
= nullptr;
// Try to resolve the vertex index to a compile-time constant.
282 if (array_deref
.index
->is_ssa
)
283 literal_index
= get_literal_constant(array_deref
.index
->ssa
->index
);
285 if (!literal_index
) {
286 sfn_log
<< SfnLog::err
<< "GS: Indirect input addressing not (yet) supported\n";
// At most six input vertices (triangles with adjacency).
289 assert(literal_index
->value
[0].u32
< 6);
290 PValue addr
= m_per_vertex_offsets
[literal_index
->value
[0].u32
];
// Fetch one vec4 slot per driver_location from the GS ring const buffer,
// addressed by the preloaded per-vertex offset.
292 auto fetch
= new FetchInstruction(vc_fetch
, no_index_offset
, dest
, addr
,
293 16 * array_deref
.var
->data
.driver_location
,
294 R600_GS_RING_CONST_BUFFER
, PValue(), bim_none
, true);
295 emit_instruction(fetch
);
299 void GeometryShaderFromNir::do_finalize()
301 if (m_num_clip_dist
) {
302 sh_info().cc_dist_mask
= (1 << m_num_clip_dist
) - 1;
303 sh_info().clip_dist_write
= (1 << m_num_clip_dist
) - 1;