/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
/**
 * \file brw_vec4_tcs.cpp
 *
 * Tessellation control shader specific code derived from the vec4_visitor class.
 */
#include "brw_nir.h"
#include "brw_vec4_tcs.h"
#include "brw_fs.h"
#include "dev/gen_debug.h"

namespace brw {
vec4_tcs_visitor::vec4_tcs_visitor(const struct brw_compiler *compiler,
                                   void *log_data,
                                   const struct brw_tcs_prog_key *key,
                                   struct brw_tcs_prog_data *prog_data,
                                   const nir_shader *nir,
                                   void *mem_ctx,
                                   int shader_time_index,
                                   const struct brw_vue_map *input_vue_map)
   : vec4_visitor(compiler, log_data, &key->base.tex, &prog_data->base,
                  nir, mem_ctx, false, shader_time_index),
     input_vue_map(input_vue_map), key(key)
{
}
void
vec4_tcs_visitor::setup_payload()
{
   int reg = 0;

   /* The payload always contains important data in r0, which contains
    * the URB handles that are passed on to the URB write at the end
    * of the thread.
    */
   reg++;

   /* r1.0 - r4.7 may contain the input control point URB handles,
    * which we use to pull vertex data.
    */
   reg += 4;

   /* Push constants may start at r5.0 */
   reg = setup_uniforms(reg);

   this->first_non_payload_grf = reg;
}
void
vec4_tcs_visitor::emit_prolog()
{
   invocation_id = src_reg(this, glsl_type::uint_type);
   emit(TCS_OPCODE_GET_INSTANCE_ID, dst_reg(invocation_id));

   /* HS threads are dispatched with the dispatch mask set to 0xFF.
    * If there are an odd number of output vertices, then the final
    * HS instance dispatched will only have its bottom half doing real
    * work, and so we need to disable the upper half:
    */
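   /* For example (illustrative), with tcs_vertices_out == 5 the vec4 backend
    * dispatches DIV_ROUND_UP(5, 2) == 3 SIMD4x2 instances; the last instance
    * only has a real invocation (id 4) in its bottom half, so the CMP/IF
    * below masks off the channels whose invocation_id is out of range.
    */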
   if (nir->info.tess.tcs_vertices_out % 2) {
      emit(CMP(dst_null_d(), invocation_id,
               brw_imm_ud(nir->info.tess.tcs_vertices_out),
               BRW_CONDITIONAL_L));

      /* Matching ENDIF is in emit_thread_end() */
      emit(IF(BRW_PREDICATE_NORMAL));
   }
}
void
vec4_tcs_visitor::emit_thread_end()
{
   vec4_instruction *inst;
   current_annotation = "thread end";

   if (nir->info.tess.tcs_vertices_out % 2) {
      emit(BRW_OPCODE_ENDIF);
   }

   if (devinfo->gen == 7) {
      struct brw_tcs_prog_data *tcs_prog_data =
         (struct brw_tcs_prog_data *) prog_data;

      current_annotation = "release input vertices";

      /* Synchronize all threads, so we know that no one is still
       * using the input URB handles.
       */
      if (tcs_prog_data->instances > 1) {
         dst_reg header = dst_reg(this, glsl_type::uvec4_type);
         emit(TCS_OPCODE_CREATE_BARRIER_HEADER, header);
         emit(SHADER_OPCODE_BARRIER, dst_null_ud(), src_reg(header));
      }

      /* Make thread 0 (invocations <1, 0>) release pairs of ICP handles.
       * We want to compare the bottom half of invocation_id with 0, but
       * use that truth value for the top half as well.  Unfortunately,
       * we don't have stride in the vec4 world, nor UV immediates in
       * align16, so we need an opcode to get invocation_id<0,4,0>.
       */
      set_condmod(BRW_CONDITIONAL_Z,
                  emit(TCS_OPCODE_SRC0_010_IS_ZERO, dst_null_d(),
                       invocation_id));
      emit(IF(BRW_PREDICATE_NORMAL));
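      /* For example, with key->input_vertices == 5 the loop below issues
       * paired releases for ICP handles (0,1) and (2,3), then a final
       * unpaired release for handle 4 (i == input_vertices - 1).
       */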
      for (unsigned i = 0; i < key->input_vertices; i += 2) {
         /* If we have an odd number of input vertices, the last will be
          * unpaired.  We don't want to use an interleaved URB write in
          * that case.
          */
         const bool is_unpaired = i == key->input_vertices - 1;

         dst_reg header(this, glsl_type::uvec4_type);
         emit(TCS_OPCODE_RELEASE_INPUT, header, brw_imm_ud(i),
              brw_imm_ud(is_unpaired));
      }
      emit(BRW_OPCODE_ENDIF);
   }

   if (unlikely(INTEL_DEBUG & DEBUG_SHADER_TIME))
      emit_shader_time_end();

   inst = emit(TCS_OPCODE_THREAD_END);
}
void
vec4_tcs_visitor::emit_input_urb_read(const dst_reg &dst,
                                      const src_reg &vertex_index,
                                      unsigned base_offset,
                                      unsigned first_component,
                                      const src_reg &indirect_offset)
{
   vec4_instruction *inst;
   dst_reg temp(this, glsl_type::ivec4_type);
   temp.type = dst.type;

   /* Set up the message header to reference the proper parts of the URB */
   dst_reg header = dst_reg(this, glsl_type::uvec4_type);
   inst = emit(TCS_OPCODE_SET_INPUT_URB_OFFSETS, header, vertex_index,
               indirect_offset);
   inst->force_writemask_all = true;

   /* Read into a temporary, ignoring writemasking. */
   inst = emit(VEC4_OPCODE_URB_READ, temp, src_reg(header));
   inst->offset = base_offset;

   /* Copy the temporary to the destination to deal with writemasking.
    *
    * Also attempt to deal with gl_PointSize being in the .w component.
    */
   if (inst->offset == 0 && indirect_offset.file == BAD_FILE) {
      emit(MOV(dst, swizzle(src_reg(temp), BRW_SWIZZLE_WWWW)));
   } else {
      src_reg src = src_reg(temp);
      src.swizzle = BRW_SWZ_COMP_INPUT(first_component);
      emit(MOV(dst, src));
   }
}
void
vec4_tcs_visitor::emit_output_urb_read(const dst_reg &dst,
                                       unsigned base_offset,
                                       unsigned first_component,
                                       const src_reg &indirect_offset)
{
   vec4_instruction *inst;

   /* Set up the message header to reference the proper parts of the URB */
   dst_reg header = dst_reg(this, glsl_type::uvec4_type);
   inst = emit(TCS_OPCODE_SET_OUTPUT_URB_OFFSETS, header,
               brw_imm_ud(dst.writemask << first_component), indirect_offset);
   inst->force_writemask_all = true;

   vec4_instruction *read = emit(VEC4_OPCODE_URB_READ, dst, src_reg(header));
   read->offset = base_offset;

   if (first_component) {
      /* Read into a temporary and copy with a swizzle and writemask. */
      read->dst = retype(dst_reg(this, glsl_type::ivec4_type), dst.type);
      emit(MOV(dst, swizzle(src_reg(read->dst),
                            BRW_SWZ_COMP_INPUT(first_component))));
   }
}
void
vec4_tcs_visitor::emit_urb_write(const src_reg &value,
                                 unsigned writemask,
                                 unsigned base_offset,
                                 const src_reg &indirect_offset)
{
   src_reg message(this, glsl_type::uvec4_type, 2);
   vec4_instruction *inst;

   inst = emit(TCS_OPCODE_SET_OUTPUT_URB_OFFSETS, dst_reg(message),
               brw_imm_ud(writemask), indirect_offset);
   inst->force_writemask_all = true;
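   /* The "message" vgrf spans two registers: the first holds the URB offsets
    * header built above, and the second (at byte_offset REG_SIZE) receives
    * the data being written, so a single URB write sends header + payload.
    */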
   inst = emit(MOV(byte_offset(dst_reg(retype(message, value.type)), REG_SIZE),
                   value));
   inst->force_writemask_all = true;

   inst = emit(TCS_OPCODE_URB_WRITE, dst_null_f(), message);
   inst->offset = base_offset;
}
void
vec4_tcs_visitor::nir_emit_intrinsic(nir_intrinsic_instr *instr)
{
   switch (instr->intrinsic) {
   case nir_intrinsic_load_invocation_id:
      emit(MOV(get_nir_dest(instr->dest, BRW_REGISTER_TYPE_UD),
               invocation_id));
      break;
   case nir_intrinsic_load_primitive_id:
      emit(TCS_OPCODE_GET_PRIMITIVE_ID,
           get_nir_dest(instr->dest, BRW_REGISTER_TYPE_UD));
      break;
   case nir_intrinsic_load_patch_vertices_in:
      emit(MOV(get_nir_dest(instr->dest, BRW_REGISTER_TYPE_D),
               brw_imm_d(key->input_vertices)));
      break;
   case nir_intrinsic_load_per_vertex_input: {
      assert(nir_dest_bit_size(instr->dest) == 32);
      src_reg indirect_offset = get_indirect_offset(instr);
      unsigned imm_offset = instr->const_index[0];

      src_reg vertex_index = retype(get_nir_src_imm(instr->src[0]),
                                    BRW_REGISTER_TYPE_UD);

      unsigned first_component = nir_intrinsic_component(instr);
      dst_reg dst = get_nir_dest(instr->dest, BRW_REGISTER_TYPE_D);
      dst.writemask = brw_writemask_for_size(instr->num_components);
      emit_input_urb_read(dst, vertex_index, imm_offset,
                          first_component, indirect_offset);
      break;
   }
   case nir_intrinsic_load_input:
      unreachable("nir_lower_io should use load_per_vertex_input intrinsics");

   case nir_intrinsic_load_output:
   case nir_intrinsic_load_per_vertex_output: {
      src_reg indirect_offset = get_indirect_offset(instr);
      unsigned imm_offset = instr->const_index[0];

      dst_reg dst = get_nir_dest(instr->dest, BRW_REGISTER_TYPE_D);
      dst.writemask = brw_writemask_for_size(instr->num_components);

      emit_output_urb_read(dst, imm_offset, nir_intrinsic_component(instr),
                           indirect_offset);
      break;
   }
   case nir_intrinsic_store_output:
   case nir_intrinsic_store_per_vertex_output: {
      assert(nir_src_bit_size(instr->src[0]) == 32);
      src_reg value = get_nir_src(instr->src[0]);
      unsigned mask = instr->const_index[1];
      unsigned swiz = BRW_SWIZZLE_XYZW;

      src_reg indirect_offset = get_indirect_offset(instr);
      unsigned imm_offset = instr->const_index[0];

      unsigned first_component = nir_intrinsic_component(instr);
      if (first_component) {
         assert(swiz == BRW_SWIZZLE_XYZW);
         swiz = BRW_SWZ_COMP_OUTPUT(first_component);
         mask = mask << first_component;
      }
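      /* For example, a vec2 store to component 2 arrives with mask 0x3;
       * BRW_SWZ_COMP_OUTPUT(2) rotates the source .xy into .zw and the
       * shifted mask becomes 0xc, so the URB write lands in those slots.
       */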
      emit_urb_write(swizzle(value, swiz), mask,
                     imm_offset, indirect_offset);
      break;
   }

   case nir_intrinsic_control_barrier: {
      dst_reg header = dst_reg(this, glsl_type::uvec4_type);
      emit(TCS_OPCODE_CREATE_BARRIER_HEADER, header);
      emit(SHADER_OPCODE_BARRIER, dst_null_ud(), src_reg(header));
      break;
   }

   case nir_intrinsic_memory_barrier_tcs_patch:
      break;

   default:
      vec4_visitor::nir_emit_intrinsic(instr);
   }
}
/**
 * Return the number of patches to accumulate before an 8_PATCH mode thread is
 * launched.  In cases with a large number of input control points and a large
 * amount of VS outputs, the VS URB space needed to store an entire 8 patches
 * worth of data can be prohibitive, so it can be beneficial to launch threads
 * early.
 *
 * See the 3DSTATE_HS::Patch Count Threshold documentation for the recommended
 * values.  Note that 0 means to "disable" early dispatch, meaning to wait for
 * a full 8 patches as normal.
 */
static int
get_patch_count_threshold(int input_control_points)
{
   if (input_control_points <= 4)
      return 0;
   else if (input_control_points <= 6)
      return 5;
   else if (input_control_points <= 8)
      return 4;
   else if (input_control_points <= 10)
      return 3;
   else if (input_control_points <= 14)
      return 2;

   /* Return patch count 1 for PATCHLIST_15 - PATCHLIST_32 */
   return 1;
}
extern "C" const unsigned *
brw_compile_tcs(const struct brw_compiler *compiler,
                void *log_data,
                void *mem_ctx,
                const struct brw_tcs_prog_key *key,
                struct brw_tcs_prog_data *prog_data,
                nir_shader *nir,
                int shader_time_index,
                struct brw_compile_stats *stats,
                char **error_str)
{
   const struct gen_device_info *devinfo = compiler->devinfo;
   struct brw_vue_prog_data *vue_prog_data = &prog_data->base;
   const bool is_scalar = compiler->scalar_stage[MESA_SHADER_TESS_CTRL];
   const unsigned *assembly;

   nir->info.outputs_written = key->outputs_written;
   nir->info.patch_outputs_written = key->patch_outputs_written;

   struct brw_vue_map input_vue_map;
   brw_compute_vue_map(devinfo, &input_vue_map, nir->info.inputs_read,
                       nir->info.separate_shader, 1);
   brw_compute_tess_vue_map(&vue_prog_data->vue_map,
                            nir->info.outputs_written,
                            nir->info.patch_outputs_written);

   brw_nir_apply_key(nir, compiler, &key->base, 8, is_scalar);
   brw_nir_lower_vue_inputs(nir, &input_vue_map);
   brw_nir_lower_tcs_outputs(nir, &vue_prog_data->vue_map,
                             key->tes_primitive_mode);
   if (key->quads_workaround)
      brw_nir_apply_tcs_quads_workaround(nir);

   brw_postprocess_nir(nir, compiler, is_scalar);

   bool has_primitive_id =
      nir->info.system_values_read & (1 << SYSTEM_VALUE_PRIMITIVE_ID);

   prog_data->patch_count_threshold = get_patch_count_threshold(key->input_vertices);

   if (compiler->use_tcs_8_patch &&
       nir->info.tess.tcs_vertices_out <= (devinfo->gen >= 12 ? 32 : 16) &&
       2 + has_primitive_id + key->input_vertices <= (devinfo->gen >= 12 ? 63 : 31)) {
      /* 3DSTATE_HS imposes two constraints on using 8_PATCH mode.  First, the
       * "Instance" field limits the number of output vertices to [1, 16] on
       * gen11 and below, or [1, 32] on gen12 and above.  Secondly, the
       * "Dispatch GRF Start Register for URB Data" field is limited to [0,
       * 31] - which imposes a limit on the input vertices.
       */
      vue_prog_data->dispatch_mode = DISPATCH_MODE_TCS_8_PATCH;
      prog_data->instances = nir->info.tess.tcs_vertices_out;
      prog_data->include_primitive_id = has_primitive_id;
   } else {
      unsigned verts_per_thread = is_scalar ? 8 : 2;
      vue_prog_data->dispatch_mode = DISPATCH_MODE_TCS_SINGLE_PATCH;
      prog_data->instances =
         DIV_ROUND_UP(nir->info.tess.tcs_vertices_out, verts_per_thread);
   }
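   /* Illustrative example: with use_tcs_8_patch set on a gen9 part, a TCS
    * with 4 output vertices, 3 input control points and no primitive ID read
    * satisfies 4 <= 16 and 2 + 0 + 3 <= 31, so 8_PATCH mode is chosen with
    * 4 instances; otherwise the vec4 SINGLE_PATCH path above would use
    * DIV_ROUND_UP(4, 2) == 2 instances.
    */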
   /* Compute URB entry size.  The maximum allowed URB entry size is 32k.
    * That divides up as follows:
    *
    *     32 bytes for the patch header (tessellation factors)
    *    480 bytes for per-patch varyings (a varying component is 4 bytes and
    *              gl_MaxTessPatchComponents = 120)
    *  16384 bytes for per-vertex varyings (a varying component is 4 bytes,
    *              gl_MaxPatchVertices = 32 and
    *              gl_MaxTessControlOutputComponents = 128)
    *
    *  15808 bytes left for varying packing overhead
    */
   const int num_per_patch_slots = vue_prog_data->vue_map.num_per_patch_slots;
   const int num_per_vertex_slots = vue_prog_data->vue_map.num_per_vertex_slots;
   unsigned output_size_bytes = 0;
   /* Note that the patch header is counted in num_per_patch_slots. */
   output_size_bytes += num_per_patch_slots * 16;
   output_size_bytes += nir->info.tess.tcs_vertices_out *
                        num_per_vertex_slots * 16;

   assert(output_size_bytes >= 1);
   if (output_size_bytes > GEN7_MAX_HS_URB_ENTRY_SIZE_BYTES)
      return NULL;

   /* URB entry sizes are stored as a multiple of 64 bytes. */
   vue_prog_data->urb_entry_size = ALIGN(output_size_bytes, 64) / 64;
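   /* As a concrete (illustrative) example: 3 per-patch slots and 8 per-vertex
    * slots with 4 output vertices give 3 * 16 + 4 * 8 * 16 = 560 bytes, which
    * rounds up to ALIGN(560, 64) / 64 = 9 64-byte units.
    */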
   /* On Cannonlake software shall not program an allocation size that
    * specifies a size that is a multiple of 3 64B (512-bit) cachelines.
    */
   if (devinfo->gen == 10 &&
       vue_prog_data->urb_entry_size % 3 == 0)
      vue_prog_data->urb_entry_size++;
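   /* E.g. an entry size of 9 units (576 bytes) is a multiple of 3, so on
    * gen10 it is bumped to 10 units (640 bytes) to respect this restriction.
    */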
   /* HS does not use the usual payload pushing from URB to GRFs,
    * because we don't have enough registers for a full-size payload, and
    * the hardware is broken on Haswell anyway.
    */
   vue_prog_data->urb_read_length = 0;

   if (unlikely(INTEL_DEBUG & DEBUG_TCS)) {
      fprintf(stderr, "TCS Input ");
      brw_print_vue_map(stderr, &input_vue_map);
      fprintf(stderr, "TCS Output ");
      brw_print_vue_map(stderr, &vue_prog_data->vue_map);
   }
   if (is_scalar) {
      fs_visitor v(compiler, log_data, mem_ctx, &key->base,
                   &prog_data->base.base, nir, 8,
                   shader_time_index, &input_vue_map);
      if (!v.run_tcs()) {
         if (error_str)
            *error_str = ralloc_strdup(mem_ctx, v.fail_msg);
         return NULL;
      }

      prog_data->base.base.dispatch_grf_start_reg = v.payload.num_regs;

      fs_generator g(compiler, log_data, mem_ctx,
                     &prog_data->base.base, false, MESA_SHADER_TESS_CTRL);
      if (unlikely(INTEL_DEBUG & DEBUG_TCS)) {
         g.enable_debug(ralloc_asprintf(mem_ctx,
                                        "%s tessellation control shader %s",
                                        nir->info.label ? nir->info.label
                                                        : "unnamed",
                                        nir->info.name));
      }

      g.generate_code(v.cfg, 8, v.shader_stats,
                      v.performance_analysis.require(), stats);

      assembly = g.get_assembly();
   } else {
      vec4_tcs_visitor v(compiler, log_data, key, prog_data,
                         nir, mem_ctx, shader_time_index, &input_vue_map);
      if (!v.run()) {
         if (error_str)
            *error_str = ralloc_strdup(mem_ctx, v.fail_msg);
         return NULL;
      }

      if (unlikely(INTEL_DEBUG & DEBUG_TCS))
         v.dump_instructions();

      assembly = brw_vec4_generate_assembly(compiler, log_data, mem_ctx, nir,
                                            &prog_data->base, v.cfg,
                                            v.performance_analysis.require(),
                                            stats);
   }

   return assembly;
}

} /* namespace brw */