2 * Copyright © 2013 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
27 * State atom for client-programmable geometry shaders, and support code.
31 #include "brw_context.h"
32 #include "brw_vec4_gs_visitor.h"
33 #include "brw_state.h"
34 #include "brw_ff_gs.h"
38 do_gs_prog(struct brw_context
*brw
,
39 struct gl_shader_program
*prog
,
40 struct brw_geometry_program
*gp
,
41 struct brw_gs_prog_key
*key
)
43 struct brw_stage_state
*stage_state
= &brw
->gs
.base
;
44 struct brw_gs_compile c
;
45 memset(&c
, 0, sizeof(c
));
49 c
.prog_data
.include_primitive_id
=
50 (gp
->program
.Base
.InputsRead
& VARYING_BIT_PRIMITIVE_ID
) != 0;
52 c
.prog_data
.invocations
= gp
->program
.Invocations
;
54 /* Allocate the references to the uniforms that will end up in the
55 * prog_data associated with the compiled program, and which will be freed
58 * Note: param_count needs to be num_uniform_components * 4, since we add
59 * padding around uniform values below vec4 size, so the worst case is that
60 * every uniform is a float which gets padded to the size of a vec4.
62 struct gl_shader
*gs
= prog
->_LinkedShaders
[MESA_SHADER_GEOMETRY
];
63 int param_count
= gs
->num_uniform_components
* 4;
65 /* We also upload clip plane data as uniforms */
66 param_count
+= MAX_CLIP_PLANES
* 4;
68 c
.prog_data
.base
.base
.param
=
69 rzalloc_array(NULL
, const gl_constant_value
*, param_count
);
70 c
.prog_data
.base
.base
.pull_param
=
71 rzalloc_array(NULL
, const gl_constant_value
*, param_count
);
72 /* Setting nr_params here NOT to the size of the param and pull_param
73 * arrays, but to the number of uniform components vec4_visitor
74 * needs. vec4_visitor::setup_uniforms() will set it back to a proper value.
76 c
.prog_data
.base
.base
.nr_params
= ALIGN(param_count
, 4) / 4 + gs
->num_samplers
;
79 if (gp
->program
.OutputType
== GL_POINTS
) {
80 /* When the output type is points, the geometry shader may output data
81 * to multiple streams, and EndPrimitive() has no effect. So we
82 * configure the hardware to interpret the control data as stream ID.
84 c
.prog_data
.control_data_format
= GEN7_GS_CONTROL_DATA_FORMAT_GSCTL_SID
;
86 /* We only have to emit control bits if we are using streams */
87 if (prog
->Geom
.UsesStreams
)
88 c
.control_data_bits_per_vertex
= 2;
90 c
.control_data_bits_per_vertex
= 0;
92 /* When the output type is triangle_strip or line_strip, EndPrimitive()
93 * may be used to terminate the current strip and start a new one
94 * (similar to primitive restart), and outputting data to multiple
95 * streams is not supported. So we configure the hardware to interpret
96 * the control data as EndPrimitive information (a.k.a. "cut bits").
98 c
.prog_data
.control_data_format
= GEN7_GS_CONTROL_DATA_FORMAT_GSCTL_CUT
;
100 /* We only need to output control data if the shader actually calls
103 c
.control_data_bits_per_vertex
= gp
->program
.UsesEndPrimitive
? 1 : 0;
106 /* There are no control data bits in gen6. */
107 c
.control_data_bits_per_vertex
= 0;
109 /* If it is using transform feedback, enable it */
110 if (prog
->TransformFeedback
.NumVarying
)
111 c
.prog_data
.gen6_xfb_enabled
= true;
113 c
.prog_data
.gen6_xfb_enabled
= false;
115 c
.control_data_header_size_bits
=
116 gp
->program
.VerticesOut
* c
.control_data_bits_per_vertex
;
118 /* 1 HWORD = 32 bytes = 256 bits */
119 c
.prog_data
.control_data_header_size_hwords
=
120 ALIGN(c
.control_data_header_size_bits
, 256) / 256;
122 GLbitfield64 outputs_written
= gp
->program
.Base
.OutputsWritten
;
124 /* In order for legacy clipping to work, we need to populate the clip
125 * distance varying slots whenever clipping is enabled, even if the vertex
126 * shader doesn't write to gl_ClipDistance.
128 if (c
.key
.base
.userclip_active
) {
129 outputs_written
|= BITFIELD64_BIT(VARYING_SLOT_CLIP_DIST0
);
130 outputs_written
|= BITFIELD64_BIT(VARYING_SLOT_CLIP_DIST1
);
133 brw_compute_vue_map(brw
, &c
.prog_data
.base
.vue_map
, outputs_written
);
135 /* Compute the output vertex size.
137 * From the Ivy Bridge PRM, Vol2 Part1 7.2.1.1 STATE_GS - Output Vertex
140 * [0,62] indicating [1,63] 16B units
142 * Specifies the size of each vertex stored in the GS output entry
143 * (following any Control Header data) as a number of 128-bit units
146 * Programming Restrictions: The vertex size must be programmed as a
147 * multiple of 32B units with the following exception: Rendering is
148 * disabled (as per SOL stage state) and the vertex size output by the
151 * If rendering is enabled (as per SOL state) the vertex size must be
152 * programmed as a multiple of 32B units. In other words, the only time
153 * software can program a vertex size with an odd number of 16B units
154 * is when rendering is disabled.
156 * Note: B=bytes in the above text.
158 * It doesn't seem worth the extra trouble to optimize the case where the
159 * vertex size is 16B (especially since this would require special-casing
160 * the GEN assembly that writes to the URB). So we just set the vertex
161 * size to a multiple of 32B (2 vec4's) in all cases.
163 * The maximum output vertex size is 62*16 = 992 bytes (31 hwords). We
164 * budget that as follows:
166 * 512 bytes for varyings (a varying component is 4 bytes and
167 * gl_MaxGeometryOutputComponents = 128)
168 * 16 bytes overhead for VARYING_SLOT_PSIZ (each varying slot is 16
170 * 16 bytes overhead for gl_Position (we allocate it a slot in the VUE
171 * even if it's not used)
172 * 32 bytes overhead for gl_ClipDistance (we allocate it 2 VUE slots
173 * whenever clip planes are enabled, even if the shader doesn't
174 * write to gl_ClipDistance)
175 * 16 bytes overhead since the VUE size must be a multiple of 32 bytes
176 * (see below)--this causes up to 1 VUE slot to be wasted
177 * 400 bytes available for varying packing overhead
179 * Worst-case varying packing overhead is 3/4 of a varying slot (12 bytes)
180 * per interpolation type, so this is plenty.
183 unsigned output_vertex_size_bytes
= c
.prog_data
.base
.vue_map
.num_slots
* 16;
184 assert(brw
->gen
== 6 ||
185 output_vertex_size_bytes
<= GEN7_MAX_GS_OUTPUT_VERTEX_SIZE_BYTES
);
186 c
.prog_data
.output_vertex_size_hwords
=
187 ALIGN(output_vertex_size_bytes
, 32) / 32;
189 /* Compute URB entry size. The maximum allowed URB entry size is 32k.
190 * That divides up as follows:
192 * 64 bytes for the control data header (cut indices or StreamID bits)
193 * 4096 bytes for varyings (a varying component is 4 bytes and
194 * gl_MaxGeometryTotalOutputComponents = 1024)
195 * 4096 bytes overhead for VARYING_SLOT_PSIZ (each varying slot is 16
196 * bytes/vertex and gl_MaxGeometryOutputVertices is 256)
197 * 4096 bytes overhead for gl_Position (we allocate it a slot in the VUE
198 * even if it's not used)
199 * 8192 bytes overhead for gl_ClipDistance (we allocate it 2 VUE slots
200 * whenever clip planes are enabled, even if the shader doesn't
201 * write to gl_ClipDistance)
202 * 4096 bytes overhead since the VUE size must be a multiple of 32
203 * bytes (see above)--this causes up to 1 VUE slot to be wasted
204 * 8128 bytes available for varying packing overhead
206 * Worst-case varying packing overhead is 3/4 of a varying slot per
207 * interpolation type, which works out to 3072 bytes, so this would allow
208 * us to accommodate 2 interpolation types without any danger of running
211 * In practice, the risk of running out of URB space is very small, since
212 * the above figures are all worst-case, and most of them scale with the
213 * number of output vertices. So we'll just calculate the amount of space
214 * we need, and if it's too large, fail to compile.
216 * The above is for gen7+ where we have a single URB entry that will hold
217 * all the output. In gen6, we will have to allocate URB entries for every
218 * vertex we emit, so our URB entries only need to be large enough to hold
219 * a single vertex. Also, gen6 does not have a control data header.
221 unsigned output_size_bytes
;
224 c
.prog_data
.output_vertex_size_hwords
* 32 * gp
->program
.VerticesOut
;
225 output_size_bytes
+= 32 * c
.prog_data
.control_data_header_size_hwords
;
227 output_size_bytes
= c
.prog_data
.output_vertex_size_hwords
* 32;
230 /* Broadwell stores "Vertex Count" as a full 8 DWord (32 byte) URB output,
231 * which comes before the control header.
234 output_size_bytes
+= 32;
236 assert(output_size_bytes
>= 1);
237 int max_output_size_bytes
= GEN7_MAX_GS_URB_ENTRY_SIZE_BYTES
;
239 max_output_size_bytes
= GEN6_MAX_GS_URB_ENTRY_SIZE_BYTES
;
240 if (output_size_bytes
> max_output_size_bytes
)
244 /* URB entry sizes are stored as a multiple of 64 bytes in gen7+ and
245 * a multiple of 128 bytes in gen6.
248 c
.prog_data
.base
.urb_entry_size
= ALIGN(output_size_bytes
, 64) / 64;
250 c
.prog_data
.base
.urb_entry_size
= ALIGN(output_size_bytes
, 128) / 128;
252 c
.prog_data
.output_topology
=
253 get_hw_prim_for_gl_prim(gp
->program
.OutputType
);
255 brw_compute_vue_map(brw
, &c
.input_vue_map
, c
.key
.input_varyings
);
257 /* GS inputs are read from the VUE 256 bits (2 vec4's) at a time, so we
258 * need to program a URB read length of ceiling(num_slots / 2).
260 c
.prog_data
.base
.urb_read_length
= (c
.input_vue_map
.num_slots
+ 1) / 2;
262 void *mem_ctx
= ralloc_context(NULL
);
263 unsigned program_size
;
264 const unsigned *program
=
265 brw_gs_emit(brw
, prog
, &c
, mem_ctx
, &program_size
);
266 if (program
== NULL
) {
267 ralloc_free(mem_ctx
);
271 /* Scratch space is used for register spilling */
272 if (c
.base
.last_scratch
) {
273 perf_debug("Geometry shader triggered register spilling. "
274 "Try reducing the number of live vec4 values to "
275 "improve performance.\n");
277 c
.prog_data
.base
.base
.total_scratch
278 = brw_get_scratch_size(c
.base
.last_scratch
*REG_SIZE
);
280 brw_get_scratch_bo(brw
, &stage_state
->scratch_bo
,
281 c
.prog_data
.base
.base
.total_scratch
*
282 brw
->max_gs_threads
);
285 brw_upload_cache(&brw
->cache
, BRW_CACHE_GS_PROG
,
286 &c
.key
, sizeof(c
.key
),
287 program
, program_size
,
288 &c
.prog_data
, sizeof(c
.prog_data
),
289 &stage_state
->prog_offset
, &brw
->gs
.prog_data
);
290 ralloc_free(mem_ctx
);
296 brw_upload_gs_prog(struct brw_context
*brw
)
298 struct gl_context
*ctx
= &brw
->ctx
;
299 struct brw_stage_state
*stage_state
= &brw
->gs
.base
;
300 struct brw_gs_prog_key key
;
301 /* BRW_NEW_GEOMETRY_PROGRAM */
302 struct brw_geometry_program
*gp
=
303 (struct brw_geometry_program
*) brw
->geometry_program
;
305 if (!brw_state_dirty(brw
,
307 BRW_NEW_GEOMETRY_PROGRAM
|
308 BRW_NEW_TRANSFORM_FEEDBACK
|
313 /* No geometry shader. Vertex data just passes straight through. */
314 if (brw
->state
.dirty
.brw
& BRW_NEW_VUE_MAP_VS
) {
315 brw
->vue_map_geom_out
= brw
->vue_map_vs
;
316 brw
->state
.dirty
.brw
|= BRW_NEW_VUE_MAP_GEOM_OUT
;
320 (brw
->state
.dirty
.brw
& BRW_NEW_TRANSFORM_FEEDBACK
)) {
321 gen6_brw_upload_ff_gs_prog(brw
);
325 /* Other state atoms had better not try to access prog_data, since
326 * there's no GS program.
328 brw
->gs
.prog_data
= NULL
;
329 brw
->gs
.base
.prog_data
= NULL
;
334 struct gl_program
*prog
= &gp
->program
.Base
;
336 memset(&key
, 0, sizeof(key
));
338 key
.base
.program_string_id
= gp
->id
;
339 brw_setup_vue_key_clip_info(brw
, &key
.base
,
340 gp
->program
.Base
.UsesClipDistanceOut
);
343 brw_populate_sampler_prog_key_data(ctx
, prog
, stage_state
->sampler_count
,
346 /* BRW_NEW_VUE_MAP_VS */
347 key
.input_varyings
= brw
->vue_map_vs
.slots_valid
;
349 if (!brw_search_cache(&brw
->cache
, BRW_CACHE_GS_PROG
,
351 &stage_state
->prog_offset
, &brw
->gs
.prog_data
)) {
353 do_gs_prog(brw
, ctx
->_Shader
->CurrentProgram
[MESA_SHADER_GEOMETRY
], gp
,
358 brw
->gs
.base
.prog_data
= &brw
->gs
.prog_data
->base
.base
;
360 if (memcmp(&brw
->gs
.prog_data
->base
.vue_map
, &brw
->vue_map_geom_out
,
361 sizeof(brw
->vue_map_geom_out
)) != 0) {
362 brw
->vue_map_geom_out
= brw
->gs
.prog_data
->base
.vue_map
;
363 brw
->state
.dirty
.brw
|= BRW_NEW_VUE_MAP_GEOM_OUT
;
368 brw_gs_precompile(struct gl_context
*ctx
,
369 struct gl_shader_program
*shader_prog
,
370 struct gl_program
*prog
)
372 struct brw_context
*brw
= brw_context(ctx
);
373 struct brw_gs_prog_key key
;
374 uint32_t old_prog_offset
= brw
->gs
.base
.prog_offset
;
375 struct brw_gs_prog_data
*old_prog_data
= brw
->gs
.prog_data
;
378 struct gl_geometry_program
*gp
= (struct gl_geometry_program
*) prog
;
379 struct brw_geometry_program
*bgp
= brw_geometry_program(gp
);
381 memset(&key
, 0, sizeof(key
));
383 brw_vue_setup_prog_key_for_precompile(ctx
, &key
.base
, bgp
->id
, &gp
->Base
);
385 /* Assume that the set of varyings coming in from the vertex shader exactly
386 * matches what the geometry shader requires.
388 key
.input_varyings
= gp
->Base
.InputsRead
;
390 success
= do_gs_prog(brw
, shader_prog
, bgp
, &key
);
392 brw
->gs
.base
.prog_offset
= old_prog_offset
;
393 brw
->gs
.prog_data
= old_prog_data
;
400 brw_gs_prog_data_compare(const void *in_a
, const void *in_b
)
402 const struct brw_gs_prog_data
*a
= in_a
;
403 const struct brw_gs_prog_data
*b
= in_b
;
405 /* Compare the base structure. */
406 if (!brw_stage_prog_data_compare(&a
->base
.base
, &b
->base
.base
))
409 /* Compare the rest of the struct. */
410 const unsigned offset
= sizeof(struct brw_stage_prog_data
);
411 if (memcmp(((char *) a
) + offset
, ((char *) b
) + offset
,
412 sizeof(struct brw_gs_prog_data
) - offset
)) {