2 * Copyright © 2013 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
27 * State atom for client-programmable geometry shaders, and support code.
31 #include "brw_context.h"
32 #include "brw_vec4_gs_visitor.h"
33 #include "brw_state.h"
34 #include "brw_ff_gs.h"
38 do_gs_prog(struct brw_context
*brw
,
39 struct gl_shader_program
*prog
,
40 struct brw_geometry_program
*gp
,
41 struct brw_gs_prog_key
*key
)
43 struct brw_stage_state
*stage_state
= &brw
->gs
.base
;
44 struct brw_gs_compile c
;
45 memset(&c
, 0, sizeof(c
));
49 c
.prog_data
.include_primitive_id
=
50 (gp
->program
.Base
.InputsRead
& VARYING_BIT_PRIMITIVE_ID
) != 0;
52 c
.prog_data
.invocations
= gp
->program
.Invocations
;
54 /* Allocate the references to the uniforms that will end up in the
55 * prog_data associated with the compiled program, and which will be freed
58 * Note: param_count needs to be num_uniform_components * 4, since we add
59 * padding around uniform values below vec4 size, so the worst case is that
60 * every uniform is a float which gets padded to the size of a vec4.
62 struct gl_shader
*gs
= prog
->_LinkedShaders
[MESA_SHADER_GEOMETRY
];
63 int param_count
= gs
->num_uniform_components
* 4;
65 /* We also upload clip plane data as uniforms */
66 param_count
+= MAX_CLIP_PLANES
* 4;
68 c
.prog_data
.base
.base
.param
=
69 rzalloc_array(NULL
, const gl_constant_value
*, param_count
);
70 c
.prog_data
.base
.base
.pull_param
=
71 rzalloc_array(NULL
, const gl_constant_value
*, param_count
);
72 c
.prog_data
.base
.base
.nr_params
= param_count
;
75 if (gp
->program
.OutputType
== GL_POINTS
) {
76 /* When the output type is points, the geometry shader may output data
77 * to multiple streams, and EndPrimitive() has no effect. So we
78 * configure the hardware to interpret the control data as stream ID.
80 c
.prog_data
.control_data_format
= GEN7_GS_CONTROL_DATA_FORMAT_GSCTL_SID
;
82 /* We only have to emit control bits if we are using streams */
83 if (prog
->Geom
.UsesStreams
)
84 c
.control_data_bits_per_vertex
= 2;
86 c
.control_data_bits_per_vertex
= 0;
88 /* When the output type is triangle_strip or line_strip, EndPrimitive()
89 * may be used to terminate the current strip and start a new one
90 * (similar to primitive restart), and outputting data to multiple
91 * streams is not supported. So we configure the hardware to interpret
92 * the control data as EndPrimitive information (a.k.a. "cut bits").
94 c
.prog_data
.control_data_format
= GEN7_GS_CONTROL_DATA_FORMAT_GSCTL_CUT
;
96 /* We only need to output control data if the shader actually calls
99 c
.control_data_bits_per_vertex
= gp
->program
.UsesEndPrimitive
? 1 : 0;
102 /* There are no control data bits in gen6. */
103 c
.control_data_bits_per_vertex
= 0;
105 /* If it is using transform feedback, enable it */
106 if (prog
->TransformFeedback
.NumVarying
)
107 c
.prog_data
.gen6_xfb_enabled
= true;
109 c
.prog_data
.gen6_xfb_enabled
= false;
111 c
.control_data_header_size_bits
=
112 gp
->program
.VerticesOut
* c
.control_data_bits_per_vertex
;
114 /* 1 HWORD = 32 bytes = 256 bits */
115 c
.prog_data
.control_data_header_size_hwords
=
116 ALIGN(c
.control_data_header_size_bits
, 256) / 256;
118 GLbitfield64 outputs_written
= gp
->program
.Base
.OutputsWritten
;
120 /* In order for legacy clipping to work, we need to populate the clip
121 * distance varying slots whenever clipping is enabled, even if the vertex
122 * shader doesn't write to gl_ClipDistance.
124 if (c
.key
.base
.userclip_active
) {
125 outputs_written
|= BITFIELD64_BIT(VARYING_SLOT_CLIP_DIST0
);
126 outputs_written
|= BITFIELD64_BIT(VARYING_SLOT_CLIP_DIST1
);
129 brw_compute_vue_map(brw
, &c
.prog_data
.base
.vue_map
, outputs_written
);
131 /* Compute the output vertex size.
133 * From the Ivy Bridge PRM, Vol2 Part1 7.2.1.1 STATE_GS - Output Vertex
136 * [0,62] indicating [1,63] 16B units
138 * Specifies the size of each vertex stored in the GS output entry
139 * (following any Control Header data) as a number of 128-bit units
142 * Programming Restrictions: The vertex size must be programmed as a
143 * multiple of 32B units with the following exception: Rendering is
144 * disabled (as per SOL stage state) and the vertex size output by the
147 * If rendering is enabled (as per SOL state) the vertex size must be
148 * programmed as a multiple of 32B units. In other words, the only time
149 * software can program a vertex size with an odd number of 16B units
150 * is when rendering is disabled.
152 * Note: B=bytes in the above text.
154 * It doesn't seem worth the extra trouble to optimize the case where the
155 * vertex size is 16B (especially since this would require special-casing
156 * the GEN assembly that writes to the URB). So we just set the vertex
157 * size to a multiple of 32B (2 vec4's) in all cases.
159 * The maximum output vertex size is 62*16 = 992 bytes (31 hwords). We
160 * budget that as follows:
162 * 512 bytes for varyings (a varying component is 4 bytes and
163 * gl_MaxGeometryOutputComponents = 128)
164 * 16 bytes overhead for VARYING_SLOT_PSIZ (each varying slot is 16
166 * 16 bytes overhead for gl_Position (we allocate it a slot in the VUE
167 * even if it's not used)
168 * 32 bytes overhead for gl_ClipDistance (we allocate it 2 VUE slots
169 * whenever clip planes are enabled, even if the shader doesn't
170 * write to gl_ClipDistance)
171 * 16 bytes overhead since the VUE size must be a multiple of 32 bytes
172 * (see below)--this causes up to 1 VUE slot to be wasted
173 * 400 bytes available for varying packing overhead
175 * Worst-case varying packing overhead is 3/4 of a varying slot (12 bytes)
176 * per interpolation type, so this is plenty.
179 unsigned output_vertex_size_bytes
= c
.prog_data
.base
.vue_map
.num_slots
* 16;
180 assert(brw
->gen
== 6 ||
181 output_vertex_size_bytes
<= GEN7_MAX_GS_OUTPUT_VERTEX_SIZE_BYTES
);
182 c
.prog_data
.output_vertex_size_hwords
=
183 ALIGN(output_vertex_size_bytes
, 32) / 32;
185 /* Compute URB entry size. The maximum allowed URB entry size is 32k.
186 * That divides up as follows:
188 * 64 bytes for the control data header (cut indices or StreamID bits)
189 * 4096 bytes for varyings (a varying component is 4 bytes and
190 * gl_MaxGeometryTotalOutputComponents = 1024)
191 * 4096 bytes overhead for VARYING_SLOT_PSIZ (each varying slot is 16
192 * bytes/vertex and gl_MaxGeometryOutputVertices is 256)
193 * 4096 bytes overhead for gl_Position (we allocate it a slot in the VUE
194 * even if it's not used)
195 * 8192 bytes overhead for gl_ClipDistance (we allocate it 2 VUE slots
196 * whenever clip planes are enabled, even if the shader doesn't
197 * write to gl_ClipDistance)
198 * 4096 bytes overhead since the VUE size must be a multiple of 32
199 * bytes (see above)--this causes up to 1 VUE slot to be wasted
200 * 8128 bytes available for varying packing overhead
202 * Worst-case varying packing overhead is 3/4 of a varying slot per
203 * interpolation type, which works out to 3072 bytes, so this would allow
204 * us to accommodate 2 interpolation types without any danger of running
207 * In practice, the risk of running out of URB space is very small, since
208 * the above figures are all worst-case, and most of them scale with the
209 * number of output vertices. So we'll just calculate the amount of space
210 * we need, and if it's too large, fail to compile.
212 * The above is for gen7+ where we have a single URB entry that will hold
213 * all the output. In gen6, we will have to allocate URB entries for every
214 * vertex we emit, so our URB entries only need to be large enough to hold
215 * a single vertex. Also, gen6 does not have a control data header.
217 unsigned output_size_bytes
;
220 c
.prog_data
.output_vertex_size_hwords
* 32 * gp
->program
.VerticesOut
;
221 output_size_bytes
+= 32 * c
.prog_data
.control_data_header_size_hwords
;
223 output_size_bytes
= c
.prog_data
.output_vertex_size_hwords
* 32;
226 /* Broadwell stores "Vertex Count" as a full 8 DWord (32 byte) URB output,
227 * which comes before the control header.
230 output_size_bytes
+= 32;
232 assert(output_size_bytes
>= 1);
233 int max_output_size_bytes
= GEN7_MAX_GS_URB_ENTRY_SIZE_BYTES
;
235 max_output_size_bytes
= GEN6_MAX_GS_URB_ENTRY_SIZE_BYTES
;
236 if (output_size_bytes
> max_output_size_bytes
)
240 /* URB entry sizes are stored as a multiple of 64 bytes in gen7+ and
241 * a multiple of 128 bytes in gen6.
244 c
.prog_data
.base
.urb_entry_size
= ALIGN(output_size_bytes
, 64) / 64;
246 c
.prog_data
.base
.urb_entry_size
= ALIGN(output_size_bytes
, 128) / 128;
248 c
.prog_data
.output_topology
=
249 get_hw_prim_for_gl_prim(gp
->program
.OutputType
);
251 brw_compute_vue_map(brw
, &c
.input_vue_map
, c
.key
.input_varyings
);
253 /* GS inputs are read from the VUE 256 bits (2 vec4's) at a time, so we
254 * need to program a URB read length of ceiling(num_slots / 2).
256 c
.prog_data
.base
.urb_read_length
= (c
.input_vue_map
.num_slots
+ 1) / 2;
258 void *mem_ctx
= ralloc_context(NULL
);
259 unsigned program_size
;
260 const unsigned *program
=
261 brw_gs_emit(brw
, prog
, &c
, mem_ctx
, &program_size
);
262 if (program
== NULL
) {
263 ralloc_free(mem_ctx
);
267 /* Scratch space is used for register spilling */
268 if (c
.base
.last_scratch
) {
269 perf_debug("Geometry shader triggered register spilling. "
270 "Try reducing the number of live vec4 values to "
271 "improve performance.\n");
273 c
.prog_data
.base
.base
.total_scratch
274 = brw_get_scratch_size(c
.base
.last_scratch
*REG_SIZE
);
276 brw_get_scratch_bo(brw
, &stage_state
->scratch_bo
,
277 c
.prog_data
.base
.base
.total_scratch
*
278 brw
->max_gs_threads
);
281 brw_upload_cache(&brw
->cache
, BRW_CACHE_GS_PROG
,
282 &c
.key
, sizeof(c
.key
),
283 program
, program_size
,
284 &c
.prog_data
, sizeof(c
.prog_data
),
285 &stage_state
->prog_offset
, &brw
->gs
.prog_data
);
286 ralloc_free(mem_ctx
);
292 brw_gs_state_dirty(struct brw_context
*brw
)
294 return brw_state_dirty(brw
,
296 BRW_NEW_GEOMETRY_PROGRAM
|
297 BRW_NEW_TRANSFORM_FEEDBACK
|
302 brw_gs_populate_key(struct brw_context
*brw
,
303 struct brw_gs_prog_key
*key
)
305 struct gl_context
*ctx
= &brw
->ctx
;
306 struct brw_stage_state
*stage_state
= &brw
->gs
.base
;
307 struct brw_geometry_program
*gp
=
308 (struct brw_geometry_program
*) brw
->geometry_program
;
309 struct gl_program
*prog
= &gp
->program
.Base
;
311 memset(key
, 0, sizeof(*key
));
313 key
->base
.program_string_id
= gp
->id
;
314 brw_setup_vue_key_clip_info(brw
, &key
->base
,
315 gp
->program
.Base
.UsesClipDistanceOut
);
318 brw_populate_sampler_prog_key_data(ctx
, prog
, stage_state
->sampler_count
,
321 /* BRW_NEW_VUE_MAP_VS */
322 key
->input_varyings
= brw
->vue_map_vs
.slots_valid
;
326 brw_upload_gs_prog(struct brw_context
*brw
)
328 struct gl_context
*ctx
= &brw
->ctx
;
329 struct brw_stage_state
*stage_state
= &brw
->gs
.base
;
330 struct brw_gs_prog_key key
;
331 /* BRW_NEW_GEOMETRY_PROGRAM */
332 struct brw_geometry_program
*gp
=
333 (struct brw_geometry_program
*) brw
->geometry_program
;
335 if (!brw_gs_state_dirty(brw
))
339 /* No geometry shader. Vertex data just passes straight through. */
340 if (brw
->ctx
.NewDriverState
& BRW_NEW_VUE_MAP_VS
) {
341 brw
->vue_map_geom_out
= brw
->vue_map_vs
;
342 brw
->ctx
.NewDriverState
|= BRW_NEW_VUE_MAP_GEOM_OUT
;
346 (brw
->ctx
.NewDriverState
& BRW_NEW_TRANSFORM_FEEDBACK
)) {
347 gen6_brw_upload_ff_gs_prog(brw
);
351 /* Other state atoms had better not try to access prog_data, since
352 * there's no GS program.
354 brw
->gs
.prog_data
= NULL
;
355 brw
->gs
.base
.prog_data
= NULL
;
360 brw_gs_populate_key(brw
, &key
);
362 if (!brw_search_cache(&brw
->cache
, BRW_CACHE_GS_PROG
,
364 &stage_state
->prog_offset
, &brw
->gs
.prog_data
)) {
366 do_gs_prog(brw
, ctx
->_Shader
->CurrentProgram
[MESA_SHADER_GEOMETRY
], gp
,
371 brw
->gs
.base
.prog_data
= &brw
->gs
.prog_data
->base
.base
;
373 if (memcmp(&brw
->gs
.prog_data
->base
.vue_map
, &brw
->vue_map_geom_out
,
374 sizeof(brw
->vue_map_geom_out
)) != 0) {
375 brw
->vue_map_geom_out
= brw
->gs
.prog_data
->base
.vue_map
;
376 brw
->ctx
.NewDriverState
|= BRW_NEW_VUE_MAP_GEOM_OUT
;
381 brw_gs_precompile(struct gl_context
*ctx
,
382 struct gl_shader_program
*shader_prog
,
383 struct gl_program
*prog
)
385 struct brw_context
*brw
= brw_context(ctx
);
386 struct brw_gs_prog_key key
;
387 uint32_t old_prog_offset
= brw
->gs
.base
.prog_offset
;
388 struct brw_gs_prog_data
*old_prog_data
= brw
->gs
.prog_data
;
391 struct gl_geometry_program
*gp
= (struct gl_geometry_program
*) prog
;
392 struct brw_geometry_program
*bgp
= brw_geometry_program(gp
);
394 memset(&key
, 0, sizeof(key
));
396 brw_vue_setup_prog_key_for_precompile(ctx
, &key
.base
, bgp
->id
, &gp
->Base
);
398 /* Assume that the set of varyings coming in from the vertex shader exactly
399 * matches what the geometry shader requires.
401 key
.input_varyings
= gp
->Base
.InputsRead
;
403 success
= do_gs_prog(brw
, shader_prog
, bgp
, &key
);
405 brw
->gs
.base
.prog_offset
= old_prog_offset
;
406 brw
->gs
.prog_data
= old_prog_data
;
413 brw_gs_prog_data_compare(const void *in_a
, const void *in_b
)
415 const struct brw_gs_prog_data
*a
= in_a
;
416 const struct brw_gs_prog_data
*b
= in_b
;
418 /* Compare the base structure. */
419 if (!brw_stage_prog_data_compare(&a
->base
.base
, &b
->base
.base
))
422 /* Compare the rest of the struct. */
423 const unsigned offset
= sizeof(struct brw_stage_prog_data
);
424 if (memcmp(((char *) a
) + offset
, ((char *) b
) + offset
,
425 sizeof(struct brw_gs_prog_data
) - offset
)) {