/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * \file brw_vec4_gs.c
 *
 * State atom for client-programmable geometry shaders, and support code.
 */
#include <string.h>

#include "brw_vec4_gs.h"
#include "brw_context.h"
#include "brw_vec4_gs_visitor.h"
#include "brw_state.h"
37 do_gs_prog(struct brw_context
*brw
,
38 struct gl_shader_program
*prog
,
39 struct brw_geometry_program
*gp
,
40 struct brw_gs_prog_key
*key
)
42 struct brw_stage_state
*stage_state
= &brw
->gs
.base
;
43 struct brw_gs_compile c
;
44 memset(&c
, 0, sizeof(c
));
48 c
.prog_data
.include_primitive_id
=
49 (gp
->program
.Base
.InputsRead
& VARYING_BIT_PRIMITIVE_ID
) != 0;
51 c
.prog_data
.invocations
= gp
->program
.Invocations
;
53 /* Allocate the references to the uniforms that will end up in the
54 * prog_data associated with the compiled program, and which will be freed
57 * Note: param_count needs to be num_uniform_components * 4, since we add
58 * padding around uniform values below vec4 size, so the worst case is that
59 * every uniform is a float which gets padded to the size of a vec4.
61 struct gl_shader
*gs
= prog
->_LinkedShaders
[MESA_SHADER_GEOMETRY
];
62 int param_count
= gs
->num_uniform_components
* 4;
64 /* We also upload clip plane data as uniforms */
65 param_count
+= MAX_CLIP_PLANES
* 4;
67 c
.prog_data
.base
.base
.param
=
68 rzalloc_array(NULL
, const gl_constant_value
*, param_count
);
69 c
.prog_data
.base
.base
.pull_param
=
70 rzalloc_array(NULL
, const gl_constant_value
*, param_count
);
71 /* Setting nr_params here NOT to the size of the param and pull_param
72 * arrays, but to the number of uniform components vec4_visitor
73 * needs. vec4_visitor::setup_uniforms() will set it back to a proper value.
75 c
.prog_data
.base
.base
.nr_params
= ALIGN(param_count
, 4) / 4 + gs
->num_samplers
;
77 if (gp
->program
.OutputType
== GL_POINTS
) {
78 /* When the output type is points, the geometry shader may output data
79 * to multiple streams, and EndPrimitive() has no effect. So we
80 * configure the hardware to interpret the control data as stream ID.
82 c
.prog_data
.control_data_format
= GEN7_GS_CONTROL_DATA_FORMAT_GSCTL_SID
;
84 /* We only have to emit control bits if we are using streams */
85 if (prog
->Geom
.UsesStreams
)
86 c
.control_data_bits_per_vertex
= 2;
88 c
.control_data_bits_per_vertex
= 0;
90 /* When the output type is triangle_strip or line_strip, EndPrimitive()
91 * may be used to terminate the current strip and start a new one
92 * (similar to primitive restart), and outputting data to multiple
93 * streams is not supported. So we configure the hardware to interpret
94 * the control data as EndPrimitive information (a.k.a. "cut bits").
96 c
.prog_data
.control_data_format
= GEN7_GS_CONTROL_DATA_FORMAT_GSCTL_CUT
;
98 /* We only need to output control data if the shader actually calls
101 c
.control_data_bits_per_vertex
= gp
->program
.UsesEndPrimitive
? 1 : 0;
103 c
.control_data_header_size_bits
=
104 gp
->program
.VerticesOut
* c
.control_data_bits_per_vertex
;
106 /* 1 HWORD = 32 bytes = 256 bits */
107 c
.prog_data
.control_data_header_size_hwords
=
108 ALIGN(c
.control_data_header_size_bits
, 256) / 256;
110 GLbitfield64 outputs_written
= gp
->program
.Base
.OutputsWritten
;
112 /* In order for legacy clipping to work, we need to populate the clip
113 * distance varying slots whenever clipping is enabled, even if the vertex
114 * shader doesn't write to gl_ClipDistance.
116 if (c
.key
.base
.userclip_active
) {
117 outputs_written
|= BITFIELD64_BIT(VARYING_SLOT_CLIP_DIST0
);
118 outputs_written
|= BITFIELD64_BIT(VARYING_SLOT_CLIP_DIST1
);
121 brw_compute_vue_map(brw
, &c
.prog_data
.base
.vue_map
, outputs_written
);
123 /* Compute the output vertex size.
125 * From the Ivy Bridge PRM, Vol2 Part1 7.2.1.1 STATE_GS - Output Vertex
128 * [0,62] indicating [1,63] 16B units
130 * Specifies the size of each vertex stored in the GS output entry
131 * (following any Control Header data) as a number of 128-bit units
134 * Programming Restrictions: The vertex size must be programmed as a
135 * multiple of 32B units with the following exception: Rendering is
136 * disabled (as per SOL stage state) and the vertex size output by the
139 * If rendering is enabled (as per SOL state) the vertex size must be
140 * programmed as a multiple of 32B units. In other words, the only time
141 * software can program a vertex size with an odd number of 16B units
142 * is when rendering is disabled.
144 * Note: B=bytes in the above text.
146 * It doesn't seem worth the extra trouble to optimize the case where the
147 * vertex size is 16B (especially since this would require special-casing
148 * the GEN assembly that writes to the URB). So we just set the vertex
149 * size to a multiple of 32B (2 vec4's) in all cases.
151 * The maximum output vertex size is 62*16 = 992 bytes (31 hwords). We
152 * budget that as follows:
154 * 512 bytes for varyings (a varying component is 4 bytes and
155 * gl_MaxGeometryOutputComponents = 128)
156 * 16 bytes overhead for VARYING_SLOT_PSIZ (each varying slot is 16
158 * 16 bytes overhead for gl_Position (we allocate it a slot in the VUE
159 * even if it's not used)
160 * 32 bytes overhead for gl_ClipDistance (we allocate it 2 VUE slots
161 * whenever clip planes are enabled, even if the shader doesn't
162 * write to gl_ClipDistance)
163 * 16 bytes overhead since the VUE size must be a multiple of 32 bytes
164 * (see below)--this causes up to 1 VUE slot to be wasted
165 * 400 bytes available for varying packing overhead
167 * Worst-case varying packing overhead is 3/4 of a varying slot (12 bytes)
168 * per interpolation type, so this is plenty.
171 unsigned output_vertex_size_bytes
= c
.prog_data
.base
.vue_map
.num_slots
* 16;
172 assert(output_vertex_size_bytes
<= GEN7_MAX_GS_OUTPUT_VERTEX_SIZE_BYTES
);
173 c
.prog_data
.output_vertex_size_hwords
=
174 ALIGN(output_vertex_size_bytes
, 32) / 32;
176 /* Compute URB entry size. The maximum allowed URB entry size is 32k.
177 * That divides up as follows:
179 * 64 bytes for the control data header (cut indices or StreamID bits)
180 * 4096 bytes for varyings (a varying component is 4 bytes and
181 * gl_MaxGeometryTotalOutputComponents = 1024)
182 * 4096 bytes overhead for VARYING_SLOT_PSIZ (each varying slot is 16
183 * bytes/vertex and gl_MaxGeometryOutputVertices is 256)
184 * 4096 bytes overhead for gl_Position (we allocate it a slot in the VUE
185 * even if it's not used)
186 * 8192 bytes overhead for gl_ClipDistance (we allocate it 2 VUE slots
187 * whenever clip planes are enabled, even if the shader doesn't
188 * write to gl_ClipDistance)
189 * 4096 bytes overhead since the VUE size must be a multiple of 32
190 * bytes (see above)--this causes up to 1 VUE slot to be wasted
191 * 8128 bytes available for varying packing overhead
193 * Worst-case varying packing overhead is 3/4 of a varying slot per
194 * interpolation type, which works out to 3072 bytes, so this would allow
195 * us to accommodate 2 interpolation types without any danger of running
198 * In practice, the risk of running out of URB space is very small, since
199 * the above figures are all worst-case, and most of them scale with the
200 * number of output vertices. So we'll just calculate the amount of space
201 * we need, and if it's too large, fail to compile.
203 unsigned output_size_bytes
=
204 c
.prog_data
.output_vertex_size_hwords
* 32 * gp
->program
.VerticesOut
;
205 output_size_bytes
+= 32 * c
.prog_data
.control_data_header_size_hwords
;
207 /* Broadwell stores "Vertex Count" as a full 8 DWord (32 byte) URB output,
208 * which comes before the control header.
211 output_size_bytes
+= 32;
213 assert(output_size_bytes
>= 1);
214 if (output_size_bytes
> GEN7_MAX_GS_URB_ENTRY_SIZE_BYTES
)
217 /* URB entry sizes are stored as a multiple of 64 bytes. */
218 c
.prog_data
.base
.urb_entry_size
= ALIGN(output_size_bytes
, 64) / 64;
220 c
.prog_data
.output_topology
=
221 get_hw_prim_for_gl_prim(gp
->program
.OutputType
);
223 brw_compute_vue_map(brw
, &c
.input_vue_map
, c
.key
.input_varyings
);
225 /* GS inputs are read from the VUE 256 bits (2 vec4's) at a time, so we
226 * need to program a URB read length of ceiling(num_slots / 2).
228 c
.prog_data
.base
.urb_read_length
= (c
.input_vue_map
.num_slots
+ 1) / 2;
230 void *mem_ctx
= ralloc_context(NULL
);
231 unsigned program_size
;
232 const unsigned *program
=
233 brw_gs_emit(brw
, prog
, &c
, mem_ctx
, &program_size
);
234 if (program
== NULL
) {
235 ralloc_free(mem_ctx
);
239 /* Scratch space is used for register spilling */
240 if (c
.base
.last_scratch
) {
241 perf_debug("Geometry shader triggered register spilling. "
242 "Try reducing the number of live vec4 values to "
243 "improve performance.\n");
245 c
.prog_data
.base
.total_scratch
246 = brw_get_scratch_size(c
.base
.last_scratch
*REG_SIZE
);
248 brw_get_scratch_bo(brw
, &stage_state
->scratch_bo
,
249 c
.prog_data
.base
.total_scratch
* brw
->max_gs_threads
);
252 brw_upload_cache(&brw
->cache
, BRW_GS_PROG
,
253 &c
.key
, sizeof(c
.key
),
254 program
, program_size
,
255 &c
.prog_data
, sizeof(c
.prog_data
),
256 &stage_state
->prog_offset
, &brw
->gs
.prog_data
);
257 ralloc_free(mem_ctx
);
264 brw_upload_gs_prog(struct brw_context
*brw
)
266 struct gl_context
*ctx
= &brw
->ctx
;
267 struct brw_stage_state
*stage_state
= &brw
->gs
.base
;
268 struct brw_gs_prog_key key
;
269 /* BRW_NEW_GEOMETRY_PROGRAM */
270 struct brw_geometry_program
*gp
=
271 (struct brw_geometry_program
*) brw
->geometry_program
;
274 /* No geometry shader. Vertex data just passes straight through. */
275 if (brw
->state
.dirty
.brw
& BRW_NEW_VUE_MAP_VS
) {
276 brw
->vue_map_geom_out
= brw
->vue_map_vs
;
277 brw
->state
.dirty
.brw
|= BRW_NEW_VUE_MAP_GEOM_OUT
;
280 /* Other state atoms had better not try to access prog_data, since
281 * there's no GS program.
283 brw
->gs
.prog_data
= NULL
;
284 brw
->gs
.base
.prog_data
= NULL
;
289 struct gl_program
*prog
= &gp
->program
.Base
;
291 memset(&key
, 0, sizeof(key
));
293 key
.base
.program_string_id
= gp
->id
;
294 brw_setup_vec4_key_clip_info(brw
, &key
.base
,
295 gp
->program
.Base
.UsesClipDistanceOut
);
297 /* _NEW_LIGHT | _NEW_BUFFERS */
298 key
.base
.clamp_vertex_color
= ctx
->Light
._ClampVertexColor
;
301 brw_populate_sampler_prog_key_data(ctx
, prog
, stage_state
->sampler_count
,
304 /* BRW_NEW_VUE_MAP_VS */
305 key
.input_varyings
= brw
->vue_map_vs
.slots_valid
;
307 if (!brw_search_cache(&brw
->cache
, BRW_GS_PROG
,
309 &stage_state
->prog_offset
, &brw
->gs
.prog_data
)) {
311 do_gs_prog(brw
, ctx
->_Shader
->CurrentProgram
[MESA_SHADER_GEOMETRY
], gp
,
316 brw
->gs
.base
.prog_data
= &brw
->gs
.prog_data
->base
.base
;
318 if (memcmp(&brw
->vs
.prog_data
->base
.vue_map
, &brw
->vue_map_geom_out
,
319 sizeof(brw
->vue_map_geom_out
)) != 0) {
320 brw
->vue_map_geom_out
= brw
->gs
.prog_data
->base
.vue_map
;
321 brw
->state
.dirty
.brw
|= BRW_NEW_VUE_MAP_GEOM_OUT
;
326 const struct brw_tracked_state brw_gs_prog
= {
328 .mesa
= (_NEW_LIGHT
| _NEW_BUFFERS
| _NEW_TEXTURE
),
329 .brw
= BRW_NEW_GEOMETRY_PROGRAM
| BRW_NEW_VUE_MAP_VS
,
331 .emit
= brw_upload_gs_prog
336 brw_gs_precompile(struct gl_context
*ctx
, struct gl_shader_program
*prog
)
338 struct brw_context
*brw
= brw_context(ctx
);
339 struct brw_gs_prog_key key
;
340 uint32_t old_prog_offset
= brw
->gs
.base
.prog_offset
;
341 struct brw_gs_prog_data
*old_prog_data
= brw
->gs
.prog_data
;
344 if (!prog
->_LinkedShaders
[MESA_SHADER_GEOMETRY
])
347 struct gl_geometry_program
*gp
= (struct gl_geometry_program
*)
348 prog
->_LinkedShaders
[MESA_SHADER_GEOMETRY
]->Program
;
349 struct brw_geometry_program
*bgp
= brw_geometry_program(gp
);
351 memset(&key
, 0, sizeof(key
));
353 brw_vec4_setup_prog_key_for_precompile(ctx
, &key
.base
, bgp
->id
, &gp
->Base
);
355 /* Assume that the set of varyings coming in from the vertex shader exactly
356 * matches what the geometry shader requires.
358 key
.input_varyings
= gp
->Base
.InputsRead
;
360 success
= do_gs_prog(brw
, prog
, bgp
, &key
);
362 brw
->gs
.base
.prog_offset
= old_prog_offset
;
363 brw
->gs
.prog_data
= old_prog_data
;
370 brw_gs_prog_data_compare(const void *in_a
, const void *in_b
)
372 const struct brw_gs_prog_data
*a
= in_a
;
373 const struct brw_gs_prog_data
*b
= in_b
;
375 /* Compare the base structure. */
376 if (!brw_stage_prog_data_compare(&a
->base
.base
, &b
->base
.base
))
379 /* Compare the rest of the struct. */
380 const unsigned offset
= sizeof(struct brw_stage_prog_data
);
381 if (memcmp(((char *) a
) + offset
, ((char *) b
) + offset
,
382 sizeof(struct brw_gs_prog_data
) - offset
)) {