i965: Add _CACHE_ in brw_cache_id enum names.
[mesa.git] / src / mesa / drivers / dri / i965 / brw_gs.c
/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * \file brw_gs.c
 *
 * State atom for client-programmable geometry shaders, and support code.
 */

#include "brw_gs.h"
#include "brw_context.h"
#include "brw_vec4_gs_visitor.h"
#include "brw_state.h"
#include "brw_ff_gs.h"


static bool
do_gs_prog(struct brw_context *brw,
           struct gl_shader_program *prog,
           struct brw_geometry_program *gp,
           struct brw_gs_prog_key *key)
{
   struct brw_stage_state *stage_state = &brw->gs.base;
   struct brw_gs_compile c;
   memset(&c, 0, sizeof(c));
   c.key = *key;
   c.gp = gp;

   c.prog_data.include_primitive_id =
      (gp->program.Base.InputsRead & VARYING_BIT_PRIMITIVE_ID) != 0;

   c.prog_data.invocations = gp->program.Invocations;

   /* Allocate the references to the uniforms that will end up in the
    * prog_data associated with the compiled program, and which will be freed
    * by the state cache.
    *
    * Note: param_count needs to be num_uniform_components * 4, since we add
    * padding around uniform values below vec4 size, so the worst case is that
    * every uniform is a float which gets padded to the size of a vec4.
    */
   struct gl_shader *gs = prog->_LinkedShaders[MESA_SHADER_GEOMETRY];
   int param_count = gs->num_uniform_components * 4;

   /* We also upload clip plane data as uniforms */
   param_count += MAX_CLIP_PLANES * 4;

   c.prog_data.base.base.param =
      rzalloc_array(NULL, const gl_constant_value *, param_count);
   c.prog_data.base.base.pull_param =
      rzalloc_array(NULL, const gl_constant_value *, param_count);
   /* We set nr_params here not to the size of the param and pull_param
    * arrays, but to the number of uniform components vec4_visitor needs.
    * vec4_visitor::setup_uniforms() will set it back to a proper value.
    */
   c.prog_data.base.base.nr_params = ALIGN(param_count, 4) / 4 + gs->num_samplers;
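   /* For example, a GS with 16 float uniform components gives
    * param_count = 16 * 4 = 64, plus MAX_CLIP_PLANES (8) * 4 = 32 for the
    * clip plane uniforms, so 96 gl_constant_value slots are allocated and
    * nr_params starts out as ALIGN(96, 4) / 4 + num_samplers =
    * 24 + num_samplers.
    */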

   if (brw->gen >= 7) {
      if (gp->program.OutputType == GL_POINTS) {
         /* When the output type is points, the geometry shader may output data
          * to multiple streams, and EndPrimitive() has no effect. So we
          * configure the hardware to interpret the control data as stream ID.
          */
         c.prog_data.control_data_format = GEN7_GS_CONTROL_DATA_FORMAT_GSCTL_SID;

         /* We only have to emit control bits if we are using streams */
         if (prog->Geom.UsesStreams)
            c.control_data_bits_per_vertex = 2;
         else
            c.control_data_bits_per_vertex = 0;
      } else {
         /* When the output type is triangle_strip or line_strip, EndPrimitive()
          * may be used to terminate the current strip and start a new one
          * (similar to primitive restart), and outputting data to multiple
          * streams is not supported. So we configure the hardware to interpret
          * the control data as EndPrimitive information (a.k.a. "cut bits").
          */
         c.prog_data.control_data_format = GEN7_GS_CONTROL_DATA_FORMAT_GSCTL_CUT;

         /* We only need to output control data if the shader actually calls
          * EndPrimitive().
          */
         c.control_data_bits_per_vertex = gp->program.UsesEndPrimitive ? 1 : 0;
      }
   } else {
      /* There are no control data bits in gen6. */
      c.control_data_bits_per_vertex = 0;

      /* If it is using transform feedback, enable it */
      if (prog->TransformFeedback.NumVarying)
         c.prog_data.gen6_xfb_enabled = true;
      else
         c.prog_data.gen6_xfb_enabled = false;
   }
   c.control_data_header_size_bits =
      gp->program.VerticesOut * c.control_data_bits_per_vertex;

   /* 1 HWORD = 32 bytes = 256 bits */
   c.prog_data.control_data_header_size_hwords =
      ALIGN(c.control_data_header_size_bits, 256) / 256;
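   /* For example, a gen7 shader declaring max_vertices = 256 with
    * triangle_strip output and an EndPrimitive() call uses 1 cut bit per
    * vertex, so its 256 header bits round up to 1 HWORD; a points shader
    * using streams needs 2 bits per vertex, so 512 bits round up to 2 HWORDs.
    */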

   GLbitfield64 outputs_written = gp->program.Base.OutputsWritten;

   /* In order for legacy clipping to work, we need to populate the clip
    * distance varying slots whenever clipping is enabled, even if the
    * geometry shader doesn't write to gl_ClipDistance.
    */
   if (c.key.base.userclip_active) {
      outputs_written |= BITFIELD64_BIT(VARYING_SLOT_CLIP_DIST0);
      outputs_written |= BITFIELD64_BIT(VARYING_SLOT_CLIP_DIST1);
   }

   brw_compute_vue_map(brw, &c.prog_data.base.vue_map, outputs_written);

   /* Compute the output vertex size.
    *
    * From the Ivy Bridge PRM, Vol2 Part1 7.2.1.1 3DSTATE_GS - Output Vertex
    * Size (p168):
    *
    *     [0,62] indicating [1,63] 16B units
    *
    *     Specifies the size of each vertex stored in the GS output entry
    *     (following any Control Header data) as a number of 128-bit units
    *     (minus one).
    *
    *     Programming Restrictions: The vertex size must be programmed as a
    *     multiple of 32B units with the following exception: Rendering is
    *     disabled (as per SOL stage state) and the vertex size output by the
    *     GS thread is 16B.
    *
    *     If rendering is enabled (as per SOL state) the vertex size must be
    *     programmed as a multiple of 32B units. In other words, the only time
    *     software can program a vertex size with an odd number of 16B units
    *     is when rendering is disabled.
    *
    * Note: B=bytes in the above text.
    *
    * It doesn't seem worth the extra trouble to optimize the case where the
    * vertex size is 16B (especially since this would require special-casing
    * the GEN assembly that writes to the URB). So we just set the vertex
    * size to a multiple of 32B (2 vec4's) in all cases.
    *
    * The maximum output vertex size is 62*16 = 992 bytes (31 hwords). We
    * budget that as follows:
    *
    *   512 bytes for varyings (a varying component is 4 bytes and
    *             gl_MaxGeometryOutputComponents = 128)
    *    16 bytes overhead for VARYING_SLOT_PSIZ (each varying slot is 16
    *             bytes)
    *    16 bytes overhead for gl_Position (we allocate it a slot in the VUE
    *             even if it's not used)
    *    32 bytes overhead for gl_ClipDistance (we allocate it 2 VUE slots
    *             whenever clip planes are enabled, even if the shader doesn't
    *             write to gl_ClipDistance)
    *    16 bytes overhead since the VUE size must be a multiple of 32 bytes
    *             (see below)--this causes up to 1 VUE slot to be wasted
    *   400 bytes available for varying packing overhead
    *
    * Worst-case varying packing overhead is 3/4 of a varying slot (12 bytes)
    * per interpolation type, so this is plenty.
    */
   unsigned output_vertex_size_bytes = c.prog_data.base.vue_map.num_slots * 16;
   assert(brw->gen == 6 ||
          output_vertex_size_bytes <= GEN7_MAX_GS_OUTPUT_VERTEX_SIZE_BYTES);
   c.prog_data.output_vertex_size_hwords =
      ALIGN(output_vertex_size_bytes, 32) / 32;
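   /* For example, a VUE map with 10 slots yields an output vertex of
    * 10 * 16 = 160 bytes, which is stored as ALIGN(160, 32) / 32 = 5 hwords.
    */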

   /* Compute URB entry size. The maximum allowed URB entry size is 32k.
    * That divides up as follows:
    *
    *     64 bytes for the control data header (cut indices or StreamID bits)
    *   4096 bytes for varyings (a varying component is 4 bytes and
    *              gl_MaxGeometryTotalOutputComponents = 1024)
    *   4096 bytes overhead for VARYING_SLOT_PSIZ (each varying slot is 16
    *              bytes/vertex and gl_MaxGeometryOutputVertices is 256)
    *   4096 bytes overhead for gl_Position (we allocate it a slot in the VUE
    *              even if it's not used)
    *   8192 bytes overhead for gl_ClipDistance (we allocate it 2 VUE slots
    *              whenever clip planes are enabled, even if the shader doesn't
    *              write to gl_ClipDistance)
    *   4096 bytes overhead since the VUE size must be a multiple of 32
    *              bytes (see above)--this causes up to 1 VUE slot to be wasted
    *   8128 bytes available for varying packing overhead
    *
    * Worst-case varying packing overhead is 3/4 of a varying slot per
    * interpolation type, which works out to 3072 bytes, so this would allow
    * us to accommodate 2 interpolation types without any danger of running
    * out of URB space.
    *
    * In practice, the risk of running out of URB space is very small, since
    * the above figures are all worst-case, and most of them scale with the
    * number of output vertices. So we'll just calculate the amount of space
    * we need, and if it's too large, fail to compile.
    *
    * The above is for gen7+ where we have a single URB entry that will hold
    * all the output. In gen6, we will have to allocate URB entries for every
    * vertex we emit, so our URB entries only need to be large enough to hold
    * a single vertex. Also, gen6 does not have a control data header.
    */
   unsigned output_size_bytes;
   if (brw->gen >= 7) {
      output_size_bytes =
         c.prog_data.output_vertex_size_hwords * 32 * gp->program.VerticesOut;
      output_size_bytes += 32 * c.prog_data.control_data_header_size_hwords;
   } else {
      output_size_bytes = c.prog_data.output_vertex_size_hwords * 32;
   }

   /* Broadwell stores "Vertex Count" as a full 8 DWord (32 byte) URB output,
    * which comes before the control header.
    */
   if (brw->gen >= 8)
      output_size_bytes += 32;

   assert(output_size_bytes >= 1);
   int max_output_size_bytes = GEN7_MAX_GS_URB_ENTRY_SIZE_BYTES;
   if (brw->gen == 6)
      max_output_size_bytes = GEN6_MAX_GS_URB_ENTRY_SIZE_BYTES;
   if (output_size_bytes > max_output_size_bytes)
      return false;

   /* URB entry sizes are stored as a multiple of 64 bytes in gen7+ and
    * a multiple of 128 bytes in gen6.
    */
   if (brw->gen >= 7)
      c.prog_data.base.urb_entry_size = ALIGN(output_size_bytes, 64) / 64;
   else
      c.prog_data.base.urb_entry_size = ALIGN(output_size_bytes, 128) / 128;
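   /* For example, with a 5-hword output vertex, max_vertices = 4, and a
    * 1-hword control data header, gen7 needs 5 * 32 * 4 + 32 = 672 bytes,
    * so urb_entry_size becomes ALIGN(672, 64) / 64 = 11 64-byte units.
    * Gen8 would add another 32 bytes for the vertex count slot.
    */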

   c.prog_data.output_topology =
      get_hw_prim_for_gl_prim(gp->program.OutputType);

   brw_compute_vue_map(brw, &c.input_vue_map, c.key.input_varyings);

   /* GS inputs are read from the VUE 256 bits (2 vec4's) at a time, so we
    * need to program a URB read length of ceiling(num_slots / 2).
    */
   c.prog_data.base.urb_read_length = (c.input_vue_map.num_slots + 1) / 2;
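   /* For example, an input VUE map with 9 slots gives a URB read length of
    * (9 + 1) / 2 = 5, i.e. five 256-bit reads per input vertex.
    */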

   void *mem_ctx = ralloc_context(NULL);
   unsigned program_size;
   const unsigned *program =
      brw_gs_emit(brw, prog, &c, mem_ctx, &program_size);
   if (program == NULL) {
      ralloc_free(mem_ctx);
      return false;
   }

   /* Scratch space is used for register spilling */
   if (c.base.last_scratch) {
      perf_debug("Geometry shader triggered register spilling. "
                 "Try reducing the number of live vec4 values to "
                 "improve performance.\n");

      c.prog_data.base.base.total_scratch
         = brw_get_scratch_size(c.base.last_scratch * REG_SIZE);

      brw_get_scratch_bo(brw, &stage_state->scratch_bo,
                         c.prog_data.base.base.total_scratch *
                         brw->max_gs_threads);
   }

   brw_upload_cache(&brw->cache, BRW_CACHE_GS_PROG,
                    &c.key, sizeof(c.key),
                    program, program_size,
                    &c.prog_data, sizeof(c.prog_data),
                    &stage_state->prog_offset, &brw->gs.prog_data);
   ralloc_free(mem_ctx);

   return true;
}


static void
brw_upload_gs_prog(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   struct brw_stage_state *stage_state = &brw->gs.base;
   struct brw_gs_prog_key key;
   /* BRW_NEW_GEOMETRY_PROGRAM */
   struct brw_geometry_program *gp =
      (struct brw_geometry_program *) brw->geometry_program;

   if (gp == NULL) {
      /* No geometry shader. Vertex data just passes straight through. */
      if (brw->state.dirty.brw & BRW_NEW_VUE_MAP_VS) {
         brw->vue_map_geom_out = brw->vue_map_vs;
         brw->state.dirty.brw |= BRW_NEW_VUE_MAP_GEOM_OUT;
      }

      if (brw->gen == 6 &&
          (brw->state.dirty.brw & BRW_NEW_TRANSFORM_FEEDBACK)) {
         gen6_brw_upload_ff_gs_prog(brw);
         return;
      }

      /* Other state atoms had better not try to access prog_data, since
       * there's no GS program.
       */
      brw->gs.prog_data = NULL;
      brw->gs.base.prog_data = NULL;

      return;
   }

   struct gl_program *prog = &gp->program.Base;

   memset(&key, 0, sizeof(key));

   key.base.program_string_id = gp->id;
   brw_setup_vec4_key_clip_info(brw, &key.base,
                                gp->program.Base.UsesClipDistanceOut);

   /* _NEW_LIGHT | _NEW_BUFFERS */
   key.base.clamp_vertex_color = ctx->Light._ClampVertexColor;

   /* _NEW_TEXTURE */
   brw_populate_sampler_prog_key_data(ctx, prog, stage_state->sampler_count,
                                      &key.base.tex);

   /* BRW_NEW_VUE_MAP_VS */
   key.input_varyings = brw->vue_map_vs.slots_valid;
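   /* Because the set of varyings written by the VS is part of the key, a
    * change in the VS output layout selects a different GS specialization
    * from the program cache (or triggers a recompile below).
    */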

   if (!brw_search_cache(&brw->cache, BRW_CACHE_GS_PROG,
                         &key, sizeof(key),
                         &stage_state->prog_offset, &brw->gs.prog_data)) {
      bool success =
         do_gs_prog(brw, ctx->_Shader->CurrentProgram[MESA_SHADER_GEOMETRY], gp,
                    &key);
      assert(success);
      (void)success;
   }
   brw->gs.base.prog_data = &brw->gs.prog_data->base.base;

   if (memcmp(&brw->gs.prog_data->base.vue_map, &brw->vue_map_geom_out,
              sizeof(brw->vue_map_geom_out)) != 0) {
      brw->vue_map_geom_out = brw->gs.prog_data->base.vue_map;
      brw->state.dirty.brw |= BRW_NEW_VUE_MAP_GEOM_OUT;
   }
}


const struct brw_tracked_state brw_gs_prog = {
   .dirty = {
      .mesa  = _NEW_BUFFERS |
               _NEW_LIGHT |
               _NEW_TEXTURE,
      .brw   = BRW_NEW_GEOMETRY_PROGRAM |
               BRW_NEW_TRANSFORM_FEEDBACK |
               BRW_NEW_VUE_MAP_VS,
   },
   .emit = brw_upload_gs_prog
};


bool
brw_gs_precompile(struct gl_context *ctx,
                  struct gl_shader_program *shader_prog,
                  struct gl_program *prog)
{
   struct brw_context *brw = brw_context(ctx);
   struct brw_gs_prog_key key;
   uint32_t old_prog_offset = brw->gs.base.prog_offset;
   struct brw_gs_prog_data *old_prog_data = brw->gs.prog_data;
   bool success;

   struct gl_geometry_program *gp = (struct gl_geometry_program *) prog;
   struct brw_geometry_program *bgp = brw_geometry_program(gp);

   memset(&key, 0, sizeof(key));

   brw_vec4_setup_prog_key_for_precompile(ctx, &key.base, bgp->id, &gp->Base);

   /* Assume that the set of varyings coming in from the vertex shader exactly
    * matches what the geometry shader requires.
    */
   key.input_varyings = gp->Base.InputsRead;

   success = do_gs_prog(brw, shader_prog, bgp, &key);

   brw->gs.base.prog_offset = old_prog_offset;
   brw->gs.prog_data = old_prog_data;

   return success;
}


bool
brw_gs_prog_data_compare(const void *in_a, const void *in_b)
{
   const struct brw_gs_prog_data *a = in_a;
   const struct brw_gs_prog_data *b = in_b;

   /* Compare the base structure. */
   if (!brw_stage_prog_data_compare(&a->base.base, &b->base.base))
      return false;

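   /* brw_stage_prog_data contains pointers (the param and pull_param arrays),
    * so a byte-wise compare of that portion would only compare the pointer
    * values, not the data they reference; only the fields past the base
    * structure are safe to compare with memcmp below.
    */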
   /* Compare the rest of the struct. */
   const unsigned offset = sizeof(struct brw_stage_prog_data);
   if (memcmp(((char *) a) + offset, ((char *) b) + offset,
              sizeof(struct brw_gs_prog_data) - offset)) {
      return false;
   }

   return true;
}