/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * \file brw_vec4_gs.c
 *
 * State atom for client-programmable geometry shaders, and support code.
 */

#include "brw_vec4_gs.h"
#include "brw_context.h"
#include "brw_vec4_gs_visitor.h"
#include "brw_state.h"


static bool
do_gs_prog(struct brw_context *brw,
           struct gl_shader_program *prog,
           struct brw_geometry_program *gp,
           struct brw_gs_prog_key *key)
{
   struct brw_stage_state *stage_state = &brw->gs.base;
   struct brw_gs_compile c;
   memset(&c, 0, sizeof(c));
   c.key = *key;
   c.gp = gp;

   /* Allocate the references to the uniforms that will end up in the
    * prog_data associated with the compiled program, and which will be freed
    * by the state cache.
    *
    * Note: param_count needs to be num_uniform_components * 4, since we add
    * padding around uniform values below vec4 size, so the worst case is that
    * every uniform is a float which gets padded to the size of a vec4.
    */
   struct gl_shader *gs = prog->_LinkedShaders[MESA_SHADER_GEOMETRY];
   int param_count = gs->num_uniform_components * 4;

   /* We also upload clip plane data as uniforms */
   param_count += MAX_CLIP_PLANES * 4;
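   /* For example (illustrative numbers only): a linked GS with 20 uniform
    * components gets 20 * 4 = 80 param slots, plus MAX_CLIP_PLANES * 4 = 32
    * slots reserved for clip planes, for a param_count of 112.
    */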

   c.prog_data.base.param = rzalloc_array(NULL, const float *, param_count);
   c.prog_data.base.pull_param = rzalloc_array(NULL, const float *, param_count);

   if (gp->program.OutputType == GL_POINTS) {
      /* When the output type is points, the geometry shader may output data
       * to multiple streams, and EndPrimitive() has no effect. So we
       * configure the hardware to interpret the control data as stream ID.
       */
      c.prog_data.control_data_format = GEN7_GS_CONTROL_DATA_FORMAT_GSCTL_SID;

      /* However, StreamID is not yet supported, so we output zero bits of
       * control data per vertex.
       */
      c.control_data_bits_per_vertex = 0;
   } else {
      /* When the output type is triangle_strip or line_strip, EndPrimitive()
       * may be used to terminate the current strip and start a new one
       * (similar to primitive restart), and outputting data to multiple
       * streams is not supported. So we configure the hardware to interpret
       * the control data as EndPrimitive information (a.k.a. "cut bits").
       */
      c.prog_data.control_data_format = GEN7_GS_CONTROL_DATA_FORMAT_GSCTL_CUT;

      /* We only need to output control data if the shader actually calls
       * EndPrimitive().
       */
      c.control_data_bits_per_vertex = gp->program.UsesEndPrimitive ? 1 : 0;
   }
   c.control_data_header_size_bits =
      gp->program.VerticesOut * c.control_data_bits_per_vertex;

   /* 1 HWORD = 32 bytes = 256 bits */
   c.prog_data.control_data_header_size_hwords =
      ALIGN(c.control_data_header_size_bits, 256) / 256;
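   /* For example (illustrative only): a triangle_strip GS with
    * VerticesOut = 256 that calls EndPrimitive() needs 256 cut bits, which
    * is exactly 1 HWORD of control data; a points GS needs 0 HWORDs for now,
    * since it outputs zero control data bits per vertex.
    */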

   GLbitfield64 outputs_written = gp->program.Base.OutputsWritten;

   /* In order for legacy clipping to work, we need to populate the clip
    * distance varying slots whenever clipping is enabled, even if the
    * geometry shader doesn't write to gl_ClipDistance.
    */
   if (c.key.base.userclip_active) {
      outputs_written |= BITFIELD64_BIT(VARYING_SLOT_CLIP_DIST0);
      outputs_written |= BITFIELD64_BIT(VARYING_SLOT_CLIP_DIST1);
   }

   brw_compute_vue_map(brw, &c.prog_data.base.vue_map, outputs_written);

   /* Compute the output vertex size.
    *
    * From the Ivy Bridge PRM, Vol2 Part1 7.2.1.1 3DSTATE_GS - Output Vertex
    * Size (p168):
    *
    *     [0,62] indicating [1,63] 16B units
    *
    *     Specifies the size of each vertex stored in the GS output entry
    *     (following any Control Header data) as a number of 128-bit units
    *     (minus one).
    *
    *     Programming Restrictions: The vertex size must be programmed as a
    *     multiple of 32B units with the following exception: Rendering is
    *     disabled (as per SOL stage state) and the vertex size output by the
    *     GS thread is 16B.
    *
    *     If rendering is enabled (as per SOL state) the vertex size must be
    *     programmed as a multiple of 32B units. In other words, the only time
    *     software can program a vertex size with an odd number of 16B units
    *     is when rendering is disabled.
    *
    * Note: B=bytes in the above text.
    *
    * It doesn't seem worth the extra trouble to optimize the case where the
    * vertex size is 16B (especially since this would require special-casing
    * the GEN assembly that writes to the URB). So we just set the vertex
    * size to a multiple of 32B (2 vec4's) in all cases.
    *
    * The maximum output vertex size is 62*16 = 992 bytes (31 hwords). We
    * budget that as follows:
    *
    *   512 bytes for varyings (a varying component is 4 bytes and
    *             gl_MaxGeometryOutputComponents = 128)
    *    16 bytes overhead for VARYING_SLOT_PSIZ (each varying slot is 16
    *             bytes)
    *    16 bytes overhead for gl_Position (we allocate it a slot in the VUE
    *             even if it's not used)
    *    32 bytes overhead for gl_ClipDistance (we allocate it 2 VUE slots
    *             whenever clip planes are enabled, even if the shader doesn't
    *             write to gl_ClipDistance)
    *    16 bytes overhead since the VUE size must be a multiple of 32 bytes
    *             (see below)--this causes up to 1 VUE slot to be wasted
    *   400 bytes available for varying packing overhead
    *
    * Worst-case varying packing overhead is 3/4 of a varying slot (12 bytes)
    * per interpolation type, so this is plenty.
    */
   unsigned output_vertex_size_bytes = c.prog_data.base.vue_map.num_slots * 16;
   assert(output_vertex_size_bytes <= GEN7_MAX_GS_OUTPUT_VERTEX_SIZE_BYTES);
   c.prog_data.output_vertex_size_hwords =
      ALIGN(output_vertex_size_bytes, 32) / 32;
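   /* For example (illustrative only): a VUE map with 7 slots gives
    * 7 * 16 = 112 bytes per vertex, which rounds up to 128 bytes, i.e.
    * output_vertex_size_hwords = 4.
    */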

   /* Compute URB entry size. The maximum allowed URB entry size is 32k.
    * That divides up as follows:
    *
    *     64 bytes for the control data header (cut indices or StreamID bits)
    *   4096 bytes for varyings (a varying component is 4 bytes and
    *              gl_MaxGeometryTotalOutputComponents = 1024)
    *   4096 bytes overhead for VARYING_SLOT_PSIZ (each varying slot is 16
    *              bytes/vertex and gl_MaxGeometryOutputVertices is 256)
    *   4096 bytes overhead for gl_Position (we allocate it a slot in the VUE
    *              even if it's not used)
    *   8192 bytes overhead for gl_ClipDistance (we allocate it 2 VUE slots
    *              whenever clip planes are enabled, even if the shader doesn't
    *              write to gl_ClipDistance)
    *   4096 bytes overhead since the VUE size must be a multiple of 32
    *              bytes (see above)--this causes up to 1 VUE slot to be wasted
    *   8128 bytes available for varying packing overhead
    *
    * Worst-case varying packing overhead is 3/4 of a varying slot per
    * interpolation type, which works out to 3072 bytes, so this would allow
    * us to accommodate 2 interpolation types without any danger of running
    * out of URB space.
    *
    * In practice, the risk of running out of URB space is very small, since
    * the above figures are all worst-case, and most of them scale with the
    * number of output vertices. So we'll just calculate the amount of space
    * we need, and if it's too large, fail to compile.
    */
   unsigned output_size_bytes =
      c.prog_data.output_vertex_size_hwords * 32 * gp->program.VerticesOut;
   output_size_bytes += 32 * c.prog_data.control_data_header_size_hwords;

   assert(output_size_bytes >= 1);
   if (output_size_bytes > GEN7_MAX_GS_URB_ENTRY_SIZE_BYTES)
      return false;

   /* URB entry sizes are stored as a multiple of 64 bytes. */
   c.prog_data.base.urb_entry_size = ALIGN(output_size_bytes, 64) / 64;
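   /* Continuing the example above (illustrative only): with
    * output_vertex_size_hwords = 4, VerticesOut = 128, and 1 HWORD of
    * control data, output_size_bytes is 4 * 32 * 128 + 32 = 16416, which
    * rounds up to 16448 bytes, i.e. urb_entry_size = 257.
    */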

   c.prog_data.output_topology = prim_to_hw_prim[gp->program.OutputType];

   /* GS inputs are read from the VUE 256 bits (2 vec4's) at a time, so we
    * need to program a URB read length of ceiling(num_slots / 2).
    */
   c.prog_data.base.urb_read_length = (c.key.input_vue_map.num_slots + 1) / 2;
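   /* E.g. an input VUE map with 9 slots gives a read length of 5, since
    * 5 pairs of vec4's cover 10 slots.
    */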

   void *mem_ctx = ralloc_context(NULL);
   unsigned program_size;
   const unsigned *program =
      brw_gs_emit(brw, prog, &c, mem_ctx, &program_size);
   if (program == NULL) {
      ralloc_free(mem_ctx);
      return false;
   }

   /* Scratch space is used for register spilling */
   if (c.base.last_scratch) {
      perf_debug("Geometry shader triggered register spilling. "
                 "Try reducing the number of live vec4 values to "
                 "improve performance.\n");

      c.prog_data.base.total_scratch
         = brw_get_scratch_size(c.base.last_scratch*REG_SIZE);
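      /* total_scratch is the per-thread spill footprint rounded up by
       * brw_get_scratch_size(); the scratch BO below is sized for the
       * maximum number of GS threads so each thread gets its own slice.
       */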

      brw_get_scratch_bo(brw, &stage_state->scratch_bo,
                         c.prog_data.base.total_scratch * brw->max_gs_threads);
   }

   brw_upload_cache(&brw->cache, BRW_GS_PROG,
                    &c.key, sizeof(c.key),
                    program, program_size,
                    &c.prog_data, sizeof(c.prog_data),
                    &stage_state->prog_offset, &brw->gs.prog_data);
   ralloc_free(mem_ctx);

   return true;
}


static void
brw_upload_gs_prog(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   struct brw_stage_state *stage_state = &brw->gs.base;
   struct brw_gs_prog_key key;
   /* BRW_NEW_GEOMETRY_PROGRAM */
   struct brw_geometry_program *gp =
      (struct brw_geometry_program *) brw->geometry_program;

   if (gp == NULL) {
      /* No geometry shader. Vertex data just passes straight through. */
      if (brw->state.dirty.brw & BRW_NEW_VUE_MAP_VS) {
         brw->vue_map_geom_out = brw->vue_map_vs;
         brw->state.dirty.brw |= BRW_NEW_VUE_MAP_GEOM_OUT;
      }
      return;
   }

   struct gl_program *prog = &gp->program.Base;

   memset(&key, 0, sizeof(key));

   key.base.program_string_id = gp->id;
   brw_setup_vec4_key_clip_info(brw, &key.base, gp->program.UsesClipDistance);

   /* _NEW_LIGHT | _NEW_BUFFERS */
   key.base.clamp_vertex_color = ctx->Light._ClampVertexColor;

   /* _NEW_TEXTURE */
   brw_populate_sampler_prog_key_data(ctx, prog, stage_state->sampler_count,
                                      &key.base.tex);

   /* BRW_NEW_VUE_MAP_VS */
   key.input_vue_map = brw->vue_map_vs;

   if (!brw_search_cache(&brw->cache, BRW_GS_PROG,
                         &key, sizeof(key),
                         &stage_state->prog_offset, &brw->gs.prog_data)) {
      bool success = do_gs_prog(brw, ctx->Shader.CurrentGeometryProgram,
                                gp, &key);
      assert(success);
      (void) success;
   }
   if (memcmp(&brw->gs.prog_data->base.vue_map, &brw->vue_map_geom_out,
              sizeof(brw->vue_map_geom_out)) != 0) {
      brw->vue_map_geom_out = brw->gs.prog_data->base.vue_map;
      brw->state.dirty.brw |= BRW_NEW_VUE_MAP_GEOM_OUT;
   }
}


const struct brw_tracked_state brw_gs_prog = {
   .dirty = {
      .mesa = (_NEW_LIGHT | _NEW_BUFFERS | _NEW_TEXTURE),
      .brw = BRW_NEW_GEOMETRY_PROGRAM | BRW_NEW_VUE_MAP_VS,
   },
   .emit = brw_upload_gs_prog
};


bool
brw_gs_prog_data_compare(const void *in_a, const void *in_b,
                         int aux_size, const void *in_key)
{
   const struct brw_gs_prog_data *a = in_a;
   const struct brw_gs_prog_data *b = in_b;

   /* Compare the base vec4 structure. */
   if (!brw_vec4_prog_data_compare(&a->base, &b->base))
      return false;

   /* Compare the rest of the struct. */
   const unsigned offset = sizeof(struct brw_vec4_prog_data);
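   /* Note: this assumes 'base' is the first member of brw_gs_prog_data, so
    * everything past sizeof(struct brw_vec4_prog_data) can be compared as
    * one flat byte range.
    */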
   if (memcmp(((char *) a) + offset, ((char *) b) + offset,
              sizeof(struct brw_gs_prog_data) - offset)) {
      return false;
   }

   return true;
}


void
brw_gs_prog_data_free(const void *in_prog_data)
{
   const struct brw_gs_prog_data *prog_data = in_prog_data;

   brw_vec4_prog_data_free(&prog_data->base);
}