/*
 Copyright (C) Intel Corp. 2006. All Rights Reserved.
 Intel funded Tungsten Graphics to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keithw@vmware.com>
 */


#include "main/compiler.h"
#include "brw_context.h"
#include "brw_vs.h"
#include "brw_util.h"
#include "brw_state.h"
#include "program/prog_print.h"
#include "program/prog_parameter.h"

#include "util/ralloc.h"

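/**
 * Assign the next free VUE slot to the given varying, recording the mapping
 * in both directions (varying_to_slot and slot_to_varying).
 */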
static inline void assign_vue_slot(struct brw_vue_map *vue_map,
                                   int varying)
{
   /* Make sure this varying hasn't been assigned a slot already */
   assert(vue_map->varying_to_slot[varying] == -1);

   vue_map->varying_to_slot[varying] = vue_map->num_slots;
   vue_map->slot_to_varying[vue_map->num_slots++] = varying;
}

/**
 * Compute the VUE map for a vertex shader program.
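 *
 * For example, on Gen6+ a shader that writes only gl_Position still ends up
 * with slot 0 = VARYING_SLOT_PSIZ (the VUE header) and slot 1 =
 * VARYING_SLOT_POS, since those slots are assigned unconditionally below.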
 */
void
brw_compute_vue_map(struct brw_context *brw, struct brw_vue_map *vue_map,
                    GLbitfield64 slots_valid)
{
   vue_map->slots_valid = slots_valid;
   int i;

   /* gl_Layer and gl_ViewportIndex don't get their own varying slots -- they
    * are stored in the first VUE slot (VARYING_SLOT_PSIZ).
    */
   slots_valid &= ~(VARYING_BIT_LAYER | VARYING_BIT_VIEWPORT);

   /* Make sure that the values we store in vue_map->varying_to_slot and
    * vue_map->slot_to_varying won't overflow the signed chars that are used
    * to store them.  Note that since vue_map->slot_to_varying sometimes holds
    * values equal to BRW_VARYING_SLOT_COUNT, we need to ensure that
    * BRW_VARYING_SLOT_COUNT is <= 127, not 128.
    */
   STATIC_ASSERT(BRW_VARYING_SLOT_COUNT <= 127);

   vue_map->num_slots = 0;
   for (i = 0; i < BRW_VARYING_SLOT_COUNT; ++i) {
      vue_map->varying_to_slot[i] = -1;
      vue_map->slot_to_varying[i] = BRW_VARYING_SLOT_COUNT;
   }

   /* VUE header: format depends on chip generation and whether clipping is
    * enabled.
    */
   if (brw->gen < 6) {
      /* There are 8 dwords in the VUE header pre-Ironlake:
       * dword 0-3 is indices, point width, clip flags.
       * dword 4-7 is the NDC position.
       * dword 8-11 is the first vertex data.
       *
       * On Ironlake the VUE header is nominally 20 dwords, but the hardware
       * will accept the same header layout as Gen4 (and should be a bit
       * faster).
       */
      assign_vue_slot(vue_map, VARYING_SLOT_PSIZ);
      assign_vue_slot(vue_map, BRW_VARYING_SLOT_NDC);
      assign_vue_slot(vue_map, VARYING_SLOT_POS);
   } else {
      /* There are 8 or 16 DWs (D0-D15) in the VUE header on Sandybridge:
       * dword 0-3 of the header is indices, point width, clip flags.
       * dword 4-7 is the 4D space position.
       * dword 8-15 of the vertex header is the user clip distance if
       * enabled.
       * dword 8-11 or 16-19 is the first vertex element data we fill.
       */
      assign_vue_slot(vue_map, VARYING_SLOT_PSIZ);
      assign_vue_slot(vue_map, VARYING_SLOT_POS);
      if (slots_valid & BITFIELD64_BIT(VARYING_SLOT_CLIP_DIST0))
         assign_vue_slot(vue_map, VARYING_SLOT_CLIP_DIST0);
      if (slots_valid & BITFIELD64_BIT(VARYING_SLOT_CLIP_DIST1))
         assign_vue_slot(vue_map, VARYING_SLOT_CLIP_DIST1);

      /* front and back colors need to be consecutive so that we can use
       * ATTRIBUTE_SWIZZLE_INPUTATTR_FACING to swizzle them when doing
       * two-sided color.
       */
      if (slots_valid & BITFIELD64_BIT(VARYING_SLOT_COL0))
         assign_vue_slot(vue_map, VARYING_SLOT_COL0);
      if (slots_valid & BITFIELD64_BIT(VARYING_SLOT_BFC0))
         assign_vue_slot(vue_map, VARYING_SLOT_BFC0);
      if (slots_valid & BITFIELD64_BIT(VARYING_SLOT_COL1))
         assign_vue_slot(vue_map, VARYING_SLOT_COL1);
      if (slots_valid & BITFIELD64_BIT(VARYING_SLOT_BFC1))
         assign_vue_slot(vue_map, VARYING_SLOT_BFC1);
   }

   /* The hardware doesn't care about the rest of the vertex outputs, so just
    * assign them contiguously.  Don't reassign outputs that already have a
    * slot.
    *
    * We generally don't need to assign a slot for VARYING_SLOT_CLIP_VERTEX,
    * since it's encoded as the clip distances by emit_clip_distances().
    * However, it may be output by transform feedback, and we'd rather not
    * recompute state when TF changes, so we just always include it.
    */
   for (int i = 0; i < VARYING_SLOT_MAX; ++i) {
      if ((slots_valid & BITFIELD64_BIT(i)) &&
          vue_map->varying_to_slot[i] == -1) {
         assign_vue_slot(vue_map, i);
      }
   }
}


/**
 * Decide which set of clip planes should be used when clipping via
 * gl_Position or gl_ClipVertex.
 */
gl_clip_plane *brw_select_clip_planes(struct gl_context *ctx)
{
   if (ctx->_Shader->CurrentProgram[MESA_SHADER_VERTEX]) {
      /* There is currently a GLSL vertex shader, so clip according to GLSL
       * rules, which means compare gl_ClipVertex (or gl_Position, if
       * gl_ClipVertex wasn't assigned) against the eye-coordinate clip planes
       * that were stored in EyeUserPlane at the time the clip planes were
       * specified.
       */
      return ctx->Transform.EyeUserPlane;
   } else {
      /* Either we are using fixed function or an ARB vertex program.  In
       * either case the clip planes are going to be compared against
       * gl_Position (which is in clip coordinates) so we have to clip using
       * _ClipUserPlane, which was transformed into clip coordinates by Mesa
       * core.
       */
      return ctx->Transform._ClipUserPlane;
   }
}


bool
brw_vs_prog_data_compare(const void *in_a, const void *in_b)
{
   const struct brw_vs_prog_data *a = in_a;
   const struct brw_vs_prog_data *b = in_b;

   /* Compare the base structure. */
   if (!brw_stage_prog_data_compare(&a->base.base, &b->base.base))
      return false;

   /* Compare the rest of the struct. */
   const unsigned offset = sizeof(struct brw_stage_prog_data);
   if (memcmp(((char *) a) + offset, ((char *) b) + offset,
              sizeof(struct brw_vs_prog_data) - offset)) {
      return false;
   }

   return true;
}

static bool
do_vs_prog(struct brw_context *brw,
           struct gl_shader_program *prog,
           struct brw_vertex_program *vp,
           struct brw_vs_prog_key *key)
{
   GLuint program_size;
   const GLuint *program;
   struct brw_vs_compile c;
   struct brw_vs_prog_data prog_data;
   struct brw_stage_prog_data *stage_prog_data = &prog_data.base.base;
   void *mem_ctx;
   int i;
   struct gl_shader *vs = NULL;

   if (prog)
      vs = prog->_LinkedShaders[MESA_SHADER_VERTEX];

   memset(&c, 0, sizeof(c));
   memcpy(&c.key, key, sizeof(*key));
   memset(&prog_data, 0, sizeof(prog_data));

   mem_ctx = ralloc_context(NULL);

   c.vp = vp;

   /* Allocate the references to the uniforms that will end up in the
    * prog_data associated with the compiled program, and which will be freed
    * by the state cache.
    */
   int param_count;
   if (vs) {
      /* We add padding around uniform values below vec4 size, with the worst
       * case being a float value that gets blown up to a vec4, so be
       * conservative here.
       */
      param_count = vs->num_uniform_components * 4;

   } else {
      param_count = vp->program.Base.Parameters->NumParameters * 4;
   }
   /* vec4_visitor::setup_uniform_clipplane_values() also uploads user clip
    * planes as uniforms.
    */
   param_count += c.key.base.nr_userclip_plane_consts * 4;

   stage_prog_data->param =
      rzalloc_array(NULL, const gl_constant_value *, param_count);
   stage_prog_data->pull_param =
      rzalloc_array(NULL, const gl_constant_value *, param_count);

   /* Set nr_params here NOT to the size of the param and pull_param arrays,
    * but to the number of uniform components vec4_visitor needs;
    * vec4_visitor::setup_uniforms() will set it back to the proper value.
    */
   stage_prog_data->nr_params = ALIGN(param_count, 4) / 4;
   if (vs) {
      stage_prog_data->nr_params += vs->num_samplers;
   }

   GLbitfield64 outputs_written = vp->program.Base.OutputsWritten;
   prog_data.inputs_read = vp->program.Base.InputsRead;

   if (c.key.copy_edgeflag) {
      outputs_written |= BITFIELD64_BIT(VARYING_SLOT_EDGE);
      prog_data.inputs_read |= VERT_BIT_EDGEFLAG;
   }

   if (brw->gen < 6) {
      /* Put dummy slots into the VUE for the SF to put the replaced
       * point sprite coords in.  We shouldn't need these dummy slots,
       * which take up precious URB space, but it would mean that the SF
       * doesn't get nice aligned pairs of input coords into output
       * coords, which would be a pain to handle.
       */
      for (i = 0; i < 8; i++) {
         if (c.key.point_coord_replace & (1 << i))
            outputs_written |= BITFIELD64_BIT(VARYING_SLOT_TEX0 + i);
      }

      /* if back colors are written, allocate slots for front colors too */
      if (outputs_written & BITFIELD64_BIT(VARYING_SLOT_BFC0))
         outputs_written |= BITFIELD64_BIT(VARYING_SLOT_COL0);
      if (outputs_written & BITFIELD64_BIT(VARYING_SLOT_BFC1))
         outputs_written |= BITFIELD64_BIT(VARYING_SLOT_COL1);
   }

   /* In order for legacy clipping to work, we need to populate the clip
    * distance varying slots whenever clipping is enabled, even if the vertex
    * shader doesn't write to gl_ClipDistance.
    */
   if (c.key.base.userclip_active) {
      outputs_written |= BITFIELD64_BIT(VARYING_SLOT_CLIP_DIST0);
      outputs_written |= BITFIELD64_BIT(VARYING_SLOT_CLIP_DIST1);
   }

   brw_compute_vue_map(brw, &prog_data.base.vue_map, outputs_written);

   if (0) {
      _mesa_fprint_program_opt(stderr, &c.vp->program.Base, PROG_PRINT_DEBUG,
                               true);
   }

   /* Emit GEN4 code.
    */
   program = brw_vs_emit(brw, prog, &c, &prog_data, mem_ctx, &program_size);
   if (program == NULL) {
      ralloc_free(mem_ctx);
      return false;
   }

   /* Scratch space is used for register spilling */
   if (c.base.last_scratch) {
      perf_debug("Vertex shader triggered register spilling.  "
                 "Try reducing the number of live vec4 values to "
                 "improve performance.\n");

      prog_data.base.base.total_scratch
         = brw_get_scratch_size(c.base.last_scratch*REG_SIZE);

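      /* Size the scratch buffer for the maximum number of VS threads that
       * could be spilling at the same time.
       */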
      brw_get_scratch_bo(brw, &brw->vs.base.scratch_bo,
                         prog_data.base.base.total_scratch *
                         brw->max_vs_threads);
   }

   brw_upload_cache(&brw->cache, BRW_VS_PROG,
                    &c.key, sizeof(c.key),
                    program, program_size,
                    &prog_data, sizeof(prog_data),
                    &brw->vs.base.prog_offset, &brw->vs.prog_data);
   ralloc_free(mem_ctx);

   return true;
}

static bool
key_debug(struct brw_context *brw, const char *name, int a, int b)
{
   if (a != b) {
      perf_debug("  %s %d->%d\n", name, a, b);
      return true;
   }
   return false;
}

void
brw_vs_debug_recompile(struct brw_context *brw,
                       struct gl_shader_program *prog,
                       const struct brw_vs_prog_key *key)
{
   struct brw_cache_item *c = NULL;
   const struct brw_vs_prog_key *old_key = NULL;
   bool found = false;

   perf_debug("Recompiling vertex shader for program %d\n", prog->Name);

   for (unsigned int i = 0; i < brw->cache.size; i++) {
      for (c = brw->cache.items[i]; c; c = c->next) {
         if (c->cache_id == BRW_VS_PROG) {
            old_key = c->key;

            if (old_key->base.program_string_id == key->base.program_string_id)
               break;
         }
      }
      if (c)
         break;
   }

   if (!c) {
      perf_debug("  Didn't find previous compile in the shader cache for "
                 "debug\n");
      return;
   }

   for (unsigned int i = 0; i < VERT_ATTRIB_MAX; i++) {
      found |= key_debug(brw, "Vertex attrib w/a flags",
                         old_key->gl_attrib_wa_flags[i],
                         key->gl_attrib_wa_flags[i]);
   }

   found |= key_debug(brw, "user clip flags",
                      old_key->base.userclip_active, key->base.userclip_active);

   found |= key_debug(brw, "user clipping planes as push constants",
                      old_key->base.nr_userclip_plane_consts,
                      key->base.nr_userclip_plane_consts);

   found |= key_debug(brw, "copy edgeflag",
                      old_key->copy_edgeflag, key->copy_edgeflag);
   found |= key_debug(brw, "PointCoord replace",
                      old_key->point_coord_replace, key->point_coord_replace);
   found |= key_debug(brw, "vertex color clamping",
                      old_key->base.clamp_vertex_color, key->base.clamp_vertex_color);

   found |= brw_debug_recompile_sampler_key(brw, &old_key->base.tex,
                                            &key->base.tex);

   if (!found) {
      perf_debug("  Something else\n");
   }
}


void
brw_setup_vec4_key_clip_info(struct brw_context *brw,
                             struct brw_vec4_prog_key *key,
                             bool program_uses_clip_distance)
{
   struct gl_context *ctx = &brw->ctx;

   key->userclip_active = (ctx->Transform.ClipPlanesEnabled != 0);
   if (key->userclip_active && !program_uses_clip_distance) {
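      /* Upload one vec4 per clip plane up to and including the highest
       * enabled one; e.g. ClipPlanesEnabled == 0x9 (planes 0 and 3) gives
       * _mesa_logbase2(0x9) + 1 == 4 plane constants.
       */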
      key->nr_userclip_plane_consts
         = _mesa_logbase2(ctx->Transform.ClipPlanesEnabled) + 1;
   }
}


static void brw_upload_vs_prog(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   struct brw_vs_prog_key key;
   /* BRW_NEW_VERTEX_PROGRAM */
   struct brw_vertex_program *vp =
      (struct brw_vertex_program *)brw->vertex_program;
   struct gl_program *prog = (struct gl_program *) brw->vertex_program;
   int i;

   memset(&key, 0, sizeof(key));

   /* Just upload the program verbatim for now.  Always send it all
    * the inputs it asks for, whether they are varying or not.
    */
   key.base.program_string_id = vp->id;
   brw_setup_vec4_key_clip_info(brw, &key.base,
                                vp->program.Base.UsesClipDistanceOut);

   /* _NEW_POLYGON */
   if (brw->gen < 6) {
      key.copy_edgeflag = (ctx->Polygon.FrontMode != GL_FILL ||
                           ctx->Polygon.BackMode != GL_FILL);
   }

   /* _NEW_LIGHT | _NEW_BUFFERS */
   key.base.clamp_vertex_color = ctx->Light._ClampVertexColor;

   /* _NEW_POINT */
   if (brw->gen < 6 && ctx->Point.PointSprite) {
      for (i = 0; i < 8; i++) {
         if (ctx->Point.CoordReplace[i])
            key.point_coord_replace |= (1 << i);
      }
   }

   /* _NEW_TEXTURE */
   brw_populate_sampler_prog_key_data(ctx, prog, brw->vs.base.sampler_count,
                                      &key.base.tex);

   /* BRW_NEW_VERTICES */
   if (brw->gen < 8 && !brw->is_haswell) {
      /* Prior to Haswell, the hardware can't natively support GL_FIXED or
       * 2_10_10_10_REV vertex formats.  Set appropriate workaround flags.
       */
      for (i = 0; i < VERT_ATTRIB_MAX; i++) {
         if (!(vp->program.Base.InputsRead & BITFIELD64_BIT(i)))
            continue;

         uint8_t wa_flags = 0;

         switch (brw->vb.inputs[i].glarray->Type) {

         case GL_FIXED:
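            /* For GL_FIXED, wa_flags holds the attribute's component count
             * (glarray->Size), which the attribute workaround code uses when
             * converting the fixed-point values.
             */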
            wa_flags = brw->vb.inputs[i].glarray->Size;
            break;

         case GL_INT_2_10_10_10_REV:
            wa_flags |= BRW_ATTRIB_WA_SIGN;
            /* fallthrough */

         case GL_UNSIGNED_INT_2_10_10_10_REV:
            if (brw->vb.inputs[i].glarray->Format == GL_BGRA)
               wa_flags |= BRW_ATTRIB_WA_BGRA;

            if (brw->vb.inputs[i].glarray->Normalized)
               wa_flags |= BRW_ATTRIB_WA_NORMALIZE;
            else if (!brw->vb.inputs[i].glarray->Integer)
               wa_flags |= BRW_ATTRIB_WA_SCALE;

            break;
         }

         key.gl_attrib_wa_flags[i] = wa_flags;
      }
   }

   if (!brw_search_cache(&brw->cache, BRW_VS_PROG,
                         &key, sizeof(key),
                         &brw->vs.base.prog_offset, &brw->vs.prog_data)) {
      bool success =
         do_vs_prog(brw, ctx->_Shader->CurrentProgram[MESA_SHADER_VERTEX], vp,
                    &key);
      (void) success;
      assert(success);
   }
   brw->vs.base.prog_data = &brw->vs.prog_data->base.base;

   if (memcmp(&brw->vs.prog_data->base.vue_map, &brw->vue_map_geom_out,
              sizeof(brw->vue_map_geom_out)) != 0) {
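      /* The VS output layout changed; record the new VUE map and flag it so
       * that state depending on it gets re-emitted.
       */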
      brw->vue_map_vs = brw->vs.prog_data->base.vue_map;
      brw->state.dirty.brw |= BRW_NEW_VUE_MAP_VS;
      if (brw->gen < 6) {
         /* No geometry shader support, so the VS VUE map is the VUE map for
          * the output of the "geometry" portion of the pipeline.
          */
         brw->vue_map_geom_out = brw->vue_map_vs;
         brw->state.dirty.brw |= BRW_NEW_VUE_MAP_GEOM_OUT;
      }
   }
}

/* See brw_vs.c:
 */
const struct brw_tracked_state brw_vs_prog = {
   .dirty = {
      .mesa  = _NEW_BUFFERS |
               _NEW_LIGHT |
               _NEW_POINT |
               _NEW_POLYGON |
               _NEW_TEXTURE |
               _NEW_TRANSFORM,
      .brw   = BRW_NEW_VERTEX_PROGRAM |
               BRW_NEW_VERTICES,
      .cache = 0
   },
   .emit = brw_upload_vs_prog
};

bool
brw_vs_precompile(struct gl_context *ctx,
                  struct gl_shader_program *shader_prog,
                  struct gl_program *prog)
{
   struct brw_context *brw = brw_context(ctx);
   struct brw_vs_prog_key key;
   uint32_t old_prog_offset = brw->vs.base.prog_offset;
   struct brw_vs_prog_data *old_prog_data = brw->vs.prog_data;
   bool success;

   struct gl_vertex_program *vp = (struct gl_vertex_program *) prog;
   struct brw_vertex_program *bvp = brw_vertex_program(vp);

   memset(&key, 0, sizeof(key));

   brw_vec4_setup_prog_key_for_precompile(ctx, &key.base, bvp->id, &vp->Base);

   success = do_vs_prog(brw, shader_prog, bvp, &key);

   brw->vs.base.prog_offset = old_prog_offset;
   brw->vs.prog_data = old_prog_data;

   return success;
}