/*
 Copyright (C) Intel Corp.  2006.  All Rights Reserved.
 Intel funded Tungsten Graphics to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keithw@vmware.com>
 */


#include "main/compiler.h"
#include "brw_context.h"
#include "brw_vs.h"
#include "brw_util.h"
#include "brw_state.h"
#include "program/prog_print.h"
#include "program/prog_parameter.h"

#include "glsl/ralloc.h"

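/**
 * Record that the given varying occupies the next free slot in the VUE map,
 * updating both the varying -> slot and slot -> varying tables.
 */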
static inline void assign_vue_slot(struct brw_vue_map *vue_map,
                                   int varying)
{
   /* Make sure this varying hasn't been assigned a slot already */
   assert(vue_map->varying_to_slot[varying] == -1);

   vue_map->varying_to_slot[varying] = vue_map->num_slots;
   vue_map->slot_to_varying[vue_map->num_slots++] = varying;
}

/**
 * Compute the VUE map for a vertex shader program.
 */
void
brw_compute_vue_map(struct brw_context *brw, struct brw_vue_map *vue_map,
                    GLbitfield64 slots_valid)
{
   vue_map->slots_valid = slots_valid;
   int i;

   /* gl_Layer doesn't get its own varying slot -- it's stored in the first
    * VUE slot (VARYING_SLOT_PSIZ).
    */
   slots_valid &= ~VARYING_BIT_LAYER;

   /* Make sure that the values we store in vue_map->varying_to_slot and
    * vue_map->slot_to_varying won't overflow the signed chars that are used
    * to store them.  Note that since vue_map->slot_to_varying sometimes holds
    * values equal to BRW_VARYING_SLOT_COUNT, we need to ensure that
    * BRW_VARYING_SLOT_COUNT is <= 127, not 128.
    */
   STATIC_ASSERT(BRW_VARYING_SLOT_COUNT <= 127);

   vue_map->num_slots = 0;
   for (i = 0; i < BRW_VARYING_SLOT_COUNT; ++i) {
      vue_map->varying_to_slot[i] = -1;
      vue_map->slot_to_varying[i] = BRW_VARYING_SLOT_COUNT;
   }

   /* VUE header: format depends on chip generation and whether clipping is
    * enabled.
    */
   switch (brw->gen) {
   case 4:
   case 5:
      /* There are 8 dwords in VUE header pre-Ironlake:
       * dword 0-3 is indices, point width, clip flags.
       * dword 4-7 is ndc position
       * dword 8-11 is the first vertex data.
       *
       * On Ironlake the VUE header is nominally 20 dwords, but the hardware
       * will accept the same header layout as Gen4 (and should be a bit
       * faster).
       */
      assign_vue_slot(vue_map, VARYING_SLOT_PSIZ);
      assign_vue_slot(vue_map, BRW_VARYING_SLOT_NDC);
      assign_vue_slot(vue_map, VARYING_SLOT_POS);
      break;
   case 6:
   case 7:
      /* There are 8 or 16 DWs (D0-D15) in VUE header on Sandybridge:
       * dword 0-3 of the header is indices, point width, clip flags.
       * dword 4-7 is the 4D space position
       * dword 8-15 of the vertex header is the user clip distance if
       * enabled.
       * dword 8-11 or 16-19 is the first vertex element data we fill.
       */
      assign_vue_slot(vue_map, VARYING_SLOT_PSIZ);
      assign_vue_slot(vue_map, VARYING_SLOT_POS);
      if (slots_valid & BITFIELD64_BIT(VARYING_SLOT_CLIP_DIST0))
         assign_vue_slot(vue_map, VARYING_SLOT_CLIP_DIST0);
      if (slots_valid & BITFIELD64_BIT(VARYING_SLOT_CLIP_DIST1))
         assign_vue_slot(vue_map, VARYING_SLOT_CLIP_DIST1);

      /* Front and back colors need to be consecutive so that we can use
       * ATTRIBUTE_SWIZZLE_INPUTATTR_FACING to swizzle them when doing
       * two-sided color.
       */
      if (slots_valid & BITFIELD64_BIT(VARYING_SLOT_COL0))
         assign_vue_slot(vue_map, VARYING_SLOT_COL0);
      if (slots_valid & BITFIELD64_BIT(VARYING_SLOT_BFC0))
         assign_vue_slot(vue_map, VARYING_SLOT_BFC0);
      if (slots_valid & BITFIELD64_BIT(VARYING_SLOT_COL1))
         assign_vue_slot(vue_map, VARYING_SLOT_COL1);
      if (slots_valid & BITFIELD64_BIT(VARYING_SLOT_BFC1))
         assign_vue_slot(vue_map, VARYING_SLOT_BFC1);
      break;
   default:
      assert(!"VUE map not known for this chip generation");
      break;
   }

   /* The hardware doesn't care about the rest of the vertex outputs, so just
    * assign them contiguously.  Don't reassign outputs that already have a
    * slot.
    *
    * We generally don't need to assign a slot for VARYING_SLOT_CLIP_VERTEX,
    * since it's encoded as the clip distances by emit_clip_distances().
    * However, it may be output by transform feedback, and we'd rather not
    * recompute state when TF changes, so we just always include it.
    */
   for (int i = 0; i < VARYING_SLOT_MAX; ++i) {
      if ((slots_valid & BITFIELD64_BIT(i)) &&
          vue_map->varying_to_slot[i] == -1) {
         assign_vue_slot(vue_map, i);
      }
   }
}


/**
 * Decide which set of clip planes should be used when clipping via
 * gl_Position or gl_ClipVertex.
 */
gl_clip_plane *brw_select_clip_planes(struct gl_context *ctx)
{
   if (ctx->Shader.CurrentVertexProgram) {
      /* There is currently a GLSL vertex shader, so clip according to GLSL
       * rules, which means compare gl_ClipVertex (or gl_Position, if
       * gl_ClipVertex wasn't assigned) against the eye-coordinate clip planes
       * that were stored in EyeUserPlane at the time the clip planes were
       * specified.
       */
      return ctx->Transform.EyeUserPlane;
   } else {
      /* Either we are using fixed function or an ARB vertex program.  In
       * either case the clip planes are going to be compared against
       * gl_Position (which is in clip coordinates) so we have to clip using
       * _ClipUserPlane, which was transformed into clip coordinates by Mesa
       * core.
       */
      return ctx->Transform._ClipUserPlane;
   }
}


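/**
 * Compare two brw_vs_prog_data structures for equality: first the shared
 * vec4 portion, then the VS-specific fields that follow it.
 */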
bool
brw_vs_prog_data_compare(const void *in_a, const void *in_b)
{
   const struct brw_vs_prog_data *a = in_a;
   const struct brw_vs_prog_data *b = in_b;

   /* Compare the base vec4 structure. */
   if (!brw_vec4_prog_data_compare(&a->base, &b->base))
      return false;

   /* Compare the rest of the struct. */
   const unsigned offset = sizeof(struct brw_vec4_prog_data);
   if (memcmp(((char *) a) + offset, ((char *) b) + offset,
              sizeof(struct brw_vs_prog_data) - offset)) {
      return false;
   }

   return true;
}

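/**
 * Compile the vertex program described by the given key and upload the
 * generated code and prog_data to the program cache.  Returns false if code
 * generation fails.
 */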
static bool
do_vs_prog(struct brw_context *brw,
           struct gl_shader_program *prog,
           struct brw_vertex_program *vp,
           struct brw_vs_prog_key *key)
{
   GLuint program_size;
   const GLuint *program;
   struct brw_vs_compile c;
   struct brw_vs_prog_data prog_data;
   void *mem_ctx;
   int i;
   struct gl_shader *vs = NULL;

   if (prog)
      vs = prog->_LinkedShaders[MESA_SHADER_VERTEX];

   memset(&c, 0, sizeof(c));
   memcpy(&c.key, key, sizeof(*key));
   memset(&prog_data, 0, sizeof(prog_data));

   mem_ctx = ralloc_context(NULL);

   c.vp = vp;

   /* Allocate the references to the uniforms that will end up in the
    * prog_data associated with the compiled program, and which will be freed
    * by the state cache.
    */
   int param_count;
   if (vs) {
      /* We add padding around uniform values below vec4 size, with the worst
       * case being a float value that gets blown up to a vec4, so be
       * conservative here.
       */
      param_count = vs->num_uniform_components * 4;

   } else {
      param_count = vp->program.Base.Parameters->NumParameters * 4;
   }
   /* vec4_visitor::setup_uniform_clipplane_values() also uploads user clip
    * planes as uniforms.
    */
   param_count += c.key.base.nr_userclip_plane_consts * 4;

   prog_data.base.param = rzalloc_array(NULL, const float *, param_count);
   prog_data.base.pull_param = rzalloc_array(NULL, const float *, param_count);

   GLbitfield64 outputs_written = vp->program.Base.OutputsWritten;
   prog_data.inputs_read = vp->program.Base.InputsRead;

   if (c.key.copy_edgeflag) {
      outputs_written |= BITFIELD64_BIT(VARYING_SLOT_EDGE);
      prog_data.inputs_read |= VERT_BIT_EDGEFLAG;
   }

   if (brw->gen < 6) {
      /* Put dummy slots into the VUE for the SF to put the replaced
       * point sprite coords in.  We shouldn't need these dummy slots,
       * which take up precious URB space, but it would mean that the SF
       * doesn't get nice aligned pairs of input coords into output
       * coords, which would be a pain to handle.
       */
      for (i = 0; i < 8; i++) {
         if (c.key.point_coord_replace & (1 << i))
            outputs_written |= BITFIELD64_BIT(VARYING_SLOT_TEX0 + i);
      }

      /* If back colors are written, allocate slots for front colors too. */
      if (outputs_written & BITFIELD64_BIT(VARYING_SLOT_BFC0))
         outputs_written |= BITFIELD64_BIT(VARYING_SLOT_COL0);
      if (outputs_written & BITFIELD64_BIT(VARYING_SLOT_BFC1))
         outputs_written |= BITFIELD64_BIT(VARYING_SLOT_COL1);
   }

   /* In order for legacy clipping to work, we need to populate the clip
    * distance varying slots whenever clipping is enabled, even if the vertex
    * shader doesn't write to gl_ClipDistance.
    */
   if (c.key.base.userclip_active) {
      outputs_written |= BITFIELD64_BIT(VARYING_SLOT_CLIP_DIST0);
      outputs_written |= BITFIELD64_BIT(VARYING_SLOT_CLIP_DIST1);
   }

   brw_compute_vue_map(brw, &prog_data.base.vue_map, outputs_written);

   if (0) {
      _mesa_fprint_program_opt(stdout, &c.vp->program.Base, PROG_PRINT_DEBUG,
                               true);
   }

   /* Emit GEN4 code.
    */
   program = brw_vs_emit(brw, prog, &c, &prog_data, mem_ctx, &program_size);
   if (program == NULL) {
      ralloc_free(mem_ctx);
      return false;
   }

   /* Scratch space is used for register spilling */
   if (c.base.last_scratch) {
      perf_debug("Vertex shader triggered register spilling.  "
                 "Try reducing the number of live vec4 values to "
                 "improve performance.\n");

      prog_data.base.total_scratch
         = brw_get_scratch_size(c.base.last_scratch*REG_SIZE);

      brw_get_scratch_bo(brw, &brw->vs.base.scratch_bo,
                         prog_data.base.total_scratch * brw->max_vs_threads);
   }

   brw_upload_cache(&brw->cache, BRW_VS_PROG,
                    &c.key, sizeof(c.key),
                    program, program_size,
                    &prog_data, sizeof(prog_data),
                    &brw->vs.base.prog_offset, &brw->vs.prog_data);
   ralloc_free(mem_ctx);

   return true;
}

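/**
 * Report a single recompile-triggering key difference: if the old and new
 * values differ, print them via perf_debug() and return true.
 */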
static bool
key_debug(struct brw_context *brw, const char *name, int a, int b)
{
   if (a != b) {
      perf_debug("  %s %d->%d\n", name, a, b);
      return true;
   }
   return false;
}

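/**
 * Explain why the vertex shader is being recompiled: look up the previously
 * compiled variant of this program in the state cache and report which key
 * fields changed.
 */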
void
brw_vs_debug_recompile(struct brw_context *brw,
                       struct gl_shader_program *prog,
                       const struct brw_vs_prog_key *key)
{
   struct brw_cache_item *c = NULL;
   const struct brw_vs_prog_key *old_key = NULL;
   bool found = false;

   perf_debug("Recompiling vertex shader for program %d\n", prog->Name);

   for (unsigned int i = 0; i < brw->cache.size; i++) {
      for (c = brw->cache.items[i]; c; c = c->next) {
         if (c->cache_id == BRW_VS_PROG) {
            old_key = c->key;

            if (old_key->base.program_string_id == key->base.program_string_id)
               break;
         }
      }
      if (c)
         break;
   }

   if (!c) {
      perf_debug("  Didn't find previous compile in the shader cache for "
                 "debug\n");
      return;
   }

   for (unsigned int i = 0; i < VERT_ATTRIB_MAX; i++) {
      found |= key_debug(brw, "Vertex attrib w/a flags",
                         old_key->gl_attrib_wa_flags[i],
                         key->gl_attrib_wa_flags[i]);
   }

   found |= key_debug(brw, "user clip flags",
                      old_key->base.userclip_active, key->base.userclip_active);

   found |= key_debug(brw, "user clipping planes as push constants",
                      old_key->base.nr_userclip_plane_consts,
                      key->base.nr_userclip_plane_consts);

   found |= key_debug(brw, "copy edgeflag",
                      old_key->copy_edgeflag, key->copy_edgeflag);
   found |= key_debug(brw, "PointCoord replace",
                      old_key->point_coord_replace, key->point_coord_replace);
   found |= key_debug(brw, "vertex color clamping",
                      old_key->base.clamp_vertex_color,
                      key->base.clamp_vertex_color);

   found |= brw_debug_recompile_sampler_key(brw, &old_key->base.tex,
                                            &key->base.tex);

   if (!found) {
      perf_debug("  Something else\n");
   }
}


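/**
 * Fill in the user clipping fields of a vec4 program key.  When clip planes
 * are enabled and the shader doesn't write gl_ClipDistance itself, record
 * how many clip plane constants need to be pushed as uniforms.
 */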
void
brw_setup_vec4_key_clip_info(struct brw_context *brw,
                             struct brw_vec4_prog_key *key,
                             bool program_uses_clip_distance)
{
   struct gl_context *ctx = &brw->ctx;

   key->userclip_active = (ctx->Transform.ClipPlanesEnabled != 0);
   if (key->userclip_active && !program_uses_clip_distance) {
      key->nr_userclip_plane_consts
         = _mesa_logbase2(ctx->Transform.ClipPlanesEnabled) + 1;
   }
}


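/**
 * Build the VS program key from current GL state, then look for a matching
 * compiled variant in the program cache, compiling one with do_vs_prog() if
 * none is found.  Also updates the VS VUE map and the related dirty bits.
 */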
static void brw_upload_vs_prog(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   struct brw_vs_prog_key key;
   /* BRW_NEW_VERTEX_PROGRAM */
   struct brw_vertex_program *vp =
      (struct brw_vertex_program *)brw->vertex_program;
   struct gl_program *prog = (struct gl_program *) brw->vertex_program;
   int i;

   memset(&key, 0, sizeof(key));

   /* Just upload the program verbatim for now.  Always send it all
    * the inputs it asks for, whether they are varying or not.
    */
   key.base.program_string_id = vp->id;
   brw_setup_vec4_key_clip_info(brw, &key.base,
                                vp->program.Base.UsesClipDistanceOut);

   /* _NEW_POLYGON */
   if (brw->gen < 6) {
      key.copy_edgeflag = (ctx->Polygon.FrontMode != GL_FILL ||
                           ctx->Polygon.BackMode != GL_FILL);
   }

   /* _NEW_LIGHT | _NEW_BUFFERS */
   key.base.clamp_vertex_color = ctx->Light._ClampVertexColor;

   /* _NEW_POINT */
   if (brw->gen < 6 && ctx->Point.PointSprite) {
      for (i = 0; i < 8; i++) {
         if (ctx->Point.CoordReplace[i])
            key.point_coord_replace |= (1 << i);
      }
   }

   /* _NEW_TEXTURE */
   brw_populate_sampler_prog_key_data(ctx, prog, brw->vs.base.sampler_count,
                                      &key.base.tex);

   /* BRW_NEW_VERTICES */
   if (brw->gen < 8 && !brw->is_haswell) {
      /* Prior to Haswell, the hardware can't natively support GL_FIXED or
       * 2_10_10_10_REV vertex formats.  Set appropriate workaround flags.
       */
      for (i = 0; i < VERT_ATTRIB_MAX; i++) {
         if (!(vp->program.Base.InputsRead & BITFIELD64_BIT(i)))
            continue;

         uint8_t wa_flags = 0;

         switch (brw->vb.inputs[i].glarray->Type) {

         case GL_FIXED:
            wa_flags = brw->vb.inputs[i].glarray->Size;
            break;

         case GL_INT_2_10_10_10_REV:
            wa_flags |= BRW_ATTRIB_WA_SIGN;
            /* fallthrough */

         case GL_UNSIGNED_INT_2_10_10_10_REV:
            if (brw->vb.inputs[i].glarray->Format == GL_BGRA)
               wa_flags |= BRW_ATTRIB_WA_BGRA;

            if (brw->vb.inputs[i].glarray->Normalized)
               wa_flags |= BRW_ATTRIB_WA_NORMALIZE;
            else if (!brw->vb.inputs[i].glarray->Integer)
               wa_flags |= BRW_ATTRIB_WA_SCALE;

            break;
         }

         key.gl_attrib_wa_flags[i] = wa_flags;
      }
   }

   if (!brw_search_cache(&brw->cache, BRW_VS_PROG,
                         &key, sizeof(key),
                         &brw->vs.base.prog_offset, &brw->vs.prog_data)) {
      bool success = do_vs_prog(brw, ctx->Shader.CurrentVertexProgram,
                                vp, &key);
      (void) success;
      assert(success);
   }
   brw->vs.base.prog_data = &brw->vs.prog_data->base.base;

   if (memcmp(&brw->vs.prog_data->base.vue_map, &brw->vue_map_geom_out,
              sizeof(brw->vue_map_geom_out)) != 0) {
      brw->vue_map_vs = brw->vs.prog_data->base.vue_map;
      brw->state.dirty.brw |= BRW_NEW_VUE_MAP_VS;
      if (brw->gen < 7) {
         /* No geometry shader support, so the VS VUE map is the VUE map for
          * the output of the "geometry" portion of the pipeline.
          */
         brw->vue_map_geom_out = brw->vue_map_vs;
         brw->state.dirty.brw |= BRW_NEW_VUE_MAP_GEOM_OUT;
      }
   }
}

/* State atom: recompile and upload the vertex shader program whenever the
 * relevant GL or BRW state changes.
 */
const struct brw_tracked_state brw_vs_prog = {
   .dirty = {
      .mesa  = (_NEW_TRANSFORM | _NEW_POLYGON | _NEW_POINT | _NEW_LIGHT |
                _NEW_TEXTURE |
                _NEW_BUFFERS),
      .brw   = (BRW_NEW_VERTEX_PROGRAM |
                BRW_NEW_VERTICES),
      .cache = 0
   },
   .emit = brw_upload_vs_prog
};

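/**
 * Precompile the vertex shader at link time with a default key, so a likely
 * variant of the program is already in the cache before the first draw.  The
 * previous program offset and prog_data are restored afterwards so the
 * currently bound state is left untouched.
 */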
bool
brw_vs_precompile(struct gl_context *ctx, struct gl_shader_program *prog)
{
   struct brw_context *brw = brw_context(ctx);
   struct brw_vs_prog_key key;
   uint32_t old_prog_offset = brw->vs.base.prog_offset;
   struct brw_vs_prog_data *old_prog_data = brw->vs.prog_data;
   bool success;

   if (!prog->_LinkedShaders[MESA_SHADER_VERTEX])
      return true;

   struct gl_vertex_program *vp = (struct gl_vertex_program *)
      prog->_LinkedShaders[MESA_SHADER_VERTEX]->Program;
   struct brw_vertex_program *bvp = brw_vertex_program(vp);

   memset(&key, 0, sizeof(key));

   brw_vec4_setup_prog_key_for_precompile(ctx, &key.base, bvp->id, &vp->Base);

   success = do_vs_prog(brw, prog, bvp, &key);

   brw->vs.base.prog_offset = old_prog_offset;
   brw->vs.prog_data = old_prog_data;

   return success;
}


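/**
 * Free the resources held by a brw_vs_prog_data, delegating to the shared
 * vec4 prog_data cleanup.
 */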
void
brw_vs_prog_data_free(const void *in_prog_data)
{
   const struct brw_vs_prog_data *prog_data = in_prog_data;

   brw_vec4_prog_data_free(&prog_data->base);
}