i965: Avoid recompiles for fragment clamping on non-clamping APIs.
[mesa.git] / src / mesa / drivers / dri / i965 / brw_vs.c
1 /*
2 Copyright (C) Intel Corp. 2006. All Rights Reserved.
3 Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
4 develop this 3D driver.
5
6 Permission is hereby granted, free of charge, to any person obtaining
7 a copy of this software and associated documentation files (the
8 "Software"), to deal in the Software without restriction, including
9 without limitation the rights to use, copy, modify, merge, publish,
10 distribute, sublicense, and/or sell copies of the Software, and to
11 permit persons to whom the Software is furnished to do so, subject to
12 the following conditions:
13
14 The above copyright notice and this permission notice (including the
15 next paragraph) shall be included in all copies or substantial
16 portions of the Software.
17
18 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25
26 **********************************************************************/
27 /*
28 * Authors:
29 * Keith Whitwell <keith@tungstengraphics.com>
30 */
31
32
33 #include "main/compiler.h"
34 #include "brw_context.h"
35 #include "brw_vs.h"
36 #include "brw_util.h"
37 #include "brw_state.h"
38 #include "program/prog_print.h"
39 #include "program/prog_parameter.h"
40
41 #include "glsl/ralloc.h"
42
43 static inline void assign_vue_slot(struct brw_vue_map *vue_map,
44 int varying)
45 {
46 /* Make sure this varying hasn't been assigned a slot already */
47 assert (vue_map->varying_to_slot[varying] == -1);
48
49 vue_map->varying_to_slot[varying] = vue_map->num_slots;
50 vue_map->slot_to_varying[vue_map->num_slots++] = varying;
51 }
52
/**
 * Compute the VUE map for vertex shader program.
 *
 * The VUE (Vertex URB Entry) map records which varying lives in which
 * slot of the vertex output layout consumed by downstream fixed-function
 * stages.  The order of assign_vue_slot() calls below defines the actual
 * hardware layout, so it must not be reordered.
 *
 * Note that consumers of this map using cache keys must include
 * prog_data->userclip and prog_data->outputs_written in their key
 * (generated by CACHE_NEW_VS_PROG).
 */
void
brw_compute_vue_map(struct brw_context *brw, struct brw_vue_map *vue_map,
                    GLbitfield64 slots_valid, bool userclip_active)
{
   const struct intel_context *intel = &brw->intel;

   /* Prior to Gen6, don't assign a slot for VARYING_SLOT_CLIP_VERTEX, since
    * it is unsupported.
    */
   if (intel->gen < 6)
      slots_valid &= ~VARYING_BIT_CLIP_VERTEX;

   vue_map->slots_valid = slots_valid;
   int i;

   /* Make sure that the values we store in vue_map->varying_to_slot and
    * vue_map->slot_to_varying won't overflow the signed chars that are used
    * to store them.  Note that since vue_map->slot_to_varying sometimes holds
    * values equal to BRW_VARYING_SLOT_COUNT, we need to ensure that
    * BRW_VARYING_SLOT_COUNT is <= 127, not 128.
    */
   STATIC_ASSERT(BRW_VARYING_SLOT_COUNT <= 127);

   /* Start with every varying unassigned (-1) and every slot marked empty
    * (BRW_VARYING_SLOT_COUNT acts as the "no varying" sentinel).
    */
   vue_map->num_slots = 0;
   for (i = 0; i < BRW_VARYING_SLOT_COUNT; ++i) {
      vue_map->varying_to_slot[i] = -1;
      vue_map->slot_to_varying[i] = BRW_VARYING_SLOT_COUNT;
   }

   /* VUE header: format depends on chip generation and whether clipping is
    * enabled.
    */
   switch (intel->gen) {
   case 4:
      /* There are 8 dwords in VUE header pre-Ironlake:
       * dword 0-3 is indices, point width, clip flags.
       * dword 4-7 is ndc position
       * dword 8-11 is the first vertex data.
       */
      assign_vue_slot(vue_map, VARYING_SLOT_PSIZ);
      assign_vue_slot(vue_map, BRW_VARYING_SLOT_NDC);
      assign_vue_slot(vue_map, VARYING_SLOT_POS);
      break;
   case 5:
      /* There are 20 DWs (D0-D19) in VUE header on Ironlake:
       * dword 0-3 of the header is indices, point width, clip flags.
       * dword 4-7 is the ndc position
       * dword 8-11 of the vertex header is the 4D space position
       * dword 12-19 of the vertex header is the user clip distance.
       * dword 20-23 is a pad so that the vertex element data is aligned
       * dword 24-27 is the first vertex data we fill.
       *
       * Note: future pipeline stages expect 4D space position to be
       * contiguous with the other varyings, so we make dword 24-27 a
       * duplicate copy of the 4D space position.
       */
      assign_vue_slot(vue_map, VARYING_SLOT_PSIZ);
      assign_vue_slot(vue_map, BRW_VARYING_SLOT_NDC);
      assign_vue_slot(vue_map, BRW_VARYING_SLOT_POS_DUPLICATE);
      assign_vue_slot(vue_map, VARYING_SLOT_CLIP_DIST0);
      assign_vue_slot(vue_map, VARYING_SLOT_CLIP_DIST1);
      assign_vue_slot(vue_map, BRW_VARYING_SLOT_PAD);
      assign_vue_slot(vue_map, VARYING_SLOT_POS);
      break;
   case 6:
   case 7:
      /* There are 8 or 16 DWs (D0-D15) in VUE header on Sandybridge:
       * dword 0-3 of the header is indices, point width, clip flags.
       * dword 4-7 is the 4D space position
       * dword 8-15 of the vertex header is the user clip distance if
       * enabled.
       * dword 8-11 or 16-19 is the first vertex element data we fill.
       */
      assign_vue_slot(vue_map, VARYING_SLOT_PSIZ);
      assign_vue_slot(vue_map, VARYING_SLOT_POS);
      if (userclip_active) {
         assign_vue_slot(vue_map, VARYING_SLOT_CLIP_DIST0);
         assign_vue_slot(vue_map, VARYING_SLOT_CLIP_DIST1);
      }
      /* front and back colors need to be consecutive so that we can use
       * ATTRIBUTE_SWIZZLE_INPUTATTR_FACING to swizzle them when doing
       * two-sided color.
       */
      if (slots_valid & BITFIELD64_BIT(VARYING_SLOT_COL0))
         assign_vue_slot(vue_map, VARYING_SLOT_COL0);
      if (slots_valid & BITFIELD64_BIT(VARYING_SLOT_BFC0))
         assign_vue_slot(vue_map, VARYING_SLOT_BFC0);
      if (slots_valid & BITFIELD64_BIT(VARYING_SLOT_COL1))
         assign_vue_slot(vue_map, VARYING_SLOT_COL1);
      if (slots_valid & BITFIELD64_BIT(VARYING_SLOT_BFC1))
         assign_vue_slot(vue_map, VARYING_SLOT_BFC1);
      break;
   default:
      assert (!"VUE map not known for this chip generation");
      break;
   }

   /* The hardware doesn't care about the rest of the vertex outputs, so just
    * assign them contiguously.  Don't reassign outputs that already have a
    * slot.
    *
    * We generally don't need to assign a slot for VARYING_SLOT_CLIP_VERTEX,
    * since it's encoded as the clip distances by emit_clip_distances().
    * However, it may be output by transform feedback, and we'd rather not
    * recompute state when TF changes, so we just always include it.
    */
   for (int i = 0; i < VARYING_SLOT_MAX; ++i) {
      if ((slots_valid & BITFIELD64_BIT(i)) &&
          vue_map->varying_to_slot[i] == -1) {
         assign_vue_slot(vue_map, i);
      }
   }
}
173
174
175 /**
176 * Decide which set of clip planes should be used when clipping via
177 * gl_Position or gl_ClipVertex.
178 */
179 gl_clip_plane *brw_select_clip_planes(struct gl_context *ctx)
180 {
181 if (ctx->Shader.CurrentVertexProgram) {
182 /* There is currently a GLSL vertex shader, so clip according to GLSL
183 * rules, which means compare gl_ClipVertex (or gl_Position, if
184 * gl_ClipVertex wasn't assigned) against the eye-coordinate clip planes
185 * that were stored in EyeUserPlane at the time the clip planes were
186 * specified.
187 */
188 return ctx->Transform.EyeUserPlane;
189 } else {
190 /* Either we are using fixed function or an ARB vertex program. In
191 * either case the clip planes are going to be compared against
192 * gl_Position (which is in clip coordinates) so we have to clip using
193 * _ClipUserPlane, which was transformed into clip coordinates by Mesa
194 * core.
195 */
196 return ctx->Transform._ClipUserPlane;
197 }
198 }
199
200
201 bool
202 brw_vec4_prog_data_compare(const struct brw_vec4_prog_data *a,
203 const struct brw_vec4_prog_data *b)
204 {
205 /* Compare all the struct up to the pointers. */
206 if (memcmp(a, b, offsetof(struct brw_vec4_prog_data, param)))
207 return false;
208
209 if (memcmp(a->param, b->param, a->nr_params * sizeof(void *)))
210 return false;
211
212 if (memcmp(a->pull_param, b->pull_param, a->nr_pull_params * sizeof(void *)))
213 return false;
214
215 return true;
216 }
217
218
219 bool
220 brw_vs_prog_data_compare(const void *in_a, const void *in_b,
221 int aux_size, const void *in_key)
222 {
223 const struct brw_vs_prog_data *a = in_a;
224 const struct brw_vs_prog_data *b = in_b;
225
226 /* Compare the base vec4 structure. */
227 if (!brw_vec4_prog_data_compare(&a->base, &b->base))
228 return false;
229
230 /* Compare the rest of the struct. */
231 const unsigned offset = sizeof(struct brw_vec4_prog_data);
232 if (memcmp(((char *) &a) + offset, ((char *) &b) + offset,
233 sizeof(struct brw_vs_prog_data) - offset)) {
234 return false;
235 }
236
237 return true;
238 }
239
/**
 * Compile the vertex program described by 'key' and upload the result to
 * the program cache.
 *
 * \param prog  the GLSL shader program, or NULL for fixed function / ARB
 *              vertex programs.
 * \param vp    the brw wrapper around the Mesa vertex program.
 * \param key   the program key built by the caller from current GL state.
 *
 * Returns false if code generation failed; on success the compiled code
 * and prog_data are stored via brw_upload_cache(), which updates
 * brw->vs.prog_offset / brw->vs.prog_data.
 */
static bool
do_vs_prog(struct brw_context *brw,
           struct gl_shader_program *prog,
           struct brw_vertex_program *vp,
           struct brw_vs_prog_key *key)
{
   struct intel_context *intel = &brw->intel;
   GLuint program_size;
   const GLuint *program;
   struct brw_vs_compile c;
   struct brw_vs_prog_data prog_data;
   void *mem_ctx;
   int i;
   struct gl_shader *vs = NULL;

   /* vs stays NULL for non-GLSL (fixed function / ARB program) compiles. */
   if (prog)
      vs = prog->_LinkedShaders[MESA_SHADER_VERTEX];

   memset(&c, 0, sizeof(c));
   memcpy(&c.key, key, sizeof(*key));
   memset(&prog_data, 0, sizeof(prog_data));

   /* Temporary allocations made during compilation hang off mem_ctx and are
    * freed in one shot below.
    */
   mem_ctx = ralloc_context(NULL);

   c.vp = vp;

   /* Allocate the references to the uniforms that will end up in the
    * prog_data associated with the compiled program, and which will be freed
    * by the state cache.
    */
   int param_count;
   if (vs) {
      /* We add padding around uniform values below vec4 size, with the worst
       * case being a float value that gets blown up to a vec4, so be
       * conservative here.
       */
      param_count = vs->num_uniform_components * 4;

   } else {
      param_count = vp->program.Base.Parameters->NumParameters * 4;
   }
   /* We also upload clip plane data as uniforms */
   param_count += MAX_CLIP_PLANES * 4;

   prog_data.base.param = rzalloc_array(NULL, const float *, param_count);
   prog_data.base.pull_param = rzalloc_array(NULL, const float *, param_count);

   GLbitfield64 outputs_written = vp->program.Base.OutputsWritten;
   prog_data.inputs_read = vp->program.Base.InputsRead;

   /* Copying the edge flag adds both an extra input and an extra output. */
   if (c.key.copy_edgeflag) {
      outputs_written |= BITFIELD64_BIT(VARYING_SLOT_EDGE);
      prog_data.inputs_read |= VERT_BIT_EDGEFLAG;
   }

   if (intel->gen < 6) {
      /* Put dummy slots into the VUE for the SF to put the replaced
       * point sprite coords in.  We shouldn't need these dummy slots,
       * which take up precious URB space, but it would mean that the SF
       * doesn't get nice aligned pairs of input coords into output
       * coords, which would be a pain to handle.
       */
      for (i = 0; i < 8; i++) {
         if (c.key.point_coord_replace & (1 << i))
            outputs_written |= BITFIELD64_BIT(VARYING_SLOT_TEX0 + i);
      }
   }

   brw_compute_vue_map(brw, &prog_data.base.vue_map, outputs_written,
                       c.key.base.userclip_active);

   /* Flip to 1 to dump the Mesa IR before code generation when debugging. */
   if (0) {
      _mesa_fprint_program_opt(stdout, &c.vp->program.Base, PROG_PRINT_DEBUG,
                               true);
   }

   /* Emit GEN4 code.
    */
   program = brw_vs_emit(brw, prog, &c, &prog_data, mem_ctx, &program_size);
   if (program == NULL) {
      ralloc_free(mem_ctx);
      return false;
   }

   /* Record how many binding table entries (surfaces) the program needs:
    * pull constants, then VS textures, then UBOs (each later category
    * implies a larger surface count, so the assignments may overwrite).
    */
   if (prog_data.base.nr_pull_params)
      prog_data.base.num_surfaces = 1;
   if (c.vp->program.Base.SamplersUsed)
      prog_data.base.num_surfaces = SURF_INDEX_VS_TEXTURE(BRW_MAX_TEX_UNIT);
   if (prog &&
       prog->_LinkedShaders[MESA_SHADER_VERTEX]->NumUniformBlocks) {
      prog_data.base.num_surfaces =
         SURF_INDEX_VS_UBO(prog->_LinkedShaders[MESA_SHADER_VERTEX]->NumUniformBlocks);
   }

   /* Scratch space is used for register spilling */
   if (c.base.last_scratch) {
      perf_debug("Vertex shader triggered register spilling. "
                 "Try reducing the number of live vec4 values to "
                 "improve performance.\n");

      prog_data.base.total_scratch
         = brw_get_scratch_size(c.base.last_scratch*REG_SIZE);

      brw_get_scratch_bo(intel, &brw->vs.scratch_bo,
                         prog_data.base.total_scratch * brw->max_vs_threads);
   }

   brw_upload_cache(&brw->cache, BRW_VS_PROG,
                    &c.key, sizeof(c.key),
                    program, program_size,
                    &prog_data, sizeof(prog_data),
                    &brw->vs.prog_offset, &brw->vs.prog_data);
   ralloc_free(mem_ctx);

   return true;
}
356
/**
 * Report one differing program-key field via perf_debug.
 * Returns true iff the old and new values differ.
 */
static bool
key_debug(struct intel_context *intel, const char *name, int a, int b)
{
   if (a == b)
      return false;

   perf_debug("  %s %d->%d\n", name, a, b);
   return true;
}
366
367 void
368 brw_vs_debug_recompile(struct brw_context *brw,
369 struct gl_shader_program *prog,
370 const struct brw_vs_prog_key *key)
371 {
372 struct intel_context *intel = &brw->intel;
373 struct brw_cache_item *c = NULL;
374 const struct brw_vs_prog_key *old_key = NULL;
375 bool found = false;
376
377 perf_debug("Recompiling vertex shader for program %d\n", prog->Name);
378
379 for (unsigned int i = 0; i < brw->cache.size; i++) {
380 for (c = brw->cache.items[i]; c; c = c->next) {
381 if (c->cache_id == BRW_VS_PROG) {
382 old_key = c->key;
383
384 if (old_key->base.program_string_id == key->base.program_string_id)
385 break;
386 }
387 }
388 if (c)
389 break;
390 }
391
392 if (!c) {
393 perf_debug(" Didn't find previous compile in the shader cache for "
394 "debug\n");
395 return;
396 }
397
398 for (unsigned int i = 0; i < VERT_ATTRIB_MAX; i++) {
399 found |= key_debug(intel, "Vertex attrib w/a flags",
400 old_key->gl_attrib_wa_flags[i],
401 key->gl_attrib_wa_flags[i]);
402 }
403
404 found |= key_debug(intel, "user clip flags",
405 old_key->base.userclip_active, key->base.userclip_active);
406
407 found |= key_debug(intel, "user clipping planes as push constants",
408 old_key->base.nr_userclip_plane_consts,
409 key->base.nr_userclip_plane_consts);
410
411 found |= key_debug(intel, "clip distance enable",
412 old_key->base.uses_clip_distance, key->base.uses_clip_distance);
413 found |= key_debug(intel, "clip plane enable bitfield",
414 old_key->base.userclip_planes_enabled_gen_4_5,
415 key->base.userclip_planes_enabled_gen_4_5);
416 found |= key_debug(intel, "copy edgeflag",
417 old_key->copy_edgeflag, key->copy_edgeflag);
418 found |= key_debug(intel, "PointCoord replace",
419 old_key->point_coord_replace, key->point_coord_replace);
420 found |= key_debug(intel, "vertex color clamping",
421 old_key->base.clamp_vertex_color, key->base.clamp_vertex_color);
422
423 found |= brw_debug_recompile_sampler_key(intel, &old_key->base.tex,
424 &key->base.tex);
425
426 if (!found) {
427 perf_debug(" Something else\n");
428 }
429 }
430
/**
 * State-atom emit function: build a VS program key from current GL state,
 * then find (or compile) the matching vertex shader.
 *
 * The pieces of state read here must match the dirty bits declared in the
 * brw_vs_prog tracked-state atom below.
 */
static void brw_upload_vs_prog(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;
   struct gl_context *ctx = &intel->ctx;
   struct brw_vs_prog_key key;
   /* BRW_NEW_VERTEX_PROGRAM */
   struct brw_vertex_program *vp =
      (struct brw_vertex_program *)brw->vertex_program;
   struct gl_program *prog = (struct gl_program *) brw->vertex_program;
   int i;

   memset(&key, 0, sizeof(key));

   /* Just upload the program verbatim for now.  Always send it all
    * the inputs it asks for, whether they are varying or not.
    */
   key.base.program_string_id = vp->id;
   /* _NEW_TRANSFORM */
   key.base.userclip_active = (ctx->Transform.ClipPlanesEnabled != 0);
   key.base.uses_clip_distance = vp->program.UsesClipDistance;
   if (key.base.userclip_active && !key.base.uses_clip_distance) {
      if (intel->gen < 6) {
         /* Gen4-5 clip planes are pushed as individual constants, so the
          * key needs both the count and the exact enable bitfield.
          */
         key.base.nr_userclip_plane_consts
            = _mesa_bitcount_64(ctx->Transform.ClipPlanesEnabled);
         key.base.userclip_planes_enabled_gen_4_5
            = ctx->Transform.ClipPlanesEnabled;
      } else {
         /* Gen6+ uploads planes 0..highest-enabled, so only the count of
          * consecutive constants matters.
          */
         key.base.nr_userclip_plane_consts
            = _mesa_logbase2(ctx->Transform.ClipPlanesEnabled) + 1;
      }
   }

   /* _NEW_POLYGON */
   if (intel->gen < 6) {
      key.copy_edgeflag = (ctx->Polygon.FrontMode != GL_FILL ||
                           ctx->Polygon.BackMode != GL_FILL);
   }

   /* _NEW_LIGHT | _NEW_BUFFERS */
   key.base.clamp_vertex_color = ctx->Light._ClampVertexColor;

   /* _NEW_POINT */
   if (intel->gen < 6 && ctx->Point.PointSprite) {
      for (i = 0; i < 8; i++) {
         if (ctx->Point.CoordReplace[i])
            key.point_coord_replace |= (1 << i);
      }
   }

   /* _NEW_TEXTURE */
   brw_populate_sampler_prog_key_data(ctx, prog, &key.base.tex);

   /* BRW_NEW_VERTICES */
   if (intel->gen < 8 && !intel->is_haswell) {
      /* Prior to Haswell, the hardware can't natively support GL_FIXED or
       * 2_10_10_10_REV vertex formats.  Set appropriate workaround flags.
       */
      for (i = 0; i < VERT_ATTRIB_MAX; i++) {
         if (!(vp->program.Base.InputsRead & BITFIELD64_BIT(i)))
            continue;

         uint8_t wa_flags = 0;

         switch (brw->vb.inputs[i].glarray->Type) {

         case GL_FIXED:
            /* Encode the component count so the shader knows how many
             * components to convert.
             */
            wa_flags = brw->vb.inputs[i].glarray->Size;
            break;

         case GL_INT_2_10_10_10_REV:
            wa_flags |= BRW_ATTRIB_WA_SIGN;
            /* fallthrough */

         case GL_UNSIGNED_INT_2_10_10_10_REV:
            if (brw->vb.inputs[i].glarray->Format == GL_BGRA)
               wa_flags |= BRW_ATTRIB_WA_BGRA;

            if (brw->vb.inputs[i].glarray->Normalized)
               wa_flags |= BRW_ATTRIB_WA_NORMALIZE;
            else if (!brw->vb.inputs[i].glarray->Integer)
               wa_flags |= BRW_ATTRIB_WA_SCALE;

            break;
         }

         key.gl_attrib_wa_flags[i] = wa_flags;
      }
   }

   /* Compile only on a cache miss. */
   if (!brw_search_cache(&brw->cache, BRW_VS_PROG,
                         &key, sizeof(key),
                         &brw->vs.prog_offset, &brw->vs.prog_data)) {
      bool success = do_vs_prog(brw, ctx->Shader.CurrentVertexProgram,
                                vp, &key);

      assert(success);
   }
   /* Flag downstream state if this VS produces a different VUE map than
    * the one currently in use.
    */
   if (memcmp(&brw->vs.prog_data->base.vue_map, &brw->vue_map_geom_out,
              sizeof(brw->vue_map_geom_out)) != 0) {
      brw->vue_map_geom_out = brw->vs.prog_data->base.vue_map;
      brw->state.dirty.brw |= BRW_NEW_VUE_MAP_GEOM_OUT;
   }
}
533
/* Tracked-state atom for the vertex shader program.
 *
 * The dirty bits here must cover every piece of state read by
 * brw_upload_vs_prog() when constructing the program key; see brw_vs.c.
 */
const struct brw_tracked_state brw_vs_prog = {
   .dirty = {
      .mesa = (_NEW_TRANSFORM | _NEW_POLYGON | _NEW_POINT | _NEW_LIGHT |
               _NEW_TEXTURE |
               _NEW_BUFFERS),
      .brw = (BRW_NEW_VERTEX_PROGRAM |
              BRW_NEW_VERTICES),
      .cache = 0
   },
   .emit = brw_upload_vs_prog
};
547
548 bool
549 brw_vs_precompile(struct gl_context *ctx, struct gl_shader_program *prog)
550 {
551 struct brw_context *brw = brw_context(ctx);
552 struct brw_vs_prog_key key;
553 uint32_t old_prog_offset = brw->vs.prog_offset;
554 struct brw_vs_prog_data *old_prog_data = brw->vs.prog_data;
555 bool success;
556
557 if (!prog->_LinkedShaders[MESA_SHADER_VERTEX])
558 return true;
559
560 struct gl_vertex_program *vp = (struct gl_vertex_program *)
561 prog->_LinkedShaders[MESA_SHADER_VERTEX]->Program;
562 struct brw_vertex_program *bvp = brw_vertex_program(vp);
563
564 memset(&key, 0, sizeof(key));
565
566 key.base.program_string_id = bvp->id;
567 key.base.clamp_vertex_color = ctx->API == API_OPENGL_COMPAT;
568
569 for (int i = 0; i < MAX_SAMPLERS; i++) {
570 if (vp->Base.ShadowSamplers & (1 << i)) {
571 /* Assume DEPTH_TEXTURE_MODE is the default: X, X, X, 1 */
572 key.base.tex.swizzles[i] =
573 MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_X, SWIZZLE_X, SWIZZLE_ONE);
574 } else {
575 /* Color sampler: assume no swizzling. */
576 key.base.tex.swizzles[i] = SWIZZLE_XYZW;
577 }
578 }
579
580 success = do_vs_prog(brw, prog, bvp, &key);
581
582 brw->vs.prog_offset = old_prog_offset;
583 brw->vs.prog_data = old_prog_data;
584
585 return success;
586 }
587
588
589 void
590 brw_vec4_prog_data_free(const struct brw_vec4_prog_data *prog_data)
591 {
592 ralloc_free((void *)prog_data->param);
593 ralloc_free((void *)prog_data->pull_param);
594 }
595
596
597 void
598 brw_vs_prog_data_free(const void *in_prog_data)
599 {
600 const struct brw_vs_prog_data *prog_data = in_prog_data;
601
602 brw_vec4_prog_data_free(&prog_data->base);
603 }