util: Move ralloc to a new src/util directory.
[mesa.git] / src/mesa/drivers/dri/i965/brw_vs.c
/*
 Copyright (C) Intel Corp.  2006.  All Rights Reserved.
 Intel funded Tungsten Graphics to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keithw@vmware.com>
 */


#include "main/compiler.h"
#include "brw_context.h"
#include "brw_vs.h"
#include "brw_util.h"
#include "brw_state.h"
#include "program/prog_print.h"
#include "program/prog_parameter.h"

#include "util/ralloc.h"

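/**
 * Append a varying to the next free VUE slot, recording the mapping in both
 * the varying_to_slot and slot_to_varying tables.
 */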
static inline void assign_vue_slot(struct brw_vue_map *vue_map,
                                   int varying)
{
   /* Make sure this varying hasn't been assigned a slot already */
   assert (vue_map->varying_to_slot[varying] == -1);

   vue_map->varying_to_slot[varying] = vue_map->num_slots;
   vue_map->slot_to_varying[vue_map->num_slots++] = varying;
}

/**
 * Compute the VUE map for a vertex shader program.
 */
void
brw_compute_vue_map(struct brw_context *brw, struct brw_vue_map *vue_map,
                    GLbitfield64 slots_valid)
{
   vue_map->slots_valid = slots_valid;
   int i;

   /* gl_Layer and gl_ViewportIndex don't get their own varying slots -- they
    * are stored in the first VUE slot (VARYING_SLOT_PSIZ).
    */
   slots_valid &= ~(VARYING_BIT_LAYER | VARYING_BIT_VIEWPORT);

   /* Make sure that the values we store in vue_map->varying_to_slot and
    * vue_map->slot_to_varying won't overflow the signed chars that are used
    * to store them. Note that since vue_map->slot_to_varying sometimes holds
    * values equal to BRW_VARYING_SLOT_COUNT, we need to ensure that
    * BRW_VARYING_SLOT_COUNT is <= 127, not 128.
    */
   STATIC_ASSERT(BRW_VARYING_SLOT_COUNT <= 127);

   vue_map->num_slots = 0;
   for (i = 0; i < BRW_VARYING_SLOT_COUNT; ++i) {
      vue_map->varying_to_slot[i] = -1;
      vue_map->slot_to_varying[i] = BRW_VARYING_SLOT_COUNT;
   }

   /* VUE header: format depends on chip generation and whether clipping is
    * enabled.
    */
   if (brw->gen < 6) {
      /* There are 8 dwords in VUE header pre-Ironlake:
       * dword 0-3 is indices, point width, clip flags.
       * dword 4-7 is ndc position
       * dword 8-11 is the first vertex data.
       *
       * On Ironlake the VUE header is nominally 20 dwords, but the hardware
       * will accept the same header layout as Gen4 [and should be a bit faster]
       */
      assign_vue_slot(vue_map, VARYING_SLOT_PSIZ);
      assign_vue_slot(vue_map, BRW_VARYING_SLOT_NDC);
      assign_vue_slot(vue_map, VARYING_SLOT_POS);
   } else {
      /* There are 8 or 16 DWs (D0-D15) in VUE header on Sandybridge:
       * dword 0-3 of the header is indices, point width, clip flags.
       * dword 4-7 is the 4D space position
       * dword 8-15 of the vertex header is the user clip distance if
       * enabled.
       * dword 8-11 or 16-19 is the first vertex element data we fill.
       */
      assign_vue_slot(vue_map, VARYING_SLOT_PSIZ);
      assign_vue_slot(vue_map, VARYING_SLOT_POS);
      if (slots_valid & BITFIELD64_BIT(VARYING_SLOT_CLIP_DIST0))
         assign_vue_slot(vue_map, VARYING_SLOT_CLIP_DIST0);
      if (slots_valid & BITFIELD64_BIT(VARYING_SLOT_CLIP_DIST1))
         assign_vue_slot(vue_map, VARYING_SLOT_CLIP_DIST1);

      /* front and back colors need to be consecutive so that we can use
       * ATTRIBUTE_SWIZZLE_INPUTATTR_FACING to swizzle them when doing
       * two-sided color.
       */
      if (slots_valid & BITFIELD64_BIT(VARYING_SLOT_COL0))
         assign_vue_slot(vue_map, VARYING_SLOT_COL0);
      if (slots_valid & BITFIELD64_BIT(VARYING_SLOT_BFC0))
         assign_vue_slot(vue_map, VARYING_SLOT_BFC0);
      if (slots_valid & BITFIELD64_BIT(VARYING_SLOT_COL1))
         assign_vue_slot(vue_map, VARYING_SLOT_COL1);
      if (slots_valid & BITFIELD64_BIT(VARYING_SLOT_BFC1))
         assign_vue_slot(vue_map, VARYING_SLOT_BFC1);
   }

   /* The hardware doesn't care about the rest of the vertex outputs, so just
    * assign them contiguously. Don't reassign outputs that already have a
    * slot.
    *
    * We generally don't need to assign a slot for VARYING_SLOT_CLIP_VERTEX,
    * since it's encoded as the clip distances by emit_clip_distances().
    * However, it may be output by transform feedback, and we'd rather not
    * recompute state when TF changes, so we just always include it.
    */
   for (int i = 0; i < VARYING_SLOT_MAX; ++i) {
      if ((slots_valid & BITFIELD64_BIT(i)) &&
          vue_map->varying_to_slot[i] == -1) {
         assign_vue_slot(vue_map, i);
      }
   }
}


/**
 * Decide which set of clip planes should be used when clipping via
 * gl_Position or gl_ClipVertex.
 */
gl_clip_plane *brw_select_clip_planes(struct gl_context *ctx)
{
   if (ctx->_Shader->CurrentProgram[MESA_SHADER_VERTEX]) {
      /* There is currently a GLSL vertex shader, so clip according to GLSL
       * rules, which means compare gl_ClipVertex (or gl_Position, if
       * gl_ClipVertex wasn't assigned) against the eye-coordinate clip planes
       * that were stored in EyeUserPlane at the time the clip planes were
       * specified.
       */
      return ctx->Transform.EyeUserPlane;
   } else {
      /* Either we are using fixed function or an ARB vertex program. In
       * either case the clip planes are going to be compared against
       * gl_Position (which is in clip coordinates) so we have to clip using
       * _ClipUserPlane, which was transformed into clip coordinates by Mesa
       * core.
       */
      return ctx->Transform._ClipUserPlane;
   }
}


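/**
 * Compare two brw_vs_prog_data structures for equality: the shared base
 * structure is checked with brw_stage_prog_data_compare(), and the remaining
 * VS-specific fields are compared with memcmp().
 */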
bool
brw_vs_prog_data_compare(const void *in_a, const void *in_b)
{
   const struct brw_vs_prog_data *a = in_a;
   const struct brw_vs_prog_data *b = in_b;

   /* Compare the base structure. */
   if (!brw_stage_prog_data_compare(&a->base.base, &b->base.base))
      return false;

   /* Compare the rest of the struct. */
   const unsigned offset = sizeof(struct brw_stage_prog_data);
   if (memcmp(((char *) a) + offset, ((char *) b) + offset,
              sizeof(struct brw_vs_prog_data) - offset)) {
      return false;
   }

   return true;
}

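/**
 * Compile the vertex program for the given key and upload the resulting
 * binary and prog_data to the program cache. Returns false if code
 * generation fails.
 */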
static bool
do_vs_prog(struct brw_context *brw,
           struct gl_shader_program *prog,
           struct brw_vertex_program *vp,
           struct brw_vs_prog_key *key)
{
   GLuint program_size;
   const GLuint *program;
   struct brw_vs_compile c;
   struct brw_vs_prog_data prog_data;
   struct brw_stage_prog_data *stage_prog_data = &prog_data.base.base;
   void *mem_ctx;
   int i;
   struct gl_shader *vs = NULL;

   if (prog)
      vs = prog->_LinkedShaders[MESA_SHADER_VERTEX];

   memset(&c, 0, sizeof(c));
   memcpy(&c.key, key, sizeof(*key));
   memset(&prog_data, 0, sizeof(prog_data));

   mem_ctx = ralloc_context(NULL);

   c.vp = vp;

   /* Allocate the references to the uniforms that will end up in the
    * prog_data associated with the compiled program, and which will be freed
    * by the state cache.
    */
   int param_count;
   if (vs) {
      /* We add padding around uniform values below vec4 size, with the worst
       * case being a float value that gets blown up to a vec4, so be
       * conservative here.
       */
      param_count = vs->num_uniform_components * 4;

   } else {
      param_count = vp->program.Base.Parameters->NumParameters * 4;
   }
   /* vec4_visitor::setup_uniform_clipplane_values() also uploads user clip
    * planes as uniforms.
    */
   param_count += c.key.base.nr_userclip_plane_consts * 4;

   stage_prog_data->param = rzalloc_array(NULL, const float *, param_count);
   stage_prog_data->pull_param = rzalloc_array(NULL, const float *, param_count);

   /* We set nr_params here NOT to the size of the param and pull_param
    * arrays, but to the number of uniform components vec4_visitor needs.
    * vec4_visitor::setup_uniforms() will set it back to a proper value.
    */
   stage_prog_data->nr_params = ALIGN(param_count, 4) / 4;
   if (vs) {
      stage_prog_data->nr_params += vs->num_samplers;
   }

   GLbitfield64 outputs_written = vp->program.Base.OutputsWritten;
   prog_data.inputs_read = vp->program.Base.InputsRead;

   if (c.key.copy_edgeflag) {
      outputs_written |= BITFIELD64_BIT(VARYING_SLOT_EDGE);
      prog_data.inputs_read |= VERT_BIT_EDGEFLAG;
   }

   if (brw->gen < 6) {
      /* Put dummy slots into the VUE for the SF to put the replaced
       * point sprite coords in. We shouldn't need these dummy slots,
       * which take up precious URB space, but it would mean that the SF
       * doesn't get nice aligned pairs of input coords into output
       * coords, which would be a pain to handle.
       */
      for (i = 0; i < 8; i++) {
         if (c.key.point_coord_replace & (1 << i))
            outputs_written |= BITFIELD64_BIT(VARYING_SLOT_TEX0 + i);
      }

      /* if back colors are written, allocate slots for front colors too */
      if (outputs_written & BITFIELD64_BIT(VARYING_SLOT_BFC0))
         outputs_written |= BITFIELD64_BIT(VARYING_SLOT_COL0);
      if (outputs_written & BITFIELD64_BIT(VARYING_SLOT_BFC1))
         outputs_written |= BITFIELD64_BIT(VARYING_SLOT_COL1);
   }

   /* In order for legacy clipping to work, we need to populate the clip
    * distance varying slots whenever clipping is enabled, even if the vertex
    * shader doesn't write to gl_ClipDistance.
    */
   if (c.key.base.userclip_active) {
      outputs_written |= BITFIELD64_BIT(VARYING_SLOT_CLIP_DIST0);
      outputs_written |= BITFIELD64_BIT(VARYING_SLOT_CLIP_DIST1);
   }

   brw_compute_vue_map(brw, &prog_data.base.vue_map, outputs_written);

   if (0) {
      _mesa_fprint_program_opt(stderr, &c.vp->program.Base, PROG_PRINT_DEBUG,
                               true);
   }

   /* Emit GEN4 code.
    */
   program = brw_vs_emit(brw, prog, &c, &prog_data, mem_ctx, &program_size);
   if (program == NULL) {
      ralloc_free(mem_ctx);
      return false;
   }

   /* Scratch space is used for register spilling */
   if (c.base.last_scratch) {
      perf_debug("Vertex shader triggered register spilling. "
                 "Try reducing the number of live vec4 values to "
                 "improve performance.\n");

      prog_data.base.total_scratch
         = brw_get_scratch_size(c.base.last_scratch*REG_SIZE);

      brw_get_scratch_bo(brw, &brw->vs.base.scratch_bo,
                         prog_data.base.total_scratch * brw->max_vs_threads);
   }

   brw_upload_cache(&brw->cache, BRW_VS_PROG,
                    &c.key, sizeof(c.key),
                    program, program_size,
                    &prog_data, sizeof(prog_data),
                    &brw->vs.base.prog_offset, &brw->vs.prog_data);
   ralloc_free(mem_ctx);

   return true;
}

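/* Report a single program key field that changed between compiles; returns
 * true if the values differ.
 */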
static bool
key_debug(struct brw_context *brw, const char *name, int a, int b)
{
   if (a != b) {
      perf_debug(" %s %d->%d\n", name, a, b);
      return true;
   }
   return false;
}

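/**
 * Search the program cache for the previous compile of this vertex program
 * and report (via perf_debug) which key fields differ, to help explain why a
 * recompile was triggered.
 */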
void
brw_vs_debug_recompile(struct brw_context *brw,
                       struct gl_shader_program *prog,
                       const struct brw_vs_prog_key *key)
{
   struct brw_cache_item *c = NULL;
   const struct brw_vs_prog_key *old_key = NULL;
   bool found = false;

   perf_debug("Recompiling vertex shader for program %d\n", prog->Name);

   for (unsigned int i = 0; i < brw->cache.size; i++) {
      for (c = brw->cache.items[i]; c; c = c->next) {
         if (c->cache_id == BRW_VS_PROG) {
            old_key = c->key;

            if (old_key->base.program_string_id == key->base.program_string_id)
               break;
         }
      }
      if (c)
         break;
   }

   if (!c) {
      perf_debug(" Didn't find previous compile in the shader cache for "
                 "debug\n");
      return;
   }

   for (unsigned int i = 0; i < VERT_ATTRIB_MAX; i++) {
      found |= key_debug(brw, "Vertex attrib w/a flags",
                         old_key->gl_attrib_wa_flags[i],
                         key->gl_attrib_wa_flags[i]);
   }

   found |= key_debug(brw, "user clip flags",
                      old_key->base.userclip_active, key->base.userclip_active);

   found |= key_debug(brw, "user clipping planes as push constants",
                      old_key->base.nr_userclip_plane_consts,
                      key->base.nr_userclip_plane_consts);

   found |= key_debug(brw, "copy edgeflag",
                      old_key->copy_edgeflag, key->copy_edgeflag);
   found |= key_debug(brw, "PointCoord replace",
                      old_key->point_coord_replace, key->point_coord_replace);
   found |= key_debug(brw, "vertex color clamping",
                      old_key->base.clamp_vertex_color, key->base.clamp_vertex_color);

   found |= brw_debug_recompile_sampler_key(brw, &old_key->base.tex,
                                            &key->base.tex);

   if (!found) {
      perf_debug(" Something else\n");
   }
}


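/**
 * Record user clip state in the program key: whether clipping is active and,
 * if the shader doesn't write gl_ClipDistance itself, how many clip plane
 * constants need to be uploaded.
 */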
void
brw_setup_vec4_key_clip_info(struct brw_context *brw,
                             struct brw_vec4_prog_key *key,
                             bool program_uses_clip_distance)
{
   struct gl_context *ctx = &brw->ctx;

   key->userclip_active = (ctx->Transform.ClipPlanesEnabled != 0);
   if (key->userclip_active && !program_uses_clip_distance) {
      key->nr_userclip_plane_consts
         = _mesa_logbase2(ctx->Transform.ClipPlanesEnabled) + 1;
   }
}


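/**
 * State atom emit function: build a brw_vs_prog_key from the current GL
 * state, then find a matching compiled program in the cache or compile one
 * with do_vs_prog().
 */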
static void brw_upload_vs_prog(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   struct brw_vs_prog_key key;
   /* BRW_NEW_VERTEX_PROGRAM */
   struct brw_vertex_program *vp =
      (struct brw_vertex_program *)brw->vertex_program;
   struct gl_program *prog = (struct gl_program *) brw->vertex_program;
   int i;

   memset(&key, 0, sizeof(key));

   /* Just upload the program verbatim for now. Always send it all
    * the inputs it asks for, whether they are varying or not.
    */
   key.base.program_string_id = vp->id;
   brw_setup_vec4_key_clip_info(brw, &key.base,
                                vp->program.Base.UsesClipDistanceOut);

   /* _NEW_POLYGON */
   if (brw->gen < 6) {
      key.copy_edgeflag = (ctx->Polygon.FrontMode != GL_FILL ||
                           ctx->Polygon.BackMode != GL_FILL);
   }

   /* _NEW_LIGHT | _NEW_BUFFERS */
   key.base.clamp_vertex_color = ctx->Light._ClampVertexColor;

   /* _NEW_POINT */
   if (brw->gen < 6 && ctx->Point.PointSprite) {
      for (i = 0; i < 8; i++) {
         if (ctx->Point.CoordReplace[i])
            key.point_coord_replace |= (1 << i);
      }
   }

   /* _NEW_TEXTURE */
   brw_populate_sampler_prog_key_data(ctx, prog, brw->vs.base.sampler_count,
                                      &key.base.tex);

   /* BRW_NEW_VERTICES */
   if (brw->gen < 8 && !brw->is_haswell) {
      /* Prior to Haswell, the hardware can't natively support GL_FIXED or
       * 2_10_10_10_REV vertex formats. Set appropriate workaround flags.
       */
      for (i = 0; i < VERT_ATTRIB_MAX; i++) {
         if (!(vp->program.Base.InputsRead & BITFIELD64_BIT(i)))
            continue;

         uint8_t wa_flags = 0;

         switch (brw->vb.inputs[i].glarray->Type) {

         case GL_FIXED:
            wa_flags = brw->vb.inputs[i].glarray->Size;
            break;

         case GL_INT_2_10_10_10_REV:
            wa_flags |= BRW_ATTRIB_WA_SIGN;
            /* fallthrough */

         case GL_UNSIGNED_INT_2_10_10_10_REV:
            if (brw->vb.inputs[i].glarray->Format == GL_BGRA)
               wa_flags |= BRW_ATTRIB_WA_BGRA;

            if (brw->vb.inputs[i].glarray->Normalized)
               wa_flags |= BRW_ATTRIB_WA_NORMALIZE;
            else if (!brw->vb.inputs[i].glarray->Integer)
               wa_flags |= BRW_ATTRIB_WA_SCALE;

            break;
         }

         key.gl_attrib_wa_flags[i] = wa_flags;
      }
   }

   if (!brw_search_cache(&brw->cache, BRW_VS_PROG,
                         &key, sizeof(key),
                         &brw->vs.base.prog_offset, &brw->vs.prog_data)) {
      bool success =
         do_vs_prog(brw, ctx->_Shader->CurrentProgram[MESA_SHADER_VERTEX], vp,
                    &key);
      (void) success;
      assert(success);
   }
   brw->vs.base.prog_data = &brw->vs.prog_data->base.base;

   if (memcmp(&brw->vs.prog_data->base.vue_map, &brw->vue_map_geom_out,
              sizeof(brw->vue_map_geom_out)) != 0) {
      brw->vue_map_vs = brw->vs.prog_data->base.vue_map;
      brw->state.dirty.brw |= BRW_NEW_VUE_MAP_VS;
      if (brw->gen < 7) {
         /* No geometry shader support, so the VS VUE map is the VUE map for
          * the output of the "geometry" portion of the pipeline.
          */
         brw->vue_map_geom_out = brw->vue_map_vs;
         brw->state.dirty.brw |= BRW_NEW_VUE_MAP_GEOM_OUT;
      }
   }
}

/* See brw_vs.c:
 */
const struct brw_tracked_state brw_vs_prog = {
   .dirty = {
      .mesa = (_NEW_TRANSFORM | _NEW_POLYGON | _NEW_POINT | _NEW_LIGHT |
               _NEW_TEXTURE |
               _NEW_BUFFERS),
      .brw = (BRW_NEW_VERTEX_PROGRAM |
              BRW_NEW_VERTICES),
      .cache = 0
   },
   .emit = brw_upload_vs_prog
};

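/**
 * Compile the vertex shader at link time with a default key so that a draw
 * call is less likely to hit a compile; the previously bound prog_offset and
 * prog_data are restored afterwards.
 */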
bool
brw_vs_precompile(struct gl_context *ctx, struct gl_shader_program *prog)
{
   struct brw_context *brw = brw_context(ctx);
   struct brw_vs_prog_key key;
   uint32_t old_prog_offset = brw->vs.base.prog_offset;
   struct brw_vs_prog_data *old_prog_data = brw->vs.prog_data;
   bool success;

   if (!prog->_LinkedShaders[MESA_SHADER_VERTEX])
      return true;

   struct gl_vertex_program *vp = (struct gl_vertex_program *)
      prog->_LinkedShaders[MESA_SHADER_VERTEX]->Program;
   struct brw_vertex_program *bvp = brw_vertex_program(vp);

   memset(&key, 0, sizeof(key));

   brw_vec4_setup_prog_key_for_precompile(ctx, &key.base, bvp->id, &vp->Base);

   success = do_vs_prog(brw, prog, bvp, &key);

   brw->vs.base.prog_offset = old_prog_offset;
   brw->vs.prog_data = old_prog_data;

   return success;
}