i965/fs: Unify the param pointer allocation for FP/non-FP.
src/mesa/drivers/dri/i965/brw_wm.c
/*
 Copyright (C) Intel Corp. 2006. All Rights Reserved.
 Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keith@tungstengraphics.com>
 */

#include "brw_context.h"
#include "brw_wm.h"
#include "brw_state.h"
#include "main/formats.h"
#include "main/fbobject.h"
#include "main/samplerobj.h"
#include "program/prog_parameter.h"

#include "glsl/ralloc.h"

/**
 * Return a bitfield where bit n is set if barycentric interpolation mode n
 * (see enum brw_wm_barycentric_interp_mode) is needed by the fragment shader.
 */
static unsigned
brw_compute_barycentric_interp_modes(struct brw_context *brw,
                                     bool shade_model_flat,
                                     const struct gl_fragment_program *fprog)
{
   unsigned barycentric_interp_modes = 0;
   int attr;

   /* Loop through all fragment shader inputs to figure out what interpolation
    * modes are in use, and set the appropriate bits in
    * barycentric_interp_modes.
    */
   for (attr = 0; attr < FRAG_ATTRIB_MAX; ++attr) {
      enum glsl_interp_qualifier interp_qualifier =
         fprog->InterpQualifier[attr];
      bool is_centroid = fprog->IsCentroid & BITFIELD64_BIT(attr);
      bool is_gl_Color = attr == FRAG_ATTRIB_COL0 || attr == FRAG_ATTRIB_COL1;

      /* Ignore unused inputs. */
      if (!(fprog->Base.InputsRead & BITFIELD64_BIT(attr)))
         continue;

      /* Ignore WPOS and FACE, because they don't require interpolation. */
      if (attr == FRAG_ATTRIB_WPOS || attr == FRAG_ATTRIB_FACE)
         continue;

      /* Determine the set (or sets) of barycentric coordinates needed to
       * interpolate this variable.  Note that when
       * brw->needs_unlit_centroid_workaround is set, centroid interpolation
       * uses PIXEL interpolation for unlit pixels and CENTROID interpolation
       * for lit pixels, so we need both sets of barycentric coordinates.
       */
      if (interp_qualifier == INTERP_QUALIFIER_NOPERSPECTIVE) {
         if (is_centroid) {
            barycentric_interp_modes |=
               1 << BRW_WM_NONPERSPECTIVE_CENTROID_BARYCENTRIC;
         }
         if (!is_centroid || brw->needs_unlit_centroid_workaround) {
            barycentric_interp_modes |=
               1 << BRW_WM_NONPERSPECTIVE_PIXEL_BARYCENTRIC;
         }
      } else if (interp_qualifier == INTERP_QUALIFIER_SMOOTH ||
                 (!(shade_model_flat && is_gl_Color) &&
                  interp_qualifier == INTERP_QUALIFIER_NONE)) {
         if (is_centroid) {
            barycentric_interp_modes |=
               1 << BRW_WM_PERSPECTIVE_CENTROID_BARYCENTRIC;
         }
         if (!is_centroid || brw->needs_unlit_centroid_workaround) {
            barycentric_interp_modes |=
               1 << BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC;
         }
      }
   }

   return barycentric_interp_modes;
}
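
/* Illustrative example (added, not part of the original file): a fragment
 * shader input declared as
 *    noperspective centroid in vec4 v;
 * sets BRW_WM_NONPERSPECTIVE_CENTROID_BARYCENTRIC above, plus
 * BRW_WM_NONPERSPECTIVE_PIXEL_BARYCENTRIC when
 * brw->needs_unlit_centroid_workaround is set.
 */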

void
brw_wm_payload_setup(struct brw_context *brw,
                     struct brw_wm_compile *c)
{
   struct intel_context *intel = &brw->intel;
   bool uses_depth = (c->fp->program.Base.InputsRead &
                      (1 << FRAG_ATTRIB_WPOS)) != 0;
   unsigned barycentric_interp_modes = c->prog_data.barycentric_interp_modes;
   int i;

   if (intel->gen >= 6) {
      /* R0-1: masks, pixel X/Y coordinates. */
      c->nr_payload_regs = 2;
      /* R2: only for 32-pixel dispatch. */

      /* R3-26: barycentric interpolation coordinates.  These appear in the
       * same order that they appear in the brw_wm_barycentric_interp_mode
       * enum.  Each set of coordinates occupies 2 registers if dispatch width
       * == 8 and 4 registers if dispatch width == 16.  Coordinates only
       * appear if they were enabled using the "Barycentric Interpolation
       * Mode" bits in WM_STATE.
       */
      for (i = 0; i < BRW_WM_BARYCENTRIC_INTERP_MODE_COUNT; ++i) {
         if (barycentric_interp_modes & (1 << i)) {
            c->barycentric_coord_reg[i] = c->nr_payload_regs;
            c->nr_payload_regs += 2;
            if (c->dispatch_width == 16) {
               c->nr_payload_regs += 2;
            }
         }
      }

      /* R27: interpolated depth if uses source depth */
      if (uses_depth) {
         c->source_depth_reg = c->nr_payload_regs;
         c->nr_payload_regs++;
         if (c->dispatch_width == 16) {
            /* R28: interpolated depth if not 8-wide. */
            c->nr_payload_regs++;
         }
      }
      /* R29: interpolated W set if GEN6_WM_USES_SOURCE_W. */
      if (uses_depth) {
         c->source_w_reg = c->nr_payload_regs;
         c->nr_payload_regs++;
         if (c->dispatch_width == 16) {
            /* R30: interpolated W if not 8-wide. */
            c->nr_payload_regs++;
         }
      }
      /* R31: MSAA position offsets. */
      /* R32-: bary for 32-pixel. */
      /* R58-59: interp W for 32-pixel. */

      if (c->fp->program.Base.OutputsWritten &
          BITFIELD64_BIT(FRAG_RESULT_DEPTH)) {
         c->source_depth_to_render_target = true;
         c->computes_depth = true;
      }
   } else {
      brw_wm_lookup_iz(intel, c);
   }
}
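
/* Worked example (added for illustration): on gen6+, a SIMD16 shader that
 * enables only BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC and reads gl_FragCoord.z
 * ends up with
 *    g0-g1: masks and pixel X/Y,
 *    g2-g5: perspective pixel barycentric coordinates (4 regs at SIMD16),
 *    g6-g7: interpolated source depth,
 *    g8-g9: interpolated source W,
 * i.e. nr_payload_regs == 10.
 */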

bool
brw_wm_prog_data_compare(const void *in_a, const void *in_b,
                         int aux_size, const void *in_key)
{
   const struct brw_wm_prog_data *a = in_a;
   const struct brw_wm_prog_data *b = in_b;

   /* Compare all of the struct up to the pointers. */
   if (memcmp(a, b, offsetof(struct brw_wm_prog_data, param)))
      return false;

   if (memcmp(a->param, b->param, a->nr_params * sizeof(void *)))
      return false;

   if (memcmp(a->pull_param, b->pull_param, a->nr_pull_params * sizeof(void *)))
      return false;

   return true;
}
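
/* Note (added; assumes param and pull_param are the trailing members of
 * struct brw_wm_prog_data): the memcmp up to offsetof(..., param) covers all
 * of the remaining fields only because the pointer members sit at the end of
 * the struct; the arrays they point at are compared separately.
 */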

void
brw_wm_prog_data_free(const void *in_prog_data)
{
   const struct brw_wm_prog_data *prog_data = in_prog_data;

   ralloc_free((void *)prog_data->param);
   ralloc_free((void *)prog_data->pull_param);
}

/**
 * All Mesa program -> GPU code generation goes through this function.
 * Depending on the instructions used (i.e. flow control instructions)
 * we'll use one of two code generators.
 */
bool do_wm_prog(struct brw_context *brw,
                struct gl_shader_program *prog,
                struct brw_fragment_program *fp,
                struct brw_wm_prog_key *key)
{
   struct intel_context *intel = &brw->intel;
   struct brw_wm_compile *c;
   const GLuint *program;
   struct gl_shader *fs = NULL;
   GLuint program_size;

   if (prog)
      fs = prog->_LinkedShaders[MESA_SHADER_FRAGMENT];

   c = rzalloc(NULL, struct brw_wm_compile);

   /* Allocate the references to the uniforms that will end up in the
    * prog_data associated with the compiled program, and which will be freed
    * by the state cache.
    */
   int param_count;
   if (fs) {
      param_count = fs->num_uniform_components;
   } else {
      param_count = fp->program.Base.Parameters->NumParameters * 4;
   }
   /* The backend also sometimes adds params for texture size. */
   param_count += 2 * BRW_MAX_TEX_UNIT;
   c->prog_data.param = rzalloc_array(NULL, const float *, param_count);
   c->prog_data.pull_param = rzalloc_array(NULL, const float *, param_count);
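
   /* Sizing example (added for illustration): a linked GLSL shader declaring
    *    uniform vec4 color;
    *    uniform mat4 mvp;
    * reports num_uniform_components == 20 (4 + 16 floats), so param and
    * pull_param each get room for 20 + 2 * BRW_MAX_TEX_UNIT pointers; an
    * ARB_fragment_program instead sizes this as NumParameters * 4.
    */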

   memcpy(&c->key, key, sizeof(*key));

   c->fp = fp;

   brw_init_compile(brw, &c->func, c);

   c->prog_data.barycentric_interp_modes =
      brw_compute_barycentric_interp_modes(brw, c->key.flat_shade,
                                           &fp->program);

   brw_wm_fs_emit(brw, c, prog);

   /* Scratch space is used for register spilling */
   if (c->last_scratch) {
      perf_debug("Fragment shader triggered register spilling. "
                 "Try reducing the number of live scalar values to "
                 "improve performance.\n");

      c->prog_data.total_scratch = brw_get_scratch_size(c->last_scratch);

      brw_get_scratch_bo(intel, &brw->wm.scratch_bo,
                         c->prog_data.total_scratch * brw->max_wm_threads);
   }

   if (unlikely(INTEL_DEBUG & DEBUG_WM))
      fprintf(stderr, "\n");

   /* get the program */
   program = brw_get_program(&c->func, &program_size);

   brw_upload_cache(&brw->cache, BRW_WM_PROG,
                    &c->key, sizeof(c->key),
                    program, program_size,
                    &c->prog_data, sizeof(c->prog_data),
                    &brw->wm.prog_offset, &brw->wm.prog_data);

   ralloc_free(c);

   return true;
}

static bool
key_debug(const char *name, int a, int b)
{
   if (a != b) {
      perf_debug(" %s %d->%d\n", name, a, b);
      return true;
   } else {
      return false;
   }
}

bool
brw_debug_recompile_sampler_key(const struct brw_sampler_prog_key_data *old_key,
                                const struct brw_sampler_prog_key_data *key)
{
   bool found = false;

   for (unsigned int i = 0; i < MAX_SAMPLERS; i++) {
      found |= key_debug("EXT_texture_swizzle or DEPTH_TEXTURE_MODE",
                         old_key->swizzles[i], key->swizzles[i]);
   }
   found |= key_debug("GL_CLAMP enabled on any texture unit's 1st coordinate",
                      old_key->gl_clamp_mask[0], key->gl_clamp_mask[0]);
   found |= key_debug("GL_CLAMP enabled on any texture unit's 2nd coordinate",
                      old_key->gl_clamp_mask[1], key->gl_clamp_mask[1]);
   found |= key_debug("GL_CLAMP enabled on any texture unit's 3rd coordinate",
                      old_key->gl_clamp_mask[2], key->gl_clamp_mask[2]);
   found |= key_debug("GL_MESA_ycbcr texturing\n",
                      old_key->yuvtex_mask, key->yuvtex_mask);
   found |= key_debug("GL_MESA_ycbcr UV swapping\n",
                      old_key->yuvtex_swap_mask, key->yuvtex_swap_mask);

   return found;
}

void
brw_wm_debug_recompile(struct brw_context *brw,
                       struct gl_shader_program *prog,
                       const struct brw_wm_prog_key *key)
{
   struct brw_cache_item *c = NULL;
   const struct brw_wm_prog_key *old_key = NULL;
   bool found = false;

   perf_debug("Recompiling fragment shader for program %d\n", prog->Name);

   for (unsigned int i = 0; i < brw->cache.size; i++) {
      for (c = brw->cache.items[i]; c; c = c->next) {
         if (c->cache_id == BRW_WM_PROG) {
            old_key = c->key;

            if (old_key->program_string_id == key->program_string_id)
               break;
         }
      }
      if (c)
         break;
   }

   if (!c) {
      perf_debug(" Didn't find previous compile in the shader cache for "
                 "debug\n");
      return;
   }

   found |= key_debug("alphatest, computed depth, depth test, or depth write",
                      old_key->iz_lookup, key->iz_lookup);
   found |= key_debug("depth statistics", old_key->stats_wm, key->stats_wm);
   found |= key_debug("flat shading", old_key->flat_shade, key->flat_shade);
   found |= key_debug("number of color buffers",
                      old_key->nr_color_regions, key->nr_color_regions);
   found |= key_debug("sample alpha to coverage",
                      old_key->sample_alpha_to_coverage,
                      key->sample_alpha_to_coverage);
   found |= key_debug("rendering to FBO",
                      old_key->render_to_fbo, key->render_to_fbo);
   found |= key_debug("fragment color clamping",
                      old_key->clamp_fragment_color,
                      key->clamp_fragment_color);
   found |= key_debug("line smoothing", old_key->line_aa, key->line_aa);
   found |= key_debug("proj_attrib_mask",
                      old_key->proj_attrib_mask, key->proj_attrib_mask);
   found |= key_debug("renderbuffer height",
                      old_key->drawable_height, key->drawable_height);
   found |= key_debug("vertex shader outputs",
                      old_key->vp_outputs_written, key->vp_outputs_written);

   found |= brw_debug_recompile_sampler_key(&old_key->tex, &key->tex);

   if (!found) {
      perf_debug(" Something else\n");
   }
}

void
brw_populate_sampler_prog_key_data(struct gl_context *ctx,
                                   const struct gl_program *prog,
                                   struct brw_sampler_prog_key_data *key)
{
   struct intel_context *intel = intel_context(ctx);

   for (int s = 0; s < MAX_SAMPLERS; s++) {
      key->swizzles[s] = SWIZZLE_NOOP;

      if (!(prog->SamplersUsed & (1 << s)))
         continue;

      int unit_id = prog->SamplerUnits[s];
      const struct gl_texture_unit *unit = &ctx->Texture.Unit[unit_id];

      if (unit->_ReallyEnabled && unit->_Current->Target != GL_TEXTURE_BUFFER) {
         const struct gl_texture_object *t = unit->_Current;
         const struct gl_texture_image *img = t->Image[0][t->BaseLevel];
         struct gl_sampler_object *sampler = _mesa_get_samplerobj(ctx, unit_id);

         const bool alpha_depth = t->DepthMode == GL_ALPHA &&
            (img->_BaseFormat == GL_DEPTH_COMPONENT ||
             img->_BaseFormat == GL_DEPTH_STENCIL);

         /* Haswell handles texture swizzling as surface format overrides
          * (except for GL_ALPHA); all other platforms need MOVs in the shader.
          */
         if (!intel->is_haswell || alpha_depth)
            key->swizzles[s] = brw_get_texture_swizzle(t);

         if (img->InternalFormat == GL_YCBCR_MESA) {
            key->yuvtex_mask |= 1 << s;
            if (img->TexFormat == MESA_FORMAT_YCBCR)
               key->yuvtex_swap_mask |= 1 << s;
         }

         if (sampler->MinFilter != GL_NEAREST &&
             sampler->MagFilter != GL_NEAREST) {
            if (sampler->WrapS == GL_CLAMP)
               key->gl_clamp_mask[0] |= 1 << s;
            if (sampler->WrapT == GL_CLAMP)
               key->gl_clamp_mask[1] |= 1 << s;
            if (sampler->WrapR == GL_CLAMP)
               key->gl_clamp_mask[2] |= 1 << s;
         }
      }
   }
}
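
/* Example (added for illustration): a sampler using GL_LINEAR filtering with
 * GL_CLAMP wrapping on its S coordinate sets bit s in gl_clamp_mask[0], which
 * the FS backend uses to clamp that coordinate to [0, 1] in the shader.
 * Nearest-filtered samplers are skipped because GL_CLAMP then behaves like
 * GL_CLAMP_TO_EDGE, which the sampler hardware handles directly.
 */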

static void brw_wm_populate_key(struct brw_context *brw,
                                struct brw_wm_prog_key *key)
{
   struct gl_context *ctx = &brw->intel.ctx;
   struct intel_context *intel = &brw->intel;
   /* BRW_NEW_FRAGMENT_PROGRAM */
   const struct brw_fragment_program *fp =
      (struct brw_fragment_program *)brw->fragment_program;
   const struct gl_program *prog = (struct gl_program *) brw->fragment_program;
   GLuint lookup = 0;
   GLuint line_aa;
   bool program_uses_dfdy = fp->program.UsesDFdy;

   memset(key, 0, sizeof(*key));

   /* Build the index for table lookup */
   if (intel->gen < 6) {
      /* _NEW_COLOR */
      if (fp->program.UsesKill || ctx->Color.AlphaEnabled)
         lookup |= IZ_PS_KILL_ALPHATEST_BIT;

      if (fp->program.Base.OutputsWritten & BITFIELD64_BIT(FRAG_RESULT_DEPTH))
         lookup |= IZ_PS_COMPUTES_DEPTH_BIT;

      /* _NEW_DEPTH */
      if (ctx->Depth.Test)
         lookup |= IZ_DEPTH_TEST_ENABLE_BIT;

      if (ctx->Depth.Test && ctx->Depth.Mask) /* ?? */
         lookup |= IZ_DEPTH_WRITE_ENABLE_BIT;

      /* _NEW_STENCIL */
      if (ctx->Stencil._Enabled) {
         lookup |= IZ_STENCIL_TEST_ENABLE_BIT;

         if (ctx->Stencil.WriteMask[0] ||
             ctx->Stencil.WriteMask[ctx->Stencil._BackFace])
            lookup |= IZ_STENCIL_WRITE_ENABLE_BIT;
      }
      key->iz_lookup = lookup;
   }

   line_aa = AA_NEVER;

   /* _NEW_LINE, _NEW_POLYGON, BRW_NEW_REDUCED_PRIMITIVE */
   if (ctx->Line.SmoothFlag) {
      if (brw->intel.reduced_primitive == GL_LINES) {
         line_aa = AA_ALWAYS;
      }
      else if (brw->intel.reduced_primitive == GL_TRIANGLES) {
         if (ctx->Polygon.FrontMode == GL_LINE) {
            line_aa = AA_SOMETIMES;

            if (ctx->Polygon.BackMode == GL_LINE ||
                (ctx->Polygon.CullFlag &&
                 ctx->Polygon.CullFaceMode == GL_BACK))
               line_aa = AA_ALWAYS;
         }
         else if (ctx->Polygon.BackMode == GL_LINE) {
            line_aa = AA_SOMETIMES;

            if ((ctx->Polygon.CullFlag &&
                 ctx->Polygon.CullFaceMode == GL_FRONT))
               line_aa = AA_ALWAYS;
         }
      }
   }

   key->line_aa = line_aa;

   if (intel->gen < 6)
      key->stats_wm = brw->intel.stats_wm;

   /* BRW_NEW_WM_INPUT_DIMENSIONS */
   /* Only set this for fixed function.  The optimization it enables isn't
    * useful for programs using shaders.
    */
   if (ctx->Shader.CurrentFragmentProgram)
      key->proj_attrib_mask = 0xffffffff;
   else
      key->proj_attrib_mask = brw->wm.input_size_masks[4-1];

   /* _NEW_LIGHT */
   key->flat_shade = (ctx->Light.ShadeModel == GL_FLAT);

   /* _NEW_FRAG_CLAMP | _NEW_BUFFERS */
   key->clamp_fragment_color = ctx->Color._ClampFragmentColor;

   /* _NEW_TEXTURE */
   brw_populate_sampler_prog_key_data(ctx, prog, &key->tex);

   /* _NEW_BUFFERS */
   /*
    * Include the draw buffer origin and height so that we can calculate
    * fragment position values relative to the bottom left of the drawable,
    * from the incoming screen origin relative position we get as part of our
    * payload.
    *
    * This is only needed for the WM_WPOSXY opcode when the fragment program
    * uses the gl_FragCoord input.
    *
    * We could avoid recompiling by including this as a constant referenced by
    * our program, but if we were to do that it would also be nice to handle
    * getting that constant updated at batchbuffer submit time (when we
    * hold the lock and know where the buffer really is) rather than at emit
    * time when we don't hold the lock and are just guessing.  We could also
    * just avoid using this as key data if the program doesn't use
    * fragment.position.
    *
    * For DRI2 the origin_x/y will always be (0,0) but we still need the
    * drawable height in order to invert the Y axis.
    */
   if (fp->program.Base.InputsRead & FRAG_BIT_WPOS) {
      key->drawable_height = ctx->DrawBuffer->Height;
   }

   if ((fp->program.Base.InputsRead & FRAG_BIT_WPOS) || program_uses_dfdy) {
      key->render_to_fbo = _mesa_is_user_fbo(ctx->DrawBuffer);
   }

   /* _NEW_BUFFERS */
   key->nr_color_regions = ctx->DrawBuffer->_NumColorDrawBuffers;
   /* _NEW_MULTISAMPLE */
   key->sample_alpha_to_coverage = ctx->Multisample.SampleAlphaToCoverage;

   /* CACHE_NEW_VS_PROG */
   if (intel->gen < 6)
      key->vp_outputs_written = brw->vs.prog_data->outputs_written;

   /* The unique fragment program ID */
   key->program_string_id = fp->id;
}

static void
brw_upload_wm_prog(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;
   struct gl_context *ctx = &intel->ctx;
   struct brw_wm_prog_key key;
   struct brw_fragment_program *fp = (struct brw_fragment_program *)
      brw->fragment_program;

   brw_wm_populate_key(brw, &key);

   if (!brw_search_cache(&brw->cache, BRW_WM_PROG,
                         &key, sizeof(key),
                         &brw->wm.prog_offset, &brw->wm.prog_data)) {
      bool success = do_wm_prog(brw, ctx->Shader._CurrentFragmentProgram, fp,
                                &key);
      (void) success;
      assert(success);
   }
}

const struct brw_tracked_state brw_wm_prog = {
   .dirty = {
      .mesa = (_NEW_COLOR |
               _NEW_DEPTH |
               _NEW_STENCIL |
               _NEW_POLYGON |
               _NEW_LINE |
               _NEW_LIGHT |
               _NEW_FRAG_CLAMP |
               _NEW_BUFFERS |
               _NEW_TEXTURE |
               _NEW_MULTISAMPLE),
      .brw = (BRW_NEW_FRAGMENT_PROGRAM |
              BRW_NEW_WM_INPUT_DIMENSIONS |
              BRW_NEW_REDUCED_PRIMITIVE),
      .cache = CACHE_NEW_VS_PROG,
   },
   .emit = brw_upload_wm_prog
};