i965: Move intel_context::reduced_primitive to brw_context.
[mesa.git] / src / mesa / drivers / dri / i965 / brw_wm.c
1 /*
2 Copyright (C) Intel Corp. 2006. All Rights Reserved.
3 Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
4 develop this 3D driver.
5
6 Permission is hereby granted, free of charge, to any person obtaining
7 a copy of this software and associated documentation files (the
8 "Software"), to deal in the Software without restriction, including
9 without limitation the rights to use, copy, modify, merge, publish,
10 distribute, sublicense, and/or sell copies of the Software, and to
11 permit persons to whom the Software is furnished to do so, subject to
12 the following conditions:
13
14 The above copyright notice and this permission notice (including the
15 next paragraph) shall be included in all copies or substantial
16 portions of the Software.
17
18 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25
26 **********************************************************************/
27 /*
28 * Authors:
29 * Keith Whitwell <keith@tungstengraphics.com>
30 */
31
32 #include "brw_context.h"
33 #include "brw_wm.h"
34 #include "brw_state.h"
35 #include "main/formats.h"
36 #include "main/fbobject.h"
37 #include "main/samplerobj.h"
38 #include "program/prog_parameter.h"
39
40 #include "glsl/ralloc.h"
41
42 /**
43 * Return a bitfield where bit n is set if barycentric interpolation mode n
44 * (see enum brw_wm_barycentric_interp_mode) is needed by the fragment shader.
45 */
46 static unsigned
47 brw_compute_barycentric_interp_modes(struct brw_context *brw,
48 bool shade_model_flat,
49 const struct gl_fragment_program *fprog)
50 {
51 unsigned barycentric_interp_modes = 0;
52 int attr;
53
54 /* Loop through all fragment shader inputs to figure out what interpolation
55 * modes are in use, and set the appropriate bits in
56 * barycentric_interp_modes.
57 */
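   /* The resulting bitfield lands in brw_wm_prog_data::barycentric_interp_modes
    * and is consulted when WM/PS state is emitted, so the hardware only sets
    * up the barycentric parameter sets the shader actually reads from its
    * payload.
    */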
58 for (attr = 0; attr < VARYING_SLOT_MAX; ++attr) {
59 enum glsl_interp_qualifier interp_qualifier =
60 fprog->InterpQualifier[attr];
61 bool is_centroid = fprog->IsCentroid & BITFIELD64_BIT(attr);
62 bool is_gl_Color = attr == VARYING_SLOT_COL0 || attr == VARYING_SLOT_COL1;
63
64 /* Ignore unused inputs. */
65 if (!(fprog->Base.InputsRead & BITFIELD64_BIT(attr)))
66 continue;
67
68 /* Ignore WPOS and FACE, because they don't require interpolation. */
69 if (attr == VARYING_SLOT_POS || attr == VARYING_SLOT_FACE)
70 continue;
71
72 /* Determine the set (or sets) of barycentric coordinates needed to
73 * interpolate this variable. Note that when
74 * brw->needs_unlit_centroid_workaround is set, centroid interpolation
75 * uses PIXEL interpolation for unlit pixels and CENTROID interpolation
76 * for lit pixels, so we need both sets of barycentric coordinates.
77 */
78 if (interp_qualifier == INTERP_QUALIFIER_NOPERSPECTIVE) {
79 if (is_centroid) {
80 barycentric_interp_modes |=
81 1 << BRW_WM_NONPERSPECTIVE_CENTROID_BARYCENTRIC;
82 }
83 if (!is_centroid || brw->needs_unlit_centroid_workaround) {
84 barycentric_interp_modes |=
85 1 << BRW_WM_NONPERSPECTIVE_PIXEL_BARYCENTRIC;
86 }
87 } else if (interp_qualifier == INTERP_QUALIFIER_SMOOTH ||
88 (!(shade_model_flat && is_gl_Color) &&
89 interp_qualifier == INTERP_QUALIFIER_NONE)) {
90 if (is_centroid) {
91 barycentric_interp_modes |=
92 1 << BRW_WM_PERSPECTIVE_CENTROID_BARYCENTRIC;
93 }
94 if (!is_centroid || brw->needs_unlit_centroid_workaround) {
95 barycentric_interp_modes |=
96 1 << BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC;
97 }
98 }
99 }
100
101 return barycentric_interp_modes;
102 }
103
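/* Compare/free hooks for the prog_data that the program cache stores
 * alongside each compiled WM program; the cache owns (and eventually frees)
 * the param arrays allocated in do_wm_prog() below.
 */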
104 bool
105 brw_wm_prog_data_compare(const void *in_a, const void *in_b,
106 int aux_size, const void *in_key)
107 {
108 const struct brw_wm_prog_data *a = in_a;
109 const struct brw_wm_prog_data *b = in_b;
110
111    /* Compare the struct up to the first pointer member; the pointed-to arrays are compared separately below. */
112 if (memcmp(a, b, offsetof(struct brw_wm_prog_data, param)))
113 return false;
114
115 if (memcmp(a->param, b->param, a->nr_params * sizeof(void *)))
116 return false;
117
118 if (memcmp(a->pull_param, b->pull_param, a->nr_pull_params * sizeof(void *)))
119 return false;
120
121 return true;
122 }
123
124 void
125 brw_wm_prog_data_free(const void *in_prog_data)
126 {
127 const struct brw_wm_prog_data *prog_data = in_prog_data;
128
129 ralloc_free((void *)prog_data->param);
130 ralloc_free((void *)prog_data->pull_param);
131 }
132
133 /**
134 * All Mesa program -> GPU code generation goes through this function.
135 * Depending on the instructions used (i.e. flow control instructions)
136 * we'll use one of two code generators.
137 */
138 bool do_wm_prog(struct brw_context *brw,
139 struct gl_shader_program *prog,
140 struct brw_fragment_program *fp,
141 struct brw_wm_prog_key *key)
142 {
143 struct intel_context *intel = &brw->intel;
144 struct brw_wm_compile *c;
145 const GLuint *program;
146 struct gl_shader *fs = NULL;
147 GLuint program_size;
148
149 if (prog)
150 fs = prog->_LinkedShaders[MESA_SHADER_FRAGMENT];
151
152 c = rzalloc(NULL, struct brw_wm_compile);
153
154 /* Allocate the references to the uniforms that will end up in the
155 * prog_data associated with the compiled program, and which will be freed
156 * by the state cache.
157 */
158 int param_count;
159 if (fs) {
160 param_count = fs->num_uniform_components;
161 } else {
162 param_count = fp->program.Base.Parameters->NumParameters * 4;
163 }
164 /* The backend also sometimes adds params for texture size. */
165 param_count += 2 * BRW_MAX_TEX_UNIT;
166 c->prog_data.param = rzalloc_array(NULL, const float *, param_count);
167 c->prog_data.pull_param = rzalloc_array(NULL, const float *, param_count);
168
169 memcpy(&c->key, key, sizeof(*key));
170
171 c->prog_data.barycentric_interp_modes =
172 brw_compute_barycentric_interp_modes(brw, c->key.flat_shade,
173 &fp->program);
174
175 program = brw_wm_fs_emit(brw, c, &fp->program, prog, &program_size);
176 if (program == NULL)
177 return false;
178
179 /* Scratch space is used for register spilling */
180 if (c->last_scratch) {
181 perf_debug("Fragment shader triggered register spilling. "
182 "Try reducing the number of live scalar values to "
183 "improve performance.\n");
184
185 c->prog_data.total_scratch = brw_get_scratch_size(c->last_scratch);
186
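      /* total_scratch is the per-thread spill space; the shared scratch BO
       * has to cover every WM thread the hardware can have in flight.
       */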
187 brw_get_scratch_bo(brw, &brw->wm.scratch_bo,
188 c->prog_data.total_scratch * brw->max_wm_threads);
189 }
190
191 if (unlikely(INTEL_DEBUG & DEBUG_WM))
192 fprintf(stderr, "\n");
193
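   /* Store the compiled kernel and its prog_data in the program cache, keyed
    * by the full brw_wm_prog_key; brw->wm.prog_offset and brw->wm.prog_data
    * end up pointing at the cached copies.
    */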
194 brw_upload_cache(&brw->cache, BRW_WM_PROG,
195 &c->key, sizeof(c->key),
196 program, program_size,
197 &c->prog_data, sizeof(c->prog_data),
198 &brw->wm.prog_offset, &brw->wm.prog_data);
199
200 ralloc_free(c);
201
202 return true;
203 }
204
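/* Report a single differing key field via perf_debug() and tell the caller
 * whether it actually changed.
 */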
205 static bool
206 key_debug(struct brw_context *brw, const char *name, int a, int b)
207 {
208 struct intel_context *intel = &brw->intel;
209 if (a != b) {
210 perf_debug(" %s %d->%d\n", name, a, b);
211 return true;
212 } else {
213 return false;
214 }
215 }
216
217 bool
218 brw_debug_recompile_sampler_key(struct brw_context *brw,
219 const struct brw_sampler_prog_key_data *old_key,
220 const struct brw_sampler_prog_key_data *key)
221 {
222 bool found = false;
223
224 for (unsigned int i = 0; i < MAX_SAMPLERS; i++) {
225 found |= key_debug(brw, "EXT_texture_swizzle or DEPTH_TEXTURE_MODE",
226 old_key->swizzles[i], key->swizzles[i]);
227 }
228 found |= key_debug(brw, "GL_CLAMP enabled on any texture unit's 1st coordinate",
229 old_key->gl_clamp_mask[0], key->gl_clamp_mask[0]);
230 found |= key_debug(brw, "GL_CLAMP enabled on any texture unit's 2nd coordinate",
231 old_key->gl_clamp_mask[1], key->gl_clamp_mask[1]);
232 found |= key_debug(brw, "GL_CLAMP enabled on any texture unit's 3rd coordinate",
233 old_key->gl_clamp_mask[2], key->gl_clamp_mask[2]);
234    found |= key_debug(brw, "GL_MESA_ycbcr texturing",
235                       old_key->yuvtex_mask, key->yuvtex_mask);
236    found |= key_debug(brw, "GL_MESA_ycbcr UV swapping",
237                       old_key->yuvtex_swap_mask, key->yuvtex_swap_mask);
238
239 return found;
240 }
241
242 void
243 brw_wm_debug_recompile(struct brw_context *brw,
244 struct gl_shader_program *prog,
245 const struct brw_wm_prog_key *key)
246 {
247 struct intel_context *intel = &brw->intel;
248 struct brw_cache_item *c = NULL;
249 const struct brw_wm_prog_key *old_key = NULL;
250 bool found = false;
251
252 perf_debug("Recompiling fragment shader for program %d\n", prog->Name);
253
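   /* Walk the program cache for an earlier compile of this fragment program
    * (matched by program_string_id) so the old and new keys can be diffed
    * below.
    */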
254 for (unsigned int i = 0; i < brw->cache.size; i++) {
255 for (c = brw->cache.items[i]; c; c = c->next) {
256 if (c->cache_id == BRW_WM_PROG) {
257 old_key = c->key;
258
259 if (old_key->program_string_id == key->program_string_id)
260 break;
261 }
262 }
263 if (c)
264 break;
265 }
266
267 if (!c) {
268 perf_debug(" Didn't find previous compile in the shader cache for debug\n");
269 return;
270 }
271
272 found |= key_debug(brw, "alphatest, computed depth, depth test, or "
273 "depth write",
274 old_key->iz_lookup, key->iz_lookup);
275 found |= key_debug(brw, "depth statistics",
276 old_key->stats_wm, key->stats_wm);
277 found |= key_debug(brw, "flat shading",
278 old_key->flat_shade, key->flat_shade);
279 found |= key_debug(brw, "number of color buffers",
280 old_key->nr_color_regions, key->nr_color_regions);
281 found |= key_debug(brw, "MRT alpha test or alpha-to-coverage",
282 old_key->replicate_alpha, key->replicate_alpha);
283 found |= key_debug(brw, "rendering to FBO",
284 old_key->render_to_fbo, key->render_to_fbo);
285 found |= key_debug(brw, "fragment color clamping",
286 old_key->clamp_fragment_color, key->clamp_fragment_color);
287 found |= key_debug(brw, "line smoothing",
288 old_key->line_aa, key->line_aa);
289 found |= key_debug(brw, "renderbuffer height",
290 old_key->drawable_height, key->drawable_height);
291 found |= key_debug(brw, "input slots valid",
292 old_key->input_slots_valid, key->input_slots_valid);
293
294 found |= brw_debug_recompile_sampler_key(brw, &old_key->tex, &key->tex);
295
296 if (!found) {
297 perf_debug(" Something else\n");
298 }
299 }
300
301 void
302 brw_populate_sampler_prog_key_data(struct gl_context *ctx,
303 const struct gl_program *prog,
304 struct brw_sampler_prog_key_data *key)
305 {
306 struct intel_context *intel = intel_context(ctx);
307
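   /* Capture the per-sampler texture and sampler state the compiled code
    * depends on (swizzles, YCbCr layout, GL_CLAMP usage), so that changing
    * any of it yields a different key and therefore a recompile.
    */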
308 for (int s = 0; s < MAX_SAMPLERS; s++) {
309 key->swizzles[s] = SWIZZLE_NOOP;
310
311 if (!(prog->SamplersUsed & (1 << s)))
312 continue;
313
314 int unit_id = prog->SamplerUnits[s];
315 const struct gl_texture_unit *unit = &ctx->Texture.Unit[unit_id];
316
317 if (unit->_ReallyEnabled && unit->_Current->Target != GL_TEXTURE_BUFFER) {
318 const struct gl_texture_object *t = unit->_Current;
319 const struct gl_texture_image *img = t->Image[0][t->BaseLevel];
320 struct gl_sampler_object *sampler = _mesa_get_samplerobj(ctx, unit_id);
321
322 const bool alpha_depth = t->DepthMode == GL_ALPHA &&
323 (img->_BaseFormat == GL_DEPTH_COMPONENT ||
324 img->_BaseFormat == GL_DEPTH_STENCIL);
325
326 /* Haswell handles texture swizzling as surface format overrides
327 * (except for GL_ALPHA); all other platforms need MOVs in the shader.
328 */
329 if (!intel->is_haswell || alpha_depth)
330 key->swizzles[s] = brw_get_texture_swizzle(ctx, t);
331
332 if (img->InternalFormat == GL_YCBCR_MESA) {
333 key->yuvtex_mask |= 1 << s;
334 if (img->TexFormat == MESA_FORMAT_YCBCR)
335 key->yuvtex_swap_mask |= 1 << s;
336 }
337
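         /* Linearly filtered GL_CLAMP can't be expressed with the hardware
          * wrap modes, so the compiler instead saturates the affected texture
          * coordinates in the shader; with nearest filtering GL_CLAMP behaves
          * like CLAMP_TO_EDGE and needs no shader work.
          */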
338 if (sampler->MinFilter != GL_NEAREST &&
339 sampler->MagFilter != GL_NEAREST) {
340 if (sampler->WrapS == GL_CLAMP)
341 key->gl_clamp_mask[0] |= 1 << s;
342 if (sampler->WrapT == GL_CLAMP)
343 key->gl_clamp_mask[1] |= 1 << s;
344 if (sampler->WrapR == GL_CLAMP)
345 key->gl_clamp_mask[2] |= 1 << s;
346 }
347 }
348 }
349 }
350
351 static void brw_wm_populate_key( struct brw_context *brw,
352 struct brw_wm_prog_key *key )
353 {
354 struct gl_context *ctx = &brw->intel.ctx;
355 struct intel_context *intel = &brw->intel;
356 /* BRW_NEW_FRAGMENT_PROGRAM */
357 const struct brw_fragment_program *fp =
358 (struct brw_fragment_program *)brw->fragment_program;
359 const struct gl_program *prog = (struct gl_program *) brw->fragment_program;
360 GLuint lookup = 0;
361 GLuint line_aa;
362 bool program_uses_dfdy = fp->program.UsesDFdy;
363
364 memset(key, 0, sizeof(*key));
365
366 /* Build the index for table lookup
367 */
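   /* Only Gen4-5 bake these depth/stencil/alpha interactions into the
    * compiled program, via the IZ lookup table in brw_wm_iz.c; later
    * generations handle them through render state rather than the program
    * key.
    */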
368 if (intel->gen < 6) {
369 /* _NEW_COLOR */
370 if (fp->program.UsesKill || ctx->Color.AlphaEnabled)
371 lookup |= IZ_PS_KILL_ALPHATEST_BIT;
372
373 if (fp->program.Base.OutputsWritten & BITFIELD64_BIT(FRAG_RESULT_DEPTH))
374 lookup |= IZ_PS_COMPUTES_DEPTH_BIT;
375
376 /* _NEW_DEPTH */
377 if (ctx->Depth.Test)
378 lookup |= IZ_DEPTH_TEST_ENABLE_BIT;
379
380 if (ctx->Depth.Test && ctx->Depth.Mask) /* ?? */
381 lookup |= IZ_DEPTH_WRITE_ENABLE_BIT;
382
383 /* _NEW_STENCIL | _NEW_BUFFERS */
384 if (ctx->Stencil._Enabled) {
385 lookup |= IZ_STENCIL_TEST_ENABLE_BIT;
386
387 if (ctx->Stencil.WriteMask[0] ||
388 ctx->Stencil.WriteMask[ctx->Stencil._BackFace])
389 lookup |= IZ_STENCIL_WRITE_ENABLE_BIT;
390 }
391 key->iz_lookup = lookup;
392 }
393
394 line_aa = AA_NEVER;
395
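   /* AA_ALWAYS: every primitive the shader sees is an antialiased line.
    * AA_SOMETIMES: only one face's polygon mode produces lines (and that face
    * may be culled), so whether a given primitive is an AA line is only known
    * at run time.  AA_NEVER: the compiler can skip coverage handling entirely.
    */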
396 /* _NEW_LINE, _NEW_POLYGON, BRW_NEW_REDUCED_PRIMITIVE */
397 if (ctx->Line.SmoothFlag) {
398 if (brw->reduced_primitive == GL_LINES) {
399 line_aa = AA_ALWAYS;
400 }
401 else if (brw->reduced_primitive == GL_TRIANGLES) {
402 if (ctx->Polygon.FrontMode == GL_LINE) {
403 line_aa = AA_SOMETIMES;
404
405 if (ctx->Polygon.BackMode == GL_LINE ||
406 (ctx->Polygon.CullFlag &&
407 ctx->Polygon.CullFaceMode == GL_BACK))
408 line_aa = AA_ALWAYS;
409 }
410 else if (ctx->Polygon.BackMode == GL_LINE) {
411 line_aa = AA_SOMETIMES;
412
413 if ((ctx->Polygon.CullFlag &&
414 ctx->Polygon.CullFaceMode == GL_FRONT))
415 line_aa = AA_ALWAYS;
416 }
417 }
418 }
419
420 key->line_aa = line_aa;
421
422 if (intel->gen < 6)
423 key->stats_wm = brw->intel.stats_wm;
424
425 /* _NEW_LIGHT */
426 key->flat_shade = (ctx->Light.ShadeModel == GL_FLAT);
427
428 /* _NEW_FRAG_CLAMP | _NEW_BUFFERS */
429 key->clamp_fragment_color = ctx->Color._ClampFragmentColor;
430
431 /* _NEW_TEXTURE */
432 brw_populate_sampler_prog_key_data(ctx, prog, &key->tex);
433
434 /* _NEW_BUFFERS */
435 /*
436 * Include the draw buffer origin and height so that we can calculate
437 * fragment position values relative to the bottom left of the drawable,
438     * from the incoming screen-origin-relative position we get as part of our
439 * payload.
440 *
441 * This is only needed for the WM_WPOSXY opcode when the fragment program
442 * uses the gl_FragCoord input.
443 *
444 * We could avoid recompiling by including this as a constant referenced by
445 * our program, but if we were to do that it would also be nice to handle
446 * getting that constant updated at batchbuffer submit time (when we
447 * hold the lock and know where the buffer really is) rather than at emit
448 * time when we don't hold the lock and are just guessing. We could also
449 * just avoid using this as key data if the program doesn't use
450 * fragment.position.
451 *
452 * For DRI2 the origin_x/y will always be (0,0) but we still need the
453 * drawable height in order to invert the Y axis.
454 */
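   /* Roughly speaking (ignoring sub-pixel offsets): for a window-system
    * drawable the compiled code ends up with
    * gl_FragCoord.y ~= drawable_height - y_screen, while user FBO rendering
    * (render_to_fbo) keeps Y un-flipped.
    */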
455 if (fp->program.Base.InputsRead & VARYING_BIT_POS) {
456 key->drawable_height = ctx->DrawBuffer->Height;
457 }
458
459 if ((fp->program.Base.InputsRead & VARYING_BIT_POS) || program_uses_dfdy) {
460 key->render_to_fbo = _mesa_is_user_fbo(ctx->DrawBuffer);
461 }
462
463 /* _NEW_BUFFERS */
464 key->nr_color_regions = ctx->DrawBuffer->_NumColorDrawBuffers;
465
466 /* _NEW_MULTISAMPLE, _NEW_COLOR, _NEW_BUFFERS */
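   /* Alpha test and alpha-to-coverage are defined in terms of draw buffer
    * zero's alpha, so with multiple color buffers the shader replicates that
    * alpha value to every render target write.
    */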
467 key->replicate_alpha = ctx->DrawBuffer->_NumColorDrawBuffers > 1 &&
468 (ctx->Multisample.SampleAlphaToCoverage || ctx->Color.AlphaEnabled);
469
470 /* BRW_NEW_VUE_MAP_GEOM_OUT */
471 if (intel->gen < 6)
472 key->input_slots_valid = brw->vue_map_geom_out.slots_valid;
473
474 /* The unique fragment program ID */
475 key->program_string_id = fp->id;
476 }
477
478
479 static void
480 brw_upload_wm_prog(struct brw_context *brw)
481 {
482 struct intel_context *intel = &brw->intel;
483 struct gl_context *ctx = &intel->ctx;
484 struct brw_wm_prog_key key;
485 struct brw_fragment_program *fp = (struct brw_fragment_program *)
486 brw->fragment_program;
487
488 brw_wm_populate_key(brw, &key);
489
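   /* Reuse a cached compile if one exists for this exact key; otherwise
    * compile and upload the program now.
    */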
490 if (!brw_search_cache(&brw->cache, BRW_WM_PROG,
491 &key, sizeof(key),
492 &brw->wm.prog_offset, &brw->wm.prog_data)) {
493 bool success = do_wm_prog(brw, ctx->Shader._CurrentFragmentProgram, fp,
494 &key);
495 (void) success;
496 assert(success);
497 }
498 }
499
500
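/* The dirty flags below list every piece of GL and brw state that
 * brw_wm_populate_key() reads, so the key (and, when it changes, the compiled
 * program) is re-evaluated whenever any of that state changes.
 */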
501 const struct brw_tracked_state brw_wm_prog = {
502 .dirty = {
503 .mesa = (_NEW_COLOR |
504 _NEW_DEPTH |
505 _NEW_STENCIL |
506 _NEW_POLYGON |
507 _NEW_LINE |
508 _NEW_LIGHT |
509 _NEW_FRAG_CLAMP |
510 _NEW_BUFFERS |
511 _NEW_TEXTURE |
512 _NEW_MULTISAMPLE),
513 .brw = (BRW_NEW_FRAGMENT_PROGRAM |
514 BRW_NEW_REDUCED_PRIMITIVE |
515 BRW_NEW_VUE_MAP_GEOM_OUT |
516 BRW_NEW_STATS_WM)
517 },
518 .emit = brw_upload_wm_prog
519 };
520