i965: Make the param pointer arrays for the WM dynamically sized.
[mesa.git] / src / mesa / drivers / dri / i965 / brw_wm.c
1 /*
2 Copyright (C) Intel Corp. 2006. All Rights Reserved.
3 Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
4 develop this 3D driver.
5
6 Permission is hereby granted, free of charge, to any person obtaining
7 a copy of this software and associated documentation files (the
8 "Software"), to deal in the Software without restriction, including
9 without limitation the rights to use, copy, modify, merge, publish,
10 distribute, sublicense, and/or sell copies of the Software, and to
11 permit persons to whom the Software is furnished to do so, subject to
12 the following conditions:
13
14 The above copyright notice and this permission notice (including the
15 next paragraph) shall be included in all copies or substantial
16 portions of the Software.
17
18 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25
26 **********************************************************************/
27 /*
28 * Authors:
29 * Keith Whitwell <keith@tungstengraphics.com>
30 */
31
32 #include "brw_context.h"
33 #include "brw_wm.h"
34 #include "brw_state.h"
35 #include "main/formats.h"
36 #include "main/fbobject.h"
37 #include "main/samplerobj.h"
38 #include "program/prog_parameter.h"
39
40 #include "glsl/ralloc.h"
41
42 /** Return number of src args for given instruction */
43 GLuint brw_wm_nr_args( GLuint opcode )
44 {
45 switch (opcode) {
46 case WM_FRONTFACING:
47 case WM_PIXELXY:
48 return 0;
49 case WM_CINTERP:
50 case WM_WPOSXY:
51 case WM_DELTAXY:
52 return 1;
53 case WM_LINTERP:
54 case WM_PIXELW:
55 return 2;
56 case WM_FB_WRITE:
57 case WM_PINTERP:
58 return 3;
59 default:
60 assert(opcode < MAX_OPCODE);
61 return _mesa_num_inst_src_regs(opcode);
62 }
63 }
64
65
66 GLuint brw_wm_is_scalar_result( GLuint opcode )
67 {
68 switch (opcode) {
69 case OPCODE_COS:
70 case OPCODE_EX2:
71 case OPCODE_LG2:
72 case OPCODE_POW:
73 case OPCODE_RCP:
74 case OPCODE_RSQ:
75 case OPCODE_SIN:
76 case OPCODE_DP2:
77 case OPCODE_DP3:
78 case OPCODE_DP4:
79 case OPCODE_DPH:
80 case OPCODE_DST:
81 return 1;
82
83 default:
84 return 0;
85 }
86 }
87
88
/**
 * Do GPU code generation for non-GLSL shader.  non-GLSL shaders have
 * no flow control instructions so we can more readily do SSA-style
 * optimizations.
 *
 * Runs the fixed compiler pipeline over \p c: fp augmentation, IR
 * translation, dead-code removal, register allocation, and finally
 * Gen4-style code emission.  The pass order is load-bearing; each pass
 * consumes the previous pass's results stored in \p c.
 */
static void
brw_wm_non_glsl_emit(struct brw_context *brw, struct brw_wm_compile *c)
{
   /* Augment fragment program.  Add instructions for pre- and
    * post-fragment-program tasks such as interpolation and fogging.
    */
   brw_wm_pass_fp(c);

   /* Translate to intermediate representation.  Build register usage
    * chains.
    */
   brw_wm_pass0(c);

   /* Dead code removal.
    */
   brw_wm_pass1(c);

   /* Register allocation.
    * Divide by two because we operate on 16 pixels at a time and require
    * two GRF entries for each logical shader register.
    */
   c->grf_limit = BRW_WM_MAX_GRF / 2;

   brw_wm_pass2(c);

   /* how many general-purpose registers are used */
   c->prog_data.reg_blocks = brw_register_blocks(c->max_wm_grf);

   /* Emit GEN4 code.
    */
   brw_wm_emit(c);
}
126
127
128 /**
129 * Return a bitfield where bit n is set if barycentric interpolation mode n
130 * (see enum brw_wm_barycentric_interp_mode) is needed by the fragment shader.
131 */
132 static unsigned
133 brw_compute_barycentric_interp_modes(struct brw_context *brw,
134 bool shade_model_flat,
135 const struct gl_fragment_program *fprog)
136 {
137 unsigned barycentric_interp_modes = 0;
138 int attr;
139
140 /* Loop through all fragment shader inputs to figure out what interpolation
141 * modes are in use, and set the appropriate bits in
142 * barycentric_interp_modes.
143 */
144 for (attr = 0; attr < FRAG_ATTRIB_MAX; ++attr) {
145 enum glsl_interp_qualifier interp_qualifier =
146 fprog->InterpQualifier[attr];
147 bool is_centroid = fprog->IsCentroid & BITFIELD64_BIT(attr);
148 bool is_gl_Color = attr == FRAG_ATTRIB_COL0 || attr == FRAG_ATTRIB_COL1;
149
150 /* Ignore unused inputs. */
151 if (!(fprog->Base.InputsRead & BITFIELD64_BIT(attr)))
152 continue;
153
154 /* Ignore WPOS and FACE, because they don't require interpolation. */
155 if (attr == FRAG_ATTRIB_WPOS || attr == FRAG_ATTRIB_FACE)
156 continue;
157
158 /* Determine the set (or sets) of barycentric coordinates needed to
159 * interpolate this variable. Note that when
160 * brw->needs_unlit_centroid_workaround is set, centroid interpolation
161 * uses PIXEL interpolation for unlit pixels and CENTROID interpolation
162 * for lit pixels, so we need both sets of barycentric coordinates.
163 */
164 if (interp_qualifier == INTERP_QUALIFIER_NOPERSPECTIVE) {
165 if (is_centroid) {
166 barycentric_interp_modes |=
167 1 << BRW_WM_NONPERSPECTIVE_CENTROID_BARYCENTRIC;
168 }
169 if (!is_centroid || brw->needs_unlit_centroid_workaround) {
170 barycentric_interp_modes |=
171 1 << BRW_WM_NONPERSPECTIVE_PIXEL_BARYCENTRIC;
172 }
173 } else if (interp_qualifier == INTERP_QUALIFIER_SMOOTH ||
174 (!(shade_model_flat && is_gl_Color) &&
175 interp_qualifier == INTERP_QUALIFIER_NONE)) {
176 if (is_centroid) {
177 barycentric_interp_modes |=
178 1 << BRW_WM_PERSPECTIVE_CENTROID_BARYCENTRIC;
179 }
180 if (!is_centroid || brw->needs_unlit_centroid_workaround) {
181 barycentric_interp_modes |=
182 1 << BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC;
183 }
184 }
185 }
186
187 return barycentric_interp_modes;
188 }
189
190
/**
 * Record where each fixed payload input (delivered by the hardware in
 * registers R0..) will live for this compile, and count the total
 * number of payload registers in c->nr_payload_regs.
 *
 * Gen6+ lays the payload out directly from the barycentric modes and
 * source-depth usage; earlier generations go through the
 * brw_wm_lookup_iz() table instead.
 */
void
brw_wm_payload_setup(struct brw_context *brw,
                     struct brw_wm_compile *c)
{
   struct intel_context *intel = &brw->intel;
   bool uses_depth = (c->fp->program.Base.InputsRead &
                      (1 << FRAG_ATTRIB_WPOS)) != 0;
   unsigned barycentric_interp_modes = c->prog_data.barycentric_interp_modes;
   int i;

   if (intel->gen >= 6) {
      /* R0-1: masks, pixel X/Y coordinates. */
      c->nr_payload_regs = 2;
      /* R2: only for 32-pixel dispatch.*/

      /* R3-26: barycentric interpolation coordinates.  These appear in the
       * same order that they appear in the brw_wm_barycentric_interp_mode
       * enum.  Each set of coordinates occupies 2 registers if dispatch width
       * == 8 and 4 registers if dispatch width == 16.  Coordinates only
       * appear if they were enabled using the "Barycentric Interpolation
       * Mode" bits in WM_STATE.
       */
      for (i = 0; i < BRW_WM_BARYCENTRIC_INTERP_MODE_COUNT; ++i) {
         if (barycentric_interp_modes & (1 << i)) {
            c->barycentric_coord_reg[i] = c->nr_payload_regs;
            c->nr_payload_regs += 2;
            if (c->dispatch_width == 16) {
               c->nr_payload_regs += 2;
            }
         }
      }

      /* R27: interpolated depth if uses source depth */
      if (uses_depth) {
         c->source_depth_reg = c->nr_payload_regs;
         c->nr_payload_regs++;
         if (c->dispatch_width == 16) {
            /* R28: interpolated depth if not 8-wide. */
            c->nr_payload_regs++;
         }
      }
      /* R29: interpolated W set if GEN6_WM_USES_SOURCE_W.
       * NOTE(review): source W is allocated under the same uses_depth
       * condition as source depth above — presumably W always
       * accompanies depth in the payload; confirm against WM_STATE
       * setup before changing either condition independently.
       */
      if (uses_depth) {
         c->source_w_reg = c->nr_payload_regs;
         c->nr_payload_regs++;
         if (c->dispatch_width == 16) {
            /* R30: interpolated W if not 8-wide. */
            c->nr_payload_regs++;
         }
      }
      /* R31: MSAA position offsets. */
      /* R32-: bary for 32-pixel. */
      /* R58-59: interp W for 32-pixel. */

      if (c->fp->program.Base.OutputsWritten &
          BITFIELD64_BIT(FRAG_RESULT_DEPTH)) {
         c->source_depth_to_render_target = true;
         c->computes_depth = true;
      }
   } else {
      brw_wm_lookup_iz(intel, c);
   }
}
255
256 bool
257 brw_wm_prog_data_compare(const void *in_a, const void *in_b,
258 int aux_size, const void *in_key)
259 {
260 const struct brw_wm_prog_data *a = in_a;
261 const struct brw_wm_prog_data *b = in_b;
262
263 /* Compare all the struct up to the pointers. */
264 if (memcmp(a, b, offsetof(struct brw_wm_prog_data, param)))
265 return false;
266
267 if (memcmp(a->param, b->param, a->nr_params * sizeof(void *)))
268 return false;
269
270 if (memcmp(a->pull_param, b->pull_param, a->nr_pull_params * sizeof(void *)))
271 return false;
272
273 return true;
274 }
275
276 void
277 brw_wm_prog_data_free(const void *in_prog_data)
278 {
279 const struct brw_wm_prog_data *prog_data = in_prog_data;
280
281 ralloc_free((void *)prog_data->param);
282 ralloc_free((void *)prog_data->pull_param);
283 }
284
/**
 * All Mesa program -> GPU code generation goes through this function.
 * Depending on the instructions used (i.e. flow control instructions)
 * we'll use one of two code generators.
 *
 * On success the compiled program and its prog_data are uploaded to the
 * state cache and brw->wm.prog_offset / brw->wm.prog_data point at the
 * result.  Returns false on out-of-memory or a failed GLSL compile.
 */
bool do_wm_prog(struct brw_context *brw,
                struct gl_shader_program *prog,
                struct brw_fragment_program *fp,
                struct brw_wm_prog_key *key)
{
   struct intel_context *intel = &brw->intel;
   struct brw_wm_compile *c;
   const GLuint *program;
   struct gl_shader *fs = NULL;
   GLuint program_size;

   if (prog)
      fs = prog->_LinkedShaders[MESA_SHADER_FRAGMENT];

   /* The compile struct is cached on the context and reused across
    * compiles.
    */
   c = brw->wm.compile_data;
   if (c == NULL) {
      brw->wm.compile_data = rzalloc(NULL, struct brw_wm_compile);
      c = brw->wm.compile_data;
      if (c == NULL) {
         /* Ouch - big out of memory problem.  Can't continue
          * without triggering a segfault, no way to signal,
          * so just return.
          */
         return false;
      }
   } else {
      /* Reuse: wipe the struct but preserve the lazily-allocated
       * scratch arrays used by the non-GLSL backend (allocated further
       * down on first use).
       */
      void *instruction = c->instruction;
      void *prog_instructions = c->prog_instructions;
      void *vreg = c->vreg;
      void *refs = c->refs;
      memset(c, 0, sizeof(*brw->wm.compile_data));
      c->instruction = instruction;
      c->prog_instructions = prog_instructions;
      c->vreg = vreg;
      c->refs = refs;
   }

   /* Allocate the references to the uniforms that will end up in the
    * prog_data associated with the compiled program, and which will be freed
    * by the state cache.
    */
   if (fs) {
      int param_count = fs->num_uniform_components;
      /* The backend also sometimes adds params for texture size. */
      param_count += 2 * BRW_MAX_TEX_UNIT;

      c->prog_data.param = rzalloc_array(c, const float *, param_count);
      c->prog_data.pull_param = rzalloc_array(c, const float *, param_count);
   } else {
      /* brw_wm_pass0.c will also add references to 0.0 and 1.0 which are
       * uploaded as push parameters.
       */
      int param_count = (fp->program.Base.Parameters->NumParameters + 2) * 4;
      c->prog_data.param = rzalloc_array(c, const float *, param_count);
      /* The old backend never does pull constants. */
      c->prog_data.pull_param = NULL;
   }

   memcpy(&c->key, key, sizeof(*key));

   c->fp = fp;
   c->env_param = brw->intel.ctx.FragmentProgram.Parameters;

   brw_init_compile(brw, &c->func, c);

   c->prog_data.barycentric_interp_modes =
      brw_compute_barycentric_interp_modes(brw, c->key.flat_shade,
                                           &fp->program);

   if (prog && prog->_LinkedShaders[MESA_SHADER_FRAGMENT]) {
      /* GLSL shader: use the new FS backend. */
      if (!brw_wm_fs_emit(brw, c, prog))
         return false;
   } else {
      /* First use of the old backend on this context: allocate its
       * fixed-size scratch arrays.
       */
      if (!c->instruction) {
         c->instruction = rzalloc_array(c, struct brw_wm_instruction, BRW_WM_MAX_INSN);
         c->prog_instructions = rzalloc_array(c, struct prog_instruction, BRW_WM_MAX_INSN);
         c->vreg = rzalloc_array(c, struct brw_wm_value, BRW_WM_MAX_VREG);
         c->refs = rzalloc_array(c, struct brw_wm_ref, BRW_WM_MAX_REF);
      }

      /* Fallback for fixed function and ARB_fp shaders. */
      c->dispatch_width = 16;
      brw_wm_payload_setup(brw, c);
      brw_wm_non_glsl_emit(brw, c);
      c->prog_data.dispatch_width = 16;
   }

   /* Scratch space is used for register spilling */
   if (c->last_scratch) {
      perf_debug("Fragment shader triggered register spilling. "
                 "Try reducing the number of live scalar values to "
                 "improve performance.\n");

      c->prog_data.total_scratch = brw_get_scratch_size(c->last_scratch);

      /* One scratch region per hardware thread. */
      brw_get_scratch_bo(intel, &brw->wm.scratch_bo,
                         c->prog_data.total_scratch * brw->max_wm_threads);
   }

   if (unlikely(INTEL_DEBUG & DEBUG_WM))
      fprintf(stderr, "\n");

   /* get the program
    */
   program = brw_get_program(&c->func, &program_size);

   brw_upload_cache(&brw->cache, BRW_WM_PROG,
                    &c->key, sizeof(c->key),
                    program, program_size,
                    &c->prog_data, sizeof(c->prog_data),
                    &brw->wm.prog_offset, &brw->wm.prog_data);

   return true;
}
404
/**
 * perf_debug() a "name old->new" line when the two key values differ.
 * Returns true iff they differed, so callers can OR up a "changed"
 * flag.
 */
static bool
key_debug(const char *name, int a, int b)
{
   if (a == b)
      return false;

   perf_debug(" %s %d->%d\n", name, a, b);
   return true;
}
415
416 bool
417 brw_debug_recompile_sampler_key(const struct brw_sampler_prog_key_data *old_key,
418 const struct brw_sampler_prog_key_data *key)
419 {
420 bool found = false;
421
422 for (unsigned int i = 0; i < MAX_SAMPLERS; i++) {
423 found |= key_debug("EXT_texture_swizzle or DEPTH_TEXTURE_MODE",
424 old_key->swizzles[i], key->swizzles[i]);
425 }
426 found |= key_debug("GL_CLAMP enabled on any texture unit's 1st coordinate",
427 old_key->gl_clamp_mask[0], key->gl_clamp_mask[0]);
428 found |= key_debug("GL_CLAMP enabled on any texture unit's 2nd coordinate",
429 old_key->gl_clamp_mask[1], key->gl_clamp_mask[1]);
430 found |= key_debug("GL_CLAMP enabled on any texture unit's 3rd coordinate",
431 old_key->gl_clamp_mask[2], key->gl_clamp_mask[2]);
432 found |= key_debug("GL_MESA_ycbcr texturing\n",
433 old_key->yuvtex_mask, key->yuvtex_mask);
434 found |= key_debug("GL_MESA_ycbcr UV swapping\n",
435 old_key->yuvtex_swap_mask, key->yuvtex_swap_mask);
436
437 return found;
438 }
439
/**
 * When a fragment shader recompile happens, search the state cache for
 * the previous compile of the same program (matched by
 * program_string_id) and perf_debug() which key fields changed.
 */
void
brw_wm_debug_recompile(struct brw_context *brw,
                       struct gl_shader_program *prog,
                       const struct brw_wm_prog_key *key)
{
   struct brw_cache_item *c = NULL;
   const struct brw_wm_prog_key *old_key = NULL;
   bool found = false;

   perf_debug("Recompiling fragment shader for program %d\n", prog->Name);

   /* Walk every hash bucket of the program cache looking for a WM
    * program compiled from the same source program.
    */
   for (unsigned int i = 0; i < brw->cache.size; i++) {
      for (c = brw->cache.items[i]; c; c = c->next) {
         if (c->cache_id == BRW_WM_PROG) {
            old_key = c->key;

            if (old_key->program_string_id == key->program_string_id)
               break;
         }
      }
      if (c)
         break;
   }

   if (!c) {
      perf_debug(" Didn't find previous compile in the shader cache for "
                 "debug\n");
      return;
   }

   /* Diff each key field; key_debug() prints the ones that changed. */
   found |= key_debug("alphatest, computed depth, depth test, or depth write",
                      old_key->iz_lookup, key->iz_lookup);
   found |= key_debug("depth statistics", old_key->stats_wm, key->stats_wm);
   found |= key_debug("flat shading", old_key->flat_shade, key->flat_shade);
   found |= key_debug("number of color buffers", old_key->nr_color_regions, key->nr_color_regions);
   found |= key_debug("rendering to FBO", old_key->render_to_fbo, key->render_to_fbo);
   found |= key_debug("fragment color clamping", old_key->clamp_fragment_color, key->clamp_fragment_color);
   found |= key_debug("line smoothing", old_key->line_aa, key->line_aa);
   found |= key_debug("proj_attrib_mask", old_key->proj_attrib_mask, key->proj_attrib_mask);
   found |= key_debug("renderbuffer height", old_key->drawable_height, key->drawable_height);
   found |= key_debug("vertex shader outputs", old_key->vp_outputs_written, key->vp_outputs_written);

   found |= brw_debug_recompile_sampler_key(&old_key->tex, &key->tex);

   if (!found) {
      perf_debug(" Something else\n");
   }
}
488
/**
 * Fill in the sampler-related portion of the WM program key from the
 * currently bound texture/sampler state: per-sampler swizzles
 * (including GL_DEPTH_TEXTURE_MODE handling), GL_MESA_ycbcr
 * workaround masks, and which coordinates need GL_CLAMP emulation.
 */
void
brw_populate_sampler_prog_key_data(struct gl_context *ctx,
                                   const struct gl_program *prog,
                                   struct brw_sampler_prog_key_data *key)
{
   for (int s = 0; s < MAX_SAMPLERS; s++) {
      key->swizzles[s] = SWIZZLE_NOOP;

      if (!(prog->SamplersUsed & (1 << s)))
         continue;

      int unit_id = prog->SamplerUnits[s];
      const struct gl_texture_unit *unit = &ctx->Texture.Unit[unit_id];

      if (unit->_ReallyEnabled && unit->_Current->Target != GL_TEXTURE_BUFFER) {
         const struct gl_texture_object *t = unit->_Current;
         const struct gl_texture_image *img = t->Image[0][t->BaseLevel];
         struct gl_sampler_object *sampler = _mesa_get_samplerobj(ctx, unit_id);
         /* Identity map from swizzle selector to channel; entries are
          * overridden below for depth textures.
          */
         int swizzles[SWIZZLE_NIL + 1] = {
            SWIZZLE_X,
            SWIZZLE_Y,
            SWIZZLE_Z,
            SWIZZLE_W,
            SWIZZLE_ZERO,
            SWIZZLE_ONE,
            SWIZZLE_NIL
         };

         if (img->_BaseFormat == GL_DEPTH_COMPONENT ||
             img->_BaseFormat == GL_DEPTH_STENCIL) {
            /* We handle GL_DEPTH_TEXTURE_MODE here instead of as surface
             * format overrides because shadow comparison always returns the
             * result of the comparison in all channels anyway.
             */
            switch (t->DepthMode) {
            case GL_ALPHA:
               swizzles[0] = SWIZZLE_ZERO;
               swizzles[1] = SWIZZLE_ZERO;
               swizzles[2] = SWIZZLE_ZERO;
               swizzles[3] = SWIZZLE_X;
               break;
            case GL_LUMINANCE:
               swizzles[0] = SWIZZLE_X;
               swizzles[1] = SWIZZLE_X;
               swizzles[2] = SWIZZLE_X;
               swizzles[3] = SWIZZLE_ONE;
               break;
            case GL_INTENSITY:
               swizzles[0] = SWIZZLE_X;
               swizzles[1] = SWIZZLE_X;
               swizzles[2] = SWIZZLE_X;
               swizzles[3] = SWIZZLE_X;
               break;
            case GL_RED:
               swizzles[0] = SWIZZLE_X;
               swizzles[1] = SWIZZLE_ZERO;
               swizzles[2] = SWIZZLE_ZERO;
               swizzles[3] = SWIZZLE_ONE;
               break;
            }
         }

         if (img->InternalFormat == GL_YCBCR_MESA) {
            key->yuvtex_mask |= 1 << s;
            if (img->TexFormat == MESA_FORMAT_YCBCR)
               key->yuvtex_swap_mask |= 1 << s;
         }

         /* Compose the application's texture swizzle with the
          * depth-mode swizzle table built above.
          */
         key->swizzles[s] =
            MAKE_SWIZZLE4(swizzles[GET_SWZ(t->_Swizzle, 0)],
                          swizzles[GET_SWZ(t->_Swizzle, 1)],
                          swizzles[GET_SWZ(t->_Swizzle, 2)],
                          swizzles[GET_SWZ(t->_Swizzle, 3)]);

         /* Record coordinates using GL_CLAMP, but only for filtered
          * sampling — presumably NEAREST never hits the border so no
          * shader-side clamp is needed; confirm against the surface
          * state setup.
          */
         if (sampler->MinFilter != GL_NEAREST &&
             sampler->MagFilter != GL_NEAREST) {
            if (sampler->WrapS == GL_CLAMP)
               key->gl_clamp_mask[0] |= 1 << s;
            if (sampler->WrapT == GL_CLAMP)
               key->gl_clamp_mask[1] |= 1 << s;
            if (sampler->WrapR == GL_CLAMP)
               key->gl_clamp_mask[2] |= 1 << s;
         }
      }
   }
}
575
/**
 * Gather all GL and driver state affecting fragment program compilation
 * into \p key, which is used for state-cache lookup.  Every piece of
 * state read here must be covered by the dirty flags listed in the
 * brw_wm_prog state atom at the bottom of this file.
 */
static void brw_wm_populate_key( struct brw_context *brw,
                                 struct brw_wm_prog_key *key )
{
   struct gl_context *ctx = &brw->intel.ctx;
   struct intel_context *intel = &brw->intel;
   /* BRW_NEW_FRAGMENT_PROGRAM */
   const struct brw_fragment_program *fp =
      (struct brw_fragment_program *)brw->fragment_program;
   const struct gl_program *prog = (struct gl_program *) brw->fragment_program;
   GLuint lookup = 0;
   GLuint line_aa;
   bool program_uses_dfdy = fp->program.UsesDFdy;

   memset(key, 0, sizeof(*key));

   /* Build the index for table lookup
    * (the interpolation/z setup table consumed by brw_wm_lookup_iz();
    * only needed on pre-gen6 hardware).
    */
   if (intel->gen < 6) {
      /* _NEW_COLOR */
      if (fp->program.UsesKill || ctx->Color.AlphaEnabled)
         lookup |= IZ_PS_KILL_ALPHATEST_BIT;

      if (fp->program.Base.OutputsWritten & BITFIELD64_BIT(FRAG_RESULT_DEPTH))
         lookup |= IZ_PS_COMPUTES_DEPTH_BIT;

      /* _NEW_DEPTH */
      if (ctx->Depth.Test)
         lookup |= IZ_DEPTH_TEST_ENABLE_BIT;

      if (ctx->Depth.Test && ctx->Depth.Mask) /* ?? */
         lookup |= IZ_DEPTH_WRITE_ENABLE_BIT;

      /* _NEW_STENCIL */
      if (ctx->Stencil._Enabled) {
         lookup |= IZ_STENCIL_TEST_ENABLE_BIT;

         if (ctx->Stencil.WriteMask[0] ||
             ctx->Stencil.WriteMask[ctx->Stencil._BackFace])
            lookup |= IZ_STENCIL_WRITE_ENABLE_BIT;
      }
      key->iz_lookup = lookup;
   }

   /* Classify line antialiasing as never / always / sometimes
    * ("sometimes" when it depends on which faces of a triangle are
    * drawn as lines).
    */
   line_aa = AA_NEVER;

   /* _NEW_LINE, _NEW_POLYGON, BRW_NEW_REDUCED_PRIMITIVE */
   if (ctx->Line.SmoothFlag) {
      if (brw->intel.reduced_primitive == GL_LINES) {
         line_aa = AA_ALWAYS;
      }
      else if (brw->intel.reduced_primitive == GL_TRIANGLES) {
         if (ctx->Polygon.FrontMode == GL_LINE) {
            line_aa = AA_SOMETIMES;

            if (ctx->Polygon.BackMode == GL_LINE ||
                (ctx->Polygon.CullFlag &&
                 ctx->Polygon.CullFaceMode == GL_BACK))
               line_aa = AA_ALWAYS;
         }
         else if (ctx->Polygon.BackMode == GL_LINE) {
            line_aa = AA_SOMETIMES;

            if ((ctx->Polygon.CullFlag &&
                 ctx->Polygon.CullFaceMode == GL_FRONT))
               line_aa = AA_ALWAYS;
         }
      }
   }

   key->line_aa = line_aa;

   if (intel->gen < 6)
      key->stats_wm = brw->intel.stats_wm;

   /* BRW_NEW_WM_INPUT_DIMENSIONS */
   /* Only set this for fixed function.  The optimization it enables isn't
    * useful for programs using shaders.
    */
   if (ctx->Shader.CurrentFragmentProgram)
      key->proj_attrib_mask = 0xffffffff;
   else
      key->proj_attrib_mask = brw->wm.input_size_masks[4-1];

   /* _NEW_LIGHT */
   key->flat_shade = (ctx->Light.ShadeModel == GL_FLAT);

   /* _NEW_FRAG_CLAMP | _NEW_BUFFERS */
   key->clamp_fragment_color = ctx->Color._ClampFragmentColor;

   /* _NEW_TEXTURE */
   brw_populate_sampler_prog_key_data(ctx, prog, &key->tex);

   /* _NEW_BUFFERS */
   /*
    * Include the draw buffer origin and height so that we can calculate
    * fragment position values relative to the bottom left of the drawable,
    * from the incoming screen origin relative position we get as part of our
    * payload.
    *
    * This is only needed for the WM_WPOSXY opcode when the fragment program
    * uses the gl_FragCoord input.
    *
    * We could avoid recompiling by including this as a constant referenced by
    * our program, but if we were to do that it would also be nice to handle
    * getting that constant updated at batchbuffer submit time (when we
    * hold the lock and know where the buffer really is) rather than at emit
    * time when we don't hold the lock and are just guessing.  We could also
    * just avoid using this as key data if the program doesn't use
    * fragment.position.
    *
    * For DRI2 the origin_x/y will always be (0,0) but we still need the
    * drawable height in order to invert the Y axis.
    */
   if (fp->program.Base.InputsRead & FRAG_BIT_WPOS) {
      key->drawable_height = ctx->DrawBuffer->Height;
   }

   /* dFdy also flips sign with the Y axis, so it needs the FBO bit too. */
   if ((fp->program.Base.InputsRead & FRAG_BIT_WPOS) || program_uses_dfdy) {
      key->render_to_fbo = _mesa_is_user_fbo(ctx->DrawBuffer);
   }

   /* _NEW_BUFFERS */
   key->nr_color_regions = ctx->DrawBuffer->_NumColorDrawBuffers;
   /* _NEW_MULTISAMPLE */
   key->sample_alpha_to_coverage = ctx->Multisample.SampleAlphaToCoverage;

   /* CACHE_NEW_VS_PROG */
   if (intel->gen < 6)
      key->vp_outputs_written = brw->vs.prog_data->outputs_written;

   /* The unique fragment program ID */
   key->program_string_id = fp->id;
}
709
710
711 static void
712 brw_upload_wm_prog(struct brw_context *brw)
713 {
714 struct intel_context *intel = &brw->intel;
715 struct gl_context *ctx = &intel->ctx;
716 struct brw_wm_prog_key key;
717 struct brw_fragment_program *fp = (struct brw_fragment_program *)
718 brw->fragment_program;
719
720 brw_wm_populate_key(brw, &key);
721
722 if (!brw_search_cache(&brw->cache, BRW_WM_PROG,
723 &key, sizeof(key),
724 &brw->wm.prog_offset, &brw->wm.prog_data)) {
725 bool success = do_wm_prog(brw, ctx->Shader._CurrentFragmentProgram, fp,
726 &key);
727 (void) success;
728 assert(success);
729 }
730 }
731
732
/* State atom: re-runs brw_upload_wm_prog() whenever any listed state
 * changes.  The flags here must cover every piece of state read by
 * brw_wm_populate_key() above.
 */
const struct brw_tracked_state brw_wm_prog = {
   .dirty = {
      .mesa = (_NEW_COLOR |
               _NEW_DEPTH |
               _NEW_STENCIL |
               _NEW_POLYGON |
               _NEW_LINE |
               _NEW_LIGHT |
               _NEW_FRAG_CLAMP |
               _NEW_BUFFERS |
               _NEW_TEXTURE |
               _NEW_MULTISAMPLE),
      .brw = (BRW_NEW_FRAGMENT_PROGRAM |
              BRW_NEW_WM_INPUT_DIMENSIONS |
              BRW_NEW_REDUCED_PRIMITIVE),
      .cache = CACHE_NEW_VS_PROG,
   },
   .emit = brw_upload_wm_prog
};
752