i965: Do texture swizzling in hardware on Haswell.
[mesa.git] src/mesa/drivers/dri/i965/brw_wm.c
/*
 Copyright (C) Intel Corp.  2006.  All Rights Reserved.
 Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
 /*
  * Authors:
  *   Keith Whitwell <keith@tungstengraphics.com>
  */

#include "brw_context.h"
#include "brw_wm.h"
#include "brw_state.h"
#include "main/formats.h"
#include "main/fbobject.h"
#include "main/samplerobj.h"
#include "program/prog_parameter.h"

#include "glsl/ralloc.h"

/** Return number of src args for given instruction */
GLuint brw_wm_nr_args( GLuint opcode )
{
   switch (opcode) {
   case WM_FRONTFACING:
   case WM_PIXELXY:
      return 0;
   case WM_CINTERP:
   case WM_WPOSXY:
   case WM_DELTAXY:
      return 1;
   case WM_LINTERP:
   case WM_PIXELW:
      return 2;
   case WM_FB_WRITE:
   case WM_PINTERP:
      return 3;
   default:
      assert(opcode < MAX_OPCODE);
      return _mesa_num_inst_src_regs(opcode);
   }
}


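/** Return 1 if the given opcode produces a single scalar result (math
 * functions and dot products), 0 otherwise.
 */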
GLuint brw_wm_is_scalar_result( GLuint opcode )
{
   switch (opcode) {
   case OPCODE_COS:
   case OPCODE_EX2:
   case OPCODE_LG2:
   case OPCODE_POW:
   case OPCODE_RCP:
   case OPCODE_RSQ:
   case OPCODE_SIN:
   case OPCODE_DP2:
   case OPCODE_DP3:
   case OPCODE_DP4:
   case OPCODE_DPH:
   case OPCODE_DST:
      return 1;

   default:
      return 0;
   }
}


/**
 * Do GPU code generation for non-GLSL shader.  non-GLSL shaders have
 * no flow control instructions so we can more readily do SSA-style
 * optimizations.
 */
static void
brw_wm_non_glsl_emit(struct brw_context *brw, struct brw_wm_compile *c)
{
   /* Augment fragment program.  Add instructions for pre- and
    * post-fragment-program tasks such as interpolation and fogging.
    */
   brw_wm_pass_fp(c);

   /* Translate to intermediate representation.  Build register usage
    * chains.
    */
   brw_wm_pass0(c);

   /* Dead code removal.
    */
   brw_wm_pass1(c);

   /* Register allocation.
    * Divide by two because we operate on 16 pixels at a time and require
    * two GRF entries for each logical shader register.
    */
   c->grf_limit = BRW_WM_MAX_GRF / 2;

   brw_wm_pass2(c);

   /* how many general-purpose registers are used */
   c->prog_data.reg_blocks = brw_register_blocks(c->max_wm_grf);

   /* Emit GEN4 code.
    */
   brw_wm_emit(c);
}


/**
 * Return a bitfield where bit n is set if barycentric interpolation mode n
 * (see enum brw_wm_barycentric_interp_mode) is needed by the fragment shader.
 */
static unsigned
brw_compute_barycentric_interp_modes(struct brw_context *brw,
                                     bool shade_model_flat,
                                     const struct gl_fragment_program *fprog)
{
   unsigned barycentric_interp_modes = 0;
   int attr;

   /* Loop through all fragment shader inputs to figure out what interpolation
    * modes are in use, and set the appropriate bits in
    * barycentric_interp_modes.
    */
   for (attr = 0; attr < FRAG_ATTRIB_MAX; ++attr) {
      enum glsl_interp_qualifier interp_qualifier =
         fprog->InterpQualifier[attr];
      bool is_centroid = fprog->IsCentroid & BITFIELD64_BIT(attr);
      bool is_gl_Color = attr == FRAG_ATTRIB_COL0 || attr == FRAG_ATTRIB_COL1;

      /* Ignore unused inputs. */
      if (!(fprog->Base.InputsRead & BITFIELD64_BIT(attr)))
         continue;

      /* Ignore WPOS and FACE, because they don't require interpolation. */
      if (attr == FRAG_ATTRIB_WPOS || attr == FRAG_ATTRIB_FACE)
         continue;

      /* Determine the set (or sets) of barycentric coordinates needed to
       * interpolate this variable.  Note that when
       * brw->needs_unlit_centroid_workaround is set, centroid interpolation
       * uses PIXEL interpolation for unlit pixels and CENTROID interpolation
       * for lit pixels, so we need both sets of barycentric coordinates.
       */
      if (interp_qualifier == INTERP_QUALIFIER_NOPERSPECTIVE) {
         if (is_centroid) {
            barycentric_interp_modes |=
               1 << BRW_WM_NONPERSPECTIVE_CENTROID_BARYCENTRIC;
         }
         if (!is_centroid || brw->needs_unlit_centroid_workaround) {
            barycentric_interp_modes |=
               1 << BRW_WM_NONPERSPECTIVE_PIXEL_BARYCENTRIC;
         }
      } else if (interp_qualifier == INTERP_QUALIFIER_SMOOTH ||
                 (!(shade_model_flat && is_gl_Color) &&
                  interp_qualifier == INTERP_QUALIFIER_NONE)) {
         if (is_centroid) {
            barycentric_interp_modes |=
               1 << BRW_WM_PERSPECTIVE_CENTROID_BARYCENTRIC;
         }
         if (!is_centroid || brw->needs_unlit_centroid_workaround) {
            barycentric_interp_modes |=
               1 << BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC;
         }
      }
   }

   return barycentric_interp_modes;
}


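/**
 * Compute where each input (barycentric coordinates, source depth, source W)
 * lives in the Gen6+ fragment shader thread payload, based on the dispatch
 * width and the barycentric modes the shader needs.
 *
 * As a worked example (illustrative only): a SIMD16 shader using one
 * barycentric mode and reading gl_FragCoord gets 2 registers for masks and
 * pixel X/Y, 4 registers of barycentrics, 2 of interpolated depth and 2 of
 * interpolated W, for 10 payload registers in total.
 *
 * On pre-Gen6 hardware the payload layout comes from brw_wm_lookup_iz()
 * instead.
 */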
void
brw_wm_payload_setup(struct brw_context *brw,
                     struct brw_wm_compile *c)
{
   struct intel_context *intel = &brw->intel;
   bool uses_depth = (c->fp->program.Base.InputsRead &
                      (1 << FRAG_ATTRIB_WPOS)) != 0;
   unsigned barycentric_interp_modes = c->prog_data.barycentric_interp_modes;
   int i;

   if (intel->gen >= 6) {
      /* R0-1: masks, pixel X/Y coordinates. */
      c->nr_payload_regs = 2;
      /* R2: only for 32-pixel dispatch.*/

      /* R3-26: barycentric interpolation coordinates.  These appear in the
       * same order that they appear in the brw_wm_barycentric_interp_mode
       * enum.  Each set of coordinates occupies 2 registers if dispatch width
       * == 8 and 4 registers if dispatch width == 16.  Coordinates only
       * appear if they were enabled using the "Barycentric Interpolation
       * Mode" bits in WM_STATE.
       */
      for (i = 0; i < BRW_WM_BARYCENTRIC_INTERP_MODE_COUNT; ++i) {
         if (barycentric_interp_modes & (1 << i)) {
            c->barycentric_coord_reg[i] = c->nr_payload_regs;
            c->nr_payload_regs += 2;
            if (c->dispatch_width == 16) {
               c->nr_payload_regs += 2;
            }
         }
      }

      /* R27: interpolated depth if uses source depth */
      if (uses_depth) {
         c->source_depth_reg = c->nr_payload_regs;
         c->nr_payload_regs++;
         if (c->dispatch_width == 16) {
            /* R28: interpolated depth if not 8-wide. */
            c->nr_payload_regs++;
         }
      }
      /* R29: interpolated W set if GEN6_WM_USES_SOURCE_W.
       */
      if (uses_depth) {
         c->source_w_reg = c->nr_payload_regs;
         c->nr_payload_regs++;
         if (c->dispatch_width == 16) {
            /* R30: interpolated W if not 8-wide. */
            c->nr_payload_regs++;
         }
      }
      /* R31: MSAA position offsets. */
      /* R32-: bary for 32-pixel. */
      /* R58-59: interp W for 32-pixel. */

      if (c->fp->program.Base.OutputsWritten &
          BITFIELD64_BIT(FRAG_RESULT_DEPTH)) {
         c->source_depth_to_render_target = true;
         c->computes_depth = true;
      }
   } else {
      brw_wm_lookup_iz(intel, c);
   }
}

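/**
 * Compare two brw_wm_prog_data structures (used by the program cache):
 * memcmp() the fixed-size part up to the first pointer member, then compare
 * the param and pull_param arrays they point to.
 */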
bool
brw_wm_prog_data_compare(const void *in_a, const void *in_b,
                         int aux_size, const void *in_key)
{
   const struct brw_wm_prog_data *a = in_a;
   const struct brw_wm_prog_data *b = in_b;

   /* Compare all the struct up to the pointers. */
   if (memcmp(a, b, offsetof(struct brw_wm_prog_data, param)))
      return false;

   if (memcmp(a->param, b->param, a->nr_params * sizeof(void *)))
      return false;

   if (memcmp(a->pull_param, b->pull_param, a->nr_pull_params * sizeof(void *)))
      return false;

   return true;
}

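/* Release the param arrays a cached prog_data points to; the counterpart to
 * the allocations made in do_wm_prog().
 */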
void
brw_wm_prog_data_free(const void *in_prog_data)
{
   const struct brw_wm_prog_data *prog_data = in_prog_data;

   ralloc_free((void *)prog_data->param);
   ralloc_free((void *)prog_data->pull_param);
}

/**
 * All Mesa program -> GPU code generation goes through this function.
 * Depending on the instructions used (i.e. flow control instructions)
 * we'll use one of two code generators.
 */
bool do_wm_prog(struct brw_context *brw,
                struct gl_shader_program *prog,
                struct brw_fragment_program *fp,
                struct brw_wm_prog_key *key)
{
   struct intel_context *intel = &brw->intel;
   struct brw_wm_compile *c;
   const GLuint *program;
   struct gl_shader *fs = NULL;
   GLuint program_size;

   if (prog)
      fs = prog->_LinkedShaders[MESA_SHADER_FRAGMENT];

   c = brw->wm.compile_data;
   if (c == NULL) {
      brw->wm.compile_data = rzalloc(NULL, struct brw_wm_compile);
      c = brw->wm.compile_data;
      if (c == NULL) {
         /* Ouch - big out of memory problem.  Can't continue
          * without triggering a segfault, no way to signal,
          * so just return.
          */
         return false;
      }
   } else {
      void *instruction = c->instruction;
      void *prog_instructions = c->prog_instructions;
      void *vreg = c->vreg;
      void *refs = c->refs;
      memset(c, 0, sizeof(*brw->wm.compile_data));
      c->instruction = instruction;
      c->prog_instructions = prog_instructions;
      c->vreg = vreg;
      c->refs = refs;
   }

   /* Allocate the references to the uniforms that will end up in the
    * prog_data associated with the compiled program, and which will be freed
    * by the state cache.
    */
   if (fs) {
      int param_count = fs->num_uniform_components;
      /* The backend also sometimes adds params for texture size. */
      param_count += 2 * BRW_MAX_TEX_UNIT;

      c->prog_data.param = rzalloc_array(c, const float *, param_count);
      c->prog_data.pull_param = rzalloc_array(c, const float *, param_count);
   } else {
      /* brw_wm_pass0.c will also add references to 0.0 and 1.0 which are
       * uploaded as push parameters.
       */
      int param_count = (fp->program.Base.Parameters->NumParameters + 2) * 4;
      c->prog_data.param = rzalloc_array(c, const float *, param_count);
      /* The old backend never does pull constants. */
      c->prog_data.pull_param = NULL;
   }

   memcpy(&c->key, key, sizeof(*key));

   c->fp = fp;
   c->env_param = brw->intel.ctx.FragmentProgram.Parameters;

   brw_init_compile(brw, &c->func, c);

   c->prog_data.barycentric_interp_modes =
      brw_compute_barycentric_interp_modes(brw, c->key.flat_shade,
                                           &fp->program);

   if (prog && prog->_LinkedShaders[MESA_SHADER_FRAGMENT]) {
      if (!brw_wm_fs_emit(brw, c, prog))
         return false;
   } else {
      if (!c->instruction) {
         c->instruction = rzalloc_array(c, struct brw_wm_instruction, BRW_WM_MAX_INSN);
         c->prog_instructions = rzalloc_array(c, struct prog_instruction, BRW_WM_MAX_INSN);
         c->vreg = rzalloc_array(c, struct brw_wm_value, BRW_WM_MAX_VREG);
         c->refs = rzalloc_array(c, struct brw_wm_ref, BRW_WM_MAX_REF);
      }

      /* Fallback for fixed function and ARB_fp shaders. */
      c->dispatch_width = 16;
      brw_wm_payload_setup(brw, c);
      brw_wm_non_glsl_emit(brw, c);
      c->prog_data.dispatch_width = 16;
   }

   /* Scratch space is used for register spilling */
   if (c->last_scratch) {
      perf_debug("Fragment shader triggered register spilling. "
                 "Try reducing the number of live scalar values to "
                 "improve performance.\n");

      c->prog_data.total_scratch = brw_get_scratch_size(c->last_scratch);

      brw_get_scratch_bo(intel, &brw->wm.scratch_bo,
                         c->prog_data.total_scratch * brw->max_wm_threads);
   }

   if (unlikely(INTEL_DEBUG & DEBUG_WM))
      fprintf(stderr, "\n");

   /* get the program
    */
   program = brw_get_program(&c->func, &program_size);

   brw_upload_cache(&brw->cache, BRW_WM_PROG,
                    &c->key, sizeof(c->key),
                    program, program_size,
                    &c->prog_data, sizeof(c->prog_data),
                    &brw->wm.prog_offset, &brw->wm.prog_data);

   return true;
}

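/* Emit a perf_debug line when a single key field differs between the old and
 * new compile; returns true if it differed.
 */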
static bool
key_debug(const char *name, int a, int b)
{
   if (a != b) {
      perf_debug("  %s %d->%d\n", name, a, b);
      return true;
   } else {
      return false;
   }
}

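/**
 * Report which sampler-related key fields differ between two compiles of the
 * same program.  Returns true if any difference was found.
 */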
bool
brw_debug_recompile_sampler_key(const struct brw_sampler_prog_key_data *old_key,
                                const struct brw_sampler_prog_key_data *key)
{
   bool found = false;

   for (unsigned int i = 0; i < MAX_SAMPLERS; i++) {
      found |= key_debug("EXT_texture_swizzle or DEPTH_TEXTURE_MODE",
                         old_key->swizzles[i], key->swizzles[i]);
   }
   found |= key_debug("GL_CLAMP enabled on any texture unit's 1st coordinate",
                      old_key->gl_clamp_mask[0], key->gl_clamp_mask[0]);
   found |= key_debug("GL_CLAMP enabled on any texture unit's 2nd coordinate",
                      old_key->gl_clamp_mask[1], key->gl_clamp_mask[1]);
   found |= key_debug("GL_CLAMP enabled on any texture unit's 3rd coordinate",
                      old_key->gl_clamp_mask[2], key->gl_clamp_mask[2]);
   found |= key_debug("GL_MESA_ycbcr texturing",
                      old_key->yuvtex_mask, key->yuvtex_mask);
   found |= key_debug("GL_MESA_ycbcr UV swapping",
                      old_key->yuvtex_swap_mask, key->yuvtex_swap_mask);

   return found;
}

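/**
 * Called when a fragment program is compiled a second time with a different
 * key: find the previous compile in the program cache (matched by
 * program_string_id) and report which key fields changed.
 */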
void
brw_wm_debug_recompile(struct brw_context *brw,
                       struct gl_shader_program *prog,
                       const struct brw_wm_prog_key *key)
{
   struct brw_cache_item *c = NULL;
   const struct brw_wm_prog_key *old_key = NULL;
   bool found = false;

   perf_debug("Recompiling fragment shader for program %d\n", prog->Name);

   for (unsigned int i = 0; i < brw->cache.size; i++) {
      for (c = brw->cache.items[i]; c; c = c->next) {
         if (c->cache_id == BRW_WM_PROG) {
            old_key = c->key;

            if (old_key->program_string_id == key->program_string_id)
               break;
         }
      }
      if (c)
         break;
   }

   if (!c) {
      perf_debug("  Didn't find previous compile in the shader cache for "
                 "debug\n");
      return;
   }

   found |= key_debug("alphatest, computed depth, depth test, or depth write",
                      old_key->iz_lookup, key->iz_lookup);
   found |= key_debug("depth statistics", old_key->stats_wm, key->stats_wm);
   found |= key_debug("flat shading", old_key->flat_shade, key->flat_shade);
   found |= key_debug("number of color buffers",
                      old_key->nr_color_regions, key->nr_color_regions);
   found |= key_debug("rendering to FBO",
                      old_key->render_to_fbo, key->render_to_fbo);
   found |= key_debug("fragment color clamping",
                      old_key->clamp_fragment_color, key->clamp_fragment_color);
   found |= key_debug("line smoothing", old_key->line_aa, key->line_aa);
   found |= key_debug("proj_attrib_mask",
                      old_key->proj_attrib_mask, key->proj_attrib_mask);
   found |= key_debug("renderbuffer height",
                      old_key->drawable_height, key->drawable_height);
   found |= key_debug("vertex shader outputs",
                      old_key->vp_outputs_written, key->vp_outputs_written);

   found |= brw_debug_recompile_sampler_key(&old_key->tex, &key->tex);

   if (!found) {
      perf_debug("  Something else\n");
   }
}

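/**
 * Gather the per-sampler texture state that has to be baked into the
 * compiled fragment program: swizzles (including depth-texture modes),
 * YCbCr formats, and GL_CLAMP wrap modes handled in the shader.
 */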
void
brw_populate_sampler_prog_key_data(struct gl_context *ctx,
                                   const struct gl_program *prog,
                                   struct brw_sampler_prog_key_data *key)
{
   struct intel_context *intel = intel_context(ctx);

   for (int s = 0; s < MAX_SAMPLERS; s++) {
      key->swizzles[s] = SWIZZLE_NOOP;

      if (!(prog->SamplersUsed & (1 << s)))
         continue;

      int unit_id = prog->SamplerUnits[s];
      const struct gl_texture_unit *unit = &ctx->Texture.Unit[unit_id];

      if (unit->_ReallyEnabled && unit->_Current->Target != GL_TEXTURE_BUFFER) {
         const struct gl_texture_object *t = unit->_Current;
         const struct gl_texture_image *img = t->Image[0][t->BaseLevel];
         struct gl_sampler_object *sampler = _mesa_get_samplerobj(ctx, unit_id);

         const bool alpha_depth = t->DepthMode == GL_ALPHA &&
            (img->_BaseFormat == GL_DEPTH_COMPONENT ||
             img->_BaseFormat == GL_DEPTH_STENCIL);

         /* Haswell handles texture swizzling as surface format overrides
          * (except for GL_ALPHA); all other platforms need MOVs in the shader.
          */
         if (!intel->is_haswell || alpha_depth)
            key->swizzles[s] = brw_get_texture_swizzle(t);

         if (img->InternalFormat == GL_YCBCR_MESA) {
            key->yuvtex_mask |= 1 << s;
            if (img->TexFormat == MESA_FORMAT_YCBCR)
               key->yuvtex_swap_mask |= 1 << s;
         }

         if (sampler->MinFilter != GL_NEAREST &&
             sampler->MagFilter != GL_NEAREST) {
            if (sampler->WrapS == GL_CLAMP)
               key->gl_clamp_mask[0] |= 1 << s;
            if (sampler->WrapT == GL_CLAMP)
               key->gl_clamp_mask[1] |= 1 << s;
            if (sampler->WrapR == GL_CLAMP)
               key->gl_clamp_mask[2] |= 1 << s;
         }
      }
   }
}

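/**
 * Gather all of the GL state that influences fragment program compilation
 * into a brw_wm_prog_key, so the resulting compile can be cached and looked
 * up by key.
 */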
static void brw_wm_populate_key( struct brw_context *brw,
                                 struct brw_wm_prog_key *key )
{
   struct gl_context *ctx = &brw->intel.ctx;
   struct intel_context *intel = &brw->intel;
   /* BRW_NEW_FRAGMENT_PROGRAM */
   const struct brw_fragment_program *fp =
      (struct brw_fragment_program *)brw->fragment_program;
   const struct gl_program *prog = (struct gl_program *) brw->fragment_program;
   GLuint lookup = 0;
   GLuint line_aa;
   bool program_uses_dfdy = fp->program.UsesDFdy;

   memset(key, 0, sizeof(*key));

   /* Build the index for table lookup
    */
   if (intel->gen < 6) {
      /* _NEW_COLOR */
      if (fp->program.UsesKill || ctx->Color.AlphaEnabled)
         lookup |= IZ_PS_KILL_ALPHATEST_BIT;

      if (fp->program.Base.OutputsWritten & BITFIELD64_BIT(FRAG_RESULT_DEPTH))
         lookup |= IZ_PS_COMPUTES_DEPTH_BIT;

      /* _NEW_DEPTH */
      if (ctx->Depth.Test)
         lookup |= IZ_DEPTH_TEST_ENABLE_BIT;

      if (ctx->Depth.Test && ctx->Depth.Mask) /* ?? */
         lookup |= IZ_DEPTH_WRITE_ENABLE_BIT;

      /* _NEW_STENCIL */
      if (ctx->Stencil._Enabled) {
         lookup |= IZ_STENCIL_TEST_ENABLE_BIT;

         if (ctx->Stencil.WriteMask[0] ||
             ctx->Stencil.WriteMask[ctx->Stencil._BackFace])
            lookup |= IZ_STENCIL_WRITE_ENABLE_BIT;
      }
      key->iz_lookup = lookup;
   }

   line_aa = AA_NEVER;

   /* _NEW_LINE, _NEW_POLYGON, BRW_NEW_REDUCED_PRIMITIVE */
   if (ctx->Line.SmoothFlag) {
      if (brw->intel.reduced_primitive == GL_LINES) {
         line_aa = AA_ALWAYS;
      }
      else if (brw->intel.reduced_primitive == GL_TRIANGLES) {
         if (ctx->Polygon.FrontMode == GL_LINE) {
            line_aa = AA_SOMETIMES;

            if (ctx->Polygon.BackMode == GL_LINE ||
                (ctx->Polygon.CullFlag &&
                 ctx->Polygon.CullFaceMode == GL_BACK))
               line_aa = AA_ALWAYS;
         }
         else if (ctx->Polygon.BackMode == GL_LINE) {
            line_aa = AA_SOMETIMES;

            if ((ctx->Polygon.CullFlag &&
                 ctx->Polygon.CullFaceMode == GL_FRONT))
               line_aa = AA_ALWAYS;
         }
      }
   }

   key->line_aa = line_aa;

   if (intel->gen < 6)
      key->stats_wm = brw->intel.stats_wm;

   /* BRW_NEW_WM_INPUT_DIMENSIONS */
   /* Only set this for fixed function.  The optimization it enables isn't
    * useful for programs using shaders.
    */
   if (ctx->Shader.CurrentFragmentProgram)
      key->proj_attrib_mask = 0xffffffff;
   else
      key->proj_attrib_mask = brw->wm.input_size_masks[4-1];

   /* _NEW_LIGHT */
   key->flat_shade = (ctx->Light.ShadeModel == GL_FLAT);

   /* _NEW_FRAG_CLAMP | _NEW_BUFFERS */
   key->clamp_fragment_color = ctx->Color._ClampFragmentColor;

   /* _NEW_TEXTURE */
   brw_populate_sampler_prog_key_data(ctx, prog, &key->tex);

   /* _NEW_BUFFERS */
   /*
    * Include the draw buffer origin and height so that we can calculate
    * fragment position values relative to the bottom left of the drawable,
    * from the incoming screen origin relative position we get as part of our
    * payload.
    *
    * This is only needed for the WM_WPOSXY opcode when the fragment program
    * uses the gl_FragCoord input.
    *
    * We could avoid recompiling by including this as a constant referenced by
    * our program, but if we were to do that it would also be nice to handle
    * getting that constant updated at batchbuffer submit time (when we
    * hold the lock and know where the buffer really is) rather than at emit
    * time when we don't hold the lock and are just guessing.  We could also
    * just avoid using this as key data if the program doesn't use
    * fragment.position.
    *
    * For DRI2 the origin_x/y will always be (0,0) but we still need the
    * drawable height in order to invert the Y axis.
    */
   if (fp->program.Base.InputsRead & FRAG_BIT_WPOS) {
      key->drawable_height = ctx->DrawBuffer->Height;
   }

   if ((fp->program.Base.InputsRead & FRAG_BIT_WPOS) || program_uses_dfdy) {
      key->render_to_fbo = _mesa_is_user_fbo(ctx->DrawBuffer);
   }

   /* _NEW_BUFFERS */
   key->nr_color_regions = ctx->DrawBuffer->_NumColorDrawBuffers;
   /* _NEW_MULTISAMPLE */
   key->sample_alpha_to_coverage = ctx->Multisample.SampleAlphaToCoverage;

   /* CACHE_NEW_VS_PROG */
   if (intel->gen < 6)
      key->vp_outputs_written = brw->vs.prog_data->outputs_written;

   /* The unique fragment program ID */
   key->program_string_id = fp->id;
}


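/* State atom emit function: build the key from current GL state, then look
 * up the compiled fragment program in the cache, compiling it with
 * do_wm_prog() on a miss.
 */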
static void
brw_upload_wm_prog(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;
   struct gl_context *ctx = &intel->ctx;
   struct brw_wm_prog_key key;
   struct brw_fragment_program *fp = (struct brw_fragment_program *)
      brw->fragment_program;

   brw_wm_populate_key(brw, &key);

   if (!brw_search_cache(&brw->cache, BRW_WM_PROG,
                         &key, sizeof(key),
                         &brw->wm.prog_offset, &brw->wm.prog_data)) {
      bool success = do_wm_prog(brw, ctx->Shader._CurrentFragmentProgram, fp,
                                &key);
      (void) success;
      assert(success);
   }
}


const struct brw_tracked_state brw_wm_prog = {
   .dirty = {
      .mesa = (_NEW_COLOR |
               _NEW_DEPTH |
               _NEW_STENCIL |
               _NEW_POLYGON |
               _NEW_LINE |
               _NEW_LIGHT |
               _NEW_FRAG_CLAMP |
               _NEW_BUFFERS |
               _NEW_TEXTURE |
               _NEW_MULTISAMPLE),
      .brw = (BRW_NEW_FRAGMENT_PROGRAM |
              BRW_NEW_WM_INPUT_DIMENSIONS |
              BRW_NEW_REDUCED_PRIMITIVE),
      .cache = CACHE_NEW_VS_PROG,
   },
   .emit = brw_upload_wm_prog
};
