i965: Refactor texture swizzle generation into a helper.
[mesa.git] / src / mesa / drivers / dri / i965 / brw_wm.c
/*
 Copyright (C) Intel Corp. 2006. All Rights Reserved.
 Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keith@tungstengraphics.com>
 */

#include "brw_context.h"
#include "brw_wm.h"
#include "brw_state.h"
#include "main/formats.h"
#include "main/fbobject.h"
#include "main/samplerobj.h"
#include "program/prog_parameter.h"

#include "glsl/ralloc.h"

/** Return number of src args for given instruction */
GLuint brw_wm_nr_args( GLuint opcode )
{
   switch (opcode) {
   case WM_FRONTFACING:
   case WM_PIXELXY:
      return 0;
   case WM_CINTERP:
   case WM_WPOSXY:
   case WM_DELTAXY:
      return 1;
   case WM_LINTERP:
   case WM_PIXELW:
      return 2;
   case WM_FB_WRITE:
   case WM_PINTERP:
      return 3;
   default:
      assert(opcode < MAX_OPCODE);
      return _mesa_num_inst_src_regs(opcode);
   }
}

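/** Return 1 if the result of the given opcode is treated as a scalar value. */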
GLuint brw_wm_is_scalar_result( GLuint opcode )
{
   switch (opcode) {
   case OPCODE_COS:
   case OPCODE_EX2:
   case OPCODE_LG2:
   case OPCODE_POW:
   case OPCODE_RCP:
   case OPCODE_RSQ:
   case OPCODE_SIN:
   case OPCODE_DP2:
   case OPCODE_DP3:
   case OPCODE_DP4:
   case OPCODE_DPH:
   case OPCODE_DST:
      return 1;

   default:
      return 0;
   }
}

/**
 * Do GPU code generation for non-GLSL shaders.  Non-GLSL shaders have
 * no flow-control instructions, so we can more readily do SSA-style
 * optimizations.
 */
static void
brw_wm_non_glsl_emit(struct brw_context *brw, struct brw_wm_compile *c)
{
   /* Augment fragment program.  Add instructions for pre- and
    * post-fragment-program tasks such as interpolation and fogging.
    */
   brw_wm_pass_fp(c);

   /* Translate to intermediate representation.  Build register usage
    * chains.
    */
   brw_wm_pass0(c);

   /* Dead code removal.
    */
   brw_wm_pass1(c);

   /* Register allocation.
    * Divide by two because we operate on 16 pixels at a time and require
    * two GRF entries for each logical shader register.
    */
   c->grf_limit = BRW_WM_MAX_GRF / 2;

   brw_wm_pass2(c);

   /* how many general-purpose registers are used */
   c->prog_data.reg_blocks = brw_register_blocks(c->max_wm_grf);

   /* Emit GEN4 code.
    */
   brw_wm_emit(c);
}

/**
 * Return a bitfield where bit n is set if barycentric interpolation mode n
 * (see enum brw_wm_barycentric_interp_mode) is needed by the fragment shader.
 */
static unsigned
brw_compute_barycentric_interp_modes(struct brw_context *brw,
                                     bool shade_model_flat,
                                     const struct gl_fragment_program *fprog)
{
   unsigned barycentric_interp_modes = 0;
   int attr;

   /* Loop through all fragment shader inputs to figure out what interpolation
    * modes are in use, and set the appropriate bits in
    * barycentric_interp_modes.
    */
   for (attr = 0; attr < FRAG_ATTRIB_MAX; ++attr) {
      enum glsl_interp_qualifier interp_qualifier =
         fprog->InterpQualifier[attr];
      bool is_centroid = fprog->IsCentroid & BITFIELD64_BIT(attr);
      bool is_gl_Color = attr == FRAG_ATTRIB_COL0 || attr == FRAG_ATTRIB_COL1;

      /* Ignore unused inputs. */
      if (!(fprog->Base.InputsRead & BITFIELD64_BIT(attr)))
         continue;

      /* Ignore WPOS and FACE, because they don't require interpolation. */
      if (attr == FRAG_ATTRIB_WPOS || attr == FRAG_ATTRIB_FACE)
         continue;

      /* Determine the set (or sets) of barycentric coordinates needed to
       * interpolate this variable.  Note that when
       * brw->needs_unlit_centroid_workaround is set, centroid interpolation
       * uses PIXEL interpolation for unlit pixels and CENTROID interpolation
       * for lit pixels, so we need both sets of barycentric coordinates.
       */
      if (interp_qualifier == INTERP_QUALIFIER_NOPERSPECTIVE) {
         if (is_centroid) {
            barycentric_interp_modes |=
               1 << BRW_WM_NONPERSPECTIVE_CENTROID_BARYCENTRIC;
         }
         if (!is_centroid || brw->needs_unlit_centroid_workaround) {
            barycentric_interp_modes |=
               1 << BRW_WM_NONPERSPECTIVE_PIXEL_BARYCENTRIC;
         }
      } else if (interp_qualifier == INTERP_QUALIFIER_SMOOTH ||
                 (!(shade_model_flat && is_gl_Color) &&
                  interp_qualifier == INTERP_QUALIFIER_NONE)) {
         if (is_centroid) {
            barycentric_interp_modes |=
               1 << BRW_WM_PERSPECTIVE_CENTROID_BARYCENTRIC;
         }
         if (!is_centroid || brw->needs_unlit_centroid_workaround) {
            barycentric_interp_modes |=
               1 << BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC;
         }
      }
   }

   return barycentric_interp_modes;
}

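/**
 * Work out which payload registers the hardware will deliver to the WM
 * thread (masks, barycentric coordinates, interpolated depth/W on Gen6+)
 * and record their locations in the compile struct.
 */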
void
brw_wm_payload_setup(struct brw_context *brw,
                     struct brw_wm_compile *c)
{
   struct intel_context *intel = &brw->intel;
   bool uses_depth = (c->fp->program.Base.InputsRead &
                      (1 << FRAG_ATTRIB_WPOS)) != 0;
   unsigned barycentric_interp_modes = c->prog_data.barycentric_interp_modes;
   int i;

   if (intel->gen >= 6) {
      /* R0-1: masks, pixel X/Y coordinates. */
      c->nr_payload_regs = 2;
      /* R2: only for 32-pixel dispatch. */

      /* R3-26: barycentric interpolation coordinates.  These appear in the
       * same order that they appear in the brw_wm_barycentric_interp_mode
       * enum.  Each set of coordinates occupies 2 registers if dispatch width
       * == 8 and 4 registers if dispatch width == 16.  Coordinates only
       * appear if they were enabled using the "Barycentric Interpolation
       * Mode" bits in WM_STATE.
       */
      for (i = 0; i < BRW_WM_BARYCENTRIC_INTERP_MODE_COUNT; ++i) {
         if (barycentric_interp_modes & (1 << i)) {
            c->barycentric_coord_reg[i] = c->nr_payload_regs;
            c->nr_payload_regs += 2;
            if (c->dispatch_width == 16) {
               c->nr_payload_regs += 2;
            }
         }
      }

      /* R27: interpolated depth if uses source depth */
      if (uses_depth) {
         c->source_depth_reg = c->nr_payload_regs;
         c->nr_payload_regs++;
         if (c->dispatch_width == 16) {
            /* R28: interpolated depth if not 8-wide. */
            c->nr_payload_regs++;
         }
      }
      /* R29: interpolated W set if GEN6_WM_USES_SOURCE_W.
       */
      if (uses_depth) {
         c->source_w_reg = c->nr_payload_regs;
         c->nr_payload_regs++;
         if (c->dispatch_width == 16) {
            /* R30: interpolated W if not 8-wide. */
            c->nr_payload_regs++;
         }
      }
      /* R31: MSAA position offsets. */
      /* R32-: bary for 32-pixel. */
      /* R58-59: interp W for 32-pixel. */

      if (c->fp->program.Base.OutputsWritten &
          BITFIELD64_BIT(FRAG_RESULT_DEPTH)) {
         c->source_depth_to_render_target = true;
         c->computes_depth = true;
      }
   } else {
      brw_wm_lookup_iz(intel, c);
   }
}

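/**
 * Compare two brw_wm_prog_data structs for the state cache, including the
 * separately allocated param and pull_param arrays.
 */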
bool
brw_wm_prog_data_compare(const void *in_a, const void *in_b,
                         int aux_size, const void *in_key)
{
   const struct brw_wm_prog_data *a = in_a;
   const struct brw_wm_prog_data *b = in_b;

   /* Compare all the struct up to the pointers. */
   if (memcmp(a, b, offsetof(struct brw_wm_prog_data, param)))
      return false;

   if (memcmp(a->param, b->param, a->nr_params * sizeof(void *)))
      return false;

   if (memcmp(a->pull_param, b->pull_param, a->nr_pull_params * sizeof(void *)))
      return false;

   return true;
}

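/** Free the param/pull_param arrays hanging off a cached prog_data. */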
void
brw_wm_prog_data_free(const void *in_prog_data)
{
   const struct brw_wm_prog_data *prog_data = in_prog_data;

   ralloc_free((void *)prog_data->param);
   ralloc_free((void *)prog_data->pull_param);
}

/**
 * All Mesa program -> GPU code generation goes through this function.
 * Depending on the instructions used (i.e. flow control instructions)
 * we'll use one of two code generators.
 */
bool do_wm_prog(struct brw_context *brw,
                struct gl_shader_program *prog,
                struct brw_fragment_program *fp,
                struct brw_wm_prog_key *key)
{
   struct intel_context *intel = &brw->intel;
   struct brw_wm_compile *c;
   const GLuint *program;
   struct gl_shader *fs = NULL;
   GLuint program_size;

   if (prog)
      fs = prog->_LinkedShaders[MESA_SHADER_FRAGMENT];

   c = brw->wm.compile_data;
   if (c == NULL) {
      brw->wm.compile_data = rzalloc(NULL, struct brw_wm_compile);
      c = brw->wm.compile_data;
      if (c == NULL) {
         /* Ouch - big out of memory problem.  Can't continue
          * without triggering a segfault, no way to signal,
          * so just return.
          */
         return false;
      }
   } else {
      void *instruction = c->instruction;
      void *prog_instructions = c->prog_instructions;
      void *vreg = c->vreg;
      void *refs = c->refs;
      memset(c, 0, sizeof(*brw->wm.compile_data));
      c->instruction = instruction;
      c->prog_instructions = prog_instructions;
      c->vreg = vreg;
      c->refs = refs;
   }

   /* Allocate the references to the uniforms that will end up in the
    * prog_data associated with the compiled program, and which will be freed
    * by the state cache.
    */
   if (fs) {
      int param_count = fs->num_uniform_components;
      /* The backend also sometimes adds params for texture size. */
      param_count += 2 * BRW_MAX_TEX_UNIT;

      c->prog_data.param = rzalloc_array(c, const float *, param_count);
      c->prog_data.pull_param = rzalloc_array(c, const float *, param_count);
   } else {
      /* brw_wm_pass0.c will also add references to 0.0 and 1.0 which are
       * uploaded as push parameters.
       */
      int param_count = (fp->program.Base.Parameters->NumParameters + 2) * 4;
      c->prog_data.param = rzalloc_array(c, const float *, param_count);
      /* The old backend never does pull constants. */
      c->prog_data.pull_param = NULL;
   }

   memcpy(&c->key, key, sizeof(*key));

   c->fp = fp;
   c->env_param = brw->intel.ctx.FragmentProgram.Parameters;

   brw_init_compile(brw, &c->func, c);

   c->prog_data.barycentric_interp_modes =
      brw_compute_barycentric_interp_modes(brw, c->key.flat_shade,
                                           &fp->program);

   if (prog && prog->_LinkedShaders[MESA_SHADER_FRAGMENT]) {
      if (!brw_wm_fs_emit(brw, c, prog))
         return false;
   } else {
      if (!c->instruction) {
         c->instruction = rzalloc_array(c, struct brw_wm_instruction, BRW_WM_MAX_INSN);
         c->prog_instructions = rzalloc_array(c, struct prog_instruction, BRW_WM_MAX_INSN);
         c->vreg = rzalloc_array(c, struct brw_wm_value, BRW_WM_MAX_VREG);
         c->refs = rzalloc_array(c, struct brw_wm_ref, BRW_WM_MAX_REF);
      }

      /* Fallback for fixed function and ARB_fp shaders. */
      c->dispatch_width = 16;
      brw_wm_payload_setup(brw, c);
      brw_wm_non_glsl_emit(brw, c);
      c->prog_data.dispatch_width = 16;
   }

   /* Scratch space is used for register spilling */
   if (c->last_scratch) {
      perf_debug("Fragment shader triggered register spilling. "
                 "Try reducing the number of live scalar values to "
                 "improve performance.\n");

      c->prog_data.total_scratch = brw_get_scratch_size(c->last_scratch);

      brw_get_scratch_bo(intel, &brw->wm.scratch_bo,
                         c->prog_data.total_scratch * brw->max_wm_threads);
   }

   if (unlikely(INTEL_DEBUG & DEBUG_WM))
      fprintf(stderr, "\n");

   /* get the program
    */
   program = brw_get_program(&c->func, &program_size);

   brw_upload_cache(&brw->cache, BRW_WM_PROG,
                    &c->key, sizeof(c->key),
                    program, program_size,
                    &c->prog_data, sizeof(c->prog_data),
                    &brw->wm.prog_offset, &brw->wm.prog_data);

   return true;
}

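/** Report a single changed program key field when debugging recompiles. */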
static bool
key_debug(const char *name, int a, int b)
{
   if (a != b) {
      perf_debug("  %s %d->%d\n", name, a, b);
      return true;
   } else {
      return false;
   }
}

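/**
 * Report which sampler-related key fields changed between the cached
 * compile and the new one; returns true if any of them differ.
 */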
bool
brw_debug_recompile_sampler_key(const struct brw_sampler_prog_key_data *old_key,
                                const struct brw_sampler_prog_key_data *key)
{
   bool found = false;

   for (unsigned int i = 0; i < MAX_SAMPLERS; i++) {
      found |= key_debug("EXT_texture_swizzle or DEPTH_TEXTURE_MODE",
                         old_key->swizzles[i], key->swizzles[i]);
   }
   found |= key_debug("GL_CLAMP enabled on any texture unit's 1st coordinate",
                      old_key->gl_clamp_mask[0], key->gl_clamp_mask[0]);
   found |= key_debug("GL_CLAMP enabled on any texture unit's 2nd coordinate",
                      old_key->gl_clamp_mask[1], key->gl_clamp_mask[1]);
   found |= key_debug("GL_CLAMP enabled on any texture unit's 3rd coordinate",
                      old_key->gl_clamp_mask[2], key->gl_clamp_mask[2]);
   found |= key_debug("GL_MESA_ycbcr texturing",
                      old_key->yuvtex_mask, key->yuvtex_mask);
   found |= key_debug("GL_MESA_ycbcr UV swapping",
                      old_key->yuvtex_swap_mask, key->yuvtex_swap_mask);

   return found;
}

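/**
 * Look up the previously compiled variant of this fragment program in the
 * state cache and report which key fields forced the recompile.
 */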
void
brw_wm_debug_recompile(struct brw_context *brw,
                       struct gl_shader_program *prog,
                       const struct brw_wm_prog_key *key)
{
   struct brw_cache_item *c = NULL;
   const struct brw_wm_prog_key *old_key = NULL;
   bool found = false;

   perf_debug("Recompiling fragment shader for program %d\n", prog->Name);

   for (unsigned int i = 0; i < brw->cache.size; i++) {
      for (c = brw->cache.items[i]; c; c = c->next) {
         if (c->cache_id == BRW_WM_PROG) {
            old_key = c->key;

            if (old_key->program_string_id == key->program_string_id)
               break;
         }
      }
      if (c)
         break;
   }

   if (!c) {
      perf_debug("  Didn't find previous compile in the shader cache for "
                 "debug\n");
      return;
   }

   found |= key_debug("alphatest, computed depth, depth test, or depth write",
                      old_key->iz_lookup, key->iz_lookup);
   found |= key_debug("depth statistics", old_key->stats_wm, key->stats_wm);
   found |= key_debug("flat shading", old_key->flat_shade, key->flat_shade);
   found |= key_debug("number of color buffers", old_key->nr_color_regions, key->nr_color_regions);
   found |= key_debug("rendering to FBO", old_key->render_to_fbo, key->render_to_fbo);
   found |= key_debug("fragment color clamping", old_key->clamp_fragment_color, key->clamp_fragment_color);
   found |= key_debug("line smoothing", old_key->line_aa, key->line_aa);
   found |= key_debug("proj_attrib_mask", old_key->proj_attrib_mask, key->proj_attrib_mask);
   found |= key_debug("renderbuffer height", old_key->drawable_height, key->drawable_height);
   found |= key_debug("vertex shader outputs", old_key->vp_outputs_written, key->vp_outputs_written);

   found |= brw_debug_recompile_sampler_key(&old_key->tex, &key->tex);

   if (!found) {
      perf_debug("  Something else\n");
   }
}

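/**
 * Fill in the sampler-related program key fields (texture swizzles, YCbCr
 * use, and GL_CLAMP wrap modes) from the current texture and sampler state.
 */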
void
brw_populate_sampler_prog_key_data(struct gl_context *ctx,
                                   const struct gl_program *prog,
                                   struct brw_sampler_prog_key_data *key)
{
   for (int s = 0; s < MAX_SAMPLERS; s++) {
      key->swizzles[s] = SWIZZLE_NOOP;

      if (!(prog->SamplersUsed & (1 << s)))
         continue;

      int unit_id = prog->SamplerUnits[s];
      const struct gl_texture_unit *unit = &ctx->Texture.Unit[unit_id];

      if (unit->_ReallyEnabled && unit->_Current->Target != GL_TEXTURE_BUFFER) {
         const struct gl_texture_object *t = unit->_Current;
         const struct gl_texture_image *img = t->Image[0][t->BaseLevel];
         struct gl_sampler_object *sampler = _mesa_get_samplerobj(ctx, unit_id);

         key->swizzles[s] = brw_get_texture_swizzle(t);

         if (img->InternalFormat == GL_YCBCR_MESA) {
            key->yuvtex_mask |= 1 << s;
            if (img->TexFormat == MESA_FORMAT_YCBCR)
               key->yuvtex_swap_mask |= 1 << s;
         }

         if (sampler->MinFilter != GL_NEAREST &&
             sampler->MagFilter != GL_NEAREST) {
            if (sampler->WrapS == GL_CLAMP)
               key->gl_clamp_mask[0] |= 1 << s;
            if (sampler->WrapT == GL_CLAMP)
               key->gl_clamp_mask[1] |= 1 << s;
            if (sampler->WrapR == GL_CLAMP)
               key->gl_clamp_mask[2] |= 1 << s;
         }
      }
   }
}

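/**
 * Gather all the GL and driver state that affects fragment shader
 * compilation into the WM program key.
 */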
static void brw_wm_populate_key( struct brw_context *brw,
                                 struct brw_wm_prog_key *key )
{
   struct gl_context *ctx = &brw->intel.ctx;
   struct intel_context *intel = &brw->intel;
   /* BRW_NEW_FRAGMENT_PROGRAM */
   const struct brw_fragment_program *fp =
      (struct brw_fragment_program *)brw->fragment_program;
   const struct gl_program *prog = (struct gl_program *) brw->fragment_program;
   GLuint lookup = 0;
   GLuint line_aa;
   bool program_uses_dfdy = fp->program.UsesDFdy;

   memset(key, 0, sizeof(*key));

   /* Build the index for table lookup
    */
   if (intel->gen < 6) {
      /* _NEW_COLOR */
      if (fp->program.UsesKill || ctx->Color.AlphaEnabled)
         lookup |= IZ_PS_KILL_ALPHATEST_BIT;

      if (fp->program.Base.OutputsWritten & BITFIELD64_BIT(FRAG_RESULT_DEPTH))
         lookup |= IZ_PS_COMPUTES_DEPTH_BIT;

      /* _NEW_DEPTH */
      if (ctx->Depth.Test)
         lookup |= IZ_DEPTH_TEST_ENABLE_BIT;

      if (ctx->Depth.Test && ctx->Depth.Mask) /* ?? */
         lookup |= IZ_DEPTH_WRITE_ENABLE_BIT;

      /* _NEW_STENCIL */
      if (ctx->Stencil._Enabled) {
         lookup |= IZ_STENCIL_TEST_ENABLE_BIT;

         if (ctx->Stencil.WriteMask[0] ||
             ctx->Stencil.WriteMask[ctx->Stencil._BackFace])
            lookup |= IZ_STENCIL_WRITE_ENABLE_BIT;
      }
      key->iz_lookup = lookup;
   }

   line_aa = AA_NEVER;

   /* _NEW_LINE, _NEW_POLYGON, BRW_NEW_REDUCED_PRIMITIVE */
   if (ctx->Line.SmoothFlag) {
      if (brw->intel.reduced_primitive == GL_LINES) {
         line_aa = AA_ALWAYS;
      }
      else if (brw->intel.reduced_primitive == GL_TRIANGLES) {
         if (ctx->Polygon.FrontMode == GL_LINE) {
            line_aa = AA_SOMETIMES;

            if (ctx->Polygon.BackMode == GL_LINE ||
                (ctx->Polygon.CullFlag &&
                 ctx->Polygon.CullFaceMode == GL_BACK))
               line_aa = AA_ALWAYS;
         }
         else if (ctx->Polygon.BackMode == GL_LINE) {
            line_aa = AA_SOMETIMES;

            if ((ctx->Polygon.CullFlag &&
                 ctx->Polygon.CullFaceMode == GL_FRONT))
               line_aa = AA_ALWAYS;
         }
      }
   }

   key->line_aa = line_aa;

   if (intel->gen < 6)
      key->stats_wm = brw->intel.stats_wm;

   /* BRW_NEW_WM_INPUT_DIMENSIONS */
   /* Only set this for fixed function.  The optimization it enables isn't
    * useful for programs using shaders.
    */
   if (ctx->Shader.CurrentFragmentProgram)
      key->proj_attrib_mask = 0xffffffff;
   else
      key->proj_attrib_mask = brw->wm.input_size_masks[4-1];

   /* _NEW_LIGHT */
   key->flat_shade = (ctx->Light.ShadeModel == GL_FLAT);

   /* _NEW_FRAG_CLAMP | _NEW_BUFFERS */
   key->clamp_fragment_color = ctx->Color._ClampFragmentColor;

   /* _NEW_TEXTURE */
   brw_populate_sampler_prog_key_data(ctx, prog, &key->tex);

   /* _NEW_BUFFERS */
   /*
    * Include the draw buffer origin and height so that we can calculate
    * fragment position values relative to the bottom left of the drawable,
    * from the incoming screen origin relative position we get as part of our
    * payload.
    *
    * This is only needed for the WM_WPOSXY opcode when the fragment program
    * uses the gl_FragCoord input.
    *
    * We could avoid recompiling by including this as a constant referenced by
    * our program, but if we were to do that it would also be nice to handle
    * getting that constant updated at batchbuffer submit time (when we
    * hold the lock and know where the buffer really is) rather than at emit
    * time when we don't hold the lock and are just guessing.  We could also
    * just avoid using this as key data if the program doesn't use
    * fragment.position.
    *
    * For DRI2 the origin_x/y will always be (0,0) but we still need the
    * drawable height in order to invert the Y axis.
    */
   if (fp->program.Base.InputsRead & FRAG_BIT_WPOS) {
      key->drawable_height = ctx->DrawBuffer->Height;
   }

   if ((fp->program.Base.InputsRead & FRAG_BIT_WPOS) || program_uses_dfdy) {
      key->render_to_fbo = _mesa_is_user_fbo(ctx->DrawBuffer);
   }

   /* _NEW_BUFFERS */
   key->nr_color_regions = ctx->DrawBuffer->_NumColorDrawBuffers;
   /* _NEW_MULTISAMPLE */
   key->sample_alpha_to_coverage = ctx->Multisample.SampleAlphaToCoverage;

   /* CACHE_NEW_VS_PROG */
   if (intel->gen < 6)
      key->vp_outputs_written = brw->vs.prog_data->outputs_written;

   /* The unique fragment program ID */
   key->program_string_id = fp->id;
}

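/**
 * Build the key for the current state and either find the matching program
 * in the state cache or compile a new one.
 */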
static void
brw_upload_wm_prog(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;
   struct gl_context *ctx = &intel->ctx;
   struct brw_wm_prog_key key;
   struct brw_fragment_program *fp = (struct brw_fragment_program *)
      brw->fragment_program;

   brw_wm_populate_key(brw, &key);

   if (!brw_search_cache(&brw->cache, BRW_WM_PROG,
                         &key, sizeof(key),
                         &brw->wm.prog_offset, &brw->wm.prog_data)) {
      bool success = do_wm_prog(brw, ctx->Shader._CurrentFragmentProgram, fp,
                                &key);
      (void) success;
      assert(success);
   }
}


const struct brw_tracked_state brw_wm_prog = {
   .dirty = {
      .mesa = (_NEW_COLOR |
               _NEW_DEPTH |
               _NEW_STENCIL |
               _NEW_POLYGON |
               _NEW_LINE |
               _NEW_LIGHT |
               _NEW_FRAG_CLAMP |
               _NEW_BUFFERS |
               _NEW_TEXTURE |
               _NEW_MULTISAMPLE),
      .brw = (BRW_NEW_FRAGMENT_PROGRAM |
              BRW_NEW_WM_INPUT_DIMENSIONS |
              BRW_NEW_REDUCED_PRIMITIVE),
      .cache = CACHE_NEW_VS_PROG,
   },
   .emit = brw_upload_wm_prog
};