i965: Add HiZ operation state to brw_context
[mesa.git] src/mesa/drivers/dri/i965/brw_wm.c
/*
 Copyright (C) Intel Corp. 2006. All Rights Reserved.
 Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keith@tungstengraphics.com>
 */

#include "brw_context.h"
#include "brw_wm.h"
#include "brw_state.h"
#include "main/formats.h"
#include "main/samplerobj.h"
#include "program/prog_parameter.h"

#include "glsl/ralloc.h"

/** Return number of src args for given instruction */
GLuint brw_wm_nr_args( GLuint opcode )
{
   switch (opcode) {
   case WM_FRONTFACING:
   case WM_PIXELXY:
      return 0;
   case WM_CINTERP:
   case WM_WPOSXY:
   case WM_DELTAXY:
      return 1;
   case WM_LINTERP:
   case WM_PIXELW:
      return 2;
   case WM_FB_WRITE:
   case WM_PINTERP:
      return 3;
   default:
      assert(opcode < MAX_OPCODE);
      return _mesa_num_inst_src_regs(opcode);
   }
}


GLuint brw_wm_is_scalar_result( GLuint opcode )
{
   switch (opcode) {
   case OPCODE_COS:
   case OPCODE_EX2:
   case OPCODE_LG2:
   case OPCODE_POW:
   case OPCODE_RCP:
   case OPCODE_RSQ:
   case OPCODE_SIN:
   case OPCODE_DP2:
   case OPCODE_DP3:
   case OPCODE_DP4:
   case OPCODE_DPH:
   case OPCODE_DST:
      return 1;

   default:
      return 0;
   }
}


/**
 * Do GPU code generation for non-GLSL shaders.  Non-GLSL shaders have
 * no flow control instructions, so we can more readily do SSA-style
 * optimizations.
 */
static void
brw_wm_non_glsl_emit(struct brw_context *brw, struct brw_wm_compile *c)
{
   /* Augment fragment program.  Add instructions for pre- and
    * post-fragment-program tasks such as interpolation and fogging.
    */
   brw_wm_pass_fp(c);

   /* Translate to intermediate representation.  Build register usage
    * chains.
    */
   brw_wm_pass0(c);

   /* Dead code removal.
    */
   brw_wm_pass1(c);

   /* Register allocation.
    * Divide by two because we operate on 16 pixels at a time and require
    * two GRF entries for each logical shader register.
    */
   c->grf_limit = BRW_WM_MAX_GRF / 2;

   brw_wm_pass2(c);

   /* how many general-purpose registers are used */
   c->prog_data.reg_blocks = brw_register_blocks(c->max_wm_grf);

   /* Emit GEN4 code.
    */
   brw_wm_emit(c);
}


/**
 * Return a bitfield where bit n is set if barycentric interpolation mode n
 * (see enum brw_wm_barycentric_interp_mode) is needed by the fragment shader.
 */
unsigned
brw_compute_barycentric_interp_modes(bool shade_model_flat,
                                     const struct gl_fragment_program *fprog)
{
   unsigned barycentric_interp_modes = 0;
   int attr;

   /* Loop through all fragment shader inputs to figure out what interpolation
    * modes are in use, and set the appropriate bits in
    * barycentric_interp_modes.
    */
   for (attr = 0; attr < FRAG_ATTRIB_MAX; ++attr) {
      enum glsl_interp_qualifier interp_qualifier =
         fprog->InterpQualifier[attr];
      bool is_gl_Color = attr == FRAG_ATTRIB_COL0 || attr == FRAG_ATTRIB_COL1;

      /* Ignore unused inputs. */
      if (!(fprog->Base.InputsRead & BITFIELD64_BIT(attr)))
         continue;

      /* Ignore WPOS and FACE, because they don't require interpolation. */
      if (attr == FRAG_ATTRIB_WPOS || attr == FRAG_ATTRIB_FACE)
         continue;

      if (interp_qualifier == INTERP_QUALIFIER_NOPERSPECTIVE) {
         barycentric_interp_modes |=
            1 << BRW_WM_NONPERSPECTIVE_PIXEL_BARYCENTRIC;
      } else if (interp_qualifier == INTERP_QUALIFIER_SMOOTH ||
                 (!(shade_model_flat && is_gl_Color) &&
                  interp_qualifier == INTERP_QUALIFIER_NONE)) {
         barycentric_interp_modes |=
            1 << BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC;
      }
   }

   return barycentric_interp_modes;
}
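
/* Illustrative example (editor's note, not from the original source): a
 * fragment shader that reads one "noperspective" varying and one default
 * (smooth) varying, drawn with glShadeModel(GL_SMOOTH), makes the loop
 * above return
 *
 *    (1 << BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC) |
 *    (1 << BRW_WM_NONPERSPECTIVE_PIXEL_BARYCENTRIC)
 */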


void
brw_wm_payload_setup(struct brw_context *brw,
                     struct brw_wm_compile *c)
{
   struct intel_context *intel = &brw->intel;
   bool uses_depth = (c->fp->program.Base.InputsRead &
                      (1 << FRAG_ATTRIB_WPOS)) != 0;
   unsigned barycentric_interp_modes =
      brw_compute_barycentric_interp_modes(c->key.flat_shade,
                                           &c->fp->program);
   int i;

   if (intel->gen >= 6) {
      /* R0-1: masks, pixel X/Y coordinates. */
      c->nr_payload_regs = 2;
      /* R2: only for 32-pixel dispatch. */

      /* R3-26: barycentric interpolation coordinates.  These appear in the
       * same order that they appear in the brw_wm_barycentric_interp_mode
       * enum.  Each set of coordinates occupies 2 registers if dispatch width
       * == 8 and 4 registers if dispatch width == 16.  Coordinates only
       * appear if they were enabled using the "Barycentric Interpolation
       * Mode" bits in WM_STATE.
       */
      for (i = 0; i < BRW_WM_BARYCENTRIC_INTERP_MODE_COUNT; ++i) {
         if (barycentric_interp_modes & (1 << i)) {
            c->barycentric_coord_reg[i] = c->nr_payload_regs;
            c->nr_payload_regs += 2;
            if (c->dispatch_width == 16) {
               c->nr_payload_regs += 2;
            }
         }
      }
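      /* Worked example (editor's note, not from the original source): at
       * dispatch_width == 16 each enabled mode consumes 4 payload registers
       * in the loop above (2 + 2), so enabling both the perspective and
       * noperspective pixel barycentric modes advances nr_payload_regs from
       * 2 to 10.
       */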

      /* R27: interpolated depth if uses source depth */
      if (uses_depth) {
         c->source_depth_reg = c->nr_payload_regs;
         c->nr_payload_regs++;
         if (c->dispatch_width == 16) {
            /* R28: interpolated depth if not 8-wide. */
            c->nr_payload_regs++;
         }
      }
      /* R29: interpolated W set if GEN6_WM_USES_SOURCE_W.
       */
      if (uses_depth) {
         c->source_w_reg = c->nr_payload_regs;
         c->nr_payload_regs++;
         if (c->dispatch_width == 16) {
            /* R30: interpolated W if not 8-wide. */
            c->nr_payload_regs++;
         }
      }
      /* R31: MSAA position offsets. */
      /* R32-: bary for 32-pixel. */
      /* R58-59: interp W for 32-pixel. */

      if (c->fp->program.Base.OutputsWritten &
          BITFIELD64_BIT(FRAG_RESULT_DEPTH)) {
         c->source_depth_to_render_target = true;
         c->computes_depth = true;
      }
   } else {
      brw_wm_lookup_iz(intel, c);
   }
}

/**
 * All Mesa program -> GPU code generation goes through this function.
 * Depending on the instructions used (i.e. flow control instructions)
 * we'll use one of two code generators.
 */
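/* Editor's note (not in the original source): the two generators are
 * brw_wm_fs_emit() for programs with a linked GLSL fragment shader and
 * brw_wm_non_glsl_emit() for fixed-function and ARB_fp programs; the choice
 * is made on prog->_LinkedShaders[MESA_SHADER_FRAGMENT] below.
 */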
bool do_wm_prog(struct brw_context *brw,
                struct gl_shader_program *prog,
                struct brw_fragment_program *fp,
                struct brw_wm_prog_key *key)
{
   struct intel_context *intel = &brw->intel;
   struct brw_wm_compile *c;
   const GLuint *program;
   GLuint program_size;

   c = brw->wm.compile_data;
   if (c == NULL) {
      brw->wm.compile_data = rzalloc(NULL, struct brw_wm_compile);
      c = brw->wm.compile_data;
      if (c == NULL) {
         /* Ouch - big out of memory problem.  Can't continue
          * without triggering a segfault, no way to signal,
          * so just return.
          */
         return false;
      }
   } else {
      void *instruction = c->instruction;
      void *prog_instructions = c->prog_instructions;
      void *vreg = c->vreg;
      void *refs = c->refs;
      memset(c, 0, sizeof(*brw->wm.compile_data));
      c->instruction = instruction;
      c->prog_instructions = prog_instructions;
      c->vreg = vreg;
      c->refs = refs;
   }
   memcpy(&c->key, key, sizeof(*key));

   c->fp = fp;
   c->env_param = brw->intel.ctx.FragmentProgram.Parameters;

   brw_init_compile(brw, &c->func, c);

   if (prog && prog->_LinkedShaders[MESA_SHADER_FRAGMENT]) {
      if (!brw_wm_fs_emit(brw, c, prog))
         return false;
   } else {
      if (!c->instruction) {
         c->instruction = rzalloc_array(c, struct brw_wm_instruction, BRW_WM_MAX_INSN);
         c->prog_instructions = rzalloc_array(c, struct prog_instruction, BRW_WM_MAX_INSN);
         c->vreg = rzalloc_array(c, struct brw_wm_value, BRW_WM_MAX_VREG);
         c->refs = rzalloc_array(c, struct brw_wm_ref, BRW_WM_MAX_REF);
      }

      /* Fallback for fixed function and ARB_fp shaders. */
      c->dispatch_width = 16;
      brw_wm_payload_setup(brw, c);
      brw_wm_non_glsl_emit(brw, c);
      c->prog_data.dispatch_width = 16;
   }

   /* Scratch space is used for register spilling */
   if (c->last_scratch) {
      c->prog_data.total_scratch = brw_get_scratch_size(c->last_scratch);

      brw_get_scratch_bo(intel, &brw->wm.scratch_bo,
                         c->prog_data.total_scratch * brw->max_wm_threads);
   }

   if (unlikely(INTEL_DEBUG & DEBUG_WM))
      fprintf(stderr, "\n");

   /* get the program
    */
   program = brw_get_program(&c->func, &program_size);

   brw_upload_cache(&brw->cache, BRW_WM_PROG,
                    &c->key, sizeof(c->key),
                    program, program_size,
                    &c->prog_data, sizeof(c->prog_data),
                    &brw->wm.prog_offset, &brw->wm.prog_data);

   return true;
}


static void brw_wm_populate_key( struct brw_context *brw,
                                 struct brw_wm_prog_key *key )
{
   struct gl_context *ctx = &brw->intel.ctx;
   /* BRW_NEW_FRAGMENT_PROGRAM */
   const struct brw_fragment_program *fp =
      (struct brw_fragment_program *)brw->fragment_program;
   GLuint lookup = 0;
   GLuint line_aa;
   GLuint i;

   memset(key, 0, sizeof(*key));

   /* Build the index for table lookup
    */
   /* _NEW_COLOR */
   key->alpha_test = ctx->Color.AlphaEnabled;
   if (fp->program.UsesKill ||
       ctx->Color.AlphaEnabled)
      lookup |= IZ_PS_KILL_ALPHATEST_BIT;

   if (fp->program.Base.OutputsWritten & BITFIELD64_BIT(FRAG_RESULT_DEPTH))
      lookup |= IZ_PS_COMPUTES_DEPTH_BIT;

   /* _NEW_DEPTH */
   if (ctx->Depth.Test)
      lookup |= IZ_DEPTH_TEST_ENABLE_BIT;

   if (ctx->Depth.Test &&
       ctx->Depth.Mask) /* ?? */
      lookup |= IZ_DEPTH_WRITE_ENABLE_BIT;

   /* _NEW_STENCIL */
   if (ctx->Stencil._Enabled) {
      lookup |= IZ_STENCIL_TEST_ENABLE_BIT;

      if (ctx->Stencil.WriteMask[0] ||
          ctx->Stencil.WriteMask[ctx->Stencil._BackFace])
         lookup |= IZ_STENCIL_WRITE_ENABLE_BIT;
   }
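   /* Worked example (editor's note, not from the original source): depth test
    * and depth writes enabled, with no alpha test, discard, stencil, or
    * gl_FragDepth write, gives
    * lookup == IZ_DEPTH_TEST_ENABLE_BIT | IZ_DEPTH_WRITE_ENABLE_BIT.
    */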

   line_aa = AA_NEVER;

   /* _NEW_LINE, _NEW_POLYGON, BRW_NEW_REDUCED_PRIMITIVE */
   if (ctx->Line.SmoothFlag) {
      if (brw->intel.reduced_primitive == GL_LINES) {
         line_aa = AA_ALWAYS;
      }
      else if (brw->intel.reduced_primitive == GL_TRIANGLES) {
         if (ctx->Polygon.FrontMode == GL_LINE) {
            line_aa = AA_SOMETIMES;

            if (ctx->Polygon.BackMode == GL_LINE ||
                (ctx->Polygon.CullFlag &&
                 ctx->Polygon.CullFaceMode == GL_BACK))
               line_aa = AA_ALWAYS;
         }
         else if (ctx->Polygon.BackMode == GL_LINE) {
            line_aa = AA_SOMETIMES;

            if ((ctx->Polygon.CullFlag &&
                 ctx->Polygon.CullFaceMode == GL_FRONT))
               line_aa = AA_ALWAYS;
         }
      }
   }

   key->iz_lookup = lookup;
   key->line_aa = line_aa;
   key->stats_wm = brw->intel.stats_wm;

   /* BRW_NEW_WM_INPUT_DIMENSIONS */
   key->proj_attrib_mask = brw->wm.input_size_masks[4-1];

   /* _NEW_LIGHT */
   key->flat_shade = (ctx->Light.ShadeModel == GL_FLAT);

   /* _NEW_FRAG_CLAMP | _NEW_BUFFERS */
   key->clamp_fragment_color = ctx->Color._ClampFragmentColor;

   /* _NEW_TEXTURE */
   for (i = 0; i < BRW_MAX_TEX_UNIT; i++) {
      const struct gl_texture_unit *unit = &ctx->Texture.Unit[i];

      if (unit->_ReallyEnabled) {
         const struct gl_texture_object *t = unit->_Current;
         const struct gl_texture_image *img = t->Image[0][t->BaseLevel];
         struct gl_sampler_object *sampler = _mesa_get_samplerobj(ctx, i);
         int swizzles[SWIZZLE_NIL + 1] = {
            SWIZZLE_X,
            SWIZZLE_Y,
            SWIZZLE_Z,
            SWIZZLE_W,
            SWIZZLE_ZERO,
            SWIZZLE_ONE,
            SWIZZLE_NIL
         };

         /* GL_DEPTH_TEXTURE_MODE is normally handled through
          * brw_wm_surface_state, but it applies to shadow compares as
          * well and our shadow compares always return the result in
          * all 4 channels.
          */
         if (sampler->CompareMode == GL_COMPARE_R_TO_TEXTURE_ARB) {
            key->compare_funcs[i] = sampler->CompareFunc;

            if (sampler->DepthMode == GL_ALPHA) {
               swizzles[0] = SWIZZLE_ZERO;
               swizzles[1] = SWIZZLE_ZERO;
               swizzles[2] = SWIZZLE_ZERO;
            } else if (sampler->DepthMode == GL_LUMINANCE) {
               swizzles[3] = SWIZZLE_ONE;
            } else if (sampler->DepthMode == GL_RED) {
               /* See table 3.23 of the GL 3.0 spec. */
               swizzles[1] = SWIZZLE_ZERO;
               swizzles[2] = SWIZZLE_ZERO;
               swizzles[3] = SWIZZLE_ONE;
            }
         }

         if (img->InternalFormat == GL_YCBCR_MESA) {
            key->yuvtex_mask |= 1 << i;
            if (img->TexFormat == MESA_FORMAT_YCBCR)
               key->yuvtex_swap_mask |= 1 << i;
         }

         key->tex_swizzles[i] =
            MAKE_SWIZZLE4(swizzles[GET_SWZ(t->_Swizzle, 0)],
                          swizzles[GET_SWZ(t->_Swizzle, 1)],
                          swizzles[GET_SWZ(t->_Swizzle, 2)],
                          swizzles[GET_SWZ(t->_Swizzle, 3)]);

         if (sampler->MinFilter != GL_NEAREST &&
             sampler->MagFilter != GL_NEAREST) {
            if (sampler->WrapS == GL_CLAMP)
               key->gl_clamp_mask[0] |= 1 << i;
            if (sampler->WrapT == GL_CLAMP)
               key->gl_clamp_mask[1] |= 1 << i;
            if (sampler->WrapR == GL_CLAMP)
               key->gl_clamp_mask[2] |= 1 << i;
         }
      }
      else {
         key->tex_swizzles[i] = SWIZZLE_NOOP;
      }
   }
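   /* Illustrative example (editor's note, not from the original source): a
    * shadow sampler with GL_DEPTH_TEXTURE_MODE == GL_LUMINANCE and no user
    * swizzle ends up with tex_swizzles[i] ==
    * MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Z, SWIZZLE_ONE).
    */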

   /* _NEW_BUFFERS */
   /*
    * Include the draw buffer origin and height so that we can calculate
    * fragment position values relative to the bottom left of the drawable,
    * from the incoming screen origin relative position we get as part of our
    * payload.
    *
    * This is only needed for the WM_WPOSXY opcode when the fragment program
    * uses the gl_FragCoord input.
    *
    * We could avoid recompiling by including this as a constant referenced by
    * our program, but if we were to do that it would also be nice to handle
    * getting that constant updated at batchbuffer submit time (when we
    * hold the lock and know where the buffer really is) rather than at emit
    * time when we don't hold the lock and are just guessing.  We could also
    * just avoid using this as key data if the program doesn't use
    * fragment.position.
    *
    * For DRI2 the origin_x/y will always be (0,0) but we still need the
    * drawable height in order to invert the Y axis.
    */
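   /* Editor's note (not in the original source): with render_to_fbo == false
    * the generated code flips the Y axis using key->drawable_height (roughly
    * gl_FragCoord.y = drawable_height - pixel_y), so a change in window
    * height selects a different program variant through this key.
    */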
   if (fp->program.Base.InputsRead & FRAG_BIT_WPOS) {
      key->drawable_height = ctx->DrawBuffer->Height;
      key->render_to_fbo = ctx->DrawBuffer->Name != 0;
   }

   /* _NEW_BUFFERS */
   key->nr_color_regions = ctx->DrawBuffer->_NumColorDrawBuffers;

   /* CACHE_NEW_VS_PROG */
   key->vp_outputs_written = brw->vs.prog_data->outputs_written;

   /* The unique fragment program ID */
   key->program_string_id = fp->id;
}


static void
brw_upload_wm_prog(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;
   struct gl_context *ctx = &intel->ctx;
   struct brw_wm_prog_key key;
   struct brw_fragment_program *fp = (struct brw_fragment_program *)
      brw->fragment_program;

   brw_wm_populate_key(brw, &key);

   if (!brw_search_cache(&brw->cache, BRW_WM_PROG,
                         &key, sizeof(key),
                         &brw->wm.prog_offset, &brw->wm.prog_data)) {
      bool success = do_wm_prog(brw, ctx->Shader.CurrentFragmentProgram, fp,
                                &key);
      (void) success;
      assert(success);
   }
}


const struct brw_tracked_state brw_wm_prog = {
   .dirty = {
      .mesa = (_NEW_COLOR |
               _NEW_DEPTH |
               _NEW_STENCIL |
               _NEW_POLYGON |
               _NEW_LINE |
               _NEW_LIGHT |
               _NEW_FRAG_CLAMP |
               _NEW_BUFFERS |
               _NEW_TEXTURE),
      .brw = (BRW_NEW_FRAGMENT_PROGRAM |
              BRW_NEW_WM_INPUT_DIMENSIONS |
              BRW_NEW_REDUCED_PRIMITIVE),
      .cache = CACHE_NEW_VS_PROG,
   },
   .emit = brw_upload_wm_prog
};