svga: check for and skip null vertex buffer pointers
[mesa.git] / src / mesa / drivers / dri / i965 / brw_wm.c
1 /*
2 Copyright (C) Intel Corp. 2006. All Rights Reserved.
3 Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
4 develop this 3D driver.
5
6 Permission is hereby granted, free of charge, to any person obtaining
7 a copy of this software and associated documentation files (the
8 "Software"), to deal in the Software without restriction, including
9 without limitation the rights to use, copy, modify, merge, publish,
10 distribute, sublicense, and/or sell copies of the Software, and to
11 permit persons to whom the Software is furnished to do so, subject to
12 the following conditions:
13
14 The above copyright notice and this permission notice (including the
15 next paragraph) shall be included in all copies or substantial
16 portions of the Software.
17
18 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25
26 **********************************************************************/
27 /*
28 * Authors:
29 * Keith Whitwell <keith@tungstengraphics.com>
30 */
31
32 #include "brw_context.h"
33 #include "brw_wm.h"
34 #include "brw_state.h"
35 #include "main/formats.h"
36 #include "main/samplerobj.h"
37 #include "program/prog_parameter.h"
38
39 #include "glsl/ralloc.h"
40
41 /** Return number of src args for given instruction */
42 GLuint brw_wm_nr_args( GLuint opcode )
43 {
44 switch (opcode) {
45 case WM_FRONTFACING:
46 case WM_PIXELXY:
47 return 0;
48 case WM_CINTERP:
49 case WM_WPOSXY:
50 case WM_DELTAXY:
51 return 1;
52 case WM_LINTERP:
53 case WM_PIXELW:
54 return 2;
55 case WM_FB_WRITE:
56 case WM_PINTERP:
57 return 3;
58 default:
59 assert(opcode < MAX_OPCODE);
60 return _mesa_num_inst_src_regs(opcode);
61 }
62 }
63
64
65 GLuint brw_wm_is_scalar_result( GLuint opcode )
66 {
67 switch (opcode) {
68 case OPCODE_COS:
69 case OPCODE_EX2:
70 case OPCODE_LG2:
71 case OPCODE_POW:
72 case OPCODE_RCP:
73 case OPCODE_RSQ:
74 case OPCODE_SIN:
75 case OPCODE_DP2:
76 case OPCODE_DP3:
77 case OPCODE_DP4:
78 case OPCODE_DPH:
79 case OPCODE_DST:
80 return 1;
81
82 default:
83 return 0;
84 }
85 }
86
87
88 /**
89 * Do GPU code generation for non-GLSL shader. non-GLSL shaders have
90 * no flow control instructions so we can more readily do SSA-style
91 * optimizations.
92 */
93 static void
94 brw_wm_non_glsl_emit(struct brw_context *brw, struct brw_wm_compile *c)
95 {
96 /* Augment fragment program. Add instructions for pre- and
97 * post-fragment-program tasks such as interpolation and fogging.
98 */
99 brw_wm_pass_fp(c);
100
101 /* Translate to intermediate representation. Build register usage
102 * chains.
103 */
104 brw_wm_pass0(c);
105
106 /* Dead code removal.
107 */
108 brw_wm_pass1(c);
109
110 /* Register allocation.
111 * Divide by two because we operate on 16 pixels at a time and require
112 * two GRF entries for each logical shader register.
113 */
114 c->grf_limit = BRW_WM_MAX_GRF / 2;
115
116 brw_wm_pass2(c);
117
118 /* how many general-purpose registers are used */
119 c->prog_data.reg_blocks = brw_register_blocks(c->max_wm_grf);
120
121 /* Emit GEN4 code.
122 */
123 brw_wm_emit(c);
124 }
125
126
127 /**
128 * Return a bitfield where bit n is set if barycentric interpolation mode n
129 * (see enum brw_wm_barycentric_interp_mode) is needed by the fragment shader.
130 */
131 static unsigned
132 brw_compute_barycentric_interp_modes(bool shade_model_flat,
133 const struct gl_fragment_program *fprog)
134 {
135 unsigned barycentric_interp_modes = 0;
136 int attr;
137
138 /* Loop through all fragment shader inputs to figure out what interpolation
139 * modes are in use, and set the appropriate bits in
140 * barycentric_interp_modes.
141 */
142 for (attr = 0; attr < FRAG_ATTRIB_MAX; ++attr) {
143 enum glsl_interp_qualifier interp_qualifier =
144 fprog->InterpQualifier[attr];
145 bool is_gl_Color = attr == FRAG_ATTRIB_COL0 || attr == FRAG_ATTRIB_COL1;
146
147 /* Ignore unused inputs. */
148 if (!(fprog->Base.InputsRead & BITFIELD64_BIT(attr)))
149 continue;
150
151 /* Ignore WPOS and FACE, because they don't require interpolation. */
152 if (attr == FRAG_ATTRIB_WPOS || attr == FRAG_ATTRIB_FACE)
153 continue;
154
155 if (interp_qualifier == INTERP_QUALIFIER_NOPERSPECTIVE) {
156 barycentric_interp_modes |=
157 1 << BRW_WM_NONPERSPECTIVE_PIXEL_BARYCENTRIC;
158 } else if (interp_qualifier == INTERP_QUALIFIER_SMOOTH ||
159 (!(shade_model_flat && is_gl_Color) &&
160 interp_qualifier == INTERP_QUALIFIER_NONE)) {
161 barycentric_interp_modes |=
162 1 << BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC;
163 }
164 }
165
166 return barycentric_interp_modes;
167 }
168
169
170 void
171 brw_wm_payload_setup(struct brw_context *brw,
172 struct brw_wm_compile *c)
173 {
174 struct intel_context *intel = &brw->intel;
175 bool uses_depth = (c->fp->program.Base.InputsRead &
176 (1 << FRAG_ATTRIB_WPOS)) != 0;
177 unsigned barycentric_interp_modes = c->prog_data.barycentric_interp_modes;
178 int i;
179
180 if (intel->gen >= 6) {
181 /* R0-1: masks, pixel X/Y coordinates. */
182 c->nr_payload_regs = 2;
183 /* R2: only for 32-pixel dispatch.*/
184
185 /* R3-26: barycentric interpolation coordinates. These appear in the
186 * same order that they appear in the brw_wm_barycentric_interp_mode
187 * enum. Each set of coordinates occupies 2 registers if dispatch width
188 * == 8 and 4 registers if dispatch width == 16. Coordinates only
189 * appear if they were enabled using the "Barycentric Interpolation
190 * Mode" bits in WM_STATE.
191 */
192 for (i = 0; i < BRW_WM_BARYCENTRIC_INTERP_MODE_COUNT; ++i) {
193 if (barycentric_interp_modes & (1 << i)) {
194 c->barycentric_coord_reg[i] = c->nr_payload_regs;
195 c->nr_payload_regs += 2;
196 if (c->dispatch_width == 16) {
197 c->nr_payload_regs += 2;
198 }
199 }
200 }
201
202 /* R27: interpolated depth if uses source depth */
203 if (uses_depth) {
204 c->source_depth_reg = c->nr_payload_regs;
205 c->nr_payload_regs++;
206 if (c->dispatch_width == 16) {
207 /* R28: interpolated depth if not 8-wide. */
208 c->nr_payload_regs++;
209 }
210 }
211 /* R29: interpolated W set if GEN6_WM_USES_SOURCE_W.
212 */
213 if (uses_depth) {
214 c->source_w_reg = c->nr_payload_regs;
215 c->nr_payload_regs++;
216 if (c->dispatch_width == 16) {
217 /* R30: interpolated W if not 8-wide. */
218 c->nr_payload_regs++;
219 }
220 }
221 /* R31: MSAA position offsets. */
222 /* R32-: bary for 32-pixel. */
223 /* R58-59: interp W for 32-pixel. */
224
225 if (c->fp->program.Base.OutputsWritten &
226 BITFIELD64_BIT(FRAG_RESULT_DEPTH)) {
227 c->source_depth_to_render_target = true;
228 c->computes_depth = true;
229 }
230 } else {
231 brw_wm_lookup_iz(intel, c);
232 }
233 }
234
/**
 * All Mesa program -> GPU code generation goes through this function.
 * Depending on the instructions used (i.e. flow control instructions)
 * we'll use one of two code generators.
 *
 * On success the compiled binary and its prog_data are stored in the
 * program cache and brw->wm.prog_offset/prog_data are updated.
 * Returns false on out-of-memory or if the GLSL path fails to compile.
 */
bool do_wm_prog(struct brw_context *brw,
		struct gl_shader_program *prog,
		struct brw_fragment_program *fp,
		struct brw_wm_prog_key *key)
{
   struct intel_context *intel = &brw->intel;
   struct brw_wm_compile *c;
   const GLuint *program;
   GLuint program_size;

   /* The compile_data scratch structure is allocated once and reused
    * across compiles.
    */
   c = brw->wm.compile_data;
   if (c == NULL) {
      brw->wm.compile_data = rzalloc(NULL, struct brw_wm_compile);
      c = brw->wm.compile_data;
      if (c == NULL) {
	 /* Ouch - big out of memory problem.  Can't continue
	  * without triggering a segfault, no way to signal,
	  * so just return.
	  */
	 return false;
      }
   } else {
      /* Reuse the previously allocated instruction/vreg/ref arrays:
       * save the pointers, wipe the rest of the struct, then restore
       * them so the arrays aren't leaked or re-allocated.
       */
      void *instruction = c->instruction;
      void *prog_instructions = c->prog_instructions;
      void *vreg = c->vreg;
      void *refs = c->refs;
      memset(c, 0, sizeof(*brw->wm.compile_data));
      c->instruction = instruction;
      c->prog_instructions = prog_instructions;
      c->vreg = vreg;
      c->refs = refs;
   }
   memcpy(&c->key, key, sizeof(*key));

   c->fp = fp;
   c->env_param = brw->intel.ctx.FragmentProgram.Parameters;

   brw_init_compile(brw, &c->func, c);

   c->prog_data.barycentric_interp_modes =
      brw_compute_barycentric_interp_modes(c->key.flat_shade, &fp->program);

   /* GLSL shaders go through the new FS backend; everything else
    * (fixed function, ARB_fp) uses the legacy pass-based compiler.
    */
   if (prog && prog->_LinkedShaders[MESA_SHADER_FRAGMENT]) {
      if (!brw_wm_fs_emit(brw, c, prog))
	 return false;
   } else {
      /* Lazily allocate the legacy compiler's working arrays on first use. */
      if (!c->instruction) {
	 c->instruction = rzalloc_array(c, struct brw_wm_instruction, BRW_WM_MAX_INSN);
	 c->prog_instructions = rzalloc_array(c, struct prog_instruction, BRW_WM_MAX_INSN);
	 c->vreg = rzalloc_array(c, struct brw_wm_value, BRW_WM_MAX_VREG);
	 c->refs = rzalloc_array(c, struct brw_wm_ref, BRW_WM_MAX_REF);
      }

      /* Fallback for fixed function and ARB_fp shaders. */
      c->dispatch_width = 16;
      brw_wm_payload_setup(brw, c);
      brw_wm_non_glsl_emit(brw, c);
      c->prog_data.dispatch_width = 16;
   }

   /* Scratch space is used for register spilling */
   if (c->last_scratch) {
      c->prog_data.total_scratch = brw_get_scratch_size(c->last_scratch);

      /* One scratch slot per hardware thread. */
      brw_get_scratch_bo(intel, &brw->wm.scratch_bo,
			 c->prog_data.total_scratch * brw->max_wm_threads);
   }

   if (unlikely(INTEL_DEBUG & DEBUG_WM))
      fprintf(stderr, "\n");

   /* get the program
    */
   program = brw_get_program(&c->func, &program_size);

   /* Store the binary keyed by the full wm prog key; the cache also
    * updates brw->wm.prog_offset/prog_data for immediate use.
    */
   brw_upload_cache(&brw->cache, BRW_WM_PROG,
		    &c->key, sizeof(c->key),
		    program, program_size,
		    &c->prog_data, sizeof(c->prog_data),
		    &brw->wm.prog_offset, &brw->wm.prog_data);

   return true;
}
323
/**
 * Fill in the per-texture-unit portion of the program key for unit i:
 * shadow comparison function, channel swizzles (including
 * GL_DEPTH_TEXTURE_MODE handling), YUV texture flags, and which
 * wrap modes need GL_CLAMP emulation in the shader.
 */
void
brw_populate_sampler_prog_key_data(struct gl_context *ctx,
				   struct brw_sampler_prog_key_data *key,
				   int i)
{
   const struct gl_texture_unit *unit = &ctx->Texture.Unit[i];

   /* Buffer textures have no swizzle/filter state to capture. */
   if (unit->_ReallyEnabled && unit->_Current->Target != GL_TEXTURE_BUFFER) {
      const struct gl_texture_object *t = unit->_Current;
      const struct gl_texture_image *img = t->Image[0][t->BaseLevel];
      struct gl_sampler_object *sampler = _mesa_get_samplerobj(ctx, i);
      /* Identity mapping from swizzle selector to result channel;
       * depth-mode handling below may override entries.
       */
      int swizzles[SWIZZLE_NIL + 1] = {
	 SWIZZLE_X,
	 SWIZZLE_Y,
	 SWIZZLE_Z,
	 SWIZZLE_W,
	 SWIZZLE_ZERO,
	 SWIZZLE_ONE,
	 SWIZZLE_NIL
      };

      if (img->_BaseFormat == GL_DEPTH_COMPONENT ||
	  img->_BaseFormat == GL_DEPTH_STENCIL) {
	 if (sampler->CompareMode == GL_COMPARE_R_TO_TEXTURE_ARB)
	    key->compare_funcs[i] = sampler->CompareFunc;

	 /* We handle GL_DEPTH_TEXTURE_MODE here instead of as surface format
	  * overrides because shadow comparison always returns the result of
	  * the comparison in all channels anyway.
	  */
	 switch (sampler->DepthMode) {
	 case GL_ALPHA:
	    swizzles[0] = SWIZZLE_ZERO;
	    swizzles[1] = SWIZZLE_ZERO;
	    swizzles[2] = SWIZZLE_ZERO;
	    swizzles[3] = SWIZZLE_X;
	    break;
	 case GL_LUMINANCE:
	    swizzles[0] = SWIZZLE_X;
	    swizzles[1] = SWIZZLE_X;
	    swizzles[2] = SWIZZLE_X;
	    swizzles[3] = SWIZZLE_ONE;
	    break;
	 case GL_INTENSITY:
	    swizzles[0] = SWIZZLE_X;
	    swizzles[1] = SWIZZLE_X;
	    swizzles[2] = SWIZZLE_X;
	    swizzles[3] = SWIZZLE_X;
	    break;
	 case GL_RED:
	    swizzles[0] = SWIZZLE_X;
	    swizzles[1] = SWIZZLE_ZERO;
	    swizzles[2] = SWIZZLE_ZERO;
	    swizzles[3] = SWIZZLE_ONE;
	    break;
	 }
      }

      /* YCbCr textures need shader-side conversion; record the unit and
       * whether its component order is byte-swapped.
       */
      if (img->InternalFormat == GL_YCBCR_MESA) {
	 key->yuvtex_mask |= 1 << i;
	 if (img->TexFormat == MESA_FORMAT_YCBCR)
	     key->yuvtex_swap_mask |= 1 << i;
      }

      /* Compose the texture object's swizzle with any depth-mode override. */
      key->swizzles[i] =
	 MAKE_SWIZZLE4(swizzles[GET_SWZ(t->_Swizzle, 0)],
		       swizzles[GET_SWZ(t->_Swizzle, 1)],
		       swizzles[GET_SWZ(t->_Swizzle, 2)],
		       swizzles[GET_SWZ(t->_Swizzle, 3)]);

      /* GL_CLAMP with linear filtering must be emulated in the shader;
       * NEAREST/NEAREST behaves like CLAMP_TO_EDGE and needs nothing.
       */
      if (sampler->MinFilter != GL_NEAREST &&
	  sampler->MagFilter != GL_NEAREST) {
	 if (sampler->WrapS == GL_CLAMP)
	    key->gl_clamp_mask[0] |= 1 << i;
	 if (sampler->WrapT == GL_CLAMP)
	    key->gl_clamp_mask[1] |= 1 << i;
	 if (sampler->WrapR == GL_CLAMP)
	    key->gl_clamp_mask[2] |= 1 << i;
      }
   }
   else {
      /* Disabled or buffer-texture unit: identity swizzle. */
      key->swizzles[i] = SWIZZLE_NOOP;
   }
}
408
/**
 * Build the WM program key from current GL and brw state.  Any state
 * that affects generated fragment shader code must be captured here so
 * the program cache can distinguish the variants; the dirty flags that
 * trigger re-keying are listed in the brw_wm_prog state atom below.
 */
static void brw_wm_populate_key( struct brw_context *brw,
				 struct brw_wm_prog_key *key )
{
   struct gl_context *ctx = &brw->intel.ctx;
   /* BRW_NEW_FRAGMENT_PROGRAM */
   const struct brw_fragment_program *fp =
      (struct brw_fragment_program *)brw->fragment_program;
   const struct gl_program *prog = (struct gl_program *) brw->fragment_program;
   GLuint lookup = 0;
   GLuint line_aa;
   GLuint i;

   memset(key, 0, sizeof(*key));

   /* Build the index for table lookup
    */
   /* _NEW_COLOR */
   key->alpha_test = ctx->Color.AlphaEnabled;
   if (fp->program.UsesKill ||
       ctx->Color.AlphaEnabled)
      lookup |= IZ_PS_KILL_ALPHATEST_BIT;

   if (fp->program.Base.OutputsWritten & BITFIELD64_BIT(FRAG_RESULT_DEPTH))
      lookup |= IZ_PS_COMPUTES_DEPTH_BIT;

   /* _NEW_DEPTH */
   if (ctx->Depth.Test)
      lookup |= IZ_DEPTH_TEST_ENABLE_BIT;

   if (ctx->Depth.Test &&
       ctx->Depth.Mask) /* ?? */
      lookup |= IZ_DEPTH_WRITE_ENABLE_BIT;

   /* _NEW_STENCIL */
   if (ctx->Stencil._Enabled) {
      lookup |= IZ_STENCIL_TEST_ENABLE_BIT;

      if (ctx->Stencil.WriteMask[0] ||
	  ctx->Stencil.WriteMask[ctx->Stencil._BackFace])
	 lookup |= IZ_STENCIL_WRITE_ENABLE_BIT;
   }

   line_aa = AA_NEVER;

   /* _NEW_LINE, _NEW_POLYGON, BRW_NEW_REDUCED_PRIMITIVE
    *
    * Decide whether line antialiasing applies never, always, or only
    * sometimes (polygons drawn in line mode, depending on face culling).
    */
   if (ctx->Line.SmoothFlag) {
      if (brw->intel.reduced_primitive == GL_LINES) {
	 line_aa = AA_ALWAYS;
      }
      else if (brw->intel.reduced_primitive == GL_TRIANGLES) {
	 if (ctx->Polygon.FrontMode == GL_LINE) {
	    line_aa = AA_SOMETIMES;

	    if (ctx->Polygon.BackMode == GL_LINE ||
		(ctx->Polygon.CullFlag &&
		 ctx->Polygon.CullFaceMode == GL_BACK))
	       line_aa = AA_ALWAYS;
	 }
	 else if (ctx->Polygon.BackMode == GL_LINE) {
	    line_aa = AA_SOMETIMES;

	    if ((ctx->Polygon.CullFlag &&
		 ctx->Polygon.CullFaceMode == GL_FRONT))
	       line_aa = AA_ALWAYS;
	 }
      }
   }

   key->iz_lookup = lookup;
   key->line_aa = line_aa;
   key->stats_wm = brw->intel.stats_wm;

   /* BRW_NEW_WM_INPUT_DIMENSIONS */
   key->proj_attrib_mask = brw->wm.input_size_masks[4-1];

   /* _NEW_LIGHT */
   key->flat_shade = (ctx->Light.ShadeModel == GL_FLAT);

   /* _NEW_FRAG_CLAMP | _NEW_BUFFERS */
   key->clamp_fragment_color = ctx->Color._ClampFragmentColor;

   /* _NEW_TEXTURE */
   for (i = 0; i < BRW_MAX_TEX_UNIT; i++) {
      if (prog->TexturesUsed[i])
	 brw_populate_sampler_prog_key_data(ctx, &key->tex, i);
   }

   /* _NEW_BUFFERS */
   /*
    * Include the draw buffer origin and height so that we can calculate
    * fragment position values relative to the bottom left of the drawable,
    * from the incoming screen origin relative position we get as part of our
    * payload.
    *
    * This is only needed for the WM_WPOSXY opcode when the fragment program
    * uses the gl_FragCoord input.
    *
    * We could avoid recompiling by including this as a constant referenced by
    * our program, but if we were to do that it would also be nice to handle
    * getting that constant updated at batchbuffer submit time (when we
    * hold the lock and know where the buffer really is) rather than at emit
    * time when we don't hold the lock and are just guessing.  We could also
    * just avoid using this as key data if the program doesn't use
    * fragment.position.
    *
    * For DRI2 the origin_x/y will always be (0,0) but we still need the
    * drawable height in order to invert the Y axis.
    */
   if (fp->program.Base.InputsRead & FRAG_BIT_WPOS) {
      key->drawable_height = ctx->DrawBuffer->Height;
      key->render_to_fbo = ctx->DrawBuffer->Name != 0;
   }

   /* _NEW_BUFFERS */
   key->nr_color_regions = ctx->DrawBuffer->_NumColorDrawBuffers;

   /* CACHE_NEW_VS_PROG */
   key->vp_outputs_written = brw->vs.prog_data->outputs_written;

   /* The unique fragment program ID */
   key->program_string_id = fp->id;
}
531
532
533 static void
534 brw_upload_wm_prog(struct brw_context *brw)
535 {
536 struct intel_context *intel = &brw->intel;
537 struct gl_context *ctx = &intel->ctx;
538 struct brw_wm_prog_key key;
539 struct brw_fragment_program *fp = (struct brw_fragment_program *)
540 brw->fragment_program;
541
542 brw_wm_populate_key(brw, &key);
543
544 if (!brw_search_cache(&brw->cache, BRW_WM_PROG,
545 &key, sizeof(key),
546 &brw->wm.prog_offset, &brw->wm.prog_data)) {
547 bool success = do_wm_prog(brw, ctx->Shader._CurrentFragmentProgram, fp,
548 &key);
549 (void) success;
550 assert(success);
551 }
552 }
553
554
555 const struct brw_tracked_state brw_wm_prog = {
556 .dirty = {
557 .mesa = (_NEW_COLOR |
558 _NEW_DEPTH |
559 _NEW_STENCIL |
560 _NEW_POLYGON |
561 _NEW_LINE |
562 _NEW_LIGHT |
563 _NEW_FRAG_CLAMP |
564 _NEW_BUFFERS |
565 _NEW_TEXTURE),
566 .brw = (BRW_NEW_FRAGMENT_PROGRAM |
567 BRW_NEW_WM_INPUT_DIMENSIONS |
568 BRW_NEW_REDUCED_PRIMITIVE),
569 .cache = CACHE_NEW_VS_PROG,
570 },
571 .emit = brw_upload_wm_prog
572 };
573