Merge branch 'gallium-0.1' into gallium-0.2
[mesa.git] / src / mesa / drivers / dri / intel / intel_context.c
1 /**************************************************************************
2 *
3 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28
29 #include "glheader.h"
30 #include "context.h"
31 #include "matrix.h"
32 #include "simple_list.h"
33 #include "extensions.h"
34 #include "framebuffer.h"
35 #include "imports.h"
36 #include "points.h"
37
38 #include "swrast/swrast.h"
39 #include "swrast_setup/swrast_setup.h"
40 #include "tnl/tnl.h"
41
42 #include "tnl/t_pipeline.h"
43 #include "tnl/t_vertex.h"
44
45 #include "drivers/common/driverfuncs.h"
46
47 #include "intel_screen.h"
48
49 #include "i830_dri.h"
50
51 #include "intel_chipset.h"
52 #include "intel_buffers.h"
53 #include "intel_tex.h"
54 #include "intel_ioctl.h"
55 #include "intel_batchbuffer.h"
56 #include "intel_blit.h"
57 #include "intel_pixel.h"
58 #include "intel_regions.h"
59 #include "intel_buffer_objects.h"
60 #include "intel_fbo.h"
61 #include "intel_decode.h"
62 #include "intel_bufmgr.h"
63
64 #include "drirenderbuffer.h"
65 #include "vblank.h"
66 #include "utils.h"
67 #include "xmlpool.h" /* for symbolic values of enum-type options */
68 #ifndef INTEL_DEBUG
69 int INTEL_DEBUG = (0);
70 #endif
71
72 #define need_GL_NV_point_sprite
73 #define need_GL_ARB_multisample
74 #define need_GL_ARB_point_parameters
75 #define need_GL_ARB_texture_compression
76 #define need_GL_ARB_vertex_buffer_object
77 #define need_GL_ARB_vertex_program
78 #define need_GL_ARB_window_pos
79 #define need_GL_ARB_occlusion_query
80 #define need_GL_EXT_blend_color
81 #define need_GL_EXT_blend_equation_separate
82 #define need_GL_EXT_blend_func_separate
83 #define need_GL_EXT_blend_minmax
84 #define need_GL_EXT_cull_vertex
85 #define need_GL_EXT_fog_coord
86 #define need_GL_EXT_framebuffer_object
87 #define need_GL_EXT_multi_draw_arrays
88 #define need_GL_EXT_secondary_color
89 #define need_GL_NV_vertex_program
90 #define need_GL_ATI_separate_stencil
91 #define need_GL_EXT_point_parameters
92 #define need_GL_VERSION_2_0
93 #define need_GL_VERSION_2_1
94 #define need_GL_ARB_shader_objects
95 #define need_GL_ARB_vertex_shader
96
97 #include "extension_helper.h"
98
99 #define DRIVER_DATE "20080716"
100 #define DRIVER_DATE_GEM "GEM " DRIVER_DATE
101
102 static const GLubyte *
103 intelGetString(GLcontext * ctx, GLenum name)
104 {
105 const struct intel_context *const intel = intel_context(ctx);
106 const char *chipset;
107 static char buffer[128];
108
109 switch (name) {
110 case GL_VENDOR:
111 return (GLubyte *) "Tungsten Graphics, Inc";
112 break;
113
114 case GL_RENDERER:
115 switch (intel->intelScreen->deviceID) {
116 case PCI_CHIP_845_G:
117 chipset = "Intel(R) 845G";
118 break;
119 case PCI_CHIP_I830_M:
120 chipset = "Intel(R) 830M";
121 break;
122 case PCI_CHIP_I855_GM:
123 chipset = "Intel(R) 852GM/855GM";
124 break;
125 case PCI_CHIP_I865_G:
126 chipset = "Intel(R) 865G";
127 break;
128 case PCI_CHIP_I915_G:
129 chipset = "Intel(R) 915G";
130 break;
131 case PCI_CHIP_E7221_G:
132 chipset = "Intel (R) E7221G (i915)";
133 break;
134 case PCI_CHIP_I915_GM:
135 chipset = "Intel(R) 915GM";
136 break;
137 case PCI_CHIP_I945_G:
138 chipset = "Intel(R) 945G";
139 break;
140 case PCI_CHIP_I945_GM:
141 chipset = "Intel(R) 945GM";
142 break;
143 case PCI_CHIP_I945_GME:
144 chipset = "Intel(R) 945GME";
145 break;
146 case PCI_CHIP_G33_G:
147 chipset = "Intel(R) G33";
148 break;
149 case PCI_CHIP_Q35_G:
150 chipset = "Intel(R) Q35";
151 break;
152 case PCI_CHIP_Q33_G:
153 chipset = "Intel(R) Q33";
154 break;
155 case PCI_CHIP_I965_Q:
156 chipset = "Intel(R) 965Q";
157 break;
158 case PCI_CHIP_I965_G:
159 case PCI_CHIP_I965_G_1:
160 chipset = "Intel(R) 965G";
161 break;
162 case PCI_CHIP_I946_GZ:
163 chipset = "Intel(R) 946GZ";
164 break;
165 case PCI_CHIP_I965_GM:
166 chipset = "Intel(R) 965GM";
167 break;
168 case PCI_CHIP_I965_GME:
169 chipset = "Intel(R) 965GME/GLE";
170 break;
171 case PCI_CHIP_GM45_GM:
172 chipset = "Mobile IntelĀ® GM45 Express Chipset";
173 break;
174 case PCI_CHIP_IGD_E_G:
175 chipset = "Intel(R) Integrated Graphics Device";
176 break;
177 case PCI_CHIP_G45_G:
178 chipset = "Intel(R) G45/G43";
179 break;
180 case PCI_CHIP_Q45_G:
181 chipset = "Intel(R) Q45/Q43";
182 break;
183 default:
184 chipset = "Unknown Intel Chipset";
185 break;
186 }
187
188 (void) driGetRendererString(buffer, chipset,
189 (intel->ttm) ? DRIVER_DATE_GEM : DRIVER_DATE,
190 0);
191 return (GLubyte *) buffer;
192
193 default:
194 return NULL;
195 }
196 }
197
198 /**
199 * Extension strings exported by the intel driver.
200 *
201 * Extensions supported by all chips supported by i830_dri, i915_dri, or
202 * i965_dri.
203 */
204 static const struct dri_extension card_extensions[] = {
205 {"GL_ARB_multisample", GL_ARB_multisample_functions},
206 {"GL_ARB_multitexture", NULL},
207 {"GL_ARB_point_parameters", GL_ARB_point_parameters_functions},
208 {"GL_NV_point_sprite", GL_NV_point_sprite_functions},
209 {"GL_ARB_texture_border_clamp", NULL},
210 {"GL_ARB_texture_compression", GL_ARB_texture_compression_functions},
211 {"GL_ARB_texture_cube_map", NULL},
212 {"GL_ARB_texture_env_add", NULL},
213 {"GL_ARB_texture_env_combine", NULL},
214 {"GL_ARB_texture_env_crossbar", NULL},
215 {"GL_ARB_texture_env_dot3", NULL},
216 {"GL_ARB_texture_mirrored_repeat", NULL},
217 {"GL_ARB_texture_non_power_of_two", NULL },
218 {"GL_ARB_texture_rectangle", NULL},
219 {"GL_NV_texture_rectangle", NULL},
220 {"GL_EXT_texture_rectangle", NULL},
221 {"GL_ARB_point_parameters", NULL},
222 {"GL_ARB_vertex_buffer_object", GL_ARB_vertex_buffer_object_functions},
223 {"GL_ARB_vertex_program", GL_ARB_vertex_program_functions},
224 {"GL_ARB_window_pos", GL_ARB_window_pos_functions},
225 {"GL_EXT_blend_color", GL_EXT_blend_color_functions},
226 {"GL_EXT_blend_equation_separate",
227 GL_EXT_blend_equation_separate_functions},
228 {"GL_EXT_blend_func_separate", GL_EXT_blend_func_separate_functions},
229 {"GL_EXT_blend_minmax", GL_EXT_blend_minmax_functions},
230 {"GL_EXT_blend_logic_op", NULL},
231 {"GL_EXT_blend_subtract", NULL},
232 {"GL_EXT_cull_vertex", GL_EXT_cull_vertex_functions},
233 {"GL_EXT_fog_coord", GL_EXT_fog_coord_functions},
234 {"GL_EXT_multi_draw_arrays", GL_EXT_multi_draw_arrays_functions},
235 {"GL_ATI_separate_stencil", GL_ATI_separate_stencil_functions},
236 #if 1 /* XXX FBO temporary? */
237 {"GL_EXT_packed_depth_stencil", NULL},
238 #endif
239 {"GL_EXT_secondary_color", GL_EXT_secondary_color_functions},
240 {"GL_EXT_stencil_wrap", NULL},
241 {"GL_EXT_texture_edge_clamp", NULL},
242 {"GL_EXT_texture_env_combine", NULL},
243 {"GL_EXT_texture_env_dot3", NULL},
244 {"GL_EXT_texture_filter_anisotropic", NULL},
245 {"GL_EXT_texture_lod_bias", NULL},
246 {"GL_3DFX_texture_compression_FXT1", NULL},
247 {"GL_APPLE_client_storage", NULL},
248 {"GL_MESA_pack_invert", NULL},
249 {"GL_MESA_ycbcr_texture", NULL},
250 {"GL_NV_blend_square", NULL},
251 {"GL_NV_vertex_program", GL_NV_vertex_program_functions},
252 {"GL_NV_vertex_program1_1", NULL},
253 { "GL_SGIS_generate_mipmap", NULL },
254 {NULL, NULL}
255 };
256
/**
 * Extensions enabled only on i965-class ("brw") hardware, which has the
 * shader and fragment-program support the older chips lack.
 */
static const struct dri_extension brw_extensions[] = {
   { "GL_ARB_shading_language_100", GL_VERSION_2_0_functions},
   { "GL_ARB_shading_language_120", GL_VERSION_2_1_functions},
   { "GL_ARB_shader_objects", GL_ARB_shader_objects_functions},
   { "GL_ARB_vertex_shader", GL_ARB_vertex_shader_functions},
   { "GL_ARB_point_sprite", NULL},
   { "GL_ARB_fragment_shader", NULL },
   { "GL_ARB_draw_buffers", NULL },
   { "GL_ARB_depth_texture", NULL },
   { "GL_ARB_fragment_program", NULL },
   { "GL_ARB_shadow", NULL },
   { "GL_EXT_shadow_funcs", NULL },
   { "GL_ARB_fragment_program_shadow", NULL },
   /* ARB extn won't work if not enabled */
   { "GL_SGIX_depth_texture", NULL },
   { "GL_EXT_texture_sRGB", NULL},
   { NULL, NULL }
};
275
/**
 * Occlusion query support; only enabled when the DRM is new enough to
 * provide the MMIO register-read ioctl (see intelInitExtensions).
 */
static const struct dri_extension arb_oc_extensions[] = {
   {"GL_ARB_occlusion_query", GL_ARB_occlusion_query_functions},
   {NULL, NULL}
};
280
/**
 * Extensions that require the GEM/TTM buffer manager (enabled when
 * intel->ttm is set).
 */
static const struct dri_extension ttm_extensions[] = {
   {"GL_EXT_framebuffer_object", GL_EXT_framebuffer_object_functions},
   {"GL_ARB_pixel_buffer_object", NULL},
   {NULL, NULL}
};
286
287 /**
288 * Initializes potential list of extensions if ctx == NULL, or actually enables
289 * extensions for a context.
290 */
291 void intelInitExtensions(GLcontext *ctx, GLboolean enable_imaging)
292 {
293 struct intel_context *intel = ctx?intel_context(ctx):NULL;
294
295 /* Disable imaging extension until convolution is working in teximage paths.
296 */
297 enable_imaging = GL_FALSE;
298
299 driInitExtensions(ctx, card_extensions, enable_imaging);
300
301 if (intel == NULL || intel->ttm)
302 driInitExtensions(ctx, ttm_extensions, GL_FALSE);
303
304 if (intel == NULL ||
305 (IS_965(intel->intelScreen->deviceID) &&
306 intel->intelScreen->drmMinor >= 8))
307 driInitExtensions(ctx, arb_oc_extensions, GL_FALSE);
308
309 if (intel == NULL || IS_965(intel->intelScreen->deviceID))
310 driInitExtensions(ctx, brw_extensions, GL_FALSE);
311 }
312
/**
 * Mapping of INTEL_DEBUG environment-variable tokens to debug flag bits;
 * parsed with driParseDebugString() in intelInitContext().
 */
static const struct dri_debug_control debug_control[] = {
   { "tex", DEBUG_TEXTURE},
   { "state", DEBUG_STATE},
   { "ioctl", DEBUG_IOCTL},
   { "blit", DEBUG_BLIT},
   { "mip", DEBUG_MIPTREE},
   { "fall", DEBUG_FALLBACKS},
   { "verb", DEBUG_VERBOSE},
   { "bat", DEBUG_BATCH},
   { "pix", DEBUG_PIXEL},
   { "buf", DEBUG_BUFMGR},
   { "reg", DEBUG_REGION},
   { "fbo", DEBUG_FBO},
   { "lock", DEBUG_LOCK},
   { "sync", DEBUG_SYNC},
   { "prim", DEBUG_PRIMS },
   { "vert", DEBUG_VERTS },
   { "dri", DEBUG_DRI },
   { "dma", DEBUG_DMA },
   { "san", DEBUG_SANITY },
   { "sleep", DEBUG_SLEEP },
   { "stats", DEBUG_STATS },
   { "tile", DEBUG_TILE },
   /* "sing" and "thre" both map to single-threaded mode. */
   { "sing", DEBUG_SINGLE_THREAD },
   { "thre", DEBUG_SINGLE_THREAD },
   { "wm", DEBUG_WM },
   { "urb", DEBUG_URB },
   { "vs", DEBUG_VS },
   { NULL, 0 }
};
343
344
/**
 * dd UpdateState hook: propagate a Mesa state-change bitmask to the
 * software helper modules and accumulate it for the hardware backend.
 */
static void
intelInvalidateState(GLcontext * ctx, GLuint new_state)
{
   struct intel_context *intel = intel_context(ctx);

   _swrast_InvalidateState(ctx, new_state);
   _swsetup_InvalidateState(ctx, new_state);
   _vbo_InvalidateState(ctx, new_state);
   _tnl_InvalidateState(ctx, new_state);
   _tnl_invalidate_vertex_state(ctx, new_state);

   /* Dirty bits accumulate here until the chip-specific state upload
    * consumes them.
    */
   intel->NewGLState |= new_state;

   /* Let the i830/i915/i965 backend react too, if it implements the hook. */
   if (intel->vtbl.invalidate_state)
      intel->vtbl.invalidate_state( intel, new_state );
}
361
362
/**
 * dd Flush hook: push all queued rendering toward the hardware.
 *
 * Flushes any software-rasterizer fallback output, fires pending
 * hardware vertices (pre-965 only), emits an MI_FLUSH, and submits the
 * batchbuffer if it contains anything.
 */
void
intelFlush(GLcontext * ctx)
{
   struct intel_context *intel = intel_context(ctx);

   /* Software fallback rendering is buffered in swrast; drain it first. */
   if (intel->Fallback)
      _swrast_flush(ctx);

   if (!IS_965(intel->intelScreen->deviceID))
      INTEL_FIREVERTICES(intel);

   /* Emit a flush so that any frontbuffer rendering that might have occurred
    * lands onscreen in a timely manner, even if the X Server doesn't trigger
    * a flush for us.
    */
   intel_batchbuffer_emit_mi_flush(intel->batch);

   /* map != ptr means commands have been written since the last flush. */
   if (intel->batch->map != intel->batch->ptr)
      intel_batchbuffer_flush(intel->batch);
}
383
384 void
385 intelFinish(GLcontext * ctx)
386 {
387 struct gl_framebuffer *fb = ctx->DrawBuffer;
388 int i;
389
390 intelFlush(ctx);
391
392 for (i = 0; i < fb->_NumColorDrawBuffers; i++) {
393 struct intel_renderbuffer *irb;
394
395 irb = intel_renderbuffer(fb->_ColorDrawBuffers[i]);
396
397 if (irb->region)
398 dri_bo_wait_rendering(irb->region->buffer);
399 }
400 if (fb->_DepthBuffer) {
401 /* XXX: Wait on buffer idle */
402 }
403 }
404
/**
 * BeginQuery hook for GL_ARB_occlusion_query.
 *
 * Drains outstanding rendering and samples the PS_DEPTH_COUNT register
 * (depth-test-passed counter) into q->Result; intelEndQuery() later
 * computes the delta against a second sample.
 */
static void
intelBeginQuery(GLcontext *ctx, GLenum target, struct gl_query_object *q)
{
   struct intel_context *intel = intel_context( ctx );
   struct drm_i915_mmio io = {
      .read_write = I915_MMIO_READ,
      .reg = MMIO_REGS_PS_DEPTH_COUNT,
      .data = &q->Result
   };
   /* While any query is active, WM state is emitted with statistics
    * enabled (see the matching decrement in intelEndQuery).
    */
   intel->stats_wm++;
   /* Make sure all prior rendering has landed before reading the counter. */
   intelFinish(&intel->ctx);
   drmCommandWrite(intel->driFd, DRM_I915_MMIO, &io, sizeof(io));
}
418
/**
 * EndQuery hook for GL_ARB_occlusion_query.
 *
 * Samples PS_DEPTH_COUNT again and stores the difference from the
 * BeginQuery sample as the query result, then marks the query ready.
 */
static void
intelEndQuery(GLcontext *ctx, GLenum target, struct gl_query_object *q)
{
   struct intel_context *intel = intel_context( ctx );
   GLuint64EXT tmp;
   struct drm_i915_mmio io = {
      .read_write = I915_MMIO_READ,
      .reg = MMIO_REGS_PS_DEPTH_COUNT,
      .data = &tmp
   };
   /* Drain rendering so the counter reflects everything in this query. */
   intelFinish(&intel->ctx);
   drmCommandWrite(intel->driFd, DRM_I915_MMIO, &io, sizeof(io));
   /* Result = samples passed between Begin and End. */
   q->Result = tmp - q->Result;
   q->Ready = GL_TRUE;
   intel->stats_wm--;
}
435
/**
 * Driver-specific fence emit implementation for the fake memory manager:
 * emits an IRQ and hands its cookie back so the bufmgr can wait on it.
 */
static unsigned int
intel_fence_emit(void *private)
{
   struct intel_context *intel = (struct intel_context *) private;

   /* XXX: Need to emit a flush, if we haven't already (at least with the
    * current batchbuffer implementation, we have).
    */
   return intelEmitIrqLocked(intel);
}
451
/**
 * Driver-specific fence wait implementation for the fake memory manager:
 * blocks until the IRQ cookie from intel_fence_emit() has signalled.
 * Always returns 0.
 */
static int
intel_fence_wait(void *private, unsigned int cookie)
{
   intelWaitIrq((struct intel_context *) private, cookie);
   return 0;
}
462
463 static GLboolean
464 intel_init_bufmgr(struct intel_context *intel)
465 {
466 intelScreenPrivate *intelScreen = intel->intelScreen;
467 GLboolean gem_disable = getenv("INTEL_NO_GEM") != NULL;
468 int gem_kernel = 0;
469 GLboolean gem_supported;
470 struct drm_i915_getparam gp;
471
472 gp.param = I915_PARAM_HAS_GEM;
473 gp.value = &gem_kernel;
474
475 (void) drmCommandWriteRead(intel->driFd, DRM_I915_GETPARAM, &gp, sizeof(gp));
476
477 /* If we've got a new enough DDX that's initializing GEM and giving us
478 * object handles for the shared buffers, use that.
479 */
480 intel->ttm = GL_FALSE;
481 if (intel->intelScreen->driScrnPriv->dri2.enabled)
482 gem_supported = GL_TRUE;
483 else if (intel->intelScreen->driScrnPriv->ddx_version.minor >= 9 &&
484 gem_kernel &&
485 intel->intelScreen->front.bo_handle != -1)
486 gem_supported = GL_TRUE;
487 else
488 gem_supported = GL_FALSE;
489
490 if (!gem_disable && gem_supported) {
491 int bo_reuse_mode;
492 intel->bufmgr = intel_bufmgr_gem_init(intel->driFd,
493 BATCH_SZ);
494 if (intel->bufmgr != NULL)
495 intel->ttm = GL_TRUE;
496
497 bo_reuse_mode = driQueryOptioni(&intel->optionCache, "bo_reuse");
498 switch (bo_reuse_mode) {
499 case DRI_CONF_BO_REUSE_DISABLED:
500 break;
501 case DRI_CONF_BO_REUSE_ALL:
502 intel_bufmgr_gem_enable_reuse(intel->bufmgr);
503 break;
504 }
505 }
506 /* Otherwise, use the classic buffer manager. */
507 if (intel->bufmgr == NULL) {
508 if (gem_disable) {
509 fprintf(stderr, "GEM disabled. Using classic.\n");
510 } else {
511 fprintf(stderr, "Failed to initialize GEM. "
512 "Falling back to classic.\n");
513 }
514
515 if (intelScreen->tex.size == 0) {
516 fprintf(stderr, "[%s:%u] Error initializing buffer manager.\n",
517 __func__, __LINE__);
518 return GL_FALSE;
519 }
520
521 intel->bufmgr = intel_bufmgr_fake_init(intelScreen->tex.offset,
522 intelScreen->tex.map,
523 intelScreen->tex.size,
524 intel_fence_emit,
525 intel_fence_wait,
526 intel);
527 }
528
529 /* XXX bufmgr should be per-screen, not per-context */
530 intelScreen->ttm = intel->ttm;
531
532 return GL_TRUE;
533 }
534
/**
 * Fill in the dd_function_table with the intel driver's hooks, starting
 * from Mesa's software defaults and overriding what the hardware path
 * implements (plus the chip-family-specific texture/state/buffer/pixel
 * function setups).
 */
void
intelInitDriverFunctions(struct dd_function_table *functions)
{
   _mesa_init_driver_functions(functions);

   functions->Flush = intelFlush;
   functions->Finish = intelFinish;
   functions->GetString = intelGetString;
   functions->UpdateState = intelInvalidateState;

   /* Color-table/convolution copies stay on the swrast paths. */
   functions->CopyColorTable = _swrast_CopyColorTable;
   functions->CopyColorSubTable = _swrast_CopyColorSubTable;
   functions->CopyConvolutionFilter1D = _swrast_CopyConvolutionFilter1D;
   functions->CopyConvolutionFilter2D = _swrast_CopyConvolutionFilter2D;

   functions->BeginQuery = intelBeginQuery;
   functions->EndQuery = intelEndQuery;

   intelInitTextureFuncs(functions);
   intelInitStateFuncs(functions);
   intelInitBufferFuncs(functions);
   intelInitPixelFuncs(functions);
}
558
559
/**
 * Create and initialize the intel rendering context for a DRI context.
 *
 * Initializes the Mesa context, driconf options, buffer manager, the
 * software helper modules (swrast/vbo/tnl/swsetup), implementation
 * constants, extensions, batchbuffer and FBO/bufferobj support.
 *
 * \param intel              driver context to fill in (embeds the GLcontext)
 * \param mesaVis            visual the context was created with
 * \param driContextPriv     DRI context; its driverPrivate is set to intel
 * \param sharedContextPrivate  share-list context, or NULL
 * \param functions          driver function table (see intelInitDriverFunctions)
 * \return GL_TRUE on success; GL_FALSE if Mesa context creation or the
 *         buffer manager setup fails.  May exit(1) on 965 without IRQs.
 */
GLboolean
intelInitContext(struct intel_context *intel,
                 const __GLcontextModes * mesaVis,
                 __DRIcontextPrivate * driContextPriv,
                 void *sharedContextPrivate,
                 struct dd_function_table *functions)
{
   GLcontext *ctx = &intel->ctx;
   GLcontext *shareCtx = (GLcontext *) sharedContextPrivate;
   __DRIscreenPrivate *sPriv = driContextPriv->driScreenPriv;
   intelScreenPrivate *intelScreen = (intelScreenPrivate *) sPriv->private;
   /* The SAREA is shared with the X server/kernel, hence volatile. */
   volatile struct drm_i915_sarea *saPriv = (struct drm_i915_sarea *)
      (((GLubyte *) sPriv->pSAREA) + intelScreen->sarea_priv_offset);
   int fthrottle_mode;

   if (!_mesa_initialize_context(&intel->ctx, mesaVis, shareCtx,
                                 functions, (void *) intel)) {
      _mesa_printf("%s: failed to init mesa context\n", __FUNCTION__);
      return GL_FALSE;
   }

   driContextPriv->driverPrivate = intel;
   intel->intelScreen = intelScreen;
   intel->driScreen = sPriv;
   intel->sarea = saPriv;

   /* Dri stuff */
   intel->hHWContext = driContextPriv->hHWContext;
   intel->driFd = sPriv->fd;
   intel->driHwLock = sPriv->lock;

   intel->width = intelScreen->width;
   intel->height = intelScreen->height;

   /* driconf section name depends on the chip generation. */
   driParseConfigFiles(&intel->optionCache, &intelScreen->optionCache,
                       intel->driScreen->myNum,
                       IS_965(intelScreen->deviceID) ? "i965" : "i915");
   /* 865G gets a smaller batch buffer -- presumably a hardware
    * limitation; TODO confirm the reason.
    */
   if (intelScreen->deviceID == PCI_CHIP_I865_G)
      intel->maxBatchSize = 4096;
   else
      intel->maxBatchSize = BATCH_SZ;

   if (!intel_init_bufmgr(intel))
      return GL_FALSE;

   ctx->Const.MaxTextureMaxAnisotropy = 2.0;

   /* This doesn't yet catch all non-conformant rendering, but it's a
    * start.
    */
   if (getenv("INTEL_STRICT_CONFORMANCE")) {
      intel->strict_conformance = 1;
   }

   /* Conformance restricts wide lines; otherwise advertise up to 5.0. */
   if (intel->strict_conformance) {
      ctx->Const.MinLineWidth = 1.0;
      ctx->Const.MinLineWidthAA = 1.0;
      ctx->Const.MaxLineWidth = 1.0;
      ctx->Const.MaxLineWidthAA = 1.0;
      ctx->Const.LineWidthGranularity = 1.0;
   }
   else {
      ctx->Const.MinLineWidth = 1.0;
      ctx->Const.MinLineWidthAA = 1.0;
      ctx->Const.MaxLineWidth = 5.0;
      ctx->Const.MaxLineWidthAA = 5.0;
      ctx->Const.LineWidthGranularity = 0.5;
   }

   ctx->Const.MinPointSize = 1.0;
   ctx->Const.MinPointSizeAA = 1.0;
   ctx->Const.MaxPointSize = 255.0;
   ctx->Const.MaxPointSizeAA = 3.0;
   ctx->Const.PointSizeGranularity = 1.0;

   /* reinitialize the context point state.
    * It depend on constants in __GLcontextRec::Const
    */
   _mesa_init_point(ctx);

   ctx->Const.MaxColorAttachments = 4;  /* XXX FBO: review this */

   /* Initialize the software rasterizer and helper modules. */
   _swrast_CreateContext(ctx);
   _vbo_CreateContext(ctx);
   _tnl_CreateContext(ctx);
   _swsetup_CreateContext(ctx);

   /* Configure swrast to match hardware characteristics: */
   _swrast_allow_pixel_fog(ctx, GL_FALSE);
   _swrast_allow_vertex_fog(ctx, GL_TRUE);

   /* Hardware stencil only with a 24-bit depth buffer (packed D24S8). */
   intel->hw_stencil = mesaVis->stencilBits && mesaVis->depthBits == 24;
   intel->hw_stipple = 1;

   /* XXX FBO: this doesn't seem to be used anywhere */
   switch (mesaVis->depthBits) {
   case 0:                     /* what to do in this case? */
   case 16:
      intel->polygon_offset_scale = 1.0;
      break;
   case 24:
      intel->polygon_offset_scale = 2.0;       /* req'd to pass glean */
      break;
   default:
      assert(0);
      break;
   }

   if (IS_965(intelScreen->deviceID))
      intel->polygon_offset_scale /= 0xffff;

   intel->RenderIndex = ~0;

   /* Frame throttling: prefer IRQ waits when available, else usleep. */
   fthrottle_mode = driQueryOptioni(&intel->optionCache, "fthrottle_mode");
   intel->irqsEmitted = 0;

   intel->do_irqs = (intel->intelScreen->irq_active &&
                     fthrottle_mode == DRI_CONF_FTHROTTLE_IRQS);

   intel->do_usleeps = (fthrottle_mode == DRI_CONF_FTHROTTLE_USLEEPS);

   _math_matrix_ctr(&intel->ViewportMatrix);

   /* The 965 driver requires working IRQs; bail out hard without them. */
   if (IS_965(intelScreen->deviceID) && !intel->intelScreen->irq_active) {
      _mesa_printf("IRQs not active.  Exiting\n");
      exit(1);
   }

   intelInitExtensions(ctx, GL_FALSE);

   INTEL_DEBUG = driParseDebugString(getenv("INTEL_DEBUG"), debug_control);
   if (INTEL_DEBUG & DEBUG_BUFMGR)
      dri_bufmgr_set_debug(intel->bufmgr, GL_TRUE);

   /* Under DRI2, renderbuffer regions come from the loader instead. */
   if (!sPriv->dri2.enabled)
      intel_recreate_static_regions(intel);

   intel->batch = intel_batchbuffer_alloc(intel);

   intel_bufferobj_init(intel);
   intel_fbo_init(intel);

   /* S3TC: enabled when a software (de)compressor is present, or forced
    * via driconf (decompression then happens in hardware only).
    */
   if (intel->ctx.Mesa_DXTn) {
      _mesa_enable_extension(ctx, "GL_EXT_texture_compression_s3tc");
      _mesa_enable_extension(ctx, "GL_S3_s3tc");
   }
   else if (driQueryOptionb(&intel->optionCache, "force_s3tc_enable")) {
      _mesa_enable_extension(ctx, "GL_EXT_texture_compression_s3tc");
   }

   intel->prim.primitive = ~0;

   /* Force all software fallbacks */
   if (driQueryOptionb(&intel->optionCache, "no_rast")) {
      fprintf(stderr, "disabling 3D rasterization\n");
      intel->no_rast = 1;
   }

   /* Disable all hardware rendering (skip emitting batches and fences/waits
    * to the kernel)
    */
   intel->no_hw = getenv("INTEL_NO_HW") != NULL;

   return GL_TRUE;
}
726
/**
 * Tear down an intel context: flush pending rendering, destroy the
 * chip-specific state, the helper modules, the batchbuffer and finally
 * the Mesa context data and buffer manager.
 */
void
intelDestroyContext(__DRIcontextPrivate * driContextPriv)
{
   struct intel_context *intel =
      (struct intel_context *) driContextPriv->driverPrivate;

   assert(intel);               /* should never be null */
   if (intel) {
      GLboolean release_texture_heaps;

      /* Flush any queued hardware vertices before tearing down. */
      INTEL_FIREVERTICES(intel);

      /* Chip-specific (i830/i915/i965) destruction first. */
      intel->vtbl.destroy(intel);

      /* If we're the last context in the share group, its shared texture
       * data goes away with us.
       */
      release_texture_heaps = (intel->ctx.Shared->RefCount == 1);
      _swsetup_DestroyContext(&intel->ctx);
      _tnl_DestroyContext(&intel->ctx);
      _vbo_DestroyContext(&intel->ctx);

      _swrast_DestroyContext(&intel->ctx);
      intel->Fallback = 0;      /* don't call _swrast_Flush later */

      intel_batchbuffer_free(intel->batch);
      free(intel->prim.vb);

      if (release_texture_heaps) {
         /* This share group is about to go away, free our private
          * texture object data.
          */
         if (INTEL_DEBUG & DEBUG_TEXTURE)
            fprintf(stderr, "do something to free texture heaps\n");
      }

      /* free the Mesa context */
      _mesa_free_context_data(&intel->ctx);

      dri_bufmgr_destroy(intel->bufmgr);
   }
}
766
767 GLboolean
768 intelUnbindContext(__DRIcontextPrivate * driContextPriv)
769 {
770 return GL_TRUE;
771 }
772
/**
 * DRI MakeCurrent hook: bind the context to the given draw/read
 * drawables (or unbind with a NULL context).
 *
 * Also performs first-bind fix-ups: attaching regions to renderbuffers
 * in the non-DRI2 path, syncing framebuffer sizes to the window, and
 * initializing vblank state the first time a drawable is bound.
 * Always returns GL_TRUE.
 */
GLboolean
intelMakeCurrent(__DRIcontextPrivate * driContextPriv,
                 __DRIdrawablePrivate * driDrawPriv,
                 __DRIdrawablePrivate * driReadPriv)
{
   __DRIscreenPrivate *psp = driDrawPriv->driScreenPriv;

   if (driContextPriv) {
      struct intel_context *intel =
         (struct intel_context *) driContextPriv->driverPrivate;
      struct intel_framebuffer *intel_fb =
         (struct intel_framebuffer *) driDrawPriv->driverPrivate;
      GLframebuffer *readFb = (GLframebuffer *) driReadPriv->driverPrivate;


      /* XXX FBO temporary fix-ups! */
      /* if the renderbuffers don't have regions, init them from the context */
      if (!driContextPriv->driScreenPriv->dri2.enabled) {
         struct intel_renderbuffer *irbDepth
            = intel_get_renderbuffer(&intel_fb->Base, BUFFER_DEPTH);
         struct intel_renderbuffer *irbStencil
            = intel_get_renderbuffer(&intel_fb->Base, BUFFER_STENCIL);

         if (intel_fb->color_rb[0]) {
            intel_renderbuffer_set_region(intel_fb->color_rb[0],
                                          intel->front_region);
         }
         if (intel_fb->color_rb[1]) {
            intel_renderbuffer_set_region(intel_fb->color_rb[1],
                                          intel->back_region);
         }
#if 0
         if (intel_fb->color_rb[2]) {
            intel_renderbuffer_set_region(intel_fb->color_rb[2],
                                          intel->third_region);
         }
#endif
         if (irbDepth) {
            intel_renderbuffer_set_region(irbDepth, intel->depth_region);
         }
         /* Stencil shares the depth region here -- presumably a packed
          * depth/stencil layout; TODO confirm for all configurations.
          */
         if (irbStencil) {
            intel_renderbuffer_set_region(irbStencil, intel->depth_region);
         }
      }

      /* set GLframebuffer size to match window, if needed */
      driUpdateFramebufferSize(&intel->ctx, driDrawPriv);

      if (driReadPriv != driDrawPriv) {
         driUpdateFramebufferSize(&intel->ctx, driReadPriv);
      }

      _mesa_make_current(&intel->ctx, &intel_fb->Base, readFb);

      /* The drawbuffer won't always be updated by _mesa_make_current:
       */
      if (intel->ctx.DrawBuffer == &intel_fb->Base) {

         if (intel->driReadDrawable != driReadPriv)
            intel->driReadDrawable = driReadPriv;

         if (intel->driDrawable != driDrawPriv) {
            /* swap_interval == -1 marks a drawable whose vblank state has
             * never been initialized; do the one-time setup now.
             */
            if (driDrawPriv->swap_interval == (unsigned)-1) {
               int i;

               driDrawPriv->vblFlags = (intel->intelScreen->irq_active != 0)
                  ? driGetDefaultVBlankFlags(&intel->optionCache)
                  : VBLANK_FLAG_NO_IRQ;

               (*psp->systemTime->getUST) (&intel_fb->swap_ust);
               driDrawableInitVBlank(driDrawPriv);
               intel_fb->vbl_waited = driDrawPriv->vblSeq;

               /* Seed vbl_pending for two or three color buffers depending
                * on whether a third (triple-buffer) region exists.
                */
               for (i = 0; i < (intel->intelScreen->third.handle ? 3 : 2); i++) {
                  if (intel_fb->color_rb[i])
                     intel_fb->color_rb[i]->vbl_pending = driDrawPriv->vblSeq;
               }
            }
            intel->driDrawable = driDrawPriv;
            intelWindowMoved(intel);
         }

         intel_draw_buffer(&intel->ctx, &intel_fb->Base);
      }
   }
   else {
      /* NULL context: unbind. */
      _mesa_make_current(NULL, NULL, NULL);
   }

   return GL_TRUE;
}
864
/**
 * Slow path taken when the hardware lock was contended: re-acquire it
 * and recover any state another context/server may have changed while
 * we didn't hold the lock (cliprects, context ownership, texture memory,
 * screen size, drawable position).
 */
static void
intelContendedLock(struct intel_context *intel, GLuint flags)
{
   __DRIdrawablePrivate *dPriv = intel->driDrawable;
   __DRIscreenPrivate *sPriv = intel->driScreen;
   volatile struct drm_i915_sarea *sarea = intel->sarea;
   int me = intel->hHWContext;

   drmGetLock(intel->driFd, intel->hHWContext, flags);
   intel->locked = 1;

   if (INTEL_DEBUG & DEBUG_LOCK)
      _mesa_printf("%s - got contended lock\n", __progname);

   /* If the window moved, may need to set a new cliprect now.
    *
    * NOTE: This releases and regains the hw lock, so all state
    * checking must be done *after* this call:
    */
   if (dPriv)
      DRI_VALIDATE_DRAWABLE_INFO(sPriv, dPriv);

   /* Another context used the hardware since we last held the lock;
    * record that we own it again.
    */
   if (sarea && sarea->ctxOwner != me) {
      if (INTEL_DEBUG & DEBUG_BUFMGR) {
         fprintf(stderr, "Lost Context: sarea->ctxOwner %x me %x\n",
                 sarea->ctxOwner, me);
      }
      sarea->ctxOwner = me;
   }

   /* If the last consumer of the texture memory wasn't us, notify the fake
    * bufmgr and record the new owner.  We should have the memory shared
    * between contexts of a single fake bufmgr, but this will at least make
    * things correct for now.
    */
   if (!intel->ttm && sarea->texAge != intel->hHWContext) {
      sarea->texAge = intel->hHWContext;
      intel_bufmgr_fake_contended_lock_take(intel->bufmgr);
      if (INTEL_DEBUG & DEBUG_BATCH)
         intel_decode_context_reset();
      if (INTEL_DEBUG & DEBUG_BUFMGR)
         fprintf(stderr, "Lost Textures: sarea->texAge %x hw context %x\n",
                 sarea->ctxOwner, intel->hHWContext);
   }

   /* Screen was resized while we were unlocked. */
   if (sarea->width != intel->width || sarea->height != intel->height) {
      int numClipRects = intel->numClipRects;

      /*
       * FIXME: Really only need to do this when drawing to a
       * common back- or front buffer.
       */

      /*
       * This will essentially drop the outstanding batchbuffer on
       * the floor.
       */
      intel->numClipRects = 0;

      if (intel->Fallback)
         _swrast_flush(&intel->ctx);

      if (!IS_965(intel->intelScreen->deviceID))
         INTEL_FIREVERTICES(intel);

      if (intel->batch->map != intel->batch->ptr)
         intel_batchbuffer_flush(intel->batch);

      intel->numClipRects = numClipRects;

      /* force window update */
      intel->lastStamp = 0;

      intel->width = sarea->width;
      intel->height = sarea->height;
   }

   /* Drawable changed?
    */
   if (dPriv && intel->lastStamp != dPriv->lastStamp) {
      intelWindowMoved(intel);
      intel->lastStamp = dPriv->lastStamp;
   }
}
949
950
/* Process-wide mutex serializing LOCK_HARDWARE/UNLOCK_HARDWARE across
 * threads; the DRM lock below handles cross-process exclusion.
 */
_glthread_DECLARE_STATIC_MUTEX(lockMutex);
952
953 /* Lock the hardware and validate our state.
954 */
void LOCK_HARDWARE( struct intel_context *intel )
{
   __DRIdrawable *dPriv = intel->driDrawable;
   __DRIscreen *sPriv = intel->driScreen;
   char __ret = 0;
   struct intel_framebuffer *intel_fb = NULL;
   struct intel_renderbuffer *intel_rb = NULL;

   /* Serialize against other threads in this process first. */
   _glthread_LOCK_MUTEX(lockMutex);
   assert(!intel->locked);
   intel->locked = 1;

   /* Find the renderbuffer currently being drawn to, for vblank sync. */
   if (intel->driDrawable) {
      intel_fb = intel->driDrawable->driverPrivate;

      if (intel_fb)
         intel_rb =
            intel_get_renderbuffer(&intel_fb->Base,
                                   intel_fb->Base._ColorDrawBufferIndexes[0]);
   }

   /* If this buffer has a pending vblank we haven't waited for yet, wait
    * now.  The subtraction compares the sequence numbers with unsigned
    * wraparound in mind -- presumably "pending is ahead of waited";
    * TODO confirm the 1<<23 window choice.
    */
   if (intel_rb && dPriv->vblFlags &&
       !(dPriv->vblFlags & VBLANK_FLAG_NO_IRQ) &&
       (intel_fb->vbl_waited - intel_rb->vbl_pending) > (1<<23)) {
      drmVBlank vbl;

      vbl.request.type = DRM_VBLANK_ABSOLUTE;

      if ( dPriv->vblFlags & VBLANK_FLAG_SECONDARY ) {
         vbl.request.type |= DRM_VBLANK_SECONDARY;
      }

      vbl.request.sequence = intel_rb->vbl_pending;
      drmWaitVBlank(intel->driFd, &vbl);
      intel_fb->vbl_waited = vbl.reply.sequence;
   }

   /* Fast path: try to take the DRM lock without contention.  __ret is
    * nonzero if someone else held it.
    */
   DRM_CAS(intel->driHwLock, intel->hHWContext,
           (DRM_LOCK_HELD|intel->hHWContext), __ret);

   if (sPriv->dri2.enabled) {
      if (__ret)
         drmGetLock(intel->driFd, intel->hHWContext, 0);
      /* DRI2 event stream may report drawable changes to act on. */
      if (__driParseEvents(dPriv->driContextPriv, dPriv)) {
         intelWindowMoved(intel);
         intel_draw_buffer(&intel->ctx, intel->ctx.DrawBuffer);
      }
   } else if (__ret) {
      /* Contended: take the slow path that revalidates shared state. */
      intelContendedLock( intel, 0 );
   }


   if (INTEL_DEBUG & DEBUG_LOCK)
      _mesa_printf("%s - locked\n", __progname);
}
1010
1011
1012 /* Unlock the hardware using the global current context
1013 */
void UNLOCK_HARDWARE( struct intel_context *intel )
{
   /* Give the chip-specific backend a chance to note the unlock. */
   intel->vtbl.note_unlock( intel );
   intel->locked = 0;

   DRM_UNLOCK(intel->driFd, intel->driHwLock, intel->hHWContext);

   _glthread_UNLOCK_MUTEX(lockMutex);

   if (INTEL_DEBUG & DEBUG_LOCK)
      _mesa_printf("%s - unlocked\n", __progname);

   /**
    * Nothing should be left in batch outside of LOCK/UNLOCK which references
    * cliprects.
    */
   if (intel->batch->cliprect_mode == REFERENCES_CLIPRECTS)
      intel_batchbuffer_flush(intel->batch);
}
1033