intel/fake_bufmgr: Attempt to restrict references to objects in a batchbuffer > apert...
[mesa.git] / src / mesa / drivers / dri / intel / intel_context.c
1 /**************************************************************************
2 *
3 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28
29 #include "glheader.h"
30 #include "context.h"
31 #include "matrix.h"
32 #include "simple_list.h"
33 #include "extensions.h"
34 #include "framebuffer.h"
35 #include "imports.h"
36 #include "points.h"
37
38 #include "swrast/swrast.h"
39 #include "swrast_setup/swrast_setup.h"
40 #include "tnl/tnl.h"
41
42 #include "tnl/t_pipeline.h"
43 #include "tnl/t_vertex.h"
44
45 #include "drivers/common/driverfuncs.h"
46
47 #include "intel_screen.h"
48
49 #include "i830_dri.h"
50
51 #include "intel_chipset.h"
52 #include "intel_buffers.h"
53 #include "intel_tex.h"
54 #include "intel_ioctl.h"
55 #include "intel_batchbuffer.h"
56 #include "intel_blit.h"
57 #include "intel_pixel.h"
58 #include "intel_regions.h"
59 #include "intel_buffer_objects.h"
60 #include "intel_fbo.h"
61 #include "intel_decode.h"
62 #include "intel_bufmgr_ttm.h"
63
64 #include "drirenderbuffer.h"
65 #include "vblank.h"
66 #include "utils.h"
67 #include "xmlpool.h" /* for symbolic values of enum-type options */
#ifndef INTEL_DEBUG
/* Global driver debug bitmask (DEBUG_* flags); populated from the
 * INTEL_DEBUG environment variable in intelInitContext().
 */
int INTEL_DEBUG = (0);
#endif
71
72 #define need_GL_NV_point_sprite
73 #define need_GL_ARB_multisample
74 #define need_GL_ARB_point_parameters
75 #define need_GL_ARB_texture_compression
76 #define need_GL_ARB_vertex_buffer_object
77 #define need_GL_ARB_vertex_program
78 #define need_GL_ARB_window_pos
79 #define need_GL_ARB_occlusion_query
80 #define need_GL_EXT_blend_color
81 #define need_GL_EXT_blend_equation_separate
82 #define need_GL_EXT_blend_func_separate
83 #define need_GL_EXT_blend_minmax
84 #define need_GL_EXT_cull_vertex
85 #define need_GL_EXT_fog_coord
86 #define need_GL_EXT_framebuffer_object
87 #define need_GL_EXT_multi_draw_arrays
88 #define need_GL_EXT_secondary_color
89 #define need_GL_NV_vertex_program
90 #define need_GL_ATI_separate_stencil
91 #define need_GL_EXT_point_parameters
92 #define need_GL_VERSION_2_0
93 #define need_GL_VERSION_2_1
94 #define need_GL_ARB_shader_objects
95 #define need_GL_ARB_vertex_shader
96
97 #include "extension_helper.h"
98
99 #define DRIVER_DATE "20061102"
100
101 static const GLubyte *
102 intelGetString(GLcontext * ctx, GLenum name)
103 {
104 const char *chipset;
105 static char buffer[128];
106
107 switch (name) {
108 case GL_VENDOR:
109 return (GLubyte *) "Tungsten Graphics, Inc";
110 break;
111
112 case GL_RENDERER:
113 switch (intel_context(ctx)->intelScreen->deviceID) {
114 case PCI_CHIP_845_G:
115 chipset = "Intel(R) 845G";
116 break;
117 case PCI_CHIP_I830_M:
118 chipset = "Intel(R) 830M";
119 break;
120 case PCI_CHIP_I855_GM:
121 chipset = "Intel(R) 852GM/855GM";
122 break;
123 case PCI_CHIP_I865_G:
124 chipset = "Intel(R) 865G";
125 break;
126 case PCI_CHIP_I915_G:
127 chipset = "Intel(R) 915G";
128 break;
129 case PCI_CHIP_E7221_G:
130 chipset = "Intel (R) E7221G (i915)";
131 break;
132 case PCI_CHIP_I915_GM:
133 chipset = "Intel(R) 915GM";
134 break;
135 case PCI_CHIP_I945_G:
136 chipset = "Intel(R) 945G";
137 break;
138 case PCI_CHIP_I945_GM:
139 chipset = "Intel(R) 945GM";
140 break;
141 case PCI_CHIP_I945_GME:
142 chipset = "Intel(R) 945GME";
143 break;
144 case PCI_CHIP_G33_G:
145 chipset = "Intel(R) G33";
146 break;
147 case PCI_CHIP_Q35_G:
148 chipset = "Intel(R) Q35";
149 break;
150 case PCI_CHIP_Q33_G:
151 chipset = "Intel(R) Q33";
152 break;
153 case PCI_CHIP_I965_Q:
154 chipset = "Intel(R) 965Q";
155 break;
156 case PCI_CHIP_I965_G:
157 case PCI_CHIP_I965_G_1:
158 chipset = "Intel(R) 965G";
159 break;
160 case PCI_CHIP_I946_GZ:
161 chipset = "Intel(R) 946GZ";
162 break;
163 case PCI_CHIP_I965_GM:
164 chipset = "Intel(R) 965GM";
165 break;
166 case PCI_CHIP_I965_GME:
167 chipset = "Intel(R) 965GME/GLE";
168 break;
169 case PCI_CHIP_IGD_GM:
170 chipset = "Intel(R) Integrated Graphics Device";
171 break;
172 default:
173 chipset = "Unknown Intel Chipset";
174 break;
175 }
176
177 (void) driGetRendererString(buffer, chipset, DRIVER_DATE, 0);
178 return (GLubyte *) buffer;
179
180 default:
181 return NULL;
182 }
183 }
184
185 /**
186 * Extension strings exported by the intel driver.
187 *
188 * \note
189 * It appears that ARB_texture_env_crossbar has "disappeared" compared to the
190 * old i830-specific driver.
191 */
192 static const struct dri_extension card_extensions[] = {
193 {"GL_ARB_multisample", GL_ARB_multisample_functions},
194 {"GL_ARB_multitexture", NULL},
195 {"GL_ARB_point_parameters", GL_ARB_point_parameters_functions},
196 {"GL_NV_point_sprite", GL_NV_point_sprite_functions},
197 {"GL_ARB_texture_border_clamp", NULL},
198 {"GL_ARB_texture_compression", GL_ARB_texture_compression_functions},
199 {"GL_ARB_texture_cube_map", NULL},
200 {"GL_ARB_texture_env_add", NULL},
201 {"GL_ARB_texture_env_combine", NULL},
202 {"GL_ARB_texture_env_dot3", NULL},
203 {"GL_ARB_texture_mirrored_repeat", NULL},
204 {"GL_ARB_texture_non_power_of_two", NULL },
205 {"GL_ARB_texture_rectangle", NULL},
206 {"GL_NV_texture_rectangle", NULL},
207 {"GL_EXT_texture_rectangle", NULL},
208 {"GL_ARB_point_parameters", NULL},
209 {"GL_ARB_vertex_buffer_object", GL_ARB_vertex_buffer_object_functions},
210 {"GL_ARB_vertex_program", GL_ARB_vertex_program_functions},
211 {"GL_ARB_window_pos", GL_ARB_window_pos_functions},
212 {"GL_EXT_blend_color", GL_EXT_blend_color_functions},
213 {"GL_EXT_blend_equation_separate",
214 GL_EXT_blend_equation_separate_functions},
215 {"GL_EXT_blend_func_separate", GL_EXT_blend_func_separate_functions},
216 {"GL_EXT_blend_minmax", GL_EXT_blend_minmax_functions},
217 {"GL_EXT_blend_logic_op", NULL},
218 {"GL_EXT_blend_subtract", NULL},
219 {"GL_EXT_cull_vertex", GL_EXT_cull_vertex_functions},
220 {"GL_EXT_fog_coord", GL_EXT_fog_coord_functions},
221 {"GL_EXT_multi_draw_arrays", GL_EXT_multi_draw_arrays_functions},
222 {"GL_ATI_separate_stencil", GL_ATI_separate_stencil_functions},
223 #if 1 /* XXX FBO temporary? */
224 {"GL_EXT_packed_depth_stencil", NULL},
225 #endif
226 {"GL_EXT_secondary_color", GL_EXT_secondary_color_functions},
227 {"GL_EXT_stencil_wrap", NULL},
228 {"GL_EXT_texture_edge_clamp", NULL},
229 {"GL_EXT_texture_env_combine", NULL},
230 {"GL_EXT_texture_env_dot3", NULL},
231 {"GL_EXT_texture_filter_anisotropic", NULL},
232 {"GL_EXT_texture_lod_bias", NULL},
233 {"GL_3DFX_texture_compression_FXT1", NULL},
234 {"GL_APPLE_client_storage", NULL},
235 {"GL_MESA_pack_invert", NULL},
236 {"GL_MESA_ycbcr_texture", NULL},
237 {"GL_NV_blend_square", NULL},
238 {"GL_NV_vertex_program", GL_NV_vertex_program_functions},
239 {"GL_NV_vertex_program1_1", NULL},
240 { "GL_SGIS_generate_mipmap", NULL },
241 {NULL, NULL}
242 };
243
/* Extensions enabled only on 965-class (brw) hardware; see
 * intelInitExtensions(), which gates this table on IS_965().
 */
static const struct dri_extension brw_extensions[] = {
   { "GL_ARB_shading_language_100",       GL_VERSION_2_0_functions},
   { "GL_ARB_shading_language_120",       GL_VERSION_2_1_functions},
   { "GL_ARB_shader_objects",             GL_ARB_shader_objects_functions},
   { "GL_ARB_vertex_shader",              GL_ARB_vertex_shader_functions},
   { "GL_ARB_point_sprite",               NULL},
   { "GL_ARB_fragment_shader",            NULL },
   { "GL_ARB_draw_buffers",               NULL },
   { "GL_ARB_depth_texture",              NULL },
   { "GL_ARB_fragment_program",           NULL },
   { "GL_ARB_shadow",                     NULL },
   { "GL_EXT_shadow_funcs",               NULL },
   /* ARB extn won't work if not enabled */
   { "GL_SGIX_depth_texture",             NULL },
   { "GL_ARB_texture_env_crossbar",       NULL },
   { "GL_EXT_texture_sRGB",               NULL},
   { NULL,                                NULL }
};
262
/* Occlusion query support; only enabled on 965 with a new-enough DRM
 * (drmMinor >= 8) — see intelInitExtensions().
 */
static const struct dri_extension arb_oc_extensions[] = {
   {"GL_ARB_occlusion_query",   GL_ARB_occlusion_query_functions},
   {NULL, NULL}
};
267
/* Extensions that require the TTM buffer manager (FBOs and PBOs need
 * real kernel-managed buffer objects) — see intelInitExtensions().
 */
static const struct dri_extension ttm_extensions[] = {
   {"GL_EXT_framebuffer_object", GL_EXT_framebuffer_object_functions},
   {"GL_ARB_pixel_buffer_object", NULL},
   {NULL, NULL}
};
273
274 /**
275 * Initializes potential list of extensions if ctx == NULL, or actually enables
276 * extensions for a context.
277 */
278 void intelInitExtensions(GLcontext *ctx, GLboolean enable_imaging)
279 {
280 struct intel_context *intel = ctx?intel_context(ctx):NULL;
281
282 /* Disable imaging extension until convolution is working in teximage paths.
283 */
284 enable_imaging = GL_FALSE;
285
286 driInitExtensions(ctx, card_extensions, enable_imaging);
287
288 if (intel == NULL || intel->ttm)
289 driInitExtensions(ctx, ttm_extensions, GL_FALSE);
290
291 if (intel == NULL ||
292 (IS_965(intel->intelScreen->deviceID) &&
293 intel->intelScreen->drmMinor >= 8))
294 driInitExtensions(ctx, arb_oc_extensions, GL_FALSE);
295
296 if (intel == NULL || IS_965(intel->intelScreen->deviceID))
297 driInitExtensions(ctx, brw_extensions, GL_FALSE);
298 }
299
/* Mapping from INTEL_DEBUG environment-variable tokens to DEBUG_* bits;
 * consumed via driParseDebugString() in intelInitContext().
 */
static const struct dri_debug_control debug_control[] = {
   { "tex",   DEBUG_TEXTURE},
   { "state", DEBUG_STATE},
   { "ioctl", DEBUG_IOCTL},
   { "blit",  DEBUG_BLIT},
   { "mip",   DEBUG_MIPTREE},
   { "fall",  DEBUG_FALLBACKS},
   { "verb",  DEBUG_VERBOSE},
   { "bat",   DEBUG_BATCH},
   { "pix",   DEBUG_PIXEL},
   { "buf",   DEBUG_BUFMGR},
   { "reg",   DEBUG_REGION},
   { "fbo",   DEBUG_FBO},
   { "lock",  DEBUG_LOCK},
   { "sync",  DEBUG_SYNC},
   { "prim",  DEBUG_PRIMS },
   { "vert",  DEBUG_VERTS },
   { "dri",   DEBUG_DRI },
   { "dma",   DEBUG_DMA },
   { "san",   DEBUG_SANITY },
   { "sleep", DEBUG_SLEEP },
   { "stats", DEBUG_STATS },
   { "tile",  DEBUG_TILE },
   /* "sing" and "thre" are aliases for the same flag. */
   { "sing",  DEBUG_SINGLE_THREAD },
   { "thre",  DEBUG_SINGLE_THREAD },
   { "wm",    DEBUG_WM },
   { "urb",   DEBUG_URB },
   { "vs",    DEBUG_VS },
   { NULL,    0 }
};
330
331
/* dd_function_table::UpdateState hook: propagate GL state changes to the
 * software rasterizer and TNL modules, accumulate the dirty bits for our
 * own deferred state validation, and give the chip-specific layer
 * (i915/i965 vtbl) a chance to react.
 */
static void
intelInvalidateState(GLcontext * ctx, GLuint new_state)
{
   struct intel_context *intel = intel_context(ctx);

   /* Notify the helper modules first, in their usual order. */
   _swrast_InvalidateState(ctx, new_state);
   _swsetup_InvalidateState(ctx, new_state);
   _vbo_InvalidateState(ctx, new_state);
   _tnl_InvalidateState(ctx, new_state);
   _tnl_invalidate_vertex_state(ctx, new_state);

   /* Accumulate for later processing by the hardware state emitters. */
   intel->NewGLState |= new_state;

   /* Chip-generation-specific hook (may be unset). */
   if (intel->vtbl.invalidate_state)
      intel->vtbl.invalidate_state( intel, new_state );
}
348
349
/* dd_function_table::Flush hook: push all queued rendering to the
 * hardware.  Flushes any software-rasterizer fallback output, fires
 * buffered vertices (pre-965 only), then submits the batchbuffer if it
 * contains any commands.
 */
void
intelFlush(GLcontext * ctx)
{
   struct intel_context *intel = intel_context(ctx);

   if (intel->Fallback)
      _swrast_flush(ctx);

   /* 965 has no vertex-buffering path to fire here. */
   if (!IS_965(intel->intelScreen->deviceID))
      INTEL_FIREVERTICES(intel);

   /* map != ptr means commands have been written since the last flush. */
   if (intel->batch->map != intel->batch->ptr)
      intel_batchbuffer_flush(intel->batch);

   /* XXX: Need to do an MI_FLUSH here.
    */
}
367
368 void
369 intelFinish(GLcontext * ctx)
370 {
371 struct intel_context *intel = intel_context(ctx);
372 intelFlush(ctx);
373 if (intel->batch->last_fence) {
374 dri_fence_wait(intel->batch->last_fence);
375 dri_fence_unreference(intel->batch->last_fence);
376 intel->batch->last_fence = NULL;
377 }
378 }
379
/* GL_ARB_occlusion_query: begin a query by snapshotting the hardware
 * PS_DEPTH_COUNT register (samples passing the depth test) into
 * q->Result; intelEndQuery() later subtracts this start value.
 *
 * NOTE(review): the drmCommandWrite return value is ignored — on ioctl
 * failure q->Result is left unmodified.
 */
static void
intelBeginQuery(GLcontext *ctx, GLenum target, struct gl_query_object *q)
{
   struct intel_context *intel = intel_context( ctx );
   struct drm_i915_mmio io = {
      .read_write = I915_MMIO_READ,
      .reg = MMIO_REGS_PS_DEPTH_COUNT,
      .data = &q->Result
   };
   /* Tell the WM state emitters that depth-count statistics are needed. */
   intel->stats_wm++;
   /* Drain all prior rendering so the counter reflects a stable baseline. */
   intelFinish(&intel->ctx);
   drmCommandWrite(intel->driFd, DRM_I915_MMIO, &io, sizeof(io));
}
393
/* GL_ARB_occlusion_query: end a query by reading PS_DEPTH_COUNT again
 * and storing the delta from the start value captured in
 * intelBeginQuery().  The result is available immediately (Ready =
 * GL_TRUE) because intelFinish() drains the pipeline first.
 */
static void
intelEndQuery(GLcontext *ctx, GLenum target, struct gl_query_object *q)
{
   struct intel_context *intel = intel_context( ctx );
   GLuint64EXT tmp;
   struct drm_i915_mmio io = {
      .read_write = I915_MMIO_READ,
      .reg = MMIO_REGS_PS_DEPTH_COUNT,
      .data = &tmp
   };
   /* Wait for all rendering between Begin and End to complete. */
   intelFinish(&intel->ctx);
   drmCommandWrite(intel->driFd, DRM_I915_MMIO, &io, sizeof(io));
   /* q->Result currently holds the counter value at BeginQuery time. */
   q->Result = tmp - q->Result;
   q->Ready = GL_TRUE;
   /* Balance the stats_wm increment from intelBeginQuery(). */
   intel->stats_wm--;
}
410
/** Driver-specific fence emit implementation for the fake memory manager.
 *
 * \param private  actually a struct intel_context pointer.
 * \return the IRQ sequence number usable as a fence cookie.
 */
static unsigned int
intel_fence_emit(void *private)
{
   struct intel_context *intel = private;

   /* XXX: Need to emit a flush, if we haven't already (at least with the
    * current batchbuffer implementation, we have).
    */

   return intelEmitIrqLocked(intel);
}
426
/** Driver-specific fence wait implementation for the fake memory manager.
 *
 * \param private  actually a struct intel_context pointer.
 * \param cookie   IRQ sequence number returned by intel_fence_emit().
 * \return 0 always (intelWaitIrq reports no failure to us).
 */
static int
intel_fence_wait(void *private, unsigned int cookie)
{
   intelWaitIrq((struct intel_context *) private, cookie);
   return 0;
}
437
438 static GLboolean
439 intel_init_bufmgr(struct intel_context *intel)
440 {
441 intelScreenPrivate *intelScreen = intel->intelScreen;
442 GLboolean ttm_disable = getenv("INTEL_NO_TTM") != NULL;
443 GLboolean ttm_supported;
444
445 /* If we've got a new enough DDX that's initializing TTM and giving us
446 * object handles for the shared buffers, use that.
447 */
448 intel->ttm = GL_FALSE;
449 if (intel->intelScreen->driScrnPriv->dri2.enabled)
450 ttm_supported = GL_TRUE;
451 else if (intel->intelScreen->driScrnPriv->ddx_version.minor >= 9 &&
452 intel->intelScreen->drmMinor >= 11 &&
453 intel->intelScreen->front.bo_handle != -1)
454 ttm_supported = GL_TRUE;
455 else
456 ttm_supported = GL_FALSE;
457
458 if (!ttm_disable && ttm_supported) {
459 int bo_reuse_mode;
460 intel->bufmgr = intel_bufmgr_ttm_init(intel->driFd,
461 DRM_FENCE_TYPE_EXE,
462 DRM_FENCE_TYPE_EXE |
463 DRM_I915_FENCE_TYPE_RW,
464 BATCH_SZ);
465 if (intel->bufmgr != NULL)
466 intel->ttm = GL_TRUE;
467
468 bo_reuse_mode = driQueryOptioni(&intel->optionCache, "bo_reuse");
469 switch (bo_reuse_mode) {
470 case DRI_CONF_BO_REUSE_DISABLED:
471 break;
472 case DRI_CONF_BO_REUSE_ALL:
473 intel_ttm_enable_bo_reuse(intel->bufmgr);
474 break;
475 }
476 }
477 /* Otherwise, use the classic buffer manager. */
478 if (intel->bufmgr == NULL) {
479 if (ttm_disable) {
480 fprintf(stderr, "TTM buffer manager disabled. Using classic.\n");
481 } else {
482 fprintf(stderr, "Failed to initialize TTM buffer manager. "
483 "Falling back to classic.\n");
484 }
485
486 if (intelScreen->tex.size == 0) {
487 fprintf(stderr, "[%s:%u] Error initializing buffer manager.\n",
488 __func__, __LINE__);
489 return GL_FALSE;
490 }
491
492 intel->bufmgr = dri_bufmgr_fake_init(intelScreen->tex.offset,
493 intelScreen->tex.map,
494 intelScreen->tex.size,
495 intel_fence_emit,
496 intel_fence_wait,
497 intel);
498 }
499
500 return GL_TRUE;
501 }
502
/* Fill in the Mesa device-driver function table with the intel hooks
 * shared by i915 and i965, on top of the default (software) entries.
 * Chip-specific layers override further entries afterwards.
 */
void
intelInitDriverFunctions(struct dd_function_table *functions)
{
   /* Start from the software defaults so unset hooks still work. */
   _mesa_init_driver_functions(functions);

   functions->Flush = intelFlush;
   functions->Finish = intelFinish;
   functions->GetString = intelGetString;
   functions->UpdateState = intelInvalidateState;

   /* Color-table/convolution copies go through swrast. */
   functions->CopyColorTable = _swrast_CopyColorTable;
   functions->CopyColorSubTable = _swrast_CopyColorSubTable;
   functions->CopyConvolutionFilter1D = _swrast_CopyConvolutionFilter1D;
   functions->CopyConvolutionFilter2D = _swrast_CopyConvolutionFilter2D;

   /* Occlusion query via the PS_DEPTH_COUNT register. */
   functions->BeginQuery = intelBeginQuery;
   functions->EndQuery = intelEndQuery;

   intelInitTextureFuncs(functions);
   intelInitStateFuncs(functions);
   intelInitBufferFuncs(functions);
}
525
526
/* Create and initialize the shared part of an intel rendering context.
 *
 * Called by the chip-specific (i915/i965) CreateContext after it has set
 * up its own function table.  Initializes the Mesa context, the buffer
 * manager, the swrast/tnl helper modules, driver constants, and the
 * batchbuffer.
 *
 * \return GL_FALSE on failure (Mesa context or bufmgr init failed).
 */
GLboolean
intelInitContext(struct intel_context *intel,
                 const __GLcontextModes * mesaVis,
                 __DRIcontextPrivate * driContextPriv,
                 void *sharedContextPrivate,
                 struct dd_function_table *functions)
{
   GLcontext *ctx = &intel->ctx;
   GLcontext *shareCtx = (GLcontext *) sharedContextPrivate;
   __DRIscreenPrivate *sPriv = driContextPriv->driScreenPriv;
   intelScreenPrivate *intelScreen = (intelScreenPrivate *) sPriv->private;
   /* SAREA is shared with the X server / other clients; volatile because
    * it can change under us.
    */
   volatile struct drm_i915_sarea *saPriv = (struct drm_i915_sarea *)
      (((GLubyte *) sPriv->pSAREA) + intelScreen->sarea_priv_offset);
   int fthrottle_mode;

   if (!_mesa_initialize_context(&intel->ctx, mesaVis, shareCtx,
                                 functions, (void *) intel)) {
      _mesa_printf("%s: failed to init mesa context\n", __FUNCTION__);
      return GL_FALSE;
   }

   driContextPriv->driverPrivate = intel;
   intel->intelScreen = intelScreen;
   intel->driScreen = sPriv;
   intel->sarea = saPriv;

   /* Dri stuff */
   intel->hHWContext = driContextPriv->hHWContext;
   intel->driFd = sPriv->fd;
   intel->driHwLock = sPriv->lock;

   intel->width = intelScreen->width;
   intel->height = intelScreen->height;

   /* Read driconf options under the "i915"/"i965" driver name. */
   driParseConfigFiles(&intel->optionCache, &intelScreen->optionCache,
                       intel->driScreen->myNum,
                       IS_965(intelScreen->deviceID) ? "i965" : "i915");
   /* 865G has a smaller usable batch size — TODO confirm hardware limit. */
   if (intelScreen->deviceID == PCI_CHIP_I865_G)
      intel->maxBatchSize = 4096;
   else
      intel->maxBatchSize = BATCH_SZ;

   if (!intel_init_bufmgr(intel))
      return GL_FALSE;

   ctx->Const.MaxTextureMaxAnisotropy = 2.0;

   /* This doesn't yet catch all non-conformant rendering, but it's a
    * start.
    */
   if (getenv("INTEL_STRICT_CONFORMANCE")) {
      intel->strict_conformance = 1;
   }

   /* Line-width limits: strictly conformant mode only advertises 1.0. */
   if (intel->strict_conformance) {
      ctx->Const.MinLineWidth = 1.0;
      ctx->Const.MinLineWidthAA = 1.0;
      ctx->Const.MaxLineWidth = 1.0;
      ctx->Const.MaxLineWidthAA = 1.0;
      ctx->Const.LineWidthGranularity = 1.0;
   }
   else {
      ctx->Const.MinLineWidth = 1.0;
      ctx->Const.MinLineWidthAA = 1.0;
      ctx->Const.MaxLineWidth = 5.0;
      ctx->Const.MaxLineWidthAA = 5.0;
      ctx->Const.LineWidthGranularity = 0.5;
   }

   ctx->Const.MinPointSize = 1.0;
   ctx->Const.MinPointSizeAA = 1.0;
   ctx->Const.MaxPointSize = 255.0;
   ctx->Const.MaxPointSizeAA = 3.0;
   ctx->Const.PointSizeGranularity = 1.0;

   /* reinitialize the context point state.
    * It depend on constants in __GLcontextRec::Const
    */
   _mesa_init_point(ctx);

   ctx->Const.MaxColorAttachments = 4;  /* XXX FBO: review this */

   /* Initialize the software rasterizer and helper modules. */
   _swrast_CreateContext(ctx);
   _vbo_CreateContext(ctx);
   _tnl_CreateContext(ctx);
   _swsetup_CreateContext(ctx);

   /* Configure swrast to match hardware characteristics: */
   _swrast_allow_pixel_fog(ctx, GL_FALSE);
   _swrast_allow_vertex_fog(ctx, GL_TRUE);

   /* Hardware stencil requires a 24-bit depth buffer (packed D24S8). */
   intel->hw_stencil = mesaVis->stencilBits && mesaVis->depthBits == 24;
   intel->hw_stipple = 1;

   /* XXX FBO: this doesn't seem to be used anywhere */
   switch (mesaVis->depthBits) {
   case 0:                     /* what to do in this case? */
   case 16:
      intel->polygon_offset_scale = 1.0;
      break;
   case 24:
      intel->polygon_offset_scale = 2.0;     /* req'd to pass glean */
      break;
   default:
      assert(0);
      break;
   }

   if (IS_965(intelScreen->deviceID))
      intel->polygon_offset_scale /= 0xffff;

   intel->RenderIndex = ~0;

   fthrottle_mode = driQueryOptioni(&intel->optionCache, "fthrottle_mode");
   intel->irqsEmitted = 0;

   intel->do_irqs = (intel->intelScreen->irq_active &&
                     fthrottle_mode == DRI_CONF_FTHROTTLE_IRQS);

   intel->do_usleeps = (fthrottle_mode == DRI_CONF_FTHROTTLE_USLEEPS);

   _math_matrix_ctr(&intel->ViewportMatrix);

   /* The 965 driver cannot operate without working interrupts. */
   if (IS_965(intelScreen->deviceID) && !intel->intelScreen->irq_active) {
      _mesa_printf("IRQs not active.  Exiting\n");
      exit(1);
   }

   intelInitExtensions(ctx, GL_FALSE);

   INTEL_DEBUG = driParseDebugString(getenv("INTEL_DEBUG"), debug_control);
   if (INTEL_DEBUG & DEBUG_BUFMGR)
      dri_bufmgr_set_debug(intel->bufmgr, GL_TRUE);

   /* Under DRI2 the static front/back/depth regions come from the X
    * server instead.
    */
   if (!sPriv->dri2.enabled)
      intel_recreate_static_regions(intel);

   intel->batch = intel_batchbuffer_alloc(intel);
   intel->last_swap_fence = NULL;
   intel->first_swap_fence = NULL;

   intel_bufferobj_init(intel);
   intel_fbo_init(intel);

   /* S3TC: enabled if the decompression library is present, or forced by
    * driconf for pre-compressed-texture-only use.
    */
   if (intel->ctx.Mesa_DXTn) {
      _mesa_enable_extension(ctx, "GL_EXT_texture_compression_s3tc");
      _mesa_enable_extension(ctx, "GL_S3_s3tc");
   }
   else if (driQueryOptionb(&intel->optionCache, "force_s3tc_enable")) {
      _mesa_enable_extension(ctx, "GL_EXT_texture_compression_s3tc");
   }

   intel->prim.primitive = ~0;

   /* Force all software fallbacks */
   if (driQueryOptionb(&intel->optionCache, "no_rast")) {
      fprintf(stderr, "disabling 3D rasterization\n");
      FALLBACK(intel, INTEL_FALLBACK_USER, 1);
      intel->no_rast = 1;
   }

   /* Disable all hardware rendering (skip emitting batches and fences/waits
    * to the kernel)
    */
   intel->no_hw = getenv("INTEL_NO_HW") != NULL;

   return GL_TRUE;
}
696
/* Tear down an intel rendering context: flush pending rendering, destroy
 * the chip-specific and helper-module state, wait out outstanding swap
 * fences, and finally free the Mesa context data and the buffer manager.
 */
void
intelDestroyContext(__DRIcontextPrivate * driContextPriv)
{
   struct intel_context *intel =
      (struct intel_context *) driContextPriv->driverPrivate;

   assert(intel);               /* should never be null */
   if (intel) {
      GLboolean release_texture_heaps;

      /* Flush any buffered vertices before tearing down state. */
      INTEL_FIREVERTICES(intel);

      /* Chip-generation-specific teardown (i915/i965). */
      intel->vtbl.destroy(intel);

      /* Last context in the share group owns the texture heaps. */
      release_texture_heaps = (intel->ctx.Shared->RefCount == 1);
      _swsetup_DestroyContext(&intel->ctx);
      _tnl_DestroyContext(&intel->ctx);
      _vbo_DestroyContext(&intel->ctx);

      _swrast_DestroyContext(&intel->ctx);
      intel->Fallback = 0;      /* don't call _swrast_Flush later */

      intel_batchbuffer_free(intel->batch);

      /* Wait for and release any fences still pending from SwapBuffers. */
      if (intel->last_swap_fence) {
         dri_fence_wait(intel->last_swap_fence);
         dri_fence_unreference(intel->last_swap_fence);
         intel->last_swap_fence = NULL;
      }
      if (intel->first_swap_fence) {
         dri_fence_wait(intel->first_swap_fence);
         dri_fence_unreference(intel->first_swap_fence);
         intel->first_swap_fence = NULL;
      }

      if (release_texture_heaps) {
         /* This share group is about to go away, free our private
          * texture object data.
          */
         if (INTEL_DEBUG & DEBUG_TEXTURE)
            fprintf(stderr, "do something to free texture heaps\n");
      }

      /* free the Mesa context */
      _mesa_free_context_data(&intel->ctx);

      dri_bufmgr_destroy(intel->bufmgr);
   }
}
746
/* DRI unbind hook: nothing to do — the interesting work happens in
 * intelMakeCurrent() when another context is bound (or NULL is passed).
 */
GLboolean
intelUnbindContext(__DRIcontextPrivate * driContextPriv)
{
   return GL_TRUE;
}
752
/* DRI MakeCurrent hook: bind \p driContextPriv (or unbind everything if
 * it is NULL) to the given draw/read drawables, fixing up renderbuffer
 * regions and vblank bookkeeping on the way.
 */
GLboolean
intelMakeCurrent(__DRIcontextPrivate * driContextPriv,
                 __DRIdrawablePrivate * driDrawPriv,
                 __DRIdrawablePrivate * driReadPriv)
{
   __DRIscreenPrivate *psp = driDrawPriv->driScreenPriv;

   if (driContextPriv) {
      struct intel_context *intel =
         (struct intel_context *) driContextPriv->driverPrivate;
      struct intel_framebuffer *intel_fb =
         (struct intel_framebuffer *) driDrawPriv->driverPrivate;
      GLframebuffer *readFb = (GLframebuffer *) driReadPriv->driverPrivate;


      /* XXX FBO temporary fix-ups! */
      /* if the renderbuffers don't have regions, init them from the context.
       * Under DRI2 the regions are attached by the server instead.
       */
      if (!driContextPriv->driScreenPriv->dri2.enabled) {
         struct intel_renderbuffer *irbDepth
            = intel_get_renderbuffer(&intel_fb->Base, BUFFER_DEPTH);
         struct intel_renderbuffer *irbStencil
            = intel_get_renderbuffer(&intel_fb->Base, BUFFER_STENCIL);

         if (intel_fb->color_rb[0]) {
            intel_renderbuffer_set_region(intel_fb->color_rb[0],
                                          intel->front_region);
         }
         if (intel_fb->color_rb[1]) {
            intel_renderbuffer_set_region(intel_fb->color_rb[1],
                                          intel->back_region);
         }
#if 0
         if (intel_fb->color_rb[2]) {
            intel_renderbuffer_set_region(intel_fb->color_rb[2],
                                          intel->third_region);
         }
#endif
         /* Depth and stencil share the same region (packed D24S8). */
         if (irbDepth) {
            intel_renderbuffer_set_region(irbDepth, intel->depth_region);
         }
         if (irbStencil) {
            intel_renderbuffer_set_region(irbStencil, intel->depth_region);
         }
      }

      /* set GLframebuffer size to match window, if needed */
      driUpdateFramebufferSize(&intel->ctx, driDrawPriv);

      if (driReadPriv != driDrawPriv) {
         driUpdateFramebufferSize(&intel->ctx, driReadPriv);
      }

      _mesa_make_current(&intel->ctx, &intel_fb->Base, readFb);

      /* The drawbuffer won't always be updated by _mesa_make_current:
       */
      if (intel->ctx.DrawBuffer == &intel_fb->Base) {

         if (intel->driReadDrawable != driReadPriv)
            intel->driReadDrawable = driReadPriv;

         if (intel->driDrawable != driDrawPriv) {
            /* swap_interval == -1 marks a drawable whose vblank state has
             * not been initialized yet.
             */
            if (driDrawPriv->swap_interval == (unsigned)-1) {
               int i;

               driDrawPriv->vblFlags = (intel->intelScreen->irq_active != 0)
                  ? driGetDefaultVBlankFlags(&intel->optionCache)
                  : VBLANK_FLAG_NO_IRQ;

               (*psp->systemTime->getUST) (&intel_fb->swap_ust);
               driDrawableInitVBlank(driDrawPriv);
               intel_fb->vbl_waited = driDrawPriv->vblSeq;

               /* 2 color buffers normally, 3 with triple buffering. */
               for (i = 0; i < (intel->intelScreen->third.handle ? 3 : 2); i++) {
                  if (intel_fb->color_rb[i])
                     intel_fb->color_rb[i]->vbl_pending = driDrawPriv->vblSeq;
               }
            }
            intel->driDrawable = driDrawPriv;
            intelWindowMoved(intel);
         }

         intel_draw_buffer(&intel->ctx, &intel_fb->Base);
      }
   }
   else {
      _mesa_make_current(NULL, NULL, NULL);
   }

   return GL_TRUE;
}
844
845 static void
846 intelContendedLock(struct intel_context *intel, GLuint flags)
847 {
848 __DRIdrawablePrivate *dPriv = intel->driDrawable;
849 __DRIscreenPrivate *sPriv = intel->driScreen;
850 volatile struct drm_i915_sarea *sarea = intel->sarea;
851 int me = intel->hHWContext;
852
853 drmGetLock(intel->driFd, intel->hHWContext, flags);
854 intel->locked = 1;
855
856 if (INTEL_DEBUG & DEBUG_LOCK)
857 _mesa_printf("%s - got contended lock\n", __progname);
858
859 /* If the window moved, may need to set a new cliprect now.
860 *
861 * NOTE: This releases and regains the hw lock, so all state
862 * checking must be done *after* this call:
863 */
864 if (dPriv)
865 DRI_VALIDATE_DRAWABLE_INFO(sPriv, dPriv);
866
867 if (sarea && sarea->ctxOwner != me) {
868 if (INTEL_DEBUG & DEBUG_BUFMGR) {
869 fprintf(stderr, "Lost Context: sarea->ctxOwner %x me %x\n",
870 sarea->ctxOwner, me);
871 }
872 sarea->ctxOwner = me;
873 }
874
875 /* If the last consumer of the texture memory wasn't us, notify the fake
876 * bufmgr and record the new owner. We should have the memory shared
877 * between contexts of a single fake bufmgr, but this will at least make
878 * things correct for now.
879 */
880 if (!intel->ttm && sarea->texAge != intel->hHWContext) {
881 sarea->texAge = intel->hHWContext;
882 dri_bufmgr_fake_contended_lock_take(intel->bufmgr);
883 if (INTEL_DEBUG & DEBUG_BATCH)
884 intel_decode_context_reset();
885 if (INTEL_DEBUG & DEBUG_BUFMGR)
886 fprintf(stderr, "Lost Textures: sarea->texAge %x hw context %x\n",
887 sarea->ctxOwner, intel->hHWContext);
888 }
889
890 if (sarea->width != intel->width || sarea->height != intel->height) {
891 int numClipRects = intel->numClipRects;
892
893 /*
894 * FIXME: Really only need to do this when drawing to a
895 * common back- or front buffer.
896 */
897
898 /*
899 * This will essentially drop the outstanding batchbuffer on
900 * the floor.
901 */
902 intel->numClipRects = 0;
903
904 if (intel->Fallback)
905 _swrast_flush(&intel->ctx);
906
907 if (!IS_965(intel->intelScreen->deviceID))
908 INTEL_FIREVERTICES(intel);
909
910 if (intel->batch->map != intel->batch->ptr)
911 intel_batchbuffer_flush(intel->batch);
912
913 intel->numClipRects = numClipRects;
914
915 /* force window update */
916 intel->lastStamp = 0;
917
918 intel->width = sarea->width;
919 intel->height = sarea->height;
920 }
921
922 /* Drawable changed?
923 */
924 if (dPriv && intel->lastStamp != dPriv->lastStamp) {
925 intelWindowMoved(intel);
926 intel->lastStamp = dPriv->lastStamp;
927 }
928 }
929
930
/* Serializes LOCK/UNLOCK_HARDWARE across threads within this process. */
_glthread_DECLARE_STATIC_MUTEX(lockMutex);

/* Lock the hardware and validate our state.
 *
 * Also performs any pending vblank throttling wait for the current
 * drawable before taking the DRM lock.  Must be balanced by
 * UNLOCK_HARDWARE(); not recursive (asserts !intel->locked).
 */
void LOCK_HARDWARE( struct intel_context *intel )
{
   __DRIdrawable *dPriv = intel->driDrawable;
   __DRIscreen *sPriv = intel->driScreen;
   char __ret = 0;
   struct intel_framebuffer *intel_fb = NULL;
   struct intel_renderbuffer *intel_rb = NULL;

   _glthread_LOCK_MUTEX(lockMutex);
   assert(!intel->locked);
   intel->locked = 1;

   /* Find the renderbuffer currently drawn to, for vblank throttling. */
   if (intel->driDrawable) {
      intel_fb = intel->driDrawable->driverPrivate;

      if (intel_fb)
         intel_rb =
            intel_get_renderbuffer(&intel_fb->Base,
                                   intel_fb->Base._ColorDrawBufferIndexes[0]);
   }

   /* Wait for a pending vblank swap, unless it already happened.  The
    * (1<<23) window makes the sequence comparison robust to wraparound.
    */
   if (intel_rb && dPriv->vblFlags &&
       !(dPriv->vblFlags & VBLANK_FLAG_NO_IRQ) &&
       (intel_fb->vbl_waited - intel_rb->vbl_pending) > (1<<23)) {
      drmVBlank vbl;

      vbl.request.type = DRM_VBLANK_ABSOLUTE;

      if ( dPriv->vblFlags & VBLANK_FLAG_SECONDARY ) {
         vbl.request.type |= DRM_VBLANK_SECONDARY;
      }

      vbl.request.sequence = intel_rb->vbl_pending;
      drmWaitVBlank(intel->driFd, &vbl);
      intel_fb->vbl_waited = vbl.reply.sequence;
   }

   /* Fast path: try to take the lock without contention; __ret is set
    * non-zero if someone else held it since we last did.
    */
   DRM_CAS(intel->driHwLock, intel->hHWContext,
           (DRM_LOCK_HELD|intel->hHWContext), __ret);

   if (sPriv->dri2.enabled) {
      if (__ret)
         drmGetLock(intel->driFd, intel->hHWContext, 0);
      /* DRI2 event buffer may report drawable changes. */
      if (__driParseEvents(dPriv->driContextPriv, dPriv)) {
         intelWindowMoved(intel);
         intel_draw_buffer(&intel->ctx, intel->ctx.DrawBuffer);
      }
   } else if (__ret) {
      /* Contended-lock slow path: revalidate everything. */
      intelContendedLock( intel, 0 );
   }


   if (INTEL_DEBUG & DEBUG_LOCK)
      _mesa_printf("%s - locked\n", __progname);
}
990
991
/* Unlock the hardware using the global current context.
 *
 * Balances LOCK_HARDWARE(): releases the DRM hardware lock and then the
 * per-process mutex, after letting the chip-specific layer do any
 * bookkeeping it needs while the lock is still held.
 */
void UNLOCK_HARDWARE( struct intel_context *intel )
{
   /* Chip-specific pre-unlock hook (must run while still locked). */
   intel->vtbl.note_unlock( intel );
   intel->locked = 0;

   DRM_UNLOCK(intel->driFd, intel->driHwLock, intel->hHWContext);

   _glthread_UNLOCK_MUTEX(lockMutex);

   if (INTEL_DEBUG & DEBUG_LOCK)
      _mesa_printf("%s - unlocked\n", __progname);

   /**
    * Nothing should be left in batch outside of LOCK/UNLOCK which references
    * cliprects.
    */
   assert(intel->batch->cliprect_mode != REFERENCES_CLIPRECTS);
}
1012