intel: only enable occlusion query if the drm has defines.
[mesa.git] / src / mesa / drivers / dri / intel / intel_context.c
1 /**************************************************************************
2 *
3 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28
29 #include "glheader.h"
30 #include "context.h"
31 #include "matrix.h"
32 #include "simple_list.h"
33 #include "extensions.h"
34 #include "framebuffer.h"
35 #include "imports.h"
36 #include "points.h"
37
38 #include "swrast/swrast.h"
39 #include "swrast_setup/swrast_setup.h"
40 #include "tnl/tnl.h"
41
42 #include "tnl/t_pipeline.h"
43 #include "tnl/t_vertex.h"
44
45 #include "drivers/common/driverfuncs.h"
46
47 #include "intel_screen.h"
48
49 #include "i830_dri.h"
50
51 #include "intel_chipset.h"
52 #include "intel_buffers.h"
53 #include "intel_tex.h"
54 #include "intel_ioctl.h"
55 #include "intel_batchbuffer.h"
56 #include "intel_blit.h"
57 #include "intel_pixel.h"
58 #include "intel_regions.h"
59 #include "intel_buffer_objects.h"
60 #include "intel_fbo.h"
61 #include "intel_decode.h"
62 #include "intel_bufmgr.h"
63
64 #include "drirenderbuffer.h"
65 #include "vblank.h"
66 #include "utils.h"
67 #include "xmlpool.h" /* for symbolic values of enum-type options */
68 #ifndef INTEL_DEBUG
69 int INTEL_DEBUG = (0);
70 #endif
71
72 #define need_GL_NV_point_sprite
73 #define need_GL_ARB_multisample
74 #define need_GL_ARB_point_parameters
75 #define need_GL_ARB_texture_compression
76 #define need_GL_ARB_vertex_buffer_object
77 #define need_GL_ARB_vertex_program
78 #define need_GL_ARB_window_pos
79 #define need_GL_ARB_occlusion_query
80 #define need_GL_EXT_blend_color
81 #define need_GL_EXT_blend_equation_separate
82 #define need_GL_EXT_blend_func_separate
83 #define need_GL_EXT_blend_minmax
84 #define need_GL_EXT_cull_vertex
85 #define need_GL_EXT_fog_coord
86 #define need_GL_EXT_framebuffer_object
87 #define need_GL_EXT_multi_draw_arrays
88 #define need_GL_EXT_secondary_color
89 #define need_GL_NV_vertex_program
90 #define need_GL_ATI_separate_stencil
91 #define need_GL_EXT_point_parameters
92 #define need_GL_VERSION_2_0
93 #define need_GL_VERSION_2_1
94 #define need_GL_ARB_shader_objects
95 #define need_GL_ARB_vertex_shader
96
97 #include "extension_helper.h"
98
99 #define DRIVER_DATE "20080716"
100 #define DRIVER_DATE_GEM "GEM " DRIVER_DATE
101
102 static const GLubyte *
103 intelGetString(GLcontext * ctx, GLenum name)
104 {
105 const struct intel_context *const intel = intel_context(ctx);
106 const char *chipset;
107 static char buffer[128];
108
109 switch (name) {
110 case GL_VENDOR:
111 return (GLubyte *) "Tungsten Graphics, Inc";
112 break;
113
114 case GL_RENDERER:
115 switch (intel->intelScreen->deviceID) {
116 case PCI_CHIP_845_G:
117 chipset = "Intel(R) 845G";
118 break;
119 case PCI_CHIP_I830_M:
120 chipset = "Intel(R) 830M";
121 break;
122 case PCI_CHIP_I855_GM:
123 chipset = "Intel(R) 852GM/855GM";
124 break;
125 case PCI_CHIP_I865_G:
126 chipset = "Intel(R) 865G";
127 break;
128 case PCI_CHIP_I915_G:
129 chipset = "Intel(R) 915G";
130 break;
131 case PCI_CHIP_E7221_G:
132 chipset = "Intel (R) E7221G (i915)";
133 break;
134 case PCI_CHIP_I915_GM:
135 chipset = "Intel(R) 915GM";
136 break;
137 case PCI_CHIP_I945_G:
138 chipset = "Intel(R) 945G";
139 break;
140 case PCI_CHIP_I945_GM:
141 chipset = "Intel(R) 945GM";
142 break;
143 case PCI_CHIP_I945_GME:
144 chipset = "Intel(R) 945GME";
145 break;
146 case PCI_CHIP_G33_G:
147 chipset = "Intel(R) G33";
148 break;
149 case PCI_CHIP_Q35_G:
150 chipset = "Intel(R) Q35";
151 break;
152 case PCI_CHIP_Q33_G:
153 chipset = "Intel(R) Q33";
154 break;
155 case PCI_CHIP_I965_Q:
156 chipset = "Intel(R) 965Q";
157 break;
158 case PCI_CHIP_I965_G:
159 case PCI_CHIP_I965_G_1:
160 chipset = "Intel(R) 965G";
161 break;
162 case PCI_CHIP_I946_GZ:
163 chipset = "Intel(R) 946GZ";
164 break;
165 case PCI_CHIP_I965_GM:
166 chipset = "Intel(R) 965GM";
167 break;
168 case PCI_CHIP_I965_GME:
169 chipset = "Intel(R) 965GME/GLE";
170 break;
171 case PCI_CHIP_GM45_GM:
172 chipset = "Mobile IntelĀ® GM45 Express Chipset";
173 break;
174 case PCI_CHIP_IGD_E_G:
175 chipset = "Intel(R) Integrated Graphics Device";
176 break;
177 case PCI_CHIP_G45_G:
178 chipset = "Intel(R) G45/G43";
179 break;
180 case PCI_CHIP_Q45_G:
181 chipset = "Intel(R) Q45/Q43";
182 break;
183 default:
184 chipset = "Unknown Intel Chipset";
185 break;
186 }
187
188 (void) driGetRendererString(buffer, chipset,
189 (intel->ttm) ? DRIVER_DATE_GEM : DRIVER_DATE,
190 0);
191 return (GLubyte *) buffer;
192
193 default:
194 return NULL;
195 }
196 }
197
198 void
199 intel_update_renderbuffers(__DRIcontext *context, __DRIdrawable *drawable)
200 {
201 struct intel_framebuffer *intel_fb = drawable->driverPrivate;
202 struct intel_renderbuffer *rb;
203 struct intel_region *region, *depth_region;
204 struct intel_context *intel = context->driverPrivate;
205 __DRIbuffer *buffers;
206 __DRIscreen *screen;
207 int i, count;
208 unsigned int attachments[10];
209 uint32_t name;
210 const char *region_name;
211
212 if (INTEL_DEBUG & DEBUG_DRI)
213 fprintf(stderr, "enter %s, drawable %p\n", __func__, drawable);
214
215 screen = intel->intelScreen->driScrnPriv;
216
217 i = 0;
218 if (intel_fb->color_rb[0])
219 attachments[i++] = __DRI_BUFFER_FRONT_LEFT;
220 if (intel_fb->color_rb[1])
221 attachments[i++] = __DRI_BUFFER_BACK_LEFT;
222 if (intel_get_renderbuffer(&intel_fb->Base, BUFFER_DEPTH))
223 attachments[i++] = __DRI_BUFFER_DEPTH;
224 if (intel_get_renderbuffer(&intel_fb->Base, BUFFER_STENCIL))
225 attachments[i++] = __DRI_BUFFER_STENCIL;
226
227 buffers = (*screen->dri2.loader->getBuffers)(drawable,
228 &drawable->w,
229 &drawable->h,
230 attachments, i,
231 &count,
232 drawable->loaderPrivate);
233
234 drawable->x = 0;
235 drawable->y = 0;
236 drawable->backX = 0;
237 drawable->backY = 0;
238 drawable->numClipRects = 1;
239 drawable->pClipRects[0].x1 = 0;
240 drawable->pClipRects[0].y1 = 0;
241 drawable->pClipRects[0].x2 = drawable->w;
242 drawable->pClipRects[0].y2 = drawable->h;
243 drawable->numBackClipRects = 1;
244 drawable->pBackClipRects[0].x1 = 0;
245 drawable->pBackClipRects[0].y1 = 0;
246 drawable->pBackClipRects[0].x2 = drawable->w;
247 drawable->pBackClipRects[0].y2 = drawable->h;
248
249 depth_region = NULL;
250 for (i = 0; i < count; i++) {
251 switch (buffers[i].attachment) {
252 case __DRI_BUFFER_FRONT_LEFT:
253 rb = intel_fb->color_rb[0];
254 region_name = "dri2 front buffer";
255 break;
256
257 case __DRI_BUFFER_BACK_LEFT:
258 rb = intel_fb->color_rb[1];
259 region_name = "dri2 back buffer";
260 break;
261
262 case __DRI_BUFFER_DEPTH:
263 rb = intel_get_renderbuffer(&intel_fb->Base, BUFFER_DEPTH);
264 region_name = "dri2 depth buffer";
265 break;
266
267 case __DRI_BUFFER_STENCIL:
268 rb = intel_get_renderbuffer(&intel_fb->Base, BUFFER_STENCIL);
269 region_name = "dri2 stencil buffer";
270 break;
271
272 case __DRI_BUFFER_ACCUM:
273 default:
274 fprintf(stderr,
275 "unhandled buffer attach event, attacment type %d\n",
276 buffers[i].attachment);
277 return;
278 }
279
280 if (rb->region) {
281 intel_bo_flink(rb->region->buffer, &name);
282 if (name == buffers[i].name)
283 continue;
284 }
285
286 if (INTEL_DEBUG & DEBUG_DRI)
287 fprintf(stderr,
288 "attaching buffer %d, at %d, cpp %d, pitch %d\n",
289 buffers[i].name, buffers[i].attachment,
290 buffers[i].cpp, buffers[i].pitch);
291
292 if (buffers[i].attachment == __DRI_BUFFER_STENCIL && depth_region) {
293 if (INTEL_DEBUG & DEBUG_DRI)
294 fprintf(stderr, "(reusing depth buffer as stencil)\n");
295 intel_region_reference(&region, depth_region);
296 }
297 else
298 region = intel_region_alloc_for_handle(intel, buffers[i].cpp,
299 buffers[i].pitch / buffers[i].cpp,
300 drawable->h,
301 buffers[i].name,
302 region_name);
303
304 if (buffers[i].attachment == __DRI_BUFFER_DEPTH)
305 depth_region = region;
306
307 intel_renderbuffer_set_region(rb, region);
308 intel_region_release(&region);
309 }
310
311 driUpdateFramebufferSize(&intel->ctx, drawable);
312 }
313
/**
 * Viewport driver hook, used under DRI2 as the notification point that the
 * window may have been resized (DRI2 has no other resize event path).
 *
 * Re-queries the renderbuffers from the loader, then re-runs the
 * window-moved / draw-buffer logic.
 */
static void
intel_viewport(GLcontext *ctx, GLint x, GLint y, GLsizei w, GLsizei h)
{
   struct intel_context *intel = intel_context(ctx);
   __DRIcontext *driContext = intel->driContext;

   /* DRI1 gets resize notification through the SAREA instead. */
   if (!driContext->driScreenPriv->dri2.enabled)
      return;

   intel_update_renderbuffers(driContext, driContext->driDrawablePriv);
   if (driContext->driDrawablePriv != driContext->driReadablePriv)
      intel_update_renderbuffers(driContext, driContext->driReadablePriv);

   /* Temporarily clear the hook so the state updates below cannot
    * re-enter this function.
    */
   ctx->Driver.Viewport = NULL;
   intel->driDrawable = driContext->driDrawablePriv;
   intelWindowMoved(intel);
   intel_draw_buffer(ctx, intel->ctx.DrawBuffer);
   ctx->Driver.Viewport = intel_viewport;
}
333
334 /**
335 * Extension strings exported by the intel driver.
336 *
337 * Extensions supported by all chips supported by i830_dri, i915_dri, or
338 * i965_dri.
339 */
340 static const struct dri_extension card_extensions[] = {
341 {"GL_ARB_multisample", GL_ARB_multisample_functions},
342 {"GL_ARB_multitexture", NULL},
343 {"GL_ARB_point_parameters", GL_ARB_point_parameters_functions},
344 {"GL_NV_point_sprite", GL_NV_point_sprite_functions},
345 {"GL_ARB_texture_border_clamp", NULL},
346 {"GL_ARB_texture_compression", GL_ARB_texture_compression_functions},
347 {"GL_ARB_texture_cube_map", NULL},
348 {"GL_ARB_texture_env_add", NULL},
349 {"GL_ARB_texture_env_combine", NULL},
350 {"GL_ARB_texture_env_crossbar", NULL},
351 {"GL_ARB_texture_env_dot3", NULL},
352 {"GL_ARB_texture_mirrored_repeat", NULL},
353 {"GL_ARB_texture_non_power_of_two", NULL },
354 {"GL_ARB_texture_rectangle", NULL},
355 {"GL_NV_texture_rectangle", NULL},
356 {"GL_EXT_texture_rectangle", NULL},
357 {"GL_ARB_point_parameters", NULL},
358 {"GL_ARB_vertex_buffer_object", GL_ARB_vertex_buffer_object_functions},
359 {"GL_ARB_vertex_program", GL_ARB_vertex_program_functions},
360 {"GL_ARB_window_pos", GL_ARB_window_pos_functions},
361 {"GL_EXT_blend_color", GL_EXT_blend_color_functions},
362 {"GL_EXT_blend_equation_separate",
363 GL_EXT_blend_equation_separate_functions},
364 {"GL_EXT_blend_func_separate", GL_EXT_blend_func_separate_functions},
365 {"GL_EXT_blend_minmax", GL_EXT_blend_minmax_functions},
366 {"GL_EXT_blend_logic_op", NULL},
367 {"GL_EXT_blend_subtract", NULL},
368 {"GL_EXT_cull_vertex", GL_EXT_cull_vertex_functions},
369 {"GL_EXT_fog_coord", GL_EXT_fog_coord_functions},
370 {"GL_EXT_multi_draw_arrays", GL_EXT_multi_draw_arrays_functions},
371 {"GL_ATI_separate_stencil", GL_ATI_separate_stencil_functions},
372 #if 1 /* XXX FBO temporary? */
373 {"GL_EXT_packed_depth_stencil", NULL},
374 #endif
375 {"GL_EXT_secondary_color", GL_EXT_secondary_color_functions},
376 {"GL_EXT_stencil_wrap", NULL},
377 {"GL_EXT_texture_edge_clamp", NULL},
378 {"GL_EXT_texture_env_combine", NULL},
379 {"GL_EXT_texture_env_dot3", NULL},
380 {"GL_EXT_texture_filter_anisotropic", NULL},
381 {"GL_EXT_texture_lod_bias", NULL},
382 {"GL_3DFX_texture_compression_FXT1", NULL},
383 {"GL_APPLE_client_storage", NULL},
384 {"GL_MESA_pack_invert", NULL},
385 {"GL_MESA_ycbcr_texture", NULL},
386 {"GL_NV_blend_square", NULL},
387 {"GL_NV_vertex_program", GL_NV_vertex_program_functions},
388 {"GL_NV_vertex_program1_1", NULL},
389 { "GL_SGIS_generate_mipmap", NULL },
390 {NULL, NULL}
391 };
392
/* Extensions that are only available on 965-class (i965/brw) hardware. */
static const struct dri_extension brw_extensions[] = {
   { "GL_ARB_shading_language_100",       GL_VERSION_2_0_functions},
   { "GL_ARB_shading_language_120",       GL_VERSION_2_1_functions},
   { "GL_ARB_shader_objects",             GL_ARB_shader_objects_functions},
   { "GL_ARB_vertex_shader",              GL_ARB_vertex_shader_functions},
   { "GL_ARB_point_sprite", 		  NULL},
   { "GL_ARB_fragment_shader",            NULL },
   { "GL_ARB_draw_buffers",               NULL },
   { "GL_ARB_depth_texture",              NULL },
   { "GL_ARB_fragment_program",           NULL },
   { "GL_ARB_shadow",                     NULL },
   { "GL_EXT_shadow_funcs",               NULL },
   /* ARB extn won't work if not enabled */
   { "GL_SGIX_depth_texture",             NULL },
   { "GL_ARB_fragment_program_shadow",    NULL },
   { "GL_EXT_texture_sRGB",		  NULL},
   { NULL,                                NULL }
};
411
#ifdef I915_MMIO_READ
/* GL_ARB_occlusion_query is implemented via the DRM MMIO-read ioctl, so
 * it is only compiled in when the installed DRM headers provide the
 * I915_MMIO_READ define (see intelInitExtensions for the runtime check).
 */
static const struct dri_extension arb_oc_extensions[] = {
   {"GL_ARB_occlusion_query",            GL_ARB_occlusion_query_functions},
   {NULL, NULL}
};
#endif
418
/* Extensions that depend on the kernel memory manager (GEM/TTM) being in
 * use; only enabled when intel->ttm is set.
 */
static const struct dri_extension ttm_extensions[] = {
   {"GL_EXT_framebuffer_object", GL_EXT_framebuffer_object_functions},
   {"GL_ARB_pixel_buffer_object",      NULL},
   {NULL, NULL}
};
424
425 /**
426 * Initializes potential list of extensions if ctx == NULL, or actually enables
427 * extensions for a context.
428 */
void intelInitExtensions(GLcontext *ctx, GLboolean enable_imaging)
{
   /* With ctx == NULL this is called at screen setup time to register
    * every extension the driver could ever enable (so dispatch offsets
    * get allocated); with a real context only the applicable subset is
    * enabled.
    */
   struct intel_context *intel = ctx?intel_context(ctx):NULL;

   /* Disable imaging extension until convolution is working in teximage paths.
    */
   enable_imaging = GL_FALSE;

   driInitExtensions(ctx, card_extensions, enable_imaging);

   if (intel == NULL || intel->ttm)
      driInitExtensions(ctx, ttm_extensions, GL_FALSE);

#ifdef I915_MMIO_READ
   /* Occlusion query requires the MMIO-read ioctl: 965-class hardware on a
    * new enough DRM (minor >= 8).
    */
   if (intel == NULL ||
       (IS_965(intel->intelScreen->deviceID) &&
	intel->intelScreen->drmMinor >= 8))
      driInitExtensions(ctx, arb_oc_extensions, GL_FALSE);
#endif

   if (intel == NULL || IS_965(intel->intelScreen->deviceID))
      driInitExtensions(ctx, brw_extensions, GL_FALSE);
}
452
/* Keyword -> flag table for the INTEL_DEBUG environment variable
 * (parsed by driParseDebugString in intelInitContext).
 */
static const struct dri_debug_control debug_control[] = {
   { "tex",   DEBUG_TEXTURE},
   { "state", DEBUG_STATE},
   { "ioctl", DEBUG_IOCTL},
   { "blit",  DEBUG_BLIT},
   { "mip",   DEBUG_MIPTREE},
   { "fall",  DEBUG_FALLBACKS},
   { "verb",  DEBUG_VERBOSE},
   { "bat",   DEBUG_BATCH},
   { "pix",   DEBUG_PIXEL},
   { "buf",   DEBUG_BUFMGR},
   { "reg",   DEBUG_REGION},
   { "fbo",   DEBUG_FBO},
   { "lock",  DEBUG_LOCK},
   { "sync",  DEBUG_SYNC},
   { "prim",  DEBUG_PRIMS },
   { "vert",  DEBUG_VERTS },
   { "dri",   DEBUG_DRI },
   { "dma",   DEBUG_DMA },
   { "san",   DEBUG_SANITY },
   { "sleep", DEBUG_SLEEP },
   { "stats", DEBUG_STATS },
   { "tile",  DEBUG_TILE },
   { "sing",  DEBUG_SINGLE_THREAD },
   { "thre",  DEBUG_SINGLE_THREAD },
   { "wm",    DEBUG_WM },
   { "urb",   DEBUG_URB },
   { "vs",    DEBUG_VS },
   { NULL,    0 }
};
483
484
/**
 * UpdateState driver hook: propagate a Mesa state-change notification to
 * every helper module, accumulate the dirty bits for deferred hardware
 * state emission, and give the chip-specific layer a chance to react.
 */
static void
intelInvalidateState(GLcontext * ctx, GLuint new_state)
{
   struct intel_context *intel = intel_context(ctx);

   _swrast_InvalidateState(ctx, new_state);
   _swsetup_InvalidateState(ctx, new_state);
   _vbo_InvalidateState(ctx, new_state);
   _tnl_InvalidateState(ctx, new_state);
   _tnl_invalidate_vertex_state(ctx, new_state);

   /* Dirty bits are consumed later, at validation time. */
   intel->NewGLState |= new_state;

   if (intel->vtbl.invalidate_state)
      intel->vtbl.invalidate_state( intel, new_state );
}
501
502
/**
 * glFlush driver hook: push any queued rendering toward the hardware.
 */
void
intelFlush(GLcontext * ctx)
{
   struct intel_context *intel = intel_context(ctx);

   if (intel->Fallback)
      _swrast_flush(ctx);

   /* Pre-965 drivers may still be accumulating vertices. */
   if (!IS_965(intel->intelScreen->deviceID))
      INTEL_FIREVERTICES(intel);

   /* Emit a flush so that any frontbuffer rendering that might have occurred
    * lands onscreen in a timely manner, even if the X Server doesn't trigger
    * a flush for us.
    */
   intel_batchbuffer_emit_mi_flush(intel->batch);

   /* Submit the batch only if it actually contains commands. */
   if (intel->batch->map != intel->batch->ptr)
      intel_batchbuffer_flush(intel->batch);
}
523
524 void
525 intelFinish(GLcontext * ctx)
526 {
527 struct gl_framebuffer *fb = ctx->DrawBuffer;
528 int i;
529
530 intelFlush(ctx);
531
532 for (i = 0; i < fb->_NumColorDrawBuffers; i++) {
533 struct intel_renderbuffer *irb;
534
535 irb = intel_renderbuffer(fb->_ColorDrawBuffers[i]);
536
537 if (irb->region)
538 dri_bo_wait_rendering(irb->region->buffer);
539 }
540 if (fb->_DepthBuffer) {
541 /* XXX: Wait on buffer idle */
542 }
543 }
544
545 #ifdef I915_MMIO_READ
/**
 * GL_ARB_occlusion_query: glBeginQuery.
 *
 * Latches the starting PS_DEPTH_COUNT value into q->Result via the DRM
 * MMIO-read ioctl; intelEndQuery later subtracts it from the final count.
 * intelFinish() first, so the register reflects all prior rendering.
 */
static void
intelBeginQuery(GLcontext *ctx, GLenum target, struct gl_query_object *q)
{
	struct intel_context *intel = intel_context( ctx );
	struct drm_i915_mmio io = {
		.read_write = I915_MMIO_READ,
		.reg = MMIO_REGS_PS_DEPTH_COUNT,
		.data = &q->Result
	};
	/* Keep depth-count statistics enabled in the WM while a query runs. */
	intel->stats_wm++;
	intelFinish(&intel->ctx);
	drmCommandWrite(intel->driFd, DRM_I915_MMIO, &io, sizeof(io));
}
559
/**
 * GL_ARB_occlusion_query: glEndQuery.
 *
 * Reads the final PS_DEPTH_COUNT value and stores the delta since
 * intelBeginQuery as the query result, marking it ready.
 */
static void
intelEndQuery(GLcontext *ctx, GLenum target, struct gl_query_object *q)
{
	struct intel_context *intel = intel_context( ctx );
	GLuint64EXT tmp;
	struct drm_i915_mmio io = {
		.read_write = I915_MMIO_READ,
		.reg = MMIO_REGS_PS_DEPTH_COUNT,
		.data = &tmp
	};
	/* Drain all rendering so the register covers the whole query range. */
	intelFinish(&intel->ctx);
	drmCommandWrite(intel->driFd, DRM_I915_MMIO, &io, sizeof(io));
	/* q->Result still holds the begin-time count. */
	q->Result = tmp - q->Result;
	q->Ready = GL_TRUE;
	intel->stats_wm--;
}
576 #endif
577
578 /** Driver-specific fence emit implementation for the fake memory manager. */
579 static unsigned int
580 intel_fence_emit(void *private)
581 {
582 struct intel_context *intel = (struct intel_context *)private;
583 unsigned int fence;
584
585 /* XXX: Need to emit a flush, if we haven't already (at least with the
586 * current batchbuffer implementation, we have).
587 */
588
589 fence = intelEmitIrqLocked(intel);
590
591 return fence;
592 }
593
594 /** Driver-specific fence wait implementation for the fake memory manager. */
595 static int
596 intel_fence_wait(void *private, unsigned int cookie)
597 {
598 struct intel_context *intel = (struct intel_context *)private;
599
600 intelWaitIrq(intel, cookie);
601
602 return 0;
603 }
604
605 static GLboolean
606 intel_init_bufmgr(struct intel_context *intel)
607 {
608 intelScreenPrivate *intelScreen = intel->intelScreen;
609 GLboolean gem_disable = getenv("INTEL_NO_GEM") != NULL;
610 int gem_kernel = 0;
611 GLboolean gem_supported;
612 struct drm_i915_getparam gp;
613
614 gp.param = I915_PARAM_HAS_GEM;
615 gp.value = &gem_kernel;
616
617 (void) drmCommandWriteRead(intel->driFd, DRM_I915_GETPARAM, &gp, sizeof(gp));
618
619 /* If we've got a new enough DDX that's initializing GEM and giving us
620 * object handles for the shared buffers, use that.
621 */
622 intel->ttm = GL_FALSE;
623 if (intel->intelScreen->driScrnPriv->dri2.enabled)
624 gem_supported = GL_TRUE;
625 else if (intel->intelScreen->driScrnPriv->ddx_version.minor >= 9 &&
626 gem_kernel &&
627 intel->intelScreen->front.bo_handle != -1)
628 gem_supported = GL_TRUE;
629 else
630 gem_supported = GL_FALSE;
631
632 if (!gem_disable && gem_supported) {
633 int bo_reuse_mode;
634 intel->bufmgr = intel_bufmgr_gem_init(intel->driFd,
635 BATCH_SZ);
636 if (intel->bufmgr != NULL)
637 intel->ttm = GL_TRUE;
638
639 bo_reuse_mode = driQueryOptioni(&intel->optionCache, "bo_reuse");
640 switch (bo_reuse_mode) {
641 case DRI_CONF_BO_REUSE_DISABLED:
642 break;
643 case DRI_CONF_BO_REUSE_ALL:
644 intel_bufmgr_gem_enable_reuse(intel->bufmgr);
645 break;
646 }
647 }
648 /* Otherwise, use the classic buffer manager. */
649 if (intel->bufmgr == NULL) {
650 if (gem_disable) {
651 fprintf(stderr, "GEM disabled. Using classic.\n");
652 } else {
653 fprintf(stderr, "Failed to initialize GEM. "
654 "Falling back to classic.\n");
655 }
656
657 if (intelScreen->tex.size == 0) {
658 fprintf(stderr, "[%s:%u] Error initializing buffer manager.\n",
659 __func__, __LINE__);
660 return GL_FALSE;
661 }
662
663 intel->bufmgr = intel_bufmgr_fake_init(intelScreen->tex.offset,
664 intelScreen->tex.map,
665 intelScreen->tex.size,
666 intel_fence_emit,
667 intel_fence_wait,
668 intel);
669 }
670
671 /* XXX bufmgr should be per-screen, not per-context */
672 intelScreen->ttm = intel->ttm;
673
674 return GL_TRUE;
675 }
676
/**
 * Fills in the dd_function_table with the driver hooks shared by all
 * Intel chips; chip-specific layers override further entries afterwards.
 */
void
intelInitDriverFunctions(struct dd_function_table *functions)
{
   _mesa_init_driver_functions(functions);

   functions->Flush = intelFlush;
   functions->Finish = intelFinish;
   functions->GetString = intelGetString;
   functions->UpdateState = intelInvalidateState;
   functions->Viewport = intel_viewport;

   /* Imaging-subset copies are handled entirely in software. */
   functions->CopyColorTable = _swrast_CopyColorTable;
   functions->CopyColorSubTable = _swrast_CopyColorSubTable;
   functions->CopyConvolutionFilter1D = _swrast_CopyConvolutionFilter1D;
   functions->CopyConvolutionFilter2D = _swrast_CopyConvolutionFilter2D;

#ifdef I915_MMIO_READ
   /* Occlusion query hooks only exist when the DRM headers provide the
    * MMIO-read interface.
    */
   functions->BeginQuery = intelBeginQuery;
   functions->EndQuery = intelEndQuery;
#endif

   intelInitTextureFuncs(functions);
   intelInitStateFuncs(functions);
   intelInitBufferFuncs(functions);
   intelInitPixelFuncs(functions);
}
703
704
/**
 * Shared context-creation path for the i830/i915/i965 drivers.
 *
 * Initializes the Mesa context, wires up the DRI plumbing, selects a
 * buffer manager, configures context limits, and creates the software
 * rasterizer / TNL helper modules.  The order of these steps matters.
 *
 * \param intel                driver context to initialize (vtbl already set
 *                             by the chip-specific caller)
 * \param mesaVis              visual the context was created with
 * \param driContextPriv       DRI context handle
 * \param sharedContextPrivate context to share objects with, or NULL
 * \param functions            driver function table (from
 *                             intelInitDriverFunctions plus chip overrides)
 * \return GL_TRUE on success, GL_FALSE on failure.
 */
GLboolean
intelInitContext(struct intel_context *intel,
                 const __GLcontextModes * mesaVis,
                 __DRIcontextPrivate * driContextPriv,
                 void *sharedContextPrivate,
                 struct dd_function_table *functions)
{
   GLcontext *ctx = &intel->ctx;
   GLcontext *shareCtx = (GLcontext *) sharedContextPrivate;
   __DRIscreenPrivate *sPriv = driContextPriv->driScreenPriv;
   intelScreenPrivate *intelScreen = (intelScreenPrivate *) sPriv->private;
   /* The SAREA is shared with the X server and other clients, hence
    * volatile.
    */
   volatile struct drm_i915_sarea *saPriv = (struct drm_i915_sarea *)
      (((GLubyte *) sPriv->pSAREA) + intelScreen->sarea_priv_offset);
   int fthrottle_mode;

   if (!_mesa_initialize_context(&intel->ctx, mesaVis, shareCtx,
                                 functions, (void *) intel)) {
      _mesa_printf("%s: failed to init mesa context\n", __FUNCTION__);
      return GL_FALSE;
   }

   driContextPriv->driverPrivate = intel;
   intel->intelScreen = intelScreen;
   intel->driScreen = sPriv;
   intel->sarea = saPriv;
   intel->driContext = driContextPriv;

   /* Dri stuff */
   intel->hHWContext = driContextPriv->hHWContext;
   intel->driFd = sPriv->fd;
   intel->driHwLock = sPriv->lock;

   intel->width = intelScreen->width;
   intel->height = intelScreen->height;

   driParseConfigFiles(&intel->optionCache, &intelScreen->optionCache,
                       intel->driScreen->myNum,
                       IS_965(intelScreen->deviceID) ? "i965" : "i915");
   /* 865G has a smaller ring-buffer constraint for batches. */
   if (intelScreen->deviceID == PCI_CHIP_I865_G)
      intel->maxBatchSize = 4096;
   else
      intel->maxBatchSize = BATCH_SZ;

   if (!intel_init_bufmgr(intel))
      return GL_FALSE;

   ctx->Const.MaxTextureMaxAnisotropy = 2.0;

   /* This doesn't yet catch all non-conformant rendering, but it's a
    * start.
    */
   if (getenv("INTEL_STRICT_CONFORMANCE")) {
      intel->strict_conformance = 1;
   }

   /* Wide lines are not conformant; restrict them in strict mode. */
   if (intel->strict_conformance) {
      ctx->Const.MinLineWidth = 1.0;
      ctx->Const.MinLineWidthAA = 1.0;
      ctx->Const.MaxLineWidth = 1.0;
      ctx->Const.MaxLineWidthAA = 1.0;
      ctx->Const.LineWidthGranularity = 1.0;
   }
   else {
      ctx->Const.MinLineWidth = 1.0;
      ctx->Const.MinLineWidthAA = 1.0;
      ctx->Const.MaxLineWidth = 5.0;
      ctx->Const.MaxLineWidthAA = 5.0;
      ctx->Const.LineWidthGranularity = 0.5;
   }

   ctx->Const.MinPointSize = 1.0;
   ctx->Const.MinPointSizeAA = 1.0;
   ctx->Const.MaxPointSize = 255.0;
   ctx->Const.MaxPointSizeAA = 3.0;
   ctx->Const.PointSizeGranularity = 1.0;

   /* reinitialize the context point state.
    * It depend on constants in __GLcontextRec::Const
    */
   _mesa_init_point(ctx);

   ctx->Const.MaxColorAttachments = 4;  /* XXX FBO: review this */

   /* Initialize the software rasterizer and helper modules. */
   _swrast_CreateContext(ctx);
   _vbo_CreateContext(ctx);
   _tnl_CreateContext(ctx);
   _swsetup_CreateContext(ctx);

   /* Configure swrast to match hardware characteristics: */
   _swrast_allow_pixel_fog(ctx, GL_FALSE);
   _swrast_allow_vertex_fog(ctx, GL_TRUE);

   intel->hw_stencil = mesaVis->stencilBits && mesaVis->depthBits == 24;
   intel->hw_stipple = 1;

   /* XXX FBO: this doesn't seem to be used anywhere */
   switch (mesaVis->depthBits) {
   case 0:                     /* what to do in this case? */
   case 16:
      intel->polygon_offset_scale = 1.0;
      break;
   case 24:
      intel->polygon_offset_scale = 2.0;     /* req'd to pass glean */
      break;
   default:
      assert(0);
      break;
   }

   if (IS_965(intelScreen->deviceID))
      intel->polygon_offset_scale /= 0xffff;

   intel->RenderIndex = ~0;

   fthrottle_mode = driQueryOptioni(&intel->optionCache, "fthrottle_mode");
   intel->irqsEmitted = 0;

   intel->do_irqs = (intel->intelScreen->irq_active &&
                     fthrottle_mode == DRI_CONF_FTHROTTLE_IRQS);

   intel->do_usleeps = (fthrottle_mode == DRI_CONF_FTHROTTLE_USLEEPS);

   _math_matrix_ctr(&intel->ViewportMatrix);

   /* 965 cannot operate without a working IRQ. */
   if (IS_965(intelScreen->deviceID) && !intel->intelScreen->irq_active) {
      _mesa_printf("IRQs not active.  Exiting\n");
      exit(1);
   }

   intelInitExtensions(ctx, GL_FALSE);

   INTEL_DEBUG = driParseDebugString(getenv("INTEL_DEBUG"), debug_control);
   if (INTEL_DEBUG & DEBUG_BUFMGR)
      dri_bufmgr_set_debug(intel->bufmgr, GL_TRUE);

   /* DRI2 buffers arrive via intel_update_renderbuffers instead. */
   if (!sPriv->dri2.enabled)
      intel_recreate_static_regions(intel);

   intel->batch = intel_batchbuffer_alloc(intel);

   intel_bufferobj_init(intel);
   intel_fbo_init(intel);

   if (intel->ctx.Mesa_DXTn) {
      _mesa_enable_extension(ctx, "GL_EXT_texture_compression_s3tc");
      _mesa_enable_extension(ctx, "GL_S3_s3tc");
   }
   else if (driQueryOptionb(&intel->optionCache, "force_s3tc_enable")) {
      _mesa_enable_extension(ctx, "GL_EXT_texture_compression_s3tc");
   }

   intel->prim.primitive = ~0;

   /* Force all software fallbacks */
   if (driQueryOptionb(&intel->optionCache, "no_rast")) {
      fprintf(stderr, "disabling 3D rasterization\n");
      intel->no_rast = 1;
   }

   /* Disable all hardware rendering (skip emitting batches and fences/waits
    * to the kernel)
    */
   intel->no_hw = getenv("INTEL_NO_HW") != NULL;

   return GL_TRUE;
}
872
/**
 * Tears down a context created by intelInitContext: fires pending
 * vertices, destroys the chip-specific layer, helper modules, batch
 * buffer, Mesa context data and finally the buffer manager.  Teardown
 * order mirrors creation order in reverse.
 */
void
intelDestroyContext(__DRIcontextPrivate * driContextPriv)
{
   struct intel_context *intel =
      (struct intel_context *) driContextPriv->driverPrivate;

   assert(intel);               /* should never be null */
   if (intel) {
      GLboolean release_texture_heaps;

      INTEL_FIREVERTICES(intel);

      intel->vtbl.destroy(intel);

      /* Last context in the share group?  Then its texture data goes too. */
      release_texture_heaps = (intel->ctx.Shared->RefCount == 1);
      _swsetup_DestroyContext(&intel->ctx);
      _tnl_DestroyContext(&intel->ctx);
      _vbo_DestroyContext(&intel->ctx);

      _swrast_DestroyContext(&intel->ctx);
      intel->Fallback = 0;      /* don't call _swrast_Flush later */

      intel_batchbuffer_free(intel->batch);
      free(intel->prim.vb);

      if (release_texture_heaps) {
         /* This share group is about to go away, free our private
          * texture object data.
          */
         if (INTEL_DEBUG & DEBUG_TEXTURE)
            fprintf(stderr, "do something to free texture heaps\n");
      }

      /* free the Mesa context */
      _mesa_free_context_data(&intel->ctx);

      dri_bufmgr_destroy(intel->bufmgr);
   }
}
912
913 GLboolean
914 intelUnbindContext(__DRIcontextPrivate * driContextPriv)
915 {
916 return GL_TRUE;
917 }
918
/**
 * DRI MakeCurrent hook: binds (or unbinds, when driContextPriv is NULL)
 * the context to the given draw/read drawables.
 *
 * Under DRI2 the renderbuffers are refreshed from the loader; under DRI1
 * the static screen regions are (re)attached.  On a drawable change the
 * vblank state is initialized so swap throttling works.
 */
GLboolean
intelMakeCurrent(__DRIcontextPrivate * driContextPriv,
                 __DRIdrawablePrivate * driDrawPriv,
                 __DRIdrawablePrivate * driReadPriv)
{
   __DRIscreenPrivate *psp = driDrawPriv->driScreenPriv;

   if (driContextPriv) {
      struct intel_context *intel =
         (struct intel_context *) driContextPriv->driverPrivate;
      struct intel_framebuffer *intel_fb =
	 (struct intel_framebuffer *) driDrawPriv->driverPrivate;
      GLframebuffer *readFb = (GLframebuffer *) driReadPriv->driverPrivate;

      if (driContextPriv->driScreenPriv->dri2.enabled) {
	 /* DRI2: pull current buffers from the loader. */
	 intel_update_renderbuffers(driContextPriv, driDrawPriv);
	 if (driDrawPriv != driReadPriv)
	    intel_update_renderbuffers(driContextPriv, driReadPriv);
      } else {
	 /* XXX FBO temporary fix-ups! */
	 /* if the renderbuffers don't have regions, init them from the context */
	 struct intel_renderbuffer *irbDepth
	    = intel_get_renderbuffer(&intel_fb->Base, BUFFER_DEPTH);
	 struct intel_renderbuffer *irbStencil
	    = intel_get_renderbuffer(&intel_fb->Base, BUFFER_STENCIL);

	 if (intel_fb->color_rb[0]) {
	    intel_renderbuffer_set_region(intel_fb->color_rb[0],
					  intel->front_region);
	 }
	 if (intel_fb->color_rb[1]) {
	    intel_renderbuffer_set_region(intel_fb->color_rb[1],
					  intel->back_region);
	 }
#if 0
	 if (intel_fb->color_rb[2]) {
	    intel_renderbuffer_set_region(intel_fb->color_rb[2],
					  intel->third_region);
	 }
#endif
	 if (irbDepth) {
	    intel_renderbuffer_set_region(irbDepth, intel->depth_region);
	 }
	 /* Depth and stencil share a combined region. */
	 if (irbStencil) {
	    intel_renderbuffer_set_region(irbStencil, intel->depth_region);
	 }
      }

      /* set GLframebuffer size to match window, if needed */
      driUpdateFramebufferSize(&intel->ctx, driDrawPriv);

      if (driReadPriv != driDrawPriv) {
	 driUpdateFramebufferSize(&intel->ctx, driReadPriv);
      }

      _mesa_make_current(&intel->ctx, &intel_fb->Base, readFb);

      /* The drawbuffer won't always be updated by _mesa_make_current:
       */
      if (intel->ctx.DrawBuffer == &intel_fb->Base) {

	 if (intel->driReadDrawable != driReadPriv)
	    intel->driReadDrawable = driReadPriv;

	 if (intel->driDrawable != driDrawPriv) {
	    /* swap_interval == -1 marks a drawable whose vblank state has
	     * not been initialized yet.
	     */
	    if (driDrawPriv->swap_interval == (unsigned)-1) {
	       int i;

	       driDrawPriv->vblFlags = (intel->intelScreen->irq_active != 0)
		  ? driGetDefaultVBlankFlags(&intel->optionCache)
		 : VBLANK_FLAG_NO_IRQ;

	       (*psp->systemTime->getUST) (&intel_fb->swap_ust);
	       driDrawableInitVBlank(driDrawPriv);
	       intel_fb->vbl_waited = driDrawPriv->vblSeq;

	       /* Two or three color buffers depending on triple buffering. */
	       for (i = 0; i < (intel->intelScreen->third.handle ? 3 : 2); i++) {
		  if (intel_fb->color_rb[i])
		     intel_fb->color_rb[i]->vbl_pending = driDrawPriv->vblSeq;
	       }
	    }
	    intel->driDrawable = driDrawPriv;
	    intelWindowMoved(intel);
	 }

	 intel_draw_buffer(&intel->ctx, &intel_fb->Base);
      }
   }
   else {
      _mesa_make_current(NULL, NULL, NULL);
   }

   return GL_TRUE;
}
1013
/**
 * Slow path of LOCK_HARDWARE: another client held the lock, so after
 * acquiring it we must revalidate everything another context may have
 * changed -- cliprects, context ownership, texture memory, screen size.
 */
static void
intelContendedLock(struct intel_context *intel, GLuint flags)
{
   __DRIdrawablePrivate *dPriv = intel->driDrawable;
   __DRIscreenPrivate *sPriv = intel->driScreen;
   volatile struct drm_i915_sarea *sarea = intel->sarea;
   int me = intel->hHWContext;

   drmGetLock(intel->driFd, intel->hHWContext, flags);
   intel->locked = 1;

   if (INTEL_DEBUG & DEBUG_LOCK)
      _mesa_printf("%s - got contended lock\n", __progname);

   /* If the window moved, may need to set a new cliprect now.
    *
    * NOTE: This releases and regains the hw lock, so all state
    * checking must be done *after* this call:
    */
   if (dPriv)
      DRI_VALIDATE_DRAWABLE_INFO(sPriv, dPriv);

   /* Another context used the hardware since our last lock. */
   if (sarea && sarea->ctxOwner != me) {
      if (INTEL_DEBUG & DEBUG_BUFMGR) {
	 fprintf(stderr, "Lost Context: sarea->ctxOwner %x me %x\n",
		 sarea->ctxOwner, me);
      }
      sarea->ctxOwner = me;
   }

   /* If the last consumer of the texture memory wasn't us, notify the fake
    * bufmgr and record the new owner.  We should have the memory shared
    * between contexts of a single fake bufmgr, but this will at least make
    * things correct for now.
    */
   if (!intel->ttm && sarea->texAge != intel->hHWContext) {
      sarea->texAge = intel->hHWContext;
      intel_bufmgr_fake_contended_lock_take(intel->bufmgr);
      if (INTEL_DEBUG & DEBUG_BATCH)
	 intel_decode_context_reset();
      if (INTEL_DEBUG & DEBUG_BUFMGR)
	 fprintf(stderr, "Lost Textures: sarea->texAge %x hw context %x\n",
		 sarea->ctxOwner, intel->hHWContext);
   }

   /* Screen was resized (e.g. by a mode switch) since our last lock. */
   if (sarea->width != intel->width || sarea->height != intel->height) {
      int numClipRects = intel->numClipRects;

      /*
       * FIXME: Really only need to do this when drawing to a
       * common back- or front buffer.
       */

      /*
       * This will essentially drop the outstanding batchbuffer on
       * the floor.
       */
      intel->numClipRects = 0;

      if (intel->Fallback)
	 _swrast_flush(&intel->ctx);

      if (!IS_965(intel->intelScreen->deviceID))
	 INTEL_FIREVERTICES(intel);

      if (intel->batch->map != intel->batch->ptr)
	 intel_batchbuffer_flush(intel->batch);

      intel->numClipRects = numClipRects;

      /* force window update */
      intel->lastStamp = 0;

      intel->width = sarea->width;
      intel->height = sarea->height;
   }

   /* Drawable changed?
    */
   if (dPriv && intel->lastStamp != dPriv->lastStamp) {
      intelWindowMoved(intel);
      intel->lastStamp = dPriv->lastStamp;
   }
}
1098
1099
1100 _glthread_DECLARE_STATIC_MUTEX(lockMutex);
1101
/**
 * Lock the hardware and validate our state.
 *
 * Throttles on a pending vblank swap for the current color buffer, then
 * takes the DRM lock (DRI1 only); on contention falls through to
 * intelContendedLock() for full revalidation.  Pairs with
 * UNLOCK_HARDWARE.
 */
void LOCK_HARDWARE( struct intel_context *intel )
{
    __DRIdrawable *dPriv = intel->driDrawable;
    __DRIscreen *sPriv = intel->driScreen;
    char __ret = 0;
    struct intel_framebuffer *intel_fb = NULL;
    struct intel_renderbuffer *intel_rb = NULL;

    _glthread_LOCK_MUTEX(lockMutex);
    assert(!intel->locked);
    intel->locked = 1;

    if (intel->driDrawable) {
       intel_fb = intel->driDrawable->driverPrivate;

       if (intel_fb)
	  intel_rb =
	     intel_get_renderbuffer(&intel_fb->Base,
				    intel_fb->Base._ColorDrawBufferIndexes[0]);
    }

    /* Wait for a pending page-flip/swap vblank before touching the buffer.
     * The subtraction comparison handles sequence-counter wraparound.
     */
    if (intel_rb && dPriv->vblFlags &&
	!(dPriv->vblFlags & VBLANK_FLAG_NO_IRQ) &&
	(intel_fb->vbl_waited - intel_rb->vbl_pending) > (1<<23)) {
	drmVBlank vbl;

	vbl.request.type = DRM_VBLANK_ABSOLUTE;

	if ( dPriv->vblFlags & VBLANK_FLAG_SECONDARY ) {
	    vbl.request.type |= DRM_VBLANK_SECONDARY;
	}

	vbl.request.sequence = intel_rb->vbl_pending;
	drmWaitVBlank(intel->driFd, &vbl);
	intel_fb->vbl_waited = vbl.reply.sequence;
    }

    /* DRI2 needs no global hardware lock. */
    if (!sPriv->dri2.enabled) {
	DRM_CAS(intel->driHwLock, intel->hHWContext,
		(DRM_LOCK_HELD|intel->hHWContext), __ret);

	if (__ret)
	    intelContendedLock( intel, 0 );
    }


    if (INTEL_DEBUG & DEBUG_LOCK)
      _mesa_printf("%s - locked\n", __progname);
}
1153
1154
/**
 * Unlock the hardware using the global current context.
 *
 * Releases the DRM lock (DRI1 only) and flushes any batch that still
 * references cliprects, which are only valid while locked.
 */
void UNLOCK_HARDWARE( struct intel_context *intel )
{
   __DRIscreen *sPriv = intel->driScreen;

   intel->vtbl.note_unlock( intel );
   intel->locked = 0;

   if (!sPriv->dri2.enabled)
      DRM_UNLOCK(intel->driFd, intel->driHwLock, intel->hHWContext);

   _glthread_UNLOCK_MUTEX(lockMutex);

   if (INTEL_DEBUG & DEBUG_LOCK)
      _mesa_printf("%s - unlocked\n", __progname);

   /**
    * Nothing should be left in batch outside of LOCK/UNLOCK which references
    * cliprects.
    */
   if (intel->batch->cliprect_mode == REFERENCES_CLIPRECTS)
      intel_batchbuffer_flush(intel->batch);
}
1179