DRI2: Drop sarea, implement swap buffers in the X server.
[mesa.git] / src / mesa / drivers / dri / intel / intel_context.c
1 /**************************************************************************
2 *
3 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28
29 #include "glheader.h"
30 #include "context.h"
31 #include "matrix.h"
32 #include "simple_list.h"
33 #include "extensions.h"
34 #include "framebuffer.h"
35 #include "imports.h"
36 #include "points.h"
37
38 #include "swrast/swrast.h"
39 #include "swrast_setup/swrast_setup.h"
40 #include "tnl/tnl.h"
41
42 #include "tnl/t_pipeline.h"
43 #include "tnl/t_vertex.h"
44
45 #include "drivers/common/driverfuncs.h"
46
47 #include "intel_screen.h"
48
49 #include "i830_dri.h"
50
51 #include "intel_chipset.h"
52 #include "intel_buffers.h"
53 #include "intel_tex.h"
54 #include "intel_ioctl.h"
55 #include "intel_batchbuffer.h"
56 #include "intel_blit.h"
57 #include "intel_pixel.h"
58 #include "intel_regions.h"
59 #include "intel_buffer_objects.h"
60 #include "intel_fbo.h"
61 #include "intel_decode.h"
62 #include "intel_bufmgr.h"
63
64 #include "drirenderbuffer.h"
65 #include "vblank.h"
66 #include "utils.h"
67 #include "xmlpool.h" /* for symbolic values of enum-type options */
68 #ifndef INTEL_DEBUG
69 int INTEL_DEBUG = (0);
70 #endif
71
72 #define need_GL_NV_point_sprite
73 #define need_GL_ARB_multisample
74 #define need_GL_ARB_point_parameters
75 #define need_GL_ARB_texture_compression
76 #define need_GL_ARB_vertex_buffer_object
77 #define need_GL_ARB_vertex_program
78 #define need_GL_ARB_window_pos
79 #define need_GL_ARB_occlusion_query
80 #define need_GL_EXT_blend_color
81 #define need_GL_EXT_blend_equation_separate
82 #define need_GL_EXT_blend_func_separate
83 #define need_GL_EXT_blend_minmax
84 #define need_GL_EXT_cull_vertex
85 #define need_GL_EXT_fog_coord
86 #define need_GL_EXT_framebuffer_object
87 #define need_GL_EXT_multi_draw_arrays
88 #define need_GL_EXT_secondary_color
89 #define need_GL_NV_vertex_program
90 #define need_GL_ATI_separate_stencil
91 #define need_GL_EXT_point_parameters
92 #define need_GL_VERSION_2_0
93 #define need_GL_VERSION_2_1
94 #define need_GL_ARB_shader_objects
95 #define need_GL_ARB_vertex_shader
96
97 #include "extension_helper.h"
98
99 #define DRIVER_DATE "20080716"
100 #define DRIVER_DATE_GEM "GEM " DRIVER_DATE
101
102 static const GLubyte *
103 intelGetString(GLcontext * ctx, GLenum name)
104 {
105 const struct intel_context *const intel = intel_context(ctx);
106 const char *chipset;
107 static char buffer[128];
108
109 switch (name) {
110 case GL_VENDOR:
111 return (GLubyte *) "Tungsten Graphics, Inc";
112 break;
113
114 case GL_RENDERER:
115 switch (intel->intelScreen->deviceID) {
116 case PCI_CHIP_845_G:
117 chipset = "Intel(R) 845G";
118 break;
119 case PCI_CHIP_I830_M:
120 chipset = "Intel(R) 830M";
121 break;
122 case PCI_CHIP_I855_GM:
123 chipset = "Intel(R) 852GM/855GM";
124 break;
125 case PCI_CHIP_I865_G:
126 chipset = "Intel(R) 865G";
127 break;
128 case PCI_CHIP_I915_G:
129 chipset = "Intel(R) 915G";
130 break;
131 case PCI_CHIP_E7221_G:
132 chipset = "Intel (R) E7221G (i915)";
133 break;
134 case PCI_CHIP_I915_GM:
135 chipset = "Intel(R) 915GM";
136 break;
137 case PCI_CHIP_I945_G:
138 chipset = "Intel(R) 945G";
139 break;
140 case PCI_CHIP_I945_GM:
141 chipset = "Intel(R) 945GM";
142 break;
143 case PCI_CHIP_I945_GME:
144 chipset = "Intel(R) 945GME";
145 break;
146 case PCI_CHIP_G33_G:
147 chipset = "Intel(R) G33";
148 break;
149 case PCI_CHIP_Q35_G:
150 chipset = "Intel(R) Q35";
151 break;
152 case PCI_CHIP_Q33_G:
153 chipset = "Intel(R) Q33";
154 break;
155 case PCI_CHIP_I965_Q:
156 chipset = "Intel(R) 965Q";
157 break;
158 case PCI_CHIP_I965_G:
159 case PCI_CHIP_I965_G_1:
160 chipset = "Intel(R) 965G";
161 break;
162 case PCI_CHIP_I946_GZ:
163 chipset = "Intel(R) 946GZ";
164 break;
165 case PCI_CHIP_I965_GM:
166 chipset = "Intel(R) 965GM";
167 break;
168 case PCI_CHIP_I965_GME:
169 chipset = "Intel(R) 965GME/GLE";
170 break;
171 case PCI_CHIP_GM45_GM:
172 chipset = "Mobile IntelĀ® GM45 Express Chipset";
173 break;
174 case PCI_CHIP_IGD_E_G:
175 chipset = "Intel(R) Integrated Graphics Device";
176 break;
177 case PCI_CHIP_G45_G:
178 chipset = "Intel(R) G45/G43";
179 break;
180 case PCI_CHIP_Q45_G:
181 chipset = "Intel(R) Q45/Q43";
182 break;
183 default:
184 chipset = "Unknown Intel Chipset";
185 break;
186 }
187
188 (void) driGetRendererString(buffer, chipset,
189 (intel->ttm) ? DRIVER_DATE_GEM : DRIVER_DATE,
190 0);
191 return (GLubyte *) buffer;
192
193 default:
194 return NULL;
195 }
196 }
197
198 void
199 intel_update_renderbuffers(__DRIcontext *context, __DRIdrawable *drawable)
200 {
201 struct intel_framebuffer *intel_fb = drawable->driverPrivate;
202 struct intel_renderbuffer *rb;
203 struct intel_region *region, *depth_region;
204 struct intel_context *intel = context->driverPrivate;
205 __DRIbuffer *buffers;
206 __DRIscreen *screen;
207 int i, count;
208 unsigned int attachments[10];
209 uint32_t name;
210 const char *region_name;
211
212 if (INTEL_DEBUG & DEBUG_DRI)
213 fprintf(stderr, "enter %s, drawable %p\n", __func__, drawable);
214
215 screen = intel->intelScreen->driScrnPriv;
216
217 i = 0;
218 if (intel_fb->color_rb[0])
219 attachments[i++] = __DRI_BUFFER_FRONT_LEFT;
220 if (intel_fb->color_rb[1])
221 attachments[i++] = __DRI_BUFFER_BACK_LEFT;
222 if (intel_get_renderbuffer(&intel_fb->Base, BUFFER_DEPTH))
223 attachments[i++] = __DRI_BUFFER_DEPTH;
224 if (intel_get_renderbuffer(&intel_fb->Base, BUFFER_STENCIL))
225 attachments[i++] = __DRI_BUFFER_STENCIL;
226
227 buffers = (*screen->dri2.loader->getBuffers)(drawable,
228 &drawable->w,
229 &drawable->h,
230 attachments, i,
231 &count,
232 drawable->loaderPrivate);
233
234 drawable->x = 0;
235 drawable->y = 0;
236 drawable->backX = 0;
237 drawable->backY = 0;
238 drawable->numClipRects = 1;
239 drawable->pClipRects[0].x1 = 0;
240 drawable->pClipRects[0].y1 = 0;
241 drawable->pClipRects[0].x2 = drawable->w;
242 drawable->pClipRects[0].y2 = drawable->h;
243 drawable->numBackClipRects = 1;
244 drawable->pBackClipRects[0].x1 = 0;
245 drawable->pBackClipRects[0].y1 = 0;
246 drawable->pBackClipRects[0].x2 = drawable->w;
247 drawable->pBackClipRects[0].y2 = drawable->h;
248
249 depth_region = NULL;
250 for (i = 0; i < count; i++) {
251 switch (buffers[i].attachment) {
252 case __DRI_BUFFER_FRONT_LEFT:
253 rb = intel_fb->color_rb[0];
254 region_name = "dri2 front buffer";
255 break;
256
257 case __DRI_BUFFER_BACK_LEFT:
258 rb = intel_fb->color_rb[1];
259 region_name = "dri2 back buffer";
260 break;
261
262 case __DRI_BUFFER_DEPTH:
263 rb = intel_get_renderbuffer(&intel_fb->Base, BUFFER_DEPTH);
264 region_name = "dri2 depth buffer";
265 break;
266
267 case __DRI_BUFFER_STENCIL:
268 rb = intel_get_renderbuffer(&intel_fb->Base, BUFFER_STENCIL);
269 region_name = "dri2 stencil buffer";
270 break;
271
272 case __DRI_BUFFER_ACCUM:
273 default:
274 fprintf(stderr,
275 "unhandled buffer attach event, attacment type %d\n",
276 buffers[i].attachment);
277 return;
278 }
279
280 if (rb->region) {
281 intel_bo_flink(rb->region->buffer, &name);
282 if (name == buffers[i].name)
283 continue;
284 }
285
286 if (INTEL_DEBUG & DEBUG_DRI)
287 fprintf(stderr,
288 "attaching buffer %d, at %d, cpp %d, pitch %d\n",
289 buffers[i].name, buffers[i].attachment,
290 buffers[i].cpp, buffers[i].pitch);
291
292 if (buffers[i].attachment == __DRI_BUFFER_STENCIL && depth_region) {
293 if (INTEL_DEBUG & DEBUG_DRI)
294 fprintf(stderr, "(reusing depth buffer as stencil)\n");
295 region = depth_region;
296 }
297 else
298 region = intel_region_alloc_for_handle(intel, buffers[i].cpp,
299 buffers[i].pitch / buffers[i].cpp,
300 drawable->h,
301 buffers[i].name,
302 region_name);
303
304 if (buffers[i].attachment == __DRI_BUFFER_DEPTH)
305 depth_region = region;
306
307 intel_renderbuffer_set_region(rb, region);
308 intel_region_release(&region);
309 }
310
311 driUpdateFramebufferSize(&intel->ctx, drawable);
312 }
313
/* Viewport driver hook, doubled up as the DRI2 "something changed" entry
 * point: on every viewport change we re-query the loader for the current
 * buffers and update the window state.  No-op for DRI1.
 */
static void
intel_viewport(GLcontext *ctx, GLint x, GLint y, GLsizei w, GLsizei h)
{
   struct intel_context *intel = intel_context(ctx);
   __DRIcontext *driContext = intel->driContext;

   if (!driContext->driScreenPriv->dri2.enabled)
      return;

   intel_update_renderbuffers(driContext, driContext->driDrawablePriv);
   if (driContext->driDrawablePriv != driContext->driReadablePriv)
      intel_update_renderbuffers(driContext, driContext->driReadablePriv);

   /* Clear the hook while we call back into driver/core code, then restore
    * it — presumably to guard against recursive re-entry into this
    * function (NOTE(review): confirm which callee can trigger Viewport).
    */
   ctx->Driver.Viewport = NULL;
   intel->driDrawable = driContext->driDrawablePriv;
   intelWindowMoved(intel);
   intel_draw_buffer(ctx, intel->ctx.DrawBuffer);
   ctx->Driver.Viewport = intel_viewport;
}
333
334 /**
335 * Extension strings exported by the intel driver.
336 *
337 * Extensions supported by all chips supported by i830_dri, i915_dri, or
338 * i965_dri.
339 */
340 static const struct dri_extension card_extensions[] = {
341 {"GL_ARB_multisample", GL_ARB_multisample_functions},
342 {"GL_ARB_multitexture", NULL},
343 {"GL_ARB_point_parameters", GL_ARB_point_parameters_functions},
344 {"GL_NV_point_sprite", GL_NV_point_sprite_functions},
345 {"GL_ARB_texture_border_clamp", NULL},
346 {"GL_ARB_texture_compression", GL_ARB_texture_compression_functions},
347 {"GL_ARB_texture_cube_map", NULL},
348 {"GL_ARB_texture_env_add", NULL},
349 {"GL_ARB_texture_env_combine", NULL},
350 {"GL_ARB_texture_env_crossbar", NULL},
351 {"GL_ARB_texture_env_dot3", NULL},
352 {"GL_ARB_texture_mirrored_repeat", NULL},
353 {"GL_ARB_texture_non_power_of_two", NULL },
354 {"GL_ARB_texture_rectangle", NULL},
355 {"GL_NV_texture_rectangle", NULL},
356 {"GL_EXT_texture_rectangle", NULL},
357 {"GL_ARB_point_parameters", NULL},
358 {"GL_ARB_vertex_buffer_object", GL_ARB_vertex_buffer_object_functions},
359 {"GL_ARB_vertex_program", GL_ARB_vertex_program_functions},
360 {"GL_ARB_window_pos", GL_ARB_window_pos_functions},
361 {"GL_EXT_blend_color", GL_EXT_blend_color_functions},
362 {"GL_EXT_blend_equation_separate",
363 GL_EXT_blend_equation_separate_functions},
364 {"GL_EXT_blend_func_separate", GL_EXT_blend_func_separate_functions},
365 {"GL_EXT_blend_minmax", GL_EXT_blend_minmax_functions},
366 {"GL_EXT_blend_logic_op", NULL},
367 {"GL_EXT_blend_subtract", NULL},
368 {"GL_EXT_cull_vertex", GL_EXT_cull_vertex_functions},
369 {"GL_EXT_fog_coord", GL_EXT_fog_coord_functions},
370 {"GL_EXT_multi_draw_arrays", GL_EXT_multi_draw_arrays_functions},
371 {"GL_ATI_separate_stencil", GL_ATI_separate_stencil_functions},
372 #if 1 /* XXX FBO temporary? */
373 {"GL_EXT_packed_depth_stencil", NULL},
374 #endif
375 {"GL_EXT_secondary_color", GL_EXT_secondary_color_functions},
376 {"GL_EXT_stencil_wrap", NULL},
377 {"GL_EXT_texture_edge_clamp", NULL},
378 {"GL_EXT_texture_env_combine", NULL},
379 {"GL_EXT_texture_env_dot3", NULL},
380 {"GL_EXT_texture_filter_anisotropic", NULL},
381 {"GL_EXT_texture_lod_bias", NULL},
382 {"GL_3DFX_texture_compression_FXT1", NULL},
383 {"GL_APPLE_client_storage", NULL},
384 {"GL_MESA_pack_invert", NULL},
385 {"GL_MESA_ycbcr_texture", NULL},
386 {"GL_NV_blend_square", NULL},
387 {"GL_NV_vertex_program", GL_NV_vertex_program_functions},
388 {"GL_NV_vertex_program1_1", NULL},
389 { "GL_SGIS_generate_mipmap", NULL },
390 {NULL, NULL}
391 };
392
/* Extensions only enabled on 965-class (brw) hardware; see
 * intelInitExtensions().
 */
static const struct dri_extension brw_extensions[] = {
   { "GL_ARB_shading_language_100", GL_VERSION_2_0_functions},
   { "GL_ARB_shading_language_120", GL_VERSION_2_1_functions},
   { "GL_ARB_shader_objects", GL_ARB_shader_objects_functions},
   { "GL_ARB_vertex_shader", GL_ARB_vertex_shader_functions},
   { "GL_ARB_point_sprite", NULL},
   { "GL_ARB_fragment_shader", NULL },
   { "GL_ARB_draw_buffers", NULL },
   { "GL_ARB_depth_texture", NULL },
   { "GL_ARB_fragment_program", NULL },
   { "GL_ARB_shadow", NULL },
   { "GL_EXT_shadow_funcs", NULL },
   { "GL_ARB_fragment_program_shadow", NULL },
   /* ARB extn won't work if not enabled */
   { "GL_SGIX_depth_texture", NULL },
   { "GL_EXT_texture_sRGB", NULL},
   { NULL, NULL }
};
411
/* Occlusion query: needs 965 hardware and a new enough DRM (see the
 * drmMinor check in intelInitExtensions()).
 */
static const struct dri_extension arb_oc_extensions[] = {
   {"GL_ARB_occlusion_query", GL_ARB_occlusion_query_functions},
   {NULL, NULL}
};
416
/* Extensions that require the GEM/TTM kernel memory manager. */
static const struct dri_extension ttm_extensions[] = {
   {"GL_EXT_framebuffer_object", GL_EXT_framebuffer_object_functions},
   {"GL_ARB_pixel_buffer_object", NULL},
   {NULL, NULL}
};
422
423 /**
424 * Initializes potential list of extensions if ctx == NULL, or actually enables
425 * extensions for a context.
426 */
427 void intelInitExtensions(GLcontext *ctx, GLboolean enable_imaging)
428 {
429 struct intel_context *intel = ctx?intel_context(ctx):NULL;
430
431 /* Disable imaging extension until convolution is working in teximage paths.
432 */
433 enable_imaging = GL_FALSE;
434
435 driInitExtensions(ctx, card_extensions, enable_imaging);
436
437 if (intel == NULL || intel->ttm)
438 driInitExtensions(ctx, ttm_extensions, GL_FALSE);
439
440 if (intel == NULL ||
441 (IS_965(intel->intelScreen->deviceID) &&
442 intel->intelScreen->drmMinor >= 8))
443 driInitExtensions(ctx, arb_oc_extensions, GL_FALSE);
444
445 if (intel == NULL || IS_965(intel->intelScreen->deviceID))
446 driInitExtensions(ctx, brw_extensions, GL_FALSE);
447 }
448
/* Mapping from INTEL_DEBUG environment-variable tokens to debug flags,
 * parsed by driParseDebugString() in intelInitContext().
 */
static const struct dri_debug_control debug_control[] = {
   { "tex", DEBUG_TEXTURE},
   { "state", DEBUG_STATE},
   { "ioctl", DEBUG_IOCTL},
   { "blit", DEBUG_BLIT},
   { "mip", DEBUG_MIPTREE},
   { "fall", DEBUG_FALLBACKS},
   { "verb", DEBUG_VERBOSE},
   { "bat", DEBUG_BATCH},
   { "pix", DEBUG_PIXEL},
   { "buf", DEBUG_BUFMGR},
   { "reg", DEBUG_REGION},
   { "fbo", DEBUG_FBO},
   { "lock", DEBUG_LOCK},
   { "sync", DEBUG_SYNC},
   { "prim", DEBUG_PRIMS },
   { "vert", DEBUG_VERTS },
   { "dri", DEBUG_DRI },
   { "dma", DEBUG_DMA },
   { "san", DEBUG_SANITY },
   { "sleep", DEBUG_SLEEP },
   { "stats", DEBUG_STATS },
   { "tile", DEBUG_TILE },
   /* "sing" and "thre" both select single-threaded operation. */
   { "sing", DEBUG_SINGLE_THREAD },
   { "thre", DEBUG_SINGLE_THREAD },
   { "wm", DEBUG_WM },
   { "urb", DEBUG_URB },
   { "vs", DEBUG_VS },
   { NULL, 0 }
};
479
480
481 static void
482 intelInvalidateState(GLcontext * ctx, GLuint new_state)
483 {
484 struct intel_context *intel = intel_context(ctx);
485
486 _swrast_InvalidateState(ctx, new_state);
487 _swsetup_InvalidateState(ctx, new_state);
488 _vbo_InvalidateState(ctx, new_state);
489 _tnl_InvalidateState(ctx, new_state);
490 _tnl_invalidate_vertex_state(ctx, new_state);
491
492 intel->NewGLState |= new_state;
493
494 if (intel->vtbl.invalidate_state)
495 intel->vtbl.invalidate_state( intel, new_state );
496 }
497
498
/* Flush driver hook: push any pending rendering toward the hardware. */
void
intelFlush(GLcontext * ctx)
{
   struct intel_context *intel = intel_context(ctx);

   /* Software fallback rendering must be flushed first. */
   if (intel->Fallback)
      _swrast_flush(ctx);

   /* Pre-965 parts buffer vertices in the driver; fire them now. */
   if (!IS_965(intel->intelScreen->deviceID))
      INTEL_FIREVERTICES(intel);

   /* Emit a flush so that any frontbuffer rendering that might have occurred
    * lands onscreen in a timely manner, even if the X Server doesn't trigger
    * a flush for us.
    */
   intel_batchbuffer_emit_mi_flush(intel->batch);

   /* Only submit the batch if something has actually been emitted. */
   if (intel->batch->map != intel->batch->ptr)
      intel_batchbuffer_flush(intel->batch);
}
519
520 void
521 intelFinish(GLcontext * ctx)
522 {
523 struct gl_framebuffer *fb = ctx->DrawBuffer;
524 int i;
525
526 intelFlush(ctx);
527
528 for (i = 0; i < fb->_NumColorDrawBuffers; i++) {
529 struct intel_renderbuffer *irb;
530
531 irb = intel_renderbuffer(fb->_ColorDrawBuffers[i]);
532
533 if (irb->region)
534 dri_bo_wait_rendering(irb->region->buffer);
535 }
536 if (fb->_DepthBuffer) {
537 /* XXX: Wait on buffer idle */
538 }
539 }
540
/* BeginQuery hook (GL_ARB_occlusion_query): snapshot the PS_DEPTH_COUNT
 * register into q->Result; intelEndQuery() subtracts this baseline.
 * Requires an idle pipeline, hence the intelFinish().
 */
static void
intelBeginQuery(GLcontext *ctx, GLenum target, struct gl_query_object *q)
{
   struct intel_context *intel = intel_context( ctx );
   struct drm_i915_mmio io = {
      .read_write = I915_MMIO_READ,
      .reg = MMIO_REGS_PS_DEPTH_COUNT,
      .data = &q->Result
   };
   /* Tell the WM setup that statistics gathering must stay enabled. */
   intel->stats_wm++;
   intelFinish(&intel->ctx);
   drmCommandWrite(intel->driFd, DRM_I915_MMIO, &io, sizeof(io));
}
554
/* EndQuery hook: read PS_DEPTH_COUNT again and report the delta from the
 * value captured by intelBeginQuery() as the query result.
 */
static void
intelEndQuery(GLcontext *ctx, GLenum target, struct gl_query_object *q)
{
   struct intel_context *intel = intel_context( ctx );
   GLuint64EXT tmp;
   struct drm_i915_mmio io = {
      .read_write = I915_MMIO_READ,
      .reg = MMIO_REGS_PS_DEPTH_COUNT,
      .data = &tmp
   };
   /* Drain the pipeline so the counter covers all queued rendering. */
   intelFinish(&intel->ctx);
   drmCommandWrite(intel->driFd, DRM_I915_MMIO, &io, sizeof(io));
   q->Result = tmp - q->Result;
   q->Ready = GL_TRUE;
   /* Balance the stats_wm increment from intelBeginQuery(). */
   intel->stats_wm--;
}
571
/**
 * Driver-specific fence emit implementation for the fake memory manager.
 *
 * \param private  really a struct intel_context pointer.
 * \return the IRQ cookie identifying the emitted fence.
 */
static unsigned int
intel_fence_emit(void *private)
{
   struct intel_context *const intel = private;

   /* XXX: Need to emit a flush, if we haven't already (at least with the
    * current batchbuffer implementation, we have).
    */
   return intelEmitIrqLocked(intel);
}
587
/**
 * Driver-specific fence wait implementation for the fake memory manager.
 *
 * \param private  really a struct intel_context pointer.
 * \param cookie   fence cookie from intel_fence_emit().
 * \return 0 always (waits until the IRQ has signalled).
 */
static int
intel_fence_wait(void *private, unsigned int cookie)
{
   struct intel_context *const intel = private;

   intelWaitIrq(intel, cookie);
   return 0;
}
598
599 static GLboolean
600 intel_init_bufmgr(struct intel_context *intel)
601 {
602 intelScreenPrivate *intelScreen = intel->intelScreen;
603 GLboolean gem_disable = getenv("INTEL_NO_GEM") != NULL;
604 int gem_kernel = 0;
605 GLboolean gem_supported;
606 struct drm_i915_getparam gp;
607
608 gp.param = I915_PARAM_HAS_GEM;
609 gp.value = &gem_kernel;
610
611 (void) drmCommandWriteRead(intel->driFd, DRM_I915_GETPARAM, &gp, sizeof(gp));
612
613 /* If we've got a new enough DDX that's initializing GEM and giving us
614 * object handles for the shared buffers, use that.
615 */
616 intel->ttm = GL_FALSE;
617 if (intel->intelScreen->driScrnPriv->dri2.enabled)
618 gem_supported = GL_TRUE;
619 else if (intel->intelScreen->driScrnPriv->ddx_version.minor >= 9 &&
620 gem_kernel &&
621 intel->intelScreen->front.bo_handle != -1)
622 gem_supported = GL_TRUE;
623 else
624 gem_supported = GL_FALSE;
625
626 if (!gem_disable && gem_supported) {
627 int bo_reuse_mode;
628 intel->bufmgr = intel_bufmgr_gem_init(intel->driFd,
629 BATCH_SZ);
630 if (intel->bufmgr != NULL)
631 intel->ttm = GL_TRUE;
632
633 bo_reuse_mode = driQueryOptioni(&intel->optionCache, "bo_reuse");
634 switch (bo_reuse_mode) {
635 case DRI_CONF_BO_REUSE_DISABLED:
636 break;
637 case DRI_CONF_BO_REUSE_ALL:
638 intel_bufmgr_gem_enable_reuse(intel->bufmgr);
639 break;
640 }
641 }
642 /* Otherwise, use the classic buffer manager. */
643 if (intel->bufmgr == NULL) {
644 if (gem_disable) {
645 fprintf(stderr, "GEM disabled. Using classic.\n");
646 } else {
647 fprintf(stderr, "Failed to initialize GEM. "
648 "Falling back to classic.\n");
649 }
650
651 if (intelScreen->tex.size == 0) {
652 fprintf(stderr, "[%s:%u] Error initializing buffer manager.\n",
653 __func__, __LINE__);
654 return GL_FALSE;
655 }
656
657 intel->bufmgr = intel_bufmgr_fake_init(intelScreen->tex.offset,
658 intelScreen->tex.map,
659 intelScreen->tex.size,
660 intel_fence_emit,
661 intel_fence_wait,
662 intel);
663 }
664
665 /* XXX bufmgr should be per-screen, not per-context */
666 intelScreen->ttm = intel->ttm;
667
668 return GL_TRUE;
669 }
670
671 void
672 intelInitDriverFunctions(struct dd_function_table *functions)
673 {
674 _mesa_init_driver_functions(functions);
675
676 functions->Flush = intelFlush;
677 functions->Finish = intelFinish;
678 functions->GetString = intelGetString;
679 functions->UpdateState = intelInvalidateState;
680 functions->Viewport = intel_viewport;
681
682 functions->CopyColorTable = _swrast_CopyColorTable;
683 functions->CopyColorSubTable = _swrast_CopyColorSubTable;
684 functions->CopyConvolutionFilter1D = _swrast_CopyConvolutionFilter1D;
685 functions->CopyConvolutionFilter2D = _swrast_CopyConvolutionFilter2D;
686
687 functions->BeginQuery = intelBeginQuery;
688 functions->EndQuery = intelEndQuery;
689
690 intelInitTextureFuncs(functions);
691 intelInitStateFuncs(functions);
692 intelInitBufferFuncs(functions);
693 intelInitPixelFuncs(functions);
694 }
695
696
/**
 * Shared context-creation path for i830/i915/i965: initializes the core
 * Mesa context, wires the DRI handles into the intel_context, selects a
 * buffer manager, configures limits from driconf, and brings up the
 * swrast/tnl helper modules.
 *
 * \return GL_FALSE when the Mesa context or the buffer manager could not
 *         be initialized (the chip-specific caller then aborts creation).
 */
GLboolean
intelInitContext(struct intel_context *intel,
                 const __GLcontextModes * mesaVis,
                 __DRIcontextPrivate * driContextPriv,
                 void *sharedContextPrivate,
                 struct dd_function_table *functions)
{
   GLcontext *ctx = &intel->ctx;
   GLcontext *shareCtx = (GLcontext *) sharedContextPrivate;
   __DRIscreenPrivate *sPriv = driContextPriv->driScreenPriv;
   intelScreenPrivate *intelScreen = (intelScreenPrivate *) sPriv->private;
   /* Locate the i915 SAREA block inside the shared SAREA page; volatile
    * because the X server updates it behind our back.
    */
   volatile struct drm_i915_sarea *saPriv = (struct drm_i915_sarea *)
      (((GLubyte *) sPriv->pSAREA) + intelScreen->sarea_priv_offset);
   int fthrottle_mode;

   if (!_mesa_initialize_context(&intel->ctx, mesaVis, shareCtx,
                                 functions, (void *) intel)) {
      _mesa_printf("%s: failed to init mesa context\n", __FUNCTION__);
      return GL_FALSE;
   }

   /* Cross-link the DRI context and our private context structure. */
   driContextPriv->driverPrivate = intel;
   intel->intelScreen = intelScreen;
   intel->driScreen = sPriv;
   intel->sarea = saPriv;
   intel->driContext = driContextPriv;

   /* Dri stuff */
   intel->hHWContext = driContextPriv->hHWContext;
   intel->driFd = sPriv->fd;
   intel->driHwLock = sPriv->lock;

   intel->width = intelScreen->width;
   intel->height = intelScreen->height;

   /* Load driconf options for the right driver section. */
   driParseConfigFiles(&intel->optionCache, &intelScreen->optionCache,
                       intel->driScreen->myNum,
                       IS_965(intelScreen->deviceID) ? "i965" : "i915");
   /* NOTE(review): 865G appears to need small batches — confirm the
    * hardware erratum behind the 4096-byte cap.
    */
   if (intelScreen->deviceID == PCI_CHIP_I865_G)
      intel->maxBatchSize = 4096;
   else
      intel->maxBatchSize = BATCH_SZ;

   if (!intel_init_bufmgr(intel))
      return GL_FALSE;

   ctx->Const.MaxTextureMaxAnisotropy = 2.0;

   /* This doesn't yet catch all non-conformant rendering, but it's a
    * start.
    */
   if (getenv("INTEL_STRICT_CONFORMANCE")) {
      intel->strict_conformance = 1;
   }

   /* Strict conformance restricts line widths to the spec minimum. */
   if (intel->strict_conformance) {
      ctx->Const.MinLineWidth = 1.0;
      ctx->Const.MinLineWidthAA = 1.0;
      ctx->Const.MaxLineWidth = 1.0;
      ctx->Const.MaxLineWidthAA = 1.0;
      ctx->Const.LineWidthGranularity = 1.0;
   }
   else {
      ctx->Const.MinLineWidth = 1.0;
      ctx->Const.MinLineWidthAA = 1.0;
      ctx->Const.MaxLineWidth = 5.0;
      ctx->Const.MaxLineWidthAA = 5.0;
      ctx->Const.LineWidthGranularity = 0.5;
   }

   ctx->Const.MinPointSize = 1.0;
   ctx->Const.MinPointSizeAA = 1.0;
   ctx->Const.MaxPointSize = 255.0;
   ctx->Const.MaxPointSizeAA = 3.0;
   ctx->Const.PointSizeGranularity = 1.0;

   /* reinitialize the context point state.
    * It depend on constants in __GLcontextRec::Const
    */
   _mesa_init_point(ctx);

   ctx->Const.MaxColorAttachments = 4;  /* XXX FBO: review this */

   /* Initialize the software rasterizer and helper modules. */
   _swrast_CreateContext(ctx);
   _vbo_CreateContext(ctx);
   _tnl_CreateContext(ctx);
   _swsetup_CreateContext(ctx);

   /* Configure swrast to match hardware characteristics: */
   _swrast_allow_pixel_fog(ctx, GL_FALSE);
   _swrast_allow_vertex_fog(ctx, GL_TRUE);

   intel->hw_stencil = mesaVis->stencilBits && mesaVis->depthBits == 24;
   intel->hw_stipple = 1;

   /* XXX FBO: this doesn't seem to be used anywhere */
   switch (mesaVis->depthBits) {
   case 0:                     /* what to do in this case? */
   case 16:
      intel->polygon_offset_scale = 1.0;
      break;
   case 24:
      intel->polygon_offset_scale = 2.0;     /* req'd to pass glean */
      break;
   default:
      assert(0);
      break;
   }

   if (IS_965(intelScreen->deviceID))
      intel->polygon_offset_scale /= 0xffff;

   intel->RenderIndex = ~0;

   /* Throttling configuration: IRQs if available and requested, else
    * usleep-based polling.
    */
   fthrottle_mode = driQueryOptioni(&intel->optionCache, "fthrottle_mode");
   intel->irqsEmitted = 0;

   intel->do_irqs = (intel->intelScreen->irq_active &&
                     fthrottle_mode == DRI_CONF_FTHROTTLE_IRQS);

   intel->do_usleeps = (fthrottle_mode == DRI_CONF_FTHROTTLE_USLEEPS);

   _math_matrix_ctr(&intel->ViewportMatrix);

   /* 965 requires working IRQs; refuse to run without them. */
   if (IS_965(intelScreen->deviceID) && !intel->intelScreen->irq_active) {
      _mesa_printf("IRQs not active.  Exiting\n");
      exit(1);
   }

   intelInitExtensions(ctx, GL_FALSE);

   INTEL_DEBUG = driParseDebugString(getenv("INTEL_DEBUG"), debug_control);
   if (INTEL_DEBUG & DEBUG_BUFMGR)
      dri_bufmgr_set_debug(intel->bufmgr, GL_TRUE);

   /* DRI1 only: static front/back/depth regions from the SAREA. */
   if (!sPriv->dri2.enabled)
      intel_recreate_static_regions(intel);

   intel->batch = intel_batchbuffer_alloc(intel);

   intel_bufferobj_init(intel);
   intel_fbo_init(intel);

   if (intel->ctx.Mesa_DXTn) {
      _mesa_enable_extension(ctx, "GL_EXT_texture_compression_s3tc");
      _mesa_enable_extension(ctx, "GL_S3_s3tc");
   }
   else if (driQueryOptionb(&intel->optionCache, "force_s3tc_enable")) {
      _mesa_enable_extension(ctx, "GL_EXT_texture_compression_s3tc");
   }

   intel->prim.primitive = ~0;

   /* Force all software fallbacks */
   if (driQueryOptionb(&intel->optionCache, "no_rast")) {
      fprintf(stderr, "disabling 3D rasterization\n");
      intel->no_rast = 1;
   }

   /* Disable all hardware rendering (skip emitting batches and fences/waits
    * to the kernel)
    */
   intel->no_hw = getenv("INTEL_NO_HW") != NULL;

   return GL_TRUE;
}
864
/**
 * Destroy an intel rendering context: flush pending rendering, tear down
 * the chip-specific state, the helper modules, the batchbuffer and the
 * buffer manager, then free the core Mesa context data.
 */
void
intelDestroyContext(__DRIcontextPrivate * driContextPriv)
{
   struct intel_context *intel =
      (struct intel_context *) driContextPriv->driverPrivate;

   assert(intel);               /* should never be null */
   if (intel) {
      GLboolean release_texture_heaps;

      /* Flush any buffered vertices before tearing the context down. */
      INTEL_FIREVERTICES(intel);

      /* Chip-specific (i830/i915/brw) destruction first. */
      intel->vtbl.destroy(intel);

      /* We are the last context in the share group iff RefCount == 1. */
      release_texture_heaps = (intel->ctx.Shared->RefCount == 1);
      _swsetup_DestroyContext(&intel->ctx);
      _tnl_DestroyContext(&intel->ctx);
      _vbo_DestroyContext(&intel->ctx);

      _swrast_DestroyContext(&intel->ctx);
      intel->Fallback = 0;      /* don't call _swrast_Flush later */

      intel_batchbuffer_free(intel->batch);
      free(intel->prim.vb);

      if (release_texture_heaps) {
         /* This share group is about to go away, free our private
          * texture object data.
          */
         if (INTEL_DEBUG & DEBUG_TEXTURE)
            fprintf(stderr, "do something to free texture heaps\n");
      }

      /* free the Mesa context */
      _mesa_free_context_data(&intel->ctx);

      dri_bufmgr_destroy(intel->bufmgr);
   }
}
904
905 GLboolean
906 intelUnbindContext(__DRIcontextPrivate * driContextPriv)
907 {
908 return GL_TRUE;
909 }
910
/**
 * Bind the context to the given draw/read drawables (or unbind everything
 * when \p driContextPriv is NULL).  Refreshes DRI2 buffers, patches the
 * DRI1 static regions into the renderbuffers, syncs framebuffer sizes,
 * and (re)initializes vblank state when the draw drawable changed.
 */
GLboolean
intelMakeCurrent(__DRIcontextPrivate * driContextPriv,
                 __DRIdrawablePrivate * driDrawPriv,
                 __DRIdrawablePrivate * driReadPriv)
{
   __DRIscreenPrivate *psp = driDrawPriv->driScreenPriv;

   if (driContextPriv) {
      struct intel_context *intel =
         (struct intel_context *) driContextPriv->driverPrivate;
      struct intel_framebuffer *intel_fb =
	 (struct intel_framebuffer *) driDrawPriv->driverPrivate;
      GLframebuffer *readFb = (GLframebuffer *) driReadPriv->driverPrivate;

      /* DRI2: pick up the current buffer attachments from the loader. */
      intel_update_renderbuffers(driContextPriv, driDrawPriv);
      if (driDrawPriv != driReadPriv)
	 intel_update_renderbuffers(driContextPriv, driReadPriv);

      /* XXX FBO temporary fix-ups! */
      /* if the renderbuffers don't have regions, init them from the context */
      if (!driContextPriv->driScreenPriv->dri2.enabled) {
	 struct intel_renderbuffer *irbDepth
	    = intel_get_renderbuffer(&intel_fb->Base, BUFFER_DEPTH);
	 struct intel_renderbuffer *irbStencil
	    = intel_get_renderbuffer(&intel_fb->Base, BUFFER_STENCIL);

	 if (intel_fb->color_rb[0]) {
	    intel_renderbuffer_set_region(intel_fb->color_rb[0],
					  intel->front_region);
	 }
	 if (intel_fb->color_rb[1]) {
	    intel_renderbuffer_set_region(intel_fb->color_rb[1],
					  intel->back_region);
	 }
#if 0
	 if (intel_fb->color_rb[2]) {
	    intel_renderbuffer_set_region(intel_fb->color_rb[2],
					  intel->third_region);
	 }
#endif
	 if (irbDepth) {
	    intel_renderbuffer_set_region(irbDepth, intel->depth_region);
	 }
	 /* Stencil shares the combined depth/stencil region. */
	 if (irbStencil) {
	    intel_renderbuffer_set_region(irbStencil, intel->depth_region);
	 }
      }

      /* set GLframebuffer size to match window, if needed */
      driUpdateFramebufferSize(&intel->ctx, driDrawPriv);

      if (driReadPriv != driDrawPriv) {
	 driUpdateFramebufferSize(&intel->ctx, driReadPriv);
      }

      _mesa_make_current(&intel->ctx, &intel_fb->Base, readFb);

      /* The drawbuffer won't always be updated by _mesa_make_current:
       */
      if (intel->ctx.DrawBuffer == &intel_fb->Base) {

	 if (intel->driReadDrawable != driReadPriv)
	    intel->driReadDrawable = driReadPriv;

	 if (intel->driDrawable != driDrawPriv) {
	    /* swap_interval == -1 marks a drawable whose vblank state has
	     * never been initialized.
	     */
	    if (driDrawPriv->swap_interval == (unsigned)-1) {
	       int i;

	       driDrawPriv->vblFlags = (intel->intelScreen->irq_active != 0)
		  ? driGetDefaultVBlankFlags(&intel->optionCache)
		 : VBLANK_FLAG_NO_IRQ;

	       (*psp->systemTime->getUST) (&intel_fb->swap_ust);
	       driDrawableInitVBlank(driDrawPriv);
	       intel_fb->vbl_waited = driDrawPriv->vblSeq;

	       /* Seed the pending-vblank sequence on each color buffer
		* (2 or 3 buffers depending on triple-buffering).
		*/
	       for (i = 0; i < (intel->intelScreen->third.handle ? 3 : 2); i++) {
		  if (intel_fb->color_rb[i])
		     intel_fb->color_rb[i]->vbl_pending = driDrawPriv->vblSeq;
	       }
	    }
	    intel->driDrawable = driDrawPriv;
	    intelWindowMoved(intel);
	 }

	 intel_draw_buffer(&intel->ctx, &intel_fb->Base);
      }
   }
   else {
      _mesa_make_current(NULL, NULL, NULL);
   }

   return GL_TRUE;
}
1005
1006 static void
1007 intelContendedLock(struct intel_context *intel, GLuint flags)
1008 {
1009 __DRIdrawablePrivate *dPriv = intel->driDrawable;
1010 __DRIscreenPrivate *sPriv = intel->driScreen;
1011 volatile struct drm_i915_sarea *sarea = intel->sarea;
1012 int me = intel->hHWContext;
1013
1014 drmGetLock(intel->driFd, intel->hHWContext, flags);
1015 intel->locked = 1;
1016
1017 if (INTEL_DEBUG & DEBUG_LOCK)
1018 _mesa_printf("%s - got contended lock\n", __progname);
1019
1020 /* If the window moved, may need to set a new cliprect now.
1021 *
1022 * NOTE: This releases and regains the hw lock, so all state
1023 * checking must be done *after* this call:
1024 */
1025 if (dPriv)
1026 DRI_VALIDATE_DRAWABLE_INFO(sPriv, dPriv);
1027
1028 if (sarea && sarea->ctxOwner != me) {
1029 if (INTEL_DEBUG & DEBUG_BUFMGR) {
1030 fprintf(stderr, "Lost Context: sarea->ctxOwner %x me %x\n",
1031 sarea->ctxOwner, me);
1032 }
1033 sarea->ctxOwner = me;
1034 }
1035
1036 /* If the last consumer of the texture memory wasn't us, notify the fake
1037 * bufmgr and record the new owner. We should have the memory shared
1038 * between contexts of a single fake bufmgr, but this will at least make
1039 * things correct for now.
1040 */
1041 if (!intel->ttm && sarea->texAge != intel->hHWContext) {
1042 sarea->texAge = intel->hHWContext;
1043 intel_bufmgr_fake_contended_lock_take(intel->bufmgr);
1044 if (INTEL_DEBUG & DEBUG_BATCH)
1045 intel_decode_context_reset();
1046 if (INTEL_DEBUG & DEBUG_BUFMGR)
1047 fprintf(stderr, "Lost Textures: sarea->texAge %x hw context %x\n",
1048 sarea->ctxOwner, intel->hHWContext);
1049 }
1050
1051 if (sarea->width != intel->width || sarea->height != intel->height) {
1052 int numClipRects = intel->numClipRects;
1053
1054 /*
1055 * FIXME: Really only need to do this when drawing to a
1056 * common back- or front buffer.
1057 */
1058
1059 /*
1060 * This will essentially drop the outstanding batchbuffer on
1061 * the floor.
1062 */
1063 intel->numClipRects = 0;
1064
1065 if (intel->Fallback)
1066 _swrast_flush(&intel->ctx);
1067
1068 if (!IS_965(intel->intelScreen->deviceID))
1069 INTEL_FIREVERTICES(intel);
1070
1071 if (intel->batch->map != intel->batch->ptr)
1072 intel_batchbuffer_flush(intel->batch);
1073
1074 intel->numClipRects = numClipRects;
1075
1076 /* force window update */
1077 intel->lastStamp = 0;
1078
1079 intel->width = sarea->width;
1080 intel->height = sarea->height;
1081 }
1082
1083 /* Drawable changed?
1084 */
1085 if (dPriv && intel->lastStamp != dPriv->lastStamp) {
1086 intelWindowMoved(intel);
1087 intel->lastStamp = dPriv->lastStamp;
1088 }
1089 }
1090
1091
/* Serializes LOCK_HARDWARE/UNLOCK_HARDWARE across the threads of this
 * process; the DRM lock below serializes against other processes.
 */
_glthread_DECLARE_STATIC_MUTEX(lockMutex);

/* Lock the hardware and validate our state.
 */
void LOCK_HARDWARE( struct intel_context *intel )
{
   __DRIdrawable *dPriv = intel->driDrawable;
   __DRIscreen *sPriv = intel->driScreen;
   char __ret = 0;
   struct intel_framebuffer *intel_fb = NULL;
   struct intel_renderbuffer *intel_rb = NULL;

   _glthread_LOCK_MUTEX(lockMutex);
   assert(!intel->locked);
   intel->locked = 1;

   /* Find the renderbuffer we are currently drawing to, if any. */
   if (intel->driDrawable) {
      intel_fb = intel->driDrawable->driverPrivate;

      if (intel_fb)
	 intel_rb =
	    intel_get_renderbuffer(&intel_fb->Base,
				   intel_fb->Base._ColorDrawBufferIndexes[0]);
   }

   /* Throttle on a pending vblank for that buffer.  The (1<<23)
    * comparison handles 32-bit sequence-counter wraparound.
    */
   if (intel_rb && dPriv->vblFlags &&
       !(dPriv->vblFlags & VBLANK_FLAG_NO_IRQ) &&
       (intel_fb->vbl_waited - intel_rb->vbl_pending) > (1<<23)) {
      drmVBlank vbl;

      vbl.request.type = DRM_VBLANK_ABSOLUTE;

      if ( dPriv->vblFlags & VBLANK_FLAG_SECONDARY ) {
	 vbl.request.type |= DRM_VBLANK_SECONDARY;
      }

      vbl.request.sequence = intel_rb->vbl_pending;
      drmWaitVBlank(intel->driFd, &vbl);
      intel_fb->vbl_waited = vbl.reply.sequence;
   }

   /* DRI2 has no global hardware lock; only DRI1 takes the DRM lock.
    * Fast path: CAS the lock; fall into the contended path on failure.
    */
   if (!sPriv->dri2.enabled) {
      DRM_CAS(intel->driHwLock, intel->hHWContext,
	      (DRM_LOCK_HELD|intel->hHWContext), __ret);

      if (__ret)
	 intelContendedLock( intel, 0 );
   }


   if (INTEL_DEBUG & DEBUG_LOCK)
      _mesa_printf("%s - locked\n", __progname);
}
1145
1146
/* Unlock the hardware using the global current context
 */
void UNLOCK_HARDWARE( struct intel_context *intel )
{
   __DRIscreen *sPriv = intel->driScreen;

   /* Give the chip-specific code a chance to flush state first. */
   intel->vtbl.note_unlock( intel );
   intel->locked = 0;

   /* Release the DRM lock (DRI1 only) before the process-local mutex —
    * the reverse of the acquisition order in LOCK_HARDWARE().
    */
   if (!sPriv->dri2.enabled)
      DRM_UNLOCK(intel->driFd, intel->driHwLock, intel->hHWContext);

   _glthread_UNLOCK_MUTEX(lockMutex);

   if (INTEL_DEBUG & DEBUG_LOCK)
      _mesa_printf("%s - unlocked\n", __progname);

   /**
    * Nothing should be left in batch outside of LOCK/UNLOCK which references
    * cliprects.
    */
   if (intel->batch->cliprect_mode == REFERENCES_CLIPRECTS)
      intel_batchbuffer_flush(intel->batch);
}
1171