1 /**************************************************************************
3 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 **************************************************************************/
32 #include "simple_list.h"
33 #include "extensions.h"
34 #include "framebuffer.h"
38 #include "swrast/swrast.h"
39 #include "swrast_setup/swrast_setup.h"
42 #include "tnl/t_pipeline.h"
43 #include "tnl/t_vertex.h"
45 #include "drivers/common/driverfuncs.h"
47 #include "intel_screen.h"
51 #include "intel_chipset.h"
52 #include "intel_buffers.h"
53 #include "intel_tex.h"
54 #include "intel_ioctl.h"
55 #include "intel_batchbuffer.h"
56 #include "intel_blit.h"
57 #include "intel_pixel.h"
58 #include "intel_regions.h"
59 #include "intel_buffer_objects.h"
60 #include "intel_fbo.h"
61 #include "intel_decode.h"
62 #include "intel_bufmgr.h"
64 #include "drirenderbuffer.h"
67 #include "xmlpool.h" /* for symbolic values of enum-type options */
/* Bitmask of enabled debug-output categories (DEBUG_* bits; see the
 * debug_control[] table below).  Set at context-creation time from the
 * INTEL_DEBUG environment variable.  Off by default. */
int INTEL_DEBUG = 0;
72 #define need_GL_NV_point_sprite
73 #define need_GL_ARB_multisample
74 #define need_GL_ARB_point_parameters
75 #define need_GL_ARB_texture_compression
76 #define need_GL_ARB_vertex_buffer_object
77 #define need_GL_ARB_vertex_program
78 #define need_GL_ARB_window_pos
79 #define need_GL_ARB_occlusion_query
80 #define need_GL_EXT_blend_color
81 #define need_GL_EXT_blend_equation_separate
82 #define need_GL_EXT_blend_func_separate
83 #define need_GL_EXT_blend_minmax
84 #define need_GL_EXT_cull_vertex
85 #define need_GL_EXT_fog_coord
86 #define need_GL_EXT_framebuffer_object
87 #define need_GL_EXT_multi_draw_arrays
88 #define need_GL_EXT_secondary_color
89 #define need_GL_NV_vertex_program
90 #define need_GL_ATI_separate_stencil
91 #define need_GL_EXT_point_parameters
92 #define need_GL_VERSION_2_0
93 #define need_GL_VERSION_2_1
94 #define need_GL_ARB_shader_objects
95 #define need_GL_ARB_vertex_shader
97 #include "extension_helper.h"
/* Driver build-date string reported as part of the renderer string. */
#define DRIVER_DATE "20080716"
/* Variant reported when the GEM buffer manager is in use (intel->ttm). */
#define DRIVER_DATE_GEM "GEM " DRIVER_DATE
/**
 * ctx->Driver.GetString hook: answer GL_VENDOR / GL_RENDERER queries.
 *
 * For the renderer case, picks a human-readable chipset name from the
 * screen's PCI device ID and combines it (via driGetRendererString)
 * with the driver date — the "GEM" date variant when intel->ttm is
 * set.  The composed string lives in a static buffer, so it is only
 * valid until the next call and is not thread-safe.
 *
 * NOTE(review): the case labels/breaks surrounding several chipset
 * assignments are not visible here — confirm each case ends with a
 * break in the full file.
 */
static const GLubyte *
intelGetString(GLcontext * ctx, GLenum name)
   const struct intel_context *const intel = intel_context(ctx);
   static char buffer[128];   /* holds the composed renderer string */

   return (GLubyte *) "Tungsten Graphics, Inc";

   switch (intel->intelScreen->deviceID) {
      chipset = "Intel(R) 845G";
   case PCI_CHIP_I830_M:
      chipset = "Intel(R) 830M";
   case PCI_CHIP_I855_GM:
      chipset = "Intel(R) 852GM/855GM";
   case PCI_CHIP_I865_G:
      chipset = "Intel(R) 865G";
   case PCI_CHIP_I915_G:
      chipset = "Intel(R) 915G";
   case PCI_CHIP_E7221_G:
      chipset = "Intel (R) E7221G (i915)";
   case PCI_CHIP_I915_GM:
      chipset = "Intel(R) 915GM";
   case PCI_CHIP_I945_G:
      chipset = "Intel(R) 945G";
   case PCI_CHIP_I945_GM:
      chipset = "Intel(R) 945GM";
   case PCI_CHIP_I945_GME:
      chipset = "Intel(R) 945GME";
      chipset = "Intel(R) G33";
      chipset = "Intel(R) Q35";
      chipset = "Intel(R) Q33";
   case PCI_CHIP_I965_Q:
      chipset = "Intel(R) 965Q";
   case PCI_CHIP_I965_G:
   case PCI_CHIP_I965_G_1:
      chipset = "Intel(R) 965G";
   case PCI_CHIP_I946_GZ:
      chipset = "Intel(R) 946GZ";
   case PCI_CHIP_I965_GM:
      chipset = "Intel(R) 965GM";
   case PCI_CHIP_I965_GME:
      chipset = "Intel(R) 965GME/GLE";
   case PCI_CHIP_GM45_GM:
      /* NOTE(review): string contains a mojibake of the (R) symbol —
       * present as-is upstream; left untouched. */
      chipset = "Mobile IntelĀ® GM45 Express Chipset";
   case PCI_CHIP_IGD_E_G:
      chipset = "Intel(R) Integrated Graphics Device";
      chipset = "Intel(R) G45/G43";
      chipset = "Intel(R) Q45/Q43";
      chipset = "Unknown Intel Chipset";

   (void) driGetRendererString(buffer, chipset,
                               (intel->ttm) ? DRIVER_DATE_GEM : DRIVER_DATE,
   return (GLubyte *) buffer;
/**
 * DRI2 buffer refresh: ask the loader (via getBuffers) for the current
 * front/back/depth/stencil buffer handles of `drawable`, wrap each new
 * handle in an intel_region, and attach the regions to the
 * framebuffer's renderbuffers.  Buffers whose flink name is unchanged
 * are left alone; a stencil attachment reuses the depth region when
 * one was seen earlier in the same pass.  Also resets the drawable's
 * clip rects to cover the whole window and updates the GL
 * framebuffer's notion of the window size.
 */
intel_update_renderbuffers(__DRIcontext *context, __DRIdrawable *drawable)
   struct intel_framebuffer *intel_fb = drawable->driverPrivate;
   struct intel_renderbuffer *rb;
   struct intel_region *region, *depth_region;
   struct intel_context *intel = context->driverPrivate;
   __DRIbuffer *buffers;
   unsigned int attachments[10];
   const char *region_name;

   if (INTEL_DEBUG & DEBUG_DRI)
      fprintf(stderr, "enter %s, drawable %p\n", __func__, drawable);

   screen = intel->intelScreen->driScrnPriv;

   /* Build the attachment list: only request buffers the framebuffer
    * actually has. */
   if (intel_fb->color_rb[0])
      attachments[i++] = __DRI_BUFFER_FRONT_LEFT;
   if (intel_fb->color_rb[1])
      attachments[i++] = __DRI_BUFFER_BACK_LEFT;
   if (intel_get_renderbuffer(&intel_fb->Base, BUFFER_DEPTH))
      attachments[i++] = __DRI_BUFFER_DEPTH;
   if (intel_get_renderbuffer(&intel_fb->Base, BUFFER_STENCIL))
      attachments[i++] = __DRI_BUFFER_STENCIL;

   buffers = (*screen->dri2.loader->getBuffers)(drawable,
                                                drawable->loaderPrivate);

   /* Under DRI2 the client always renders to the full window: one clip
    * rect covering the drawable for both front and back. */
   drawable->numClipRects = 1;
   drawable->pClipRects[0].x1 = 0;
   drawable->pClipRects[0].y1 = 0;
   drawable->pClipRects[0].x2 = drawable->w;
   drawable->pClipRects[0].y2 = drawable->h;
   drawable->numBackClipRects = 1;
   drawable->pBackClipRects[0].x1 = 0;
   drawable->pBackClipRects[0].y1 = 0;
   drawable->pBackClipRects[0].x2 = drawable->w;
   drawable->pBackClipRects[0].y2 = drawable->h;

   for (i = 0; i < count; i++) {
      switch (buffers[i].attachment) {
      case __DRI_BUFFER_FRONT_LEFT:
         rb = intel_fb->color_rb[0];
         region_name = "dri2 front buffer";
      case __DRI_BUFFER_BACK_LEFT:
         rb = intel_fb->color_rb[1];
         region_name = "dri2 back buffer";
      case __DRI_BUFFER_DEPTH:
         rb = intel_get_renderbuffer(&intel_fb->Base, BUFFER_DEPTH);
         region_name = "dri2 depth buffer";
      case __DRI_BUFFER_STENCIL:
         rb = intel_get_renderbuffer(&intel_fb->Base, BUFFER_STENCIL);
         region_name = "dri2 stencil buffer";
      case __DRI_BUFFER_ACCUM:
              "unhandled buffer attach event, attacment type %d\n",
              buffers[i].attachment);

      /* Skip re-wrapping if the kernel handle hasn't changed. */
      intel_bo_flink(rb->region->buffer, &name);
      if (name == buffers[i].name)

      if (INTEL_DEBUG & DEBUG_DRI)
              "attaching buffer %d, at %d, cpp %d, pitch %d\n",
              buffers[i].name, buffers[i].attachment,
              buffers[i].cpp, buffers[i].pitch);

      if (buffers[i].attachment == __DRI_BUFFER_STENCIL && depth_region) {
         /* Packed depth/stencil: stencil shares the depth region. */
         if (INTEL_DEBUG & DEBUG_DRI)
            fprintf(stderr, "(reusing depth buffer as stencil)\n");
         intel_region_reference(&region, depth_region);
         region = intel_region_alloc_for_handle(intel, buffers[i].cpp,
                                                buffers[i].pitch / buffers[i].cpp,

      /* Remember the depth region so a later stencil attachment in this
       * same loop can reuse it. */
      if (buffers[i].attachment == __DRI_BUFFER_DEPTH)
         depth_region = region;

      intel_renderbuffer_set_region(rb, region);
      intel_region_release(&region);

   driUpdateFramebufferSize(&intel->ctx, drawable);
/**
 * ctx->Driver.Viewport hook.  Only active under DRI2; used as the
 * signal that the window may have moved or been resized: refreshes the
 * renderbuffers from the loader and re-validates the draw buffer.
 * The hook temporarily clears ctx->Driver.Viewport around the updates
 * — presumably to avoid re-entering itself from the calls below
 * (TODO confirm) — and restores it afterwards.
 */
intel_viewport(GLcontext *ctx, GLint x, GLint y, GLsizei w, GLsizei h)
   struct intel_context *intel = intel_context(ctx);
   __DRIcontext *driContext = intel->driContext;

   if (!driContext->driScreenPriv->dri2.enabled)

   intel_update_renderbuffers(driContext, driContext->driDrawablePriv);
   if (driContext->driDrawablePriv != driContext->driReadablePriv)
      intel_update_renderbuffers(driContext, driContext->driReadablePriv);

   ctx->Driver.Viewport = NULL;
   intel->driDrawable = driContext->driDrawablePriv;
   intelWindowMoved(intel);
   intel_draw_buffer(ctx, intel->ctx.DrawBuffer);
   ctx->Driver.Viewport = intel_viewport;
/**
 * Extension strings exported by the intel driver.
 *
 * Extensions supported by every chipset this driver family handles;
 * hardware with extra capability gets the additional tables below
 * (ttm_extensions, arb_oc_extensions, brw_extensions).
 */
static const struct dri_extension card_extensions[] = {
   {"GL_ARB_multisample", GL_ARB_multisample_functions},
   {"GL_ARB_multitexture", NULL},
   {"GL_ARB_point_parameters", GL_ARB_point_parameters_functions},
   {"GL_NV_point_sprite", GL_NV_point_sprite_functions},
   {"GL_ARB_texture_border_clamp", NULL},
   {"GL_ARB_texture_compression", GL_ARB_texture_compression_functions},
   {"GL_ARB_texture_cube_map", NULL},
   {"GL_ARB_texture_env_add", NULL},
   {"GL_ARB_texture_env_combine", NULL},
   {"GL_ARB_texture_env_crossbar", NULL},
   {"GL_ARB_texture_env_dot3", NULL},
   {"GL_ARB_texture_mirrored_repeat", NULL},
   {"GL_ARB_texture_non_power_of_two", NULL},
   {"GL_ARB_texture_rectangle", NULL},
   {"GL_NV_texture_rectangle", NULL},
   {"GL_EXT_texture_rectangle", NULL},
   /* NOTE(review): duplicates the GL_ARB_point_parameters entry above. */
   {"GL_ARB_point_parameters", NULL},
   {"GL_ARB_vertex_buffer_object", GL_ARB_vertex_buffer_object_functions},
   {"GL_ARB_vertex_program", GL_ARB_vertex_program_functions},
   {"GL_ARB_window_pos", GL_ARB_window_pos_functions},
   {"GL_EXT_blend_color", GL_EXT_blend_color_functions},
   {"GL_EXT_blend_equation_separate",
    GL_EXT_blend_equation_separate_functions},
   {"GL_EXT_blend_func_separate", GL_EXT_blend_func_separate_functions},
   {"GL_EXT_blend_minmax", GL_EXT_blend_minmax_functions},
   {"GL_EXT_blend_logic_op", NULL},
   {"GL_EXT_blend_subtract", NULL},
   {"GL_EXT_cull_vertex", GL_EXT_cull_vertex_functions},
   {"GL_EXT_fog_coord", GL_EXT_fog_coord_functions},
   {"GL_EXT_multi_draw_arrays", GL_EXT_multi_draw_arrays_functions},
   {"GL_ATI_separate_stencil", GL_ATI_separate_stencil_functions},
#if 1                           /* XXX FBO temporary? */
   {"GL_EXT_packed_depth_stencil", NULL},
   {"GL_EXT_secondary_color", GL_EXT_secondary_color_functions},
   {"GL_EXT_stencil_wrap", NULL},
   {"GL_EXT_texture_edge_clamp", NULL},
   {"GL_EXT_texture_env_combine", NULL},
   {"GL_EXT_texture_env_dot3", NULL},
   {"GL_EXT_texture_filter_anisotropic", NULL},
   {"GL_EXT_texture_lod_bias", NULL},
   {"GL_3DFX_texture_compression_FXT1", NULL},
   {"GL_APPLE_client_storage", NULL},
   {"GL_MESA_pack_invert", NULL},
   {"GL_MESA_ycbcr_texture", NULL},
   {"GL_NV_blend_square", NULL},
   {"GL_NV_vertex_program", GL_NV_vertex_program_functions},
   {"GL_NV_vertex_program1_1", NULL},
   { "GL_SGIS_generate_mipmap", NULL },
/* Extensions enabled only for 965-class hardware (see the IS_965 check
 * in intelInitExtensions below). */
static const struct dri_extension brw_extensions[] = {
   { "GL_ARB_shading_language_100", GL_VERSION_2_0_functions},
   { "GL_ARB_shading_language_120", GL_VERSION_2_1_functions},
   { "GL_ARB_shader_objects", GL_ARB_shader_objects_functions},
   { "GL_ARB_vertex_shader", GL_ARB_vertex_shader_functions},
   { "GL_ARB_point_sprite", NULL},
   { "GL_ARB_fragment_shader", NULL},
   { "GL_ARB_draw_buffers", NULL},
   { "GL_ARB_depth_texture", NULL},
   { "GL_ARB_fragment_program", NULL},
   { "GL_ARB_shadow", NULL},
   { "GL_EXT_shadow_funcs", NULL},
   /* ARB extn won't work if not enabled */
   { "GL_ARB_fragment_program_shadow", NULL},
   { "GL_SGIX_depth_texture", NULL},
   { "GL_EXT_texture_sRGB", NULL},
#ifdef I915_MMIO_READ
/* Occlusion query relies on reading PS_DEPTH_COUNT over the DRM MMIO
 * ioctl (see intelBeginQuery/intelEndQuery below), so it is only
 * compiled in when I915_MMIO_READ is available. */
static const struct dri_extension arb_oc_extensions[] = {
   {"GL_ARB_occlusion_query", GL_ARB_occlusion_query_functions},
/* Extensions enabled only when the GEM/TTM buffer manager is active
 * (intel->ttm — see intelInitExtensions below). */
static const struct dri_extension ttm_extensions[] = {
   {"GL_EXT_framebuffer_object", GL_EXT_framebuffer_object_functions},
   {"GL_ARB_pixel_buffer_object", NULL},
/**
 * Initializes potential list of extensions if ctx == NULL, or actually
 * enables extensions for a context.
 *
 * Baseline card_extensions always apply; ttm_extensions require a GEM
 * context, arb_oc_extensions require MMIO-read support (and on 965
 * a new-enough DRM), and brw_extensions require 965-class hardware.
 */
void intelInitExtensions(GLcontext *ctx, GLboolean enable_imaging)
   struct intel_context *intel = ctx?intel_context(ctx):NULL;

   /* Disable imaging extension until convolution is working in teximage
    * paths. */
   enable_imaging = GL_FALSE;

   driInitExtensions(ctx, card_extensions, enable_imaging);

   if (intel == NULL || intel->ttm)
      driInitExtensions(ctx, ttm_extensions, GL_FALSE);

#ifdef I915_MMIO_READ
       (IS_965(intel->intelScreen->deviceID) &&
        intel->intelScreen->drmMinor >= 8))
      driInitExtensions(ctx, arb_oc_extensions, GL_FALSE);

   if (intel == NULL || IS_965(intel->intelScreen->deviceID))
      driInitExtensions(ctx, brw_extensions, GL_FALSE);
/* Maps INTEL_DEBUG environment-variable tokens to DEBUG_* bits; parsed
 * by driParseDebugString() in intelInitContext(). */
static const struct dri_debug_control debug_control[] = {
   { "tex", DEBUG_TEXTURE},
   { "state", DEBUG_STATE},
   { "ioctl", DEBUG_IOCTL},
   { "blit", DEBUG_BLIT},
   { "mip", DEBUG_MIPTREE},
   { "fall", DEBUG_FALLBACKS},
   { "verb", DEBUG_VERBOSE},
   { "bat", DEBUG_BATCH},
   { "pix", DEBUG_PIXEL},
   { "buf", DEBUG_BUFMGR},
   { "reg", DEBUG_REGION},
   { "lock", DEBUG_LOCK},
   { "sync", DEBUG_SYNC},
   { "prim", DEBUG_PRIMS},
   { "vert", DEBUG_VERTS},
   { "dri", DEBUG_DRI},
   { "dma", DEBUG_DMA},
   { "san", DEBUG_SANITY},
   { "sleep", DEBUG_SLEEP},
   { "stats", DEBUG_STATS},
   { "tile", DEBUG_TILE},
   /* "sing" and "thre" are aliases for the same bit. */
   { "sing", DEBUG_SINGLE_THREAD},
   { "thre", DEBUG_SINGLE_THREAD},
   { "urb", DEBUG_URB},
/**
 * ctx->Driver.UpdateState hook: propagate GL state-change notifications
 * to the swrast/swsetup/vbo/tnl helper modules, accumulate the dirty
 * bits in intel->NewGLState, and forward to the chip-specific vtbl
 * callback when one is installed.
 */
intelInvalidateState(GLcontext * ctx, GLuint new_state)
   struct intel_context *intel = intel_context(ctx);

   _swrast_InvalidateState(ctx, new_state);
   _swsetup_InvalidateState(ctx, new_state);
   _vbo_InvalidateState(ctx, new_state);
   _tnl_InvalidateState(ctx, new_state);
   _tnl_invalidate_vertex_state(ctx, new_state);

   intel->NewGLState |= new_state;

   if (intel->vtbl.invalidate_state)
      intel->vtbl.invalidate_state( intel, new_state );
/**
 * ctx->Driver.Flush hook: fire any queued vertices (pre-965 paths),
 * emit an MI_FLUSH, and submit the batchbuffer if it holds commands.
 */
intelFlush(GLcontext * ctx)
   struct intel_context *intel = intel_context(ctx);

   if (!IS_965(intel->intelScreen->deviceID))
      INTEL_FIREVERTICES(intel);

   /* Emit a flush so that any frontbuffer rendering that might have
    * occurred lands onscreen in a timely manner, even if the X Server
    * doesn't trigger one itself. */
   intel_batchbuffer_emit_mi_flush(intel->batch);

   /* Only submit if something has actually been emitted since the last
    * flush (map != ptr means the batch has contents). */
   if (intel->batch->map != intel->batch->ptr)
      intel_batchbuffer_flush(intel->batch);
/**
 * ctx->Driver.Finish hook: after flushing, block until rendering to
 * every bound color draw buffer has completed.  The depth buffer is
 * not waited on (see the XXX below).
 */
intelFinish(GLcontext * ctx)
   struct gl_framebuffer *fb = ctx->DrawBuffer;

   for (i = 0; i < fb->_NumColorDrawBuffers; i++) {
      struct intel_renderbuffer *irb;

      irb = intel_renderbuffer(fb->_ColorDrawBuffers[i]);

         dri_bo_wait_rendering(irb->region->buffer);

   if (fb->_DepthBuffer) {
      /* XXX: Wait on buffer idle */
#ifdef I915_MMIO_READ
/**
 * GL_ARB_occlusion_query: start a query.  Drains the pipeline with
 * intelFinish() so the counter read is stable, then samples the
 * PS_DEPTH_COUNT register through the DRM_I915_MMIO ioctl as the
 * query's starting value.
 */
intelBeginQuery(GLcontext *ctx, GLenum target, struct gl_query_object *q)
   struct intel_context *intel = intel_context( ctx );
   struct drm_i915_mmio io = {
      .read_write = I915_MMIO_READ,
      .reg = MMIO_REGS_PS_DEPTH_COUNT,

   intelFinish(&intel->ctx);
   drmCommandWrite(intel->driFd, DRM_I915_MMIO, &io, sizeof(io));
/**
 * GL_ARB_occlusion_query: end a query.  Drains the pipeline, samples
 * PS_DEPTH_COUNT again via the DRM_I915_MMIO ioctl, and stores the
 * delta since intelBeginQuery into q->Result.
 */
intelEndQuery(GLcontext *ctx, GLenum target, struct gl_query_object *q)
   struct intel_context *intel = intel_context( ctx );

   struct drm_i915_mmio io = {
      .read_write = I915_MMIO_READ,
      .reg = MMIO_REGS_PS_DEPTH_COUNT,

   intelFinish(&intel->ctx);
   drmCommandWrite(intel->driFd, DRM_I915_MMIO, &io, sizeof(io));
   q->Result = tmp - q->Result;
/** Driver-specific fence emit implementation for the fake memory
 * manager: emits an IRQ and returns its cookie as the fence value. */
intel_fence_emit(void *private)
   struct intel_context *intel = (struct intel_context *)private;

   /* XXX: Need to emit a flush, if we haven't already (at least with
    * the current batchbuffer implementation, we have). */
   fence = intelEmitIrqLocked(intel);
/** Driver-specific fence wait implementation for the fake memory
 * manager: blocks until the IRQ identified by `cookie` has fired. */
intel_fence_wait(void *private, unsigned int cookie)
   struct intel_context *intel = (struct intel_context *)private;

   intelWaitIrq(intel, cookie);
/**
 * Choose and initialize the buffer manager for this context.
 *
 * Prefers the GEM kernel memory manager when the DDX/DRM advertise it
 * (and INTEL_NO_GEM is not set in the environment), setting intel->ttm
 * accordingly; otherwise falls back to the classic user-space "fake"
 * bufmgr backed by the static texture aperture.  The chosen mode is
 * mirrored into intelScreen->ttm for other users of the screen.
 */
intel_init_bufmgr(struct intel_context *intel)
   intelScreenPrivate *intelScreen = intel->intelScreen;
   GLboolean gem_disable = getenv("INTEL_NO_GEM") != NULL;   /* user override */
   GLboolean gem_supported;
   struct drm_i915_getparam gp;

   /* Ask the kernel whether it supports GEM. */
   gp.param = I915_PARAM_HAS_GEM;
   gp.value = &gem_kernel;

   (void) drmCommandWriteRead(intel->driFd, DRM_I915_GETPARAM, &gp, sizeof(gp));

   /* If we've got a new enough DDX that's initializing GEM and giving
    * us object handles for the shared buffers, use that. */
   intel->ttm = GL_FALSE;
   if (intel->intelScreen->driScrnPriv->dri2.enabled)
      gem_supported = GL_TRUE;
   else if (intel->intelScreen->driScrnPriv->ddx_version.minor >= 9 &&
            intel->intelScreen->front.bo_handle != -1)
      gem_supported = GL_TRUE;
      gem_supported = GL_FALSE;

   if (!gem_disable && gem_supported) {
      intel->bufmgr = intel_bufmgr_gem_init(intel->driFd,
      if (intel->bufmgr != NULL)
         intel->ttm = GL_TRUE;

      /* Buffer-object reuse policy comes from driconf. */
      bo_reuse_mode = driQueryOptioni(&intel->optionCache, "bo_reuse");
      switch (bo_reuse_mode) {
      case DRI_CONF_BO_REUSE_DISABLED:
      case DRI_CONF_BO_REUSE_ALL:
         intel_bufmgr_gem_enable_reuse(intel->bufmgr);

   /* Otherwise, use the classic buffer manager. */
   if (intel->bufmgr == NULL) {
         fprintf(stderr, "GEM disabled.  Using classic.\n");
         fprintf(stderr, "Failed to initialize GEM.  "
                 "Falling back to classic.\n");
      if (intelScreen->tex.size == 0) {
         fprintf(stderr, "[%s:%u] Error initializing buffer manager.\n",
      intel->bufmgr = intel_bufmgr_fake_init(intelScreen->tex.offset,
                                             intelScreen->tex.map,
                                             intelScreen->tex.size,

   /* XXX bufmgr should be per-screen, not per-context */
   intelScreen->ttm = intel->ttm;
/**
 * Populate the dd_function_table with this driver's entry points,
 * starting from the Mesa software defaults and overriding the hooks
 * the intel driver implements (flush/finish/strings/state/viewport,
 * swrast-backed color-table and convolution copies, occlusion queries
 * when MMIO reads are available, plus the texture/state/buffer/pixel
 * function groups).
 */
intelInitDriverFunctions(struct dd_function_table *functions)
   _mesa_init_driver_functions(functions);

   functions->Flush = intelFlush;
   functions->Finish = intelFinish;
   functions->GetString = intelGetString;
   functions->UpdateState = intelInvalidateState;
   functions->Viewport = intel_viewport;

   /* These imaging paths fall back to swrast. */
   functions->CopyColorTable = _swrast_CopyColorTable;
   functions->CopyColorSubTable = _swrast_CopyColorSubTable;
   functions->CopyConvolutionFilter1D = _swrast_CopyConvolutionFilter1D;
   functions->CopyConvolutionFilter2D = _swrast_CopyConvolutionFilter2D;

#ifdef I915_MMIO_READ
   functions->BeginQuery = intelBeginQuery;
   functions->EndQuery = intelEndQuery;

   intelInitTextureFuncs(functions);
   intelInitStateFuncs(functions);
   intelInitBufferFuncs(functions);
   intelInitPixelFuncs(functions);
/**
 * Shared i915/i965 context-creation helper: wires the new
 * intel_context to its screen/DRI objects, initializes the core Mesa
 * context, the buffer manager, implementation limits, the software
 * fallback modules (swrast/vbo/tnl/swsetup), debug flags, the
 * batchbuffer, buffer objects and FBO support, and driconf-driven
 * options.  Called by the chip-specific CreateContext paths.
 */
intelInitContext(struct intel_context *intel,
                 const __GLcontextModes * mesaVis,
                 __DRIcontextPrivate * driContextPriv,
                 void *sharedContextPrivate,
                 struct dd_function_table *functions)
   GLcontext *ctx = &intel->ctx;
   GLcontext *shareCtx = (GLcontext *) sharedContextPrivate;
   __DRIscreenPrivate *sPriv = driContextPriv->driScreenPriv;
   intelScreenPrivate *intelScreen = (intelScreenPrivate *) sPriv->private;
   /* SAREA is shared with the X server/other clients — hence volatile. */
   volatile struct drm_i915_sarea *saPriv = (struct drm_i915_sarea *)
      (((GLubyte *) sPriv->pSAREA) + intelScreen->sarea_priv_offset);

   if (!_mesa_initialize_context(&intel->ctx, mesaVis, shareCtx,
                                 functions, (void *) intel)) {
      _mesa_printf("%s: failed to init mesa context\n", __FUNCTION__);

   /* Cross-link the DRI and driver objects. */
   driContextPriv->driverPrivate = intel;
   intel->intelScreen = intelScreen;
   intel->driScreen = sPriv;
   intel->sarea = saPriv;
   intel->driContext = driContextPriv;

   intel->hHWContext = driContextPriv->hHWContext;
   intel->driFd = sPriv->fd;
   intel->driHwLock = sPriv->lock;

   intel->width = intelScreen->width;
   intel->height = intelScreen->height;

   driParseConfigFiles(&intel->optionCache, &intelScreen->optionCache,
                       intel->driScreen->myNum,
                       IS_965(intelScreen->deviceID) ? "i965" : "i915");
   /* 865G gets a smaller batch (presumably a hardware limit — TODO
    * confirm against the full file / specs). */
   if (intelScreen->deviceID == PCI_CHIP_I865_G)
      intel->maxBatchSize = 4096;
      intel->maxBatchSize = BATCH_SZ;

   if (!intel_init_bufmgr(intel))

   ctx->Const.MaxTextureMaxAnisotropy = 2.0;

   /* This doesn't yet catch all non-conformant rendering, but it's a
    * start. */
   if (getenv("INTEL_STRICT_CONFORMANCE")) {
      intel->strict_conformance = 1;

   /* Line-width limits: strict-conformance mode pins everything to
    * 1.0; otherwise advertise wide lines. */
   if (intel->strict_conformance) {
      ctx->Const.MinLineWidth = 1.0;
      ctx->Const.MinLineWidthAA = 1.0;
      ctx->Const.MaxLineWidth = 1.0;
      ctx->Const.MaxLineWidthAA = 1.0;
      ctx->Const.LineWidthGranularity = 1.0;
      ctx->Const.MinLineWidth = 1.0;
      ctx->Const.MinLineWidthAA = 1.0;
      ctx->Const.MaxLineWidth = 5.0;
      ctx->Const.MaxLineWidthAA = 5.0;
      ctx->Const.LineWidthGranularity = 0.5;

   ctx->Const.MinPointSize = 1.0;
   ctx->Const.MinPointSizeAA = 1.0;
   ctx->Const.MaxPointSize = 255.0;
   ctx->Const.MaxPointSizeAA = 3.0;
   ctx->Const.PointSizeGranularity = 1.0;

   /* reinitialize the context point state.
    * It depend on constants in __GLcontextRec::Const */
   _mesa_init_point(ctx);

   ctx->Const.MaxColorAttachments = 4;  /* XXX FBO: review this */

   /* Initialize the software rasterizer and helper modules. */
   _swrast_CreateContext(ctx);
   _vbo_CreateContext(ctx);
   _tnl_CreateContext(ctx);
   _swsetup_CreateContext(ctx);

   /* Configure swrast to match hardware characteristics: */
   _swrast_allow_pixel_fog(ctx, GL_FALSE);
   _swrast_allow_vertex_fog(ctx, GL_TRUE);

   /* Hardware stencil only works paired with a 24-bit depth buffer. */
   intel->hw_stencil = mesaVis->stencilBits && mesaVis->depthBits == 24;
   intel->hw_stipple = 1;

   /* XXX FBO: this doesn't seem to be used anywhere */
   switch (mesaVis->depthBits) {
   case 0:                     /* what to do in this case? */
      intel->polygon_offset_scale = 1.0;
      intel->polygon_offset_scale = 2.0;     /* req'd to pass glean */

   if (IS_965(intelScreen->deviceID))
      intel->polygon_offset_scale /= 0xffff;

   intel->RenderIndex = ~0;

   fthrottle_mode = driQueryOptioni(&intel->optionCache, "fthrottle_mode");
   intel->irqsEmitted = 0;

   /* Throttle via IRQ waits only if the screen has a working IRQ and
    * driconf asked for it; otherwise optionally usleep. */
   intel->do_irqs = (intel->intelScreen->irq_active &&
                     fthrottle_mode == DRI_CONF_FTHROTTLE_IRQS);

   intel->do_usleeps = (fthrottle_mode == DRI_CONF_FTHROTTLE_USLEEPS);

   _math_matrix_ctr(&intel->ViewportMatrix);

   /* 965 requires a working IRQ; refuse to start without one. */
   if (IS_965(intelScreen->deviceID) && !intel->intelScreen->irq_active) {
      _mesa_printf("IRQs not active. Exiting\n");

   intelInitExtensions(ctx, GL_FALSE);

   INTEL_DEBUG = driParseDebugString(getenv("INTEL_DEBUG"), debug_control);
   if (INTEL_DEBUG & DEBUG_BUFMGR)
      dri_bufmgr_set_debug(intel->bufmgr, GL_TRUE);

   if (!sPriv->dri2.enabled)
      intel_recreate_static_regions(intel);

   intel->batch = intel_batchbuffer_alloc(intel);

   intel_bufferobj_init(intel);
   intel_fbo_init(intel);

   /* S3TC: enable if a decoder library is present, or if the user
    * forced it on via driconf (decode not needed to expose it). */
   if (intel->ctx.Mesa_DXTn) {
      _mesa_enable_extension(ctx, "GL_EXT_texture_compression_s3tc");
      _mesa_enable_extension(ctx, "GL_S3_s3tc");
   else if (driQueryOptionb(&intel->optionCache, "force_s3tc_enable")) {
      _mesa_enable_extension(ctx, "GL_EXT_texture_compression_s3tc");

   intel->prim.primitive = ~0;

   /* Force all software fallbacks */
   if (driQueryOptionb(&intel->optionCache, "no_rast")) {
      fprintf(stderr, "disabling 3D rasterization\n");

   /* Disable all hardware rendering (skip emitting batches and
    * fences/waits) when INTEL_NO_HW is set. */
   intel->no_hw = getenv("INTEL_NO_HW") != NULL;
/**
 * DRI context-destruction entry point: tear down the chip-specific
 * state (vtbl.destroy), the swrast/tnl/vbo/swsetup modules, the
 * batchbuffer and vertex buffer, the core Mesa context data, and
 * finally the buffer manager.
 */
intelDestroyContext(__DRIcontextPrivate * driContextPriv)
   struct intel_context *intel =
      (struct intel_context *) driContextPriv->driverPrivate;

   assert(intel);               /* should never be null */

      GLboolean release_texture_heaps;

      INTEL_FIREVERTICES(intel);

      intel->vtbl.destroy(intel);

      /* We were the last user of the share group iff its refcount is
       * about to drop from 1. */
      release_texture_heaps = (intel->ctx.Shared->RefCount == 1);
      _swsetup_DestroyContext(&intel->ctx);
      _tnl_DestroyContext(&intel->ctx);
      _vbo_DestroyContext(&intel->ctx);

      _swrast_DestroyContext(&intel->ctx);
      intel->Fallback = 0;      /* don't call _swrast_Flush later */

      intel_batchbuffer_free(intel->batch);
      free(intel->prim.vb);

      if (release_texture_heaps) {
         /* This share group is about to go away, free our private
          * texture object data. */
         if (INTEL_DEBUG & DEBUG_TEXTURE)
            fprintf(stderr, "do something to free texture heaps\n");

      /* free the Mesa context */
      _mesa_free_context_data(&intel->ctx);

      dri_bufmgr_destroy(intel->bufmgr);
/* DRI context-unbind entry point (body not visible in this view). */
intelUnbindContext(__DRIcontextPrivate * driContextPriv)
/**
 * DRI MakeCurrent entry point.
 *
 * With a real context: refresh DRI2 renderbuffers, (re)attach the
 * context's front/back/third/depth regions to the framebuffer's
 * renderbuffers (the "XXX FBO temporary fix-ups"), sync framebuffer
 * sizes to the window, make the Mesa context current, and — when the
 * drawable changed — set up vblank throttling state and notify the
 * driver the window moved.  With a NULL context: release the current
 * context.
 */
intelMakeCurrent(__DRIcontextPrivate * driContextPriv,
                 __DRIdrawablePrivate * driDrawPriv,
                 __DRIdrawablePrivate * driReadPriv)
   __DRIscreenPrivate *psp = driDrawPriv->driScreenPriv;

   if (driContextPriv) {
      struct intel_context *intel =
         (struct intel_context *) driContextPriv->driverPrivate;
      struct intel_framebuffer *intel_fb =
         (struct intel_framebuffer *) driDrawPriv->driverPrivate;
      GLframebuffer *readFb = (GLframebuffer *) driReadPriv->driverPrivate;

      if (driContextPriv->driScreenPriv->dri2.enabled) {
         intel_update_renderbuffers(driContextPriv, driDrawPriv);
         if (driDrawPriv != driReadPriv)
            intel_update_renderbuffers(driContextPriv, driReadPriv);

      /* XXX FBO temporary fix-ups! */
      /* if the renderbuffers don't have regions, init them from the
       * context */
      struct intel_renderbuffer *irbDepth
         = intel_get_renderbuffer(&intel_fb->Base, BUFFER_DEPTH);
      struct intel_renderbuffer *irbStencil
         = intel_get_renderbuffer(&intel_fb->Base, BUFFER_STENCIL);

      if (intel_fb->color_rb[0]) {
         intel_renderbuffer_set_region(intel_fb->color_rb[0],
                                       intel->front_region);
      if (intel_fb->color_rb[1]) {
         intel_renderbuffer_set_region(intel_fb->color_rb[1],
      if (intel_fb->color_rb[2]) {
         intel_renderbuffer_set_region(intel_fb->color_rb[2],
                                       intel->third_region);

         intel_renderbuffer_set_region(irbDepth, intel->depth_region);
         /* Stencil shares the depth region (packed depth/stencil). */
         intel_renderbuffer_set_region(irbStencil, intel->depth_region);

      /* set GLframebuffer size to match window, if needed */
      driUpdateFramebufferSize(&intel->ctx, driDrawPriv);

      if (driReadPriv != driDrawPriv) {
         driUpdateFramebufferSize(&intel->ctx, driReadPriv);

      _mesa_make_current(&intel->ctx, &intel_fb->Base, readFb);

      /* The drawbuffer won't always be updated by _mesa_make_current:
       */
      if (intel->ctx.DrawBuffer == &intel_fb->Base) {
         if (intel->driReadDrawable != driReadPriv)
            intel->driReadDrawable = driReadPriv;

         if (intel->driDrawable != driDrawPriv) {
            if (driDrawPriv->swap_interval == (unsigned)-1) {
               /* First time this drawable is bound: pick vblank flags
                * (IRQ-based only if the screen has a working IRQ) and
                * initialize swap/vblank bookkeeping. */
               driDrawPriv->vblFlags = (intel->intelScreen->irq_active != 0)
                  ? driGetDefaultVBlankFlags(&intel->optionCache)
                  : VBLANK_FLAG_NO_IRQ;
               (*psp->systemTime->getUST) (&intel_fb->swap_ust);
               driDrawableInitVBlank(driDrawPriv);
               intel_fb->vbl_waited = driDrawPriv->vblSeq;
               /* Two color buffers normally, three when the screen has
                * a third (triple-buffer) handle. */
               for (i = 0; i < (intel->intelScreen->third.handle ? 3 : 2); i++) {
                  if (intel_fb->color_rb[i])
                     intel_fb->color_rb[i]->vbl_pending = driDrawPriv->vblSeq;
            intel->driDrawable = driDrawPriv;
            intelWindowMoved(intel);

         intel_draw_buffer(&intel->ctx, &intel_fb->Base);

      _mesa_make_current(NULL, NULL, NULL);
/**
 * Slow path taken when the heavyweight hardware lock was contended:
 * re-acquire the lock, then recover any state another client may have
 * clobbered — hardware context ownership, fake-bufmgr texture memory,
 * screen size, and drawable stamp.
 */
intelContendedLock(struct intel_context *intel, GLuint flags)
   __DRIdrawablePrivate *dPriv = intel->driDrawable;
   __DRIscreenPrivate *sPriv = intel->driScreen;
   volatile struct drm_i915_sarea *sarea = intel->sarea;
   int me = intel->hHWContext;

   drmGetLock(intel->driFd, intel->hHWContext, flags);

   if (INTEL_DEBUG & DEBUG_LOCK)
      _mesa_printf("%s - got contended lock\n", __progname);

   /* If the window moved, may need to set a new cliprect now.
    *
    * NOTE: This releases and regains the hw lock, so all state
    * checking must be done *after* this call: */
   DRI_VALIDATE_DRAWABLE_INFO(sPriv, dPriv);

   /* Another client owned the hardware context — reclaim it. */
   if (sarea && sarea->ctxOwner != me) {
      if (INTEL_DEBUG & DEBUG_BUFMGR) {
         fprintf(stderr, "Lost Context: sarea->ctxOwner %x me %x\n",
                 sarea->ctxOwner, me);
      sarea->ctxOwner = me;

   /* If the last consumer of the texture memory wasn't us, notify the
    * fake bufmgr and record the new owner.  We should have the memory
    * shared between contexts of a single fake bufmgr, but this will at
    * least make things correct for now. */
   if (!intel->ttm && sarea->texAge != intel->hHWContext) {
      sarea->texAge = intel->hHWContext;
      intel_bufmgr_fake_contended_lock_take(intel->bufmgr);
      if (INTEL_DEBUG & DEBUG_BATCH)
         intel_decode_context_reset();
      if (INTEL_DEBUG & DEBUG_BUFMGR)
         /* NOTE(review): message says texAge but prints ctxOwner —
          * looks like a copy/paste slip; confirm before changing. */
         fprintf(stderr, "Lost Textures: sarea->texAge %x hw context %x\n",
                 sarea->ctxOwner, intel->hHWContext);

   /* Screen (not just window) resized: flush with no cliprects so the
    * stale rendering is dropped, then adopt the new size. */
   if (sarea->width != intel->width || sarea->height != intel->height) {
      int numClipRects = intel->numClipRects;

      /*
       * FIXME: Really only need to do this when drawing to a
       * common back- or front buffer.
       */

      /*
       * This will essentially drop the outstanding batchbuffer on
       * the floor.
       */
      intel->numClipRects = 0;

      if (intel->Fallback)
         _swrast_flush(&intel->ctx);

      if (!IS_965(intel->intelScreen->deviceID))
         INTEL_FIREVERTICES(intel);

      if (intel->batch->map != intel->batch->ptr)
         intel_batchbuffer_flush(intel->batch);

      intel->numClipRects = numClipRects;

      /* force window update */
      intel->lastStamp = 0;

      intel->width = sarea->width;
      intel->height = sarea->height;

   /* Drawable changed?
    */
   if (dPriv && intel->lastStamp != dPriv->lastStamp) {
      intelWindowMoved(intel);
      intel->lastStamp = dPriv->lastStamp;
/* Process-wide mutex taken/released by LOCK_HARDWARE/UNLOCK_HARDWARE
 * below, serializing hardware-lock acquisition between this process's
 * contexts. */
_glthread_DECLARE_STATIC_MUTEX(lockMutex);
/* Lock the hardware and validate our state.
 *
 * Takes the per-process mutex, optionally waits for an outstanding
 * vblank swap on the current draw buffer (IRQ-based throttling), and
 * — for non-DRI2 — grabs the DRM hardware lock via lock-free CAS,
 * falling into intelContendedLock() on contention. */
void LOCK_HARDWARE( struct intel_context *intel )
   __DRIdrawable *dPriv = intel->driDrawable;
   __DRIscreen *sPriv = intel->driScreen;
   struct intel_framebuffer *intel_fb = NULL;
   struct intel_renderbuffer *intel_rb = NULL;

   _glthread_LOCK_MUTEX(lockMutex);
   assert(!intel->locked);

   if (intel->driDrawable) {
      intel_fb = intel->driDrawable->driverPrivate;

      intel_get_renderbuffer(&intel_fb->Base,
                             intel_fb->Base._ColorDrawBufferIndexes[0]);

   /* The (waited - pending) > (1<<23) test is an unsigned wraparound
    * check: true when the pending swap hasn't been waited on yet. */
   if (intel_rb && dPriv->vblFlags &&
       !(dPriv->vblFlags & VBLANK_FLAG_NO_IRQ) &&
       (intel_fb->vbl_waited - intel_rb->vbl_pending) > (1<<23)) {

      vbl.request.type = DRM_VBLANK_ABSOLUTE;

      if ( dPriv->vblFlags & VBLANK_FLAG_SECONDARY ) {
         vbl.request.type |= DRM_VBLANK_SECONDARY;

      vbl.request.sequence = intel_rb->vbl_pending;
      drmWaitVBlank(intel->driFd, &vbl);
      intel_fb->vbl_waited = vbl.reply.sequence;

   if (!sPriv->dri2.enabled) {
      /* Fast path: try to take the DRM lock uncontended; on failure
       * __ret is set and we take the contended slow path. */
      DRM_CAS(intel->driHwLock, intel->hHWContext,
              (DRM_LOCK_HELD|intel->hHWContext), __ret);
         intelContendedLock( intel, 0 );

   if (INTEL_DEBUG & DEBUG_LOCK)
      _mesa_printf("%s - locked\n", __progname);
/* Unlock the hardware using the global current context.
 *
 * Notifies the chip-specific layer, drops the DRM lock (non-DRI2),
 * releases the per-process mutex, and flushes any batch that still
 * references cliprects — such a batch must not outlive the lock. */
void UNLOCK_HARDWARE( struct intel_context *intel )
   __DRIscreen *sPriv = intel->driScreen;

   intel->vtbl.note_unlock( intel );

   if (!sPriv->dri2.enabled)
      DRM_UNLOCK(intel->driFd, intel->driHwLock, intel->hHWContext);

   _glthread_UNLOCK_MUTEX(lockMutex);

   if (INTEL_DEBUG & DEBUG_LOCK)
      _mesa_printf("%s - unlocked\n", __progname);

   /*
    * Nothing should be left in batch outside of LOCK/UNLOCK which
    * references cliprects.
    */
   if (intel->batch->cliprect_mode == REFERENCES_CLIPRECTS)
      intel_batchbuffer_flush(intel->batch);