1 /**************************************************************************
3 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 **************************************************************************/
36 #include "swrast/swrast.h"
38 #include "intel_context.h"
39 #include "intel_ioctl.h"
40 #include "intel_batchbuffer.h"
43 u_int32_t
intelGetLastFrame (intelContextPtr intel
)
47 drm_i915_getparam_t gp
;
49 gp
.param
= I915_PARAM_LAST_DISPATCH
;
50 gp
.value
= (int *)&frame
;
51 ret
= drmCommandWriteRead( intel
->driFd
, DRM_I915_GETPARAM
,
57 * Emits a marker in the command stream, numbered from 0x00000001 to
60 int intelEmitIrqLocked( intelContextPtr intel
)
65 assert(((*(int *)intel
->driHwLock
) & ~DRM_LOCK_CONT
) ==
66 (DRM_LOCK_HELD
|intel
->hHWContext
));
68 /* Valgrind can't tell that the kernel will have copyout()ed onto this
69 * value, so initialize it now to prevent false positives.
74 ret
= drmCommandWriteRead( intel
->driFd
, DRM_I830_IRQ_EMIT
,
77 fprintf( stderr
, "%s: drmI830IrqEmit: %d\n", __FUNCTION__
, ret
);
82 fprintf(stderr
, "%s --> %d\n", __FUNCTION__
, seq
);
87 /** Blocks on a marker returned by intelEitIrqLocked(). */
88 void intelWaitIrq( intelContextPtr intel
, int seq
)
93 fprintf(stderr
, "%s %d\n", __FUNCTION__
, seq
);
95 intel
->iw
.irq_seq
= seq
;
98 ret
= drmCommandWrite( intel
->driFd
, DRM_I830_IRQ_WAIT
, &intel
->iw
, sizeof(intel
->iw
) );
99 } while (ret
== -EAGAIN
|| ret
== -EINTR
);
102 fprintf( stderr
, "%s: drmI830IrqWait: %d\n", __FUNCTION__
, ret
);
104 intel_dump_batchbuffer( intel
->alloc
.offset
,
113 static void age_intel( intelContextPtr intel
, int age
)
117 for (i
= 0 ; i
< MAX_TEXTURE_UNITS
; i
++)
118 if (intel
->CurrentTexObj
[i
])
119 intel
->CurrentTexObj
[i
]->age
= age
;
/**
 * Debug helper: hex-dump a batchbuffer to stderr, four dwords per line,
 * each line prefixed with its hardware offset.
 *
 * \param offset  hardware offset of the start of the buffer
 * \param ptr     CPU pointer to the buffer contents
 * \param count   buffer size in bytes (dword count printed is count/4)
 */
void intel_dump_batchbuffer(long offset,
                            int *ptr,
                            int count)
{
   int i;

   fprintf(stderr, "\n\n\nSTART BATCH (%d dwords):\n", count);
   for (i = 0; i < count / 4; i += 4)
      fprintf(stderr, "\t0x%x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
              (unsigned int)offset + i * 4,
              ptr[i], ptr[i + 1], ptr[i + 2], ptr[i + 3]);
   fprintf(stderr, "END BATCH\n\n\n");
}
134 void intelRefillBatchLocked( intelContextPtr intel
, GLboolean allow_unlock
)
136 GLuint last_irq
= intel
->alloc
.irq_emitted
;
137 GLuint half
= intel
->alloc
.size
/ 2;
138 GLuint buf
= (intel
->alloc
.active_buf
^= 1);
140 intel
->alloc
.irq_emitted
= intelEmitIrqLocked( intel
);
143 if (allow_unlock
) UNLOCK_HARDWARE( intel
);
144 intelWaitIrq( intel
, last_irq
);
145 if (allow_unlock
) LOCK_HARDWARE( intel
);
149 fprintf(stderr
, "%s: now using half %d\n", __FUNCTION__
, buf
);
151 intel
->batch
.start_offset
= intel
->alloc
.offset
+ buf
* half
;
152 intel
->batch
.ptr
= (unsigned char *)intel
->alloc
.ptr
+ buf
* half
;
153 intel
->batch
.size
= half
- 8;
154 intel
->batch
.space
= half
- 8;
155 assert(intel
->batch
.space
>= 0);
158 #define MI_BATCH_BUFFER_END (0xA<<23)
161 void intelFlushBatchLocked( intelContextPtr intel
,
162 GLboolean ignore_cliprects
,
164 GLboolean allow_unlock
)
166 drmI830BatchBuffer batch
;
168 assert(intel
->locked
);
171 fprintf(stderr
, "%s used %d of %d offset %x..%x refill %d (started in %s)\n",
173 (intel
->batch
.size
- intel
->batch
.space
),
175 intel
->batch
.start_offset
,
176 intel
->batch
.start_offset
+
177 (intel
->batch
.size
- intel
->batch
.space
),
181 /* Throw away non-effective packets. Won't work once we have
182 * hardware contexts which would preserve statechanges beyond a
185 if (intel
->numClipRects
== 0 && !ignore_cliprects
) {
187 /* Without this yeild, an application with no cliprects can hog
188 * the hardware. Without unlocking, the effect is much worse -
189 * effectively a lock-out of other contexts.
192 UNLOCK_HARDWARE( intel
);
194 LOCK_HARDWARE( intel
);
197 /* Note that any state thought to have been emitted actually
200 intel
->batch
.ptr
-= (intel
->batch
.size
- intel
->batch
.space
);
201 intel
->batch
.space
= intel
->batch
.size
;
202 intel
->vtbl
.lost_hardware( intel
);
205 if (intel
->batch
.space
!= intel
->batch
.size
) {
207 if (intel
->sarea
->ctxOwner
!= intel
->hHWContext
) {
208 intel
->perf_boxes
|= I830_BOX_LOST_CONTEXT
;
209 intel
->sarea
->ctxOwner
= intel
->hHWContext
;
212 batch
.start
= intel
->batch
.start_offset
;
213 batch
.used
= intel
->batch
.size
- intel
->batch
.space
;
214 batch
.cliprects
= intel
->pClipRects
;
215 batch
.num_cliprects
= ignore_cliprects
? 0 : intel
->numClipRects
;
217 batch
.DR4
= ((((GLuint
)intel
->drawX
) & 0xffff) |
218 (((GLuint
)intel
->drawY
) << 16));
220 if (intel
->alloc
.offset
) {
221 if ((batch
.used
& 0x4) == 0) {
222 ((int *)intel
->batch
.ptr
)[0] = 0;
223 ((int *)intel
->batch
.ptr
)[1] = MI_BATCH_BUFFER_END
;
225 intel
->batch
.ptr
+= 0x8;
228 ((int *)intel
->batch
.ptr
)[0] = MI_BATCH_BUFFER_END
;
230 intel
->batch
.ptr
+= 0x4;
235 intel_dump_batchbuffer( batch
.start
,
236 (int *)(intel
->batch
.ptr
- batch
.used
),
239 intel
->batch
.start_offset
+= batch
.used
;
240 intel
->batch
.size
-= batch
.used
;
242 if (intel
->batch
.size
< 8) {
244 intel
->batch
.space
= intel
->batch
.size
= 0;
247 intel
->batch
.size
-= 8;
248 intel
->batch
.space
= intel
->batch
.size
;
252 assert(intel
->batch
.space
>= 0);
253 assert(batch
.start
>= intel
->alloc
.offset
);
254 assert(batch
.start
< intel
->alloc
.offset
+ intel
->alloc
.size
);
255 assert(batch
.start
+ batch
.used
> intel
->alloc
.offset
);
256 assert(batch
.start
+ batch
.used
<=
257 intel
->alloc
.offset
+ intel
->alloc
.size
);
260 if (intel
->alloc
.offset
) {
261 if (drmCommandWrite (intel
->driFd
, DRM_I830_BATCHBUFFER
, &batch
,
263 fprintf(stderr
, "DRM_I830_BATCHBUFFER: %d\n", -errno
);
264 UNLOCK_HARDWARE(intel
);
268 drmI830CmdBuffer cmd
;
269 cmd
.buf
= (char *)intel
->alloc
.ptr
+ batch
.start
;
273 cmd
.num_cliprects
= batch
.num_cliprects
;
274 cmd
.cliprects
= batch
.cliprects
;
276 if (drmCommandWrite (intel
->driFd
, DRM_I830_CMDBUFFER
, &cmd
,
278 fprintf(stderr
, "DRM_I830_CMDBUFFER: %d\n", -errno
);
279 UNLOCK_HARDWARE(intel
);
285 age_intel(intel
, intel
->sarea
->last_enqueue
);
287 /* FIXME: use hardware contexts to avoid 'losing' hardware after
290 if (intel
->batch
.contains_geometry
)
291 assert(intel
->batch
.last_emit_state
== intel
->batch
.counter
);
293 intel
->batch
.counter
++;
294 intel
->batch
.contains_geometry
= 0;
295 intel
->batch
.func
= 0;
296 intel
->vtbl
.lost_hardware( intel
);
300 intelRefillBatchLocked( intel
, allow_unlock
);
303 void intelFlushBatch( intelContextPtr intel
, GLboolean refill
)
306 intelFlushBatchLocked( intel
, GL_FALSE
, refill
, GL_FALSE
);
309 LOCK_HARDWARE(intel
);
310 intelFlushBatchLocked( intel
, GL_FALSE
, refill
, GL_TRUE
);
311 UNLOCK_HARDWARE(intel
);
316 void intelWaitForIdle( intelContextPtr intel
)
319 fprintf(stderr
, "%s\n", __FUNCTION__
);
321 intel
->vtbl
.emit_flush( intel
);
322 intelFlushBatch( intel
, GL_TRUE
);
324 /* Use an irq to wait for dma idle -- Need to track lost contexts
325 * to shortcircuit consecutive calls to this function:
327 intelWaitIrq( intel
, intel
->alloc
.irq_emitted
);
328 intel
->alloc
.irq_emitted
= 0;
333 * Check if we need to rotate/warp the front color buffer to the
334 * rotated screen. We generally need to do this when we get a glFlush
335 * or glFinish after drawing to the front color buffer.
338 intelCheckFrontRotate(GLcontext
*ctx
)
340 intelContextPtr intel
= INTEL_CONTEXT( ctx
);
341 if (intel
->ctx
.DrawBuffer
->_ColorDrawBufferMask
[0] == BUFFER_BIT_FRONT_LEFT
) {
342 intelScreenPrivate
*screen
= intel
->intelScreen
;
343 if (screen
->current_rotation
!= 0) {
344 __DRIdrawablePrivate
*dPriv
= intel
->driDrawable
;
345 intelRotateWindow(intel
, dPriv
, BUFFER_BIT_FRONT_LEFT
);
352 * NOT directly called via glFlush.
354 void intelFlush( GLcontext
*ctx
)
356 intelContextPtr intel
= INTEL_CONTEXT( ctx
);
359 _swrast_flush( ctx
);
361 INTEL_FIREVERTICES( intel
);
363 if (intel
->batch
.size
!= intel
->batch
.space
)
364 intelFlushBatch( intel
, GL_FALSE
);
369 * Called via glFlush.
371 void intelglFlush( GLcontext
*ctx
)
374 intelCheckFrontRotate(ctx
);
378 void intelFinish( GLcontext
*ctx
)
380 intelContextPtr intel
= INTEL_CONTEXT( ctx
);
382 intelWaitForIdle( intel
);
383 intelCheckFrontRotate(ctx
);
387 void intelClear(GLcontext
*ctx
, GLbitfield mask
)
389 intelContextPtr intel
= INTEL_CONTEXT( ctx
);
390 const GLuint colorMask
= *((GLuint
*) &ctx
->Color
.ColorMask
);
391 GLbitfield tri_mask
= 0;
392 GLbitfield blit_mask
= 0;
393 GLbitfield swrast_mask
= 0;
396 fprintf(stderr
, "%s\n", __FUNCTION__
);
398 /* Take care of cliprects, which are handled differently for
401 intelFlush( &intel
->ctx
);
403 if (mask
& BUFFER_BIT_FRONT_LEFT
) {
404 if (colorMask
== ~0) {
405 blit_mask
|= BUFFER_BIT_FRONT_LEFT
;
408 tri_mask
|= BUFFER_BIT_FRONT_LEFT
;
412 if (mask
& BUFFER_BIT_BACK_LEFT
) {
413 if (colorMask
== ~0) {
414 blit_mask
|= BUFFER_BIT_BACK_LEFT
;
417 tri_mask
|= BUFFER_BIT_BACK_LEFT
;
421 if (mask
& BUFFER_BIT_DEPTH
) {
422 blit_mask
|= BUFFER_BIT_DEPTH
;
425 if (mask
& BUFFER_BIT_STENCIL
) {
426 if (!intel
->hw_stencil
) {
427 swrast_mask
|= BUFFER_BIT_STENCIL
;
429 else if ((ctx
->Stencil
.WriteMask
[0] & 0xff) != 0xff) {
430 tri_mask
|= BUFFER_BIT_STENCIL
;
433 blit_mask
|= BUFFER_BIT_STENCIL
;
437 swrast_mask
|= (mask
& BUFFER_BIT_ACCUM
);
440 intelClearWithBlit( ctx
, blit_mask
, 0, 0, 0, 0, 0);
443 intel
->vtbl
.clear_with_tris( intel
, tri_mask
, 0, 0, 0, 0, 0);
446 _swrast_Clear( ctx
, swrast_mask
);
451 intelRotateWindow(intelContextPtr intel
, __DRIdrawablePrivate
*dPriv
,
454 if (intel
->vtbl
.rotate_window
) {
455 intel
->vtbl
.rotate_window(intel
, dPriv
, srcBuffer
);
460 void *intelAllocateAGP( intelContextPtr intel
, GLsizei size
)
463 drmI830MemAlloc alloc
;
467 fprintf(stderr
, "%s: %d bytes\n", __FUNCTION__
, size
);
469 alloc
.region
= I830_MEM_REGION_AGP
;
472 alloc
.region_offset
= ®ion_offset
;
474 LOCK_HARDWARE(intel
);
476 /* Make sure the global heap is initialized
478 if (intel
->texture_heaps
[0])
479 driAgeTextures( intel
->texture_heaps
[0] );
482 ret
= drmCommandWriteRead( intel
->driFd
,
484 &alloc
, sizeof(alloc
));
487 fprintf(stderr
, "%s: DRM_I830_ALLOC ret %d\n", __FUNCTION__
, ret
);
488 UNLOCK_HARDWARE(intel
);
493 fprintf(stderr
, "%s: allocated %d bytes\n", __FUNCTION__
, size
);
495 /* Need to propogate this information (agp memory in use) to our
496 * local texture lru. The kernel has already updated the global
497 * lru. An alternative would have been to allocate memory the
498 * usual way and then notify the kernel to pin the allocation.
500 if (intel
->texture_heaps
[0])
501 driAgeTextures( intel
->texture_heaps
[0] );
503 UNLOCK_HARDWARE(intel
);
505 return (void *)((char *)intel
->intelScreen
->tex
.map
+ region_offset
);
508 void intelFreeAGP( intelContextPtr intel
, void *pointer
)
511 drmI830MemFree memfree
;
514 region_offset
= (char *)pointer
- (char *)intel
->intelScreen
->tex
.map
;
516 if (region_offset
< 0 ||
517 region_offset
> intel
->intelScreen
->tex
.size
) {
518 fprintf(stderr
, "offset %d outside range 0..%d\n", region_offset
,
519 intel
->intelScreen
->tex
.size
);
523 memfree
.region
= I830_MEM_REGION_AGP
;
524 memfree
.region_offset
= region_offset
;
526 ret
= drmCommandWrite( intel
->driFd
,
528 &memfree
, sizeof(memfree
));
531 fprintf(stderr
, "%s: DRM_I830_FREE ret %d\n", __FUNCTION__
, ret
);
534 /* This version of AllocateMemoryMESA allocates only agp memory, and
535 * only does so after the point at which the driver has been
538 * Theoretically a valid context isn't required. However, in this
539 * implementation, it is, as I'm using the hardware lock to protect
540 * the kernel data structures, and the current context to get the
543 void *intelAllocateMemoryMESA(__DRInativeDisplay
*dpy
, int scrn
,
544 GLsizei size
, GLfloat readfreq
,
545 GLfloat writefreq
, GLfloat priority
)
547 GET_CURRENT_CONTEXT(ctx
);
549 if (INTEL_DEBUG
& DEBUG_IOCTL
)
550 fprintf(stderr
, "%s sz %d %f/%f/%f\n", __FUNCTION__
, size
, readfreq
,
551 writefreq
, priority
);
553 if (getenv("INTEL_NO_ALLOC"))
556 if (!ctx
|| INTEL_CONTEXT(ctx
) == 0)
559 return intelAllocateAGP( INTEL_CONTEXT(ctx
), size
);
563 /* Called via glXFreeMemoryMESA() */
564 void intelFreeMemoryMESA(__DRInativeDisplay
*dpy
, int scrn
, GLvoid
*pointer
)
566 GET_CURRENT_CONTEXT(ctx
);
567 if (INTEL_DEBUG
& DEBUG_IOCTL
)
568 fprintf(stderr
, "%s %p\n", __FUNCTION__
, pointer
);
570 if (!ctx
|| INTEL_CONTEXT(ctx
) == 0) {
571 fprintf(stderr
, "%s: no context\n", __FUNCTION__
);
575 intelFreeAGP( INTEL_CONTEXT(ctx
), pointer
);
578 /* Called via glXGetMemoryOffsetMESA()
580 * Returns offset of pointer from the start of agp aperture.
582 GLuint
intelGetMemoryOffsetMESA(__DRInativeDisplay
*dpy
, int scrn
,
583 const GLvoid
*pointer
)
585 GET_CURRENT_CONTEXT(ctx
);
586 intelContextPtr intel
;
588 if (!ctx
|| !(intel
= INTEL_CONTEXT(ctx
)) ) {
589 fprintf(stderr
, "%s: no context\n", __FUNCTION__
);
593 if (!intelIsAgpMemory( intel
, pointer
, 0 ))
596 return intelAgpOffsetFromVirtual( intel
, pointer
);
600 GLboolean
intelIsAgpMemory( intelContextPtr intel
, const GLvoid
*pointer
,
603 int offset
= (char *)pointer
- (char *)intel
->intelScreen
->tex
.map
;
604 int valid
= (size
>= 0 &&
606 offset
+ size
< intel
->intelScreen
->tex
.size
);
608 if (INTEL_DEBUG
& DEBUG_IOCTL
)
609 fprintf(stderr
, "intelIsAgpMemory( %p ) : %d\n", pointer
, valid
);
615 GLuint
intelAgpOffsetFromVirtual( intelContextPtr intel
, const GLvoid
*pointer
)
617 int offset
= (char *)pointer
- (char *)intel
->intelScreen
->tex
.map
;
619 if (offset
< 0 || offset
> intel
->intelScreen
->tex
.size
)
622 return intel
->intelScreen
->tex
.offset
+ offset
;
629 /* Flip the front & back buffes
631 void intelPageFlip( const __DRIdrawablePrivate
*dPriv
)
634 intelContextPtr intel
;
637 if (INTEL_DEBUG
& DEBUG_IOCTL
)
638 fprintf(stderr
, "%s\n", __FUNCTION__
);
641 assert(dPriv
->driContextPriv
);
642 assert(dPriv
->driContextPriv
->driverPrivate
);
644 intel
= (intelContextPtr
) dPriv
->driContextPriv
->driverPrivate
;
646 intelFlush( &intel
->ctx
);
647 LOCK_HARDWARE( intel
);
649 if (dPriv
->pClipRects
) {
650 *(drm_clip_rect_t
*)intel
->sarea
->boxes
= dPriv
->pClipRects
[0];
651 intel
->sarea
->nbox
= 1;
654 ret
= drmCommandNone(intel
->driFd
, DRM_I830_FLIP
);
656 fprintf(stderr
, "%s: %d\n", __FUNCTION__
, ret
);
657 UNLOCK_HARDWARE( intel
);
661 tmp
= intel
->sarea
->last_enqueue
;
662 intelRefillBatchLocked( intel
);
663 UNLOCK_HARDWARE( intel
);
666 intelSetDrawBuffer( &intel
->ctx
, intel
->ctx
.Color
.DriverDrawBuffer
);