/**************************************************************************
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <assert.h>
#include <sched.h>

#include "swrast/swrast.h"

#include "intel_context.h"
#include "intel_ioctl.h"
#include "intel_batchbuffer.h"
45 static int intelEmitIrqLocked( intelContextPtr intel
)
50 assert(((*(int *)intel
->driHwLock
) & ~DRM_LOCK_CONT
) ==
51 (DRM_LOCK_HELD
|intel
->hHWContext
));
55 ret
= drmCommandWriteRead( intel
->driFd
, DRM_I830_IRQ_EMIT
,
58 fprintf( stderr
, "%s: drmI830IrqEmit: %d\n", __FUNCTION__
, ret
);
63 fprintf(stderr
, "%s --> %d\n", __FUNCTION__
, seq
);
68 static void intelWaitIrq( intelContextPtr intel
, int seq
)
74 fprintf(stderr
, "%s %d\n", __FUNCTION__
, seq
);
79 ret
= drmCommandWrite( intel
->driFd
, DRM_I830_IRQ_WAIT
, &iw
, sizeof(iw
) );
80 } while (ret
== -EAGAIN
|| ret
== -EINTR
);
83 fprintf( stderr
, "%s: drmI830IrqWait: %d\n", __FUNCTION__
, ret
);
85 intel_dump_batchbuffer( intel
->alloc
.offset
,
94 static void age_intel( intelContextPtr intel
, int age
)
98 for (i
= 0 ; i
< MAX_TEXTURE_UNITS
; i
++)
99 if (intel
->CurrentTexObj
[i
])
100 intel
->CurrentTexObj
[i
]->age
= age
;
/* Debug helper: hex-dump a batch buffer to stderr, four dwords per
 * line, each line prefixed with its hardware offset.
 *
 * NOTE(review): 'count' appears to be in bytes given the count/4 loop
 * bound, despite the "dwords" banner — confirm against callers.  Also
 * 'offset' is long but printed with %x; fine on ILP32, truncates on
 * LP64 — left as upstream had it.
 */
void intel_dump_batchbuffer( long offset,
			     int *ptr,
			     int count )
{
   int i;

   fprintf(stderr, "\n\n\nSTART BATCH (%d dwords):\n", count);
   for (i = 0; i < count/4; i += 4)
      fprintf(stderr, "\t0x%x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
	      offset + i*4, ptr[i], ptr[i+1], ptr[i+2], ptr[i+3]);
   fprintf(stderr, "END BATCH\n\n\n");
}
115 void intelRefillBatchLocked( intelContextPtr intel
, GLboolean allow_unlock
)
117 GLuint last_irq
= intel
->alloc
.irq_emitted
;
118 GLuint half
= intel
->alloc
.size
/ 2;
119 GLuint buf
= (intel
->alloc
.active_buf
^= 1);
121 intel
->alloc
.irq_emitted
= intelEmitIrqLocked( intel
);
124 if (allow_unlock
) UNLOCK_HARDWARE( intel
);
125 intelWaitIrq( intel
, last_irq
);
126 if (allow_unlock
) LOCK_HARDWARE( intel
);
130 fprintf(stderr
, "%s: now using half %d\n", __FUNCTION__
, buf
);
132 intel
->batch
.start_offset
= intel
->alloc
.offset
+ buf
* half
;
133 intel
->batch
.ptr
= (char *)intel
->alloc
.ptr
+ buf
* half
;
134 intel
->batch
.size
= half
- 8;
135 intel
->batch
.space
= half
- 8;
136 assert(intel
->batch
.space
>= 0);
/* Hardware instruction terminating a batch buffer: MI opcode 0x0A in
 * bits 28:23 of the command dword.
 */
#define MI_BATCH_BUFFER_END 	(0xA<<23)
142 void intelFlushBatchLocked( intelContextPtr intel
,
143 GLboolean ignore_cliprects
,
145 GLboolean allow_unlock
)
147 drmI830BatchBuffer batch
;
149 assert(intel
->locked
);
152 fprintf(stderr
, "%s used %d of %d offset %x..%x refill %d\n",
154 (intel
->batch
.size
- intel
->batch
.space
),
156 intel
->batch
.start_offset
,
157 intel
->batch
.start_offset
+
158 (intel
->batch
.size
- intel
->batch
.space
),
161 /* Throw away non-effective packets. Won't work once we have
162 * hardware contexts which would preserve statechanges beyond a
165 if (intel
->numClipRects
== 0 && !ignore_cliprects
) {
167 /* Without this yeild, an application with no cliprects can hog
168 * the hardware. Without unlocking, the effect is much worse -
169 * effectively a lock-out of other contexts.
172 UNLOCK_HARDWARE( intel
);
174 LOCK_HARDWARE( intel
);
177 /* Note that any state thought to have been emitted actually
180 intel
->batch
.ptr
-= (intel
->batch
.size
- intel
->batch
.space
);
181 intel
->batch
.space
= intel
->batch
.size
;
182 intel
->vtbl
.lost_hardware( intel
);
185 if (intel
->batch
.space
!= intel
->batch
.size
) {
186 batch
.start
= intel
->batch
.start_offset
;
187 batch
.used
= intel
->batch
.size
- intel
->batch
.space
;
188 batch
.cliprects
= intel
->pClipRects
;
189 batch
.num_cliprects
= ignore_cliprects
? 0 : intel
->numClipRects
;
191 batch
.DR4
= ((((GLuint
)intel
->drawX
) & 0xffff) |
192 (((GLuint
)intel
->drawY
) << 16));
194 if (intel
->alloc
.offset
) {
195 if ((batch
.used
& 0x4) == 0) {
196 ((int *)intel
->batch
.ptr
)[0] = 0;
197 ((int *)intel
->batch
.ptr
)[1] = MI_BATCH_BUFFER_END
;
199 intel
->batch
.ptr
+= 0x8;
202 ((int *)intel
->batch
.ptr
)[0] = MI_BATCH_BUFFER_END
;
204 intel
->batch
.ptr
+= 0x4;
209 intel_dump_batchbuffer( batch
.start
,
210 (int *)(intel
->batch
.ptr
- batch
.used
),
214 fprintf(stderr
, "%s: 0x%x..0x%x DR4: %x cliprects: %d\n",
217 batch
.start
+ batch
.used
,
218 batch
.DR4
, batch
.num_cliprects
);
220 intel
->batch
.start_offset
+= batch
.used
;
221 intel
->batch
.size
-= batch
.used
;
223 if (intel
->batch
.size
< 8) {
225 intel
->batch
.space
= intel
->batch
.size
= 0;
228 intel
->batch
.size
-= 8;
229 intel
->batch
.space
= intel
->batch
.size
;
233 assert(intel
->batch
.space
>= 0);
234 assert(batch
.start
>= intel
->alloc
.offset
);
235 assert(batch
.start
< intel
->alloc
.offset
+ intel
->alloc
.size
);
236 assert(batch
.start
+ batch
.used
> intel
->alloc
.offset
);
237 assert(batch
.start
+ batch
.used
<=
238 intel
->alloc
.offset
+ intel
->alloc
.size
);
241 if (intel
->alloc
.offset
) {
242 if (drmCommandWrite (intel
->driFd
, DRM_I830_BATCHBUFFER
, &batch
,
244 fprintf(stderr
, "DRM_I830_BATCHBUFFER: %d\n", -errno
);
245 UNLOCK_HARDWARE(intel
);
249 drmI830CmdBuffer cmd
;
250 cmd
.buf
= intel
->alloc
.ptr
+ batch
.start
;
254 cmd
.num_cliprects
= batch
.num_cliprects
;
255 cmd
.cliprects
= batch
.cliprects
;
257 if (drmCommandWrite (intel
->driFd
, DRM_I830_CMDBUFFER
, &cmd
,
259 fprintf(stderr
, "DRM_I830_CMDBUFFER: %d\n", -errno
);
260 UNLOCK_HARDWARE(intel
);
266 age_intel(intel
, intel
->sarea
->last_enqueue
);
268 /* FIXME: use hardware contexts to avoid 'losing' hardware after
271 intel
->vtbl
.lost_hardware( intel
);
275 intelRefillBatchLocked( intel
, allow_unlock
);
278 void intelFlushBatch( intelContextPtr intel
, GLboolean refill
)
281 intelFlushBatchLocked( intel
, GL_FALSE
, refill
, GL_FALSE
);
284 LOCK_HARDWARE(intel
);
285 intelFlushBatchLocked( intel
, GL_FALSE
, refill
, GL_TRUE
);
286 UNLOCK_HARDWARE(intel
);
296 void intelWaitForIdle( intelContextPtr intel
)
299 fprintf(stderr
, "%s\n", __FUNCTION__
);
301 intel
->vtbl
.emit_flush( intel
);
302 intelFlushBatch( intel
, GL_TRUE
);
304 /* Use an irq to wait for dma idle -- Need to track lost contexts
305 * to shortcircuit consecutive calls to this function:
307 intelWaitIrq( intel
, intel
->alloc
.irq_emitted
);
308 intel
->alloc
.irq_emitted
= 0;
313 void intelFlush( GLcontext
*ctx
)
315 intelContextPtr intel
= INTEL_CONTEXT( ctx
);
318 _swrast_flush( ctx
);
320 INTEL_FIREVERTICES( intel
);
322 if (intel
->batch
.size
!= intel
->batch
.space
)
323 intelFlushBatch( intel
, GL_FALSE
);
326 void intelFinish( GLcontext
*ctx
)
328 intelContextPtr intel
= INTEL_CONTEXT( ctx
);
330 intelWaitForIdle( intel
);
334 void intelClear(GLcontext
*ctx
, GLbitfield mask
, GLboolean all
,
335 GLint cx
, GLint cy
, GLint cw
, GLint ch
)
337 intelContextPtr intel
= INTEL_CONTEXT( ctx
);
338 const GLuint colorMask
= *((GLuint
*) &ctx
->Color
.ColorMask
);
339 GLbitfield tri_mask
= 0;
340 GLbitfield blit_mask
= 0;
341 GLbitfield swrast_mask
= 0;
344 fprintf(stderr
, "%s\n", __FUNCTION__
);
346 /* Take care of cliprects, which are handled differently for
349 intelFlush( &intel
->ctx
);
351 if (mask
& DD_FRONT_LEFT_BIT
) {
352 if (colorMask
== ~0) {
353 blit_mask
|= DD_FRONT_LEFT_BIT
;
356 tri_mask
|= DD_FRONT_LEFT_BIT
;
360 if (mask
& DD_BACK_LEFT_BIT
) {
361 if (colorMask
== ~0) {
362 blit_mask
|= DD_BACK_LEFT_BIT
;
365 tri_mask
|= DD_BACK_LEFT_BIT
;
369 if (mask
& DD_DEPTH_BIT
) {
370 blit_mask
|= DD_DEPTH_BIT
;
373 if (mask
& DD_STENCIL_BIT
) {
374 if (!intel
->hw_stencil
) {
375 swrast_mask
|= DD_STENCIL_BIT
;
377 else if (ctx
->Stencil
.WriteMask
[0] != 0xff) {
378 tri_mask
|= DD_STENCIL_BIT
;
381 blit_mask
|= DD_STENCIL_BIT
;
385 swrast_mask
|= (mask
& DD_ACCUM_BIT
);
388 intelClearWithBlit( ctx
, blit_mask
, all
, cx
, cy
, cw
, ch
);
391 intel
->vtbl
.clear_with_tris( intel
, tri_mask
, all
, cx
, cy
, cw
, ch
);
394 _swrast_Clear( ctx
, swrast_mask
, all
, cx
, cy
, cw
, ch
);
399 void *intelAllocateAGP( intelContextPtr intel
, GLsizei size
)
402 drmI830MemAlloc alloc
;
406 fprintf(stderr
, "%s: %d bytes\n", __FUNCTION__
, size
);
408 alloc
.region
= I830_MEM_REGION_AGP
;
411 alloc
.region_offset
= ®ion_offset
;
413 LOCK_HARDWARE(intel
);
415 /* Make sure the global heap is initialized
417 if (intel
->texture_heaps
[0])
418 driAgeTextures( intel
->texture_heaps
[0] );
421 ret
= drmCommandWriteRead( intel
->driFd
,
423 &alloc
, sizeof(alloc
));
426 fprintf(stderr
, "%s: DRM_I830_ALLOC ret %d\n", __FUNCTION__
, ret
);
427 UNLOCK_HARDWARE(intel
);
432 fprintf(stderr
, "%s: allocated %d bytes\n", __FUNCTION__
, size
);
434 /* Need to propogate this information (agp memory in use) to our
435 * local texture lru. The kernel has already updated the global
436 * lru. An alternative would have been to allocate memory the
437 * usual way and then notify the kernel to pin the allocation.
439 if (intel
->texture_heaps
[0])
440 driAgeTextures( intel
->texture_heaps
[0] );
442 UNLOCK_HARDWARE(intel
);
444 return (void *)((char *)intel
->intelScreen
->tex
.map
+ region_offset
);
447 void intelFreeAGP( intelContextPtr intel
, void *pointer
)
450 drmI830MemFree memfree
;
453 region_offset
= (char *)pointer
- (char *)intel
->intelScreen
->tex
.map
;
455 if (region_offset
< 0 ||
456 region_offset
> intel
->intelScreen
->tex
.size
) {
457 fprintf(stderr
, "offset %d outside range 0..%d\n", region_offset
,
458 intel
->intelScreen
->tex
.size
);
462 memfree
.region
= I830_MEM_REGION_AGP
;
463 memfree
.region_offset
= region_offset
;
465 ret
= drmCommandWrite( intel
->driFd
,
467 &memfree
, sizeof(memfree
));
470 fprintf(stderr
, "%s: DRM_I830_FREE ret %d\n", __FUNCTION__
, ret
);
473 /* This version of AllocateMemoryMESA allocates only agp memory, and
474 * only does so after the point at which the driver has been
477 * Theoretically a valid context isn't required. However, in this
478 * implementation, it is, as I'm using the hardware lock to protect
479 * the kernel data structures, and the current context to get the
482 void *intelAllocateMemoryMESA(__DRInativeDisplay
*dpy
, int scrn
,
483 GLsizei size
, GLfloat readfreq
,
484 GLfloat writefreq
, GLfloat priority
)
486 GET_CURRENT_CONTEXT(ctx
);
488 if (INTEL_DEBUG
& DEBUG_IOCTL
)
489 fprintf(stderr
, "%s sz %d %f/%f/%f\n", __FUNCTION__
, size
, readfreq
,
490 writefreq
, priority
);
492 if (getenv("INTEL_NO_ALLOC"))
495 if (!ctx
|| INTEL_CONTEXT(ctx
) == 0)
498 return intelAllocateAGP( INTEL_CONTEXT(ctx
), size
);
502 /* Called via glXFreeMemoryMESA() */
503 void intelFreeMemoryMESA(__DRInativeDisplay
*dpy
, int scrn
, GLvoid
*pointer
)
505 GET_CURRENT_CONTEXT(ctx
);
506 if (INTEL_DEBUG
& DEBUG_IOCTL
)
507 fprintf(stderr
, "%s %p\n", __FUNCTION__
, pointer
);
509 if (!ctx
|| INTEL_CONTEXT(ctx
) == 0) {
510 fprintf(stderr
, "%s: no context\n", __FUNCTION__
);
514 intelFreeAGP( INTEL_CONTEXT(ctx
), pointer
);
517 /* Called via glXGetMemoryOffsetMESA()
519 * Returns offset of pointer from the start of agp aperture.
521 GLuint
intelGetMemoryOffsetMESA(__DRInativeDisplay
*dpy
, int scrn
,
522 const GLvoid
*pointer
)
524 GET_CURRENT_CONTEXT(ctx
);
525 intelContextPtr intel
;
527 if (!ctx
|| !(intel
= INTEL_CONTEXT(ctx
)) ) {
528 fprintf(stderr
, "%s: no context\n", __FUNCTION__
);
532 if (!intelIsAgpMemory( intel
, pointer
, 0 ))
535 return intelAgpOffsetFromVirtual( intel
, pointer
);
539 GLboolean
intelIsAgpMemory( intelContextPtr intel
, const GLvoid
*pointer
,
542 int offset
= (char *)pointer
- (char *)intel
->intelScreen
->tex
.map
;
543 int valid
= (size
>= 0 &&
545 offset
+ size
< intel
->intelScreen
->tex
.size
);
547 if (INTEL_DEBUG
& DEBUG_IOCTL
)
548 fprintf(stderr
, "intelIsAgpMemory( %p ) : %d\n", pointer
, valid
);
554 GLuint
intelAgpOffsetFromVirtual( intelContextPtr intel
, const GLvoid
*pointer
)
556 int offset
= (char *)pointer
- (char *)intel
->intelScreen
->tex
.map
;
558 if (offset
< 0 || offset
> intel
->intelScreen
->tex
.size
)
561 return intel
->intelScreen
->textureOffset
+ offset
;
568 /* Flip the front & back buffes
570 void intelPageFlip( const __DRIdrawablePrivate
*dPriv
)
573 intelContextPtr intel
;
576 if (INTEL_DEBUG
& DEBUG_IOCTL
)
577 fprintf(stderr
, "%s\n", __FUNCTION__
);
580 assert(dPriv
->driContextPriv
);
581 assert(dPriv
->driContextPriv
->driverPrivate
);
583 intel
= (intelContextPtr
) dPriv
->driContextPriv
->driverPrivate
;
585 intelFlush( &intel
->ctx
);
586 LOCK_HARDWARE( intel
);
588 if (dPriv
->pClipRects
) {
589 *(drm_clip_rect_t
*)intel
->sarea
->boxes
= dPriv
->pClipRects
[0];
590 intel
->sarea
->nbox
= 1;
593 ret
= drmCommandNone(intel
->driFd
, DRM_I830_FLIP
);
595 fprintf(stderr
, "%s: %d\n", __FUNCTION__
, ret
);
596 UNLOCK_HARDWARE( intel
);
600 tmp
= intel
->sarea
->last_enqueue
;
601 intelRefillBatchLocked( intel
);
602 UNLOCK_HARDWARE( intel
);
605 intelSetDrawBuffer( &intel
->ctx
, intel
->ctx
.Color
.DriverDrawBuffer
);