1 /**************************************************************************
3 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 **************************************************************************/
35 #include "swrast/swrast.h"
37 #include "intel_context.h"
38 #include "intel_ioctl.h"
39 #include "intel_batchbuffer.h"
44 static int intelEmitIrqLocked( intelContextPtr intel
)
49 assert(((*(int *)intel
->driHwLock
) & ~DRM_LOCK_CONT
) ==
50 (DRM_LOCK_HELD
|intel
->hHWContext
));
54 ret
= drmCommandWriteRead( intel
->driFd
, DRM_I830_IRQ_EMIT
,
57 fprintf( stderr
, "%s: drmI830IrqEmit: %d\n", __FUNCTION__
, ret
);
62 fprintf(stderr
, "%s --> %d\n", __FUNCTION__
, seq
);
67 static void intelWaitIrq( intelContextPtr intel
, int seq
)
73 fprintf(stderr
, "%s %d\n", __FUNCTION__
, seq
);
78 ret
= drmCommandWrite( intel
->driFd
, DRM_I830_IRQ_WAIT
, &iw
, sizeof(iw
) );
79 } while (ret
== -EAGAIN
|| ret
== -EINTR
);
82 fprintf( stderr
, "%s: drmI830IrqWait: %d\n", __FUNCTION__
, ret
);
84 intel_dump_batchbuffer( intel
->alloc
.offset
,
93 static void age_intel( intelContextPtr intel
, int age
)
97 for (i
= 0 ; i
< MAX_TEXTURE_UNITS
; i
++)
98 if (intel
->CurrentTexObj
[i
])
99 intel
->CurrentTexObj
[i
]->age
= age
;
/* Dump a batch buffer to stderr for debugging.
 *
 * offset: hardware offset of the start of the buffer (printed only).
 * ptr:    CPU pointer to the buffer contents.
 * count:  buffer size -- NOTE(review): the header line labels this
 *         "dwords", but the count/4 loop bound suggests callers pass a
 *         byte count; confirm against call sites.
 */
void intel_dump_batchbuffer( long offset,
                             int *ptr,
                             int count )
{
   int i;

   fprintf(stderr, "\n\n\nSTART BATCH (%d dwords):\n", count);
   for (i = 0; i < count/4; i += 4)
      /* Fix: 'offset' is long, so the conversion must be %lx -- passing
       * a long where printf expects unsigned int (%x) is undefined
       * behavior on LP64 platforms.
       */
      fprintf(stderr, "\t0x%lx: 0x%08x 0x%08x 0x%08x 0x%08x\n",
              offset + i*4, ptr[i], ptr[i+1], ptr[i+2], ptr[i+3]);
   fprintf(stderr, "END BATCH\n\n\n");
}
114 void intelRefillBatchLocked( intelContextPtr intel
, GLboolean allow_unlock
)
116 GLuint last_irq
= intel
->alloc
.irq_emitted
;
117 GLuint half
= intel
->alloc
.size
/ 2;
118 GLuint buf
= (intel
->alloc
.active_buf
^= 1);
120 intel
->alloc
.irq_emitted
= intelEmitIrqLocked( intel
);
123 if (allow_unlock
) UNLOCK_HARDWARE( intel
);
124 intelWaitIrq( intel
, last_irq
);
125 if (allow_unlock
) LOCK_HARDWARE( intel
);
129 fprintf(stderr
, "%s: now using half %d\n", __FUNCTION__
, buf
);
131 intel
->batch
.start_offset
= intel
->alloc
.offset
+ buf
* half
;
132 intel
->batch
.ptr
= (char *)intel
->alloc
.ptr
+ buf
* half
;
133 intel
->batch
.size
= half
- 8;
134 intel
->batch
.space
= half
- 8;
135 assert(intel
->batch
.space
>= 0);
138 #define MI_BATCH_BUFFER_END (0xA<<23)
141 void intelFlushBatchLocked( intelContextPtr intel
,
142 GLboolean ignore_cliprects
,
144 GLboolean allow_unlock
)
146 drmI830BatchBuffer batch
;
148 assert(intel
->locked
);
151 fprintf(stderr
, "%s used %d of %d offset %x..%x refill %d\n",
153 (intel
->batch
.size
- intel
->batch
.space
),
155 intel
->batch
.start_offset
,
156 intel
->batch
.start_offset
+
157 (intel
->batch
.size
- intel
->batch
.space
),
160 /* Throw away non-effective packets. Won't work once we have
161 * hardware contexts which would preserve statechanges beyond a
164 if (intel
->numClipRects
== 0 && !ignore_cliprects
) {
166 /* Without this yeild, an application with no cliprects can hog
167 * the hardware. Without unlocking, the effect is much worse -
168 * effectively a lock-out of other contexts.
171 UNLOCK_HARDWARE( intel
);
173 LOCK_HARDWARE( intel
);
176 /* Note that any state thought to have been emitted actually
179 intel
->batch
.ptr
-= (intel
->batch
.size
- intel
->batch
.space
);
180 intel
->batch
.space
= intel
->batch
.size
;
181 intel
->vtbl
.lost_hardware( intel
);
184 if (intel
->batch
.space
!= intel
->batch
.size
) {
185 batch
.start
= intel
->batch
.start_offset
;
186 batch
.used
= intel
->batch
.size
- intel
->batch
.space
;
187 batch
.cliprects
= intel
->pClipRects
;
188 batch
.num_cliprects
= ignore_cliprects
? 0 : intel
->numClipRects
;
190 batch
.DR4
= ((((GLuint
)intel
->drawX
) & 0xffff) |
191 (((GLuint
)intel
->drawY
) << 16));
193 if (intel
->alloc
.offset
) {
194 if ((batch
.used
& 0x4) == 0) {
195 ((int *)intel
->batch
.ptr
)[0] = 0;
196 ((int *)intel
->batch
.ptr
)[1] = MI_BATCH_BUFFER_END
;
198 intel
->batch
.ptr
+= 0x8;
201 ((int *)intel
->batch
.ptr
)[0] = MI_BATCH_BUFFER_END
;
203 intel
->batch
.ptr
+= 0x4;
208 intel_dump_batchbuffer( batch
.start
,
209 (int *)(intel
->batch
.ptr
- batch
.used
),
213 fprintf(stderr
, "%s: 0x%x..0x%x DR4: %x cliprects: %d\n",
216 batch
.start
+ batch
.used
,
217 batch
.DR4
, batch
.num_cliprects
);
219 intel
->batch
.start_offset
+= batch
.used
;
220 intel
->batch
.size
-= batch
.used
;
222 if (intel
->batch
.size
< 8) {
224 intel
->batch
.space
= intel
->batch
.size
= 0;
227 intel
->batch
.size
-= 8;
228 intel
->batch
.space
= intel
->batch
.size
;
232 assert(intel
->batch
.space
>= 0);
233 assert(batch
.start
>= intel
->alloc
.offset
);
234 assert(batch
.start
< intel
->alloc
.offset
+ intel
->alloc
.size
);
235 assert(batch
.start
+ batch
.used
> intel
->alloc
.offset
);
236 assert(batch
.start
+ batch
.used
<=
237 intel
->alloc
.offset
+ intel
->alloc
.size
);
240 if (intel
->alloc
.offset
) {
241 if (drmCommandWrite (intel
->driFd
, DRM_I830_BATCHBUFFER
, &batch
,
243 fprintf(stderr
, "DRM_I830_BATCHBUFFER: %d\n", -errno
);
244 UNLOCK_HARDWARE(intel
);
248 drmI830CmdBuffer cmd
;
249 cmd
.buf
= intel
->alloc
.ptr
+ batch
.start
;
253 cmd
.num_cliprects
= batch
.num_cliprects
;
254 cmd
.cliprects
= batch
.cliprects
;
256 if (drmCommandWrite (intel
->driFd
, DRM_I830_CMDBUFFER
, &cmd
,
258 fprintf(stderr
, "DRM_I830_CMDBUFFER: %d\n", -errno
);
259 UNLOCK_HARDWARE(intel
);
265 age_intel(intel
, intel
->sarea
->last_enqueue
);
267 /* FIXME: use hardware contexts to avoid 'losing' hardware after
270 intel
->vtbl
.lost_hardware( intel
);
274 intelRefillBatchLocked( intel
, allow_unlock
);
277 void intelFlushBatch( intelContextPtr intel
, GLboolean refill
)
280 intelFlushBatchLocked( intel
, GL_FALSE
, refill
, GL_FALSE
);
283 LOCK_HARDWARE(intel
);
284 intelFlushBatchLocked( intel
, GL_FALSE
, refill
, GL_TRUE
);
285 UNLOCK_HARDWARE(intel
);
295 void intelWaitForIdle( intelContextPtr intel
)
298 fprintf(stderr
, "%s\n", __FUNCTION__
);
300 intel
->vtbl
.emit_flush( intel
);
301 intelFlushBatch( intel
, GL_TRUE
);
303 /* Use an irq to wait for dma idle -- Need to track lost contexts
304 * to shortcircuit consecutive calls to this function:
306 intelWaitIrq( intel
, intel
->alloc
.irq_emitted
);
307 intel
->alloc
.irq_emitted
= 0;
312 void intelFlush( GLcontext
*ctx
)
314 intelContextPtr intel
= INTEL_CONTEXT( ctx
);
317 _swrast_flush( ctx
);
319 INTEL_FIREVERTICES( intel
);
321 if (intel
->batch
.size
!= intel
->batch
.space
)
322 intelFlushBatch( intel
, GL_FALSE
);
325 void intelFinish( GLcontext
*ctx
)
327 intelContextPtr intel
= INTEL_CONTEXT( ctx
);
329 intelWaitForIdle( intel
);
333 void intelClear(GLcontext
*ctx
, GLbitfield mask
, GLboolean all
,
334 GLint cx
, GLint cy
, GLint cw
, GLint ch
)
336 intelContextPtr intel
= INTEL_CONTEXT( ctx
);
337 const GLuint colorMask
= *((GLuint
*) &ctx
->Color
.ColorMask
);
338 GLbitfield tri_mask
= 0;
339 GLbitfield blit_mask
= 0;
340 GLbitfield swrast_mask
= 0;
343 fprintf(stderr
, "%s\n", __FUNCTION__
);
345 /* Take care of cliprects, which are handled differently for
348 intelFlush( &intel
->ctx
);
350 if (mask
& DD_FRONT_LEFT_BIT
) {
351 if (colorMask
== ~0) {
352 blit_mask
|= DD_FRONT_LEFT_BIT
;
355 tri_mask
|= DD_FRONT_LEFT_BIT
;
359 if (mask
& DD_BACK_LEFT_BIT
) {
360 if (colorMask
== ~0) {
361 blit_mask
|= DD_BACK_LEFT_BIT
;
364 tri_mask
|= DD_BACK_LEFT_BIT
;
368 if (mask
& DD_DEPTH_BIT
) {
369 blit_mask
|= DD_DEPTH_BIT
;
372 if (mask
& DD_STENCIL_BIT
) {
373 if (!intel
->hw_stencil
) {
374 swrast_mask
|= DD_STENCIL_BIT
;
376 else if (ctx
->Stencil
.WriteMask
[0] != 0xff) {
377 tri_mask
|= DD_STENCIL_BIT
;
380 blit_mask
|= DD_STENCIL_BIT
;
384 swrast_mask
|= (mask
& DD_ACCUM_BIT
);
387 intelClearWithBlit( ctx
, blit_mask
, all
, cx
, cy
, cw
, ch
);
390 intel
->vtbl
.clear_with_tris( intel
, tri_mask
, all
, cx
, cy
, cw
, ch
);
393 _swrast_Clear( ctx
, swrast_mask
, all
, cx
, cy
, cw
, ch
);
398 void *intelAllocateAGP( intelContextPtr intel
, GLsizei size
)
401 drmI830MemAlloc alloc
;
405 fprintf(stderr
, "%s: %d bytes\n", __FUNCTION__
, size
);
407 alloc
.region
= I830_MEM_REGION_AGP
;
410 alloc
.region_offset
= ®ion_offset
;
412 LOCK_HARDWARE(intel
);
414 /* Make sure the global heap is initialized
416 if (intel
->texture_heaps
[0])
417 driAgeTextures( intel
->texture_heaps
[0] );
420 ret
= drmCommandWriteRead( intel
->driFd
,
422 &alloc
, sizeof(alloc
));
425 fprintf(stderr
, "%s: DRM_I830_ALLOC ret %d\n", __FUNCTION__
, ret
);
426 UNLOCK_HARDWARE(intel
);
431 fprintf(stderr
, "%s: allocated %d bytes\n", __FUNCTION__
, size
);
433 /* Need to propogate this information (agp memory in use) to our
434 * local texture lru. The kernel has already updated the global
435 * lru. An alternative would have been to allocate memory the
436 * usual way and then notify the kernel to pin the allocation.
438 if (intel
->texture_heaps
[0])
439 driAgeTextures( intel
->texture_heaps
[0] );
441 UNLOCK_HARDWARE(intel
);
443 return (void *)((char *)intel
->intelScreen
->tex
.map
+ region_offset
);
446 void intelFreeAGP( intelContextPtr intel
, void *pointer
)
449 drmI830MemFree memfree
;
452 region_offset
= (char *)pointer
- (char *)intel
->intelScreen
->tex
.map
;
454 if (region_offset
< 0 ||
455 region_offset
> intel
->intelScreen
->tex
.size
) {
456 fprintf(stderr
, "offset %d outside range 0..%d\n", region_offset
,
457 intel
->intelScreen
->tex
.size
);
461 memfree
.region
= I830_MEM_REGION_AGP
;
462 memfree
.region_offset
= region_offset
;
464 ret
= drmCommandWrite( intel
->driFd
,
466 &memfree
, sizeof(memfree
));
469 fprintf(stderr
, "%s: DRM_I830_FREE ret %d\n", __FUNCTION__
, ret
);
472 /* This version of AllocateMemoryMESA allocates only agp memory, and
473 * only does so after the point at which the driver has been
476 * Theoretically a valid context isn't required. However, in this
477 * implementation, it is, as I'm using the hardware lock to protect
478 * the kernel data structures, and the current context to get the
481 void *intelAllocateMemoryMESA(__DRInativeDisplay
*dpy
, int scrn
,
482 GLsizei size
, GLfloat readfreq
,
483 GLfloat writefreq
, GLfloat priority
)
485 GET_CURRENT_CONTEXT(ctx
);
487 if (INTEL_DEBUG
& DEBUG_IOCTL
)
488 fprintf(stderr
, "%s sz %d %f/%f/%f\n", __FUNCTION__
, size
, readfreq
,
489 writefreq
, priority
);
491 if (getenv("INTEL_NO_ALLOC"))
494 if (!ctx
|| INTEL_CONTEXT(ctx
) == 0)
497 return intelAllocateAGP( INTEL_CONTEXT(ctx
), size
);
501 /* Called via glXFreeMemoryMESA() */
502 void intelFreeMemoryMESA(__DRInativeDisplay
*dpy
, int scrn
, GLvoid
*pointer
)
504 GET_CURRENT_CONTEXT(ctx
);
505 if (INTEL_DEBUG
& DEBUG_IOCTL
)
506 fprintf(stderr
, "%s %p\n", __FUNCTION__
, pointer
);
508 if (!ctx
|| INTEL_CONTEXT(ctx
) == 0) {
509 fprintf(stderr
, "%s: no context\n", __FUNCTION__
);
513 intelFreeAGP( INTEL_CONTEXT(ctx
), pointer
);
516 /* Called via glXGetMemoryOffsetMESA()
518 * Returns offset of pointer from the start of agp aperture.
520 GLuint
intelGetMemoryOffsetMESA(__DRInativeDisplay
*dpy
, int scrn
,
521 const GLvoid
*pointer
)
523 GET_CURRENT_CONTEXT(ctx
);
524 intelContextPtr intel
;
526 if (!ctx
|| !(intel
= INTEL_CONTEXT(ctx
)) ) {
527 fprintf(stderr
, "%s: no context\n", __FUNCTION__
);
531 if (!intelIsAgpMemory( intel
, pointer
, 0 ))
534 return intelAgpOffsetFromVirtual( intel
, pointer
);
538 GLboolean
intelIsAgpMemory( intelContextPtr intel
, const GLvoid
*pointer
,
541 int offset
= (char *)pointer
- (char *)intel
->intelScreen
->tex
.map
;
542 int valid
= (size
>= 0 &&
544 offset
+ size
< intel
->intelScreen
->tex
.size
);
546 if (INTEL_DEBUG
& DEBUG_IOCTL
)
547 fprintf(stderr
, "intelIsAgpMemory( %p ) : %d\n", pointer
, valid
);
553 GLuint
intelAgpOffsetFromVirtual( intelContextPtr intel
, const GLvoid
*pointer
)
555 int offset
= (char *)pointer
- (char *)intel
->intelScreen
->tex
.map
;
557 if (offset
< 0 || offset
> intel
->intelScreen
->tex
.size
)
560 return intel
->intelScreen
->textureOffset
+ offset
;
/* Flip the front & back buffers
569 void intelPageFlip( const __DRIdrawablePrivate
*dPriv
)
572 intelContextPtr intel
;
575 if (INTEL_DEBUG
& DEBUG_IOCTL
)
576 fprintf(stderr
, "%s\n", __FUNCTION__
);
579 assert(dPriv
->driContextPriv
);
580 assert(dPriv
->driContextPriv
->driverPrivate
);
582 intel
= (intelContextPtr
) dPriv
->driContextPriv
->driverPrivate
;
584 intelFlush( &intel
->ctx
);
585 LOCK_HARDWARE( intel
);
587 if (dPriv
->pClipRects
) {
588 *(drm_clip_rect_t
*)intel
->sarea
->boxes
= dPriv
->pClipRects
[0];
589 intel
->sarea
->nbox
= 1;
592 ret
= drmCommandNone(intel
->driFd
, DRM_I830_FLIP
);
594 fprintf(stderr
, "%s: %d\n", __FUNCTION__
, ret
);
595 UNLOCK_HARDWARE( intel
);
599 tmp
= intel
->sarea
->last_enqueue
;
600 intelRefillBatchLocked( intel
);
601 UNLOCK_HARDWARE( intel
);
604 intelSetDrawBuffer( &intel
->ctx
, intel
->ctx
.Color
.DriverDrawBuffer
);