patch to import Jon Smirl's work from Bitkeeper
[mesa.git] / src / mesa / drivers / dri / r200 / r200_ioctl.c
1 /* $XFree86$ */
2 /**************************************************************************
3
4 Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
5
6 The Weather Channel (TM) funded Tungsten Graphics to develop the
7 initial release of the Radeon 8500 driver under the XFree86 license.
8 This notice must be preserved.
9
10 Permission is hereby granted, free of charge, to any person obtaining
11 a copy of this software and associated documentation files (the
12 "Software"), to deal in the Software without restriction, including
13 without limitation the rights to use, copy, modify, merge, publish,
14 distribute, sublicense, and/or sell copies of the Software, and to
15 permit persons to whom the Software is furnished to do so, subject to
16 the following conditions:
17
18 The above copyright notice and this permission notice (including the
19 next paragraph) shall be included in all copies or substantial
20 portions of the Software.
21
22 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
23 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
24 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
25 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
26 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
27 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
28 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
29
30 **************************************************************************/
31
32 /*
33 * Authors:
34 * Keith Whitwell <keith@tungstengraphics.com>
35 */
36
37 #include <sched.h>
38 #include <errno.h>
39
40 #include "glheader.h"
41 #include "imports.h"
42 #include "macros.h"
43 #include "context.h"
44 #include "swrast/swrast.h"
45
46 #include "r200_context.h"
47 #include "r200_state.h"
48 #include "r200_ioctl.h"
49 #include "r200_tcl.h"
50 #include "r200_sanity.h"
51 #include "radeon_reg.h"
52
53 #include "vblank.h"
54
55
56 #define R200_TIMEOUT 512
57 #define R200_IDLE_RETRY 16
58
59
60 static void r200WaitForIdle( r200ContextPtr rmesa );
61
62
/* Submit the accumulated command buffer to the kernel via the
 * DRM_RADEON_CMDBUF ioctl.  The caller must already hold the hardware
 * lock.  Returns the ioctl (or sanity-check) result.  Regardless of
 * success, the command store and released-buffer count are reset and
 * the context is marked lost so state is re-emitted next time.
 */
int r200FlushCmdBufLocked( r200ContextPtr rmesa, const char * caller )
{
   int ret, i;
   drmRadeonCmdBuffer cmd;

   if (R200_DEBUG & DEBUG_IOCTL) {
      fprintf(stderr, "%s from %s\n", __FUNCTION__, caller);

      /* The leading '0 &' deliberately disables this very verbose dump. */
      if (0 & R200_DEBUG & DEBUG_VERBOSE)
	 for (i = 0 ; i < rmesa->store.cmd_used ; i += 4 )
	    fprintf(stderr, "%d: %x\n", i/4,
		    *(int *)(&rmesa->store.cmd_buf[i]));
   }

   if (R200_DEBUG & DEBUG_DMA)
      fprintf(stderr, "%s: Releasing %d buffers\n", __FUNCTION__,
	      rmesa->dma.nr_released_bufs);


   /* Optional client-side verification of the command stream before it
    * is handed to the kernel; uses the same cliprect set as the real
    * submission below.
    */
   if (R200_DEBUG & DEBUG_SANITY) {
      if (rmesa->state.scissor.enabled)
	 ret = r200SanityCmdBuffer( rmesa,
				    rmesa->state.scissor.numClipRects,
				    rmesa->state.scissor.pClipRects);
      else
	 ret = r200SanityCmdBuffer( rmesa,
				    rmesa->numClipRects,
				    rmesa->pClipRects);
      if (ret) {
	 fprintf(stderr, "drmSanityCommandWrite: %d\n", ret);
	 goto out;
      }
   }


   if (R200_DEBUG & DEBUG_MEMORY) {
      if (! driValidateTextureHeaps( rmesa->texture_heaps, rmesa->nr_heaps,
				     & rmesa->swapped ) ) {
	 fprintf( stderr, "%s: texture memory is inconsistent - expect "
		  "mangled textures\n", __FUNCTION__ );
      }
   }


   cmd.bufsz = rmesa->store.cmd_used;
   cmd.buf = rmesa->store.cmd_buf;

   /* Clip to the scissor rects when scissoring is enabled, otherwise to
    * the drawable's cliprects.
    */
   if (rmesa->state.scissor.enabled) {
      cmd.nbox = rmesa->state.scissor.numClipRects;
      cmd.boxes = (drmClipRect *)rmesa->state.scissor.pClipRects;
   } else {
      cmd.nbox = rmesa->numClipRects;
      cmd.boxes = (drmClipRect *)rmesa->pClipRects;
   }

   ret = drmCommandWrite( rmesa->dri.fd,
			  DRM_RADEON_CMDBUF,
			  &cmd, sizeof(cmd) );

   if (ret)
      fprintf(stderr, "drmCommandWrite: %d\n", ret);

   if (R200_DEBUG & DEBUG_SYNC) {
      fprintf(stderr, "\nSyncing in %s\n\n", __FUNCTION__);
      r200WaitForIdleLocked( rmesa );
   }


 out:
   /* Reset the store even on failure; lost_context forces full state
    * re-emission on the next submission.
    */
   rmesa->store.primnr = 0;
   rmesa->store.statenr = 0;
   rmesa->store.cmd_used = 0;
   rmesa->dma.nr_released_bufs = 0;
   rmesa->lost_context = 1;
   return ret;
}
139
140
141 /* Note: does not emit any commands to avoid recursion on
142 * r200AllocCmdBuf.
143 */
144 void r200FlushCmdBuf( r200ContextPtr rmesa, const char *caller )
145 {
146 int ret;
147
148 LOCK_HARDWARE( rmesa );
149
150 ret = r200FlushCmdBufLocked( rmesa, caller );
151
152 UNLOCK_HARDWARE( rmesa );
153
154 if (ret) {
155 fprintf(stderr, "drmRadeonCmdBuffer: %d (exiting)\n", ret);
156 exit(ret);
157 }
158 }
159
160
161 /* =============================================================
162 * Hardware vertex buffer handling
163 */
164
165
166 void r200RefillCurrentDmaRegion( r200ContextPtr rmesa )
167 {
168 struct r200_dma_buffer *dmabuf;
169 int fd = rmesa->dri.fd;
170 int index = 0;
171 int size = 0;
172 drmDMAReq dma;
173 int ret;
174
175 if (R200_DEBUG & (DEBUG_IOCTL|DEBUG_DMA))
176 fprintf(stderr, "%s\n", __FUNCTION__);
177
178 if (rmesa->dma.flush) {
179 rmesa->dma.flush( rmesa );
180 }
181
182 if (rmesa->dma.current.buf)
183 r200ReleaseDmaRegion( rmesa, &rmesa->dma.current, __FUNCTION__ );
184
185 if (rmesa->dma.nr_released_bufs > 4)
186 r200FlushCmdBuf( rmesa, __FUNCTION__ );
187
188 dma.context = rmesa->dri.hwContext;
189 dma.send_count = 0;
190 dma.send_list = NULL;
191 dma.send_sizes = NULL;
192 dma.flags = 0;
193 dma.request_count = 1;
194 dma.request_size = RADEON_BUFFER_SIZE;
195 dma.request_list = &index;
196 dma.request_sizes = &size;
197 dma.granted_count = 0;
198
199 LOCK_HARDWARE(rmesa); /* no need to validate */
200
201 while (1) {
202 ret = drmDMA( fd, &dma );
203 if (ret == 0)
204 break;
205
206 if (rmesa->dma.nr_released_bufs) {
207 r200FlushCmdBufLocked( rmesa, __FUNCTION__ );
208 }
209
210 if (rmesa->do_usleeps) {
211 UNLOCK_HARDWARE( rmesa );
212 DO_USLEEP( 1 );
213 LOCK_HARDWARE( rmesa );
214 }
215 }
216
217 UNLOCK_HARDWARE(rmesa);
218
219 if (R200_DEBUG & DEBUG_DMA)
220 fprintf(stderr, "Allocated buffer %d\n", index);
221
222 dmabuf = CALLOC_STRUCT( r200_dma_buffer );
223 dmabuf->buf = &rmesa->r200Screen->buffers->list[index];
224 dmabuf->refcount = 1;
225
226 rmesa->dma.current.buf = dmabuf;
227 rmesa->dma.current.address = dmabuf->buf->address;
228 rmesa->dma.current.end = dmabuf->buf->total;
229 rmesa->dma.current.start = 0;
230 rmesa->dma.current.ptr = 0;
231 }
232
/* Drop this region's reference on its DMA buffer.  When the last
 * reference goes away, queue a DMA_DISCARD command so the kernel can
 * reclaim the buffer once the hardware has finished with it.
 */
void r200ReleaseDmaRegion( r200ContextPtr rmesa,
			   struct r200_dma_region *region,
			   const char *caller )
{
   if (R200_DEBUG & DEBUG_IOCTL)
      fprintf(stderr, "%s from %s\n", __FUNCTION__, caller);

   if (!region->buf)
      return;

   /* Flush pending vertex emission that may still reference this buffer. */
   if (rmesa->dma.flush)
      rmesa->dma.flush( rmesa );

   if (--region->buf->refcount == 0) {
      drmRadeonCmdHeader *cmd;

      if (R200_DEBUG & (DEBUG_IOCTL|DEBUG_DMA))
	 fprintf(stderr, "%s -- DISCARD BUF %d\n", __FUNCTION__,
		 region->buf->buf->idx);

      /* The discard travels in the command stream; the kernel frees the
       * buffer when it processes it, so only our bookkeeping struct is
       * freed here.
       */
      cmd = (drmRadeonCmdHeader *)r200AllocCmdBuf( rmesa, sizeof(*cmd),
						   __FUNCTION__ );
      cmd->dma.cmd_type = RADEON_CMD_DMA_DISCARD;
      cmd->dma.buf_idx = region->buf->buf->idx;
      FREE(region->buf);
      rmesa->dma.nr_released_bufs++;
   }

   region->buf = 0;
   region->start = 0;
}
264
/* Allocates a region from rmesa->dma.current.  If there isn't enough
 * space in current, grab a new buffer (and discard what was left of current)
 *
 * alignment is assumed to be a power of two (the mask trick below
 * requires it).
 */
void r200AllocDmaRegion( r200ContextPtr rmesa,
			 struct r200_dma_region *region,
			 int bytes,
			 int alignment )
{
   if (R200_DEBUG & DEBUG_IOCTL)
      fprintf(stderr, "%s %d\n", __FUNCTION__, bytes);

   if (rmesa->dma.flush)
      rmesa->dma.flush( rmesa );

   if (region->buf)
      r200ReleaseDmaRegion( rmesa, region, __FUNCTION__ );

   /* Round the write pointer up to the requested alignment. */
   alignment--;
   rmesa->dma.current.start = rmesa->dma.current.ptr =
      (rmesa->dma.current.ptr + alignment) & ~alignment;

   /* A refill resets ptr/start to 0, which satisfies any power-of-two
    * alignment.
    */
   if ( rmesa->dma.current.ptr + bytes > rmesa->dma.current.end )
      r200RefillCurrentDmaRegion( rmesa );

   region->start = rmesa->dma.current.start;
   region->ptr = rmesa->dma.current.start;
   region->end = rmesa->dma.current.start + bytes;
   region->address = rmesa->dma.current.address;
   region->buf = rmesa->dma.current.buf;
   region->buf->refcount++;

   /* Re-align the shared pointer to 8 bytes for the next caller; as the
    * original author notes, this is insufficient if alignment > 7.
    */
   rmesa->dma.current.ptr += bytes; /* bug - if alignment > 7 */
   rmesa->dma.current.start =
      rmesa->dma.current.ptr = (rmesa->dma.current.ptr + 0x7) & ~0x7;

   assert( rmesa->dma.current.ptr <= rmesa->dma.current.end );
}
302
303 void r200AllocDmaRegionVerts( r200ContextPtr rmesa,
304 struct r200_dma_region *region,
305 int numverts,
306 int vertsize,
307 int alignment )
308 {
309 r200AllocDmaRegion( rmesa, region, vertsize * numverts, alignment );
310 }
311
312 /* ================================================================
313 * SwapBuffers with client-side throttling
314 */
315
316 static CARD32 r200GetLastFrame(r200ContextPtr rmesa)
317 {
318 drmRadeonGetParam gp;
319 int ret;
320 CARD32 frame;
321
322 gp.param = RADEON_PARAM_LAST_FRAME;
323 gp.value = &frame;
324 ret = drmCommandWriteRead( rmesa->dri.fd, DRM_RADEON_GETPARAM,
325 &gp, sizeof(gp) );
326 if ( ret ) {
327 fprintf( stderr, "%s: drmRadeonGetParam: %d\n", __FUNCTION__, ret );
328 exit(1);
329 }
330
331 return frame;
332 }
333
334 static void r200EmitIrqLocked( r200ContextPtr rmesa )
335 {
336 drmRadeonIrqEmit ie;
337 int ret;
338
339 ie.irq_seq = &rmesa->iw.irq_seq;
340 ret = drmCommandWriteRead( rmesa->dri.fd, DRM_RADEON_IRQ_EMIT,
341 &ie, sizeof(ie) );
342 if ( ret ) {
343 fprintf( stderr, "%s: drmRadeonIrqEmit: %d\n", __FUNCTION__, ret );
344 exit(1);
345 }
346 }
347
348
349 static void r200WaitIrq( r200ContextPtr rmesa )
350 {
351 int ret;
352
353 do {
354 ret = drmCommandWrite( rmesa->dri.fd, DRM_RADEON_IRQ_WAIT,
355 &rmesa->iw, sizeof(rmesa->iw) );
356 } while (ret && (errno == EINTR || errno == EAGAIN));
357
358 if ( ret ) {
359 fprintf( stderr, "%s: drmRadeonIrqWait: %d\n", __FUNCTION__, ret );
360 exit(1);
361 }
362 }
363
364
/* Block until the hardware has completed the previously queued frame.
 * Uses IRQ waits when enabled (do_irqs), otherwise polls the kernel's
 * LAST_FRAME counter.  Called with the hardware lock held; the lock is
 * temporarily dropped while sleeping.
 */
static void r200WaitForFrameCompletion( r200ContextPtr rmesa )
{
   RADEONSAREAPrivPtr sarea = rmesa->sarea;

   if (rmesa->do_irqs) {
      if (r200GetLastFrame(rmesa) < sarea->last_frame) {
	 if (!rmesa->irqsEmitted) {
	    /* No IRQ was armed for this frame; fall back to busy-polling. */
	    while (r200GetLastFrame (rmesa) < sarea->last_frame)
	       ;
	 }
	 else {
	    /* Drop the lock while sleeping on the IRQ. */
	    UNLOCK_HARDWARE( rmesa );
	    r200WaitIrq( rmesa );
	    LOCK_HARDWARE( rmesa );
	 }
	 rmesa->irqsEmitted = 10;
      }

      /* Re-arm an IRQ so the next wait can sleep instead of polling. */
      if (rmesa->irqsEmitted) {
	 r200EmitIrqLocked( rmesa );
	 rmesa->irqsEmitted--;
      }
   }
   else {
      /* Poll, releasing the lock (and optionally sleeping) between reads. */
      while (r200GetLastFrame (rmesa) < sarea->last_frame) {
	 UNLOCK_HARDWARE( rmesa );
	 if (rmesa->do_usleeps)
	    DO_USLEEP( 1 );
	 LOCK_HARDWARE( rmesa );
      }
   }
}
397
398
399
/* Copy the back color buffer to the front color buffer.
 * Called at SwapBuffers time; throttles to at most one outstanding
 * swap and optionally synchronizes to vertical blank.
 */
void r200CopyBuffer( const __DRIdrawablePrivate *dPriv )
{
   r200ContextPtr rmesa;
   GLint nbox, i, ret;
   GLboolean missed_target;
   uint64_t ust;

   assert(dPriv);
   assert(dPriv->driContextPriv);
   assert(dPriv->driContextPriv->driverPrivate);

   rmesa = (r200ContextPtr) dPriv->driContextPriv->driverPrivate;

   if ( R200_DEBUG & DEBUG_IOCTL ) {
      fprintf( stderr, "\n%s( %p )\n\n", __FUNCTION__, rmesa->glCtx );
   }

   /* Flush any buffered vertices before swapping. */
   R200_FIREVERTICES( rmesa );

   LOCK_HARDWARE( rmesa );


   /* Throttle the frame rate -- only allow one pending swap buffers
    * request at a time.
    */
   r200WaitForFrameCompletion( rmesa );
   /* Drop the lock while (possibly) sleeping for vertical blank. */
   UNLOCK_HARDWARE( rmesa );
   driWaitForVBlank( dPriv, & rmesa->vbl_seq, rmesa->vblank_flags, & missed_target );
   LOCK_HARDWARE( rmesa );

   nbox = dPriv->numClipRects;	/* must be in locked region */

   /* The SAREA box array holds at most RADEON_NR_SAREA_CLIPRECTS, so
    * issue the swap ioctl in batches of that many cliprects.
    */
   for ( i = 0 ; i < nbox ; ) {
      GLint nr = MIN2( i + RADEON_NR_SAREA_CLIPRECTS , nbox );
      XF86DRIClipRectPtr box = dPriv->pClipRects;
      XF86DRIClipRectPtr b = rmesa->sarea->boxes;
      GLint n = 0;

      for ( ; i < nr ; i++ ) {
	 *b++ = box[i];
	 n++;
      }
      rmesa->sarea->nbox = n;

      ret = drmCommandNone( rmesa->dri.fd, DRM_RADEON_SWAP );

      if ( ret ) {
	 fprintf( stderr, "DRM_R200_SWAP_BUFFERS: return = %d\n", ret );
	 UNLOCK_HARDWARE( rmesa );
	 exit( 1 );
      }
   }

   UNLOCK_HARDWARE( rmesa );
   rmesa->lost_context = 1;

   /* Swap-interval statistics bookkeeping. */
   rmesa->swap_count++;
   (*rmesa->get_ust)( & ust );
   if ( missed_target ) {
      rmesa->swap_missed_count++;
      rmesa->swap_missed_ust = ust - rmesa->swap_ust;
   }

   rmesa->swap_ust = ust;

   sched_yield();
}
469
/* Swap front/back buffers by flipping the scanout page rather than
 * blitting, then retarget rendering at the buffer that just became
 * the back buffer.
 */
void r200PageFlip( const __DRIdrawablePrivate *dPriv )
{
   r200ContextPtr rmesa;
   GLint ret;
   GLboolean missed_target;

   assert(dPriv);
   assert(dPriv->driContextPriv);
   assert(dPriv->driContextPriv->driverPrivate);

   rmesa = (r200ContextPtr) dPriv->driContextPriv->driverPrivate;

   if ( R200_DEBUG & DEBUG_IOCTL ) {
      fprintf(stderr, "%s: pfCurrentPage: %d\n", __FUNCTION__,
	      rmesa->sarea->pfCurrentPage);
   }

   R200_FIREVERTICES( rmesa );
   LOCK_HARDWARE( rmesa );

   /* Nothing visible to flip to; back off instead of spinning. */
   if (!dPriv->numClipRects) {
      UNLOCK_HARDWARE( rmesa );
      usleep( 10000 );	/* throttle invisible client 10ms */
      return;
   }

   /* Need to do this for the perf box placement:
    */
   {
      XF86DRIClipRectPtr box = dPriv->pClipRects;
      XF86DRIClipRectPtr b = rmesa->sarea->boxes;
      b[0] = box[0];
      rmesa->sarea->nbox = 1;
   }

   /* Throttle the frame rate -- only allow a few pending swap buffers
    * request at a time.
    */
   r200WaitForFrameCompletion( rmesa );
   /* Drop the lock while (possibly) sleeping for vertical blank. */
   UNLOCK_HARDWARE( rmesa );
   driWaitForVBlank( dPriv, & rmesa->vbl_seq, rmesa->vblank_flags, & missed_target );
   if ( missed_target ) {
      rmesa->swap_missed_count++;
      (void) (*rmesa->get_ust)( & rmesa->swap_missed_ust );
   }
   LOCK_HARDWARE( rmesa );

   ret = drmCommandNone( rmesa->dri.fd, DRM_RADEON_FLIP );

   UNLOCK_HARDWARE( rmesa );

   if ( ret ) {
      fprintf( stderr, "DRM_RADEON_FLIP: return = %d\n", ret );
      exit( 1 );
   }

   rmesa->swap_count++;
   (void) (*rmesa->get_ust)( & rmesa->swap_ust );

   /* Point color rendering at whichever buffer is now the back buffer. */
   if ( rmesa->sarea->pfCurrentPage == 1 ) {
      rmesa->state.color.drawOffset = rmesa->r200Screen->frontOffset;
      rmesa->state.color.drawPitch = rmesa->r200Screen->frontPitch;
   } else {
      rmesa->state.color.drawOffset = rmesa->r200Screen->backOffset;
      rmesa->state.color.drawPitch = rmesa->r200Screen->backPitch;
   }

   /* Push the new draw offset/pitch into the hardware state atoms. */
   R200_STATECHANGE( rmesa, ctx );
   rmesa->hw.ctx.cmd[CTX_RB3D_COLOROFFSET] = rmesa->state.color.drawOffset;
   rmesa->hw.ctx.cmd[CTX_RB3D_COLORPITCH] = rmesa->state.color.drawPitch;
}
541
542
543 /* ================================================================
544 * Buffer clear
545 */
/* ctx->Driver.Clear entry point.  Translates the GL clear mask into
 * hardware clear flags, falls back to software rendering for buffers
 * the hardware can't clear, throttles clear ioctls, then issues
 * DRM_RADEON_CLEAR per batch of cliprects.
 */
static void r200Clear( GLcontext *ctx, GLbitfield mask, GLboolean all,
		       GLint cx, GLint cy, GLint cw, GLint ch )
{
   r200ContextPtr rmesa = R200_CONTEXT(ctx);
   __DRIdrawablePrivate *dPriv = rmesa->dri.drawable;
   GLuint flags = 0;
   GLuint color_mask = 0;
   GLint ret, i;

   if ( R200_DEBUG & DEBUG_IOCTL ) {
      fprintf( stderr, "%s: all=%d cx=%d cy=%d cw=%d ch=%d\n",
	       __FUNCTION__, all, cx, cy, cw, ch );
   }

   /* Take and release the lock to refresh the drawable's cliprects,
    * then bail if the window is entirely obscured.
    */
   {
      LOCK_HARDWARE( rmesa );
      UNLOCK_HARDWARE( rmesa );
      if ( dPriv->numClipRects == 0 )
	 return;
   }

   r200EmitState( rmesa );

   /* Need to cope with lostcontext here as kernel relies on
    * some residual state:
    */
   R200_FIREVERTICES( rmesa );

   /* Consume the mask bits the hardware can clear; whatever remains is
    * handed to swrast below.
    */
   if ( mask & DD_FRONT_LEFT_BIT ) {
      flags |= RADEON_FRONT;
      color_mask = rmesa->hw.msk.cmd[MSK_RB3D_PLANEMASK];
      mask &= ~DD_FRONT_LEFT_BIT;
   }

   if ( mask & DD_BACK_LEFT_BIT ) {
      flags |= RADEON_BACK;
      color_mask = rmesa->hw.msk.cmd[MSK_RB3D_PLANEMASK];
      mask &= ~DD_BACK_LEFT_BIT;
   }

   if ( mask & DD_DEPTH_BIT ) {
      if ( ctx->Depth.Mask ) flags |= RADEON_DEPTH; /* FIXME: ??? */
      mask &= ~DD_DEPTH_BIT;
   }

   if ( (mask & DD_STENCIL_BIT) && rmesa->state.stencil.hwBuffer ) {
      flags |= RADEON_STENCIL;
      mask &= ~DD_STENCIL_BIT;
   }

   /* Software fallback for any buffers not handled above. */
   if ( mask ) {
      if (R200_DEBUG & DEBUG_FALLBACKS)
	 fprintf(stderr, "%s: swrast clear, mask: %x\n", __FUNCTION__, mask);
      _swrast_Clear( ctx, mask, all, cx, cy, cw, ch );
   }

   if ( !flags )
      return;

   /* Flip top to bottom */
   cx += dPriv->x;
   cy  = dPriv->y + dPriv->h - cy - ch;

   LOCK_HARDWARE( rmesa );

   /* Throttle the number of clear ioctls we do.
    */
   while ( 1 ) {
      drmRadeonGetParam gp;
      int ret;	/* shadows the outer 'ret' deliberately */
      int clear;

      gp.param = RADEON_PARAM_LAST_CLEAR;
      gp.value = &clear;
      ret = drmCommandWriteRead( rmesa->dri.fd,
				 DRM_RADEON_GETPARAM, &gp, sizeof(gp) );

      if ( ret ) {
	 fprintf( stderr, "%s: drmRadeonGetParam: %d\n", __FUNCTION__, ret );
	 exit(1);
      }

      /* Clear throttling needs more thought.
       */
      if ( rmesa->sarea->last_clear - clear <= 25 ) {
	 break;
      }

      if (rmesa->do_usleeps) {
	 UNLOCK_HARDWARE( rmesa );
	 DO_USLEEP( 1 );
	 LOCK_HARDWARE( rmesa );
      }
   }


   /* Issue the clear in batches of at most RADEON_NR_SAREA_CLIPRECTS
    * boxes.
    */
   for ( i = 0 ; i < dPriv->numClipRects ; ) {
      GLint nr = MIN2( i + RADEON_NR_SAREA_CLIPRECTS, dPriv->numClipRects );
      XF86DRIClipRectPtr box = dPriv->pClipRects;
      XF86DRIClipRectPtr b = rmesa->sarea->boxes;
      drmRadeonClearType clear;
      drmRadeonClearRect depth_boxes[RADEON_NR_SAREA_CLIPRECTS];
      GLint n = 0;

      if ( !all ) {
	 /* Scissored clear: intersect each cliprect with the clear rect,
	  * skipping empty intersections.
	  */
	 for ( ; i < nr ; i++ ) {
	    GLint x = box[i].x1;
	    GLint y = box[i].y1;
	    GLint w = box[i].x2 - x;
	    GLint h = box[i].y2 - y;

	    if ( x < cx ) w -= cx - x, x = cx;
	    if ( y < cy ) h -= cy - y, y = cy;
	    if ( x + w > cx + cw ) w = cx + cw - x;
	    if ( y + h > cy + ch ) h = cy + ch - y;
	    if ( w <= 0 ) continue;
	    if ( h <= 0 ) continue;

	    b->x1 = x;
	    b->y1 = y;
	    b->x2 = x + w;
	    b->y2 = y + h;
	    b++;
	    n++;
	 }
      } else {
	 for ( ; i < nr ; i++ ) {
	    *b++ = box[i];
	    n++;
	 }
      }

      rmesa->sarea->nbox = n;

      clear.flags = flags;
      clear.clear_color = rmesa->state.color.clear;
      clear.clear_depth = 0;	/* not used */
      clear.color_mask = rmesa->hw.msk.cmd[MSK_RB3D_PLANEMASK];
      clear.depth_mask = rmesa->state.stencil.clear;
      clear.depth_boxes = depth_boxes;

      /* Kernel expects depth boxes as floats, mirroring the SAREA boxes. */
      n--;
      b = rmesa->sarea->boxes;
      for ( ; n >= 0 ; n-- ) {
	 depth_boxes[n].f[RADEON_CLEAR_X1] = (float)b[n].x1;
	 depth_boxes[n].f[RADEON_CLEAR_Y1] = (float)b[n].y1;
	 depth_boxes[n].f[RADEON_CLEAR_X2] = (float)b[n].x2;
	 depth_boxes[n].f[RADEON_CLEAR_Y2] = (float)b[n].y2;
	 depth_boxes[n].f[RADEON_CLEAR_DEPTH] = ctx->Depth.Clear;
      }

      ret = drmCommandWrite( rmesa->dri.fd, DRM_RADEON_CLEAR,
			     &clear, sizeof(drmRadeonClearType));


      if ( ret ) {
	 UNLOCK_HARDWARE( rmesa );
	 fprintf( stderr, "DRM_RADEON_CLEAR: return = %d\n", ret );
	 exit( 1 );
      }
   }

   UNLOCK_HARDWARE( rmesa );
   rmesa->lost_context = 1;
}
711
712
713 void r200WaitForIdleLocked( r200ContextPtr rmesa )
714 {
715 int ret;
716 int i = 0;
717
718 do {
719 ret = drmCommandNone( rmesa->dri.fd, DRM_RADEON_CP_IDLE);
720 if (ret)
721 DO_USLEEP( 1 );
722 } while (ret && ++i < 100);
723
724 if ( ret < 0 ) {
725 UNLOCK_HARDWARE( rmesa );
726 fprintf( stderr, "Error: R200 timed out... exiting\n" );
727 exit( -1 );
728 }
729 }
730
731
732 static void r200WaitForIdle( r200ContextPtr rmesa )
733 {
734 LOCK_HARDWARE(rmesa);
735 r200WaitForIdleLocked( rmesa );
736 UNLOCK_HARDWARE(rmesa);
737 }
738
739
/* ctx->Driver.Flush entry point.  Pushes queued work to the kernel in
 * dependency order: pending vertices first, then dirty state atoms,
 * then the accumulated command buffer.
 */
void r200Flush( GLcontext *ctx )
{
   r200ContextPtr rmesa = R200_CONTEXT( ctx );

   if (R200_DEBUG & DEBUG_IOCTL)
      fprintf(stderr, "%s\n", __FUNCTION__);

   if (rmesa->dma.flush)
      rmesa->dma.flush( rmesa );

   if (!is_empty_list(&rmesa->hw.dirty))
      r200EmitState( rmesa );

   if (rmesa->store.cmd_used)
      r200FlushCmdBuf( rmesa, __FUNCTION__ );
}
756
757 /* Make sure all commands have been sent to the hardware and have
758 * completed processing.
759 */
760 void r200Finish( GLcontext *ctx )
761 {
762 r200ContextPtr rmesa = R200_CONTEXT(ctx);
763 r200Flush( ctx );
764
765 if (rmesa->do_irqs) {
766 LOCK_HARDWARE( rmesa );
767 r200EmitIrqLocked( rmesa );
768 UNLOCK_HARDWARE( rmesa );
769 r200WaitIrq( rmesa );
770 }
771 else
772 r200WaitForIdle( rmesa );
773 }
774
775
/* This version of AllocateMemoryNV allocates only agp memory, and
 * only does so after the point at which the driver has been
 * initialized.
 *
 * Theoretically a valid context isn't required.  However, in this
 * implementation, it is, as I'm using the hardware lock to protect
 * the kernel data structures, and the current context to get the
 * device fd.
 *
 * Returns a client-visible pointer into the AGP texture aperture, or
 * NULL on any failure (no context, PCI card, env override, old drm,
 * or kernel allocation failure).  readfreq/writefreq/priority are
 * accepted but not forwarded to the kernel.
 */
void *r200AllocateMemoryNV(GLsizei size, GLfloat readfreq,
			   GLfloat writefreq, GLfloat priority)
{
   GET_CURRENT_CONTEXT(ctx);
   r200ContextPtr rmesa;
   int region_offset;
   drmRadeonMemAlloc alloc;
   int ret;

   if (R200_DEBUG & DEBUG_IOCTL)
      fprintf(stderr, "%s sz %d %f/%f/%f\n", __FUNCTION__, size, readfreq,
	      writefreq, priority);

   if (!ctx || !(rmesa = R200_CONTEXT(ctx)) || rmesa->r200Screen->IsPCI )
      return NULL;

   /* Debug escape hatch to disable AGP allocation entirely. */
   if (getenv("R200_NO_ALLOC"))
      return NULL;

   /* Kernel memory-manager ioctls require drm minor >= 6. */
   if (rmesa->dri.drmMinor < 6)
      return NULL;

   alloc.region = RADEON_MEM_REGION_AGP;
   alloc.alignment = 0;
   alloc.size = size;
   alloc.region_offset = &region_offset;

   ret = drmCommandWriteRead( rmesa->r200Screen->driScreen->fd,
			      DRM_RADEON_ALLOC,
			      &alloc, sizeof(alloc));

   if (ret) {
      fprintf(stderr, "%s: DRM_RADEON_ALLOC ret %d\n", __FUNCTION__, ret);
      return NULL;
   }

   /* Translate the kernel's region offset into a client pointer. */
   {
      char *region_start = (char *)rmesa->r200Screen->agpTextures.map;
      return (void *)(region_start + region_offset);
   }
}
826
827
/* Called via glXFreeMemoryNV().  Releases a region previously handed
 * out by r200AllocateMemoryNV back to the kernel AGP allocator.
 * Silently ignores pointers outside the AGP texture aperture.
 */
void r200FreeMemoryNV(GLvoid *pointer)
{
   GET_CURRENT_CONTEXT(ctx);
   r200ContextPtr rmesa;
   int region_offset;
   drmRadeonMemFree memfree;
   int ret;

   if (R200_DEBUG & DEBUG_IOCTL)
      fprintf(stderr, "%s %p\n", __FUNCTION__, pointer);

   /* NOTE(review): the "no context" message also fires for PCI cards,
    * which never allocate; message text kept as-is.
    */
   if (!ctx || !(rmesa = R200_CONTEXT(ctx)) || rmesa->r200Screen->IsPCI ) {
      fprintf(stderr, "%s: no context\n", __FUNCTION__);
      return;
   }

   if (rmesa->dri.drmMinor < 6)
      return;

   /* Convert the client pointer back to a region offset. */
   region_offset = (char *)pointer - (char *)rmesa->r200Screen->agpTextures.map;

   if (region_offset < 0 ||
       region_offset > rmesa->r200Screen->agpTextures.size) {
      fprintf(stderr, "offset %d outside range 0..%d\n", region_offset,
	      rmesa->r200Screen->agpTextures.size);
      return;
   }

   memfree.region = RADEON_MEM_REGION_AGP;
   memfree.region_offset = region_offset;

   ret = drmCommandWrite( rmesa->r200Screen->driScreen->fd,
			  DRM_RADEON_FREE,
			  &memfree, sizeof(memfree));

   if (ret)
      fprintf(stderr, "%s: DRM_RADEON_FREE ret %d\n", __FUNCTION__, ret);
}
867
868 /* Called via glXGetAGPOffsetMESA() */
869 GLuint r200GetAGPOffset(const GLvoid *pointer)
870 {
871 GET_CURRENT_CONTEXT(ctx);
872 r200ContextPtr rmesa;
873 GLuint card_offset;
874
875 if (!ctx || !(rmesa = R200_CONTEXT(ctx)) ) {
876 fprintf(stderr, "%s: no context\n", __FUNCTION__);
877 return ~0;
878 }
879
880 if (!r200IsAgpMemory( rmesa, pointer, 0 ))
881 return ~0;
882
883 if (rmesa->dri.drmMinor < 6)
884 return ~0;
885
886 card_offset = r200AgpOffsetFromVirtual( rmesa, pointer );
887
888 return card_offset - rmesa->r200Screen->agp_base;
889 }
890
891
892 GLboolean r200IsAgpMemory( r200ContextPtr rmesa, const GLvoid *pointer,
893 GLint size )
894 {
895 int offset = (char *)pointer - (char *)rmesa->r200Screen->agpTextures.map;
896 int valid = (size >= 0 &&
897 offset >= 0 &&
898 offset + size < rmesa->r200Screen->agpTextures.size);
899
900 if (R200_DEBUG & DEBUG_IOCTL)
901 fprintf(stderr, "r200IsAgpMemory( %p ) : %d\n", pointer, valid );
902
903 return valid;
904 }
905
906
907 GLuint r200AgpOffsetFromVirtual( r200ContextPtr rmesa, const GLvoid *pointer )
908 {
909 int offset = (char *)pointer - (char *)rmesa->r200Screen->agpTextures.map;
910
911 if (offset < 0 || offset > rmesa->r200Screen->agpTextures.size)
912 return ~0;
913 else
914 return rmesa->r200Screen->agp_texture_offset + offset;
915 }
916
917
918
919 void r200InitIoctlFuncs( GLcontext *ctx )
920 {
921 ctx->Driver.Clear = r200Clear;
922 ctx->Driver.Finish = r200Finish;
923 ctx->Driver.Flush = r200Flush;
924 }
925