077afd81e877047e773117f4743ac0d2e2ced3fc
[mesa.git] / src / mesa / drivers / dri / r200 / r200_ioctl.c
1 /* $XFree86: xc/lib/GL/mesa/src/drv/r200/r200_ioctl.c,v 1.4 2002/12/17 00:32:56 dawes Exp $ */
2 /*
3 Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
4
5 The Weather Channel (TM) funded Tungsten Graphics to develop the
6 initial release of the Radeon 8500 driver under the XFree86 license.
7 This notice must be preserved.
8
9 Permission is hereby granted, free of charge, to any person obtaining
10 a copy of this software and associated documentation files (the
11 "Software"), to deal in the Software without restriction, including
12 without limitation the rights to use, copy, modify, merge, publish,
13 distribute, sublicense, and/or sell copies of the Software, and to
14 permit persons to whom the Software is furnished to do so, subject to
15 the following conditions:
16
17 The above copyright notice and this permission notice (including the
18 next paragraph) shall be included in all copies or substantial
19 portions of the Software.
20
21 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
22 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
23 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
24 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
25 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
26 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
27 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
28
29 **************************************************************************/
30
31 /*
32 * Authors:
33 * Keith Whitwell <keith@tungstengraphics.com>
34 */
35
36 #include <sched.h>
37 #include <errno.h>
38
39 #include "glheader.h"
40 #include "imports.h"
41 #include "macros.h"
42 #include "context.h"
43 #include "swrast/swrast.h"
44
45 #include "r200_context.h"
46 #include "r200_state.h"
47 #include "r200_ioctl.h"
48 #include "r200_tcl.h"
49 #include "r200_sanity.h"
50 #include "radeon_reg.h"
51
52 #include "vblank.h"
53
54
55 #define R200_TIMEOUT 512
56 #define R200_IDLE_RETRY 16
57
58
59 static void r200WaitForIdle( r200ContextPtr rmesa );
60
61
/* At this point we were in FlushCmdBufLocked but we had lost our context, so
 * we need to unwire our current cmdbuf, hook the one with the saved state in
 * it, flush it, and then put the current one back. This is so commands at the
 * start of a cmdbuf can rely on the state being kept from the previous one.
 */
static void r200BackUpAndEmitLostStateLocked( r200ContextPtr rmesa )
{
   GLuint nr_released_bufs;
   struct r200_store saved_store;

   /* Nothing to replay if no state has ever been backed up. */
   if (rmesa->backup_store.cmd_used == 0)
      return;

   if (R200_DEBUG & DEBUG_STATE)
      fprintf(stderr, "Emitting backup state on lost context\n");

   /* Clear the flag before flushing: r200FlushCmdBufLocked() would
    * otherwise recurse back into this function.
    */
   rmesa->lost_context = GL_FALSE;

   /* Temporarily swap the backup store in (hiding the released-buffer
    * count so the flush does not discard DMA buffers twice), flush it,
    * then restore the in-progress store.  Statement order matters here.
    */
   nr_released_bufs = rmesa->dma.nr_released_bufs;
   saved_store = rmesa->store;
   rmesa->dma.nr_released_bufs = 0;
   rmesa->store = rmesa->backup_store;
   r200FlushCmdBufLocked( rmesa, __FUNCTION__ );
   rmesa->dma.nr_released_bufs = nr_released_bufs;
   rmesa->store = saved_store;
}
88
/* Submit the accumulated command buffer (rmesa->store) to the kernel via
 * the RADEON_CMDBUF ioctl, then reset the store for reuse.
 * Must be called with the hardware lock held.
 *
 * Returns the ioctl's return code (0 on success).  The store is reset
 * even on failure.
 */
int r200FlushCmdBufLocked( r200ContextPtr rmesa, const char * caller )
{
   int ret, i;
   drm_radeon_cmd_buffer_t cmd;

   /* If the context was lost, replay the backed-up state first so the
    * commands below execute against known hardware state.
    */
   if (rmesa->lost_context)
      r200BackUpAndEmitLostStateLocked( rmesa );

   if (R200_DEBUG & DEBUG_IOCTL) {
      fprintf(stderr, "%s from %s\n", __FUNCTION__, caller);

      /* Deliberately disabled ("0 &"); flip to dump the raw dwords. */
      if (0 & R200_DEBUG & DEBUG_VERBOSE)
	 for (i = 0 ; i < rmesa->store.cmd_used ; i += 4 )
	    fprintf(stderr, "%d: %x\n", i/4,
		    *(int *)(&rmesa->store.cmd_buf[i]));
   }

   if (R200_DEBUG & DEBUG_DMA)
      fprintf(stderr, "%s: Releasing %d buffers\n", __FUNCTION__,
	      rmesa->dma.nr_released_bufs);


   /* Optional software verification of the command stream before it
    * reaches the kernel; on failure, skip submission but still reset.
    */
   if (R200_DEBUG & DEBUG_SANITY) {
      if (rmesa->state.scissor.enabled)
	 ret = r200SanityCmdBuffer( rmesa,
				    rmesa->state.scissor.numClipRects,
				    rmesa->state.scissor.pClipRects);
      else
	 ret = r200SanityCmdBuffer( rmesa,
				    rmesa->numClipRects,
				    rmesa->pClipRects);
      if (ret) {
	 fprintf(stderr, "drmSanityCommandWrite: %d\n", ret);
	 goto out;
      }
   }


   if (R200_DEBUG & DEBUG_MEMORY) {
      if (! driValidateTextureHeaps( rmesa->texture_heaps, rmesa->nr_heaps,
				     & rmesa->swapped ) ) {
	 fprintf( stderr, "%s: texture memory is inconsistent - expect "
		  "mangled textures\n", __FUNCTION__ );
      }
   }


   cmd.bufsz = rmesa->store.cmd_used;
   cmd.buf = rmesa->store.cmd_buf;

   /* Clip the submission against either the scissor rects or the
    * drawable's cliprects.
    */
   if (rmesa->state.scissor.enabled) {
      cmd.nbox = rmesa->state.scissor.numClipRects;
      cmd.boxes = (drm_clip_rect_t *)rmesa->state.scissor.pClipRects;
   } else {
      cmd.nbox = rmesa->numClipRects;
      cmd.boxes = (drm_clip_rect_t *)rmesa->pClipRects;
   }

   ret = drmCommandWrite( rmesa->dri.fd,
                          DRM_RADEON_CMDBUF,
                          &cmd, sizeof(cmd) );

   if (ret)
      fprintf(stderr, "drmCommandWrite: %d\n", ret);

   /* Debug aid: force the GPU idle after every flush. */
   if (R200_DEBUG & DEBUG_SYNC) {
      fprintf(stderr, "\nSyncing in %s\n\n", __FUNCTION__);
      r200WaitForIdleLocked( rmesa );
   }


 out:
   /* Reset the store for the next batch, even on failure. */
   rmesa->store.primnr = 0;
   rmesa->store.statenr = 0;
   rmesa->store.cmd_used = 0;
   rmesa->dma.nr_released_bufs = 0;
   rmesa->save_on_next_emit = 1;

   return ret;
}
169
170
171 /* Note: does not emit any commands to avoid recursion on
172 * r200AllocCmdBuf.
173 */
174 void r200FlushCmdBuf( r200ContextPtr rmesa, const char *caller )
175 {
176 int ret;
177
178 LOCK_HARDWARE( rmesa );
179
180 ret = r200FlushCmdBufLocked( rmesa, caller );
181
182 UNLOCK_HARDWARE( rmesa );
183
184 if (ret) {
185 fprintf(stderr, "drmRadeonCmdBuffer: %d (exiting)\n", ret);
186 exit(ret);
187 }
188 }
189
190
191 /* =============================================================
192 * Hardware vertex buffer handling
193 */
194
195
196 void r200RefillCurrentDmaRegion( r200ContextPtr rmesa )
197 {
198 struct r200_dma_buffer *dmabuf;
199 int fd = rmesa->dri.fd;
200 int index = 0;
201 int size = 0;
202 drmDMAReq dma;
203 int ret;
204
205 if (R200_DEBUG & (DEBUG_IOCTL|DEBUG_DMA))
206 fprintf(stderr, "%s\n", __FUNCTION__);
207
208 if (rmesa->dma.flush) {
209 rmesa->dma.flush( rmesa );
210 }
211
212 if (rmesa->dma.current.buf)
213 r200ReleaseDmaRegion( rmesa, &rmesa->dma.current, __FUNCTION__ );
214
215 if (rmesa->dma.nr_released_bufs > 4)
216 r200FlushCmdBuf( rmesa, __FUNCTION__ );
217
218 dma.context = rmesa->dri.hwContext;
219 dma.send_count = 0;
220 dma.send_list = NULL;
221 dma.send_sizes = NULL;
222 dma.flags = 0;
223 dma.request_count = 1;
224 dma.request_size = RADEON_BUFFER_SIZE;
225 dma.request_list = &index;
226 dma.request_sizes = &size;
227 dma.granted_count = 0;
228
229 LOCK_HARDWARE(rmesa); /* no need to validate */
230
231 while (1) {
232 ret = drmDMA( fd, &dma );
233 if (ret == 0)
234 break;
235
236 if (rmesa->dma.nr_released_bufs) {
237 r200FlushCmdBufLocked( rmesa, __FUNCTION__ );
238 }
239
240 if (rmesa->do_usleeps) {
241 UNLOCK_HARDWARE( rmesa );
242 DO_USLEEP( 1 );
243 LOCK_HARDWARE( rmesa );
244 }
245 }
246
247 UNLOCK_HARDWARE(rmesa);
248
249 if (R200_DEBUG & DEBUG_DMA)
250 fprintf(stderr, "Allocated buffer %d\n", index);
251
252 dmabuf = CALLOC_STRUCT( r200_dma_buffer );
253 dmabuf->buf = &rmesa->r200Screen->buffers->list[index];
254 dmabuf->refcount = 1;
255
256 rmesa->dma.current.buf = dmabuf;
257 rmesa->dma.current.address = dmabuf->buf->address;
258 rmesa->dma.current.end = dmabuf->buf->total;
259 rmesa->dma.current.start = 0;
260 rmesa->dma.current.ptr = 0;
261 }
262
/* Drop a reference on the DMA buffer backing 'region'.  When the last
 * reference goes away, queue a DMA_DISCARD command so the kernel can
 * recycle the buffer after the hardware is done with it.
 */
void r200ReleaseDmaRegion( r200ContextPtr rmesa,
			   struct r200_dma_region *region,
			   const char *caller )
{
   if (R200_DEBUG & DEBUG_IOCTL)
      fprintf(stderr, "%s from %s\n", __FUNCTION__, caller);

   if (!region->buf)
      return;

   /* Flush pending vertices first; they may still reference this buffer. */
   if (rmesa->dma.flush)
      rmesa->dma.flush( rmesa );

   if (--region->buf->refcount == 0) {
      drm_radeon_cmd_header_t *cmd;

      if (R200_DEBUG & (DEBUG_IOCTL|DEBUG_DMA))
	 fprintf(stderr, "%s -- DISCARD BUF %d\n", __FUNCTION__,
		 region->buf->buf->idx);

      /* Emit the discard through the command stream so it is ordered
       * after any rendering that used the buffer.
       */
      cmd = (drm_radeon_cmd_header_t *)r200AllocCmdBuf( rmesa, sizeof(*cmd),
							__FUNCTION__ );
      cmd->dma.cmd_type = RADEON_CMD_DMA_DISCARD;
      cmd->dma.buf_idx = region->buf->buf->idx;
      FREE(region->buf);
      rmesa->dma.nr_released_bufs++;
   }

   region->buf = 0;
   region->start = 0;
}
294
/* Allocates a region from rmesa->dma.current. If there isn't enough
 * space in current, grab a new buffer (and discard what was left of current)
 */
void r200AllocDmaRegion( r200ContextPtr rmesa,
			 struct r200_dma_region *region,
			 int bytes,
			 int alignment )
{
   if (R200_DEBUG & DEBUG_IOCTL)
      fprintf(stderr, "%s %d\n", __FUNCTION__, bytes);

   if (rmesa->dma.flush)
      rmesa->dma.flush( rmesa );

   /* Drop whatever buffer the caller's region still holds. */
   if (region->buf)
      r200ReleaseDmaRegion( rmesa, region, __FUNCTION__ );

   /* Round the current pointer up to the requested alignment.  The mask
    * trick requires alignment to be a power of two.
    */
   alignment--;
   rmesa->dma.current.start = rmesa->dma.current.ptr =
      (rmesa->dma.current.ptr + alignment) & ~alignment;

   /* A fresh buffer starts at offset 0, which satisfies any alignment. */
   if ( rmesa->dma.current.ptr + bytes > rmesa->dma.current.end )
      r200RefillCurrentDmaRegion( rmesa );

   region->start = rmesa->dma.current.start;
   region->ptr = rmesa->dma.current.start;
   region->end = rmesa->dma.current.start + bytes;
   region->address = rmesa->dma.current.address;
   region->buf = rmesa->dma.current.buf;
   region->buf->refcount++;

   /* Re-align the running pointer to 8 bytes for the next allocation.
    * Original author's note: "bug - if alignment > 7" — the 8-byte
    * re-alignment here does not honor larger alignment requests; left
    * as-is since the next call re-applies its own alignment above.
    */
   rmesa->dma.current.ptr += bytes;
   rmesa->dma.current.start =
      rmesa->dma.current.ptr = (rmesa->dma.current.ptr + 0x7) & ~0x7;

   assert( rmesa->dma.current.ptr <= rmesa->dma.current.end );
}
332
333 void r200AllocDmaRegionVerts( r200ContextPtr rmesa,
334 struct r200_dma_region *region,
335 int numverts,
336 int vertsize,
337 int alignment )
338 {
339 r200AllocDmaRegion( rmesa, region, vertsize * numverts, alignment );
340 }
341
342 /* ================================================================
343 * SwapBuffers with client-side throttling
344 */
345
346 static uint32_t r200GetLastFrame(r200ContextPtr rmesa)
347 {
348 drm_radeon_getparam_t gp;
349 int ret;
350 uint32_t frame;
351
352 gp.param = RADEON_PARAM_LAST_FRAME;
353 gp.value = (int *)&frame;
354 ret = drmCommandWriteRead( rmesa->dri.fd, DRM_RADEON_GETPARAM,
355 &gp, sizeof(gp) );
356 if ( ret ) {
357 fprintf( stderr, "%s: drmRadeonGetParam: %d\n", __FUNCTION__, ret );
358 exit(1);
359 }
360
361 return frame;
362 }
363
364 static void r200EmitIrqLocked( r200ContextPtr rmesa )
365 {
366 drm_radeon_irq_emit_t ie;
367 int ret;
368
369 ie.irq_seq = &rmesa->iw.irq_seq;
370 ret = drmCommandWriteRead( rmesa->dri.fd, DRM_RADEON_IRQ_EMIT,
371 &ie, sizeof(ie) );
372 if ( ret ) {
373 fprintf( stderr, "%s: drmRadeonIrqEmit: %d\n", __FUNCTION__, ret );
374 exit(1);
375 }
376 }
377
378
/* Block until the previously emitted IRQ (rmesa->iw.irq_seq) fires.
 * Retries while the wait is interrupted by a signal (EINTR) or returns
 * EAGAIN (presumably a kernel-side timeout — TODO confirm against the
 * radeon drm module).  Any other failure is fatal.
 */
static void r200WaitIrq( r200ContextPtr rmesa )
{
   int ret;

   do {
      ret = drmCommandWrite( rmesa->dri.fd, DRM_RADEON_IRQ_WAIT,
			     &rmesa->iw, sizeof(rmesa->iw) );
   } while (ret && (errno == EINTR || errno == EAGAIN));

   if ( ret ) {
      fprintf( stderr, "%s: drmRadeonIrqWait: %d\n", __FUNCTION__, ret );
      exit(1);
   }
}
393
394
/* Throttle: wait until the hardware has caught up with the most recently
 * queued frame (sarea->last_frame).  Called with the hardware lock held;
 * may temporarily drop and re-acquire it.
 */
static void r200WaitForFrameCompletion( r200ContextPtr rmesa )
{
   drm_radeon_sarea_t *sarea = rmesa->sarea;

   if (rmesa->do_irqs) {
      if (r200GetLastFrame(rmesa) < sarea->last_frame) {
	 if (!rmesa->irqsEmitted) {
	    /* No IRQ in flight to sleep on, so spin on the frame counter.
	     * Note the hardware lock is held throughout this busy-wait.
	     */
	    while (r200GetLastFrame (rmesa) < sarea->last_frame)
	       ;
	 }
	 else {
	    /* Sleep until the outstanding IRQ fires; release the lock so
	     * other clients can make progress in the meantime.
	     */
	    UNLOCK_HARDWARE( rmesa );
	    r200WaitIrq( rmesa );
	    LOCK_HARDWARE( rmesa );
	 }
	 rmesa->irqsEmitted = 10;
      }

      /* Emit an IRQ for this frame so the next wait can sleep on it.
       * NOTE(review): irqsEmitted counts down, so emission stops after
       * 10 frames in which no wait was needed — confirm this matches the
       * radeon driver this code was derived from.
       */
      if (rmesa->irqsEmitted) {
	 r200EmitIrqLocked( rmesa );
	 rmesa->irqsEmitted--;
      }
   }
   else {
      /* No IRQ support: poll, releasing the lock (and optionally
       * sleeping 1us) between checks.
       */
      while (r200GetLastFrame (rmesa) < sarea->last_frame) {
	 UNLOCK_HARDWARE( rmesa );
	 if (rmesa->do_usleeps)
	    DO_USLEEP( 1 );
	 LOCK_HARDWARE( rmesa );
      }
   }
}
427
428
429
/* Copy the back color buffer to the front color buffer.
 *
 * Implements SwapBuffers for a non-pageflipped drawable: flushes pending
 * rendering, throttles against outstanding swaps, waits for vblank, then
 * issues DRM_RADEON_SWAP once per batch of cliprects.
 */
void r200CopyBuffer( const __DRIdrawablePrivate *dPriv )
{
   r200ContextPtr rmesa;
   GLint nbox, i, ret;
   GLboolean missed_target;
   int64_t ust;

   assert(dPriv);
   assert(dPriv->driContextPriv);
   assert(dPriv->driContextPriv->driverPrivate);

   rmesa = (r200ContextPtr) dPriv->driContextPriv->driverPrivate;

   if ( R200_DEBUG & DEBUG_IOCTL ) {
      fprintf( stderr, "\n%s( %p )\n\n", __FUNCTION__, (void *)rmesa->glCtx );
   }

   /* Make sure all queued rendering reaches the hardware first. */
   R200_FIREVERTICES( rmesa );

   LOCK_HARDWARE( rmesa );


   /* Throttle the frame rate -- only allow one pending swap buffers
    * request at a time.
    */
   r200WaitForFrameCompletion( rmesa );
   /* Drop the lock while blocking on vblank so other clients can run. */
   UNLOCK_HARDWARE( rmesa );
   driWaitForVBlank( dPriv, & rmesa->vbl_seq, rmesa->vblank_flags, & missed_target );
   LOCK_HARDWARE( rmesa );

   nbox = dPriv->numClipRects;	/* must be in locked region */

   /* The SAREA holds at most RADEON_NR_SAREA_CLIPRECTS boxes, so copy
    * the cliprects over and issue the swap ioctl in batches.
    */
   for ( i = 0 ; i < nbox ; ) {
      GLint nr = MIN2( i + RADEON_NR_SAREA_CLIPRECTS , nbox );
      drm_clip_rect_t *box = dPriv->pClipRects;
      drm_clip_rect_t *b = rmesa->sarea->boxes;
      GLint n = 0;

      for ( ; i < nr ; i++ ) {
	 *b++ = box[i];
	 n++;
      }
      rmesa->sarea->nbox = n;

      ret = drmCommandNone( rmesa->dri.fd, DRM_RADEON_SWAP );

      if ( ret ) {
	 fprintf( stderr, "DRM_R200_SWAP_BUFFERS: return = %d\n", ret );
	 UNLOCK_HARDWARE( rmesa );
	 exit( 1 );
      }
   }

   UNLOCK_HARDWARE( rmesa );
   /* The kernel swap blit clobbers hardware state behind our back. */
   rmesa->hw.all_dirty = GL_TRUE;

   /* Bookkeeping for swap-interval statistics. */
   rmesa->swap_count++;
   (*rmesa->get_ust)( & ust );
   if ( missed_target ) {
      rmesa->swap_missed_count++;
      rmesa->swap_missed_ust = ust - rmesa->swap_ust;
   }

   rmesa->swap_ust = ust;

   sched_yield();
}
499
/* Implements SwapBuffers by page flipping: throttle, wait for vblank,
 * issue DRM_RADEON_FLIP, then retarget color rendering at whichever
 * buffer is now the back buffer.
 */
void r200PageFlip( const __DRIdrawablePrivate *dPriv )
{
   r200ContextPtr rmesa;
   GLint ret;
   GLboolean missed_target;

   assert(dPriv);
   assert(dPriv->driContextPriv);
   assert(dPriv->driContextPriv->driverPrivate);

   rmesa = (r200ContextPtr) dPriv->driContextPriv->driverPrivate;

   if ( R200_DEBUG & DEBUG_IOCTL ) {
      fprintf(stderr, "%s: pfCurrentPage: %d\n", __FUNCTION__,
	      rmesa->sarea->pfCurrentPage);
   }

   R200_FIREVERTICES( rmesa );
   LOCK_HARDWARE( rmesa );

   /* An unmapped window has nothing to flip to. */
   if (!dPriv->numClipRects) {
      UNLOCK_HARDWARE( rmesa );
      usleep( 10000 );	/* throttle invisible client 10ms */
      return;
   }

   /* Need to do this for the perf box placement:
    */
   {
      drm_clip_rect_t *box = dPriv->pClipRects;
      drm_clip_rect_t *b = rmesa->sarea->boxes;
      b[0] = box[0];
      rmesa->sarea->nbox = 1;
   }

   /* Throttle the frame rate -- only allow a few pending swap buffers
    * request at a time.
    */
   r200WaitForFrameCompletion( rmesa );
   /* Drop the lock while blocking on vblank so other clients can run. */
   UNLOCK_HARDWARE( rmesa );
   driWaitForVBlank( dPriv, & rmesa->vbl_seq, rmesa->vblank_flags, & missed_target );
   if ( missed_target ) {
      rmesa->swap_missed_count++;
      (void) (*rmesa->get_ust)( & rmesa->swap_missed_ust );
   }
   LOCK_HARDWARE( rmesa );

   ret = drmCommandNone( rmesa->dri.fd, DRM_RADEON_FLIP );

   UNLOCK_HARDWARE( rmesa );

   if ( ret ) {
      fprintf( stderr, "DRM_RADEON_FLIP: return = %d\n", ret );
      exit( 1 );
   }

   rmesa->swap_count++;
   (void) (*rmesa->get_ust)( & rmesa->swap_ust );

   /* Point color rendering at the buffer that just became the back
    * buffer (pfCurrentPage tracks which one is being scanned out).
    */
   if ( rmesa->sarea->pfCurrentPage == 1 ) {
      rmesa->state.color.drawOffset = rmesa->r200Screen->frontOffset;
      rmesa->state.color.drawPitch  = rmesa->r200Screen->frontPitch;
   } else {
      rmesa->state.color.drawOffset = rmesa->r200Screen->backOffset;
      rmesa->state.color.drawPitch  = rmesa->r200Screen->backPitch;
   }

   R200_STATECHANGE( rmesa, ctx );
   rmesa->hw.ctx.cmd[CTX_RB3D_COLOROFFSET] = rmesa->state.color.drawOffset
					   + rmesa->r200Screen->fbLocation;
   rmesa->hw.ctx.cmd[CTX_RB3D_COLORPITCH]  = rmesa->state.color.drawPitch;
}
572
573
/* ================================================================
 * Buffer clear
 *
 * Clears the buffers named by 'mask' via the DRM_RADEON_CLEAR ioctl,
 * falling back to swrast for anything the hardware path cannot handle.
 * (cx,cy,cw,ch) describe the scissored clear rect in GL (bottom-up)
 * window coordinates; 'all' means the whole drawable.
 */
static void r200Clear( GLcontext *ctx, GLbitfield mask, GLboolean all,
		       GLint cx, GLint cy, GLint cw, GLint ch )
{
   r200ContextPtr rmesa = R200_CONTEXT(ctx);
   __DRIdrawablePrivate *dPriv = rmesa->dri.drawable;
   GLuint flags = 0;
   /* NOTE(review): color_mask is assigned below but never read. */
   GLuint color_mask = 0;
   GLint ret, i;

   if ( R200_DEBUG & DEBUG_IOCTL ) {
      fprintf( stderr, "%s: all=%d cx=%d cy=%d cw=%d ch=%d\n",
	       __FUNCTION__, all, cx, cy, cw, ch );
   }

   /* NOTE(review): the lock/unlock pair appears to exist only to
    * synchronize with the X server so numClipRects is current, yet the
    * value is read after the lock is dropped — confirm intent.
    */
   {
      LOCK_HARDWARE( rmesa );
      UNLOCK_HARDWARE( rmesa );
      if ( dPriv->numClipRects == 0 )
	 return;
   }

   r200Flush( ctx );

   /* Translate the GL buffer mask into RADEON_* clear flags, peeling
    * off each bit the hardware path can handle.
    */
   if ( mask & DD_FRONT_LEFT_BIT ) {
      flags |= RADEON_FRONT;
      color_mask = rmesa->hw.msk.cmd[MSK_RB3D_PLANEMASK];
      mask &= ~DD_FRONT_LEFT_BIT;
   }

   if ( mask & DD_BACK_LEFT_BIT ) {
      flags |= RADEON_BACK;
      color_mask = rmesa->hw.msk.cmd[MSK_RB3D_PLANEMASK];
      mask &= ~DD_BACK_LEFT_BIT;
   }

   if ( mask & DD_DEPTH_BIT ) {
      if ( ctx->Depth.Mask ) flags |= RADEON_DEPTH; /* FIXME: ??? */
      mask &= ~DD_DEPTH_BIT;
   }

   if ( (mask & DD_STENCIL_BIT) && rmesa->state.stencil.hwBuffer ) {
      flags |= RADEON_STENCIL;
      mask &= ~DD_STENCIL_BIT;
   }

   /* Anything left over goes to the software rasterizer. */
   if ( mask ) {
      if (R200_DEBUG & DEBUG_FALLBACKS)
	 fprintf(stderr, "%s: swrast clear, mask: %x\n", __FUNCTION__, mask);
      _swrast_Clear( ctx, mask, all, cx, cy, cw, ch );
   }

   if ( !flags )
      return;

   /* Flip top to bottom */
   cx += dPriv->x;
   cy  = dPriv->y + dPriv->h - cy - ch;

   LOCK_HARDWARE( rmesa );

   /* Throttle the number of clear ioctls we do.
    */
   while ( 1 ) {
      drm_radeon_getparam_t gp;
      int ret;	/* NOTE(review): shadows the outer 'ret' */
      int clear;

      gp.param = RADEON_PARAM_LAST_CLEAR;
      gp.value = (int *)&clear;
      ret = drmCommandWriteRead( rmesa->dri.fd,
				 DRM_RADEON_GETPARAM, &gp, sizeof(gp) );

      if ( ret ) {
	 fprintf( stderr, "%s: drmRadeonGetParam: %d\n", __FUNCTION__, ret );
	 exit(1);
      }

      /* Clear throttling needs more thought.
       */
      if ( rmesa->sarea->last_clear - clear <= 25 ) {
	 break;
      }

      if (rmesa->do_usleeps) {
	 UNLOCK_HARDWARE( rmesa );
	 DO_USLEEP( 1 );
	 LOCK_HARDWARE( rmesa );
      }
   }

   /* Send current state to the hardware */
   r200FlushCmdBufLocked( rmesa, __FUNCTION__ );

   /* The SAREA holds at most RADEON_NR_SAREA_CLIPRECTS boxes per ioctl,
    * so loop over the drawable's cliprects in batches.
    */
   for ( i = 0 ; i < dPriv->numClipRects ; ) {
      GLint nr = MIN2( i + RADEON_NR_SAREA_CLIPRECTS, dPriv->numClipRects );
      drm_clip_rect_t *box = dPriv->pClipRects;
      drm_clip_rect_t *b = rmesa->sarea->boxes;
      drm_radeon_clear_t clear;
      drm_radeon_clear_rect_t depth_boxes[RADEON_NR_SAREA_CLIPRECTS];
      GLint n = 0;

      if ( !all ) {
	 /* Scissored clear: intersect each cliprect with the clear
	  * rect, skipping empty intersections.
	  */
	 for ( ; i < nr ; i++ ) {
	    GLint x = box[i].x1;
	    GLint y = box[i].y1;
	    GLint w = box[i].x2 - x;
	    GLint h = box[i].y2 - y;

	    if ( x < cx ) w -= cx - x, x = cx;
	    if ( y < cy ) h -= cy - y, y = cy;
	    if ( x + w > cx + cw ) w = cx + cw - x;
	    if ( y + h > cy + ch ) h = cy + ch - y;
	    if ( w <= 0 ) continue;
	    if ( h <= 0 ) continue;

	    b->x1 = x;
	    b->y1 = y;
	    b->x2 = x + w;
	    b->y2 = y + h;
	    b++;
	    n++;
	 }
      } else {
	 /* Full clear: pass the cliprects through unchanged. */
	 for ( ; i < nr ; i++ ) {
	    *b++ = box[i];
	    n++;
	 }
      }

      rmesa->sarea->nbox = n;

      clear.flags = flags;
      clear.clear_color = rmesa->state.color.clear;
      clear.clear_depth = 0;	/* not used */
      clear.color_mask = rmesa->hw.msk.cmd[MSK_RB3D_PLANEMASK];
      clear.depth_mask = rmesa->state.stencil.clear;
      clear.depth_boxes = depth_boxes;

      /* Mirror the boxes into float form for the depth clear path. */
      n--;
      b = rmesa->sarea->boxes;
      for ( ; n >= 0 ; n-- ) {
	 depth_boxes[n].f[CLEAR_X1] = (float)b[n].x1;
	 depth_boxes[n].f[CLEAR_Y1] = (float)b[n].y1;
	 depth_boxes[n].f[CLEAR_X2] = (float)b[n].x2;
	 depth_boxes[n].f[CLEAR_Y2] = (float)b[n].y2;
	 depth_boxes[n].f[CLEAR_DEPTH] = ctx->Depth.Clear;
      }

      ret = drmCommandWrite( rmesa->dri.fd, DRM_RADEON_CLEAR,
			     &clear, sizeof(clear));


      if ( ret ) {
	 UNLOCK_HARDWARE( rmesa );
	 fprintf( stderr, "DRM_RADEON_CLEAR: return = %d\n", ret );
	 exit( 1 );
      }
   }

   UNLOCK_HARDWARE( rmesa );
   /* The kernel clear clobbers hardware state behind our back. */
   rmesa->hw.all_dirty = GL_TRUE;
}
739
740
741 void r200WaitForIdleLocked( r200ContextPtr rmesa )
742 {
743 int ret;
744 int i = 0;
745
746 do {
747 ret = drmCommandNone( rmesa->dri.fd, DRM_RADEON_CP_IDLE);
748 if (ret)
749 DO_USLEEP( 1 );
750 } while (ret && ++i < 100);
751
752 if ( ret < 0 ) {
753 UNLOCK_HARDWARE( rmesa );
754 fprintf( stderr, "Error: R200 timed out... exiting\n" );
755 exit( -1 );
756 }
757 }
758
759
760 static void r200WaitForIdle( r200ContextPtr rmesa )
761 {
762 LOCK_HARDWARE(rmesa);
763 r200WaitForIdleLocked( rmesa );
764 UNLOCK_HARDWARE(rmesa);
765 }
766
767
768 void r200Flush( GLcontext *ctx )
769 {
770 r200ContextPtr rmesa = R200_CONTEXT( ctx );
771
772 if (R200_DEBUG & DEBUG_IOCTL)
773 fprintf(stderr, "%s\n", __FUNCTION__);
774
775 if (rmesa->dma.flush)
776 rmesa->dma.flush( rmesa );
777
778 r200EmitState( rmesa );
779
780 if (rmesa->store.cmd_used)
781 r200FlushCmdBuf( rmesa, __FUNCTION__ );
782 }
783
784 /* Make sure all commands have been sent to the hardware and have
785 * completed processing.
786 */
787 void r200Finish( GLcontext *ctx )
788 {
789 r200ContextPtr rmesa = R200_CONTEXT(ctx);
790 r200Flush( ctx );
791
792 if (rmesa->do_irqs) {
793 LOCK_HARDWARE( rmesa );
794 r200EmitIrqLocked( rmesa );
795 UNLOCK_HARDWARE( rmesa );
796 r200WaitIrq( rmesa );
797 }
798 else
799 r200WaitForIdle( rmesa );
800 }
801
802
803 /* This version of AllocateMemoryMESA allocates only GART memory, and
804 * only does so after the point at which the driver has been
805 * initialized.
806 *
807 * Theoretically a valid context isn't required. However, in this
808 * implementation, it is, as I'm using the hardware lock to protect
809 * the kernel data structures, and the current context to get the
810 * device fd.
811 */
812 void *r200AllocateMemoryMESA(__DRInativeDisplay *dpy, int scrn, GLsizei size,
813 GLfloat readfreq, GLfloat writefreq,
814 GLfloat priority)
815 {
816 GET_CURRENT_CONTEXT(ctx);
817 r200ContextPtr rmesa;
818 int region_offset;
819 drm_radeon_mem_alloc_t alloc;
820 int ret;
821
822 if (R200_DEBUG & DEBUG_IOCTL)
823 fprintf(stderr, "%s sz %d %f/%f/%f\n", __FUNCTION__, size, readfreq,
824 writefreq, priority);
825
826 if (!ctx || !(rmesa = R200_CONTEXT(ctx)) || !rmesa->r200Screen->gartTextures.map)
827 return NULL;
828
829 if (getenv("R200_NO_ALLOC"))
830 return NULL;
831
832 if (rmesa->dri.drmMinor < 6)
833 return NULL;
834
835 alloc.region = RADEON_MEM_REGION_GART;
836 alloc.alignment = 0;
837 alloc.size = size;
838 alloc.region_offset = &region_offset;
839
840 ret = drmCommandWriteRead( rmesa->r200Screen->driScreen->fd,
841 DRM_RADEON_ALLOC,
842 &alloc, sizeof(alloc));
843
844 if (ret) {
845 fprintf(stderr, "%s: DRM_RADEON_ALLOC ret %d\n", __FUNCTION__, ret);
846 return NULL;
847 }
848
849 {
850 char *region_start = (char *)rmesa->r200Screen->gartTextures.map;
851 return (void *)(region_start + region_offset);
852 }
853 }
854
855
856 /* Called via glXFreeMemoryMESA() */
857 void r200FreeMemoryMESA(__DRInativeDisplay *dpy, int scrn, GLvoid *pointer)
858 {
859 GET_CURRENT_CONTEXT(ctx);
860 r200ContextPtr rmesa;
861 ptrdiff_t region_offset;
862 drm_radeon_mem_free_t memfree;
863 int ret;
864
865 if (R200_DEBUG & DEBUG_IOCTL)
866 fprintf(stderr, "%s %p\n", __FUNCTION__, pointer);
867
868 if (!ctx || !(rmesa = R200_CONTEXT(ctx)) || !rmesa->r200Screen->gartTextures.map) {
869 fprintf(stderr, "%s: no context\n", __FUNCTION__);
870 return;
871 }
872
873 if (rmesa->dri.drmMinor < 6)
874 return;
875
876 region_offset = (char *)pointer - (char *)rmesa->r200Screen->gartTextures.map;
877
878 if (region_offset < 0 ||
879 region_offset > rmesa->r200Screen->gartTextures.size) {
880 fprintf(stderr, "offset %d outside range 0..%d\n", region_offset,
881 rmesa->r200Screen->gartTextures.size);
882 return;
883 }
884
885 memfree.region = RADEON_MEM_REGION_GART;
886 memfree.region_offset = region_offset;
887
888 ret = drmCommandWrite( rmesa->r200Screen->driScreen->fd,
889 DRM_RADEON_FREE,
890 &memfree, sizeof(memfree));
891
892 if (ret)
893 fprintf(stderr, "%s: DRM_RADEON_FREE ret %d\n", __FUNCTION__, ret);
894 }
895
896 /* Called via glXGetMemoryOffsetMESA() */
897 GLuint r200GetMemoryOffsetMESA(__DRInativeDisplay *dpy, int scrn, const GLvoid *pointer)
898 {
899 GET_CURRENT_CONTEXT(ctx);
900 r200ContextPtr rmesa;
901 GLuint card_offset;
902
903 if (!ctx || !(rmesa = R200_CONTEXT(ctx)) ) {
904 fprintf(stderr, "%s: no context\n", __FUNCTION__);
905 return ~0;
906 }
907
908 if (!r200IsGartMemory( rmesa, pointer, 0 ))
909 return ~0;
910
911 if (rmesa->dri.drmMinor < 6)
912 return ~0;
913
914 card_offset = r200GartOffsetFromVirtual( rmesa, pointer );
915
916 return card_offset - rmesa->r200Screen->gart_base;
917 }
918
919 GLboolean r200IsGartMemory( r200ContextPtr rmesa, const GLvoid *pointer,
920 GLint size )
921 {
922 ptrdiff_t offset = (char *)pointer - (char *)rmesa->r200Screen->gartTextures.map;
923 int valid = (size >= 0 &&
924 offset >= 0 &&
925 offset + size < rmesa->r200Screen->gartTextures.size);
926
927 if (R200_DEBUG & DEBUG_IOCTL)
928 fprintf(stderr, "r200IsGartMemory( %p ) : %d\n", pointer, valid );
929
930 return valid;
931 }
932
933
934 GLuint r200GartOffsetFromVirtual( r200ContextPtr rmesa, const GLvoid *pointer )
935 {
936 ptrdiff_t offset = (char *)pointer - (char *)rmesa->r200Screen->gartTextures.map;
937
938 if (offset < 0 || offset > rmesa->r200Screen->gartTextures.size)
939 return ~0;
940 else
941 return rmesa->r200Screen->gart_texture_offset + offset;
942 }
943
944
945
946 void r200InitIoctlFuncs( struct dd_function_table *functions )
947 {
948 functions->Clear = r200Clear;
949 functions->Finish = r200Finish;
950 functions->Flush = r200Flush;
951 }
952