fix up radeon span functions using latest r200 code from Brian.
[mesa.git] / src / mesa / drivers / dri / r200 / r200_ioctl.c
1 /* $XFree86: xc/lib/GL/mesa/src/drv/r200/r200_ioctl.c,v 1.4 2002/12/17 00:32:56 dawes Exp $ */
2 /*
3 Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
4
5 The Weather Channel (TM) funded Tungsten Graphics to develop the
6 initial release of the Radeon 8500 driver under the XFree86 license.
7 This notice must be preserved.
8
9 Permission is hereby granted, free of charge, to any person obtaining
10 a copy of this software and associated documentation files (the
11 "Software"), to deal in the Software without restriction, including
12 without limitation the rights to use, copy, modify, merge, publish,
13 distribute, sublicense, and/or sell copies of the Software, and to
14 permit persons to whom the Software is furnished to do so, subject to
15 the following conditions:
16
17 The above copyright notice and this permission notice (including the
18 next paragraph) shall be included in all copies or substantial
19 portions of the Software.
20
21 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
22 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
23 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
24 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
25 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
26 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
27 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
28
29 **************************************************************************/
30
31 /*
32 * Authors:
33 * Keith Whitwell <keith@tungstengraphics.com>
34 */
35
36 #include <sched.h>
37 #include <errno.h>
38
39 #include "glheader.h"
40 #include "imports.h"
41 #include "macros.h"
42 #include "context.h"
43 #include "swrast/swrast.h"
44
45 #include "r200_context.h"
46 #include "r200_state.h"
47 #include "r200_ioctl.h"
48 #include "r200_tcl.h"
49 #include "r200_sanity.h"
50 #include "radeon_reg.h"
51
52 #include "drirenderbuffer.h"
53 #include "vblank.h"
54
55 #define R200_TIMEOUT 512
56 #define R200_IDLE_RETRY 16
57
58
59 static void r200WaitForIdle( r200ContextPtr rmesa );
60
61
62 /* At this point we were in FlushCmdBufLocked but we had lost our context, so
63 * we need to unwire our current cmdbuf, hook the one with the saved state in
64 * it, flush it, and then put the current one back. This is so commands at the
65 * start of a cmdbuf can rely on the state being kept from the previous one.
66 */
67 static void r200BackUpAndEmitLostStateLocked( r200ContextPtr rmesa )
68 {
69 GLuint nr_released_bufs;
70 struct r200_store saved_store;
71
72 if (rmesa->backup_store.cmd_used == 0)
73 return;
74
75 if (R200_DEBUG & DEBUG_STATE)
76 fprintf(stderr, "Emitting backup state on lost context\n");
77
78 rmesa->lost_context = GL_FALSE;
79
80 nr_released_bufs = rmesa->dma.nr_released_bufs;
81 saved_store = rmesa->store;
82 rmesa->dma.nr_released_bufs = 0;
83 rmesa->store = rmesa->backup_store;
84 r200FlushCmdBufLocked( rmesa, __FUNCTION__ );
85 rmesa->dma.nr_released_bufs = nr_released_bufs;
86 rmesa->store = saved_store;
87 }
88
/* Submit the accumulated command buffer to the kernel via the
 * DRM_RADEON_CMDBUF ioctl and reset the store for reuse.  Must be called
 * with the hardware lock held.  Returns the ioctl (or sanity-check)
 * result, 0 on success; the store is reset regardless of the outcome.
 */
int r200FlushCmdBufLocked( r200ContextPtr rmesa, const char * caller )
{
   int ret, i;
   drm_radeon_cmd_buffer_t cmd;

   /* If the context was lost, replay the saved backup state first so
    * this cmdbuf starts from known hardware state.
    */
   if (rmesa->lost_context)
      r200BackUpAndEmitLostStateLocked( rmesa );

   if (R200_DEBUG & DEBUG_IOCTL) {
      fprintf(stderr, "%s from %s\n", __FUNCTION__, caller);

      /* Raw dump of the buffer; permanently disabled by the leading 0. */
      if (0 & R200_DEBUG & DEBUG_VERBOSE)
	 for (i = 0 ; i < rmesa->store.cmd_used ; i += 4 )
	    fprintf(stderr, "%d: %x\n", i/4,
		    *(int *)(&rmesa->store.cmd_buf[i]));
   }

   if (R200_DEBUG & DEBUG_DMA)
      fprintf(stderr, "%s: Releasing %d buffers\n", __FUNCTION__,
	      rmesa->dma.nr_released_bufs);


   /* Optional client-side validation of the command stream before it is
    * handed to the kernel; a failure skips submission entirely.
    */
   if (R200_DEBUG & DEBUG_SANITY) {
      if (rmesa->state.scissor.enabled)
	 ret = r200SanityCmdBuffer( rmesa,
				    rmesa->state.scissor.numClipRects,
				    rmesa->state.scissor.pClipRects);
      else
	 ret = r200SanityCmdBuffer( rmesa,
				    rmesa->numClipRects,
				    rmesa->pClipRects);
      if (ret) {
	 fprintf(stderr, "drmSanityCommandWrite: %d\n", ret);
	 goto out;
      }
   }


   if (R200_DEBUG & DEBUG_MEMORY) {
      if (! driValidateTextureHeaps( rmesa->texture_heaps, rmesa->nr_heaps,
				     & rmesa->swapped ) ) {
	 fprintf( stderr, "%s: texture memory is inconsistent - expect "
		  "mangled textures\n", __FUNCTION__ );
      }
   }


   cmd.bufsz = rmesa->store.cmd_used;
   cmd.buf = rmesa->store.cmd_buf;

   /* Clip the submission to the scissor rects when scissoring is on,
    * otherwise to the drawable's cliprects.
    */
   if (rmesa->state.scissor.enabled) {
      cmd.nbox = rmesa->state.scissor.numClipRects;
      cmd.boxes = (drm_clip_rect_t *)rmesa->state.scissor.pClipRects;
   } else {
      cmd.nbox = rmesa->numClipRects;
      cmd.boxes = (drm_clip_rect_t *)rmesa->pClipRects;
   }

   ret = drmCommandWrite( rmesa->dri.fd,
			  DRM_RADEON_CMDBUF,
			  &cmd, sizeof(cmd) );

   if (ret)
      fprintf(stderr, "drmCommandWrite: %d\n", ret);

   /* Debug aid: force the CP idle after every submission. */
   if (R200_DEBUG & DEBUG_SYNC) {
      fprintf(stderr, "\nSyncing in %s\n\n", __FUNCTION__);
      r200WaitForIdleLocked( rmesa );
   }


 out:
   /* Reset the store even on failure so we don't resubmit bad data. */
   rmesa->store.primnr = 0;
   rmesa->store.statenr = 0;
   rmesa->store.cmd_used = 0;
   rmesa->dma.nr_released_bufs = 0;
   rmesa->save_on_next_emit = 1;

   return ret;
}
169
170
171 /* Note: does not emit any commands to avoid recursion on
172 * r200AllocCmdBuf.
173 */
174 void r200FlushCmdBuf( r200ContextPtr rmesa, const char *caller )
175 {
176 int ret;
177
178 LOCK_HARDWARE( rmesa );
179
180 ret = r200FlushCmdBufLocked( rmesa, caller );
181
182 UNLOCK_HARDWARE( rmesa );
183
184 if (ret) {
185 fprintf(stderr, "drmRadeonCmdBuffer: %d (exiting)\n", ret);
186 exit(ret);
187 }
188 }
189
190
191 /* =============================================================
192 * Hardware vertex buffer handling
193 */
194
195
196 void r200RefillCurrentDmaRegion( r200ContextPtr rmesa )
197 {
198 struct r200_dma_buffer *dmabuf;
199 int fd = rmesa->dri.fd;
200 int index = 0;
201 int size = 0;
202 drmDMAReq dma;
203 int ret;
204
205 if (R200_DEBUG & (DEBUG_IOCTL|DEBUG_DMA))
206 fprintf(stderr, "%s\n", __FUNCTION__);
207
208 if (rmesa->dma.flush) {
209 rmesa->dma.flush( rmesa );
210 }
211
212 if (rmesa->dma.current.buf)
213 r200ReleaseDmaRegion( rmesa, &rmesa->dma.current, __FUNCTION__ );
214
215 if (rmesa->dma.nr_released_bufs > 4)
216 r200FlushCmdBuf( rmesa, __FUNCTION__ );
217
218 dma.context = rmesa->dri.hwContext;
219 dma.send_count = 0;
220 dma.send_list = NULL;
221 dma.send_sizes = NULL;
222 dma.flags = 0;
223 dma.request_count = 1;
224 dma.request_size = RADEON_BUFFER_SIZE;
225 dma.request_list = &index;
226 dma.request_sizes = &size;
227 dma.granted_count = 0;
228
229 LOCK_HARDWARE(rmesa); /* no need to validate */
230
231 while (1) {
232 ret = drmDMA( fd, &dma );
233 if (ret == 0)
234 break;
235
236 if (rmesa->dma.nr_released_bufs) {
237 r200FlushCmdBufLocked( rmesa, __FUNCTION__ );
238 }
239
240 if (rmesa->do_usleeps) {
241 UNLOCK_HARDWARE( rmesa );
242 DO_USLEEP( 1 );
243 LOCK_HARDWARE( rmesa );
244 }
245 }
246
247 UNLOCK_HARDWARE(rmesa);
248
249 if (R200_DEBUG & DEBUG_DMA)
250 fprintf(stderr, "Allocated buffer %d\n", index);
251
252 dmabuf = CALLOC_STRUCT( r200_dma_buffer );
253 dmabuf->buf = &rmesa->r200Screen->buffers->list[index];
254 dmabuf->refcount = 1;
255
256 rmesa->dma.current.buf = dmabuf;
257 rmesa->dma.current.address = dmabuf->buf->address;
258 rmesa->dma.current.end = dmabuf->buf->total;
259 rmesa->dma.current.start = 0;
260 rmesa->dma.current.ptr = 0;
261 }
262
/* Drop one reference on the DMA region's underlying buffer.  When the
 * last reference goes away, queue a DMA_DISCARD command so the kernel
 * reclaims the buffer after the hardware has consumed it.
 */
void r200ReleaseDmaRegion( r200ContextPtr rmesa,
			     struct r200_dma_region *region,
			     const char *caller )
{
   if (R200_DEBUG & DEBUG_IOCTL)
      fprintf(stderr, "%s from %s\n", __FUNCTION__, caller);

   if (!region->buf)
      return;

   /* Flush any vertices still pointing into this region before it can
    * be discarded.
    */
   if (rmesa->dma.flush)
      rmesa->dma.flush( rmesa );

   if (--region->buf->refcount == 0) {
      drm_radeon_cmd_header_t *cmd;

      if (R200_DEBUG & (DEBUG_IOCTL|DEBUG_DMA))
	 fprintf(stderr, "%s -- DISCARD BUF %d\n", __FUNCTION__,
		 region->buf->buf->idx);

      /* The discard travels in the command stream, so it is ordered
       * after every prior use of the buffer.  NOTE(review): the
       * r200AllocCmdBuf result is dereferenced unchecked -- presumably
       * it cannot fail; confirm.
       */
      cmd = (drm_radeon_cmd_header_t *)r200AllocCmdBuf( rmesa, sizeof(*cmd),
							__FUNCTION__ );
      cmd->dma.cmd_type = RADEON_CMD_DMA_DISCARD;
      cmd->dma.buf_idx = region->buf->buf->idx;
      FREE(region->buf);
      rmesa->dma.nr_released_bufs++;
   }

   region->buf = NULL;
   region->start = 0;
}
294
295 /* Allocates a region from rmesa->dma.current. If there isn't enough
296 * space in current, grab a new buffer (and discard what was left of current)
297 */
/* Carve `bytes` (aligned to `alignment`, assumed a power of two --
 * TODO confirm callers guarantee this) out of rmesa->dma.current into
 * `region`, refilling `current` from the kernel when it lacks space.
 */
void r200AllocDmaRegion( r200ContextPtr rmesa,
			   struct r200_dma_region *region,
			   int bytes,
			   int alignment )
{
   if (R200_DEBUG & DEBUG_IOCTL)
      fprintf(stderr, "%s %d\n", __FUNCTION__, bytes);

   /* Complete any primitive being built before moving the pointer. */
   if (rmesa->dma.flush)
      rmesa->dma.flush( rmesa );

   if (region->buf)
      r200ReleaseDmaRegion( rmesa, region, __FUNCTION__ );

   /* Round the current pointer up to the requested alignment. */
   alignment--;
   rmesa->dma.current.start = rmesa->dma.current.ptr =
      (rmesa->dma.current.ptr + alignment) & ~alignment;

   if ( rmesa->dma.current.ptr + bytes > rmesa->dma.current.end )
      r200RefillCurrentDmaRegion( rmesa );

   /* Hand the carved span to the caller and take a reference on the
    * backing buffer.
    */
   region->start = rmesa->dma.current.start;
   region->ptr = rmesa->dma.current.start;
   region->end = rmesa->dma.current.start + bytes;
   region->address = rmesa->dma.current.address;
   region->buf = rmesa->dma.current.buf;
   region->buf->refcount++;

   rmesa->dma.current.ptr += bytes; /* bug - if alignment > 7 */
   /* Keep the running pointer 8-byte aligned for the next allocation. */
   rmesa->dma.current.start =
      rmesa->dma.current.ptr = (rmesa->dma.current.ptr + 0x7) & ~0x7;

   assert( rmesa->dma.current.ptr <= rmesa->dma.current.end );
}
332
333 /* ================================================================
334 * SwapBuffers with client-side throttling
335 */
336
337 static u_int32_t r200GetLastFrame(r200ContextPtr rmesa)
338 {
339 drm_radeon_getparam_t gp;
340 int ret;
341 u_int32_t frame;
342
343 gp.param = RADEON_PARAM_LAST_FRAME;
344 gp.value = (int *)&frame;
345 ret = drmCommandWriteRead( rmesa->dri.fd, DRM_RADEON_GETPARAM,
346 &gp, sizeof(gp) );
347 if ( ret ) {
348 fprintf( stderr, "%s: drmRadeonGetParam: %d\n", __FUNCTION__, ret );
349 exit(1);
350 }
351
352 return frame;
353 }
354
355 static void r200EmitIrqLocked( r200ContextPtr rmesa )
356 {
357 drm_radeon_irq_emit_t ie;
358 int ret;
359
360 ie.irq_seq = &rmesa->iw.irq_seq;
361 ret = drmCommandWriteRead( rmesa->dri.fd, DRM_RADEON_IRQ_EMIT,
362 &ie, sizeof(ie) );
363 if ( ret ) {
364 fprintf( stderr, "%s: drmRadeonIrqEmit: %d\n", __FUNCTION__, ret );
365 exit(1);
366 }
367 }
368
369
370 static void r200WaitIrq( r200ContextPtr rmesa )
371 {
372 int ret;
373
374 do {
375 ret = drmCommandWrite( rmesa->dri.fd, DRM_RADEON_IRQ_WAIT,
376 &rmesa->iw, sizeof(rmesa->iw) );
377 } while (ret && (errno == EINTR || errno == EAGAIN));
378
379 if ( ret ) {
380 fprintf( stderr, "%s: drmRadeonIrqWait: %d\n", __FUNCTION__, ret );
381 exit(1);
382 }
383 }
384
385
/* Throttle: wait until the hardware has retired the previously queued
 * frame (sarea->last_frame), either by sleeping on an IRQ or by
 * polling the frame counter.  Called with the hardware lock held.
 */
static void r200WaitForFrameCompletion( r200ContextPtr rmesa )
{
   drm_radeon_sarea_t *sarea = rmesa->sarea;

   if (rmesa->do_irqs) {
      if (r200GetLastFrame(rmesa) < sarea->last_frame) {
	 if (!rmesa->irqsEmitted) {
	    /* No IRQ outstanding to sleep on yet: busy-poll the frame
	     * counter.  NOTE(review): this spin holds the hardware lock.
	     */
	    while (r200GetLastFrame (rmesa) < sarea->last_frame)
	       ;
	 }
	 else {
	    /* Sleep in the kernel until the previously emitted IRQ fires. */
	    UNLOCK_HARDWARE( rmesa );
	    r200WaitIrq( rmesa );
	    LOCK_HARDWARE( rmesa );
	 }
	 /* Re-prime the countdown of frames that get a fence IRQ. */
	 rmesa->irqsEmitted = 10;
      }

      if (rmesa->irqsEmitted) {
	 r200EmitIrqLocked( rmesa );
	 rmesa->irqsEmitted--;
      }
   }
   else {
      /* No IRQ support: poll, dropping the lock each iteration so the
       * X server and other clients can make progress.
       */
      while (r200GetLastFrame (rmesa) < sarea->last_frame) {
	 UNLOCK_HARDWARE( rmesa );
	 if (rmesa->do_usleeps)
	    DO_USLEEP( 1 );
	 LOCK_HARDWARE( rmesa );
      }
   }
}
418
419
420
421 /* Copy the back color buffer to the front color buffer.
422 */
/* Copy the back color buffer to the front color buffer (SwapBuffers),
 * with client-side frame throttling and vblank synchronization.
 */
void r200CopyBuffer( const __DRIdrawablePrivate *dPriv )
{
   r200ContextPtr rmesa;
   GLint nbox, i, ret;
   GLboolean missed_target;
   int64_t ust;

   assert(dPriv);
   assert(dPriv->driContextPriv);
   assert(dPriv->driContextPriv->driverPrivate);

   rmesa = (r200ContextPtr) dPriv->driContextPriv->driverPrivate;

   if ( R200_DEBUG & DEBUG_IOCTL ) {
      fprintf( stderr, "\n%s( %p )\n\n", __FUNCTION__, (void *)rmesa->glCtx );
   }

   /* Push all queued rendering to the kernel before swapping. */
   R200_FIREVERTICES( rmesa );

   LOCK_HARDWARE( rmesa );


   /* Throttle the frame rate -- only allow one pending swap buffers
    * request at a time.
    */
   r200WaitForFrameCompletion( rmesa );
   UNLOCK_HARDWARE( rmesa );
   driWaitForVBlank( dPriv, & rmesa->vbl_seq, rmesa->vblank_flags, & missed_target );
   LOCK_HARDWARE( rmesa );

   nbox = dPriv->numClipRects;	/* must be in locked region */

   /* Issue the swap ioctl in batches of at most
    * RADEON_NR_SAREA_CLIPRECTS boxes, the SAREA's capacity.
    */
   for ( i = 0 ; i < nbox ; ) {
      GLint nr = MIN2( i + RADEON_NR_SAREA_CLIPRECTS , nbox );
      drm_clip_rect_t *box = dPriv->pClipRects;
      drm_clip_rect_t *b = rmesa->sarea->boxes;
      GLint n = 0;

      for ( ; i < nr ; i++ ) {
	 *b++ = box[i];
	 n++;
      }
      rmesa->sarea->nbox = n;

      ret = drmCommandNone( rmesa->dri.fd, DRM_RADEON_SWAP );

      if ( ret ) {
	 fprintf( stderr, "DRM_R200_SWAP_BUFFERS: return = %d\n", ret );
	 UNLOCK_HARDWARE( rmesa );
	 exit( 1 );
      }
   }

   UNLOCK_HARDWARE( rmesa );
   /* The swap blit may have clobbered state; re-emit everything. */
   rmesa->hw.all_dirty = GL_TRUE;

   /* Swap-interval statistics book-keeping. */
   rmesa->swap_count++;
   (*dri_interface->getUST)( & ust );
   if ( missed_target ) {
      rmesa->swap_missed_count++;
      rmesa->swap_missed_ust = ust - rmesa->swap_ust;
   }

   rmesa->swap_ust = ust;

   sched_yield();
}
490
/* Swap front/back by flipping the scanout page rather than blitting.
 * Throttles like r200CopyBuffer and then repoints the renderbuffers at
 * the new back page.
 */
void r200PageFlip( const __DRIdrawablePrivate *dPriv )
{
   r200ContextPtr rmesa;
   GLint ret;
   GLboolean missed_target;

   assert(dPriv);
   assert(dPriv->driContextPriv);
   assert(dPriv->driContextPriv->driverPrivate);

   rmesa = (r200ContextPtr) dPriv->driContextPriv->driverPrivate;

   if ( R200_DEBUG & DEBUG_IOCTL ) {
      fprintf(stderr, "%s: pfCurrentPage: %d\n", __FUNCTION__,
	      rmesa->sarea->pfCurrentPage);
   }

   R200_FIREVERTICES( rmesa );
   LOCK_HARDWARE( rmesa );

   /* A fully obscured window has nothing to flip; don't spin. */
   if (!dPriv->numClipRects) {
      UNLOCK_HARDWARE( rmesa );
      usleep( 10000 );	/* throttle invisible client 10ms */
      return;
   }

   /* Need to do this for the perf box placement:
    */
   {
      drm_clip_rect_t *box = dPriv->pClipRects;
      drm_clip_rect_t *b = rmesa->sarea->boxes;
      b[0] = box[0];
      rmesa->sarea->nbox = 1;
   }

   /* Throttle the frame rate -- only allow a few pending swap buffers
    * request at a time.
    */
   r200WaitForFrameCompletion( rmesa );
   UNLOCK_HARDWARE( rmesa );
   driWaitForVBlank( dPriv, & rmesa->vbl_seq, rmesa->vblank_flags, & missed_target );
   if ( missed_target ) {
      rmesa->swap_missed_count++;
      (void) (*dri_interface->getUST)( & rmesa->swap_missed_ust );
   }
   LOCK_HARDWARE( rmesa );

   ret = drmCommandNone( rmesa->dri.fd, DRM_RADEON_FLIP );

   UNLOCK_HARDWARE( rmesa );

   if ( ret ) {
      fprintf( stderr, "DRM_RADEON_FLIP: return = %d\n", ret );
      exit( 1 );
   }

   rmesa->swap_count++;
   (void) (*dri_interface->getUST)( & rmesa->swap_ust );

/* Old direct draw-offset update path, kept disabled for reference. */
#if 000
   if ( rmesa->sarea->pfCurrentPage == 1 ) {
	 rmesa->state.color.drawOffset = rmesa->r200Screen->frontOffset;
	 rmesa->state.color.drawPitch  = rmesa->r200Screen->frontPitch;
   } else {
	 rmesa->state.color.drawOffset = rmesa->r200Screen->backOffset;
	 rmesa->state.color.drawPitch  = rmesa->r200Screen->backPitch;
   }

   R200_STATECHANGE( rmesa, ctx );
   rmesa->hw.ctx.cmd[CTX_RB3D_COLOROFFSET] = rmesa->state.color.drawOffset
					   + rmesa->r200Screen->fbLocation;
   rmesa->hw.ctx.cmd[CTX_RB3D_COLORPITCH]  = rmesa->state.color.drawPitch;
   if (rmesa->sarea->tiling_enabled) {
      rmesa->hw.ctx.cmd[CTX_RB3D_COLORPITCH] |= R200_COLOR_TILE_ENABLE;
   }
#else
   /* Get ready for drawing next frame.  Update the renderbuffers'
    * flippedOffset/Pitch fields so we draw into the right place.
    */
   driFlipRenderbuffers(rmesa->glCtx->WinSysDrawBuffer,
                        rmesa->sarea->pfCurrentPage);


   r200UpdateDrawBuffer(rmesa->glCtx);
#endif
}
577
578
579 /* ================================================================
580 * Buffer clear
581 */
/* dd Clear entry point: clear the requested buffers with the hardware
 * DRM_RADEON_CLEAR ioctl where possible, falling back to swrast for
 * buffers the hardware path cannot handle.  (cx,cy,cw,ch) is the clear
 * rect in window coordinates; `all` means the whole drawable.
 */
static void r200Clear( GLcontext *ctx, GLbitfield mask, GLboolean all,
		       GLint cx, GLint cy, GLint cw, GLint ch )
{
   r200ContextPtr rmesa = R200_CONTEXT(ctx);
   __DRIdrawablePrivate *dPriv = rmesa->dri.drawable;
   GLuint flags = 0;
   GLuint color_mask = 0;   /* NOTE(review): set below but never read --
                             * clear.color_mask is filled directly. */
   GLint ret, i;

   if ( R200_DEBUG & DEBUG_IOCTL ) {
      fprintf( stderr, "%s: all=%d cx=%d cy=%d cw=%d ch=%d\n",
	       __FUNCTION__, all, cx, cy, cw, ch );
   }

   /* Lock/unlock forces a cliprect refresh; bail if fully obscured. */
   {
      LOCK_HARDWARE( rmesa );
      UNLOCK_HARDWARE( rmesa );
      if ( dPriv->numClipRects == 0 )
	 return;
   }

   r200Flush( ctx );

   /* Translate Mesa buffer bits into RADEON clear flags, removing each
    * handled bit from `mask`; whatever remains goes to swrast.
    */
   if ( mask & BUFFER_BIT_FRONT_LEFT ) {
      flags |= RADEON_FRONT;
      color_mask = rmesa->hw.msk.cmd[MSK_RB3D_PLANEMASK];
      mask &= ~BUFFER_BIT_FRONT_LEFT;
   }

   if ( mask & BUFFER_BIT_BACK_LEFT ) {
      flags |= RADEON_BACK;
      color_mask = rmesa->hw.msk.cmd[MSK_RB3D_PLANEMASK];
      mask &= ~BUFFER_BIT_BACK_LEFT;
   }

   if ( mask & BUFFER_BIT_DEPTH ) {
      flags |= RADEON_DEPTH;
      mask &= ~BUFFER_BIT_DEPTH;
   }

   /* Stencil only when a hardware stencil buffer actually exists. */
   if ( (mask & BUFFER_BIT_STENCIL) && rmesa->state.stencil.hwBuffer ) {
      flags |= RADEON_STENCIL;
      mask &= ~BUFFER_BIT_STENCIL;
   }

   if ( mask ) {
      if (R200_DEBUG & DEBUG_FALLBACKS)
	 fprintf(stderr, "%s: swrast clear, mask: %x\n", __FUNCTION__, mask);
      _swrast_Clear( ctx, mask, all, cx, cy, cw, ch );
   }

   if ( !flags )
      return;

   /* Fast-Z clear is only safe when stencil is absent or cleared along
    * with depth under a full write mask.
    */
   if (rmesa->using_hyperz) {
      flags |= RADEON_USE_COMP_ZBUF;
/*      if (rmesa->r200Screen->chipset & R200_CHIPSET_REAL_R200)
	 flags |= RADEON_USE_HIERZ; */
      if (!(rmesa->state.stencil.hwBuffer) ||
	 ((flags & RADEON_DEPTH) && (flags & RADEON_STENCIL) &&
	    ((rmesa->state.stencil.clear & R200_STENCIL_WRITE_MASK) == R200_STENCIL_WRITE_MASK))) {
	  flags |= RADEON_CLEAR_FASTZ;
      }
   }

   /* Flip top to bottom */
   cx += dPriv->x;
   cy  = dPriv->y + dPriv->h - cy - ch;

   LOCK_HARDWARE( rmesa );

   /* Throttle the number of clear ioctls we do.
    */
   while ( 1 ) {
      drm_radeon_getparam_t gp;
      int ret;
      int clear;

      gp.param = RADEON_PARAM_LAST_CLEAR;
      gp.value = (int *)&clear;
      ret = drmCommandWriteRead( rmesa->dri.fd,
				 DRM_RADEON_GETPARAM, &gp, sizeof(gp) );

      if ( ret ) {
	 fprintf( stderr, "%s: drmRadeonGetParam: %d\n", __FUNCTION__, ret );
	 exit(1);
      }

      /* Clear throttling needs more thought.
       */
      if ( rmesa->sarea->last_clear - clear <= 25 ) {
	 break;
      }

      if (rmesa->do_usleeps) {
	 UNLOCK_HARDWARE( rmesa );
	 DO_USLEEP( 1 );
	 LOCK_HARDWARE( rmesa );
      }
   }

   /* Send current state to the hardware */
   r200FlushCmdBufLocked( rmesa, __FUNCTION__ );

   /* Submit the clear in SAREA-sized batches of cliprects, each rect
    * intersected with the requested clear rect unless `all` is set.
    */
   for ( i = 0 ; i < dPriv->numClipRects ; ) {
      GLint nr = MIN2( i + RADEON_NR_SAREA_CLIPRECTS, dPriv->numClipRects );
      drm_clip_rect_t *box = dPriv->pClipRects;
      drm_clip_rect_t *b = rmesa->sarea->boxes;
      drm_radeon_clear_t clear;
      drm_radeon_clear_rect_t depth_boxes[RADEON_NR_SAREA_CLIPRECTS];
      GLint n = 0;

      if ( !all ) {
	 for ( ; i < nr ; i++ ) {
	    GLint x = box[i].x1;
	    GLint y = box[i].y1;
	    GLint w = box[i].x2 - x;
	    GLint h = box[i].y2 - y;

	    /* Clip this cliprect against the clear rect; skip empties. */
	    if ( x < cx ) w -= cx - x, x = cx;
	    if ( y < cy ) h -= cy - y, y = cy;
	    if ( x + w > cx + cw ) w = cx + cw - x;
	    if ( y + h > cy + ch ) h = cy + ch - y;
	    if ( w <= 0 ) continue;
	    if ( h <= 0 ) continue;

	    b->x1 = x;
	    b->y1 = y;
	    b->x2 = x + w;
	    b->y2 = y + h;
	    b++;
	    n++;
	 }
      } else {
	 for ( ; i < nr ; i++ ) {
	    *b++ = box[i];
	    n++;
	 }
      }

      rmesa->sarea->nbox = n;

      clear.flags       = flags;
      clear.clear_color = rmesa->state.color.clear;
      clear.clear_depth = rmesa->state.depth.clear;	/* needed for hyperz */
      clear.color_mask  = rmesa->hw.msk.cmd[MSK_RB3D_PLANEMASK];
      clear.depth_mask  = rmesa->state.stencil.clear;
      clear.depth_boxes = depth_boxes;

      /* The kernel also wants the boxes as floats with a clear depth. */
      n--;
      b = rmesa->sarea->boxes;
      for ( ; n >= 0 ; n-- ) {
	 depth_boxes[n].f[CLEAR_X1] = (float)b[n].x1;
	 depth_boxes[n].f[CLEAR_Y1] = (float)b[n].y1;
	 depth_boxes[n].f[CLEAR_X2] = (float)b[n].x2;
	 depth_boxes[n].f[CLEAR_Y2] = (float)b[n].y2;
	 depth_boxes[n].f[CLEAR_DEPTH] = ctx->Depth.Clear;
      }

      ret = drmCommandWrite( rmesa->dri.fd, DRM_RADEON_CLEAR,
			     &clear, sizeof(clear));


      if ( ret ) {
	 UNLOCK_HARDWARE( rmesa );
	 fprintf( stderr, "DRM_RADEON_CLEAR: return = %d\n", ret );
	 exit( 1 );
      }
   }

   UNLOCK_HARDWARE( rmesa );
   rmesa->hw.all_dirty = GL_TRUE;
}
755
756
757 void r200WaitForIdleLocked( r200ContextPtr rmesa )
758 {
759 int ret;
760 int i = 0;
761
762 do {
763 ret = drmCommandNone( rmesa->dri.fd, DRM_RADEON_CP_IDLE);
764 if (ret)
765 DO_USLEEP( 1 );
766 } while (ret && ++i < 100);
767
768 if ( ret < 0 ) {
769 UNLOCK_HARDWARE( rmesa );
770 fprintf( stderr, "Error: R200 timed out... exiting\n" );
771 exit( -1 );
772 }
773 }
774
775
/* Convenience wrapper: take the hardware lock around an idle wait. */
static void r200WaitForIdle( r200ContextPtr rmesa )
{
   LOCK_HARDWARE(rmesa);
   r200WaitForIdleLocked( rmesa );
   UNLOCK_HARDWARE(rmesa);
}
782
783
/* dd Flush entry point: push pending vertices, emit dirty state, and
 * submit any queued commands to the kernel.
 */
void r200Flush( GLcontext *ctx )
{
   r200ContextPtr rmesa = R200_CONTEXT( ctx );

   if (R200_DEBUG & DEBUG_IOCTL)
      fprintf(stderr, "%s\n", __FUNCTION__);

   /* Complete any in-progress DMA primitive first. */
   if (rmesa->dma.flush)
      rmesa->dma.flush( rmesa );

   /* Emit dirty state atoms into the command stream. */
   r200EmitState( rmesa );

   if (rmesa->store.cmd_used)
      r200FlushCmdBuf( rmesa, __FUNCTION__ );
}
799
800 /* Make sure all commands have been sent to the hardware and have
801 * completed processing.
802 */
803 void r200Finish( GLcontext *ctx )
804 {
805 r200ContextPtr rmesa = R200_CONTEXT(ctx);
806 r200Flush( ctx );
807
808 if (rmesa->do_irqs) {
809 LOCK_HARDWARE( rmesa );
810 r200EmitIrqLocked( rmesa );
811 UNLOCK_HARDWARE( rmesa );
812 r200WaitIrq( rmesa );
813 }
814 else
815 r200WaitForIdle( rmesa );
816 }
817
818
819 /* This version of AllocateMemoryMESA allocates only GART memory, and
820 * only does so after the point at which the driver has been
821 * initialized.
822 *
823 * Theoretically a valid context isn't required. However, in this
824 * implementation, it is, as I'm using the hardware lock to protect
825 * the kernel data structures, and the current context to get the
826 * device fd.
827 */
void *r200AllocateMemoryMESA(__DRInativeDisplay *dpy, int scrn, GLsizei size,
			     GLfloat readfreq, GLfloat writefreq,
			     GLfloat priority)
{
   GET_CURRENT_CONTEXT(ctx);
   r200ContextPtr rmesa;
   int region_offset;
   drm_radeon_mem_alloc_t alloc;
   int ret;

   if (R200_DEBUG & DEBUG_IOCTL)
      fprintf(stderr, "%s sz %d %f/%f/%f\n", __FUNCTION__, size, readfreq,
	      writefreq, priority);

   /* Requires a current context with a mapped GART texture aperture. */
   if (!ctx || !(rmesa = R200_CONTEXT(ctx)) || !rmesa->r200Screen->gartTextures.map)
      return NULL;

   /* Debug escape hatch to force the non-GART path. */
   if (getenv("R200_NO_ALLOC"))
      return NULL;

   /* The GART allocator ioctl exists from DRM minor 6 onwards --
    * presumably; confirm against the radeon DRM changelog.
    */
   if (rmesa->dri.drmMinor < 6)
      return NULL;

   /* Note: readfreq/writefreq/priority hints are accepted but not
    * forwarded to the kernel allocator.
    */
   alloc.region = RADEON_MEM_REGION_GART;
   alloc.alignment = 0;
   alloc.size = size;
   alloc.region_offset = &region_offset;

   ret = drmCommandWriteRead( rmesa->r200Screen->driScreen->fd,
			      DRM_RADEON_ALLOC,
			      &alloc, sizeof(alloc));

   if (ret) {
      fprintf(stderr, "%s: DRM_RADEON_ALLOC ret %d\n", __FUNCTION__, ret);
      return NULL;
   }

   /* Translate the kernel's aperture-relative offset to a CPU pointer. */
   {
      char *region_start = (char *)rmesa->r200Screen->gartTextures.map;
      return (void *)(region_start + region_offset);
   }
}
870
871
872 /* Called via glXFreeMemoryMESA() */
873 void r200FreeMemoryMESA(__DRInativeDisplay *dpy, int scrn, GLvoid *pointer)
874 {
875 GET_CURRENT_CONTEXT(ctx);
876 r200ContextPtr rmesa;
877 ptrdiff_t region_offset;
878 drm_radeon_mem_free_t memfree;
879 int ret;
880
881 if (R200_DEBUG & DEBUG_IOCTL)
882 fprintf(stderr, "%s %p\n", __FUNCTION__, pointer);
883
884 if (!ctx || !(rmesa = R200_CONTEXT(ctx)) || !rmesa->r200Screen->gartTextures.map) {
885 fprintf(stderr, "%s: no context\n", __FUNCTION__);
886 return;
887 }
888
889 if (rmesa->dri.drmMinor < 6)
890 return;
891
892 region_offset = (char *)pointer - (char *)rmesa->r200Screen->gartTextures.map;
893
894 if (region_offset < 0 ||
895 region_offset > rmesa->r200Screen->gartTextures.size) {
896 fprintf(stderr, "offset %d outside range 0..%d\n", region_offset,
897 rmesa->r200Screen->gartTextures.size);
898 return;
899 }
900
901 memfree.region = RADEON_MEM_REGION_GART;
902 memfree.region_offset = region_offset;
903
904 ret = drmCommandWrite( rmesa->r200Screen->driScreen->fd,
905 DRM_RADEON_FREE,
906 &memfree, sizeof(memfree));
907
908 if (ret)
909 fprintf(stderr, "%s: DRM_RADEON_FREE ret %d\n", __FUNCTION__, ret);
910 }
911
912 /* Called via glXGetMemoryOffsetMESA() */
913 GLuint r200GetMemoryOffsetMESA(__DRInativeDisplay *dpy, int scrn, const GLvoid *pointer)
914 {
915 GET_CURRENT_CONTEXT(ctx);
916 r200ContextPtr rmesa;
917 GLuint card_offset;
918
919 if (!ctx || !(rmesa = R200_CONTEXT(ctx)) ) {
920 fprintf(stderr, "%s: no context\n", __FUNCTION__);
921 return ~0;
922 }
923
924 if (!r200IsGartMemory( rmesa, pointer, 0 ))
925 return ~0;
926
927 if (rmesa->dri.drmMinor < 6)
928 return ~0;
929
930 card_offset = r200GartOffsetFromVirtual( rmesa, pointer );
931
932 return card_offset - rmesa->r200Screen->gart_base;
933 }
934
935 GLboolean r200IsGartMemory( r200ContextPtr rmesa, const GLvoid *pointer,
936 GLint size )
937 {
938 ptrdiff_t offset = (char *)pointer - (char *)rmesa->r200Screen->gartTextures.map;
939 int valid = (size >= 0 &&
940 offset >= 0 &&
941 offset + size < rmesa->r200Screen->gartTextures.size);
942
943 if (R200_DEBUG & DEBUG_IOCTL)
944 fprintf(stderr, "r200IsGartMemory( %p ) : %d\n", pointer, valid );
945
946 return valid;
947 }
948
949
950 GLuint r200GartOffsetFromVirtual( r200ContextPtr rmesa, const GLvoid *pointer )
951 {
952 ptrdiff_t offset = (char *)pointer - (char *)rmesa->r200Screen->gartTextures.map;
953
954 if (offset < 0 || offset > rmesa->r200Screen->gartTextures.size)
955 return ~0;
956 else
957 return rmesa->r200Screen->gart_texture_offset + offset;
958 }
959
960
961
962 void r200InitIoctlFuncs( struct dd_function_table *functions )
963 {
964 functions->Clear = r200Clear;
965 functions->Finish = r200Finish;
966 functions->Flush = r200Flush;
967 }
968