uint*t -> u_int*t changes
[mesa.git] src/mesa/drivers/dri/r200/r200_ioctl.c
1 /* $XFree86: xc/lib/GL/mesa/src/drv/r200/r200_ioctl.c,v 1.4 2002/12/17 00:32:56 dawes Exp $ */
2 /*
3 Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
4
5 The Weather Channel (TM) funded Tungsten Graphics to develop the
6 initial release of the Radeon 8500 driver under the XFree86 license.
7 This notice must be preserved.
8
9 Permission is hereby granted, free of charge, to any person obtaining
10 a copy of this software and associated documentation files (the
11 "Software"), to deal in the Software without restriction, including
12 without limitation the rights to use, copy, modify, merge, publish,
13 distribute, sublicense, and/or sell copies of the Software, and to
14 permit persons to whom the Software is furnished to do so, subject to
15 the following conditions:
16
17 The above copyright notice and this permission notice (including the
18 next paragraph) shall be included in all copies or substantial
19 portions of the Software.
20
21 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
22 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
23 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
24 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
25 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
26 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
27 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
28
29 **************************************************************************/
30
31 /*
32 * Authors:
33 * Keith Whitwell <keith@tungstengraphics.com>
34 */
35
36 #include <sched.h>
37 #include <errno.h>
38
39 #include "glheader.h"
40 #include "imports.h"
41 #include "macros.h"
42 #include "context.h"
43 #include "swrast/swrast.h"
44
45 #include "r200_context.h"
46 #include "r200_state.h"
47 #include "r200_ioctl.h"
48 #include "r200_tcl.h"
49 #include "r200_sanity.h"
50 #include "radeon_reg.h"
51
52 #include "vblank.h"
53
54
55 #define R200_TIMEOUT 512
56 #define R200_IDLE_RETRY 16
57
58
59 static void r200WaitForIdle( r200ContextPtr rmesa );
60
61
62 /* At this point we were in FlushCmdBufLocked but we had lost our context, so
63 * we need to unwire our current cmdbuf, hook the one with the saved state in
64 * it, flush it, and then put the current one back. This is so commands at the
65 * start of a cmdbuf can rely on the state being kept from the previous one.
66 */
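/* (rmesa->backup_store is presumably refilled by the state-emit path
 * whenever save_on_next_emit is set at the end of a flush, so it should
 * hold a snapshot of the last state emitted to the hardware.)
 */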
67 static void r200BackUpAndEmitLostStateLocked( r200ContextPtr rmesa )
68 {
69 GLuint nr_released_bufs;
70 struct r200_store saved_store;
71
72 if (rmesa->backup_store.cmd_used == 0)
73 return;
74
75 if (R200_DEBUG & DEBUG_STATE)
76 fprintf(stderr, "Emitting backup state on lost context\n");
77
78 rmesa->lost_context = GL_FALSE;
79
80 nr_released_bufs = rmesa->dma.nr_released_bufs;
81 saved_store = rmesa->store;
82 rmesa->dma.nr_released_bufs = 0;
83 rmesa->store = rmesa->backup_store;
84 r200FlushCmdBufLocked( rmesa, __FUNCTION__ );
85 rmesa->dma.nr_released_bufs = nr_released_bufs;
86 rmesa->store = saved_store;
87 }
88
89 int r200FlushCmdBufLocked( r200ContextPtr rmesa, const char * caller )
90 {
91 int ret, i;
92 drm_radeon_cmd_buffer_t cmd;
93
94 if (rmesa->lost_context)
95 r200BackUpAndEmitLostStateLocked( rmesa );
96
97 if (R200_DEBUG & DEBUG_IOCTL) {
98 fprintf(stderr, "%s from %s\n", __FUNCTION__, caller);
99
100 if (0 & R200_DEBUG & DEBUG_VERBOSE)
101 for (i = 0 ; i < rmesa->store.cmd_used ; i += 4 )
102 fprintf(stderr, "%d: %x\n", i/4,
103 *(int *)(&rmesa->store.cmd_buf[i]));
104 }
105
106 if (R200_DEBUG & DEBUG_DMA)
107 fprintf(stderr, "%s: Releasing %d buffers\n", __FUNCTION__,
108 rmesa->dma.nr_released_bufs);
109
110
111 if (R200_DEBUG & DEBUG_SANITY) {
112 if (rmesa->state.scissor.enabled)
113 ret = r200SanityCmdBuffer( rmesa,
114 rmesa->state.scissor.numClipRects,
115 rmesa->state.scissor.pClipRects);
116 else
117 ret = r200SanityCmdBuffer( rmesa,
118 rmesa->numClipRects,
119 rmesa->pClipRects);
120 if (ret) {
121 	 fprintf(stderr, "r200SanityCmdBuffer: %d\n", ret);
122 goto out;
123 }
124 }
125
126
127 if (R200_DEBUG & DEBUG_MEMORY) {
128 if (! driValidateTextureHeaps( rmesa->texture_heaps, rmesa->nr_heaps,
129 & rmesa->swapped ) ) {
130 fprintf( stderr, "%s: texture memory is inconsistent - expect "
131 "mangled textures\n", __FUNCTION__ );
132 }
133 }
134
135
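   /* Hand the accumulated command stream to the DRM in a single
    * DRM_RADEON_CMDBUF ioctl, together with the cliprect list it should be
    * replayed against (the scissor rects when scissoring is enabled,
    * otherwise the drawable's cliprects).
    */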
136 cmd.bufsz = rmesa->store.cmd_used;
137 cmd.buf = rmesa->store.cmd_buf;
138
139 if (rmesa->state.scissor.enabled) {
140 cmd.nbox = rmesa->state.scissor.numClipRects;
141 cmd.boxes = (drm_clip_rect_t *)rmesa->state.scissor.pClipRects;
142 } else {
143 cmd.nbox = rmesa->numClipRects;
144 cmd.boxes = (drm_clip_rect_t *)rmesa->pClipRects;
145 }
146
147 ret = drmCommandWrite( rmesa->dri.fd,
148 DRM_RADEON_CMDBUF,
149 &cmd, sizeof(cmd) );
150
151 if (ret)
152 fprintf(stderr, "drmCommandWrite: %d\n", ret);
153
154 if (R200_DEBUG & DEBUG_SYNC) {
155 fprintf(stderr, "\nSyncing in %s\n\n", __FUNCTION__);
156 r200WaitForIdleLocked( rmesa );
157 }
158
159
160 out:
161 rmesa->store.primnr = 0;
162 rmesa->store.statenr = 0;
163 rmesa->store.cmd_used = 0;
164 rmesa->dma.nr_released_bufs = 0;
165 rmesa->save_on_next_emit = 1;
166
167 return ret;
168 }
169
170
171 /* Note: does not emit any commands to avoid recursion on
172 * r200AllocCmdBuf.
173 */
174 void r200FlushCmdBuf( r200ContextPtr rmesa, const char *caller )
175 {
176 int ret;
177
178 LOCK_HARDWARE( rmesa );
179
180 ret = r200FlushCmdBufLocked( rmesa, caller );
181
182 UNLOCK_HARDWARE( rmesa );
183
184 if (ret) {
185 fprintf(stderr, "drmRadeonCmdBuffer: %d (exiting)\n", ret);
186 exit(ret);
187 }
188 }
189
190
191 /* =============================================================
192 * Hardware vertex buffer handling
193 */
194
195
196 void r200RefillCurrentDmaRegion( r200ContextPtr rmesa )
197 {
198 struct r200_dma_buffer *dmabuf;
199 int fd = rmesa->dri.fd;
200 int index = 0;
201 int size = 0;
202 drmDMAReq dma;
203 int ret;
204
205 if (R200_DEBUG & (DEBUG_IOCTL|DEBUG_DMA))
206 fprintf(stderr, "%s\n", __FUNCTION__);
207
208 if (rmesa->dma.flush) {
209 rmesa->dma.flush( rmesa );
210 }
211
212 if (rmesa->dma.current.buf)
213 r200ReleaseDmaRegion( rmesa, &rmesa->dma.current, __FUNCTION__ );
214
215 if (rmesa->dma.nr_released_bufs > 4)
216 r200FlushCmdBuf( rmesa, __FUNCTION__ );
217
218 dma.context = rmesa->dri.hwContext;
219 dma.send_count = 0;
220 dma.send_list = NULL;
221 dma.send_sizes = NULL;
222 dma.flags = 0;
223 dma.request_count = 1;
224 dma.request_size = RADEON_BUFFER_SIZE;
225 dma.request_list = &index;
226 dma.request_sizes = &size;
227 dma.granted_count = 0;
228
229 LOCK_HARDWARE(rmesa); /* no need to validate */
230
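   /* drmDMA can fail while the kernel has no free DMA buffers.  In that
    * case, flush any buffers we have already released back to the kernel
    * and optionally sleep for a moment before retrying.
    */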
231 while (1) {
232 ret = drmDMA( fd, &dma );
233 if (ret == 0)
234 break;
235
236 if (rmesa->dma.nr_released_bufs) {
237 r200FlushCmdBufLocked( rmesa, __FUNCTION__ );
238 }
239
240 if (rmesa->do_usleeps) {
241 UNLOCK_HARDWARE( rmesa );
242 DO_USLEEP( 1 );
243 LOCK_HARDWARE( rmesa );
244 }
245 }
246
247 UNLOCK_HARDWARE(rmesa);
248
249 if (R200_DEBUG & DEBUG_DMA)
250 fprintf(stderr, "Allocated buffer %d\n", index);
251
252 dmabuf = CALLOC_STRUCT( r200_dma_buffer );
253 dmabuf->buf = &rmesa->r200Screen->buffers->list[index];
254 dmabuf->refcount = 1;
255
256 rmesa->dma.current.buf = dmabuf;
257 rmesa->dma.current.address = dmabuf->buf->address;
258 rmesa->dma.current.end = dmabuf->buf->total;
259 rmesa->dma.current.start = 0;
260 rmesa->dma.current.ptr = 0;
261 }
262
263 void r200ReleaseDmaRegion( r200ContextPtr rmesa,
264 struct r200_dma_region *region,
265 const char *caller )
266 {
267 if (R200_DEBUG & DEBUG_IOCTL)
268 fprintf(stderr, "%s from %s\n", __FUNCTION__, caller);
269
270 if (!region->buf)
271 return;
272
273 if (rmesa->dma.flush)
274 rmesa->dma.flush( rmesa );
275
276 if (--region->buf->refcount == 0) {
277 drm_radeon_cmd_header_t *cmd;
278
279 if (R200_DEBUG & (DEBUG_IOCTL|DEBUG_DMA))
280 fprintf(stderr, "%s -- DISCARD BUF %d\n", __FUNCTION__,
281 region->buf->buf->idx);
282
283 cmd = (drm_radeon_cmd_header_t *)r200AllocCmdBuf( rmesa, sizeof(*cmd),
284 __FUNCTION__ );
285 cmd->dma.cmd_type = RADEON_CMD_DMA_DISCARD;
286 cmd->dma.buf_idx = region->buf->buf->idx;
287 FREE(region->buf);
288 rmesa->dma.nr_released_bufs++;
289 }
290
291 region->buf = 0;
292 region->start = 0;
293 }
294
295 /* Allocates a region from rmesa->dma.current. If there isn't enough
296 * space in current, grab a new buffer (and discard what was left of current)
297 */
298 void r200AllocDmaRegion( r200ContextPtr rmesa,
299 struct r200_dma_region *region,
300 int bytes,
301 int alignment )
302 {
303 if (R200_DEBUG & DEBUG_IOCTL)
304 fprintf(stderr, "%s %d\n", __FUNCTION__, bytes);
305
306 if (rmesa->dma.flush)
307 rmesa->dma.flush( rmesa );
308
309 if (region->buf)
310 r200ReleaseDmaRegion( rmesa, region, __FUNCTION__ );
311
312 alignment--;
313 rmesa->dma.current.start = rmesa->dma.current.ptr =
314 (rmesa->dma.current.ptr + alignment) & ~alignment;
315
316 if ( rmesa->dma.current.ptr + bytes > rmesa->dma.current.end )
317 r200RefillCurrentDmaRegion( rmesa );
318
319 region->start = rmesa->dma.current.start;
320 region->ptr = rmesa->dma.current.start;
321 region->end = rmesa->dma.current.start + bytes;
322 region->address = rmesa->dma.current.address;
323 region->buf = rmesa->dma.current.buf;
324 region->buf->refcount++;
325
326 rmesa->dma.current.ptr += bytes; /* bug - if alignment > 7 */
327 rmesa->dma.current.start =
328 rmesa->dma.current.ptr = (rmesa->dma.current.ptr + 0x7) & ~0x7;
329
330 assert( rmesa->dma.current.ptr <= rmesa->dma.current.end );
331 }
332
333 void r200AllocDmaRegionVerts( r200ContextPtr rmesa,
334 struct r200_dma_region *region,
335 int numverts,
336 int vertsize,
337 int alignment )
338 {
339 r200AllocDmaRegion( rmesa, region, vertsize * numverts, alignment );
340 }
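
/* Illustrative caller sketch (hypothetical, not part of this file): the
 * vertex paths allocate a region, write vertices through its address, and
 * release it once the primitives referencing it have been emitted:
 *
 *    struct r200_dma_region verts;
 *    r200AllocDmaRegionVerts( rmesa, &verts, nverts, vertsize_in_bytes, 4 );
 *    memcpy( verts.address + verts.start, vb, nverts * vertsize_in_bytes );
 *    r200ReleaseDmaRegion( rmesa, &verts, __FUNCTION__ );
 */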
341
342 /* ================================================================
343 * SwapBuffers with client-side throttling
344 */
345
346 static u_int32_t r200GetLastFrame(r200ContextPtr rmesa)
347 {
348 drm_radeon_getparam_t gp;
349 int ret;
350 u_int32_t frame;
351
352 gp.param = RADEON_PARAM_LAST_FRAME;
353 gp.value = (int *)&frame;
354 ret = drmCommandWriteRead( rmesa->dri.fd, DRM_RADEON_GETPARAM,
355 &gp, sizeof(gp) );
356 if ( ret ) {
357 fprintf( stderr, "%s: drmRadeonGetParam: %d\n", __FUNCTION__, ret );
358 exit(1);
359 }
360
361 return frame;
362 }
363
364 static void r200EmitIrqLocked( r200ContextPtr rmesa )
365 {
366 drm_radeon_irq_emit_t ie;
367 int ret;
368
369 ie.irq_seq = &rmesa->iw.irq_seq;
370 ret = drmCommandWriteRead( rmesa->dri.fd, DRM_RADEON_IRQ_EMIT,
371 &ie, sizeof(ie) );
372 if ( ret ) {
373 fprintf( stderr, "%s: drmRadeonIrqEmit: %d\n", __FUNCTION__, ret );
374 exit(1);
375 }
376 }
377
378
379 static void r200WaitIrq( r200ContextPtr rmesa )
380 {
381 int ret;
382
383 do {
384 ret = drmCommandWrite( rmesa->dri.fd, DRM_RADEON_IRQ_WAIT,
385 &rmesa->iw, sizeof(rmesa->iw) );
386 } while (ret && (errno == EINTR || errno == EAGAIN));
387
388 if ( ret ) {
389 fprintf( stderr, "%s: drmRadeonIrqWait: %d\n", __FUNCTION__, ret );
390 exit(1);
391 }
392 }
393
394
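/* Frame throttling: sarea->last_frame holds the frame number of the most
 * recently queued swap, while r200GetLastFrame() asks the DRM (via
 * RADEON_PARAM_LAST_FRAME) which frame the CP has actually retired.
 * Waiting until the two catch up -- by busy-waiting, sleeping, or blocking
 * on an emitted IRQ -- presumably keeps only about one swap outstanding.
 */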
395 static void r200WaitForFrameCompletion( r200ContextPtr rmesa )
396 {
397 drm_radeon_sarea_t *sarea = rmesa->sarea;
398
399 if (rmesa->do_irqs) {
400 if (r200GetLastFrame(rmesa) < sarea->last_frame) {
401 if (!rmesa->irqsEmitted) {
402 while (r200GetLastFrame (rmesa) < sarea->last_frame)
403 ;
404 }
405 else {
406 UNLOCK_HARDWARE( rmesa );
407 r200WaitIrq( rmesa );
408 LOCK_HARDWARE( rmesa );
409 }
410 rmesa->irqsEmitted = 10;
411 }
412
413 if (rmesa->irqsEmitted) {
414 r200EmitIrqLocked( rmesa );
415 rmesa->irqsEmitted--;
416 }
417 }
418 else {
419 while (r200GetLastFrame (rmesa) < sarea->last_frame) {
420 UNLOCK_HARDWARE( rmesa );
421 if (rmesa->do_usleeps)
422 DO_USLEEP( 1 );
423 LOCK_HARDWARE( rmesa );
424 }
425 }
426 }
427
428
429
430 /* Copy the back color buffer to the front color buffer.
431 */
432 void r200CopyBuffer( const __DRIdrawablePrivate *dPriv )
433 {
434 r200ContextPtr rmesa;
435 GLint nbox, i, ret;
436 GLboolean missed_target;
437 int64_t ust;
438
439 assert(dPriv);
440 assert(dPriv->driContextPriv);
441 assert(dPriv->driContextPriv->driverPrivate);
442
443 rmesa = (r200ContextPtr) dPriv->driContextPriv->driverPrivate;
444
445 if ( R200_DEBUG & DEBUG_IOCTL ) {
446 fprintf( stderr, "\n%s( %p )\n\n", __FUNCTION__, (void *)rmesa->glCtx );
447 }
448
449 R200_FIREVERTICES( rmesa );
450
451 LOCK_HARDWARE( rmesa );
452
453
454 /* Throttle the frame rate -- only allow one pending swap buffers
455 * request at a time.
456 */
457 r200WaitForFrameCompletion( rmesa );
458 UNLOCK_HARDWARE( rmesa );
459 driWaitForVBlank( dPriv, & rmesa->vbl_seq, rmesa->vblank_flags, & missed_target );
460 LOCK_HARDWARE( rmesa );
461
462 nbox = dPriv->numClipRects; /* must be in locked region */
463
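   /* The SAREA box array only holds RADEON_NR_SAREA_CLIPRECTS entries, so
    * copy the drawable's cliprects over in chunks and issue one SWAP ioctl
    * per chunk.
    */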
464 for ( i = 0 ; i < nbox ; ) {
465 GLint nr = MIN2( i + RADEON_NR_SAREA_CLIPRECTS , nbox );
466 drm_clip_rect_t *box = dPriv->pClipRects;
467 drm_clip_rect_t *b = rmesa->sarea->boxes;
468 GLint n = 0;
469
470 for ( ; i < nr ; i++ ) {
471 *b++ = box[i];
472 n++;
473 }
474 rmesa->sarea->nbox = n;
475
476 ret = drmCommandNone( rmesa->dri.fd, DRM_RADEON_SWAP );
477
478 if ( ret ) {
479 fprintf( stderr, "DRM_R200_SWAP_BUFFERS: return = %d\n", ret );
480 UNLOCK_HARDWARE( rmesa );
481 exit( 1 );
482 }
483 }
484
485 UNLOCK_HARDWARE( rmesa );
486 rmesa->hw.all_dirty = GL_TRUE;
487
488 rmesa->swap_count++;
489 (*rmesa->get_ust)( & ust );
490 if ( missed_target ) {
491 rmesa->swap_missed_count++;
492 rmesa->swap_missed_ust = ust - rmesa->swap_ust;
493 }
494
495 rmesa->swap_ust = ust;
496
497 sched_yield();
498 }
499
500 void r200PageFlip( const __DRIdrawablePrivate *dPriv )
501 {
502 r200ContextPtr rmesa;
503 GLint ret;
504 GLboolean missed_target;
505
506 assert(dPriv);
507 assert(dPriv->driContextPriv);
508 assert(dPriv->driContextPriv->driverPrivate);
509
510 rmesa = (r200ContextPtr) dPriv->driContextPriv->driverPrivate;
511
512 if ( R200_DEBUG & DEBUG_IOCTL ) {
513 fprintf(stderr, "%s: pfCurrentPage: %d\n", __FUNCTION__,
514 rmesa->sarea->pfCurrentPage);
515 }
516
517 R200_FIREVERTICES( rmesa );
518 LOCK_HARDWARE( rmesa );
519
520 if (!dPriv->numClipRects) {
521 UNLOCK_HARDWARE( rmesa );
522 usleep( 10000 ); /* throttle invisible client 10ms */
523 return;
524 }
525
526 /* Need to do this for the perf box placement:
527 */
528 {
529 drm_clip_rect_t *box = dPriv->pClipRects;
530 drm_clip_rect_t *b = rmesa->sarea->boxes;
531 b[0] = box[0];
532 rmesa->sarea->nbox = 1;
533 }
534
535 /* Throttle the frame rate -- only allow a few pending swap buffers
536 	    * requests at a time.
537 */
538 r200WaitForFrameCompletion( rmesa );
539 UNLOCK_HARDWARE( rmesa );
540 driWaitForVBlank( dPriv, & rmesa->vbl_seq, rmesa->vblank_flags, & missed_target );
541 if ( missed_target ) {
542 rmesa->swap_missed_count++;
543 (void) (*rmesa->get_ust)( & rmesa->swap_missed_ust );
544 }
545 LOCK_HARDWARE( rmesa );
546
547 ret = drmCommandNone( rmesa->dri.fd, DRM_RADEON_FLIP );
548
549 UNLOCK_HARDWARE( rmesa );
550
551 if ( ret ) {
552 fprintf( stderr, "DRM_RADEON_FLIP: return = %d\n", ret );
553 exit( 1 );
554 }
555
556 rmesa->swap_count++;
557 (void) (*rmesa->get_ust)( & rmesa->swap_ust );
558
559 if ( rmesa->sarea->pfCurrentPage == 1 ) {
560 rmesa->state.color.drawOffset = rmesa->r200Screen->frontOffset;
561 rmesa->state.color.drawPitch = rmesa->r200Screen->frontPitch;
562 } else {
563 rmesa->state.color.drawOffset = rmesa->r200Screen->backOffset;
564 rmesa->state.color.drawPitch = rmesa->r200Screen->backPitch;
565 }
566
567 R200_STATECHANGE( rmesa, ctx );
568 rmesa->hw.ctx.cmd[CTX_RB3D_COLOROFFSET] = rmesa->state.color.drawOffset
569 + rmesa->r200Screen->fbLocation;
570 rmesa->hw.ctx.cmd[CTX_RB3D_COLORPITCH] = rmesa->state.color.drawPitch;
571 }
572
573
574 /* ================================================================
575 * Buffer clear
576 */
577 static void r200Clear( GLcontext *ctx, GLbitfield mask, GLboolean all,
578 GLint cx, GLint cy, GLint cw, GLint ch )
579 {
580 r200ContextPtr rmesa = R200_CONTEXT(ctx);
581 __DRIdrawablePrivate *dPriv = rmesa->dri.drawable;
582 GLuint flags = 0;
583 GLuint color_mask = 0;
584 GLint ret, i;
585
586 if ( R200_DEBUG & DEBUG_IOCTL ) {
587 fprintf( stderr, "%s: all=%d cx=%d cy=%d cw=%d ch=%d\n",
588 __FUNCTION__, all, cx, cy, cw, ch );
589 }
590
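   /* Take and immediately drop the hardware lock: LOCK_HARDWARE also
    * revalidates the drawable when the lock was contended, so this
    * presumably refreshes dPriv->numClipRects before it is tested below.
    */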
591 {
592 LOCK_HARDWARE( rmesa );
593 UNLOCK_HARDWARE( rmesa );
594 if ( dPriv->numClipRects == 0 )
595 return;
596 }
597
598 r200Flush( ctx );
599
600 if ( mask & DD_FRONT_LEFT_BIT ) {
601 flags |= RADEON_FRONT;
602 color_mask = rmesa->hw.msk.cmd[MSK_RB3D_PLANEMASK];
603 mask &= ~DD_FRONT_LEFT_BIT;
604 }
605
606 if ( mask & DD_BACK_LEFT_BIT ) {
607 flags |= RADEON_BACK;
608 color_mask = rmesa->hw.msk.cmd[MSK_RB3D_PLANEMASK];
609 mask &= ~DD_BACK_LEFT_BIT;
610 }
611
612 if ( mask & DD_DEPTH_BIT ) {
613 flags |= RADEON_DEPTH;
614 mask &= ~DD_DEPTH_BIT;
615 }
616
617 if ( (mask & DD_STENCIL_BIT) && rmesa->state.stencil.hwBuffer ) {
618 flags |= RADEON_STENCIL;
619 mask &= ~DD_STENCIL_BIT;
620 }
621
622 if ( mask ) {
623 if (R200_DEBUG & DEBUG_FALLBACKS)
624 fprintf(stderr, "%s: swrast clear, mask: %x\n", __FUNCTION__, mask);
625 _swrast_Clear( ctx, mask, all, cx, cy, cw, ch );
626 }
627
628 if ( !flags )
629 return;
630
631 if (rmesa->using_hyperz) {
632 flags |= RADEON_USE_COMP_ZBUF;
633 /* if (rmesa->r200Screen->chipset & R200_CHIPSET_REAL_R200)
634 flags |= RADEON_USE_HIERZ; */
635 if (!(rmesa->state.stencil.hwBuffer) ||
636 ((flags & RADEON_DEPTH) && (flags & RADEON_STENCIL) &&
637 ((rmesa->state.stencil.clear & R200_STENCIL_WRITE_MASK) == R200_STENCIL_WRITE_MASK))) {
638 flags |= RADEON_CLEAR_FASTZ;
639 }
640 }
641
642 /* Flip top to bottom */
643 cx += dPriv->x;
644 cy = dPriv->y + dPriv->h - cy - ch;
645
646 LOCK_HARDWARE( rmesa );
647
648 /* Throttle the number of clear ioctls we do.
649 */
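   /* (sarea->last_clear counts clears queued so far; RADEON_PARAM_LAST_CLEAR
    *  below reports how many the CP has retired, so this presumably allows
    *  roughly 25 clears to be in flight at once.)
    */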
650 while ( 1 ) {
651 drm_radeon_getparam_t gp;
652 int ret;
653 int clear;
654
655 gp.param = RADEON_PARAM_LAST_CLEAR;
656 gp.value = (int *)&clear;
657 ret = drmCommandWriteRead( rmesa->dri.fd,
658 DRM_RADEON_GETPARAM, &gp, sizeof(gp) );
659
660 if ( ret ) {
661 fprintf( stderr, "%s: drmRadeonGetParam: %d\n", __FUNCTION__, ret );
662 exit(1);
663 }
664
665 /* Clear throttling needs more thought.
666 */
667 if ( rmesa->sarea->last_clear - clear <= 25 ) {
668 break;
669 }
670
671 if (rmesa->do_usleeps) {
672 UNLOCK_HARDWARE( rmesa );
673 DO_USLEEP( 1 );
674 LOCK_HARDWARE( rmesa );
675 }
676 }
677
678 /* Send current state to the hardware */
679 r200FlushCmdBufLocked( rmesa, __FUNCTION__ );
680
681 for ( i = 0 ; i < dPriv->numClipRects ; ) {
682 GLint nr = MIN2( i + RADEON_NR_SAREA_CLIPRECTS, dPriv->numClipRects );
683 drm_clip_rect_t *box = dPriv->pClipRects;
684 drm_clip_rect_t *b = rmesa->sarea->boxes;
685 drm_radeon_clear_t clear;
686 drm_radeon_clear_rect_t depth_boxes[RADEON_NR_SAREA_CLIPRECTS];
687 GLint n = 0;
688
689 if ( !all ) {
690 for ( ; i < nr ; i++ ) {
691 GLint x = box[i].x1;
692 GLint y = box[i].y1;
693 GLint w = box[i].x2 - x;
694 GLint h = box[i].y2 - y;
695
696 if ( x < cx ) w -= cx - x, x = cx;
697 if ( y < cy ) h -= cy - y, y = cy;
698 if ( x + w > cx + cw ) w = cx + cw - x;
699 if ( y + h > cy + ch ) h = cy + ch - y;
700 if ( w <= 0 ) continue;
701 if ( h <= 0 ) continue;
702
703 b->x1 = x;
704 b->y1 = y;
705 b->x2 = x + w;
706 b->y2 = y + h;
707 b++;
708 n++;
709 }
710 } else {
711 for ( ; i < nr ; i++ ) {
712 *b++ = box[i];
713 n++;
714 }
715 }
716
717 rmesa->sarea->nbox = n;
718
719 clear.flags = flags;
720 clear.clear_color = rmesa->state.color.clear;
721 clear.clear_depth = rmesa->state.depth.clear; /* needed for hyperz */
722 clear.color_mask = rmesa->hw.msk.cmd[MSK_RB3D_PLANEMASK];
723 clear.depth_mask = rmesa->state.stencil.clear;
724 clear.depth_boxes = depth_boxes;
725
726 n--;
727 b = rmesa->sarea->boxes;
728 for ( ; n >= 0 ; n-- ) {
729 depth_boxes[n].f[CLEAR_X1] = (float)b[n].x1;
730 depth_boxes[n].f[CLEAR_Y1] = (float)b[n].y1;
731 depth_boxes[n].f[CLEAR_X2] = (float)b[n].x2;
732 depth_boxes[n].f[CLEAR_Y2] = (float)b[n].y2;
733 depth_boxes[n].f[CLEAR_DEPTH] = ctx->Depth.Clear;
734 }
735
736 ret = drmCommandWrite( rmesa->dri.fd, DRM_RADEON_CLEAR,
737 &clear, sizeof(clear));
738
739
740 if ( ret ) {
741 UNLOCK_HARDWARE( rmesa );
742 fprintf( stderr, "DRM_RADEON_CLEAR: return = %d\n", ret );
743 exit( 1 );
744 }
745 }
746
747 UNLOCK_HARDWARE( rmesa );
748 rmesa->hw.all_dirty = GL_TRUE;
749 }
750
751
752 void r200WaitForIdleLocked( r200ContextPtr rmesa )
753 {
754 int ret;
755 int i = 0;
756
757 do {
758 ret = drmCommandNone( rmesa->dri.fd, DRM_RADEON_CP_IDLE);
759 if (ret)
760 DO_USLEEP( 1 );
761 } while (ret && ++i < 100);
762
763 if ( ret < 0 ) {
764 UNLOCK_HARDWARE( rmesa );
765 fprintf( stderr, "Error: R200 timed out... exiting\n" );
766 exit( -1 );
767 }
768 }
769
770
771 static void r200WaitForIdle( r200ContextPtr rmesa )
772 {
773 LOCK_HARDWARE(rmesa);
774 r200WaitForIdleLocked( rmesa );
775 UNLOCK_HARDWARE(rmesa);
776 }
777
778
779 void r200Flush( GLcontext *ctx )
780 {
781 r200ContextPtr rmesa = R200_CONTEXT( ctx );
782
783 if (R200_DEBUG & DEBUG_IOCTL)
784 fprintf(stderr, "%s\n", __FUNCTION__);
785
786 if (rmesa->dma.flush)
787 rmesa->dma.flush( rmesa );
788
789 r200EmitState( rmesa );
790
791 if (rmesa->store.cmd_used)
792 r200FlushCmdBuf( rmesa, __FUNCTION__ );
793 }
794
795 /* Make sure all commands have been sent to the hardware and have
796 * completed processing.
797 */
798 void r200Finish( GLcontext *ctx )
799 {
800 r200ContextPtr rmesa = R200_CONTEXT(ctx);
801 r200Flush( ctx );
802
803 if (rmesa->do_irqs) {
804 LOCK_HARDWARE( rmesa );
805 r200EmitIrqLocked( rmesa );
806 UNLOCK_HARDWARE( rmesa );
807 r200WaitIrq( rmesa );
808 }
809 else
810 r200WaitForIdle( rmesa );
811 }
812
813
814 /* This version of AllocateMemoryMESA allocates only GART memory, and
815 * only does so after the point at which the driver has been
816 * initialized.
817 *
818 * Theoretically a valid context isn't required. However, in this
819 * implementation, it is, as I'm using the hardware lock to protect
820 * the kernel data structures, and the current context to get the
821 * device fd.
822 */
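/* Hypothetical application-side sketch (not part of this file) of the
 * GLX_MESA_allocate_memory entry points that land here:
 *
 *    void *buf = glXAllocateMemoryMESA( dpy, scrn, size, 0.0, 0.0, 1.0 );
 *    if (buf) {
 *       GLuint ofs = glXGetMemoryOffsetMESA( dpy, scrn, buf );
 *       ...
 *       glXFreeMemoryMESA( dpy, scrn, buf );
 *    }
 */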
823 void *r200AllocateMemoryMESA(__DRInativeDisplay *dpy, int scrn, GLsizei size,
824 GLfloat readfreq, GLfloat writefreq,
825 GLfloat priority)
826 {
827 GET_CURRENT_CONTEXT(ctx);
828 r200ContextPtr rmesa;
829 int region_offset;
830 drm_radeon_mem_alloc_t alloc;
831 int ret;
832
833 if (R200_DEBUG & DEBUG_IOCTL)
834 fprintf(stderr, "%s sz %d %f/%f/%f\n", __FUNCTION__, size, readfreq,
835 writefreq, priority);
836
837 if (!ctx || !(rmesa = R200_CONTEXT(ctx)) || !rmesa->r200Screen->gartTextures.map)
838 return NULL;
839
840 if (getenv("R200_NO_ALLOC"))
841 return NULL;
842
843 if (rmesa->dri.drmMinor < 6)
844 return NULL;
845
846 alloc.region = RADEON_MEM_REGION_GART;
847 alloc.alignment = 0;
848 alloc.size = size;
849 alloc.region_offset = &region_offset;
850
851 ret = drmCommandWriteRead( rmesa->r200Screen->driScreen->fd,
852 DRM_RADEON_ALLOC,
853 &alloc, sizeof(alloc));
854
855 if (ret) {
856 fprintf(stderr, "%s: DRM_RADEON_ALLOC ret %d\n", __FUNCTION__, ret);
857 return NULL;
858 }
859
860 {
861 char *region_start = (char *)rmesa->r200Screen->gartTextures.map;
862 return (void *)(region_start + region_offset);
863 }
864 }
865
866
867 /* Called via glXFreeMemoryMESA() */
868 void r200FreeMemoryMESA(__DRInativeDisplay *dpy, int scrn, GLvoid *pointer)
869 {
870 GET_CURRENT_CONTEXT(ctx);
871 r200ContextPtr rmesa;
872 ptrdiff_t region_offset;
873 drm_radeon_mem_free_t memfree;
874 int ret;
875
876 if (R200_DEBUG & DEBUG_IOCTL)
877 fprintf(stderr, "%s %p\n", __FUNCTION__, pointer);
878
879 if (!ctx || !(rmesa = R200_CONTEXT(ctx)) || !rmesa->r200Screen->gartTextures.map) {
880 fprintf(stderr, "%s: no context\n", __FUNCTION__);
881 return;
882 }
883
884 if (rmesa->dri.drmMinor < 6)
885 return;
886
887 region_offset = (char *)pointer - (char *)rmesa->r200Screen->gartTextures.map;
888
889 if (region_offset < 0 ||
890 region_offset > rmesa->r200Screen->gartTextures.size) {
891 	      fprintf(stderr, "offset %ld outside range 0..%d\n", (long) region_offset,
892 		      rmesa->r200Screen->gartTextures.size);
893 return;
894 }
895
896 memfree.region = RADEON_MEM_REGION_GART;
897 memfree.region_offset = region_offset;
898
899 ret = drmCommandWrite( rmesa->r200Screen->driScreen->fd,
900 DRM_RADEON_FREE,
901 &memfree, sizeof(memfree));
902
903 if (ret)
904 fprintf(stderr, "%s: DRM_RADEON_FREE ret %d\n", __FUNCTION__, ret);
905 }
906
907 /* Called via glXGetMemoryOffsetMESA() */
908 GLuint r200GetMemoryOffsetMESA(__DRInativeDisplay *dpy, int scrn, const GLvoid *pointer)
909 {
910 GET_CURRENT_CONTEXT(ctx);
911 r200ContextPtr rmesa;
912 GLuint card_offset;
913
914 if (!ctx || !(rmesa = R200_CONTEXT(ctx)) ) {
915 fprintf(stderr, "%s: no context\n", __FUNCTION__);
916 return ~0;
917 }
918
919 if (!r200IsGartMemory( rmesa, pointer, 0 ))
920 return ~0;
921
922 if (rmesa->dri.drmMinor < 6)
923 return ~0;
924
925 card_offset = r200GartOffsetFromVirtual( rmesa, pointer );
926
927 return card_offset - rmesa->r200Screen->gart_base;
928 }
929
930 GLboolean r200IsGartMemory( r200ContextPtr rmesa, const GLvoid *pointer,
931 GLint size )
932 {
933 ptrdiff_t offset = (char *)pointer - (char *)rmesa->r200Screen->gartTextures.map;
934 int valid = (size >= 0 &&
935 offset >= 0 &&
936 offset + size < rmesa->r200Screen->gartTextures.size);
937
938 if (R200_DEBUG & DEBUG_IOCTL)
939 fprintf(stderr, "r200IsGartMemory( %p ) : %d\n", pointer, valid );
940
941 return valid;
942 }
943
944
945 GLuint r200GartOffsetFromVirtual( r200ContextPtr rmesa, const GLvoid *pointer )
946 {
947 ptrdiff_t offset = (char *)pointer - (char *)rmesa->r200Screen->gartTextures.map;
948
949 if (offset < 0 || offset > rmesa->r200Screen->gartTextures.size)
950 return ~0;
951 else
952 return rmesa->r200Screen->gart_texture_offset + offset;
953 }
954
955
956
957 void r200InitIoctlFuncs( struct dd_function_table *functions )
958 {
959 functions->Clear = r200Clear;
960 functions->Finish = r200Finish;
961 functions->Flush = r200Flush;
962 }
963