/**************************************************************************

Copyright (C) The Weather Channel, Inc.  2002.  All Rights Reserved.

The Weather Channel (TM) funded Tungsten Graphics to develop the
initial release of the Radeon 8500 driver under the XFree86 license.
This notice must be preserved.

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice (including the
next paragraph) shall be included in all copies or substantial
portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

**************************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keith@tungstengraphics.com>
 */
38 #include "main/glheader.h"
39 #include "main/imports.h"
40 #include "main/macros.h"
41 #include "main/context.h"
42 #include "swrast/swrast.h"
44 #include "r200_context.h"
45 #include "r200_state.h"
46 #include "r200_ioctl.h"
48 #include "r200_sanity.h"
49 #include "radeon_reg.h"
51 #include "drirenderbuffer.h"
54 #define R200_TIMEOUT 512
55 #define R200_IDLE_RETRY 16
58 /* At this point we were in FlushCmdBufLocked but we had lost our context, so
59 * we need to unwire our current cmdbuf, hook the one with the saved state in
60 * it, flush it, and then put the current one back. This is so commands at the
61 * start of a cmdbuf can rely on the state being kept from the previous one.
63 static void r200BackUpAndEmitLostStateLocked( r200ContextPtr rmesa
)
65 GLuint nr_released_bufs
;
66 struct radeon_store saved_store
;
68 if (rmesa
->backup_store
.cmd_used
== 0)
71 if (R200_DEBUG
& DEBUG_STATE
)
72 fprintf(stderr
, "Emitting backup state on lost context\n");
74 rmesa
->radeon
.lost_context
= GL_FALSE
;
76 nr_released_bufs
= rmesa
->dma
.nr_released_bufs
;
77 saved_store
= rmesa
->store
;
78 rmesa
->dma
.nr_released_bufs
= 0;
79 rmesa
->store
= rmesa
->backup_store
;
80 r200FlushCmdBufLocked( rmesa
, __FUNCTION__
);
81 rmesa
->dma
.nr_released_bufs
= nr_released_bufs
;
82 rmesa
->store
= saved_store
;
85 int r200FlushCmdBufLocked( r200ContextPtr rmesa
, const char * caller
)
88 drm_radeon_cmd_buffer_t cmd
;
90 if (rmesa
->radeon
.lost_context
)
91 r200BackUpAndEmitLostStateLocked( rmesa
);
93 if (R200_DEBUG
& DEBUG_IOCTL
) {
94 fprintf(stderr
, "%s from %s\n", __FUNCTION__
, caller
);
96 if (0 & R200_DEBUG
& DEBUG_VERBOSE
)
97 for (i
= 0 ; i
< rmesa
->store
.cmd_used
; i
+= 4 )
98 fprintf(stderr
, "%d: %x\n", i
/4,
99 *(int *)(&rmesa
->store
.cmd_buf
[i
]));
102 if (R200_DEBUG
& DEBUG_DMA
)
103 fprintf(stderr
, "%s: Releasing %d buffers\n", __FUNCTION__
,
104 rmesa
->dma
.nr_released_bufs
);
107 if (R200_DEBUG
& DEBUG_SANITY
) {
108 if (rmesa
->radeon
.state
.scissor
.enabled
)
109 ret
= r200SanityCmdBuffer( rmesa
,
110 rmesa
->radeon
.state
.scissor
.numClipRects
,
111 rmesa
->radeon
.state
.scissor
.pClipRects
);
113 ret
= r200SanityCmdBuffer( rmesa
,
114 rmesa
->radeon
.numClipRects
,
115 rmesa
->radeon
.pClipRects
);
117 fprintf(stderr
, "drmSanityCommandWrite: %d\n", ret
);
123 if (R200_DEBUG
& DEBUG_MEMORY
) {
124 if (! driValidateTextureHeaps( rmesa
->radeon
.texture_heaps
, rmesa
->radeon
.nr_heaps
,
125 & rmesa
->radeon
.swapped
) ) {
126 fprintf( stderr
, "%s: texture memory is inconsistent - expect "
127 "mangled textures\n", __FUNCTION__
);
132 cmd
.bufsz
= rmesa
->store
.cmd_used
;
133 cmd
.buf
= rmesa
->store
.cmd_buf
;
135 if (rmesa
->radeon
.state
.scissor
.enabled
) {
136 cmd
.nbox
= rmesa
->radeon
.state
.scissor
.numClipRects
;
137 cmd
.boxes
= (drm_clip_rect_t
*)rmesa
->radeon
.state
.scissor
.pClipRects
;
139 cmd
.nbox
= rmesa
->radeon
.numClipRects
;
140 cmd
.boxes
= (drm_clip_rect_t
*)rmesa
->radeon
.pClipRects
;
143 ret
= drmCommandWrite( rmesa
->radeon
.dri
.fd
,
148 fprintf(stderr
, "drmCommandWrite: %d\n", ret
);
150 if (R200_DEBUG
& DEBUG_SYNC
) {
151 fprintf(stderr
, "\nSyncing in %s\n\n", __FUNCTION__
);
152 radeonWaitForIdleLocked( &rmesa
->radeon
);
157 rmesa
->store
.primnr
= 0;
158 rmesa
->store
.statenr
= 0;
159 rmesa
->store
.cmd_used
= 0;
160 rmesa
->dma
.nr_released_bufs
= 0;
161 rmesa
->save_on_next_emit
= 1;
167 /* Note: does not emit any commands to avoid recursion on
170 void r200FlushCmdBuf( r200ContextPtr rmesa
, const char *caller
)
174 LOCK_HARDWARE( &rmesa
->radeon
);
176 ret
= r200FlushCmdBufLocked( rmesa
, caller
);
178 UNLOCK_HARDWARE( &rmesa
->radeon
);
181 fprintf(stderr
, "drmRadeonCmdBuffer: %d (exiting)\n", ret
);
187 /* =============================================================
188 * Hardware vertex buffer handling
192 void r200RefillCurrentDmaRegion( r200ContextPtr rmesa
)
194 struct radeon_dma_buffer
*dmabuf
;
195 int fd
= rmesa
->radeon
.dri
.fd
;
201 if (R200_DEBUG
& (DEBUG_IOCTL
|DEBUG_DMA
))
202 fprintf(stderr
, "%s\n", __FUNCTION__
);
204 if (rmesa
->dma
.flush
) {
205 rmesa
->dma
.flush( rmesa
->radeon
.glCtx
);
208 if (rmesa
->dma
.current
.buf
)
209 r200ReleaseDmaRegion( rmesa
, &rmesa
->dma
.current
, __FUNCTION__
);
211 if (rmesa
->dma
.nr_released_bufs
> 4)
212 r200FlushCmdBuf( rmesa
, __FUNCTION__
);
214 dma
.context
= rmesa
->radeon
.dri
.hwContext
;
216 dma
.send_list
= NULL
;
217 dma
.send_sizes
= NULL
;
219 dma
.request_count
= 1;
220 dma
.request_size
= RADEON_BUFFER_SIZE
;
221 dma
.request_list
= &index
;
222 dma
.request_sizes
= &size
;
223 dma
.granted_count
= 0;
225 LOCK_HARDWARE(&rmesa
->radeon
); /* no need to validate */
228 ret
= drmDMA( fd
, &dma
);
232 if (rmesa
->dma
.nr_released_bufs
) {
233 r200FlushCmdBufLocked( rmesa
, __FUNCTION__
);
236 if (rmesa
->radeon
.do_usleeps
) {
237 UNLOCK_HARDWARE( &rmesa
->radeon
);
239 LOCK_HARDWARE( &rmesa
->radeon
);
243 UNLOCK_HARDWARE(&rmesa
->radeon
);
245 if (R200_DEBUG
& DEBUG_DMA
)
246 fprintf(stderr
, "Allocated buffer %d\n", index
);
248 dmabuf
= CALLOC_STRUCT( radeon_dma_buffer
);
249 dmabuf
->buf
= &rmesa
->radeon
.radeonScreen
->buffers
->list
[index
];
250 dmabuf
->refcount
= 1;
252 rmesa
->dma
.current
.buf
= dmabuf
;
253 rmesa
->dma
.current
.address
= dmabuf
->buf
->address
;
254 rmesa
->dma
.current
.end
= dmabuf
->buf
->total
;
255 rmesa
->dma
.current
.start
= 0;
256 rmesa
->dma
.current
.ptr
= 0;
259 void r200ReleaseDmaRegion( r200ContextPtr rmesa
,
260 struct radeon_dma_region
*region
,
263 if (R200_DEBUG
& DEBUG_IOCTL
)
264 fprintf(stderr
, "%s from %s\n", __FUNCTION__
, caller
);
269 if (rmesa
->dma
.flush
)
270 rmesa
->dma
.flush( rmesa
->radeon
.glCtx
);
272 if (--region
->buf
->refcount
== 0) {
273 drm_radeon_cmd_header_t
*cmd
;
275 if (R200_DEBUG
& (DEBUG_IOCTL
|DEBUG_DMA
))
276 fprintf(stderr
, "%s -- DISCARD BUF %d\n", __FUNCTION__
,
277 region
->buf
->buf
->idx
);
279 cmd
= (drm_radeon_cmd_header_t
*)r200AllocCmdBuf( rmesa
, sizeof(*cmd
),
281 cmd
->dma
.cmd_type
= RADEON_CMD_DMA_DISCARD
;
282 cmd
->dma
.buf_idx
= region
->buf
->buf
->idx
;
284 rmesa
->dma
.nr_released_bufs
++;
291 /* Allocates a region from rmesa->dma.current. If there isn't enough
292 * space in current, grab a new buffer (and discard what was left of current)
294 void r200AllocDmaRegion( r200ContextPtr rmesa
,
295 struct radeon_dma_region
*region
,
299 if (R200_DEBUG
& DEBUG_IOCTL
)
300 fprintf(stderr
, "%s %d\n", __FUNCTION__
, bytes
);
302 if (rmesa
->dma
.flush
)
303 rmesa
->dma
.flush( rmesa
->radeon
.glCtx
);
306 r200ReleaseDmaRegion( rmesa
, region
, __FUNCTION__
);
309 rmesa
->dma
.current
.start
= rmesa
->dma
.current
.ptr
=
310 (rmesa
->dma
.current
.ptr
+ alignment
) & ~alignment
;
312 if ( rmesa
->dma
.current
.ptr
+ bytes
> rmesa
->dma
.current
.end
)
313 r200RefillCurrentDmaRegion( rmesa
);
315 region
->start
= rmesa
->dma
.current
.start
;
316 region
->ptr
= rmesa
->dma
.current
.start
;
317 region
->end
= rmesa
->dma
.current
.start
+ bytes
;
318 region
->address
= rmesa
->dma
.current
.address
;
319 region
->buf
= rmesa
->dma
.current
.buf
;
320 region
->buf
->refcount
++;
322 rmesa
->dma
.current
.ptr
+= bytes
; /* bug - if alignment > 7 */
323 rmesa
->dma
.current
.start
=
324 rmesa
->dma
.current
.ptr
= (rmesa
->dma
.current
.ptr
+ 0x7) & ~0x7;
326 assert( rmesa
->dma
.current
.ptr
<= rmesa
->dma
.current
.end
);
330 /* ================================================================
333 static void r200Clear( GLcontext
*ctx
, GLbitfield mask
)
335 r200ContextPtr rmesa
= R200_CONTEXT(ctx
);
336 __DRIdrawablePrivate
*dPriv
= rmesa
->radeon
.dri
.drawable
;
338 GLuint color_mask
= 0;
340 GLint cx
, cy
, cw
, ch
;
342 if ( R200_DEBUG
& DEBUG_IOCTL
) {
343 fprintf( stderr
, "r200Clear\n");
347 LOCK_HARDWARE( &rmesa
->radeon
);
348 UNLOCK_HARDWARE( &rmesa
->radeon
);
349 if ( dPriv
->numClipRects
== 0 )
355 if ( mask
& BUFFER_BIT_FRONT_LEFT
) {
356 flags
|= RADEON_FRONT
;
357 color_mask
= rmesa
->hw
.msk
.cmd
[MSK_RB3D_PLANEMASK
];
358 mask
&= ~BUFFER_BIT_FRONT_LEFT
;
361 if ( mask
& BUFFER_BIT_BACK_LEFT
) {
362 flags
|= RADEON_BACK
;
363 color_mask
= rmesa
->hw
.msk
.cmd
[MSK_RB3D_PLANEMASK
];
364 mask
&= ~BUFFER_BIT_BACK_LEFT
;
367 if ( mask
& BUFFER_BIT_DEPTH
) {
368 flags
|= RADEON_DEPTH
;
369 mask
&= ~BUFFER_BIT_DEPTH
;
372 if ( (mask
& BUFFER_BIT_STENCIL
) && rmesa
->radeon
.state
.stencil
.hwBuffer
) {
373 flags
|= RADEON_STENCIL
;
374 mask
&= ~BUFFER_BIT_STENCIL
;
378 if (R200_DEBUG
& DEBUG_FALLBACKS
)
379 fprintf(stderr
, "%s: swrast clear, mask: %x\n", __FUNCTION__
, mask
);
380 _swrast_Clear( ctx
, mask
);
386 if (rmesa
->using_hyperz
) {
387 flags
|= RADEON_USE_COMP_ZBUF
;
388 /* if (rmesa->radeon.radeonScreen->chip_family == CHIP_FAMILY_R200)
389 flags |= RADEON_USE_HIERZ; */
390 if (!(rmesa
->radeon
.state
.stencil
.hwBuffer
) ||
391 ((flags
& RADEON_DEPTH
) && (flags
& RADEON_STENCIL
) &&
392 ((rmesa
->radeon
.state
.stencil
.clear
& R200_STENCIL_WRITE_MASK
) == R200_STENCIL_WRITE_MASK
))) {
393 flags
|= RADEON_CLEAR_FASTZ
;
397 LOCK_HARDWARE( &rmesa
->radeon
);
399 /* compute region after locking: */
400 cx
= ctx
->DrawBuffer
->_Xmin
;
401 cy
= ctx
->DrawBuffer
->_Ymin
;
402 cw
= ctx
->DrawBuffer
->_Xmax
- cx
;
403 ch
= ctx
->DrawBuffer
->_Ymax
- cy
;
405 /* Flip top to bottom */
407 cy
= dPriv
->y
+ dPriv
->h
- cy
- ch
;
409 /* Throttle the number of clear ioctls we do.
412 drm_radeon_getparam_t gp
;
416 gp
.param
= RADEON_PARAM_LAST_CLEAR
;
417 gp
.value
= (int *)&clear
;
418 ret
= drmCommandWriteRead( rmesa
->radeon
.dri
.fd
,
419 DRM_RADEON_GETPARAM
, &gp
, sizeof(gp
) );
422 fprintf( stderr
, "%s: drmRadeonGetParam: %d\n", __FUNCTION__
, ret
);
426 /* Clear throttling needs more thought.
428 if ( rmesa
->radeon
.sarea
->last_clear
- clear
<= 25 ) {
432 if (rmesa
->radeon
.do_usleeps
) {
433 UNLOCK_HARDWARE( &rmesa
->radeon
);
435 LOCK_HARDWARE( &rmesa
->radeon
);
439 /* Send current state to the hardware */
440 r200FlushCmdBufLocked( rmesa
, __FUNCTION__
);
442 for ( i
= 0 ; i
< dPriv
->numClipRects
; ) {
443 GLint nr
= MIN2( i
+ RADEON_NR_SAREA_CLIPRECTS
, dPriv
->numClipRects
);
444 drm_clip_rect_t
*box
= dPriv
->pClipRects
;
445 drm_clip_rect_t
*b
= rmesa
->radeon
.sarea
->boxes
;
446 drm_radeon_clear_t clear
;
447 drm_radeon_clear_rect_t depth_boxes
[RADEON_NR_SAREA_CLIPRECTS
];
450 if (cw
!= dPriv
->w
|| ch
!= dPriv
->h
) {
451 /* clear subregion */
452 for ( ; i
< nr
; i
++ ) {
455 GLint w
= box
[i
].x2
- x
;
456 GLint h
= box
[i
].y2
- y
;
458 if ( x
< cx
) w
-= cx
- x
, x
= cx
;
459 if ( y
< cy
) h
-= cy
- y
, y
= cy
;
460 if ( x
+ w
> cx
+ cw
) w
= cx
+ cw
- x
;
461 if ( y
+ h
> cy
+ ch
) h
= cy
+ ch
- y
;
462 if ( w
<= 0 ) continue;
463 if ( h
<= 0 ) continue;
473 /* clear whole window */
474 for ( ; i
< nr
; i
++ ) {
480 rmesa
->radeon
.sarea
->nbox
= n
;
483 clear
.clear_color
= rmesa
->radeon
.state
.color
.clear
;
484 clear
.clear_depth
= rmesa
->radeon
.state
.depth
.clear
; /* needed for hyperz */
485 clear
.color_mask
= rmesa
->hw
.msk
.cmd
[MSK_RB3D_PLANEMASK
];
486 clear
.depth_mask
= rmesa
->radeon
.state
.stencil
.clear
;
487 clear
.depth_boxes
= depth_boxes
;
490 b
= rmesa
->radeon
.sarea
->boxes
;
491 for ( ; n
>= 0 ; n
-- ) {
492 depth_boxes
[n
].f
[CLEAR_X1
] = (float)b
[n
].x1
;
493 depth_boxes
[n
].f
[CLEAR_Y1
] = (float)b
[n
].y1
;
494 depth_boxes
[n
].f
[CLEAR_X2
] = (float)b
[n
].x2
;
495 depth_boxes
[n
].f
[CLEAR_Y2
] = (float)b
[n
].y2
;
496 depth_boxes
[n
].f
[CLEAR_DEPTH
] = ctx
->Depth
.Clear
;
499 ret
= drmCommandWrite( rmesa
->radeon
.dri
.fd
, DRM_RADEON_CLEAR
,
500 &clear
, sizeof(clear
));
504 UNLOCK_HARDWARE( &rmesa
->radeon
);
505 fprintf( stderr
, "DRM_RADEON_CLEAR: return = %d\n", ret
);
510 UNLOCK_HARDWARE( &rmesa
->radeon
);
511 rmesa
->hw
.all_dirty
= GL_TRUE
;
515 void r200Flush( GLcontext
*ctx
)
517 r200ContextPtr rmesa
= R200_CONTEXT( ctx
);
519 if (R200_DEBUG
& DEBUG_IOCTL
)
520 fprintf(stderr
, "%s\n", __FUNCTION__
);
522 if (rmesa
->dma
.flush
)
523 rmesa
->dma
.flush( ctx
);
525 r200EmitState( rmesa
);
527 if (rmesa
->store
.cmd_used
)
528 r200FlushCmdBuf( rmesa
, __FUNCTION__
);
531 /* Make sure all commands have been sent to the hardware and have
532 * completed processing.
534 void r200Finish( GLcontext
*ctx
)
537 radeon_common_finish(ctx
);
541 /* This version of AllocateMemoryMESA allocates only GART memory, and
542 * only does so after the point at which the driver has been
545 * Theoretically a valid context isn't required. However, in this
546 * implementation, it is, as I'm using the hardware lock to protect
547 * the kernel data structures, and the current context to get the
550 void *r200AllocateMemoryMESA(__DRIscreen
*screen
, GLsizei size
,
551 GLfloat readfreq
, GLfloat writefreq
,
554 GET_CURRENT_CONTEXT(ctx
);
555 r200ContextPtr rmesa
;
557 drm_radeon_mem_alloc_t alloc
;
560 if (R200_DEBUG
& DEBUG_IOCTL
)
561 fprintf(stderr
, "%s sz %d %f/%f/%f\n", __FUNCTION__
, size
, readfreq
,
562 writefreq
, priority
);
564 if (!ctx
|| !(rmesa
= R200_CONTEXT(ctx
)) || !rmesa
->radeon
.radeonScreen
->gartTextures
.map
)
567 if (getenv("R200_NO_ALLOC"))
570 alloc
.region
= RADEON_MEM_REGION_GART
;
573 alloc
.region_offset
= ®ion_offset
;
575 ret
= drmCommandWriteRead( rmesa
->radeon
.radeonScreen
->driScreen
->fd
,
577 &alloc
, sizeof(alloc
));
580 fprintf(stderr
, "%s: DRM_RADEON_ALLOC ret %d\n", __FUNCTION__
, ret
);
585 char *region_start
= (char *)rmesa
->radeon
.radeonScreen
->gartTextures
.map
;
586 return (void *)(region_start
+ region_offset
);
591 /* Called via glXFreeMemoryMESA() */
592 void r200FreeMemoryMESA(__DRIscreen
*screen
, GLvoid
*pointer
)
594 GET_CURRENT_CONTEXT(ctx
);
595 r200ContextPtr rmesa
;
596 ptrdiff_t region_offset
;
597 drm_radeon_mem_free_t memfree
;
600 if (R200_DEBUG
& DEBUG_IOCTL
)
601 fprintf(stderr
, "%s %p\n", __FUNCTION__
, pointer
);
603 if (!ctx
|| !(rmesa
= R200_CONTEXT(ctx
)) || !rmesa
->radeon
.radeonScreen
->gartTextures
.map
) {
604 fprintf(stderr
, "%s: no context\n", __FUNCTION__
);
608 region_offset
= (char *)pointer
- (char *)rmesa
->radeon
.radeonScreen
->gartTextures
.map
;
610 if (region_offset
< 0 ||
611 region_offset
> rmesa
->radeon
.radeonScreen
->gartTextures
.size
) {
612 fprintf(stderr
, "offset %d outside range 0..%d\n", region_offset
,
613 rmesa
->radeon
.radeonScreen
->gartTextures
.size
);
617 memfree
.region
= RADEON_MEM_REGION_GART
;
618 memfree
.region_offset
= region_offset
;
620 ret
= drmCommandWrite( rmesa
->radeon
.radeonScreen
->driScreen
->fd
,
622 &memfree
, sizeof(memfree
));
625 fprintf(stderr
, "%s: DRM_RADEON_FREE ret %d\n", __FUNCTION__
, ret
);
628 /* Called via glXGetMemoryOffsetMESA() */
629 GLuint
r200GetMemoryOffsetMESA(__DRIscreen
*screen
, const GLvoid
*pointer
)
631 GET_CURRENT_CONTEXT(ctx
);
632 r200ContextPtr rmesa
;
635 if (!ctx
|| !(rmesa
= R200_CONTEXT(ctx
)) ) {
636 fprintf(stderr
, "%s: no context\n", __FUNCTION__
);
640 if (!r200IsGartMemory( rmesa
, pointer
, 0 ))
643 card_offset
= r200GartOffsetFromVirtual( rmesa
, pointer
);
645 return card_offset
- rmesa
->radeon
.radeonScreen
->gart_base
;
648 GLboolean
r200IsGartMemory( r200ContextPtr rmesa
, const GLvoid
*pointer
,
651 ptrdiff_t offset
= (char *)pointer
- (char *)rmesa
->radeon
.radeonScreen
->gartTextures
.map
;
652 int valid
= (size
>= 0 &&
654 offset
+ size
< rmesa
->radeon
.radeonScreen
->gartTextures
.size
);
656 if (R200_DEBUG
& DEBUG_IOCTL
)
657 fprintf(stderr
, "r200IsGartMemory( %p ) : %d\n", pointer
, valid
);
663 GLuint
r200GartOffsetFromVirtual( r200ContextPtr rmesa
, const GLvoid
*pointer
)
665 ptrdiff_t offset
= (char *)pointer
- (char *)rmesa
->radeon
.radeonScreen
->gartTextures
.map
;
667 if (offset
< 0 || offset
> rmesa
->radeon
.radeonScreen
->gartTextures
.size
)
670 return rmesa
->radeon
.radeonScreen
->gart_texture_offset
+ offset
;
675 void r200InitIoctlFuncs( struct dd_function_table
*functions
)
677 functions
->Clear
= r200Clear
;
678 functions
->Finish
= r200Finish
;
679 functions
->Flush
= r200Flush
;