1 /**************************************************************************
3 Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
5 The Weather Channel (TM) funded Tungsten Graphics to develop the
6 initial release of the Radeon 8500 driver under the XFree86 license.
7 This notice must be preserved.
9 Permission is hereby granted, free of charge, to any person obtaining
10 a copy of this software and associated documentation files (the
11 "Software"), to deal in the Software without restriction, including
12 without limitation the rights to use, copy, modify, merge, publish,
13 distribute, sublicense, and/or sell copies of the Software, and to
14 permit persons to whom the Software is furnished to do so, subject to
15 the following conditions:
17 The above copyright notice and this permission notice (including the
18 next paragraph) shall be included in all copies or substantial
19 portions of the Software.
21 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
22 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
23 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
24 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
25 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
26 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
27 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
29 **************************************************************************/
33 * Keith Whitwell <keith@tungstengraphics.com>
37 - Scissor implementation
38 - buffer swap/copy ioctls
41 - cmdbuffer management
45 #include "main/glheader.h"
46 #include "main/imports.h"
47 #include "main/context.h"
48 #include "main/api_arrayelt.h"
49 #include "main/enums.h"
50 #include "main/colormac.h"
51 #include "main/light.h"
52 #include "main/framebuffer.h"
53 #include "main/simple_list.h"
55 #include "swrast/swrast.h"
58 #include "tnl/t_pipeline.h"
59 #include "swrast_setup/swrast_setup.h"
62 #include "drirenderbuffer.h"
65 #include "radeon_common.h"
66 #include "radeon_bocs_wrapper.h"
67 #include "radeon_lock.h"
68 #include "radeon_drm.h"
69 #include "radeon_mipmap_tree.h"
71 #define DEBUG_CMDBUF 0
73 /* =============================================================
/*
 * intersect_rect: presumably computes the intersection of cliprects *a and *b
 * into *out, returning GL_FALSE when the intersection is empty — TODO confirm
 * against upstream Mesa radeon_common.c.
 * NOTE(review): this capture is garbled and missing interior lines (the
 * embedded numbering jumps 78 -> 89); the statements that copy *a into *out
 * and clamp the edges against *b are not visible here. Only the signature and
 * the two emptiness checks below can be confirmed. Recover the full body from
 * upstream before making any change.
 */
77 static GLboolean
intersect_rect(drm_clip_rect_t
* out
,
78 drm_clip_rect_t
* a
, drm_clip_rect_t
* b
)
/* Empty-result checks: a rectangle with x1 >= x2 or y1 >= y2 has no area. */
89 if (out
->x1
>= out
->x2
)
91 if (out
->y1
>= out
->y2
)
/*
 * radeonRecalcScissorRects: rebuild the per-context scissored cliprect list
 * by intersecting every window cliprect with the current scissor rectangle.
 * NOTE(review): capture is garbled with lines missing (embedded numbering
 * jumps, e.g. 107 -> 110, 118 -> 123); local declarations ("out", "i"), the
 * early-return on allocation failure, and the loop's "out" advance are not
 * visible. Do not edit without the upstream source.
 */
96 void radeonRecalcScissorRects(radeonContextPtr radeon
)
101 /* Grow cliprect store?
103 if (radeon
->state
.scissor
.numAllocedClipRects
< radeon
->numClipRects
) {
/* Double the allocation until it covers numClipRects (starts from 0). */
104 while (radeon
->state
.scissor
.numAllocedClipRects
<
105 radeon
->numClipRects
) {
106 radeon
->state
.scissor
.numAllocedClipRects
+= 1; /* zero case */
107 radeon
->state
.scissor
.numAllocedClipRects
*= 2;
/* Replace any previous allocation with one of the new size. */
110 if (radeon
->state
.scissor
.pClipRects
)
111 FREE(radeon
->state
.scissor
.pClipRects
)
113 radeon
->state
.scissor
.pClipRects
=
114 MALLOC(radeon
->state
.scissor
.numAllocedClipRects
*
115 sizeof(drm_clip_rect_t
));
/* On allocation failure the alloc count is reset; presumably followed by an
 * early return (missing from this capture) — TODO confirm. */
117 if (radeon
->state
.scissor
.pClipRects
== NULL
) {
118 radeon
->state
.scissor
.numAllocedClipRects
= 0;
/* Intersect each window cliprect with the scissor rect, keeping only
 * non-empty results. */
123 out
= radeon
->state
.scissor
.pClipRects
;
124 radeon
->state
.scissor
.numClipRects
= 0;
126 for (i
= 0; i
< radeon
->numClipRects
; i
++) {
127 if (intersect_rect(out
,
128 &radeon
->pClipRects
[i
],
129 &radeon
->state
.scissor
.rect
)) {
130 radeon
->state
.scissor
.numClipRects
++;
137 * Update cliprects and scissors.
/*
 * radeonSetCliprects: pick the cliprect list for the current draw buffer
 * (front vs. back, honoring page flipping), resize the GL framebuffers to
 * match the drawable(s), and refresh scissored cliprects if scissoring is on.
 * NOTE(review): capture is garbled; "else" lines and closing braces are
 * missing (embedded numbering jumps 152 -> 154, 155 -> 158). The branch
 * structure below is implied by the numbering but must be confirmed against
 * upstream before editing.
 */
139 void radeonSetCliprects(radeonContextPtr radeon
)
141 __DRIdrawablePrivate
*const drawable
= radeon
->dri
.drawable
;
142 __DRIdrawablePrivate
*const readable
= radeon
->dri
.readable
;
143 GLframebuffer
*const draw_fb
= (GLframebuffer
*)drawable
->driverPrivate
;
144 GLframebuffer
*const read_fb
= (GLframebuffer
*)readable
->driverPrivate
;
/* Legacy (non-DRI2) path: cliprects come from the drawable. */
146 if (!radeon
->radeonScreen
->driScreen
->dri2
.enabled
) {
147 if (draw_fb
->_ColorDrawBufferIndexes
[0] == BUFFER_BACK_LEFT
) {
148 /* Can't ignore 2d windows if we are page flipping. */
149 if (drawable
->numBackClipRects
== 0 || radeon
->doPageFlip
||
150 radeon
->sarea
->pfCurrentPage
== 1) {
151 radeon
->numClipRects
= drawable
->numClipRects
;
152 radeon
->pClipRects
= drawable
->pClipRects
;
/* (else branch — back-buffer cliprects) */
154 radeon
->numClipRects
= drawable
->numBackClipRects
;
155 radeon
->pClipRects
= drawable
->pBackClipRects
;
158 /* front buffer (or none, or multiple buffers) */
159 radeon
->numClipRects
= drawable
->numClipRects
;
160 radeon
->pClipRects
= drawable
->pClipRects
;
/* Keep the software framebuffer sizes in sync with the drawables. */
164 if ((draw_fb
->Width
!= drawable
->w
) ||
165 (draw_fb
->Height
!= drawable
->h
)) {
166 _mesa_resize_framebuffer(radeon
->glCtx
, draw_fb
,
167 drawable
->w
, drawable
->h
);
168 draw_fb
->Initialized
= GL_TRUE
;
171 if (drawable
!= readable
) {
172 if ((read_fb
->Width
!= readable
->w
) ||
173 (read_fb
->Height
!= readable
->h
)) {
174 _mesa_resize_framebuffer(radeon
->glCtx
, read_fb
,
175 readable
->w
, readable
->h
);
176 read_fb
->Initialized
= GL_TRUE
;
/* New cliprects invalidate the cached scissored list. */
180 if (radeon
->state
.scissor
.enabled
)
181 radeonRecalcScissorRects(radeon
);
183 radeon
->lastStamp
= drawable
->lastStamp
;
186 void radeonUpdateScissor( GLcontext
*ctx
)
188 radeonContextPtr rmesa
= RADEON_CONTEXT(ctx
);
190 if ( rmesa
->dri
.drawable
) {
191 __DRIdrawablePrivate
*dPriv
= rmesa
->dri
.drawable
;
193 int x
= ctx
->Scissor
.X
;
194 int y
= dPriv
->h
- ctx
->Scissor
.Y
- ctx
->Scissor
.Height
;
195 int w
= ctx
->Scissor
.X
+ ctx
->Scissor
.Width
- 1;
196 int h
= dPriv
->h
- ctx
->Scissor
.Y
- 1;
198 rmesa
->state
.scissor
.rect
.x1
= x
+ dPriv
->x
;
199 rmesa
->state
.scissor
.rect
.y1
= y
+ dPriv
->y
;
200 rmesa
->state
.scissor
.rect
.x2
= w
+ dPriv
->x
+ 1;
201 rmesa
->state
.scissor
.rect
.y2
= h
+ dPriv
->y
+ 1;
203 radeonRecalcScissorRects( rmesa
);
207 /* =============================================================
211 void radeonScissor(GLcontext
* ctx
, GLint x
, GLint y
, GLsizei w
, GLsizei h
)
213 radeonContextPtr radeon
= RADEON_CONTEXT(ctx
);
214 if (ctx
->Scissor
.Enabled
) {
215 /* We don't pipeline cliprect changes */
216 radeon_firevertices(radeon
);
217 radeonUpdateScissor(ctx
);
222 /* ================================================================
223 * SwapBuffers with client-side throttling
/*
 * radeonGetLastFrame: query the DRM for the last completed frame counter
 * (RADEON_PARAM_LAST_FRAME) via DRM_RADEON_GETPARAM.
 * NOTE(review): capture is missing lines (declarations of "frame"/"ret", the
 * ioctl's trailing arguments, the error exit, and the return) — numbering
 * jumps 228 -> 232 and 234 -> 237. Presumably returns "frame" on success and
 * aborts on ioctl failure — confirm against upstream.
 */
226 static uint32_t radeonGetLastFrame(radeonContextPtr radeon
)
228 drm_radeon_getparam_t gp
;
232 gp
.param
= RADEON_PARAM_LAST_FRAME
;
233 gp
.value
= (int *)&frame
;
234 ret
= drmCommandWriteRead(radeon
->dri
.fd
, DRM_RADEON_GETPARAM
,
/* Error path: report the failing ioctl. */
237 fprintf(stderr
, "%s: drmRadeonGetParam: %d\n", __FUNCTION__
,
/*
 * radeonGetAge: query the DRM for the last clear age counter
 * (RADEON_PARAM_LAST_CLEAR) via DRM_RADEON_GETPARAM.
 * NOTE(review): capture is missing lines (declarations of "age"/"ret", the
 * ioctl's trailing arguments, the error exit, and the return) — numbering
 * jumps 247 -> 251 and 253 -> 256. Structure mirrors radeonGetLastFrame()
 * above; confirm against upstream before editing.
 */
245 uint32_t radeonGetAge(radeonContextPtr radeon
)
247 drm_radeon_getparam_t gp
;
251 gp
.param
= RADEON_PARAM_LAST_CLEAR
;
252 gp
.value
= (int *)&age
;
253 ret
= drmCommandWriteRead(radeon
->dri
.fd
, DRM_RADEON_GETPARAM
,
/* Error path: report the failing ioctl. */
256 fprintf(stderr
, "%s: drmRadeonGetParam: %d\n", __FUNCTION__
,
/*
 * radeonEmitIrqLocked: emit an IRQ via DRM_RADEON_IRQ_EMIT, storing the
 * sequence number into radeon->iw.irq_seq for a later radeonWaitIrq().
 * Caller must hold the hardware lock (per the "Locked" suffix — confirm).
 * NOTE(review): capture is missing lines ("ret" declaration, the ioctl's
 * trailing arguments, the error-exit path) — numbering jumps 266 -> 269 and
 * 270 -> 273.
 */
264 static void radeonEmitIrqLocked(radeonContextPtr radeon
)
266 drm_radeon_irq_emit_t ie
;
269 ie
.irq_seq
= &radeon
->iw
.irq_seq
;
270 ret
= drmCommandWriteRead(radeon
->dri
.fd
, DRM_RADEON_IRQ_EMIT
,
/* Error path: report the failing ioctl. */
273 fprintf(stderr
, "%s: drmRadeonIrqEmit: %d\n", __FUNCTION__
,
/*
 * radeonWaitIrq: block until the previously emitted IRQ (radeon->iw) fires,
 * retrying DRM_RADEON_IRQ_WAIT while it fails with EINTR or EBUSY.
 * NOTE(review): capture is missing lines (the "do {" opening the retry loop,
 * "ret" declaration, and the error-exit path after the loop) — numbering
 * jumps 279 -> 284 and 286 -> 289.
 */
279 static void radeonWaitIrq(radeonContextPtr radeon
)
284 ret
= drmCommandWrite(radeon
->dri
.fd
, DRM_RADEON_IRQ_WAIT
,
285 &radeon
->iw
, sizeof(radeon
->iw
));
/* Retry on signal interruption or transient busy. */
286 } while (ret
&& (errno
== EINTR
|| errno
== EBUSY
));
/* Error path: report the failing ioctl. */
289 fprintf(stderr
, "%s: drmRadeonIrqWait: %d\n", __FUNCTION__
,
/*
 * radeonWaitForFrameCompletion: client-side swap throttling — wait until the
 * hardware has caught up with sarea->last_frame, using IRQs when available
 * and polling (optionally with usleep) otherwise. Called with the hardware
 * lock held; the lock is dropped around the actual waits.
 * NOTE(review): capture is missing lines (loop bodies, braces, the usleep
 * call itself) — numbering jumps 302 -> 305, 309 -> 312, 319 -> 321. Confirm
 * structure against upstream before editing.
 */
295 static void radeonWaitForFrameCompletion(radeonContextPtr radeon
)
297 drm_radeon_sarea_t
*sarea
= radeon
->sarea
;
/* IRQ path: sleep in the kernel instead of spinning. */
299 if (radeon
->do_irqs
) {
300 if (radeonGetLastFrame(radeon
) < sarea
->last_frame
) {
301 if (!radeon
->irqsEmitted
) {
302 while (radeonGetLastFrame(radeon
) <
/* Drop the lock while blocked in the kernel. */
305 UNLOCK_HARDWARE(radeon
);
306 radeonWaitIrq(radeon
);
307 LOCK_HARDWARE(radeon
);
309 radeon
->irqsEmitted
= 10;
312 if (radeon
->irqsEmitted
) {
313 radeonEmitIrqLocked(radeon
);
314 radeon
->irqsEmitted
--;
/* Polling path: busy-wait (with optional usleep) until the frame lands. */
317 while (radeonGetLastFrame(radeon
) < sarea
->last_frame
) {
318 UNLOCK_HARDWARE(radeon
);
319 if (radeon
->do_usleeps
)
321 LOCK_HARDWARE(radeon
);
/*
 * radeonWaitForIdleLocked: issue DRM_RADEON_CP_IDLE until the CP reports
 * idle, giving up after 100 attempts. Caller must hold the hardware lock.
 * NOTE(review): capture is missing lines (declarations of "ret"/"i", the
 * "do {" opener, and the exit after the timeout message) — numbering jumps
 * 327 -> 333 and 340 onward.
 */
327 void radeonWaitForIdleLocked(radeonContextPtr radeon
)
333 ret
= drmCommandNone(radeon
->dri
.fd
, DRM_RADEON_CP_IDLE
);
/* Retry a bounded number of times before declaring a hang. */
336 } while (ret
&& ++i
< 100);
/* Timeout path: release the lock and bail out. */
339 UNLOCK_HARDWARE(radeon
);
340 fprintf(stderr
, "Error: R300 timed out... exiting\n");
345 static void radeonWaitForIdle(radeonContextPtr radeon
)
347 LOCK_HARDWARE(radeon
);
348 radeonWaitForIdleLocked(radeon
);
349 UNLOCK_HARDWARE(radeon
);
353 /* Copy the back color buffer to the front color buffer.
/*
 * radeonCopyBuffer: perform a back-to-front blit via DRM_RADEON_SWAP.  When
 * "rect" is non-NULL only the intersection of each cliprect with "rect" is
 * copied (sub-buffer copy); the cliprects are uploaded to the SAREA in
 * batches of RADEON_NR_SAREA_CLIPRECTS.
 * NOTE(review): capture is garbled and missing many lines (declarations of
 * i/nbox/ret/ust/n, the per-rect copy "b[n] = box[i]" and clamping
 * assignments, braces, the error exit after a failed swap, and the vblank
 * bookkeeping structure) — numbering jumps 393 -> 396, 396 -> 402, 411 -> 418,
 * 418 -> 423, 427 -> 432. Confirm against upstream before editing.
 */
355 void radeonCopyBuffer( __DRIdrawablePrivate
*dPriv
,
356 const drm_clip_rect_t
*rect
)
358 radeonContextPtr rmesa
;
360 GLboolean missed_target
;
362 __DRIscreenPrivate
*psp
;
365 assert(dPriv
->driContextPriv
);
366 assert(dPriv
->driContextPriv
->driverPrivate
);
368 rmesa
= (radeonContextPtr
) dPriv
->driContextPriv
->driverPrivate
;
370 if ( RADEON_DEBUG
& DEBUG_IOCTL
) {
371 fprintf( stderr
, "\n%s( %p )\n\n", __FUNCTION__
, (void *) rmesa
->glCtx
);
374 radeon_firevertices(rmesa
);
375 LOCK_HARDWARE( rmesa
);
377 /* Throttle the frame rate -- only allow one pending swap buffers
380 radeonWaitForFrameCompletion( rmesa
);
/* Full swaps synchronize to vblank; the lock is dropped around the wait. */
383 UNLOCK_HARDWARE( rmesa
);
384 driWaitForVBlank( dPriv
, & missed_target
);
385 LOCK_HARDWARE( rmesa
);
388 nbox
= dPriv
->numClipRects
; /* must be in locked region */
/* Upload cliprects to the SAREA in batches and fire one swap per batch. */
390 for ( i
= 0 ; i
< nbox
; ) {
391 GLint nr
= MIN2( i
+ RADEON_NR_SAREA_CLIPRECTS
, nbox
);
392 drm_clip_rect_t
*box
= dPriv
->pClipRects
;
393 drm_clip_rect_t
*b
= rmesa
->sarea
->boxes
;
396 for ( ; i
< nr
; i
++ ) {
/* Clamp each cliprect to the caller-supplied sub-rectangle. */
402 if (rect
->x1
> b
->x1
)
404 if (rect
->y1
> b
->y1
)
406 if (rect
->x2
< b
->x2
)
408 if (rect
->y2
< b
->y2
)
/* Skip rectangles that became empty after clamping. */
411 if (b
->x1
>= b
->x2
|| b
->y1
>= b
->y2
)
418 rmesa
->sarea
->nbox
= n
;
423 ret
= drmCommandNone( rmesa
->dri
.fd
, DRM_RADEON_SWAP
);
/* Error path: report and bail (exit presumably follows — confirm). */
426 fprintf( stderr
, "DRM_RADEON_SWAP_BUFFERS: return = %d\n", ret
);
427 UNLOCK_HARDWARE( rmesa
);
432 UNLOCK_HARDWARE( rmesa
);
/* Swap-interval statistics for full swaps. */
435 psp
= dPriv
->driScreenPriv
;
437 (*psp
->systemTime
->getUST
)( & ust
);
438 if ( missed_target
) {
439 rmesa
->swap_missed_count
++;
440 rmesa
->swap_missed_ust
= ust
- rmesa
->swap_ust
;
443 rmesa
->swap_ust
= ust
;
444 rmesa
->hw
.all_dirty
= GL_TRUE
;
/*
 * radeonPageFlip: swap buffers by flipping the scanout page via
 * DRM_RADEON_FLIP instead of blitting, then repoint rendering state at the
 * new front renderbuffer.
 * NOTE(review): capture is garbled with lines missing (declaration of "ret",
 * the "return" after the invisible-client throttle, "b[0] = box[0];"-style
 * SAREA setup, the error exit after a failed flip, and closing braces) —
 * numbering jumps 478 -> 482, 485 -> 487, 495 -> 497, 504 -> 509. Confirm
 * against upstream before editing.
 */
449 void radeonPageFlip( __DRIdrawablePrivate
*dPriv
)
451 radeonContextPtr rmesa
;
453 GLboolean missed_target
;
454 __DRIscreenPrivate
*psp
;
455 struct radeon_renderbuffer
*rrb
;
456 GLframebuffer
*fb
= dPriv
->driverPrivate
;
459 assert(dPriv
->driContextPriv
);
460 assert(dPriv
->driContextPriv
->driverPrivate
);
462 rmesa
= (radeonContextPtr
) dPriv
->driContextPriv
->driverPrivate
;
463 rrb
= (void *)fb
->Attachment
[BUFFER_FRONT_LEFT
].Renderbuffer
;
465 psp
= dPriv
->driScreenPriv
;
467 if ( RADEON_DEBUG
& DEBUG_IOCTL
) {
468 fprintf(stderr
, "%s: pfCurrentPage: %d\n", __FUNCTION__
,
469 rmesa
->sarea
->pfCurrentPage
);
472 radeon_firevertices(rmesa
);
474 LOCK_HARDWARE( rmesa
);
/* Nothing visible: don't flip, just throttle the client. */
476 if (!dPriv
->numClipRects
) {
477 UNLOCK_HARDWARE(rmesa
);
478 usleep(10000); /* throttle invisible client 10ms */
482 drm_clip_rect_t
*box
= dPriv
->pClipRects
;
483 drm_clip_rect_t
*b
= rmesa
->sarea
->boxes
;
485 rmesa
->sarea
->nbox
= 1;
487 /* Throttle the frame rate -- only allow a few pending swap buffers
490 radeonWaitForFrameCompletion( rmesa
);
/* Synchronize the flip to vblank; lock dropped around the wait. */
491 UNLOCK_HARDWARE( rmesa
);
492 driWaitForVBlank( dPriv
, & missed_target
);
493 if ( missed_target
) {
494 rmesa
->swap_missed_count
++;
495 (void) (*psp
->systemTime
->getUST
)( & rmesa
->swap_missed_ust
);
497 LOCK_HARDWARE( rmesa
);
499 ret
= drmCommandNone( rmesa
->dri
.fd
, DRM_RADEON_FLIP
);
501 UNLOCK_HARDWARE( rmesa
);
/* Error path: report the failed flip (exit presumably follows — confirm). */
504 fprintf( stderr
, "DRM_RADEON_FLIP: return = %d\n", ret
);
509 (void) (*psp
->systemTime
->getUST
)( & rmesa
->swap_ust
);
511 /* Get ready for drawing next frame. Update the renderbuffers'
512 * flippedOffset/Pitch fields so we draw into the right place.
514 driFlipRenderbuffers(rmesa
->glCtx
->WinSysDrawBuffer
,
515 rmesa
->sarea
->pfCurrentPage
);
517 rmesa
->state
.color
.rrb
= rrb
;
/* Let the chip-specific layer retarget its draw buffer if it needs to. */
519 if (rmesa
->vtbl
.update_draw_buffer
)
520 rmesa
->vtbl
.update_draw_buffer(rmesa
->glCtx
);
525 * Swap front and back buffer.
/*
 * radeonSwapBuffers: DRI SwapBuffers entry point — flush pending rendering
 * and either page-flip or blit depending on radeon->doPageFlip.
 * NOTE(review): capture is missing lines (the declaration/assignment of
 * "ctx", the "else" between the flip and copy paths, closing braces, and the
 * tail of the _mesa_problem call) — numbering jumps 530 -> 533, 539 -> 541,
 * 546 onward. "ctx" is presumably radeon->glCtx — confirm against upstream.
 */
527 void radeonSwapBuffers(__DRIdrawablePrivate
* dPriv
)
529 if (dPriv
->driContextPriv
&& dPriv
->driContextPriv
->driverPrivate
) {
530 radeonContextPtr radeon
;
533 radeon
= (radeonContextPtr
) dPriv
->driContextPriv
->driverPrivate
;
/* Single-buffered visuals have nothing to swap. */
536 if (ctx
->Visual
.doubleBufferMode
) {
537 _mesa_notifySwapBuffers(ctx
);/* flush pending rendering commands */
538 if (radeon
->doPageFlip
) {
539 radeonPageFlip(dPriv
);
541 radeonCopyBuffer(dPriv
, NULL
);
545 /* XXX this shouldn't be an error but we can't handle it for now */
546 _mesa_problem(NULL
, "%s: drawable has no context!",
/*
 * radeonCopySubBuffer: copy a sub-rectangle of the back buffer to the front
 * buffer (GLX_MESA_copy_sub_buffer).  The GL-style bottom-left x/y/w/h box is
 * converted to a top-left screen-space drm_clip_rect_t and handed to
 * radeonCopyBuffer().
 * NOTE(review): capture is missing lines (the declaration/assignment of
 * "ctx", closing braces, the tail of the _mesa_problem call) — numbering
 * jumps 555 -> 558 and 568 -> 571. "ctx" is presumably radeon->glCtx —
 * confirm against upstream.
 */
551 void radeonCopySubBuffer(__DRIdrawablePrivate
* dPriv
,
552 int x
, int y
, int w
, int h
)
554 if (dPriv
->driContextPriv
&& dPriv
->driContextPriv
->driverPrivate
) {
555 radeonContextPtr radeon
;
558 radeon
= (radeonContextPtr
) dPriv
->driContextPriv
->driverPrivate
;
561 if (ctx
->Visual
.doubleBufferMode
) {
562 drm_clip_rect_t rect
;
/* Flip Y (GL origin bottom-left -> cliprect origin top-left) and offset
 * by the drawable position. */
563 rect
.x1
= x
+ dPriv
->x
;
564 rect
.y1
= (dPriv
->h
- y
- h
) + dPriv
->y
;
565 rect
.x2
= rect
.x1
+ w
;
566 rect
.y2
= rect
.y1
+ h
;
567 _mesa_notifySwapBuffers(ctx
); /* flush pending rendering commands */
568 radeonCopyBuffer(dPriv
, &rect
);
571 /* XXX this shouldn't be an error but we can't handle it for now */
572 _mesa_problem(NULL
, "%s: drawable has no context!",
578 static void radeon_print_state_atom( struct radeon_state_atom
*state
)
582 fprintf(stderr
, "emit %s/%d\n", state
->name
, state
->cmd_size
);
584 if (RADEON_DEBUG
& DEBUG_VERBOSE
)
585 for (i
= 0 ; i
< state
->cmd_size
; i
++)
586 fprintf(stderr
, "\t%s[%d]: %x\n", state
->name
, i
, state
->cmd
[i
]);
/*
 * radeonEmitAtoms: walk the hardware atom list and emit every atom whose
 * dirtiness matches "dirty" (so callers can emit clean state on a fresh
 * buffer, then dirty state).  Each atom is sized via its check() callback
 * and written either through its emit() callback or directly into the batch.
 * NOTE(review): capture is missing lines (the "dwords" declaration, the
 * if/else between the emit() and OUT_BATCH paths, END_BATCH, braces) —
 * numbering jumps 593 -> 596, 605 -> 608, 611 -> 614. Confirm structure
 * against upstream before editing.
 */
590 static INLINE
void radeonEmitAtoms(radeonContextPtr radeon
, GLboolean dirty
)
592 BATCH_LOCALS(radeon
);
593 struct radeon_state_atom
*atom
;
/* Give the chip-specific layer a chance to prepare first. */
596 if (radeon
->vtbl
.pre_emit_atoms
)
597 radeon
->vtbl
.pre_emit_atoms(radeon
);
599 /* Emit actual atoms */
600 foreach(atom
, &radeon
->hw
.atomlist
) {
601 if ((atom
->dirty
|| radeon
->hw
.all_dirty
) == dirty
) {
602 dwords
= (*atom
->check
) (radeon
->glCtx
, atom
);
604 if (DEBUG_CMDBUF
&& RADEON_DEBUG
& DEBUG_STATE
) {
605 radeon_print_state_atom(atom
);
608 (*atom
->emit
)(radeon
->glCtx
, atom
);
610 BEGIN_BATCH_NO_AUTOSTATE(dwords
);
611 OUT_BATCH_TABLE(atom
->cmd
, dwords
);
614 atom
->dirty
= GL_FALSE
;
616 if (DEBUG_CMDBUF
&& RADEON_DEBUG
& DEBUG_STATE
) {
617 fprintf(stderr
, " skip state %s\n",
/*
 * radeonEmitState: flush all hardware state into the command buffer.  Skips
 * out early when the buffer already has commands and nothing is dirty;
 * otherwise reserves worst-case space, re-emits clean state on an empty
 * buffer, then emits dirty state and clears the dirty flags.
 * NOTE(review): capture is missing lines (the early "return;" after the
 * dirtiness test, the "}" before the dirty-state section, braces) —
 * numbering jumps 636 -> 639 and 649 -> 652. Confirm against upstream.
 */
627 void radeonEmitState(radeonContextPtr radeon
)
629 if (RADEON_DEBUG
& (DEBUG_STATE
|DEBUG_PRIMS
))
630 fprintf(stderr
, "%s\n", __FUNCTION__
);
632 if (radeon
->vtbl
.pre_emit_state
)
633 radeon
->vtbl
.pre_emit_state(radeon
);
635 /* this code used to return here but now it emits zbs */
/* Early out: commands pending and no state dirty (return presumably
 * follows — missing from this capture). */
636 if (radeon
->cmdbuf
.cs
->cdw
&& !radeon
->hw
.is_dirty
&& !radeon
->hw
.all_dirty
)
639 /* To avoid going across the entire set of states multiple times, just check
640 * for enough space for the case of emitting all state, and inline the
641 * radeonAllocCmdBuf code here without all the checks.
643 rcommonEnsureCmdBufSpace(radeon
, radeon
->hw
.max_state_size
, __FUNCTION__
);
/* Empty buffer: re-emit clean (non-dirty) state first so the buffer is
 * self-contained. */
645 if (!radeon
->cmdbuf
.cs
->cdw
) {
646 if (RADEON_DEBUG
& DEBUG_STATE
)
647 fprintf(stderr
, "Begin reemit state\n");
649 radeonEmitAtoms(radeon
, GL_FALSE
);
652 if (RADEON_DEBUG
& DEBUG_STATE
)
653 fprintf(stderr
, "Begin dirty state\n");
655 radeonEmitAtoms(radeon
, GL_TRUE
);
656 radeon
->hw
.is_dirty
= GL_FALSE
;
657 radeon
->hw
.all_dirty
= GL_FALSE
;
662 void radeonFlush(GLcontext
*ctx
)
664 radeonContextPtr radeon
= RADEON_CONTEXT(ctx
);
665 if (RADEON_DEBUG
& DEBUG_IOCTL
)
666 fprintf(stderr
, "%s\n", __FUNCTION__
);
668 if (radeon
->dma
.flush
)
669 radeon
->dma
.flush( ctx
);
671 radeonEmitState(radeon
);
673 if (radeon
->cmdbuf
.cs
->cdw
)
674 rcommonFlushCmdBuf(radeon
, __FUNCTION__
);
677 /* Make sure all commands have been sent to the hardware and have
678 * completed processing.
/*
 * radeonFinish: glFinish() driver hook.  Under kernel memory management,
 * wait on each color renderbuffer's BO; otherwise either round-trip an IRQ
 * or poll the CP idle ioctl.
 * NOTE(review): capture is missing lines (the "int i;" declaration, the
 * initial radeonFlush(ctx) call presumably at the missing numbering 684-687,
 * the "} else {" before the radeonWaitForIdle fallback, braces) — confirm
 * against upstream before editing.
 */
680 void radeonFinish(GLcontext
* ctx
)
682 radeonContextPtr radeon
= RADEON_CONTEXT(ctx
);
683 struct gl_framebuffer
*fb
= ctx
->DrawBuffer
;
/* KMS path: block until every draw buffer's backing BO is idle. */
688 if (radeon
->radeonScreen
->kernel_mm
) {
689 for (i
= 0; i
< fb
->_NumColorDrawBuffers
; i
++) {
690 struct radeon_renderbuffer
*rrb
;
691 rrb
= (struct radeon_renderbuffer
*)fb
->_ColorDrawBuffers
[i
];
693 radeon_bo_wait(rrb
->bo
);
/* Legacy path with IRQs: emit an IRQ and sleep until it fires. */
695 } else if (radeon
->do_irqs
) {
696 LOCK_HARDWARE(radeon
);
697 radeonEmitIrqLocked(radeon
);
698 UNLOCK_HARDWARE(radeon
);
699 radeonWaitIrq(radeon
);
/* Fallback: poll the CP idle ioctl. */
701 radeonWaitForIdle(radeon
);
707 * Send the current command buffer via ioctl to the hardware.
/*
 * rcommonFlushCmdBufLocked: submit the command stream (radeon_cs_emit) and
 * reset it.  Guards against re-entry via cmdbuf.flushing; marks all state
 * dirty after a submit so it gets re-emitted into the next buffer.
 * Caller must hold the hardware lock.
 * NOTE(review): capture is missing lines (the "int ret = 0;" declaration,
 * the early return inside the recursion guard, the error handling after
 * radeon_cs_emit, and the final "return ret;") — numbering jumps 714 -> 717
 * and 720 -> 722. Confirm against upstream.
 */
709 int rcommonFlushCmdBufLocked(radeonContextPtr rmesa
, const char *caller
)
713 if (rmesa
->cmdbuf
.flushing
) {
714 fprintf(stderr
, "Recursive call into r300FlushCmdBufLocked!\n");
717 rmesa
->cmdbuf
.flushing
= 1;
718 if (rmesa
->cmdbuf
.cs
->cdw
) {
719 ret
= radeon_cs_emit(rmesa
->cmdbuf
.cs
);
/* Everything must be re-emitted into the next (empty) buffer. */
720 rmesa
->hw
.all_dirty
= GL_TRUE
;
722 radeon_cs_erase(rmesa
->cmdbuf
.cs
);
723 rmesa
->cmdbuf
.flushing
= 0;
/*
 * rcommonFlushCmdBuf: release the DMA region, then flush the command buffer
 * under the hardware lock via rcommonFlushCmdBufLocked().
 * NOTE(review): capture is missing lines (the "int ret;" declaration, the
 * "if (ret)" guard presumably preceding the error fprintf, the exit/return
 * tail) — numbering jumps 735 -> 738. Confirm against upstream.
 */
727 int rcommonFlushCmdBuf(radeonContextPtr rmesa
, const char *caller
)
731 radeonReleaseDmaRegion(rmesa
);
733 LOCK_HARDWARE(rmesa
);
734 ret
= rcommonFlushCmdBufLocked(rmesa
, caller
);
735 UNLOCK_HARDWARE(rmesa
);
/* Error path: report a failed submission. */
738 fprintf(stderr
, "drmRadeonCmdBuffer: %d\n", ret
);
746 * Make sure that enough space is available in the command buffer
747 * by flushing if necessary.
749 * \param dwords The number of dwords we need to be free on the command buffer
751 void rcommonEnsureCmdBufSpace(radeonContextPtr rmesa
, int dwords
, const char *caller
)
753 if ((rmesa
->cmdbuf
.cs
->cdw
+ dwords
+ 128) > rmesa
->cmdbuf
.size
||
754 radeon_cs_need_flush(rmesa
->cmdbuf
.cs
)) {
755 rcommonFlushCmdBuf(rmesa
, caller
);
/*
 * rcommonInitCmdBuf: allocate the command-stream manager (GEM or legacy,
 * depending on kernel_mm), create the command stream, and program the
 * VRAM/GTT size limits from either the static screen info (legacy) or the
 * DRM_RADEON_GEM_INFO ioctl (KMS).
 * NOTE(review): capture is missing lines (the "size" declaration, braces,
 * "} else {" lines between the GEM/legacy branches, the "Allocating ..."
 * fprintf's opening "fprintf(stderr," line, the return inside the FIXME
 * error branch) — numbering jumps 766 -> 771, 783 -> 785, 788 -> 791,
 * 797 -> 799. Confirm against upstream before editing.
 */
759 void rcommonInitCmdBuf(radeonContextPtr rmesa
)
762 /* Initialize command buffer */
763 size
= 256 * driQueryOptioni(&rmesa
->optionCache
,
764 "command_buffer_size");
/* Never smaller than twice the worst-case state emission. */
765 if (size
< 2 * rmesa
->hw
.max_state_size
) {
766 size
= 2 * rmesa
->hw
.max_state_size
+ 65535;
771 if (RADEON_DEBUG
& (DEBUG_IOCTL
| DEBUG_DMA
)) {
772 fprintf(stderr
, "sizeof(drm_r300_cmd_header_t)=%zd\n",
773 sizeof(drm_r300_cmd_header_t
));
774 fprintf(stderr
, "sizeof(drm_radeon_cmd_buffer_t)=%zd\n",
775 sizeof(drm_radeon_cmd_buffer_t
));
777 "Allocating %d bytes command buffer (max state is %d bytes)\n",
778 size
* 4, rmesa
->hw
.max_state_size
* 4);
/* Pick the CS manager matching the kernel memory-management mode. */
781 if (rmesa
->radeonScreen
->kernel_mm
) {
782 int fd
= rmesa
->radeonScreen
->driScreen
->fd
;
783 rmesa
->cmdbuf
.csm
= radeon_cs_manager_gem_ctor(fd
);
785 rmesa
->cmdbuf
.csm
= radeon_cs_manager_legacy_ctor(rmesa
);
787 if (rmesa
->cmdbuf
.csm
== NULL
) {
788 /* FIXME: fatal error */
791 rmesa
->cmdbuf
.cs
= radeon_cs_create(rmesa
->cmdbuf
.csm
, size
);
792 assert(rmesa
->cmdbuf
.cs
!= NULL
);
793 rmesa
->cmdbuf
.size
= size
;
/* Legacy: limits come from the static screen configuration. */
795 if (!rmesa
->radeonScreen
->kernel_mm
) {
796 radeon_cs_set_limit(rmesa
->cmdbuf
.cs
, RADEON_GEM_DOMAIN_VRAM
, rmesa
->radeonScreen
->texSize
[0]);
797 radeon_cs_set_limit(rmesa
->cmdbuf
.cs
, RADEON_GEM_DOMAIN_GTT
, rmesa
->radeonScreen
->gartTextures
.size
);
/* KMS: query actual VRAM/GART sizes from the kernel. */
799 struct drm_radeon_gem_info mminfo
;
801 if (!drmCommandWriteRead(rmesa
->dri
.fd
, DRM_RADEON_GEM_INFO
, &mminfo
, sizeof(mminfo
)))
803 radeon_cs_set_limit(rmesa
->cmdbuf
.cs
, RADEON_GEM_DOMAIN_VRAM
, mminfo
.vram_size
);
804 radeon_cs_set_limit(rmesa
->cmdbuf
.cs
, RADEON_GEM_DOMAIN_GTT
, mminfo
.gart_size
);
810 * Destroy the command buffer
/*
 * rcommonDestroyCmdBuf: tear down the command stream and its manager,
 * mirroring the GEM/legacy choice made in rcommonInitCmdBuf().
 * NOTE(review): the "} else {" between the two dtor calls is missing from
 * this capture (embedded numbering jumps 816 -> 818); the branches are
 * presumably mutually exclusive — confirm against upstream.
 */
812 void rcommonDestroyCmdBuf(radeonContextPtr rmesa
)
814 radeon_cs_destroy(rmesa
->cmdbuf
.cs
);
815 if (rmesa
->radeonScreen
->driScreen
->dri2
.enabled
|| rmesa
->radeonScreen
->kernel_mm
) {
816 radeon_cs_manager_gem_dtor(rmesa
->cmdbuf
.csm
);
818 radeon_cs_manager_legacy_dtor(rmesa
->cmdbuf
.csm
);
/*
 * rcommonBeginBatch: reserve "n" dwords in the command stream, flushing
 * first if needed; when the flush emptied the buffer and "dostate" is set,
 * re-emit all state before opening the batch.
 * NOTE(review): capture is missing lines (the "dostate"/"file"/"line"
 * parameters and braces) — numbering jumps 822 -> 825 and 825 -> 828.
 * Confirm the full signature against upstream before editing.
 */
822 void rcommonBeginBatch(radeonContextPtr rmesa
, int n
,
825 const char *function
,
828 rcommonEnsureCmdBufSpace(rmesa
, n
, function
);
/* A flush emptied the buffer: state must be re-emitted first. */
829 if (!rmesa
->cmdbuf
.cs
->cdw
&& dostate
) {
830 if (RADEON_DEBUG
& DEBUG_IOCTL
)
831 fprintf(stderr
, "Reemit state after flush (from %s)\n", function
);
832 radeonEmitState(rmesa
);
834 radeon_cs_begin(rmesa
->cmdbuf
.cs
, n
, file
, function
, line
);