1 /**************************************************************************
3 Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
5 The Weather Channel (TM) funded Tungsten Graphics to develop the
6 initial release of the Radeon 8500 driver under the XFree86 license.
7 This notice must be preserved.
9 Permission is hereby granted, free of charge, to any person obtaining
10 a copy of this software and associated documentation files (the
11 "Software"), to deal in the Software without restriction, including
12 without limitation the rights to use, copy, modify, merge, publish,
13 distribute, sublicense, and/or sell copies of the Software, and to
14 permit persons to whom the Software is furnished to do so, subject to
15 the following conditions:
17 The above copyright notice and this permission notice (including the
18 next paragraph) shall be included in all copies or substantial
19 portions of the Software.
21 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
22 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
23 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
24 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
25 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
26 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
27 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
29 **************************************************************************/
33 * Keith Whitwell <keith@tungstengraphics.com>
37 - Scissor implementation
38 - buffer swap/copy ioctls
41 - cmdbuffer management
45 #include "main/glheader.h"
46 #include "main/imports.h"
47 #include "main/context.h"
48 #include "main/enums.h"
49 #include "main/framebuffer.h"
50 #include "main/renderbuffer.h"
51 #include "drivers/common/meta.h"
55 #include "radeon_common.h"
56 #include "radeon_bocs_wrapper.h"
57 #include "radeon_lock.h"
58 #include "radeon_drm.h"
59 #include "radeon_queryobj.h"
62 * Enable verbose debug output for emit code.
65 2 also print state values
67 #define RADEON_CMDBUF 0
69 /* =============================================================
73 static GLboolean
intersect_rect(drm_clip_rect_t
* out
,
74 drm_clip_rect_t
* a
, drm_clip_rect_t
* b
)
85 if (out
->x1
>= out
->x2
)
87 if (out
->y1
>= out
->y2
)
92 void radeonRecalcScissorRects(radeonContextPtr radeon
)
97 /* Grow cliprect store?
99 if (radeon
->state
.scissor
.numAllocedClipRects
< radeon
->numClipRects
) {
100 while (radeon
->state
.scissor
.numAllocedClipRects
<
101 radeon
->numClipRects
) {
102 radeon
->state
.scissor
.numAllocedClipRects
+= 1; /* zero case */
103 radeon
->state
.scissor
.numAllocedClipRects
*= 2;
106 if (radeon
->state
.scissor
.pClipRects
)
107 FREE(radeon
->state
.scissor
.pClipRects
);
109 radeon
->state
.scissor
.pClipRects
=
110 MALLOC(radeon
->state
.scissor
.numAllocedClipRects
*
111 sizeof(drm_clip_rect_t
));
113 if (radeon
->state
.scissor
.pClipRects
== NULL
) {
114 radeon
->state
.scissor
.numAllocedClipRects
= 0;
119 out
= radeon
->state
.scissor
.pClipRects
;
120 radeon
->state
.scissor
.numClipRects
= 0;
122 for (i
= 0; i
< radeon
->numClipRects
; i
++) {
123 if (intersect_rect(out
,
124 &radeon
->pClipRects
[i
],
125 &radeon
->state
.scissor
.rect
)) {
126 radeon
->state
.scissor
.numClipRects
++;
131 if (radeon
->vtbl
.update_scissor
)
132 radeon
->vtbl
.update_scissor(radeon
->glCtx
);
135 void radeon_get_cliprects(radeonContextPtr radeon
,
136 struct drm_clip_rect
**cliprects
,
137 unsigned int *num_cliprects
,
138 int *x_off
, int *y_off
)
140 __DRIdrawablePrivate
*dPriv
= radeon_get_drawable(radeon
);
141 struct radeon_framebuffer
*rfb
= dPriv
->driverPrivate
;
143 if (radeon
->constant_cliprect
) {
144 radeon
->fboRect
.x1
= 0;
145 radeon
->fboRect
.y1
= 0;
146 radeon
->fboRect
.x2
= radeon
->glCtx
->DrawBuffer
->Width
;
147 radeon
->fboRect
.y2
= radeon
->glCtx
->DrawBuffer
->Height
;
149 *cliprects
= &radeon
->fboRect
;
153 } else if (radeon
->front_cliprects
||
154 rfb
->pf_active
|| dPriv
->numBackClipRects
== 0) {
155 *cliprects
= dPriv
->pClipRects
;
156 *num_cliprects
= dPriv
->numClipRects
;
160 *num_cliprects
= dPriv
->numBackClipRects
;
161 *cliprects
= dPriv
->pBackClipRects
;
162 *x_off
= dPriv
->backX
;
163 *y_off
= dPriv
->backY
;
168 * Update cliprects and scissors.
170 void radeonSetCliprects(radeonContextPtr radeon
)
172 __DRIdrawablePrivate
*const drawable
= radeon_get_drawable(radeon
);
173 __DRIdrawablePrivate
*const readable
= radeon_get_readable(radeon
);
174 struct radeon_framebuffer
*const draw_rfb
= drawable
->driverPrivate
;
175 struct radeon_framebuffer
*const read_rfb
= readable
->driverPrivate
;
178 radeon_get_cliprects(radeon
, &radeon
->pClipRects
,
179 &radeon
->numClipRects
, &x_off
, &y_off
);
181 if ((draw_rfb
->base
.Width
!= drawable
->w
) ||
182 (draw_rfb
->base
.Height
!= drawable
->h
)) {
183 _mesa_resize_framebuffer(radeon
->glCtx
, &draw_rfb
->base
,
184 drawable
->w
, drawable
->h
);
185 draw_rfb
->base
.Initialized
= GL_TRUE
;
188 if (drawable
!= readable
) {
189 if ((read_rfb
->base
.Width
!= readable
->w
) ||
190 (read_rfb
->base
.Height
!= readable
->h
)) {
191 _mesa_resize_framebuffer(radeon
->glCtx
, &read_rfb
->base
,
192 readable
->w
, readable
->h
);
193 read_rfb
->base
.Initialized
= GL_TRUE
;
197 if (radeon
->state
.scissor
.enabled
)
198 radeonRecalcScissorRects(radeon
);
204 void radeonUpdateScissor( GLcontext
*ctx
)
206 radeonContextPtr rmesa
= RADEON_CONTEXT(ctx
);
207 GLint x
= ctx
->Scissor
.X
, y
= ctx
->Scissor
.Y
;
208 GLsizei w
= ctx
->Scissor
.Width
, h
= ctx
->Scissor
.Height
;
210 int min_x
, min_y
, max_x
, max_y
;
212 if (!ctx
->DrawBuffer
)
215 max_x
= ctx
->DrawBuffer
->Width
- 1;
216 max_y
= ctx
->DrawBuffer
->Height
- 1;
218 if ( !ctx
->DrawBuffer
->Name
) {
220 y1
= ctx
->DrawBuffer
->Height
- (y
+ h
);
230 if (!rmesa
->radeonScreen
->kernel_mm
) {
231 /* Fix scissors for dri 1 */
232 __DRIdrawablePrivate
*dPriv
= radeon_get_drawable(rmesa
);
236 max_x
+= dPriv
->x
+ 1;
240 max_y
+= dPriv
->y
+ 1;
243 rmesa
->state
.scissor
.rect
.x1
= CLAMP(x1
, min_x
, max_x
);
244 rmesa
->state
.scissor
.rect
.y1
= CLAMP(y1
, min_y
, max_y
);
245 rmesa
->state
.scissor
.rect
.x2
= CLAMP(x2
, min_x
, max_x
);
246 rmesa
->state
.scissor
.rect
.y2
= CLAMP(y2
, min_y
, max_y
);
248 radeonRecalcScissorRects( rmesa
);
251 /* =============================================================
255 void radeonScissor(GLcontext
* ctx
, GLint x
, GLint y
, GLsizei w
, GLsizei h
)
257 radeonContextPtr radeon
= RADEON_CONTEXT(ctx
);
258 if (ctx
->Scissor
.Enabled
) {
259 /* We don't pipeline cliprect changes */
260 if (!radeon
->radeonScreen
->kernel_mm
) {
261 radeon_firevertices(radeon
);
263 radeonUpdateScissor(ctx
);
267 void radeonPolygonStipplePreKMS( GLcontext
*ctx
, const GLubyte
*mask
)
269 radeonContextPtr radeon
= RADEON_CONTEXT(ctx
);
271 drm_radeon_stipple_t stipple
;
273 /* Must flip pattern upside down.
275 for ( i
= 0 ; i
< 32 ; i
++ ) {
276 stipple
.mask
[31 - i
] = ((GLuint
*) mask
)[i
];
279 /* TODO: push this into cmd mechanism
281 radeon_firevertices(radeon
);
282 LOCK_HARDWARE( radeon
);
284 drmCommandWrite( radeon
->dri
.fd
, DRM_RADEON_STIPPLE
,
285 &stipple
, sizeof(stipple
) );
286 UNLOCK_HARDWARE( radeon
);
290 /* ================================================================
291 * SwapBuffers with client-side throttling
294 static uint32_t radeonGetLastFrame(radeonContextPtr radeon
)
296 drm_radeon_getparam_t gp
;
300 gp
.param
= RADEON_PARAM_LAST_FRAME
;
301 gp
.value
= (int *)&frame
;
302 ret
= drmCommandWriteRead(radeon
->dri
.fd
, DRM_RADEON_GETPARAM
,
305 fprintf(stderr
, "%s: drmRadeonGetParam: %d\n", __FUNCTION__
,
313 uint32_t radeonGetAge(radeonContextPtr radeon
)
315 drm_radeon_getparam_t gp
;
319 gp
.param
= RADEON_PARAM_LAST_CLEAR
;
320 gp
.value
= (int *)&age
;
321 ret
= drmCommandWriteRead(radeon
->dri
.fd
, DRM_RADEON_GETPARAM
,
324 fprintf(stderr
, "%s: drmRadeonGetParam: %d\n", __FUNCTION__
,
332 static void radeonEmitIrqLocked(radeonContextPtr radeon
)
334 drm_radeon_irq_emit_t ie
;
337 ie
.irq_seq
= &radeon
->iw
.irq_seq
;
338 ret
= drmCommandWriteRead(radeon
->dri
.fd
, DRM_RADEON_IRQ_EMIT
,
341 fprintf(stderr
, "%s: drmRadeonIrqEmit: %d\n", __FUNCTION__
,
347 static void radeonWaitIrq(radeonContextPtr radeon
)
352 ret
= drmCommandWrite(radeon
->dri
.fd
, DRM_RADEON_IRQ_WAIT
,
353 &radeon
->iw
, sizeof(radeon
->iw
));
354 } while (ret
&& (errno
== EINTR
|| errno
== EBUSY
));
357 fprintf(stderr
, "%s: drmRadeonIrqWait: %d\n", __FUNCTION__
,
363 static void radeonWaitForFrameCompletion(radeonContextPtr radeon
)
365 drm_radeon_sarea_t
*sarea
= radeon
->sarea
;
367 if (radeon
->do_irqs
) {
368 if (radeonGetLastFrame(radeon
) < sarea
->last_frame
) {
369 if (!radeon
->irqsEmitted
) {
370 while (radeonGetLastFrame(radeon
) <
373 UNLOCK_HARDWARE(radeon
);
374 radeonWaitIrq(radeon
);
375 LOCK_HARDWARE(radeon
);
377 radeon
->irqsEmitted
= 10;
380 if (radeon
->irqsEmitted
) {
381 radeonEmitIrqLocked(radeon
);
382 radeon
->irqsEmitted
--;
385 while (radeonGetLastFrame(radeon
) < sarea
->last_frame
) {
386 UNLOCK_HARDWARE(radeon
);
387 if (radeon
->do_usleeps
)
389 LOCK_HARDWARE(radeon
);
395 void radeonWaitForIdleLocked(radeonContextPtr radeon
)
401 ret
= drmCommandNone(radeon
->dri
.fd
, DRM_RADEON_CP_IDLE
);
404 } while (ret
&& ++i
< 100);
407 UNLOCK_HARDWARE(radeon
);
408 fprintf(stderr
, "Error: R300 timed out... exiting\n");
413 static void radeonWaitForIdle(radeonContextPtr radeon
)
415 if (!radeon
->radeonScreen
->driScreen
->dri2
.enabled
) {
416 LOCK_HARDWARE(radeon
);
417 radeonWaitForIdleLocked(radeon
);
418 UNLOCK_HARDWARE(radeon
);
422 static void radeon_flip_renderbuffers(struct radeon_framebuffer
*rfb
)
424 int current_page
= rfb
->pf_current_page
;
425 int next_page
= (current_page
+ 1) % rfb
->pf_num_pages
;
426 struct gl_renderbuffer
*tmp_rb
;
428 /* Exchange renderbuffers if necessary but make sure their
429 * reference counts are preserved.
431 if (rfb
->color_rb
[current_page
] &&
432 rfb
->base
.Attachment
[BUFFER_FRONT_LEFT
].Renderbuffer
!=
433 &rfb
->color_rb
[current_page
]->base
) {
435 _mesa_reference_renderbuffer(&tmp_rb
,
436 rfb
->base
.Attachment
[BUFFER_FRONT_LEFT
].Renderbuffer
);
437 tmp_rb
= &rfb
->color_rb
[current_page
]->base
;
438 _mesa_reference_renderbuffer(&rfb
->base
.Attachment
[BUFFER_FRONT_LEFT
].Renderbuffer
, tmp_rb
);
439 _mesa_reference_renderbuffer(&tmp_rb
, NULL
);
442 if (rfb
->color_rb
[next_page
] &&
443 rfb
->base
.Attachment
[BUFFER_BACK_LEFT
].Renderbuffer
!=
444 &rfb
->color_rb
[next_page
]->base
) {
446 _mesa_reference_renderbuffer(&tmp_rb
,
447 rfb
->base
.Attachment
[BUFFER_BACK_LEFT
].Renderbuffer
);
448 tmp_rb
= &rfb
->color_rb
[next_page
]->base
;
449 _mesa_reference_renderbuffer(&rfb
->base
.Attachment
[BUFFER_BACK_LEFT
].Renderbuffer
, tmp_rb
);
450 _mesa_reference_renderbuffer(&tmp_rb
, NULL
);
454 /* Copy the back color buffer to the front color buffer.
456 void radeonCopyBuffer( __DRIdrawablePrivate
*dPriv
,
457 const drm_clip_rect_t
*rect
)
459 radeonContextPtr rmesa
;
460 struct radeon_framebuffer
*rfb
;
464 assert(dPriv
->driContextPriv
);
465 assert(dPriv
->driContextPriv
->driverPrivate
);
467 rmesa
= (radeonContextPtr
) dPriv
->driContextPriv
->driverPrivate
;
469 LOCK_HARDWARE(rmesa
);
471 rfb
= dPriv
->driverPrivate
;
473 if ( RADEON_DEBUG
& RADEON_IOCTL
) {
474 fprintf( stderr
, "\n%s( %p )\n\n", __FUNCTION__
, (void *) rmesa
->glCtx
);
477 nbox
= dPriv
->numClipRects
; /* must be in locked region */
479 for ( i
= 0 ; i
< nbox
; ) {
480 GLint nr
= MIN2( i
+ RADEON_NR_SAREA_CLIPRECTS
, nbox
);
481 drm_clip_rect_t
*box
= dPriv
->pClipRects
;
482 drm_clip_rect_t
*b
= rmesa
->sarea
->boxes
;
485 for ( ; i
< nr
; i
++ ) {
491 if (rect
->x1
> b
->x1
)
493 if (rect
->y1
> b
->y1
)
495 if (rect
->x2
< b
->x2
)
497 if (rect
->y2
< b
->y2
)
500 if (b
->x1
>= b
->x2
|| b
->y1
>= b
->y2
)
507 rmesa
->sarea
->nbox
= n
;
512 ret
= drmCommandNone( rmesa
->dri
.fd
, DRM_RADEON_SWAP
);
515 fprintf( stderr
, "DRM_RADEON_SWAP_BUFFERS: return = %d\n", ret
);
516 UNLOCK_HARDWARE( rmesa
);
521 UNLOCK_HARDWARE( rmesa
);
524 static int radeonScheduleSwap(__DRIdrawablePrivate
*dPriv
, GLboolean
*missed_target
)
526 radeonContextPtr rmesa
;
528 rmesa
= (radeonContextPtr
) dPriv
->driContextPriv
->driverPrivate
;
529 radeon_firevertices(rmesa
);
531 LOCK_HARDWARE( rmesa
);
533 if (!dPriv
->numClipRects
) {
534 UNLOCK_HARDWARE(rmesa
);
535 usleep(10000); /* throttle invisible client 10ms */
539 radeonWaitForFrameCompletion(rmesa
);
541 UNLOCK_HARDWARE(rmesa
);
542 driWaitForVBlank(dPriv
, missed_target
);
547 static GLboolean
radeonPageFlip( __DRIdrawablePrivate
*dPriv
)
549 radeonContextPtr radeon
;
551 __DRIscreenPrivate
*psp
;
552 struct radeon_renderbuffer
*rrb
;
553 struct radeon_framebuffer
*rfb
;
556 assert(dPriv
->driContextPriv
);
557 assert(dPriv
->driContextPriv
->driverPrivate
);
559 radeon
= (radeonContextPtr
) dPriv
->driContextPriv
->driverPrivate
;
560 rfb
= dPriv
->driverPrivate
;
561 rrb
= (void *)rfb
->base
.Attachment
[BUFFER_FRONT_LEFT
].Renderbuffer
;
563 psp
= dPriv
->driScreenPriv
;
565 LOCK_HARDWARE(radeon
);
567 if ( RADEON_DEBUG
& RADEON_IOCTL
) {
568 fprintf(stderr
, "%s: pfCurrentPage: %d %d\n", __FUNCTION__
,
569 radeon
->sarea
->pfCurrentPage
, radeon
->sarea
->pfState
);
571 drm_clip_rect_t
*box
= dPriv
->pClipRects
;
572 drm_clip_rect_t
*b
= radeon
->sarea
->boxes
;
574 radeon
->sarea
->nbox
= 1;
576 ret
= drmCommandNone( radeon
->dri
.fd
, DRM_RADEON_FLIP
);
578 UNLOCK_HARDWARE(radeon
);
581 fprintf( stderr
, "DRM_RADEON_FLIP: return = %d\n", ret
);
588 rfb
->pf_current_page
= radeon
->sarea
->pfCurrentPage
;
589 radeon_flip_renderbuffers(rfb
);
590 radeon_draw_buffer(radeon
->glCtx
, &rfb
->base
);
597 * Swap front and back buffer.
599 void radeonSwapBuffers(__DRIdrawablePrivate
* dPriv
)
602 __DRIscreenPrivate
*psp
;
604 if (dPriv
->driContextPriv
&& dPriv
->driContextPriv
->driverPrivate
) {
605 radeonContextPtr radeon
;
608 radeon
= (radeonContextPtr
) dPriv
->driContextPriv
->driverPrivate
;
611 if (ctx
->Visual
.doubleBufferMode
) {
612 GLboolean missed_target
;
613 struct radeon_framebuffer
*rfb
= dPriv
->driverPrivate
;
614 _mesa_notifySwapBuffers(ctx
);/* flush pending rendering comands */
616 radeonScheduleSwap(dPriv
, &missed_target
);
618 if (rfb
->pf_active
) {
619 radeonPageFlip(dPriv
);
621 radeonCopyBuffer(dPriv
, NULL
);
624 psp
= dPriv
->driScreenPriv
;
627 (*psp
->systemTime
->getUST
)( & ust
);
628 if ( missed_target
) {
629 rfb
->swap_missed_count
++;
630 rfb
->swap_missed_ust
= ust
- rfb
->swap_ust
;
634 radeon
->hw
.all_dirty
= GL_TRUE
;
637 /* XXX this shouldn't be an error but we can't handle it for now */
638 _mesa_problem(NULL
, "%s: drawable has no context!",
643 void radeonCopySubBuffer(__DRIdrawablePrivate
* dPriv
,
644 int x
, int y
, int w
, int h
)
646 if (dPriv
->driContextPriv
&& dPriv
->driContextPriv
->driverPrivate
) {
647 radeonContextPtr radeon
;
650 radeon
= (radeonContextPtr
) dPriv
->driContextPriv
->driverPrivate
;
653 if (ctx
->Visual
.doubleBufferMode
) {
654 drm_clip_rect_t rect
;
655 rect
.x1
= x
+ dPriv
->x
;
656 rect
.y1
= (dPriv
->h
- y
- h
) + dPriv
->y
;
657 rect
.x2
= rect
.x1
+ w
;
658 rect
.y2
= rect
.y1
+ h
;
659 _mesa_notifySwapBuffers(ctx
); /* flush pending rendering comands */
660 radeonCopyBuffer(dPriv
, &rect
);
663 /* XXX this shouldn't be an error but we can't handle it for now */
664 _mesa_problem(NULL
, "%s: drawable has no context!",
669 void radeon_draw_buffer(GLcontext
*ctx
, struct gl_framebuffer
*fb
)
671 radeonContextPtr radeon
= RADEON_CONTEXT(ctx
);
672 struct radeon_renderbuffer
*rrbDepth
= NULL
, *rrbStencil
= NULL
,
678 /* this can happen during the initial context initialization */
682 /* radeons only handle 1 color draw so far */
683 if (fb
->_NumColorDrawBuffers
!= 1) {
684 radeon
->vtbl
.fallback(ctx
, RADEON_FALLBACK_DRAW_BUFFER
, GL_TRUE
);
688 /* Do this here, note core Mesa, since this function is called from
689 * many places within the driver.
691 if (ctx
->NewState
& (_NEW_BUFFERS
| _NEW_COLOR
| _NEW_PIXEL
)) {
692 /* this updates the DrawBuffer->_NumColorDrawBuffers fields, etc */
693 _mesa_update_framebuffer(ctx
);
694 /* this updates the DrawBuffer's Width/Height if it's a FBO */
695 _mesa_update_draw_buffer_bounds(ctx
);
698 if (fb
->_Status
!= GL_FRAMEBUFFER_COMPLETE_EXT
) {
699 /* this may occur when we're called by glBindFrameBuffer() during
700 * the process of someone setting up renderbuffers, etc.
702 /*_mesa_debug(ctx, "DrawBuffer: incomplete user FBO\n");*/
707 ;/* do something depthy/stencily TODO */
712 if (fb
->_ColorDrawBufferIndexes
[0] == BUFFER_FRONT_LEFT
) {
713 rrbColor
= radeon_renderbuffer(fb
->Attachment
[BUFFER_FRONT_LEFT
].Renderbuffer
);
714 radeon
->front_cliprects
= GL_TRUE
;
715 radeon
->front_buffer_dirty
= GL_TRUE
;
717 rrbColor
= radeon_renderbuffer(fb
->Attachment
[BUFFER_BACK_LEFT
].Renderbuffer
);
718 radeon
->front_cliprects
= GL_FALSE
;
721 /* user FBO in theory */
722 struct radeon_renderbuffer
*rrb
;
723 rrb
= radeon_renderbuffer(fb
->_ColorDrawBuffers
[0]);
725 offset
= rrb
->draw_offset
;
728 radeon
->constant_cliprect
= GL_TRUE
;
731 if (rrbColor
== NULL
)
732 radeon
->vtbl
.fallback(ctx
, RADEON_FALLBACK_DRAW_BUFFER
, GL_TRUE
);
734 radeon
->vtbl
.fallback(ctx
, RADEON_FALLBACK_DRAW_BUFFER
, GL_FALSE
);
737 if (fb
->_DepthBuffer
&& fb
->_DepthBuffer
->Wrapped
) {
738 rrbDepth
= radeon_renderbuffer(fb
->_DepthBuffer
->Wrapped
);
739 if (rrbDepth
&& rrbDepth
->bo
) {
740 radeon
->vtbl
.fallback(ctx
, RADEON_FALLBACK_DEPTH_BUFFER
, GL_FALSE
);
742 radeon
->vtbl
.fallback(ctx
, RADEON_FALLBACK_DEPTH_BUFFER
, GL_TRUE
);
745 radeon
->vtbl
.fallback(ctx
, RADEON_FALLBACK_DEPTH_BUFFER
, GL_FALSE
);
749 if (fb
->_StencilBuffer
&& fb
->_StencilBuffer
->Wrapped
) {
750 rrbStencil
= radeon_renderbuffer(fb
->_StencilBuffer
->Wrapped
);
751 if (rrbStencil
&& rrbStencil
->bo
) {
752 radeon
->vtbl
.fallback(ctx
, RADEON_FALLBACK_STENCIL_BUFFER
, GL_FALSE
);
753 /* need to re-compute stencil hw state */
755 rrbDepth
= rrbStencil
;
757 radeon
->vtbl
.fallback(ctx
, RADEON_FALLBACK_STENCIL_BUFFER
, GL_TRUE
);
760 radeon
->vtbl
.fallback(ctx
, RADEON_FALLBACK_STENCIL_BUFFER
, GL_FALSE
);
761 if (ctx
->Driver
.Enable
!= NULL
)
762 ctx
->Driver
.Enable(ctx
, GL_STENCIL_TEST
, ctx
->Stencil
.Enabled
);
764 ctx
->NewState
|= _NEW_STENCIL
;
767 /* Update culling direction which changes depending on the
768 * orientation of the buffer:
770 if (ctx
->Driver
.FrontFace
)
771 ctx
->Driver
.FrontFace(ctx
, ctx
->Polygon
.FrontFace
);
773 ctx
->NewState
|= _NEW_POLYGON
;
776 * Update depth test state
778 if (ctx
->Driver
.Enable
) {
779 ctx
->Driver
.Enable(ctx
, GL_DEPTH_TEST
,
780 (ctx
->Depth
.Test
&& fb
->Visual
.depthBits
> 0));
781 /* Need to update the derived ctx->Stencil._Enabled first */
782 ctx
->Driver
.Enable(ctx
, GL_STENCIL_TEST
,
783 (ctx
->Stencil
.Enabled
&& fb
->Visual
.stencilBits
> 0));
785 ctx
->NewState
|= (_NEW_DEPTH
| _NEW_STENCIL
);
788 _mesa_reference_renderbuffer(&radeon
->state
.depth
.rb
, &rrbDepth
->base
);
789 _mesa_reference_renderbuffer(&radeon
->state
.color
.rb
, &rrbColor
->base
);
790 radeon
->state
.color
.draw_offset
= offset
;
793 /* update viewport since it depends on window size */
794 if (ctx
->Driver
.Viewport
) {
795 ctx
->Driver
.Viewport(ctx
, ctx
->Viewport
.X
, ctx
->Viewport
.Y
,
796 ctx
->Viewport
.Width
, ctx
->Viewport
.Height
);
801 ctx
->NewState
|= _NEW_VIEWPORT
;
803 /* Set state we know depends on drawable parameters:
805 radeonUpdateScissor(ctx
);
806 radeon
->NewGLState
|= _NEW_SCISSOR
;
808 if (ctx
->Driver
.DepthRange
)
809 ctx
->Driver
.DepthRange(ctx
,
813 /* Update culling direction which changes depending on the
814 * orientation of the buffer:
816 if (ctx
->Driver
.FrontFace
)
817 ctx
->Driver
.FrontFace(ctx
, ctx
->Polygon
.FrontFace
);
819 ctx
->NewState
|= _NEW_POLYGON
;
823 * Called via glDrawBuffer.
825 void radeonDrawBuffer( GLcontext
*ctx
, GLenum mode
)
827 if (RADEON_DEBUG
& RADEON_DRI
)
828 fprintf(stderr
, "%s %s\n", __FUNCTION__
,
829 _mesa_lookup_enum_by_nr( mode
));
831 if (ctx
->DrawBuffer
->Name
== 0) {
832 radeonContextPtr radeon
= RADEON_CONTEXT(ctx
);
834 const GLboolean was_front_buffer_rendering
=
835 radeon
->is_front_buffer_rendering
;
837 radeon
->is_front_buffer_rendering
= (mode
== GL_FRONT_LEFT
) ||
840 /* If we weren't front-buffer rendering before but we are now, make sure
841 * that the front-buffer has actually been allocated.
843 if (!was_front_buffer_rendering
&& radeon
->is_front_buffer_rendering
) {
844 radeon_update_renderbuffers(radeon
->dri
.context
,
845 radeon
->dri
.context
->driDrawablePriv
);
849 radeon_draw_buffer(ctx
, ctx
->DrawBuffer
);
852 void radeonReadBuffer( GLcontext
*ctx
, GLenum mode
)
854 if ((ctx
->DrawBuffer
!= NULL
) && (ctx
->DrawBuffer
->Name
== 0)) {
855 struct radeon_context
*const rmesa
= RADEON_CONTEXT(ctx
);
856 const GLboolean was_front_buffer_reading
= rmesa
->is_front_buffer_reading
;
857 rmesa
->is_front_buffer_reading
= (mode
== GL_FRONT_LEFT
)
858 || (mode
== GL_FRONT
);
860 if (!was_front_buffer_reading
&& rmesa
->is_front_buffer_reading
) {
861 radeon_update_renderbuffers(rmesa
->dri
.context
,
862 rmesa
->dri
.context
->driReadablePriv
);
865 /* nothing, until we implement h/w glRead/CopyPixels or CopyTexImage */
866 if (ctx
->ReadBuffer
== ctx
->DrawBuffer
) {
867 /* This will update FBO completeness status.
868 * A framebuffer will be incomplete if the GL_READ_BUFFER setting
869 * refers to a missing renderbuffer. Calling glReadBuffer can set
870 * that straight and can make the drawing buffer complete.
872 radeon_draw_buffer(ctx
, ctx
->DrawBuffer
);
877 /* Turn on/off page flipping according to the flags in the sarea:
879 void radeonUpdatePageFlipping(radeonContextPtr radeon
)
881 struct radeon_framebuffer
*rfb
= radeon_get_drawable(radeon
)->driverPrivate
;
883 rfb
->pf_active
= radeon
->sarea
->pfState
;
884 rfb
->pf_current_page
= radeon
->sarea
->pfCurrentPage
;
885 rfb
->pf_num_pages
= 2;
886 radeon_flip_renderbuffers(rfb
);
887 radeon_draw_buffer(radeon
->glCtx
, radeon
->glCtx
->DrawBuffer
);
890 void radeon_window_moved(radeonContextPtr radeon
)
892 /* Cliprects has to be updated before doing anything else */
893 radeonSetCliprects(radeon
);
894 if (!radeon
->radeonScreen
->driScreen
->dri2
.enabled
) {
895 radeonUpdatePageFlipping(radeon
);
899 void radeon_viewport(GLcontext
*ctx
, GLint x
, GLint y
, GLsizei width
, GLsizei height
)
901 radeonContextPtr radeon
= RADEON_CONTEXT(ctx
);
902 __DRIcontext
*driContext
= radeon
->dri
.context
;
903 void (*old_viewport
)(GLcontext
*ctx
, GLint x
, GLint y
,
904 GLsizei w
, GLsizei h
);
906 if (!driContext
->driScreenPriv
->dri2
.enabled
)
909 if (!radeon
->meta
.internal_viewport_call
&& ctx
->DrawBuffer
->Name
== 0) {
910 if (radeon
->is_front_buffer_rendering
) {
911 ctx
->Driver
.Flush(ctx
);
913 radeon_update_renderbuffers(driContext
, driContext
->driDrawablePriv
);
914 if (driContext
->driDrawablePriv
!= driContext
->driReadablePriv
)
915 radeon_update_renderbuffers(driContext
, driContext
->driReadablePriv
);
918 old_viewport
= ctx
->Driver
.Viewport
;
919 ctx
->Driver
.Viewport
= NULL
;
920 radeon_window_moved(radeon
);
921 radeon_draw_buffer(ctx
, radeon
->glCtx
->DrawBuffer
);
922 ctx
->Driver
.Viewport
= old_viewport
;
925 static void radeon_print_state_atom_prekmm(radeonContextPtr radeon
, struct radeon_state_atom
*state
)
928 int dwords
= (*state
->check
) (radeon
->glCtx
, state
);
929 drm_r300_cmd_header_t cmd
;
931 fprintf(stderr
, " emit %s %d/%d\n", state
->name
, dwords
, state
->cmd_size
);
933 if (radeon_is_debug_enabled(RADEON_STATE
, RADEON_TRACE
)) {
934 if (dwords
> state
->cmd_size
)
935 dwords
= state
->cmd_size
;
937 for (i
= 0; i
< dwords
;) {
938 cmd
= *((drm_r300_cmd_header_t
*) &state
->cmd
[i
]);
939 reg
= (cmd
.packet0
.reghi
<< 8) | cmd
.packet0
.reglo
;
940 fprintf(stderr
, " %s[%d]: cmdpacket0 (first reg=0x%04x, count=%d)\n",
941 state
->name
, i
, reg
, cmd
.packet0
.count
);
943 for (j
= 0; j
< cmd
.packet0
.count
&& i
< dwords
; j
++) {
944 fprintf(stderr
, " %s[%d]: 0x%04x = %08x\n",
945 state
->name
, i
, reg
, state
->cmd
[i
]);
953 static void radeon_print_state_atom(radeonContextPtr radeon
, struct radeon_state_atom
*state
)
955 int i
, j
, reg
, count
;
958 if (!radeon_is_debug_enabled(RADEON_STATE
, RADEON_VERBOSE
) )
961 if (!radeon
->radeonScreen
->kernel_mm
) {
962 radeon_print_state_atom_prekmm(radeon
, state
);
966 dwords
= (*state
->check
) (radeon
->glCtx
, state
);
968 fprintf(stderr
, " emit %s %d/%d\n", state
->name
, dwords
, state
->cmd_size
);
970 if (radeon_is_debug_enabled(RADEON_STATE
, RADEON_TRACE
)) {
971 if (dwords
> state
->cmd_size
)
972 dwords
= state
->cmd_size
;
973 for (i
= 0; i
< dwords
;) {
974 packet0
= state
->cmd
[i
];
975 reg
= (packet0
& 0x1FFF) << 2;
976 count
= ((packet0
& 0x3FFF0000) >> 16) + 1;
977 fprintf(stderr
, " %s[%d]: cmdpacket0 (first reg=0x%04x, count=%d)\n",
978 state
->name
, i
, reg
, count
);
980 for (j
= 0; j
< count
&& i
< dwords
; j
++) {
981 fprintf(stderr
, " %s[%d]: 0x%04x = %08x\n",
982 state
->name
, i
, reg
, state
->cmd
[i
]);
991 * Count total size for next state emit.
993 GLuint
radeonCountStateEmitSize(radeonContextPtr radeon
)
995 struct radeon_state_atom
*atom
;
997 /* check if we are going to emit full state */
999 if (radeon
->cmdbuf
.cs
->cdw
&& !radeon
->hw
.all_dirty
) {
1000 if (!radeon
->hw
.is_dirty
)
1002 foreach(atom
, &radeon
->hw
.atomlist
) {
1004 const GLuint atom_size
= atom
->check(radeon
->glCtx
, atom
);
1005 dwords
+= atom_size
;
1006 if (RADEON_CMDBUF
&& atom_size
) {
1007 radeon_print_state_atom(radeon
, atom
);
1012 foreach(atom
, &radeon
->hw
.atomlist
) {
1013 const GLuint atom_size
= atom
->check(radeon
->glCtx
, atom
);
1014 dwords
+= atom_size
;
1015 if (RADEON_CMDBUF
&& atom_size
) {
1016 radeon_print_state_atom(radeon
, atom
);
1022 radeon_print(RADEON_STATE
, RADEON_NORMAL
, "%s %u\n", __func__
, dwords
);
1026 static INLINE
void radeon_emit_atom(radeonContextPtr radeon
, struct radeon_state_atom
*atom
)
1028 BATCH_LOCALS(radeon
);
1031 dwords
= (*atom
->check
) (radeon
->glCtx
, atom
);
1034 radeon_print_state_atom(radeon
, atom
);
1037 (*atom
->emit
)(radeon
->glCtx
, atom
);
1039 BEGIN_BATCH_NO_AUTOSTATE(dwords
);
1040 OUT_BATCH_TABLE(atom
->cmd
, dwords
);
1044 radeon_print(RADEON_STATE
, RADEON_VERBOSE
, " skip state %s\n", atom
->name
);
1046 atom
->dirty
= GL_FALSE
;
1050 static INLINE
void radeonEmitAtoms(radeonContextPtr radeon
, GLboolean emitAll
)
1052 struct radeon_state_atom
*atom
;
1054 if (radeon
->vtbl
.pre_emit_atoms
)
1055 radeon
->vtbl
.pre_emit_atoms(radeon
);
1057 /* Emit actual atoms */
1058 if (radeon
->hw
.all_dirty
|| emitAll
) {
1059 foreach(atom
, &radeon
->hw
.atomlist
)
1060 radeon_emit_atom( radeon
, atom
);
1062 foreach(atom
, &radeon
->hw
.atomlist
) {
1064 radeon_emit_atom( radeon
, atom
);
1071 static GLboolean
radeon_revalidate_bos(GLcontext
*ctx
)
1073 radeonContextPtr radeon
= RADEON_CONTEXT(ctx
);
1076 ret
= radeon_cs_space_check(radeon
->cmdbuf
.cs
);
1077 if (ret
== RADEON_CS_SPACE_FLUSH
)
1082 void radeonEmitState(radeonContextPtr radeon
)
1084 radeon_print(RADEON_STATE
, RADEON_NORMAL
, "%s\n", __FUNCTION__
);
1086 if (radeon
->vtbl
.pre_emit_state
)
1087 radeon
->vtbl
.pre_emit_state(radeon
);
1089 /* this code used to return here but now it emits zbs */
1090 if (radeon
->cmdbuf
.cs
->cdw
&& !radeon
->hw
.is_dirty
&& !radeon
->hw
.all_dirty
)
1093 if (!radeon
->cmdbuf
.cs
->cdw
) {
1094 if (RADEON_DEBUG
& RADEON_STATE
)
1095 fprintf(stderr
, "Begin reemit state\n");
1097 radeonEmitAtoms(radeon
, GL_TRUE
);
1100 if (RADEON_DEBUG
& RADEON_STATE
)
1101 fprintf(stderr
, "Begin dirty state\n");
1103 radeonEmitAtoms(radeon
, GL_FALSE
);
1106 radeon
->hw
.is_dirty
= GL_FALSE
;
1107 radeon
->hw
.all_dirty
= GL_FALSE
;
1111 void radeonFlush(GLcontext
*ctx
)
1113 radeonContextPtr radeon
= RADEON_CONTEXT(ctx
);
1114 if (RADEON_DEBUG
& RADEON_IOCTL
)
1115 fprintf(stderr
, "%s %d\n", __FUNCTION__
, radeon
->cmdbuf
.cs
->cdw
);
1117 /* okay if we have no cmds in the buffer &&
1118 we have no DMA flush &&
1119 we have no DMA buffer allocated.
1120 then no point flushing anything at all.
1122 if (!radeon
->dma
.flush
&& !radeon
->cmdbuf
.cs
->cdw
&& is_empty_list(&radeon
->dma
.reserved
))
1125 if (radeon
->dma
.flush
)
1126 radeon
->dma
.flush( ctx
);
1128 if (radeon
->cmdbuf
.cs
->cdw
)
1129 rcommonFlushCmdBuf(radeon
, __FUNCTION__
);
1131 if ((ctx
->DrawBuffer
->Name
== 0) && radeon
->front_buffer_dirty
) {
1132 __DRIscreen
*const screen
= radeon
->radeonScreen
->driScreen
;
1134 if (screen
->dri2
.loader
&& (screen
->dri2
.loader
->base
.version
>= 2)
1135 && (screen
->dri2
.loader
->flushFrontBuffer
!= NULL
)) {
1136 __DRIdrawablePrivate
* drawable
= radeon_get_drawable(radeon
);
1137 (*screen
->dri2
.loader
->flushFrontBuffer
)(drawable
, drawable
->loaderPrivate
);
1139 /* Only clear the dirty bit if front-buffer rendering is no longer
1140 * enabled. This is done so that the dirty bit can only be set in
1141 * glDrawBuffer. Otherwise the dirty bit would have to be set at
1142 * each of N places that do rendering. This has worse performances,
1143 * but it is much easier to get correct.
1145 if (!radeon
->is_front_buffer_rendering
) {
1146 radeon
->front_buffer_dirty
= GL_FALSE
;
1152 /* Make sure all commands have been sent to the hardware and have
1153 * completed processing.
1155 void radeonFinish(GLcontext
* ctx
)
1157 radeonContextPtr radeon
= RADEON_CONTEXT(ctx
);
1158 struct gl_framebuffer
*fb
= ctx
->DrawBuffer
;
1161 if (ctx
->Driver
.Flush
)
1162 ctx
->Driver
.Flush(ctx
); /* +r6/r7 */
1164 if (radeon
->radeonScreen
->kernel_mm
) {
1165 for (i
= 0; i
< fb
->_NumColorDrawBuffers
; i
++) {
1166 struct radeon_renderbuffer
*rrb
;
1167 rrb
= radeon_renderbuffer(fb
->_ColorDrawBuffers
[i
]);
1169 radeon_bo_wait(rrb
->bo
);
1172 struct radeon_renderbuffer
*rrb
;
1173 rrb
= radeon_get_depthbuffer(radeon
);
1175 radeon_bo_wait(rrb
->bo
);
1177 } else if (radeon
->do_irqs
) {
1178 LOCK_HARDWARE(radeon
);
1179 radeonEmitIrqLocked(radeon
);
1180 UNLOCK_HARDWARE(radeon
);
1181 radeonWaitIrq(radeon
);
1183 radeonWaitForIdle(radeon
);
1189 * Send the current command buffer via ioctl to the hardware.
1191 int rcommonFlushCmdBufLocked(radeonContextPtr rmesa
, const char *caller
)
1195 if (rmesa
->cmdbuf
.flushing
) {
1196 fprintf(stderr
, "Recursive call into r300FlushCmdBufLocked!\n");
1199 rmesa
->cmdbuf
.flushing
= 1;
1201 if (RADEON_DEBUG
& RADEON_IOCTL
) {
1202 fprintf(stderr
, "%s from %s - %i cliprects\n",
1203 __FUNCTION__
, caller
, rmesa
->numClipRects
);
1206 radeonEmitQueryEnd(rmesa
->glCtx
);
1208 if (rmesa
->cmdbuf
.cs
->cdw
) {
1209 ret
= radeon_cs_emit(rmesa
->cmdbuf
.cs
);
1210 rmesa
->hw
.all_dirty
= GL_TRUE
;
1212 radeon_cs_erase(rmesa
->cmdbuf
.cs
);
1213 rmesa
->cmdbuf
.flushing
= 0;
1215 if (radeon_revalidate_bos(rmesa
->glCtx
) == GL_FALSE
) {
1216 fprintf(stderr
,"failed to revalidate buffers\n");
1222 int rcommonFlushCmdBuf(radeonContextPtr rmesa
, const char *caller
)
1226 radeonReleaseDmaRegions(rmesa
);
1228 LOCK_HARDWARE(rmesa
);
1229 ret
= rcommonFlushCmdBufLocked(rmesa
, caller
);
1230 UNLOCK_HARDWARE(rmesa
);
1233 fprintf(stderr
, "drmRadeonCmdBuffer: %d. Kernel failed to "
1234 "parse or rejected command stream. See dmesg "
1235 "for more info.\n", ret
);
1243 * Make sure that enough space is available in the command buffer
1244 * by flushing if necessary.
1246 * \param dwords The number of dwords we need to be free on the command buffer
1248 GLboolean
rcommonEnsureCmdBufSpace(radeonContextPtr rmesa
, int dwords
, const char *caller
)
1250 if ((rmesa
->cmdbuf
.cs
->cdw
+ dwords
+ 128) > rmesa
->cmdbuf
.size
1251 || radeon_cs_need_flush(rmesa
->cmdbuf
.cs
)) {
1252 /* If we try to flush empty buffer there is too big rendering operation. */
1253 assert(rmesa
->cmdbuf
.cs
->cdw
);
1254 rcommonFlushCmdBuf(rmesa
, caller
);
1260 void rcommonInitCmdBuf(radeonContextPtr rmesa
)
1263 /* Initialize command buffer */
1264 size
= 256 * driQueryOptioni(&rmesa
->optionCache
,
1265 "command_buffer_size");
1266 if (size
< 2 * rmesa
->hw
.max_state_size
) {
1267 size
= 2 * rmesa
->hw
.max_state_size
+ 65535;
1269 if (size
> 64 * 256)
1272 radeon_print(RADEON_CS
, RADEON_VERBOSE
,
1273 "sizeof(drm_r300_cmd_header_t)=%zd\n", sizeof(drm_r300_cmd_header_t
));
1274 radeon_print(RADEON_CS
, RADEON_VERBOSE
,
1275 "sizeof(drm_radeon_cmd_buffer_t)=%zd\n", sizeof(drm_radeon_cmd_buffer_t
));
1276 radeon_print(RADEON_CS
, RADEON_VERBOSE
,
1277 "Allocating %d bytes command buffer (max state is %d bytes)\n",
1278 size
* 4, rmesa
->hw
.max_state_size
* 4);
1280 if (rmesa
->radeonScreen
->kernel_mm
) {
1281 int fd
= rmesa
->radeonScreen
->driScreen
->fd
;
1282 rmesa
->cmdbuf
.csm
= radeon_cs_manager_gem_ctor(fd
);
1284 rmesa
->cmdbuf
.csm
= radeon_cs_manager_legacy_ctor(rmesa
);
1286 if (rmesa
->cmdbuf
.csm
== NULL
) {
1287 /* FIXME: fatal error */
1290 rmesa
->cmdbuf
.cs
= radeon_cs_create(rmesa
->cmdbuf
.csm
, size
);
1291 assert(rmesa
->cmdbuf
.cs
!= NULL
);
1292 rmesa
->cmdbuf
.size
= size
;
1294 radeon_cs_space_set_flush(rmesa
->cmdbuf
.cs
,
1295 (void (*)(void *))rmesa
->glCtx
->Driver
.Flush
, rmesa
->glCtx
);
1297 if (!rmesa
->radeonScreen
->kernel_mm
) {
1298 radeon_cs_set_limit(rmesa
->cmdbuf
.cs
, RADEON_GEM_DOMAIN_VRAM
, rmesa
->radeonScreen
->texSize
[0]);
1299 radeon_cs_set_limit(rmesa
->cmdbuf
.cs
, RADEON_GEM_DOMAIN_GTT
, rmesa
->radeonScreen
->gartTextures
.size
);
1301 struct drm_radeon_gem_info mminfo
= { 0 };
1303 if (!drmCommandWriteRead(rmesa
->dri
.fd
, DRM_RADEON_GEM_INFO
, &mminfo
, sizeof(mminfo
)))
1305 radeon_cs_set_limit(rmesa
->cmdbuf
.cs
, RADEON_GEM_DOMAIN_VRAM
, mminfo
.vram_visible
);
1306 radeon_cs_set_limit(rmesa
->cmdbuf
.cs
, RADEON_GEM_DOMAIN_GTT
, mminfo
.gart_size
);
1312 * Destroy the command buffer
1314 void rcommonDestroyCmdBuf(radeonContextPtr rmesa
)
1316 radeon_cs_destroy(rmesa
->cmdbuf
.cs
);
1317 if (rmesa
->radeonScreen
->driScreen
->dri2
.enabled
|| rmesa
->radeonScreen
->kernel_mm
) {
1318 radeon_cs_manager_gem_dtor(rmesa
->cmdbuf
.csm
);
1320 radeon_cs_manager_legacy_dtor(rmesa
->cmdbuf
.csm
);
1324 void rcommonBeginBatch(radeonContextPtr rmesa
, int n
,
1327 const char *function
,
1330 if (!rmesa
->cmdbuf
.cs
->cdw
&& dostate
) {
1331 radeon_print(RADEON_STATE
, RADEON_NORMAL
,
1332 "Reemit state after flush (from %s)\n", function
);
1333 radeonEmitState(rmesa
);
1335 radeon_cs_begin(rmesa
->cmdbuf
.cs
, n
, file
, function
, line
);
1337 radeon_print(RADEON_CS
, RADEON_VERBOSE
, "BEGIN_BATCH(%d) at %d, from %s:%i\n",
1338 n
, rmesa
->cmdbuf
.cs
->cdw
, function
, line
);
1342 void radeonUserClear(GLcontext
*ctx
, GLuint mask
)
1344 _mesa_meta_Clear(ctx
, mask
);