1 /**************************************************************************
3 Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
5 The Weather Channel (TM) funded Tungsten Graphics to develop the
6 initial release of the Radeon 8500 driver under the XFree86 license.
7 This notice must be preserved.
9 Permission is hereby granted, free of charge, to any person obtaining
10 a copy of this software and associated documentation files (the
11 "Software"), to deal in the Software without restriction, including
12 without limitation the rights to use, copy, modify, merge, publish,
13 distribute, sublicense, and/or sell copies of the Software, and to
14 permit persons to whom the Software is furnished to do so, subject to
15 the following conditions:
17 The above copyright notice and this permission notice (including the
18 next paragraph) shall be included in all copies or substantial
19 portions of the Software.
21 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
22 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
23 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
24 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
25 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
26 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
27 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
29 **************************************************************************/
33 * Keith Whitwell <keith@tungstengraphics.com>
37 - Scissor implementation
38 - buffer swap/copy ioctls
41 - cmdbuffer management
45 #include "main/glheader.h"
46 #include "main/imports.h"
47 #include "main/context.h"
48 #include "main/enums.h"
49 #include "main/framebuffer.h"
50 #include "main/renderbuffer.h"
51 #include "drivers/common/meta.h"
55 #include "radeon_common.h"
56 #include "radeon_bocs_wrapper.h"
57 #include "radeon_lock.h"
58 #include "radeon_drm.h"
59 #include "radeon_queryobj.h"
62 * Enable verbose debug output for emit code.
65 * 2 also print state values
67 #define RADEON_CMDBUF 0
69 /* =============================================================
73 static GLboolean
intersect_rect(drm_clip_rect_t
* out
,
74 drm_clip_rect_t
* a
, drm_clip_rect_t
* b
)
85 if (out
->x1
>= out
->x2
)
87 if (out
->y1
>= out
->y2
)
92 void radeonRecalcScissorRects(radeonContextPtr radeon
)
97 /* Grow cliprect store?
99 if (radeon
->state
.scissor
.numAllocedClipRects
< radeon
->numClipRects
) {
100 while (radeon
->state
.scissor
.numAllocedClipRects
<
101 radeon
->numClipRects
) {
102 radeon
->state
.scissor
.numAllocedClipRects
+= 1; /* zero case */
103 radeon
->state
.scissor
.numAllocedClipRects
*= 2;
106 if (radeon
->state
.scissor
.pClipRects
)
107 FREE(radeon
->state
.scissor
.pClipRects
);
109 radeon
->state
.scissor
.pClipRects
=
110 MALLOC(radeon
->state
.scissor
.numAllocedClipRects
*
111 sizeof(drm_clip_rect_t
));
113 if (radeon
->state
.scissor
.pClipRects
== NULL
) {
114 radeon
->state
.scissor
.numAllocedClipRects
= 0;
119 out
= radeon
->state
.scissor
.pClipRects
;
120 radeon
->state
.scissor
.numClipRects
= 0;
122 for (i
= 0; i
< radeon
->numClipRects
; i
++) {
123 if (intersect_rect(out
,
124 &radeon
->pClipRects
[i
],
125 &radeon
->state
.scissor
.rect
)) {
126 radeon
->state
.scissor
.numClipRects
++;
131 if (radeon
->vtbl
.update_scissor
)
132 radeon
->vtbl
.update_scissor(radeon
->glCtx
);
135 void radeon_get_cliprects(radeonContextPtr radeon
,
136 struct drm_clip_rect
**cliprects
,
137 unsigned int *num_cliprects
,
138 int *x_off
, int *y_off
)
140 __DRIdrawable
*dPriv
= radeon_get_drawable(radeon
);
141 struct radeon_framebuffer
*rfb
= dPriv
->driverPrivate
;
143 if (radeon
->constant_cliprect
) {
144 radeon
->fboRect
.x1
= 0;
145 radeon
->fboRect
.y1
= 0;
146 radeon
->fboRect
.x2
= radeon
->glCtx
->DrawBuffer
->Width
;
147 radeon
->fboRect
.y2
= radeon
->glCtx
->DrawBuffer
->Height
;
149 *cliprects
= &radeon
->fboRect
;
153 } else if (radeon
->front_cliprects
||
154 rfb
->pf_active
|| dPriv
->numBackClipRects
== 0) {
155 *cliprects
= dPriv
->pClipRects
;
156 *num_cliprects
= dPriv
->numClipRects
;
160 *num_cliprects
= dPriv
->numBackClipRects
;
161 *cliprects
= dPriv
->pBackClipRects
;
162 *x_off
= dPriv
->backX
;
163 *y_off
= dPriv
->backY
;
168 * Update cliprects and scissors.
170 void radeonSetCliprects(radeonContextPtr radeon
)
172 __DRIdrawable
*const drawable
= radeon_get_drawable(radeon
);
173 __DRIdrawable
*const readable
= radeon_get_readable(radeon
);
175 if(drawable
== NULL
&& readable
== NULL
)
178 struct radeon_framebuffer
*const draw_rfb
= drawable
->driverPrivate
;
179 struct radeon_framebuffer
*const read_rfb
= readable
->driverPrivate
;
182 radeon_get_cliprects(radeon
, &radeon
->pClipRects
,
183 &radeon
->numClipRects
, &x_off
, &y_off
);
185 if ((draw_rfb
->base
.Width
!= drawable
->w
) ||
186 (draw_rfb
->base
.Height
!= drawable
->h
)) {
187 _mesa_resize_framebuffer(radeon
->glCtx
, &draw_rfb
->base
,
188 drawable
->w
, drawable
->h
);
189 draw_rfb
->base
.Initialized
= GL_TRUE
;
192 if (drawable
!= readable
) {
193 if ((read_rfb
->base
.Width
!= readable
->w
) ||
194 (read_rfb
->base
.Height
!= readable
->h
)) {
195 _mesa_resize_framebuffer(radeon
->glCtx
, &read_rfb
->base
,
196 readable
->w
, readable
->h
);
197 read_rfb
->base
.Initialized
= GL_TRUE
;
201 if (radeon
->state
.scissor
.enabled
)
202 radeonRecalcScissorRects(radeon
);
208 void radeonUpdateScissor( struct gl_context
*ctx
)
210 radeonContextPtr rmesa
= RADEON_CONTEXT(ctx
);
211 GLint x
= ctx
->Scissor
.X
, y
= ctx
->Scissor
.Y
;
212 GLsizei w
= ctx
->Scissor
.Width
, h
= ctx
->Scissor
.Height
;
214 int min_x
, min_y
, max_x
, max_y
;
216 if (!ctx
->DrawBuffer
)
219 max_x
= ctx
->DrawBuffer
->Width
- 1;
220 max_y
= ctx
->DrawBuffer
->Height
- 1;
222 if ( !ctx
->DrawBuffer
->Name
) {
224 y1
= ctx
->DrawBuffer
->Height
- (y
+ h
);
234 if (!rmesa
->radeonScreen
->kernel_mm
) {
235 /* Fix scissors for dri 1 */
236 __DRIdrawable
*dPriv
= radeon_get_drawable(rmesa
);
240 max_x
+= dPriv
->x
+ 1;
244 max_y
+= dPriv
->y
+ 1;
247 rmesa
->state
.scissor
.rect
.x1
= CLAMP(x1
, min_x
, max_x
);
248 rmesa
->state
.scissor
.rect
.y1
= CLAMP(y1
, min_y
, max_y
);
249 rmesa
->state
.scissor
.rect
.x2
= CLAMP(x2
, min_x
, max_x
);
250 rmesa
->state
.scissor
.rect
.y2
= CLAMP(y2
, min_y
, max_y
);
252 radeonRecalcScissorRects( rmesa
);
255 /* =============================================================
259 void radeonScissor(struct gl_context
* ctx
, GLint x
, GLint y
, GLsizei w
, GLsizei h
)
261 radeonContextPtr radeon
= RADEON_CONTEXT(ctx
);
262 if (ctx
->Scissor
.Enabled
) {
263 /* We don't pipeline cliprect changes */
264 radeon_firevertices(radeon
);
265 radeonUpdateScissor(ctx
);
269 /* ================================================================
270 * SwapBuffers with client-side throttling
273 static uint32_t radeonGetLastFrame(radeonContextPtr radeon
)
275 drm_radeon_getparam_t gp
;
279 gp
.param
= RADEON_PARAM_LAST_FRAME
;
280 gp
.value
= (int *)&frame
;
281 ret
= drmCommandWriteRead(radeon
->dri
.fd
, DRM_RADEON_GETPARAM
,
284 fprintf(stderr
, "%s: drmRadeonGetParam: %d\n", __FUNCTION__
,
/* Ask the kernel DRM module for the "age" counter of the last completed
 * clear (RADEON_PARAM_LAST_CLEAR), used for client-side throttling.
 * NOTE(review): the extraction dropped interior lines of this function
 * (local declarations of 'age'/'ret', error exit, final return) — the
 * comments below describe only the fragments visible here. */
292 uint32_t radeonGetAge(radeonContextPtr radeon
)
294 drm_radeon_getparam_t gp
;
/* Fill the getparam request: result is written through gp.value. */
298 gp
.param
= RADEON_PARAM_LAST_CLEAR
;
299 gp
.value
= (int *)&age
;
/* Round-trip ioctl to the radeon DRM fd. */
300 ret
= drmCommandWriteRead(radeon
->dri
.fd
, DRM_RADEON_GETPARAM
,
/* On ioctl failure the error code is reported to stderr (exit path
 * truncated in this view). */
303 fprintf(stderr
, "%s: drmRadeonGetParam: %d\n", __FUNCTION__
,
311 static void radeonEmitIrqLocked(radeonContextPtr radeon
)
313 drm_radeon_irq_emit_t ie
;
316 ie
.irq_seq
= &radeon
->iw
.irq_seq
;
317 ret
= drmCommandWriteRead(radeon
->dri
.fd
, DRM_RADEON_IRQ_EMIT
,
320 fprintf(stderr
, "%s: drmRadeonIrqEmit: %d\n", __FUNCTION__
,
326 static void radeonWaitIrq(radeonContextPtr radeon
)
331 ret
= drmCommandWrite(radeon
->dri
.fd
, DRM_RADEON_IRQ_WAIT
,
332 &radeon
->iw
, sizeof(radeon
->iw
));
333 } while (ret
&& (errno
== EINTR
|| errno
== EBUSY
));
336 fprintf(stderr
, "%s: drmRadeonIrqWait: %d\n", __FUNCTION__
,
342 static void radeonWaitForFrameCompletion(radeonContextPtr radeon
)
344 drm_radeon_sarea_t
*sarea
= radeon
->sarea
;
346 if (radeon
->do_irqs
) {
347 if (radeonGetLastFrame(radeon
) < sarea
->last_frame
) {
348 if (!radeon
->irqsEmitted
) {
349 while (radeonGetLastFrame(radeon
) <
352 UNLOCK_HARDWARE(radeon
);
353 radeonWaitIrq(radeon
);
354 LOCK_HARDWARE(radeon
);
356 radeon
->irqsEmitted
= 10;
359 if (radeon
->irqsEmitted
) {
360 radeonEmitIrqLocked(radeon
);
361 radeon
->irqsEmitted
--;
364 while (radeonGetLastFrame(radeon
) < sarea
->last_frame
) {
365 UNLOCK_HARDWARE(radeon
);
366 if (radeon
->do_usleeps
)
368 LOCK_HARDWARE(radeon
);
374 void radeonWaitForIdleLocked(radeonContextPtr radeon
)
380 ret
= drmCommandNone(radeon
->dri
.fd
, DRM_RADEON_CP_IDLE
);
383 } while (ret
&& ++i
< 100);
386 UNLOCK_HARDWARE(radeon
);
387 fprintf(stderr
, "Error: R300 timed out... exiting\n");
/* Block until the CP (command processor) is idle. Only meaningful for
 * the legacy (non-DRI2) path, where the hardware lock must be taken
 * around the idle wait; under DRI2 the kernel handles synchronization
 * so this is a no-op. Closing brace truncated in this view. */
392 static void radeonWaitForIdle(radeonContextPtr radeon
)
394 if (!radeon
->radeonScreen
->driScreen
->dri2
.enabled
) {
395 LOCK_HARDWARE(radeon
);
396 radeonWaitForIdleLocked(radeon
);
397 UNLOCK_HARDWARE(radeon
);
401 static void radeon_flip_renderbuffers(struct radeon_framebuffer
*rfb
)
403 int current_page
= rfb
->pf_current_page
;
404 int next_page
= (current_page
+ 1) % rfb
->pf_num_pages
;
405 struct gl_renderbuffer
*tmp_rb
;
407 /* Exchange renderbuffers if necessary but make sure their
408 * reference counts are preserved.
410 if (rfb
->color_rb
[current_page
] &&
411 rfb
->base
.Attachment
[BUFFER_FRONT_LEFT
].Renderbuffer
!=
412 &rfb
->color_rb
[current_page
]->base
) {
414 _mesa_reference_renderbuffer(&tmp_rb
,
415 rfb
->base
.Attachment
[BUFFER_FRONT_LEFT
].Renderbuffer
);
416 tmp_rb
= &rfb
->color_rb
[current_page
]->base
;
417 _mesa_reference_renderbuffer(&rfb
->base
.Attachment
[BUFFER_FRONT_LEFT
].Renderbuffer
, tmp_rb
);
418 _mesa_reference_renderbuffer(&tmp_rb
, NULL
);
421 if (rfb
->color_rb
[next_page
] &&
422 rfb
->base
.Attachment
[BUFFER_BACK_LEFT
].Renderbuffer
!=
423 &rfb
->color_rb
[next_page
]->base
) {
425 _mesa_reference_renderbuffer(&tmp_rb
,
426 rfb
->base
.Attachment
[BUFFER_BACK_LEFT
].Renderbuffer
);
427 tmp_rb
= &rfb
->color_rb
[next_page
]->base
;
428 _mesa_reference_renderbuffer(&rfb
->base
.Attachment
[BUFFER_BACK_LEFT
].Renderbuffer
, tmp_rb
);
429 _mesa_reference_renderbuffer(&tmp_rb
, NULL
);
433 /* Copy the back color buffer to the front color buffer.
435 void radeonCopyBuffer( __DRIdrawable
*dPriv
,
436 const drm_clip_rect_t
*rect
)
438 radeonContextPtr rmesa
;
439 struct radeon_framebuffer
*rfb
;
443 assert(dPriv
->driContextPriv
);
444 assert(dPriv
->driContextPriv
->driverPrivate
);
446 rmesa
= (radeonContextPtr
) dPriv
->driContextPriv
->driverPrivate
;
448 LOCK_HARDWARE(rmesa
);
450 rfb
= dPriv
->driverPrivate
;
452 if ( RADEON_DEBUG
& RADEON_IOCTL
) {
453 fprintf( stderr
, "\n%s( %p )\n\n", __FUNCTION__
, (void *) rmesa
->glCtx
);
456 nbox
= dPriv
->numClipRects
; /* must be in locked region */
458 for ( i
= 0 ; i
< nbox
; ) {
459 GLint nr
= MIN2( i
+ RADEON_NR_SAREA_CLIPRECTS
, nbox
);
460 drm_clip_rect_t
*box
= dPriv
->pClipRects
;
461 drm_clip_rect_t
*b
= rmesa
->sarea
->boxes
;
464 for ( ; i
< nr
; i
++ ) {
470 if (rect
->x1
> b
->x1
)
472 if (rect
->y1
> b
->y1
)
474 if (rect
->x2
< b
->x2
)
476 if (rect
->y2
< b
->y2
)
479 if (b
->x1
>= b
->x2
|| b
->y1
>= b
->y2
)
486 rmesa
->sarea
->nbox
= n
;
491 ret
= drmCommandNone( rmesa
->dri
.fd
, DRM_RADEON_SWAP
);
494 fprintf( stderr
, "DRM_RADEON_SWAP_BUFFERS: return = %d\n", ret
);
495 UNLOCK_HARDWARE( rmesa
);
500 UNLOCK_HARDWARE( rmesa
);
503 static int radeonScheduleSwap(__DRIdrawable
*dPriv
, GLboolean
*missed_target
)
505 radeonContextPtr rmesa
;
507 rmesa
= (radeonContextPtr
) dPriv
->driContextPriv
->driverPrivate
;
508 radeon_firevertices(rmesa
);
510 LOCK_HARDWARE( rmesa
);
512 if (!dPriv
->numClipRects
) {
513 UNLOCK_HARDWARE(rmesa
);
514 usleep(10000); /* throttle invisible client 10ms */
518 radeonWaitForFrameCompletion(rmesa
);
520 UNLOCK_HARDWARE(rmesa
);
521 driWaitForVBlank(dPriv
, missed_target
);
526 static GLboolean
radeonPageFlip( __DRIdrawable
*dPriv
)
528 radeonContextPtr radeon
;
531 struct radeon_renderbuffer
*rrb
;
532 struct radeon_framebuffer
*rfb
;
535 assert(dPriv
->driContextPriv
);
536 assert(dPriv
->driContextPriv
->driverPrivate
);
538 radeon
= (radeonContextPtr
) dPriv
->driContextPriv
->driverPrivate
;
539 rfb
= dPriv
->driverPrivate
;
540 rrb
= (void *)rfb
->base
.Attachment
[BUFFER_FRONT_LEFT
].Renderbuffer
;
542 psp
= dPriv
->driScreenPriv
;
544 LOCK_HARDWARE(radeon
);
546 if ( RADEON_DEBUG
& RADEON_IOCTL
) {
547 fprintf(stderr
, "%s: pfCurrentPage: %d %d\n", __FUNCTION__
,
548 radeon
->sarea
->pfCurrentPage
, radeon
->sarea
->pfState
);
550 drm_clip_rect_t
*box
= dPriv
->pClipRects
;
551 drm_clip_rect_t
*b
= radeon
->sarea
->boxes
;
553 radeon
->sarea
->nbox
= 1;
555 ret
= drmCommandNone( radeon
->dri
.fd
, DRM_RADEON_FLIP
);
557 UNLOCK_HARDWARE(radeon
);
560 fprintf( stderr
, "DRM_RADEON_FLIP: return = %d\n", ret
);
567 rfb
->pf_current_page
= radeon
->sarea
->pfCurrentPage
;
568 radeon_flip_renderbuffers(rfb
);
569 radeon_draw_buffer(radeon
->glCtx
, &rfb
->base
);
576 * Swap front and back buffer.
578 void radeonSwapBuffers(__DRIdrawable
* dPriv
)
583 if (dPriv
->driContextPriv
&& dPriv
->driContextPriv
->driverPrivate
) {
584 radeonContextPtr radeon
;
585 struct gl_context
*ctx
;
587 radeon
= (radeonContextPtr
) dPriv
->driContextPriv
->driverPrivate
;
590 if (ctx
->Visual
.doubleBufferMode
) {
591 GLboolean missed_target
;
592 struct radeon_framebuffer
*rfb
= dPriv
->driverPrivate
;
593 _mesa_notifySwapBuffers(ctx
);/* flush pending rendering comands */
595 radeonScheduleSwap(dPriv
, &missed_target
);
597 if (rfb
->pf_active
) {
598 radeonPageFlip(dPriv
);
600 radeonCopyBuffer(dPriv
, NULL
);
603 psp
= dPriv
->driScreenPriv
;
606 (*psp
->systemTime
->getUST
)( & ust
);
607 if ( missed_target
) {
608 rfb
->swap_missed_count
++;
609 rfb
->swap_missed_ust
= ust
- rfb
->swap_ust
;
613 radeon
->hw
.all_dirty
= GL_TRUE
;
616 /* XXX this shouldn't be an error but we can't handle it for now */
617 _mesa_problem(NULL
, "%s: drawable has no context!",
622 void radeonCopySubBuffer(__DRIdrawable
* dPriv
,
623 int x
, int y
, int w
, int h
)
625 if (dPriv
->driContextPriv
&& dPriv
->driContextPriv
->driverPrivate
) {
626 radeonContextPtr radeon
;
627 struct gl_context
*ctx
;
629 radeon
= (radeonContextPtr
) dPriv
->driContextPriv
->driverPrivate
;
632 if (ctx
->Visual
.doubleBufferMode
) {
633 drm_clip_rect_t rect
;
634 rect
.x1
= x
+ dPriv
->x
;
635 rect
.y1
= (dPriv
->h
- y
- h
) + dPriv
->y
;
636 rect
.x2
= rect
.x1
+ w
;
637 rect
.y2
= rect
.y1
+ h
;
638 _mesa_notifySwapBuffers(ctx
); /* flush pending rendering comands */
639 radeonCopyBuffer(dPriv
, &rect
);
642 /* XXX this shouldn't be an error but we can't handle it for now */
643 _mesa_problem(NULL
, "%s: drawable has no context!",
649 * Check if we're about to draw into the front color buffer.
650 * If so, set the intel->front_buffer_dirty field to true.
/* If the current draw buffer is the window-system front-left buffer,
 * mark the context's front_buffer_dirty flag so a later flush knows the
 * front buffer must be pushed out.
 * NOTE(review): return type line and closing braces were dropped by the
 * extraction; only the visible check is documented here. */
653 radeon_check_front_buffer_rendering(struct gl_context
*ctx
)
655 radeonContextPtr radeon
= RADEON_CONTEXT(ctx
);
656 const struct gl_framebuffer
*fb
= ctx
->DrawBuffer
;
659 /* drawing to window system buffer */
660 if (fb
->_NumColorDrawBuffers
> 0) {
661 if (fb
->_ColorDrawBufferIndexes
[0] == BUFFER_FRONT_LEFT
) {
/* front-left is the active draw target: remember it is now dirty */
662 radeon
->front_buffer_dirty
= GL_TRUE
;
669 void radeon_draw_buffer(struct gl_context
*ctx
, struct gl_framebuffer
*fb
)
671 radeonContextPtr radeon
= RADEON_CONTEXT(ctx
);
672 struct radeon_renderbuffer
*rrbDepth
= NULL
, *rrbStencil
= NULL
,
678 /* this can happen during the initial context initialization */
682 /* radeons only handle 1 color draw so far */
683 if (fb
->_NumColorDrawBuffers
!= 1) {
684 radeon
->vtbl
.fallback(ctx
, RADEON_FALLBACK_DRAW_BUFFER
, GL_TRUE
);
688 /* Do this here, note core Mesa, since this function is called from
689 * many places within the driver.
691 if (ctx
->NewState
& (_NEW_BUFFERS
| _NEW_COLOR
| _NEW_PIXEL
)) {
692 /* this updates the DrawBuffer->_NumColorDrawBuffers fields, etc */
693 _mesa_update_framebuffer(ctx
);
694 /* this updates the DrawBuffer's Width/Height if it's a FBO */
695 _mesa_update_draw_buffer_bounds(ctx
);
698 if (fb
->_Status
!= GL_FRAMEBUFFER_COMPLETE_EXT
) {
699 /* this may occur when we're called by glBindFrameBuffer() during
700 * the process of someone setting up renderbuffers, etc.
702 /*_mesa_debug(ctx, "DrawBuffer: incomplete user FBO\n");*/
707 ;/* do something depthy/stencily TODO */
712 if (fb
->_ColorDrawBufferIndexes
[0] == BUFFER_FRONT_LEFT
) {
713 rrbColor
= radeon_renderbuffer(fb
->Attachment
[BUFFER_FRONT_LEFT
].Renderbuffer
);
714 radeon
->front_cliprects
= GL_TRUE
;
716 rrbColor
= radeon_renderbuffer(fb
->Attachment
[BUFFER_BACK_LEFT
].Renderbuffer
);
717 radeon
->front_cliprects
= GL_FALSE
;
720 /* user FBO in theory */
721 struct radeon_renderbuffer
*rrb
;
722 rrb
= radeon_renderbuffer(fb
->_ColorDrawBuffers
[0]);
724 offset
= rrb
->draw_offset
;
727 radeon
->constant_cliprect
= GL_TRUE
;
730 if (rrbColor
== NULL
)
731 radeon
->vtbl
.fallback(ctx
, RADEON_FALLBACK_DRAW_BUFFER
, GL_TRUE
);
733 radeon
->vtbl
.fallback(ctx
, RADEON_FALLBACK_DRAW_BUFFER
, GL_FALSE
);
736 if (fb
->_DepthBuffer
&& fb
->_DepthBuffer
->Wrapped
) {
737 rrbDepth
= radeon_renderbuffer(fb
->_DepthBuffer
->Wrapped
);
738 if (rrbDepth
&& rrbDepth
->bo
) {
739 radeon
->vtbl
.fallback(ctx
, RADEON_FALLBACK_DEPTH_BUFFER
, GL_FALSE
);
741 radeon
->vtbl
.fallback(ctx
, RADEON_FALLBACK_DEPTH_BUFFER
, GL_TRUE
);
744 radeon
->vtbl
.fallback(ctx
, RADEON_FALLBACK_DEPTH_BUFFER
, GL_FALSE
);
748 if (fb
->_StencilBuffer
&& fb
->_StencilBuffer
->Wrapped
) {
749 rrbStencil
= radeon_renderbuffer(fb
->_StencilBuffer
->Wrapped
);
750 if (rrbStencil
&& rrbStencil
->bo
) {
751 radeon
->vtbl
.fallback(ctx
, RADEON_FALLBACK_STENCIL_BUFFER
, GL_FALSE
);
752 /* need to re-compute stencil hw state */
754 rrbDepth
= rrbStencil
;
756 radeon
->vtbl
.fallback(ctx
, RADEON_FALLBACK_STENCIL_BUFFER
, GL_TRUE
);
759 radeon
->vtbl
.fallback(ctx
, RADEON_FALLBACK_STENCIL_BUFFER
, GL_FALSE
);
760 if (ctx
->Driver
.Enable
!= NULL
)
761 ctx
->Driver
.Enable(ctx
, GL_STENCIL_TEST
, ctx
->Stencil
.Enabled
);
763 ctx
->NewState
|= _NEW_STENCIL
;
766 /* Update culling direction which changes depending on the
767 * orientation of the buffer:
769 if (ctx
->Driver
.FrontFace
)
770 ctx
->Driver
.FrontFace(ctx
, ctx
->Polygon
.FrontFace
);
772 ctx
->NewState
|= _NEW_POLYGON
;
775 * Update depth test state
777 if (ctx
->Driver
.Enable
) {
778 ctx
->Driver
.Enable(ctx
, GL_DEPTH_TEST
,
779 (ctx
->Depth
.Test
&& fb
->Visual
.depthBits
> 0));
780 /* Need to update the derived ctx->Stencil._Enabled first */
781 ctx
->Driver
.Enable(ctx
, GL_STENCIL_TEST
,
782 (ctx
->Stencil
.Enabled
&& fb
->Visual
.stencilBits
> 0));
784 ctx
->NewState
|= (_NEW_DEPTH
| _NEW_STENCIL
);
787 _mesa_reference_renderbuffer(&radeon
->state
.depth
.rb
, &rrbDepth
->base
);
788 _mesa_reference_renderbuffer(&radeon
->state
.color
.rb
, &rrbColor
->base
);
789 radeon
->state
.color
.draw_offset
= offset
;
792 /* update viewport since it depends on window size */
793 if (ctx
->Driver
.Viewport
) {
794 ctx
->Driver
.Viewport(ctx
, ctx
->Viewport
.X
, ctx
->Viewport
.Y
,
795 ctx
->Viewport
.Width
, ctx
->Viewport
.Height
);
800 ctx
->NewState
|= _NEW_VIEWPORT
;
802 /* Set state we know depends on drawable parameters:
804 radeonUpdateScissor(ctx
);
805 radeon
->NewGLState
|= _NEW_SCISSOR
;
807 if (ctx
->Driver
.DepthRange
)
808 ctx
->Driver
.DepthRange(ctx
,
812 /* Update culling direction which changes depending on the
813 * orientation of the buffer:
815 if (ctx
->Driver
.FrontFace
)
816 ctx
->Driver
.FrontFace(ctx
, ctx
->Polygon
.FrontFace
);
818 ctx
->NewState
|= _NEW_POLYGON
;
822 * Called via glDrawBuffer.
824 void radeonDrawBuffer( struct gl_context
*ctx
, GLenum mode
)
826 if (RADEON_DEBUG
& RADEON_DRI
)
827 fprintf(stderr
, "%s %s\n", __FUNCTION__
,
828 _mesa_lookup_enum_by_nr( mode
));
830 if (ctx
->DrawBuffer
->Name
== 0) {
831 radeonContextPtr radeon
= RADEON_CONTEXT(ctx
);
833 const GLboolean was_front_buffer_rendering
=
834 radeon
->is_front_buffer_rendering
;
836 radeon
->is_front_buffer_rendering
= (mode
== GL_FRONT_LEFT
) ||
839 /* If we weren't front-buffer rendering before but we are now, make sure
840 * that the front-buffer has actually been allocated.
842 if (!was_front_buffer_rendering
&& radeon
->is_front_buffer_rendering
) {
843 radeon_update_renderbuffers(radeon
->dri
.context
,
844 radeon
->dri
.context
->driDrawablePriv
, GL_FALSE
);
848 radeon_draw_buffer(ctx
, ctx
->DrawBuffer
);
851 void radeonReadBuffer( struct gl_context
*ctx
, GLenum mode
)
853 if ((ctx
->DrawBuffer
!= NULL
) && (ctx
->DrawBuffer
->Name
== 0)) {
854 struct radeon_context
*const rmesa
= RADEON_CONTEXT(ctx
);
855 const GLboolean was_front_buffer_reading
= rmesa
->is_front_buffer_reading
;
856 rmesa
->is_front_buffer_reading
= (mode
== GL_FRONT_LEFT
)
857 || (mode
== GL_FRONT
);
859 if (!was_front_buffer_reading
&& rmesa
->is_front_buffer_reading
) {
860 radeon_update_renderbuffers(rmesa
->dri
.context
,
861 rmesa
->dri
.context
->driReadablePriv
, GL_FALSE
);
864 /* nothing, until we implement h/w glRead/CopyPixels or CopyTexImage */
865 if (ctx
->ReadBuffer
== ctx
->DrawBuffer
) {
866 /* This will update FBO completeness status.
867 * A framebuffer will be incomplete if the GL_READ_BUFFER setting
868 * refers to a missing renderbuffer. Calling glReadBuffer can set
869 * that straight and can make the drawing buffer complete.
871 radeon_draw_buffer(ctx
, ctx
->DrawBuffer
);
876 /* Turn on/off page flipping according to the flags in the sarea:
/* Refresh the drawable's page-flipping state from the SAREA shared with
 * the kernel: whether flipping is active, which page is current, and the
 * fixed page count (2). Then swap the front/back renderbuffer
 * attachments to match and re-derive the draw-buffer state.
 * Closing brace truncated in this view. */
878 void radeonUpdatePageFlipping(radeonContextPtr radeon
)
880 struct radeon_framebuffer
*rfb
= radeon_get_drawable(radeon
)->driverPrivate
;
/* mirror kernel-side flip state into the framebuffer object */
882 rfb
->pf_active
= radeon
->sarea
->pfState
;
883 rfb
->pf_current_page
= radeon
->sarea
->pfCurrentPage
;
884 rfb
->pf_num_pages
= 2;
885 radeon_flip_renderbuffers(rfb
);
886 radeon_draw_buffer(radeon
->glCtx
, radeon
->glCtx
->DrawBuffer
);
/* React to a window move/resize: recompute cliprects first (everything
 * else depends on them), and on the legacy non-DRI2 path also refresh
 * page-flipping state from the SAREA. Closing braces truncated in this
 * view. */
889 void radeon_window_moved(radeonContextPtr radeon
)
891 /* Cliprects has to be updated before doing anything else */
892 radeonSetCliprects(radeon
);
893 if (!radeon
->radeonScreen
->driScreen
->dri2
.enabled
) {
894 radeonUpdatePageFlipping(radeon
);
898 void radeon_viewport(struct gl_context
*ctx
, GLint x
, GLint y
, GLsizei width
, GLsizei height
)
900 radeonContextPtr radeon
= RADEON_CONTEXT(ctx
);
901 __DRIcontext
*driContext
= radeon
->dri
.context
;
902 void (*old_viewport
)(struct gl_context
*ctx
, GLint x
, GLint y
,
903 GLsizei w
, GLsizei h
);
905 if (!driContext
->driScreenPriv
->dri2
.enabled
)
908 if (!radeon
->meta
.internal_viewport_call
&& ctx
->DrawBuffer
->Name
== 0) {
909 if (radeon
->is_front_buffer_rendering
) {
910 ctx
->Driver
.Flush(ctx
);
912 radeon_update_renderbuffers(driContext
, driContext
->driDrawablePriv
, GL_FALSE
);
913 if (driContext
->driDrawablePriv
!= driContext
->driReadablePriv
)
914 radeon_update_renderbuffers(driContext
, driContext
->driReadablePriv
, GL_FALSE
);
917 old_viewport
= ctx
->Driver
.Viewport
;
918 ctx
->Driver
.Viewport
= NULL
;
919 radeon_window_moved(radeon
);
920 radeon_draw_buffer(ctx
, radeon
->glCtx
->DrawBuffer
);
921 ctx
->Driver
.Viewport
= old_viewport
;
924 static void radeon_print_state_atom_prekmm(radeonContextPtr radeon
, struct radeon_state_atom
*state
)
927 int dwords
= (*state
->check
) (radeon
->glCtx
, state
);
928 drm_r300_cmd_header_t cmd
;
930 fprintf(stderr
, " emit %s %d/%d\n", state
->name
, dwords
, state
->cmd_size
);
932 if (radeon_is_debug_enabled(RADEON_STATE
, RADEON_TRACE
)) {
933 if (dwords
> state
->cmd_size
)
934 dwords
= state
->cmd_size
;
936 for (i
= 0; i
< dwords
;) {
937 cmd
= *((drm_r300_cmd_header_t
*) &state
->cmd
[i
]);
938 reg
= (cmd
.packet0
.reghi
<< 8) | cmd
.packet0
.reglo
;
939 fprintf(stderr
, " %s[%d]: cmdpacket0 (first reg=0x%04x, count=%d)\n",
940 state
->name
, i
, reg
, cmd
.packet0
.count
);
942 for (j
= 0; j
< cmd
.packet0
.count
&& i
< dwords
; j
++) {
943 fprintf(stderr
, " %s[%d]: 0x%04x = %08x\n",
944 state
->name
, i
, reg
, state
->cmd
[i
]);
952 static void radeon_print_state_atom(radeonContextPtr radeon
, struct radeon_state_atom
*state
)
954 int i
, j
, reg
, count
;
957 if (!radeon_is_debug_enabled(RADEON_STATE
, RADEON_VERBOSE
) )
960 if (!radeon
->radeonScreen
->kernel_mm
) {
961 radeon_print_state_atom_prekmm(radeon
, state
);
965 dwords
= (*state
->check
) (radeon
->glCtx
, state
);
967 fprintf(stderr
, " emit %s %d/%d\n", state
->name
, dwords
, state
->cmd_size
);
969 if (radeon_is_debug_enabled(RADEON_STATE
, RADEON_TRACE
)) {
970 if (dwords
> state
->cmd_size
)
971 dwords
= state
->cmd_size
;
972 for (i
= 0; i
< dwords
;) {
973 packet0
= state
->cmd
[i
];
974 reg
= (packet0
& 0x1FFF) << 2;
975 count
= ((packet0
& 0x3FFF0000) >> 16) + 1;
976 fprintf(stderr
, " %s[%d]: cmdpacket0 (first reg=0x%04x, count=%d)\n",
977 state
->name
, i
, reg
, count
);
979 for (j
= 0; j
< count
&& i
< dwords
; j
++) {
980 fprintf(stderr
, " %s[%d]: 0x%04x = %08x\n",
981 state
->name
, i
, reg
, state
->cmd
[i
]);
990 * Count total size for next state emit.
992 GLuint
radeonCountStateEmitSize(radeonContextPtr radeon
)
994 struct radeon_state_atom
*atom
;
996 /* check if we are going to emit full state */
998 if (radeon
->cmdbuf
.cs
->cdw
&& !radeon
->hw
.all_dirty
) {
999 if (!radeon
->hw
.is_dirty
)
1001 foreach(atom
, &radeon
->hw
.atomlist
) {
1003 const GLuint atom_size
= atom
->check(radeon
->glCtx
, atom
);
1004 dwords
+= atom_size
;
1005 if (RADEON_CMDBUF
&& atom_size
) {
1006 radeon_print_state_atom(radeon
, atom
);
1011 foreach(atom
, &radeon
->hw
.atomlist
) {
1012 const GLuint atom_size
= atom
->check(radeon
->glCtx
, atom
);
1013 dwords
+= atom_size
;
1014 if (RADEON_CMDBUF
&& atom_size
) {
1015 radeon_print_state_atom(radeon
, atom
);
1021 radeon_print(RADEON_STATE
, RADEON_NORMAL
, "%s %u\n", __func__
, dwords
);
1025 static INLINE
void radeon_emit_atom(radeonContextPtr radeon
, struct radeon_state_atom
*atom
)
1027 BATCH_LOCALS(radeon
);
1030 dwords
= (*atom
->check
) (radeon
->glCtx
, atom
);
1033 radeon_print_state_atom(radeon
, atom
);
1036 (*atom
->emit
)(radeon
->glCtx
, atom
);
1038 BEGIN_BATCH_NO_AUTOSTATE(dwords
);
1039 OUT_BATCH_TABLE(atom
->cmd
, dwords
);
1042 atom
->dirty
= GL_FALSE
;
1045 radeon_print(RADEON_STATE
, RADEON_VERBOSE
, " skip state %s\n", atom
->name
);
1050 static INLINE
void radeonEmitAtoms(radeonContextPtr radeon
, GLboolean emitAll
)
1052 struct radeon_state_atom
*atom
;
1054 if (radeon
->vtbl
.pre_emit_atoms
)
1055 radeon
->vtbl
.pre_emit_atoms(radeon
);
1057 /* Emit actual atoms */
1058 if (radeon
->hw
.all_dirty
|| emitAll
) {
1059 foreach(atom
, &radeon
->hw
.atomlist
)
1060 radeon_emit_atom( radeon
, atom
);
1062 foreach(atom
, &radeon
->hw
.atomlist
) {
1064 radeon_emit_atom( radeon
, atom
);
/* Re-run the command-stream space check over all referenced buffer
 * objects. NOTE(review): the branch bodies and final return were dropped
 * by the extraction — presumably a RADEON_CS_SPACE_FLUSH result triggers
 * a flush-and-retry before reporting success/failure; confirm against
 * the full source. */
1071 static GLboolean
radeon_revalidate_bos(struct gl_context
*ctx
)
1073 radeonContextPtr radeon
= RADEON_CONTEXT(ctx
);
1076 ret
= radeon_cs_space_check(radeon
->cmdbuf
.cs
);
1077 if (ret
== RADEON_CS_SPACE_FLUSH
)
1082 void radeonEmitState(radeonContextPtr radeon
)
1084 radeon_print(RADEON_STATE
, RADEON_NORMAL
, "%s\n", __FUNCTION__
);
1086 if (radeon
->vtbl
.pre_emit_state
)
1087 radeon
->vtbl
.pre_emit_state(radeon
);
1089 /* this code used to return here but now it emits zbs */
1090 if (radeon
->cmdbuf
.cs
->cdw
&& !radeon
->hw
.is_dirty
&& !radeon
->hw
.all_dirty
)
1093 if (!radeon
->cmdbuf
.cs
->cdw
) {
1094 if (RADEON_DEBUG
& RADEON_STATE
)
1095 fprintf(stderr
, "Begin reemit state\n");
1097 radeonEmitAtoms(radeon
, GL_TRUE
);
1100 if (RADEON_DEBUG
& RADEON_STATE
)
1101 fprintf(stderr
, "Begin dirty state\n");
1103 radeonEmitAtoms(radeon
, GL_FALSE
);
1106 radeon
->hw
.is_dirty
= GL_FALSE
;
1107 radeon
->hw
.all_dirty
= GL_FALSE
;
1111 void radeonFlush(struct gl_context
*ctx
)
1113 radeonContextPtr radeon
= RADEON_CONTEXT(ctx
);
1114 if (RADEON_DEBUG
& RADEON_IOCTL
)
1115 fprintf(stderr
, "%s %d\n", __FUNCTION__
, radeon
->cmdbuf
.cs
->cdw
);
1117 /* okay if we have no cmds in the buffer &&
1118 we have no DMA flush &&
1119 we have no DMA buffer allocated.
1120 then no point flushing anything at all.
1122 if (!radeon
->dma
.flush
&& !radeon
->cmdbuf
.cs
->cdw
&& is_empty_list(&radeon
->dma
.reserved
))
1125 if (radeon
->dma
.flush
)
1126 radeon
->dma
.flush( ctx
);
1128 if (radeon
->cmdbuf
.cs
->cdw
)
1129 rcommonFlushCmdBuf(radeon
, __FUNCTION__
);
1132 if ((ctx
->DrawBuffer
->Name
== 0) && radeon
->front_buffer_dirty
) {
1133 __DRIscreen
*const screen
= radeon
->radeonScreen
->driScreen
;
1135 if (screen
->dri2
.loader
&& (screen
->dri2
.loader
->base
.version
>= 2)
1136 && (screen
->dri2
.loader
->flushFrontBuffer
!= NULL
)) {
1137 __DRIdrawable
* drawable
= radeon_get_drawable(radeon
);
1139 /* We set the dirty bit in radeon_prepare_render() if we're
1140 * front buffer rendering once we get there.
1142 radeon
->front_buffer_dirty
= GL_FALSE
;
1144 (*screen
->dri2
.loader
->flushFrontBuffer
)(drawable
, drawable
->loaderPrivate
);
1149 /* Make sure all commands have been sent to the hardware and have
1150 * completed processing.
1152 void radeonFinish(struct gl_context
* ctx
)
1154 radeonContextPtr radeon
= RADEON_CONTEXT(ctx
);
1155 struct gl_framebuffer
*fb
= ctx
->DrawBuffer
;
1158 if (ctx
->Driver
.Flush
)
1159 ctx
->Driver
.Flush(ctx
); /* +r6/r7 */
1161 if (radeon
->radeonScreen
->kernel_mm
) {
1162 for (i
= 0; i
< fb
->_NumColorDrawBuffers
; i
++) {
1163 struct radeon_renderbuffer
*rrb
;
1164 rrb
= radeon_renderbuffer(fb
->_ColorDrawBuffers
[i
]);
1166 radeon_bo_wait(rrb
->bo
);
1169 struct radeon_renderbuffer
*rrb
;
1170 rrb
= radeon_get_depthbuffer(radeon
);
1172 radeon_bo_wait(rrb
->bo
);
1174 } else if (radeon
->do_irqs
) {
1175 LOCK_HARDWARE(radeon
);
1176 radeonEmitIrqLocked(radeon
);
1177 UNLOCK_HARDWARE(radeon
);
1178 radeonWaitIrq(radeon
);
1180 radeonWaitForIdle(radeon
);
1186 * Send the current command buffer via ioctl to the hardware.
1188 int rcommonFlushCmdBufLocked(radeonContextPtr rmesa
, const char *caller
)
1192 if (rmesa
->cmdbuf
.flushing
) {
1193 fprintf(stderr
, "Recursive call into r300FlushCmdBufLocked!\n");
1196 rmesa
->cmdbuf
.flushing
= 1;
1198 if (RADEON_DEBUG
& RADEON_IOCTL
) {
1199 fprintf(stderr
, "%s from %s - %i cliprects\n",
1200 __FUNCTION__
, caller
, rmesa
->numClipRects
);
1203 radeonEmitQueryEnd(rmesa
->glCtx
);
1205 if (rmesa
->cmdbuf
.cs
->cdw
) {
1206 ret
= radeon_cs_emit(rmesa
->cmdbuf
.cs
);
1207 rmesa
->hw
.all_dirty
= GL_TRUE
;
1209 radeon_cs_erase(rmesa
->cmdbuf
.cs
);
1210 rmesa
->cmdbuf
.flushing
= 0;
1212 if (radeon_revalidate_bos(rmesa
->glCtx
) == GL_FALSE
) {
1213 fprintf(stderr
,"failed to revalidate buffers\n");
1219 int rcommonFlushCmdBuf(radeonContextPtr rmesa
, const char *caller
)
1223 radeonReleaseDmaRegions(rmesa
);
1225 LOCK_HARDWARE(rmesa
);
1226 ret
= rcommonFlushCmdBufLocked(rmesa
, caller
);
1227 UNLOCK_HARDWARE(rmesa
);
1230 fprintf(stderr
, "drmRadeonCmdBuffer: %d. Kernel failed to "
1231 "parse or rejected command stream. See dmesg "
1232 "for more info.\n", ret
);
1240 * Make sure that enough space is available in the command buffer
1241 * by flushing if necessary.
1243 * \param dwords The number of dwords we need to be free on the command buffer
1245 GLboolean
rcommonEnsureCmdBufSpace(radeonContextPtr rmesa
, int dwords
, const char *caller
)
1247 if ((rmesa
->cmdbuf
.cs
->cdw
+ dwords
+ 128) > rmesa
->cmdbuf
.size
1248 || radeon_cs_need_flush(rmesa
->cmdbuf
.cs
)) {
1249 /* If we try to flush empty buffer there is too big rendering operation. */
1250 assert(rmesa
->cmdbuf
.cs
->cdw
);
1251 rcommonFlushCmdBuf(rmesa
, caller
);
1257 void rcommonInitCmdBuf(radeonContextPtr rmesa
)
1260 /* Initialize command buffer */
1261 size
= 256 * driQueryOptioni(&rmesa
->optionCache
,
1262 "command_buffer_size");
1263 if (size
< 2 * rmesa
->hw
.max_state_size
) {
1264 size
= 2 * rmesa
->hw
.max_state_size
+ 65535;
1266 if (size
> 64 * 256)
1269 radeon_print(RADEON_CS
, RADEON_VERBOSE
,
1270 "sizeof(drm_r300_cmd_header_t)=%zd\n", sizeof(drm_r300_cmd_header_t
));
1271 radeon_print(RADEON_CS
, RADEON_VERBOSE
,
1272 "sizeof(drm_radeon_cmd_buffer_t)=%zd\n", sizeof(drm_radeon_cmd_buffer_t
));
1273 radeon_print(RADEON_CS
, RADEON_VERBOSE
,
1274 "Allocating %d bytes command buffer (max state is %d bytes)\n",
1275 size
* 4, rmesa
->hw
.max_state_size
* 4);
1277 if (rmesa
->radeonScreen
->kernel_mm
) {
1278 int fd
= rmesa
->radeonScreen
->driScreen
->fd
;
1279 rmesa
->cmdbuf
.csm
= radeon_cs_manager_gem_ctor(fd
);
1281 rmesa
->cmdbuf
.csm
= radeon_cs_manager_legacy_ctor(rmesa
);
1283 if (rmesa
->cmdbuf
.csm
== NULL
) {
1284 /* FIXME: fatal error */
1287 rmesa
->cmdbuf
.cs
= radeon_cs_create(rmesa
->cmdbuf
.csm
, size
);
1288 assert(rmesa
->cmdbuf
.cs
!= NULL
);
1289 rmesa
->cmdbuf
.size
= size
;
1291 radeon_cs_space_set_flush(rmesa
->cmdbuf
.cs
,
1292 (void (*)(void *))rmesa
->glCtx
->Driver
.Flush
, rmesa
->glCtx
);
1294 if (!rmesa
->radeonScreen
->kernel_mm
) {
1295 radeon_cs_set_limit(rmesa
->cmdbuf
.cs
, RADEON_GEM_DOMAIN_VRAM
, rmesa
->radeonScreen
->texSize
[0]);
1296 radeon_cs_set_limit(rmesa
->cmdbuf
.cs
, RADEON_GEM_DOMAIN_GTT
, rmesa
->radeonScreen
->gartTextures
.size
);
1298 struct drm_radeon_gem_info mminfo
= { 0 };
1300 if (!drmCommandWriteRead(rmesa
->dri
.fd
, DRM_RADEON_GEM_INFO
, &mminfo
, sizeof(mminfo
)))
1302 radeon_cs_set_limit(rmesa
->cmdbuf
.cs
, RADEON_GEM_DOMAIN_VRAM
, mminfo
.vram_visible
);
1303 radeon_cs_set_limit(rmesa
->cmdbuf
.cs
, RADEON_GEM_DOMAIN_GTT
, mminfo
.gart_size
);
1309 * Destroy the command buffer
1311 void rcommonDestroyCmdBuf(radeonContextPtr rmesa
)
1313 radeon_cs_destroy(rmesa
->cmdbuf
.cs
);
1314 if (rmesa
->radeonScreen
->driScreen
->dri2
.enabled
|| rmesa
->radeonScreen
->kernel_mm
) {
1315 radeon_cs_manager_gem_dtor(rmesa
->cmdbuf
.csm
);
1317 radeon_cs_manager_legacy_dtor(rmesa
->cmdbuf
.csm
);
/* Begin a batch of n dwords on the command stream, recording the
 * caller's file/function/line for debugging, and trace the request at
 * RADEON_VERBOSE level. NOTE(review): the 'file' and 'line' parameter
 * lines were dropped by the extraction; the signature visible here is
 * incomplete. */
1321 void rcommonBeginBatch(radeonContextPtr rmesa
, int n
,
1324 const char *function
,
/* reserve space in the CS; libdrm tracks the begin/end bookkeeping */
1327 radeon_cs_begin(rmesa
->cmdbuf
.cs
, n
, file
, function
, line
);
1329 radeon_print(RADEON_CS
, RADEON_VERBOSE
, "BEGIN_BATCH(%d) at %d, from %s:%i\n",
1330 n
, rmesa
->cmdbuf
.cs
->cdw
, function
, line
);
/* Clear the buffers selected by 'mask' by delegating to the shared
 * meta-ops implementation (drivers/common/meta.h). Braces truncated in
 * this view. */
1334 void radeonUserClear(struct gl_context
*ctx
, GLuint mask
)
1336 _mesa_meta_Clear(ctx
, mask
);