1 /**************************************************************************
3 Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
5 The Weather Channel (TM) funded Tungsten Graphics to develop the
6 initial release of the Radeon 8500 driver under the XFree86 license.
7 This notice must be preserved.
9 Permission is hereby granted, free of charge, to any person obtaining
10 a copy of this software and associated documentation files (the
11 "Software"), to deal in the Software without restriction, including
12 without limitation the rights to use, copy, modify, merge, publish,
13 distribute, sublicense, and/or sell copies of the Software, and to
14 permit persons to whom the Software is furnished to do so, subject to
15 the following conditions:
17 The above copyright notice and this permission notice (including the
18 next paragraph) shall be included in all copies or substantial
19 portions of the Software.
21 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
22 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
23 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
24 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
25 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
26 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
27 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
29 **************************************************************************/
33 * Keith Whitwell <keith@tungstengraphics.com>
37 - Scissor implementation
38 - buffer swap/copy ioctls
41 - cmdbuffer management
45 #include "main/glheader.h"
46 #include "main/imports.h"
47 #include "main/context.h"
48 #include "main/enums.h"
49 #include "main/framebuffer.h"
50 #include "main/renderbuffer.h"
51 #include "drivers/common/meta.h"
55 #include "radeon_common.h"
56 #include "radeon_bocs_wrapper.h"
57 #include "radeon_lock.h"
58 #include "radeon_drm.h"
59 #include "radeon_queryobj.h"
62 * Enable verbose debug output for emit code.
65 * 2 also print state values
67 #define RADEON_CMDBUF 0
69 /* =============================================================
73 static GLboolean
intersect_rect(drm_clip_rect_t
* out
,
74 drm_clip_rect_t
* a
, drm_clip_rect_t
* b
)
85 if (out
->x1
>= out
->x2
)
87 if (out
->y1
>= out
->y2
)
92 void radeonRecalcScissorRects(radeonContextPtr radeon
)
97 /* Grow cliprect store?
99 if (radeon
->state
.scissor
.numAllocedClipRects
< radeon
->numClipRects
) {
100 while (radeon
->state
.scissor
.numAllocedClipRects
<
101 radeon
->numClipRects
) {
102 radeon
->state
.scissor
.numAllocedClipRects
+= 1; /* zero case */
103 radeon
->state
.scissor
.numAllocedClipRects
*= 2;
106 if (radeon
->state
.scissor
.pClipRects
)
107 FREE(radeon
->state
.scissor
.pClipRects
);
109 radeon
->state
.scissor
.pClipRects
=
110 MALLOC(radeon
->state
.scissor
.numAllocedClipRects
*
111 sizeof(drm_clip_rect_t
));
113 if (radeon
->state
.scissor
.pClipRects
== NULL
) {
114 radeon
->state
.scissor
.numAllocedClipRects
= 0;
119 out
= radeon
->state
.scissor
.pClipRects
;
120 radeon
->state
.scissor
.numClipRects
= 0;
122 for (i
= 0; i
< radeon
->numClipRects
; i
++) {
123 if (intersect_rect(out
,
124 &radeon
->pClipRects
[i
],
125 &radeon
->state
.scissor
.rect
)) {
126 radeon
->state
.scissor
.numClipRects
++;
131 if (radeon
->vtbl
.update_scissor
)
132 radeon
->vtbl
.update_scissor(radeon
->glCtx
);
135 void radeon_get_cliprects(radeonContextPtr radeon
,
136 struct drm_clip_rect
**cliprects
,
137 unsigned int *num_cliprects
,
138 int *x_off
, int *y_off
)
140 __DRIdrawablePrivate
*dPriv
= radeon_get_drawable(radeon
);
141 struct radeon_framebuffer
*rfb
= dPriv
->driverPrivate
;
143 if (radeon
->constant_cliprect
) {
144 radeon
->fboRect
.x1
= 0;
145 radeon
->fboRect
.y1
= 0;
146 radeon
->fboRect
.x2
= radeon
->glCtx
->DrawBuffer
->Width
;
147 radeon
->fboRect
.y2
= radeon
->glCtx
->DrawBuffer
->Height
;
149 *cliprects
= &radeon
->fboRect
;
153 } else if (radeon
->front_cliprects
||
154 rfb
->pf_active
|| dPriv
->numBackClipRects
== 0) {
155 *cliprects
= dPriv
->pClipRects
;
156 *num_cliprects
= dPriv
->numClipRects
;
160 *num_cliprects
= dPriv
->numBackClipRects
;
161 *cliprects
= dPriv
->pBackClipRects
;
162 *x_off
= dPriv
->backX
;
163 *y_off
= dPriv
->backY
;
168 * Update cliprects and scissors.
170 void radeonSetCliprects(radeonContextPtr radeon
)
172 __DRIdrawablePrivate
*const drawable
= radeon_get_drawable(radeon
);
173 __DRIdrawablePrivate
*const readable
= radeon_get_readable(radeon
);
174 struct radeon_framebuffer
*const draw_rfb
= drawable
->driverPrivate
;
175 struct radeon_framebuffer
*const read_rfb
= readable
->driverPrivate
;
178 radeon_get_cliprects(radeon
, &radeon
->pClipRects
,
179 &radeon
->numClipRects
, &x_off
, &y_off
);
181 if ((draw_rfb
->base
.Width
!= drawable
->w
) ||
182 (draw_rfb
->base
.Height
!= drawable
->h
)) {
183 _mesa_resize_framebuffer(radeon
->glCtx
, &draw_rfb
->base
,
184 drawable
->w
, drawable
->h
);
185 draw_rfb
->base
.Initialized
= GL_TRUE
;
188 if (drawable
!= readable
) {
189 if ((read_rfb
->base
.Width
!= readable
->w
) ||
190 (read_rfb
->base
.Height
!= readable
->h
)) {
191 _mesa_resize_framebuffer(radeon
->glCtx
, &read_rfb
->base
,
192 readable
->w
, readable
->h
);
193 read_rfb
->base
.Initialized
= GL_TRUE
;
197 if (radeon
->state
.scissor
.enabled
)
198 radeonRecalcScissorRects(radeon
);
204 void radeonUpdateScissor( GLcontext
*ctx
)
206 radeonContextPtr rmesa
= RADEON_CONTEXT(ctx
);
207 GLint x
= ctx
->Scissor
.X
, y
= ctx
->Scissor
.Y
;
208 GLsizei w
= ctx
->Scissor
.Width
, h
= ctx
->Scissor
.Height
;
210 int min_x
, min_y
, max_x
, max_y
;
212 if (!ctx
->DrawBuffer
)
215 max_x
= ctx
->DrawBuffer
->Width
- 1;
216 max_y
= ctx
->DrawBuffer
->Height
- 1;
218 if ( !ctx
->DrawBuffer
->Name
) {
220 y1
= ctx
->DrawBuffer
->Height
- (y
+ h
);
230 if (!rmesa
->radeonScreen
->kernel_mm
) {
231 /* Fix scissors for dri 1 */
233 __DRIdrawablePrivate
*dPriv
= radeon_get_drawable(rmesa
);
237 max_x
+= dPriv
->x
+ 1;
241 max_y
+= dPriv
->y
+ 1;
244 rmesa
->state
.scissor
.rect
.x1
= CLAMP(x1
, min_x
, max_x
);
245 rmesa
->state
.scissor
.rect
.y1
= CLAMP(y1
, min_y
, max_y
);
246 rmesa
->state
.scissor
.rect
.x2
= CLAMP(x2
, min_x
, max_x
);
247 rmesa
->state
.scissor
.rect
.y2
= CLAMP(y2
, min_y
, max_y
);
249 radeonRecalcScissorRects( rmesa
);
252 /* =============================================================
256 void radeonScissor(GLcontext
* ctx
, GLint x
, GLint y
, GLsizei w
, GLsizei h
)
258 radeonContextPtr radeon
= RADEON_CONTEXT(ctx
);
259 if (ctx
->Scissor
.Enabled
) {
260 /* We don't pipeline cliprect changes */
261 radeon_firevertices(radeon
);
262 radeonUpdateScissor(ctx
);
266 void radeonPolygonStipplePreKMS( GLcontext
*ctx
, const GLubyte
*mask
)
268 radeonContextPtr radeon
= RADEON_CONTEXT(ctx
);
270 drm_radeon_stipple_t stipple
;
272 /* Must flip pattern upside down.
274 for ( i
= 0 ; i
< 32 ; i
++ ) {
275 stipple
.mask
[31 - i
] = ((GLuint
*) mask
)[i
];
278 /* TODO: push this into cmd mechanism
280 radeon_firevertices(radeon
);
281 LOCK_HARDWARE( radeon
);
283 drmCommandWrite( radeon
->dri
.fd
, DRM_RADEON_STIPPLE
,
284 &stipple
, sizeof(stipple
) );
285 UNLOCK_HARDWARE( radeon
);
289 /* ================================================================
290 * SwapBuffers with client-side throttling
293 static uint32_t radeonGetLastFrame(radeonContextPtr radeon
)
295 drm_radeon_getparam_t gp
;
299 gp
.param
= RADEON_PARAM_LAST_FRAME
;
300 gp
.value
= (int *)&frame
;
301 ret
= drmCommandWriteRead(radeon
->dri
.fd
, DRM_RADEON_GETPARAM
,
304 fprintf(stderr
, "%s: drmRadeonGetParam: %d\n", __FUNCTION__
,
312 uint32_t radeonGetAge(radeonContextPtr radeon
)
314 drm_radeon_getparam_t gp
;
318 gp
.param
= RADEON_PARAM_LAST_CLEAR
;
319 gp
.value
= (int *)&age
;
320 ret
= drmCommandWriteRead(radeon
->dri
.fd
, DRM_RADEON_GETPARAM
,
323 fprintf(stderr
, "%s: drmRadeonGetParam: %d\n", __FUNCTION__
,
331 static void radeonEmitIrqLocked(radeonContextPtr radeon
)
333 drm_radeon_irq_emit_t ie
;
336 ie
.irq_seq
= &radeon
->iw
.irq_seq
;
337 ret
= drmCommandWriteRead(radeon
->dri
.fd
, DRM_RADEON_IRQ_EMIT
,
340 fprintf(stderr
, "%s: drmRadeonIrqEmit: %d\n", __FUNCTION__
,
346 static void radeonWaitIrq(radeonContextPtr radeon
)
351 ret
= drmCommandWrite(radeon
->dri
.fd
, DRM_RADEON_IRQ_WAIT
,
352 &radeon
->iw
, sizeof(radeon
->iw
));
353 } while (ret
&& (errno
== EINTR
|| errno
== EBUSY
));
356 fprintf(stderr
, "%s: drmRadeonIrqWait: %d\n", __FUNCTION__
,
362 static void radeonWaitForFrameCompletion(radeonContextPtr radeon
)
364 drm_radeon_sarea_t
*sarea
= radeon
->sarea
;
366 if (radeon
->do_irqs
) {
367 if (radeonGetLastFrame(radeon
) < sarea
->last_frame
) {
368 if (!radeon
->irqsEmitted
) {
369 while (radeonGetLastFrame(radeon
) <
372 UNLOCK_HARDWARE(radeon
);
373 radeonWaitIrq(radeon
);
374 LOCK_HARDWARE(radeon
);
376 radeon
->irqsEmitted
= 10;
379 if (radeon
->irqsEmitted
) {
380 radeonEmitIrqLocked(radeon
);
381 radeon
->irqsEmitted
--;
384 while (radeonGetLastFrame(radeon
) < sarea
->last_frame
) {
385 UNLOCK_HARDWARE(radeon
);
386 if (radeon
->do_usleeps
)
388 LOCK_HARDWARE(radeon
);
394 void radeonWaitForIdleLocked(radeonContextPtr radeon
)
400 ret
= drmCommandNone(radeon
->dri
.fd
, DRM_RADEON_CP_IDLE
);
403 } while (ret
&& ++i
< 100);
406 UNLOCK_HARDWARE(radeon
);
407 fprintf(stderr
, "Error: R300 timed out... exiting\n");
412 static void radeonWaitForIdle(radeonContextPtr radeon
)
414 if (!radeon
->radeonScreen
->driScreen
->dri2
.enabled
) {
415 LOCK_HARDWARE(radeon
);
416 radeonWaitForIdleLocked(radeon
);
417 UNLOCK_HARDWARE(radeon
);
421 static void radeon_flip_renderbuffers(struct radeon_framebuffer
*rfb
)
423 int current_page
= rfb
->pf_current_page
;
424 int next_page
= (current_page
+ 1) % rfb
->pf_num_pages
;
425 struct gl_renderbuffer
*tmp_rb
;
427 /* Exchange renderbuffers if necessary but make sure their
428 * reference counts are preserved.
430 if (rfb
->color_rb
[current_page
] &&
431 rfb
->base
.Attachment
[BUFFER_FRONT_LEFT
].Renderbuffer
!=
432 &rfb
->color_rb
[current_page
]->base
) {
434 _mesa_reference_renderbuffer(&tmp_rb
,
435 rfb
->base
.Attachment
[BUFFER_FRONT_LEFT
].Renderbuffer
);
436 tmp_rb
= &rfb
->color_rb
[current_page
]->base
;
437 _mesa_reference_renderbuffer(&rfb
->base
.Attachment
[BUFFER_FRONT_LEFT
].Renderbuffer
, tmp_rb
);
438 _mesa_reference_renderbuffer(&tmp_rb
, NULL
);
441 if (rfb
->color_rb
[next_page
] &&
442 rfb
->base
.Attachment
[BUFFER_BACK_LEFT
].Renderbuffer
!=
443 &rfb
->color_rb
[next_page
]->base
) {
445 _mesa_reference_renderbuffer(&tmp_rb
,
446 rfb
->base
.Attachment
[BUFFER_BACK_LEFT
].Renderbuffer
);
447 tmp_rb
= &rfb
->color_rb
[next_page
]->base
;
448 _mesa_reference_renderbuffer(&rfb
->base
.Attachment
[BUFFER_BACK_LEFT
].Renderbuffer
, tmp_rb
);
449 _mesa_reference_renderbuffer(&tmp_rb
, NULL
);
453 /* Copy the back color buffer to the front color buffer.
455 void radeonCopyBuffer( __DRIdrawablePrivate
*dPriv
,
456 const drm_clip_rect_t
*rect
)
458 radeonContextPtr rmesa
;
459 struct radeon_framebuffer
*rfb
;
463 assert(dPriv
->driContextPriv
);
464 assert(dPriv
->driContextPriv
->driverPrivate
);
466 rmesa
= (radeonContextPtr
) dPriv
->driContextPriv
->driverPrivate
;
468 LOCK_HARDWARE(rmesa
);
470 rfb
= dPriv
->driverPrivate
;
472 if ( RADEON_DEBUG
& RADEON_IOCTL
) {
473 fprintf( stderr
, "\n%s( %p )\n\n", __FUNCTION__
, (void *) rmesa
->glCtx
);
476 nbox
= dPriv
->numClipRects
; /* must be in locked region */
478 for ( i
= 0 ; i
< nbox
; ) {
479 GLint nr
= MIN2( i
+ RADEON_NR_SAREA_CLIPRECTS
, nbox
);
480 drm_clip_rect_t
*box
= dPriv
->pClipRects
;
481 drm_clip_rect_t
*b
= rmesa
->sarea
->boxes
;
484 for ( ; i
< nr
; i
++ ) {
490 if (rect
->x1
> b
->x1
)
492 if (rect
->y1
> b
->y1
)
494 if (rect
->x2
< b
->x2
)
496 if (rect
->y2
< b
->y2
)
499 if (b
->x1
>= b
->x2
|| b
->y1
>= b
->y2
)
506 rmesa
->sarea
->nbox
= n
;
511 ret
= drmCommandNone( rmesa
->dri
.fd
, DRM_RADEON_SWAP
);
514 fprintf( stderr
, "DRM_RADEON_SWAP_BUFFERS: return = %d\n", ret
);
515 UNLOCK_HARDWARE( rmesa
);
520 UNLOCK_HARDWARE( rmesa
);
523 static int radeonScheduleSwap(__DRIdrawablePrivate
*dPriv
, GLboolean
*missed_target
)
525 radeonContextPtr rmesa
;
527 rmesa
= (radeonContextPtr
) dPriv
->driContextPriv
->driverPrivate
;
528 radeon_firevertices(rmesa
);
530 LOCK_HARDWARE( rmesa
);
532 if (!dPriv
->numClipRects
) {
533 UNLOCK_HARDWARE(rmesa
);
534 usleep(10000); /* throttle invisible client 10ms */
538 radeonWaitForFrameCompletion(rmesa
);
540 UNLOCK_HARDWARE(rmesa
);
541 driWaitForVBlank(dPriv
, missed_target
);
546 static GLboolean
radeonPageFlip( __DRIdrawablePrivate
*dPriv
)
548 radeonContextPtr radeon
;
550 __DRIscreenPrivate
*psp
;
551 struct radeon_renderbuffer
*rrb
;
552 struct radeon_framebuffer
*rfb
;
555 assert(dPriv
->driContextPriv
);
556 assert(dPriv
->driContextPriv
->driverPrivate
);
558 radeon
= (radeonContextPtr
) dPriv
->driContextPriv
->driverPrivate
;
559 rfb
= dPriv
->driverPrivate
;
560 rrb
= (void *)rfb
->base
.Attachment
[BUFFER_FRONT_LEFT
].Renderbuffer
;
562 psp
= dPriv
->driScreenPriv
;
564 LOCK_HARDWARE(radeon
);
566 if ( RADEON_DEBUG
& RADEON_IOCTL
) {
567 fprintf(stderr
, "%s: pfCurrentPage: %d %d\n", __FUNCTION__
,
568 radeon
->sarea
->pfCurrentPage
, radeon
->sarea
->pfState
);
570 drm_clip_rect_t
*box
= dPriv
->pClipRects
;
571 drm_clip_rect_t
*b
= radeon
->sarea
->boxes
;
573 radeon
->sarea
->nbox
= 1;
575 ret
= drmCommandNone( radeon
->dri
.fd
, DRM_RADEON_FLIP
);
577 UNLOCK_HARDWARE(radeon
);
580 fprintf( stderr
, "DRM_RADEON_FLIP: return = %d\n", ret
);
587 rfb
->pf_current_page
= radeon
->sarea
->pfCurrentPage
;
588 radeon_flip_renderbuffers(rfb
);
589 radeon_draw_buffer(radeon
->glCtx
, &rfb
->base
);
596 * Swap front and back buffer.
598 void radeonSwapBuffers(__DRIdrawablePrivate
* dPriv
)
601 __DRIscreenPrivate
*psp
;
603 if (dPriv
->driContextPriv
&& dPriv
->driContextPriv
->driverPrivate
) {
604 radeonContextPtr radeon
;
607 radeon
= (radeonContextPtr
) dPriv
->driContextPriv
->driverPrivate
;
610 if (ctx
->Visual
.doubleBufferMode
) {
611 GLboolean missed_target
;
612 struct radeon_framebuffer
*rfb
= dPriv
->driverPrivate
;
613 _mesa_notifySwapBuffers(ctx
);/* flush pending rendering comands */
615 radeonScheduleSwap(dPriv
, &missed_target
);
617 if (rfb
->pf_active
) {
618 radeonPageFlip(dPriv
);
620 radeonCopyBuffer(dPriv
, NULL
);
623 psp
= dPriv
->driScreenPriv
;
626 (*psp
->systemTime
->getUST
)( & ust
);
627 if ( missed_target
) {
628 rfb
->swap_missed_count
++;
629 rfb
->swap_missed_ust
= ust
- rfb
->swap_ust
;
633 radeon
->hw
.all_dirty
= GL_TRUE
;
636 /* XXX this shouldn't be an error but we can't handle it for now */
637 _mesa_problem(NULL
, "%s: drawable has no context!",
642 void radeonCopySubBuffer(__DRIdrawablePrivate
* dPriv
,
643 int x
, int y
, int w
, int h
)
645 if (dPriv
->driContextPriv
&& dPriv
->driContextPriv
->driverPrivate
) {
646 radeonContextPtr radeon
;
649 radeon
= (radeonContextPtr
) dPriv
->driContextPriv
->driverPrivate
;
652 if (ctx
->Visual
.doubleBufferMode
) {
653 drm_clip_rect_t rect
;
654 rect
.x1
= x
+ dPriv
->x
;
655 rect
.y1
= (dPriv
->h
- y
- h
) + dPriv
->y
;
656 rect
.x2
= rect
.x1
+ w
;
657 rect
.y2
= rect
.y1
+ h
;
658 _mesa_notifySwapBuffers(ctx
); /* flush pending rendering comands */
659 radeonCopyBuffer(dPriv
, &rect
);
662 /* XXX this shouldn't be an error but we can't handle it for now */
663 _mesa_problem(NULL
, "%s: drawable has no context!",
668 void radeon_draw_buffer(GLcontext
*ctx
, struct gl_framebuffer
*fb
)
670 radeonContextPtr radeon
= RADEON_CONTEXT(ctx
);
671 struct radeon_renderbuffer
*rrbDepth
= NULL
, *rrbStencil
= NULL
,
677 /* this can happen during the initial context initialization */
681 /* radeons only handle 1 color draw so far */
682 if (fb
->_NumColorDrawBuffers
!= 1) {
683 radeon
->vtbl
.fallback(ctx
, RADEON_FALLBACK_DRAW_BUFFER
, GL_TRUE
);
687 /* Do this here, note core Mesa, since this function is called from
688 * many places within the driver.
690 if (ctx
->NewState
& (_NEW_BUFFERS
| _NEW_COLOR
| _NEW_PIXEL
)) {
691 /* this updates the DrawBuffer->_NumColorDrawBuffers fields, etc */
692 _mesa_update_framebuffer(ctx
);
693 /* this updates the DrawBuffer's Width/Height if it's a FBO */
694 _mesa_update_draw_buffer_bounds(ctx
);
697 if (fb
->_Status
!= GL_FRAMEBUFFER_COMPLETE_EXT
) {
698 /* this may occur when we're called by glBindFrameBuffer() during
699 * the process of someone setting up renderbuffers, etc.
701 /*_mesa_debug(ctx, "DrawBuffer: incomplete user FBO\n");*/
706 ;/* do something depthy/stencily TODO */
711 if (fb
->_ColorDrawBufferIndexes
[0] == BUFFER_FRONT_LEFT
) {
712 rrbColor
= radeon_renderbuffer(fb
->Attachment
[BUFFER_FRONT_LEFT
].Renderbuffer
);
713 radeon
->front_cliprects
= GL_TRUE
;
714 radeon
->front_buffer_dirty
= GL_TRUE
;
716 rrbColor
= radeon_renderbuffer(fb
->Attachment
[BUFFER_BACK_LEFT
].Renderbuffer
);
717 radeon
->front_cliprects
= GL_FALSE
;
720 /* user FBO in theory */
721 struct radeon_renderbuffer
*rrb
;
722 rrb
= radeon_renderbuffer(fb
->_ColorDrawBuffers
[0]);
724 offset
= rrb
->draw_offset
;
727 radeon
->constant_cliprect
= GL_TRUE
;
730 if (rrbColor
== NULL
)
731 radeon
->vtbl
.fallback(ctx
, RADEON_FALLBACK_DRAW_BUFFER
, GL_TRUE
);
733 radeon
->vtbl
.fallback(ctx
, RADEON_FALLBACK_DRAW_BUFFER
, GL_FALSE
);
736 if (fb
->_DepthBuffer
&& fb
->_DepthBuffer
->Wrapped
) {
737 rrbDepth
= radeon_renderbuffer(fb
->_DepthBuffer
->Wrapped
);
738 if (rrbDepth
&& rrbDepth
->bo
) {
739 radeon
->vtbl
.fallback(ctx
, RADEON_FALLBACK_DEPTH_BUFFER
, GL_FALSE
);
741 radeon
->vtbl
.fallback(ctx
, RADEON_FALLBACK_DEPTH_BUFFER
, GL_TRUE
);
744 radeon
->vtbl
.fallback(ctx
, RADEON_FALLBACK_DEPTH_BUFFER
, GL_FALSE
);
748 if (fb
->_StencilBuffer
&& fb
->_StencilBuffer
->Wrapped
) {
749 rrbStencil
= radeon_renderbuffer(fb
->_StencilBuffer
->Wrapped
);
750 if (rrbStencil
&& rrbStencil
->bo
) {
751 radeon
->vtbl
.fallback(ctx
, RADEON_FALLBACK_STENCIL_BUFFER
, GL_FALSE
);
752 /* need to re-compute stencil hw state */
754 rrbDepth
= rrbStencil
;
756 radeon
->vtbl
.fallback(ctx
, RADEON_FALLBACK_STENCIL_BUFFER
, GL_TRUE
);
759 radeon
->vtbl
.fallback(ctx
, RADEON_FALLBACK_STENCIL_BUFFER
, GL_FALSE
);
760 if (ctx
->Driver
.Enable
!= NULL
)
761 ctx
->Driver
.Enable(ctx
, GL_STENCIL_TEST
, ctx
->Stencil
.Enabled
);
763 ctx
->NewState
|= _NEW_STENCIL
;
766 /* Update culling direction which changes depending on the
767 * orientation of the buffer:
769 if (ctx
->Driver
.FrontFace
)
770 ctx
->Driver
.FrontFace(ctx
, ctx
->Polygon
.FrontFace
);
772 ctx
->NewState
|= _NEW_POLYGON
;
775 * Update depth test state
777 if (ctx
->Driver
.Enable
) {
778 ctx
->Driver
.Enable(ctx
, GL_DEPTH_TEST
,
779 (ctx
->Depth
.Test
&& fb
->Visual
.depthBits
> 0));
780 /* Need to update the derived ctx->Stencil._Enabled first */
781 ctx
->Driver
.Enable(ctx
, GL_STENCIL_TEST
,
782 (ctx
->Stencil
.Enabled
&& fb
->Visual
.stencilBits
> 0));
784 ctx
->NewState
|= (_NEW_DEPTH
| _NEW_STENCIL
);
787 _mesa_reference_renderbuffer(&radeon
->state
.depth
.rb
, &rrbDepth
->base
);
788 _mesa_reference_renderbuffer(&radeon
->state
.color
.rb
, &rrbColor
->base
);
789 radeon
->state
.color
.draw_offset
= offset
;
792 /* update viewport since it depends on window size */
793 if (ctx
->Driver
.Viewport
) {
794 ctx
->Driver
.Viewport(ctx
, ctx
->Viewport
.X
, ctx
->Viewport
.Y
,
795 ctx
->Viewport
.Width
, ctx
->Viewport
.Height
);
800 ctx
->NewState
|= _NEW_VIEWPORT
;
802 /* Set state we know depends on drawable parameters:
804 radeonUpdateScissor(ctx
);
805 radeon
->NewGLState
|= _NEW_SCISSOR
;
807 if (ctx
->Driver
.DepthRange
)
808 ctx
->Driver
.DepthRange(ctx
,
812 /* Update culling direction which changes depending on the
813 * orientation of the buffer:
815 if (ctx
->Driver
.FrontFace
)
816 ctx
->Driver
.FrontFace(ctx
, ctx
->Polygon
.FrontFace
);
818 ctx
->NewState
|= _NEW_POLYGON
;
822 * Called via glDrawBuffer.
824 void radeonDrawBuffer( GLcontext
*ctx
, GLenum mode
)
826 if (RADEON_DEBUG
& RADEON_DRI
)
827 fprintf(stderr
, "%s %s\n", __FUNCTION__
,
828 _mesa_lookup_enum_by_nr( mode
));
830 if (ctx
->DrawBuffer
->Name
== 0) {
831 radeonContextPtr radeon
= RADEON_CONTEXT(ctx
);
833 const GLboolean was_front_buffer_rendering
=
834 radeon
->is_front_buffer_rendering
;
836 radeon
->is_front_buffer_rendering
= (mode
== GL_FRONT_LEFT
) ||
839 /* If we weren't front-buffer rendering before but we are now, make sure
840 * that the front-buffer has actually been allocated.
842 if (!was_front_buffer_rendering
&& radeon
->is_front_buffer_rendering
) {
843 radeon_update_renderbuffers(radeon
->dri
.context
,
844 radeon
->dri
.context
->driDrawablePriv
);
848 radeon_draw_buffer(ctx
, ctx
->DrawBuffer
);
851 void radeonReadBuffer( GLcontext
*ctx
, GLenum mode
)
853 if ((ctx
->DrawBuffer
!= NULL
) && (ctx
->DrawBuffer
->Name
== 0)) {
854 struct radeon_context
*const rmesa
= RADEON_CONTEXT(ctx
);
855 const GLboolean was_front_buffer_reading
= rmesa
->is_front_buffer_reading
;
856 rmesa
->is_front_buffer_reading
= (mode
== GL_FRONT_LEFT
)
857 || (mode
== GL_FRONT
);
859 if (!was_front_buffer_reading
&& rmesa
->is_front_buffer_reading
) {
860 radeon_update_renderbuffers(rmesa
->dri
.context
,
861 rmesa
->dri
.context
->driReadablePriv
);
864 /* nothing, until we implement h/w glRead/CopyPixels or CopyTexImage */
865 if (ctx
->ReadBuffer
== ctx
->DrawBuffer
) {
866 /* This will update FBO completeness status.
867 * A framebuffer will be incomplete if the GL_READ_BUFFER setting
868 * refers to a missing renderbuffer. Calling glReadBuffer can set
869 * that straight and can make the drawing buffer complete.
871 radeon_draw_buffer(ctx
, ctx
->DrawBuffer
);
876 /* Turn on/off page flipping according to the flags in the sarea:
878 void radeonUpdatePageFlipping(radeonContextPtr radeon
)
880 struct radeon_framebuffer
*rfb
= radeon_get_drawable(radeon
)->driverPrivate
;
882 rfb
->pf_active
= radeon
->sarea
->pfState
;
883 rfb
->pf_current_page
= radeon
->sarea
->pfCurrentPage
;
884 rfb
->pf_num_pages
= 2;
885 radeon_flip_renderbuffers(rfb
);
886 radeon_draw_buffer(radeon
->glCtx
, radeon
->glCtx
->DrawBuffer
);
889 void radeon_window_moved(radeonContextPtr radeon
)
891 /* Cliprects has to be updated before doing anything else */
892 radeonSetCliprects(radeon
);
893 if (!radeon
->radeonScreen
->driScreen
->dri2
.enabled
) {
894 radeonUpdatePageFlipping(radeon
);
898 void radeon_viewport(GLcontext
*ctx
, GLint x
, GLint y
, GLsizei width
, GLsizei height
)
900 radeonContextPtr radeon
= RADEON_CONTEXT(ctx
);
901 __DRIcontext
*driContext
= radeon
->dri
.context
;
902 void (*old_viewport
)(GLcontext
*ctx
, GLint x
, GLint y
,
903 GLsizei w
, GLsizei h
);
905 if (!driContext
->driScreenPriv
->dri2
.enabled
)
908 if (!radeon
->meta
.internal_viewport_call
&& ctx
->DrawBuffer
->Name
== 0) {
909 if (radeon
->is_front_buffer_rendering
) {
910 ctx
->Driver
.Flush(ctx
);
912 radeon_update_renderbuffers(driContext
, driContext
->driDrawablePriv
);
913 if (driContext
->driDrawablePriv
!= driContext
->driReadablePriv
)
914 radeon_update_renderbuffers(driContext
, driContext
->driReadablePriv
);
917 old_viewport
= ctx
->Driver
.Viewport
;
918 ctx
->Driver
.Viewport
= NULL
;
919 radeon_window_moved(radeon
);
920 radeon_draw_buffer(ctx
, radeon
->glCtx
->DrawBuffer
);
921 ctx
->Driver
.Viewport
= old_viewport
;
924 static void radeon_print_state_atom_prekmm(radeonContextPtr radeon
, struct radeon_state_atom
*state
)
927 int dwords
= (*state
->check
) (radeon
->glCtx
, state
);
928 drm_r300_cmd_header_t cmd
;
930 fprintf(stderr
, " emit %s %d/%d\n", state
->name
, dwords
, state
->cmd_size
);
932 if (radeon_is_debug_enabled(RADEON_STATE
, RADEON_TRACE
)) {
933 if (dwords
> state
->cmd_size
)
934 dwords
= state
->cmd_size
;
936 for (i
= 0; i
< dwords
;) {
937 cmd
= *((drm_r300_cmd_header_t
*) &state
->cmd
[i
]);
938 reg
= (cmd
.packet0
.reghi
<< 8) | cmd
.packet0
.reglo
;
939 fprintf(stderr
, " %s[%d]: cmdpacket0 (first reg=0x%04x, count=%d)\n",
940 state
->name
, i
, reg
, cmd
.packet0
.count
);
942 for (j
= 0; j
< cmd
.packet0
.count
&& i
< dwords
; j
++) {
943 fprintf(stderr
, " %s[%d]: 0x%04x = %08x\n",
944 state
->name
, i
, reg
, state
->cmd
[i
]);
952 static void radeon_print_state_atom(radeonContextPtr radeon
, struct radeon_state_atom
*state
)
954 int i
, j
, reg
, count
;
957 if (!radeon_is_debug_enabled(RADEON_STATE
, RADEON_VERBOSE
) )
960 if (!radeon
->radeonScreen
->kernel_mm
) {
961 radeon_print_state_atom_prekmm(radeon
, state
);
965 dwords
= (*state
->check
) (radeon
->glCtx
, state
);
967 fprintf(stderr
, " emit %s %d/%d\n", state
->name
, dwords
, state
->cmd_size
);
969 if (radeon_is_debug_enabled(RADEON_STATE
, RADEON_TRACE
)) {
970 if (dwords
> state
->cmd_size
)
971 dwords
= state
->cmd_size
;
972 for (i
= 0; i
< dwords
;) {
973 packet0
= state
->cmd
[i
];
974 reg
= (packet0
& 0x1FFF) << 2;
975 count
= ((packet0
& 0x3FFF0000) >> 16) + 1;
976 fprintf(stderr
, " %s[%d]: cmdpacket0 (first reg=0x%04x, count=%d)\n",
977 state
->name
, i
, reg
, count
);
979 for (j
= 0; j
< count
&& i
< dwords
; j
++) {
980 fprintf(stderr
, " %s[%d]: 0x%04x = %08x\n",
981 state
->name
, i
, reg
, state
->cmd
[i
]);
990 * Count total size for next state emit.
992 GLuint
radeonCountStateEmitSize(radeonContextPtr radeon
)
994 struct radeon_state_atom
*atom
;
996 /* check if we are going to emit full state */
998 if (radeon
->cmdbuf
.cs
->cdw
&& !radeon
->hw
.all_dirty
) {
999 if (!radeon
->hw
.is_dirty
)
1001 foreach(atom
, &radeon
->hw
.atomlist
) {
1003 const GLuint atom_size
= atom
->check(radeon
->glCtx
, atom
);
1004 dwords
+= atom_size
;
1005 if (RADEON_CMDBUF
&& atom_size
) {
1006 radeon_print_state_atom(radeon
, atom
);
1011 foreach(atom
, &radeon
->hw
.atomlist
) {
1012 const GLuint atom_size
= atom
->check(radeon
->glCtx
, atom
);
1013 dwords
+= atom_size
;
1014 if (RADEON_CMDBUF
&& atom_size
) {
1015 radeon_print_state_atom(radeon
, atom
);
1021 radeon_print(RADEON_STATE
, RADEON_NORMAL
, "%s %u\n", __func__
, dwords
);
1025 static INLINE
void radeon_emit_atom(radeonContextPtr radeon
, struct radeon_state_atom
*atom
)
1027 BATCH_LOCALS(radeon
);
1030 dwords
= (*atom
->check
) (radeon
->glCtx
, atom
);
1033 radeon_print_state_atom(radeon
, atom
);
1036 (*atom
->emit
)(radeon
->glCtx
, atom
);
1038 BEGIN_BATCH_NO_AUTOSTATE(dwords
);
1039 OUT_BATCH_TABLE(atom
->cmd
, dwords
);
1043 radeon_print(RADEON_STATE
, RADEON_VERBOSE
, " skip state %s\n", atom
->name
);
1045 atom
->dirty
= GL_FALSE
;
1049 static INLINE
void radeonEmitAtoms(radeonContextPtr radeon
, GLboolean emitAll
)
1051 struct radeon_state_atom
*atom
;
1053 if (radeon
->vtbl
.pre_emit_atoms
)
1054 radeon
->vtbl
.pre_emit_atoms(radeon
);
1056 /* Emit actual atoms */
1057 if (radeon
->hw
.all_dirty
|| emitAll
) {
1058 foreach(atom
, &radeon
->hw
.atomlist
)
1059 radeon_emit_atom( radeon
, atom
);
1061 foreach(atom
, &radeon
->hw
.atomlist
) {
1063 radeon_emit_atom( radeon
, atom
);
1070 static GLboolean
radeon_revalidate_bos(GLcontext
*ctx
)
1072 radeonContextPtr radeon
= RADEON_CONTEXT(ctx
);
1075 ret
= radeon_cs_space_check(radeon
->cmdbuf
.cs
);
1076 if (ret
== RADEON_CS_SPACE_FLUSH
)
1081 void radeonEmitState(radeonContextPtr radeon
)
1083 radeon_print(RADEON_STATE
, RADEON_NORMAL
, "%s\n", __FUNCTION__
);
1085 if (radeon
->vtbl
.pre_emit_state
)
1086 radeon
->vtbl
.pre_emit_state(radeon
);
1088 /* this code used to return here but now it emits zbs */
1089 if (radeon
->cmdbuf
.cs
->cdw
&& !radeon
->hw
.is_dirty
&& !radeon
->hw
.all_dirty
)
1092 if (!radeon
->cmdbuf
.cs
->cdw
) {
1093 if (RADEON_DEBUG
& RADEON_STATE
)
1094 fprintf(stderr
, "Begin reemit state\n");
1096 radeonEmitAtoms(radeon
, GL_TRUE
);
1099 if (RADEON_DEBUG
& RADEON_STATE
)
1100 fprintf(stderr
, "Begin dirty state\n");
1102 radeonEmitAtoms(radeon
, GL_FALSE
);
1105 radeon
->hw
.is_dirty
= GL_FALSE
;
1106 radeon
->hw
.all_dirty
= GL_FALSE
;
1110 void radeonFlush(GLcontext
*ctx
)
1112 radeonContextPtr radeon
= RADEON_CONTEXT(ctx
);
1113 if (RADEON_DEBUG
& RADEON_IOCTL
)
1114 fprintf(stderr
, "%s %d\n", __FUNCTION__
, radeon
->cmdbuf
.cs
->cdw
);
1116 /* okay if we have no cmds in the buffer &&
1117 we have no DMA flush &&
1118 we have no DMA buffer allocated.
1119 then no point flushing anything at all.
1121 if (!radeon
->dma
.flush
&& !radeon
->cmdbuf
.cs
->cdw
&& is_empty_list(&radeon
->dma
.reserved
))
1124 if (radeon
->dma
.flush
)
1125 radeon
->dma
.flush( ctx
);
1127 radeonEmitState(radeon
);
1129 if (radeon
->cmdbuf
.cs
->cdw
)
1130 rcommonFlushCmdBuf(radeon
, __FUNCTION__
);
1132 if ((ctx
->DrawBuffer
->Name
== 0) && radeon
->front_buffer_dirty
) {
1133 __DRIscreen
*const screen
= radeon
->radeonScreen
->driScreen
;
1135 if (screen
->dri2
.loader
&& (screen
->dri2
.loader
->base
.version
>= 2)
1136 && (screen
->dri2
.loader
->flushFrontBuffer
!= NULL
)) {
1137 __DRIdrawablePrivate
* drawable
= radeon_get_drawable(radeon
);
1138 (*screen
->dri2
.loader
->flushFrontBuffer
)(drawable
, drawable
->loaderPrivate
);
1140 /* Only clear the dirty bit if front-buffer rendering is no longer
1141 * enabled. This is done so that the dirty bit can only be set in
1142 * glDrawBuffer. Otherwise the dirty bit would have to be set at
1143 * each of N places that do rendering. This has worse performances,
1144 * but it is much easier to get correct.
1146 if (!radeon
->is_front_buffer_rendering
) {
1147 radeon
->front_buffer_dirty
= GL_FALSE
;
1152 make_empty_list(&radeon
->query
.not_flushed_head
);
1156 /* Make sure all commands have been sent to the hardware and have
1157 * completed processing.
1159 void radeonFinish(GLcontext
* ctx
)
1161 radeonContextPtr radeon
= RADEON_CONTEXT(ctx
);
1162 struct gl_framebuffer
*fb
= ctx
->DrawBuffer
;
1165 if (ctx
->Driver
.Flush
)
1166 ctx
->Driver
.Flush(ctx
); /* +r6/r7 */
1168 if (radeon
->radeonScreen
->kernel_mm
) {
1169 for (i
= 0; i
< fb
->_NumColorDrawBuffers
; i
++) {
1170 struct radeon_renderbuffer
*rrb
;
1171 rrb
= radeon_renderbuffer(fb
->_ColorDrawBuffers
[i
]);
1173 radeon_bo_wait(rrb
->bo
);
1176 struct radeon_renderbuffer
*rrb
;
1177 rrb
= radeon_get_depthbuffer(radeon
);
1179 radeon_bo_wait(rrb
->bo
);
1181 } else if (radeon
->do_irqs
) {
1182 LOCK_HARDWARE(radeon
);
1183 radeonEmitIrqLocked(radeon
);
1184 UNLOCK_HARDWARE(radeon
);
1185 radeonWaitIrq(radeon
);
1187 radeonWaitForIdle(radeon
);
1193 * Send the current command buffer via ioctl to the hardware.
1195 int rcommonFlushCmdBufLocked(radeonContextPtr rmesa
, const char *caller
)
1199 if (rmesa
->cmdbuf
.flushing
) {
1200 fprintf(stderr
, "Recursive call into r300FlushCmdBufLocked!\n");
1203 rmesa
->cmdbuf
.flushing
= 1;
1205 if (RADEON_DEBUG
& RADEON_IOCTL
) {
1206 fprintf(stderr
, "%s from %s - %i cliprects\n",
1207 __FUNCTION__
, caller
, rmesa
->numClipRects
);
1210 radeonEmitQueryEnd(rmesa
->glCtx
);
1212 if (rmesa
->cmdbuf
.cs
->cdw
) {
1213 ret
= radeon_cs_emit(rmesa
->cmdbuf
.cs
);
1214 rmesa
->hw
.all_dirty
= GL_TRUE
;
1216 radeon_cs_erase(rmesa
->cmdbuf
.cs
);
1217 rmesa
->cmdbuf
.flushing
= 0;
1219 if (radeon_revalidate_bos(rmesa
->glCtx
) == GL_FALSE
) {
1220 fprintf(stderr
,"failed to revalidate buffers\n");
1226 int rcommonFlushCmdBuf(radeonContextPtr rmesa
, const char *caller
)
1230 radeonReleaseDmaRegions(rmesa
);
1232 LOCK_HARDWARE(rmesa
);
1233 ret
= rcommonFlushCmdBufLocked(rmesa
, caller
);
1234 UNLOCK_HARDWARE(rmesa
);
1237 fprintf(stderr
, "drmRadeonCmdBuffer: %d. Kernel failed to "
1238 "parse or rejected command stream. See dmesg "
1239 "for more info.\n", ret
);
1247 * Make sure that enough space is available in the command buffer
1248 * by flushing if necessary.
1250 * \param dwords The number of dwords we need to be free on the command buffer
1252 GLboolean
rcommonEnsureCmdBufSpace(radeonContextPtr rmesa
, int dwords
, const char *caller
)
1254 if ((rmesa
->cmdbuf
.cs
->cdw
+ dwords
+ 128) > rmesa
->cmdbuf
.size
1255 || radeon_cs_need_flush(rmesa
->cmdbuf
.cs
)) {
1256 /* If we try to flush empty buffer there is too big rendering operation. */
1257 assert(rmesa
->cmdbuf
.cs
->cdw
);
1258 rcommonFlushCmdBuf(rmesa
, caller
);
1264 void rcommonInitCmdBuf(radeonContextPtr rmesa
)
1267 /* Initialize command buffer */
1268 size
= 256 * driQueryOptioni(&rmesa
->optionCache
,
1269 "command_buffer_size");
1270 if (size
< 2 * rmesa
->hw
.max_state_size
) {
1271 size
= 2 * rmesa
->hw
.max_state_size
+ 65535;
1273 if (size
> 64 * 256)
1276 radeon_print(RADEON_CS
, RADEON_VERBOSE
,
1277 "sizeof(drm_r300_cmd_header_t)=%zd\n", sizeof(drm_r300_cmd_header_t
));
1278 radeon_print(RADEON_CS
, RADEON_VERBOSE
,
1279 "sizeof(drm_radeon_cmd_buffer_t)=%zd\n", sizeof(drm_radeon_cmd_buffer_t
));
1280 radeon_print(RADEON_CS
, RADEON_VERBOSE
,
1281 "Allocating %d bytes command buffer (max state is %d bytes)\n",
1282 size
* 4, rmesa
->hw
.max_state_size
* 4);
1284 if (rmesa
->radeonScreen
->kernel_mm
) {
1285 int fd
= rmesa
->radeonScreen
->driScreen
->fd
;
1286 rmesa
->cmdbuf
.csm
= radeon_cs_manager_gem_ctor(fd
);
1288 rmesa
->cmdbuf
.csm
= radeon_cs_manager_legacy_ctor(rmesa
);
1290 if (rmesa
->cmdbuf
.csm
== NULL
) {
1291 /* FIXME: fatal error */
1294 rmesa
->cmdbuf
.cs
= radeon_cs_create(rmesa
->cmdbuf
.csm
, size
);
1295 assert(rmesa
->cmdbuf
.cs
!= NULL
);
1296 rmesa
->cmdbuf
.size
= size
;
1298 radeon_cs_space_set_flush(rmesa
->cmdbuf
.cs
,
1299 (void (*)(void *))rmesa
->glCtx
->Driver
.Flush
, rmesa
->glCtx
);
1301 if (!rmesa
->radeonScreen
->kernel_mm
) {
1302 radeon_cs_set_limit(rmesa
->cmdbuf
.cs
, RADEON_GEM_DOMAIN_VRAM
, rmesa
->radeonScreen
->texSize
[0]);
1303 radeon_cs_set_limit(rmesa
->cmdbuf
.cs
, RADEON_GEM_DOMAIN_GTT
, rmesa
->radeonScreen
->gartTextures
.size
);
1305 struct drm_radeon_gem_info mminfo
= { 0 };
1307 if (!drmCommandWriteRead(rmesa
->dri
.fd
, DRM_RADEON_GEM_INFO
, &mminfo
, sizeof(mminfo
)))
1309 radeon_cs_set_limit(rmesa
->cmdbuf
.cs
, RADEON_GEM_DOMAIN_VRAM
, mminfo
.vram_visible
);
1310 radeon_cs_set_limit(rmesa
->cmdbuf
.cs
, RADEON_GEM_DOMAIN_GTT
, mminfo
.gart_size
);
1316 * Destroy the command buffer
1318 void rcommonDestroyCmdBuf(radeonContextPtr rmesa
)
1320 radeon_cs_destroy(rmesa
->cmdbuf
.cs
);
1321 if (rmesa
->radeonScreen
->driScreen
->dri2
.enabled
|| rmesa
->radeonScreen
->kernel_mm
) {
1322 radeon_cs_manager_gem_dtor(rmesa
->cmdbuf
.csm
);
1324 radeon_cs_manager_legacy_dtor(rmesa
->cmdbuf
.csm
);
1328 void rcommonBeginBatch(radeonContextPtr rmesa
, int n
,
1331 const char *function
,
1334 if (!rmesa
->cmdbuf
.cs
->cdw
&& dostate
) {
1335 radeon_print(RADEON_STATE
, RADEON_NORMAL
,
1336 "Reemit state after flush (from %s)\n", function
);
1337 radeonEmitState(rmesa
);
1339 radeon_cs_begin(rmesa
->cmdbuf
.cs
, n
, file
, function
, line
);
1341 radeon_print(RADEON_CS
, RADEON_VERBOSE
, "BEGIN_BATCH(%d) at %d, from %s:%i\n",
1342 n
, rmesa
->cmdbuf
.cs
->cdw
, function
, line
);
1346 void radeonUserClear(GLcontext
*ctx
, GLuint mask
)
1348 _mesa_meta_clear(ctx
, mask
);