1 /**************************************************************************
3 Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
5 The Weather Channel (TM) funded Tungsten Graphics to develop the
6 initial release of the Radeon 8500 driver under the XFree86 license.
7 This notice must be preserved.
9 Permission is hereby granted, free of charge, to any person obtaining
10 a copy of this software and associated documentation files (the
11 "Software"), to deal in the Software without restriction, including
12 without limitation the rights to use, copy, modify, merge, publish,
13 distribute, sublicense, and/or sell copies of the Software, and to
14 permit persons to whom the Software is furnished to do so, subject to
15 the following conditions:
17 The above copyright notice and this permission notice (including the
18 next paragraph) shall be included in all copies or substantial
19 portions of the Software.
21 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
22 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
23 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
24 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
25 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
26 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
27 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
29 **************************************************************************/
33 * Keith Whitwell <keith@tungstengraphics.com>
37 - Scissor implementation
38 - buffer swap/copy ioctls
41 - cmdbuffer management
45 #include "main/glheader.h"
46 #include "main/imports.h"
47 #include "main/context.h"
48 #include "main/enums.h"
49 #include "main/framebuffer.h"
50 #include "main/renderbuffer.h"
51 #include "drivers/common/meta.h"
55 #include "radeon_common.h"
56 #include "radeon_bocs_wrapper.h"
57 #include "radeon_lock.h"
58 #include "radeon_drm.h"
59 #include "radeon_queryobj.h"
62 * Enable verbose debug output for emit code.
65 * 2 also print state values
67 #define RADEON_CMDBUF 0
69 /* =============================================================
/*
 * intersect_rect(): intersect cliprects *a and *b, storing the result in
 * *out.  The visible checks reject an empty intersection (x1 >= x2 or
 * y1 >= y2).
 *
 * NOTE(review): the extraction dropped the interior lines of this
 * function (the edge min/max assignments and both return statements) --
 * restore them from the pristine radeon_common.c before building.
 */
73 static GLboolean
intersect_rect(drm_clip_rect_t
* out
,
74 drm_clip_rect_t
* a
, drm_clip_rect_t
* b
)
/* empty in x -> no intersection (presumably returns GL_FALSE -- confirm) */
85 if (out
->x1
>= out
->x2
)
/* empty in y -> no intersection */
87 if (out
->y1
>= out
->y2
)
92 void radeonRecalcScissorRects(radeonContextPtr radeon
)
97 /* Grow cliprect store?
99 if (radeon
->state
.scissor
.numAllocedClipRects
< radeon
->numClipRects
) {
100 while (radeon
->state
.scissor
.numAllocedClipRects
<
101 radeon
->numClipRects
) {
102 radeon
->state
.scissor
.numAllocedClipRects
+= 1; /* zero case */
103 radeon
->state
.scissor
.numAllocedClipRects
*= 2;
106 if (radeon
->state
.scissor
.pClipRects
)
107 FREE(radeon
->state
.scissor
.pClipRects
);
109 radeon
->state
.scissor
.pClipRects
=
110 MALLOC(radeon
->state
.scissor
.numAllocedClipRects
*
111 sizeof(drm_clip_rect_t
));
113 if (radeon
->state
.scissor
.pClipRects
== NULL
) {
114 radeon
->state
.scissor
.numAllocedClipRects
= 0;
119 out
= radeon
->state
.scissor
.pClipRects
;
120 radeon
->state
.scissor
.numClipRects
= 0;
122 for (i
= 0; i
< radeon
->numClipRects
; i
++) {
123 if (intersect_rect(out
,
124 &radeon
->pClipRects
[i
],
125 &radeon
->state
.scissor
.rect
)) {
126 radeon
->state
.scissor
.numClipRects
++;
131 if (radeon
->vtbl
.update_scissor
)
132 radeon
->vtbl
.update_scissor(radeon
->glCtx
);
135 void radeon_get_cliprects(radeonContextPtr radeon
,
136 struct drm_clip_rect
**cliprects
,
137 unsigned int *num_cliprects
,
138 int *x_off
, int *y_off
)
140 __DRIdrawablePrivate
*dPriv
= radeon_get_drawable(radeon
);
141 struct radeon_framebuffer
*rfb
= dPriv
->driverPrivate
;
143 if (radeon
->constant_cliprect
) {
144 radeon
->fboRect
.x1
= 0;
145 radeon
->fboRect
.y1
= 0;
146 radeon
->fboRect
.x2
= radeon
->glCtx
->DrawBuffer
->Width
;
147 radeon
->fboRect
.y2
= radeon
->glCtx
->DrawBuffer
->Height
;
149 *cliprects
= &radeon
->fboRect
;
153 } else if (radeon
->front_cliprects
||
154 rfb
->pf_active
|| dPriv
->numBackClipRects
== 0) {
155 *cliprects
= dPriv
->pClipRects
;
156 *num_cliprects
= dPriv
->numClipRects
;
160 *num_cliprects
= dPriv
->numBackClipRects
;
161 *cliprects
= dPriv
->pBackClipRects
;
162 *x_off
= dPriv
->backX
;
163 *y_off
= dPriv
->backY
;
168 * Update cliprects and scissors.
170 void radeonSetCliprects(radeonContextPtr radeon
)
172 __DRIdrawablePrivate
*const drawable
= radeon_get_drawable(radeon
);
173 __DRIdrawablePrivate
*const readable
= radeon_get_readable(radeon
);
174 struct radeon_framebuffer
*const draw_rfb
= drawable
->driverPrivate
;
175 struct radeon_framebuffer
*const read_rfb
= readable
->driverPrivate
;
178 radeon_get_cliprects(radeon
, &radeon
->pClipRects
,
179 &radeon
->numClipRects
, &x_off
, &y_off
);
181 if ((draw_rfb
->base
.Width
!= drawable
->w
) ||
182 (draw_rfb
->base
.Height
!= drawable
->h
)) {
183 _mesa_resize_framebuffer(radeon
->glCtx
, &draw_rfb
->base
,
184 drawable
->w
, drawable
->h
);
185 draw_rfb
->base
.Initialized
= GL_TRUE
;
188 if (drawable
!= readable
) {
189 if ((read_rfb
->base
.Width
!= readable
->w
) ||
190 (read_rfb
->base
.Height
!= readable
->h
)) {
191 _mesa_resize_framebuffer(radeon
->glCtx
, &read_rfb
->base
,
192 readable
->w
, readable
->h
);
193 read_rfb
->base
.Initialized
= GL_TRUE
;
197 if (radeon
->state
.scissor
.enabled
)
198 radeonRecalcScissorRects(radeon
);
204 void radeonUpdateScissor( GLcontext
*ctx
)
206 radeonContextPtr rmesa
= RADEON_CONTEXT(ctx
);
207 GLint x
= ctx
->Scissor
.X
, y
= ctx
->Scissor
.Y
;
208 GLsizei w
= ctx
->Scissor
.Width
, h
= ctx
->Scissor
.Height
;
210 int min_x
, min_y
, max_x
, max_y
;
212 if (!ctx
->DrawBuffer
)
215 max_x
= ctx
->DrawBuffer
->Width
- 1;
216 max_y
= ctx
->DrawBuffer
->Height
- 1;
218 if ( !ctx
->DrawBuffer
->Name
) {
220 y1
= ctx
->DrawBuffer
->Height
- (y
+ h
);
230 if (!rmesa
->radeonScreen
->kernel_mm
) {
231 /* Fix scissors for dri 1 */
232 __DRIdrawablePrivate
*dPriv
= radeon_get_drawable(rmesa
);
236 max_x
+= dPriv
->x
+ 1;
240 max_y
+= dPriv
->y
+ 1;
243 rmesa
->state
.scissor
.rect
.x1
= CLAMP(x1
, min_x
, max_x
);
244 rmesa
->state
.scissor
.rect
.y1
= CLAMP(y1
, min_y
, max_y
);
245 rmesa
->state
.scissor
.rect
.x2
= CLAMP(x2
, min_x
, max_x
);
246 rmesa
->state
.scissor
.rect
.y2
= CLAMP(y2
, min_y
, max_y
);
248 radeonRecalcScissorRects( rmesa
);
251 /* =============================================================
255 void radeonScissor(GLcontext
* ctx
, GLint x
, GLint y
, GLsizei w
, GLsizei h
)
257 radeonContextPtr radeon
= RADEON_CONTEXT(ctx
);
258 if (ctx
->Scissor
.Enabled
) {
259 /* We don't pipeline cliprect changes */
260 radeon_firevertices(radeon
);
261 radeonUpdateScissor(ctx
);
265 void radeonPolygonStipplePreKMS( GLcontext
*ctx
, const GLubyte
*mask
)
267 radeonContextPtr radeon
= RADEON_CONTEXT(ctx
);
269 drm_radeon_stipple_t stipple
;
271 /* Must flip pattern upside down.
273 for ( i
= 0 ; i
< 32 ; i
++ ) {
274 stipple
.mask
[31 - i
] = ((GLuint
*) mask
)[i
];
277 /* TODO: push this into cmd mechanism
279 radeon_firevertices(radeon
);
280 LOCK_HARDWARE( radeon
);
282 drmCommandWrite( radeon
->dri
.fd
, DRM_RADEON_STIPPLE
,
283 &stipple
, sizeof(stipple
) );
284 UNLOCK_HARDWARE( radeon
);
288 /* ================================================================
289 * SwapBuffers with client-side throttling
/*
 * radeonGetLastFrame(): read the last completed frame counter from the
 * kernel via DRM_RADEON_GETPARAM with RADEON_PARAM_LAST_FRAME.
 *
 * NOTE(review): extraction dropped lines here (the `frame`/`ret`/`gp`
 * setup lines, the error exit after the fprintf, and the return of
 * `frame`) -- recover from the pristine file.
 */
292 static uint32_t radeonGetLastFrame(radeonContextPtr radeon
)
294 drm_radeon_getparam_t gp
;
298 gp
.param
= RADEON_PARAM_LAST_FRAME
;
299 gp
.value
= (int *)&frame
;
300 ret
= drmCommandWriteRead(radeon
->dri
.fd
, DRM_RADEON_GETPARAM
,
/* ioctl failure path: report and (presumably) exit -- confirm */
303 fprintf(stderr
, "%s: drmRadeonGetParam: %d\n", __FUNCTION__
,
/*
 * radeonGetAge(): read the last-clear "age" counter from the kernel via
 * DRM_RADEON_GETPARAM with RADEON_PARAM_LAST_CLEAR.
 *
 * NOTE(review): extraction dropped the `age`/`ret` declarations, the
 * error exit, and the return of `age` -- recover from the pristine file.
 */
311 uint32_t radeonGetAge(radeonContextPtr radeon
)
313 drm_radeon_getparam_t gp
;
317 gp
.param
= RADEON_PARAM_LAST_CLEAR
;
318 gp
.value
= (int *)&age
;
319 ret
= drmCommandWriteRead(radeon
->dri
.fd
, DRM_RADEON_GETPARAM
,
322 fprintf(stderr
, "%s: drmRadeonGetParam: %d\n", __FUNCTION__
,
/*
 * radeonEmitIrqLocked(): emit an IRQ via DRM_RADEON_IRQ_EMIT; the
 * resulting sequence number is written into radeon->iw.irq_seq so a
 * later radeonWaitIrq() can wait on it.  Caller must hold the hardware
 * lock (hence "Locked").
 *
 * NOTE(review): the `ret` declaration and the error exit after the
 * fprintf are missing from this extraction.
 */
330 static void radeonEmitIrqLocked(radeonContextPtr radeon
)
332 drm_radeon_irq_emit_t ie
;
335 ie
.irq_seq
= &radeon
->iw
.irq_seq
;
336 ret
= drmCommandWriteRead(radeon
->dri
.fd
, DRM_RADEON_IRQ_EMIT
,
339 fprintf(stderr
, "%s: drmRadeonIrqEmit: %d\n", __FUNCTION__
,
/*
 * radeonWaitIrq(): block until the previously emitted IRQ sequence
 * (radeon->iw) signals, retrying the DRM_RADEON_IRQ_WAIT ioctl while it
 * fails with EINTR or EBUSY.
 *
 * NOTE(review): the `do {` opening the retry loop and the error exit
 * are missing from this extraction.
 */
345 static void radeonWaitIrq(radeonContextPtr radeon
)
350 ret
= drmCommandWrite(radeon
->dri
.fd
, DRM_RADEON_IRQ_WAIT
,
351 &radeon
->iw
, sizeof(radeon
->iw
));
/* retry on signal interruption or transient busy */
352 } while (ret
&& (errno
== EINTR
|| errno
== EBUSY
));
355 fprintf(stderr
, "%s: drmRadeonIrqWait: %d\n", __FUNCTION__
,
/*
 * radeonWaitForFrameCompletion(): throttle swap by waiting until the
 * hardware's completed-frame counter catches up with sarea->last_frame.
 * Uses IRQ waits when radeon->do_irqs is set (temporarily dropping the
 * hardware lock around the sleep), otherwise spins with optional
 * usleep()s.
 *
 * NOTE(review): several lines (loop bodies, an `else` arm, the usleep
 * call at original line 386) are missing from this extraction.
 */
361 static void radeonWaitForFrameCompletion(radeonContextPtr radeon
)
363 drm_radeon_sarea_t
*sarea
= radeon
->sarea
;
365 if (radeon
->do_irqs
) {
366 if (radeonGetLastFrame(radeon
) < sarea
->last_frame
) {
367 if (!radeon
->irqsEmitted
) {
368 while (radeonGetLastFrame(radeon
) <
/* sleep without holding the hardware lock */
371 UNLOCK_HARDWARE(radeon
);
372 radeonWaitIrq(radeon
);
373 LOCK_HARDWARE(radeon
);
375 radeon
->irqsEmitted
= 10;
378 if (radeon
->irqsEmitted
) {
379 radeonEmitIrqLocked(radeon
);
380 radeon
->irqsEmitted
--;
/* non-IRQ path: poll the frame counter */
383 while (radeonGetLastFrame(radeon
) < sarea
->last_frame
) {
384 UNLOCK_HARDWARE(radeon
);
385 if (radeon
->do_usleeps
)
387 LOCK_HARDWARE(radeon
);
/*
 * radeonWaitForIdleLocked(): poll DRM_RADEON_CP_IDLE until the command
 * processor reports idle, giving up after 100 attempts.  Caller must
 * hold the hardware lock.
 *
 * NOTE(review): the `ret`/`i` declarations, the `do {` opening the
 * loop, and the exit after the timeout message are missing from this
 * extraction.
 */
393 void radeonWaitForIdleLocked(radeonContextPtr radeon
)
399 ret
= drmCommandNone(radeon
->dri
.fd
, DRM_RADEON_CP_IDLE
);
402 } while (ret
&& ++i
< 100);
/* timeout: bail out loudly */
405 UNLOCK_HARDWARE(radeon
);
406 fprintf(stderr
, "Error: R300 timed out... exiting\n");
411 static void radeonWaitForIdle(radeonContextPtr radeon
)
413 if (!radeon
->radeonScreen
->driScreen
->dri2
.enabled
) {
414 LOCK_HARDWARE(radeon
);
415 radeonWaitForIdleLocked(radeon
);
416 UNLOCK_HARDWARE(radeon
);
420 static void radeon_flip_renderbuffers(struct radeon_framebuffer
*rfb
)
422 int current_page
= rfb
->pf_current_page
;
423 int next_page
= (current_page
+ 1) % rfb
->pf_num_pages
;
424 struct gl_renderbuffer
*tmp_rb
;
426 /* Exchange renderbuffers if necessary but make sure their
427 * reference counts are preserved.
429 if (rfb
->color_rb
[current_page
] &&
430 rfb
->base
.Attachment
[BUFFER_FRONT_LEFT
].Renderbuffer
!=
431 &rfb
->color_rb
[current_page
]->base
) {
433 _mesa_reference_renderbuffer(&tmp_rb
,
434 rfb
->base
.Attachment
[BUFFER_FRONT_LEFT
].Renderbuffer
);
435 tmp_rb
= &rfb
->color_rb
[current_page
]->base
;
436 _mesa_reference_renderbuffer(&rfb
->base
.Attachment
[BUFFER_FRONT_LEFT
].Renderbuffer
, tmp_rb
);
437 _mesa_reference_renderbuffer(&tmp_rb
, NULL
);
440 if (rfb
->color_rb
[next_page
] &&
441 rfb
->base
.Attachment
[BUFFER_BACK_LEFT
].Renderbuffer
!=
442 &rfb
->color_rb
[next_page
]->base
) {
444 _mesa_reference_renderbuffer(&tmp_rb
,
445 rfb
->base
.Attachment
[BUFFER_BACK_LEFT
].Renderbuffer
);
446 tmp_rb
= &rfb
->color_rb
[next_page
]->base
;
447 _mesa_reference_renderbuffer(&rfb
->base
.Attachment
[BUFFER_BACK_LEFT
].Renderbuffer
, tmp_rb
);
448 _mesa_reference_renderbuffer(&tmp_rb
, NULL
);
452 /* Copy the back color buffer to the front color buffer.
454 void radeonCopyBuffer( __DRIdrawablePrivate
*dPriv
,
455 const drm_clip_rect_t
*rect
)
457 radeonContextPtr rmesa
;
458 struct radeon_framebuffer
*rfb
;
462 assert(dPriv
->driContextPriv
);
463 assert(dPriv
->driContextPriv
->driverPrivate
);
465 rmesa
= (radeonContextPtr
) dPriv
->driContextPriv
->driverPrivate
;
467 LOCK_HARDWARE(rmesa
);
469 rfb
= dPriv
->driverPrivate
;
471 if ( RADEON_DEBUG
& RADEON_IOCTL
) {
472 fprintf( stderr
, "\n%s( %p )\n\n", __FUNCTION__
, (void *) rmesa
->glCtx
);
475 nbox
= dPriv
->numClipRects
; /* must be in locked region */
477 for ( i
= 0 ; i
< nbox
; ) {
478 GLint nr
= MIN2( i
+ RADEON_NR_SAREA_CLIPRECTS
, nbox
);
479 drm_clip_rect_t
*box
= dPriv
->pClipRects
;
480 drm_clip_rect_t
*b
= rmesa
->sarea
->boxes
;
483 for ( ; i
< nr
; i
++ ) {
489 if (rect
->x1
> b
->x1
)
491 if (rect
->y1
> b
->y1
)
493 if (rect
->x2
< b
->x2
)
495 if (rect
->y2
< b
->y2
)
498 if (b
->x1
>= b
->x2
|| b
->y1
>= b
->y2
)
505 rmesa
->sarea
->nbox
= n
;
510 ret
= drmCommandNone( rmesa
->dri
.fd
, DRM_RADEON_SWAP
);
513 fprintf( stderr
, "DRM_RADEON_SWAP_BUFFERS: return = %d\n", ret
);
514 UNLOCK_HARDWARE( rmesa
);
519 UNLOCK_HARDWARE( rmesa
);
/*
 * radeonScheduleSwap(): client-side swap throttling.  Flushes pending
 * vertices, and if the drawable is invisible (no cliprects) sleeps 10ms
 * instead of swapping; otherwise waits for frame completion and then
 * for vblank.
 *
 * NOTE(review): the early-return after the usleep, the closing braces,
 * and the final return value are missing from this extraction.
 */
522 static int radeonScheduleSwap(__DRIdrawablePrivate
*dPriv
, GLboolean
*missed_target
)
524 radeonContextPtr rmesa
;
526 rmesa
= (radeonContextPtr
) dPriv
->driContextPriv
->driverPrivate
;
527 radeon_firevertices(rmesa
);
529 LOCK_HARDWARE( rmesa
);
531 if (!dPriv
->numClipRects
) {
532 UNLOCK_HARDWARE(rmesa
);
533 usleep(10000); /* throttle invisible client 10ms */
537 radeonWaitForFrameCompletion(rmesa
);
539 UNLOCK_HARDWARE(rmesa
);
540 driWaitForVBlank(dPriv
, missed_target
);
545 static GLboolean
radeonPageFlip( __DRIdrawablePrivate
*dPriv
)
547 radeonContextPtr radeon
;
549 __DRIscreenPrivate
*psp
;
550 struct radeon_renderbuffer
*rrb
;
551 struct radeon_framebuffer
*rfb
;
554 assert(dPriv
->driContextPriv
);
555 assert(dPriv
->driContextPriv
->driverPrivate
);
557 radeon
= (radeonContextPtr
) dPriv
->driContextPriv
->driverPrivate
;
558 rfb
= dPriv
->driverPrivate
;
559 rrb
= (void *)rfb
->base
.Attachment
[BUFFER_FRONT_LEFT
].Renderbuffer
;
561 psp
= dPriv
->driScreenPriv
;
563 LOCK_HARDWARE(radeon
);
565 if ( RADEON_DEBUG
& RADEON_IOCTL
) {
566 fprintf(stderr
, "%s: pfCurrentPage: %d %d\n", __FUNCTION__
,
567 radeon
->sarea
->pfCurrentPage
, radeon
->sarea
->pfState
);
569 drm_clip_rect_t
*box
= dPriv
->pClipRects
;
570 drm_clip_rect_t
*b
= radeon
->sarea
->boxes
;
572 radeon
->sarea
->nbox
= 1;
574 ret
= drmCommandNone( radeon
->dri
.fd
, DRM_RADEON_FLIP
);
576 UNLOCK_HARDWARE(radeon
);
579 fprintf( stderr
, "DRM_RADEON_FLIP: return = %d\n", ret
);
586 rfb
->pf_current_page
= radeon
->sarea
->pfCurrentPage
;
587 radeon_flip_renderbuffers(rfb
);
588 radeon_draw_buffer(radeon
->glCtx
, &rfb
->base
);
595 * Swap front and back buffer.
597 void radeonSwapBuffers(__DRIdrawablePrivate
* dPriv
)
600 __DRIscreenPrivate
*psp
;
602 if (dPriv
->driContextPriv
&& dPriv
->driContextPriv
->driverPrivate
) {
603 radeonContextPtr radeon
;
606 radeon
= (radeonContextPtr
) dPriv
->driContextPriv
->driverPrivate
;
609 if (ctx
->Visual
.doubleBufferMode
) {
610 GLboolean missed_target
;
611 struct radeon_framebuffer
*rfb
= dPriv
->driverPrivate
;
612 _mesa_notifySwapBuffers(ctx
);/* flush pending rendering comands */
614 radeonScheduleSwap(dPriv
, &missed_target
);
616 if (rfb
->pf_active
) {
617 radeonPageFlip(dPriv
);
619 radeonCopyBuffer(dPriv
, NULL
);
622 psp
= dPriv
->driScreenPriv
;
625 (*psp
->systemTime
->getUST
)( & ust
);
626 if ( missed_target
) {
627 rfb
->swap_missed_count
++;
628 rfb
->swap_missed_ust
= ust
- rfb
->swap_ust
;
632 radeon
->hw
.all_dirty
= GL_TRUE
;
635 /* XXX this shouldn't be an error but we can't handle it for now */
636 _mesa_problem(NULL
, "%s: drawable has no context!",
641 void radeonCopySubBuffer(__DRIdrawablePrivate
* dPriv
,
642 int x
, int y
, int w
, int h
)
644 if (dPriv
->driContextPriv
&& dPriv
->driContextPriv
->driverPrivate
) {
645 radeonContextPtr radeon
;
648 radeon
= (radeonContextPtr
) dPriv
->driContextPriv
->driverPrivate
;
651 if (ctx
->Visual
.doubleBufferMode
) {
652 drm_clip_rect_t rect
;
653 rect
.x1
= x
+ dPriv
->x
;
654 rect
.y1
= (dPriv
->h
- y
- h
) + dPriv
->y
;
655 rect
.x2
= rect
.x1
+ w
;
656 rect
.y2
= rect
.y1
+ h
;
657 _mesa_notifySwapBuffers(ctx
); /* flush pending rendering comands */
658 radeonCopyBuffer(dPriv
, &rect
);
661 /* XXX this shouldn't be an error but we can't handle it for now */
662 _mesa_problem(NULL
, "%s: drawable has no context!",
667 void radeon_draw_buffer(GLcontext
*ctx
, struct gl_framebuffer
*fb
)
669 radeonContextPtr radeon
= RADEON_CONTEXT(ctx
);
670 struct radeon_renderbuffer
*rrbDepth
= NULL
, *rrbStencil
= NULL
,
676 /* this can happen during the initial context initialization */
680 /* radeons only handle 1 color draw so far */
681 if (fb
->_NumColorDrawBuffers
!= 1) {
682 radeon
->vtbl
.fallback(ctx
, RADEON_FALLBACK_DRAW_BUFFER
, GL_TRUE
);
686 /* Do this here, note core Mesa, since this function is called from
687 * many places within the driver.
689 if (ctx
->NewState
& (_NEW_BUFFERS
| _NEW_COLOR
| _NEW_PIXEL
)) {
690 /* this updates the DrawBuffer->_NumColorDrawBuffers fields, etc */
691 _mesa_update_framebuffer(ctx
);
692 /* this updates the DrawBuffer's Width/Height if it's a FBO */
693 _mesa_update_draw_buffer_bounds(ctx
);
696 if (fb
->_Status
!= GL_FRAMEBUFFER_COMPLETE_EXT
) {
697 /* this may occur when we're called by glBindFrameBuffer() during
698 * the process of someone setting up renderbuffers, etc.
700 /*_mesa_debug(ctx, "DrawBuffer: incomplete user FBO\n");*/
705 ;/* do something depthy/stencily TODO */
710 if (fb
->_ColorDrawBufferIndexes
[0] == BUFFER_FRONT_LEFT
) {
711 rrbColor
= radeon_renderbuffer(fb
->Attachment
[BUFFER_FRONT_LEFT
].Renderbuffer
);
712 radeon
->front_cliprects
= GL_TRUE
;
713 radeon
->front_buffer_dirty
= GL_TRUE
;
715 rrbColor
= radeon_renderbuffer(fb
->Attachment
[BUFFER_BACK_LEFT
].Renderbuffer
);
716 radeon
->front_cliprects
= GL_FALSE
;
719 /* user FBO in theory */
720 struct radeon_renderbuffer
*rrb
;
721 rrb
= radeon_renderbuffer(fb
->_ColorDrawBuffers
[0]);
723 offset
= rrb
->draw_offset
;
726 radeon
->constant_cliprect
= GL_TRUE
;
729 if (rrbColor
== NULL
)
730 radeon
->vtbl
.fallback(ctx
, RADEON_FALLBACK_DRAW_BUFFER
, GL_TRUE
);
732 radeon
->vtbl
.fallback(ctx
, RADEON_FALLBACK_DRAW_BUFFER
, GL_FALSE
);
735 if (fb
->_DepthBuffer
&& fb
->_DepthBuffer
->Wrapped
) {
736 rrbDepth
= radeon_renderbuffer(fb
->_DepthBuffer
->Wrapped
);
737 if (rrbDepth
&& rrbDepth
->bo
) {
738 radeon
->vtbl
.fallback(ctx
, RADEON_FALLBACK_DEPTH_BUFFER
, GL_FALSE
);
740 radeon
->vtbl
.fallback(ctx
, RADEON_FALLBACK_DEPTH_BUFFER
, GL_TRUE
);
743 radeon
->vtbl
.fallback(ctx
, RADEON_FALLBACK_DEPTH_BUFFER
, GL_FALSE
);
747 if (fb
->_StencilBuffer
&& fb
->_StencilBuffer
->Wrapped
) {
748 rrbStencil
= radeon_renderbuffer(fb
->_StencilBuffer
->Wrapped
);
749 if (rrbStencil
&& rrbStencil
->bo
) {
750 radeon
->vtbl
.fallback(ctx
, RADEON_FALLBACK_STENCIL_BUFFER
, GL_FALSE
);
751 /* need to re-compute stencil hw state */
753 rrbDepth
= rrbStencil
;
755 radeon
->vtbl
.fallback(ctx
, RADEON_FALLBACK_STENCIL_BUFFER
, GL_TRUE
);
758 radeon
->vtbl
.fallback(ctx
, RADEON_FALLBACK_STENCIL_BUFFER
, GL_FALSE
);
759 if (ctx
->Driver
.Enable
!= NULL
)
760 ctx
->Driver
.Enable(ctx
, GL_STENCIL_TEST
, ctx
->Stencil
.Enabled
);
762 ctx
->NewState
|= _NEW_STENCIL
;
765 /* Update culling direction which changes depending on the
766 * orientation of the buffer:
768 if (ctx
->Driver
.FrontFace
)
769 ctx
->Driver
.FrontFace(ctx
, ctx
->Polygon
.FrontFace
);
771 ctx
->NewState
|= _NEW_POLYGON
;
774 * Update depth test state
776 if (ctx
->Driver
.Enable
) {
777 ctx
->Driver
.Enable(ctx
, GL_DEPTH_TEST
,
778 (ctx
->Depth
.Test
&& fb
->Visual
.depthBits
> 0));
779 /* Need to update the derived ctx->Stencil._Enabled first */
780 ctx
->Driver
.Enable(ctx
, GL_STENCIL_TEST
,
781 (ctx
->Stencil
.Enabled
&& fb
->Visual
.stencilBits
> 0));
783 ctx
->NewState
|= (_NEW_DEPTH
| _NEW_STENCIL
);
786 _mesa_reference_renderbuffer(&radeon
->state
.depth
.rb
, &rrbDepth
->base
);
787 _mesa_reference_renderbuffer(&radeon
->state
.color
.rb
, &rrbColor
->base
);
788 radeon
->state
.color
.draw_offset
= offset
;
791 /* update viewport since it depends on window size */
792 if (ctx
->Driver
.Viewport
) {
793 ctx
->Driver
.Viewport(ctx
, ctx
->Viewport
.X
, ctx
->Viewport
.Y
,
794 ctx
->Viewport
.Width
, ctx
->Viewport
.Height
);
799 ctx
->NewState
|= _NEW_VIEWPORT
;
801 /* Set state we know depends on drawable parameters:
803 radeonUpdateScissor(ctx
);
804 radeon
->NewGLState
|= _NEW_SCISSOR
;
806 if (ctx
->Driver
.DepthRange
)
807 ctx
->Driver
.DepthRange(ctx
,
811 /* Update culling direction which changes depending on the
812 * orientation of the buffer:
814 if (ctx
->Driver
.FrontFace
)
815 ctx
->Driver
.FrontFace(ctx
, ctx
->Polygon
.FrontFace
);
817 ctx
->NewState
|= _NEW_POLYGON
;
821 * Called via glDrawBuffer.
823 void radeonDrawBuffer( GLcontext
*ctx
, GLenum mode
)
825 if (RADEON_DEBUG
& RADEON_DRI
)
826 fprintf(stderr
, "%s %s\n", __FUNCTION__
,
827 _mesa_lookup_enum_by_nr( mode
));
829 if (ctx
->DrawBuffer
->Name
== 0) {
830 radeonContextPtr radeon
= RADEON_CONTEXT(ctx
);
832 const GLboolean was_front_buffer_rendering
=
833 radeon
->is_front_buffer_rendering
;
835 radeon
->is_front_buffer_rendering
= (mode
== GL_FRONT_LEFT
) ||
838 /* If we weren't front-buffer rendering before but we are now, make sure
839 * that the front-buffer has actually been allocated.
841 if (!was_front_buffer_rendering
&& radeon
->is_front_buffer_rendering
) {
842 radeon_update_renderbuffers(radeon
->dri
.context
,
843 radeon
->dri
.context
->driDrawablePriv
);
847 radeon_draw_buffer(ctx
, ctx
->DrawBuffer
);
850 void radeonReadBuffer( GLcontext
*ctx
, GLenum mode
)
852 if ((ctx
->DrawBuffer
!= NULL
) && (ctx
->DrawBuffer
->Name
== 0)) {
853 struct radeon_context
*const rmesa
= RADEON_CONTEXT(ctx
);
854 const GLboolean was_front_buffer_reading
= rmesa
->is_front_buffer_reading
;
855 rmesa
->is_front_buffer_reading
= (mode
== GL_FRONT_LEFT
)
856 || (mode
== GL_FRONT
);
858 if (!was_front_buffer_reading
&& rmesa
->is_front_buffer_reading
) {
859 radeon_update_renderbuffers(rmesa
->dri
.context
,
860 rmesa
->dri
.context
->driReadablePriv
);
863 /* nothing, until we implement h/w glRead/CopyPixels or CopyTexImage */
864 if (ctx
->ReadBuffer
== ctx
->DrawBuffer
) {
865 /* This will update FBO completeness status.
866 * A framebuffer will be incomplete if the GL_READ_BUFFER setting
867 * refers to a missing renderbuffer. Calling glReadBuffer can set
868 * that straight and can make the drawing buffer complete.
870 radeon_draw_buffer(ctx
, ctx
->DrawBuffer
);
875 /* Turn on/off page flipping according to the flags in the sarea:
877 void radeonUpdatePageFlipping(radeonContextPtr radeon
)
879 struct radeon_framebuffer
*rfb
= radeon_get_drawable(radeon
)->driverPrivate
;
881 rfb
->pf_active
= radeon
->sarea
->pfState
;
882 rfb
->pf_current_page
= radeon
->sarea
->pfCurrentPage
;
883 rfb
->pf_num_pages
= 2;
884 radeon_flip_renderbuffers(rfb
);
885 radeon_draw_buffer(radeon
->glCtx
, radeon
->glCtx
->DrawBuffer
);
888 void radeon_window_moved(radeonContextPtr radeon
)
890 /* Cliprects has to be updated before doing anything else */
891 radeonSetCliprects(radeon
);
892 if (!radeon
->radeonScreen
->driScreen
->dri2
.enabled
) {
893 radeonUpdatePageFlipping(radeon
);
897 void radeon_viewport(GLcontext
*ctx
, GLint x
, GLint y
, GLsizei width
, GLsizei height
)
899 radeonContextPtr radeon
= RADEON_CONTEXT(ctx
);
900 __DRIcontext
*driContext
= radeon
->dri
.context
;
901 void (*old_viewport
)(GLcontext
*ctx
, GLint x
, GLint y
,
902 GLsizei w
, GLsizei h
);
904 if (!driContext
->driScreenPriv
->dri2
.enabled
)
907 if (!radeon
->meta
.internal_viewport_call
&& ctx
->DrawBuffer
->Name
== 0) {
908 if (radeon
->is_front_buffer_rendering
) {
909 ctx
->Driver
.Flush(ctx
);
911 radeon_update_renderbuffers(driContext
, driContext
->driDrawablePriv
);
912 if (driContext
->driDrawablePriv
!= driContext
->driReadablePriv
)
913 radeon_update_renderbuffers(driContext
, driContext
->driReadablePriv
);
916 old_viewport
= ctx
->Driver
.Viewport
;
917 ctx
->Driver
.Viewport
= NULL
;
918 radeon_window_moved(radeon
);
919 radeon_draw_buffer(ctx
, radeon
->glCtx
->DrawBuffer
);
920 ctx
->Driver
.Viewport
= old_viewport
;
923 static void radeon_print_state_atom_prekmm(radeonContextPtr radeon
, struct radeon_state_atom
*state
)
926 int dwords
= (*state
->check
) (radeon
->glCtx
, state
);
927 drm_r300_cmd_header_t cmd
;
929 fprintf(stderr
, " emit %s %d/%d\n", state
->name
, dwords
, state
->cmd_size
);
931 if (radeon_is_debug_enabled(RADEON_STATE
, RADEON_TRACE
)) {
932 if (dwords
> state
->cmd_size
)
933 dwords
= state
->cmd_size
;
935 for (i
= 0; i
< dwords
;) {
936 cmd
= *((drm_r300_cmd_header_t
*) &state
->cmd
[i
]);
937 reg
= (cmd
.packet0
.reghi
<< 8) | cmd
.packet0
.reglo
;
938 fprintf(stderr
, " %s[%d]: cmdpacket0 (first reg=0x%04x, count=%d)\n",
939 state
->name
, i
, reg
, cmd
.packet0
.count
);
941 for (j
= 0; j
< cmd
.packet0
.count
&& i
< dwords
; j
++) {
942 fprintf(stderr
, " %s[%d]: 0x%04x = %08x\n",
943 state
->name
, i
, reg
, state
->cmd
[i
]);
951 static void radeon_print_state_atom(radeonContextPtr radeon
, struct radeon_state_atom
*state
)
953 int i
, j
, reg
, count
;
956 if (!radeon_is_debug_enabled(RADEON_STATE
, RADEON_VERBOSE
) )
959 if (!radeon
->radeonScreen
->kernel_mm
) {
960 radeon_print_state_atom_prekmm(radeon
, state
);
964 dwords
= (*state
->check
) (radeon
->glCtx
, state
);
966 fprintf(stderr
, " emit %s %d/%d\n", state
->name
, dwords
, state
->cmd_size
);
968 if (radeon_is_debug_enabled(RADEON_STATE
, RADEON_TRACE
)) {
969 if (dwords
> state
->cmd_size
)
970 dwords
= state
->cmd_size
;
971 for (i
= 0; i
< dwords
;) {
972 packet0
= state
->cmd
[i
];
973 reg
= (packet0
& 0x1FFF) << 2;
974 count
= ((packet0
& 0x3FFF0000) >> 16) + 1;
975 fprintf(stderr
, " %s[%d]: cmdpacket0 (first reg=0x%04x, count=%d)\n",
976 state
->name
, i
, reg
, count
);
978 for (j
= 0; j
< count
&& i
< dwords
; j
++) {
979 fprintf(stderr
, " %s[%d]: 0x%04x = %08x\n",
980 state
->name
, i
, reg
, state
->cmd
[i
]);
989 * Count total size for next state emit.
991 GLuint
radeonCountStateEmitSize(radeonContextPtr radeon
)
993 struct radeon_state_atom
*atom
;
995 /* check if we are going to emit full state */
997 if (radeon
->cmdbuf
.cs
->cdw
&& !radeon
->hw
.all_dirty
) {
998 if (!radeon
->hw
.is_dirty
)
1000 foreach(atom
, &radeon
->hw
.atomlist
) {
1002 const GLuint atom_size
= atom
->check(radeon
->glCtx
, atom
);
1003 dwords
+= atom_size
;
1004 if (RADEON_CMDBUF
&& atom_size
) {
1005 radeon_print_state_atom(radeon
, atom
);
1010 foreach(atom
, &radeon
->hw
.atomlist
) {
1011 const GLuint atom_size
= atom
->check(radeon
->glCtx
, atom
);
1012 dwords
+= atom_size
;
1013 if (RADEON_CMDBUF
&& atom_size
) {
1014 radeon_print_state_atom(radeon
, atom
);
1020 radeon_print(RADEON_STATE
, RADEON_NORMAL
, "%s %u\n", __func__
, dwords
);
1024 static INLINE
void radeon_emit_atom(radeonContextPtr radeon
, struct radeon_state_atom
*atom
)
1026 BATCH_LOCALS(radeon
);
1029 dwords
= (*atom
->check
) (radeon
->glCtx
, atom
);
1032 radeon_print_state_atom(radeon
, atom
);
1035 (*atom
->emit
)(radeon
->glCtx
, atom
);
1037 BEGIN_BATCH_NO_AUTOSTATE(dwords
);
1038 OUT_BATCH_TABLE(atom
->cmd
, dwords
);
1042 radeon_print(RADEON_STATE
, RADEON_VERBOSE
, " skip state %s\n", atom
->name
);
1044 atom
->dirty
= GL_FALSE
;
1048 static INLINE
void radeonEmitAtoms(radeonContextPtr radeon
, GLboolean emitAll
)
1050 struct radeon_state_atom
*atom
;
1052 if (radeon
->vtbl
.pre_emit_atoms
)
1053 radeon
->vtbl
.pre_emit_atoms(radeon
);
1055 /* Emit actual atoms */
1056 if (radeon
->hw
.all_dirty
|| emitAll
) {
1057 foreach(atom
, &radeon
->hw
.atomlist
)
1058 radeon_emit_atom( radeon
, atom
);
1060 foreach(atom
, &radeon
->hw
.atomlist
) {
1062 radeon_emit_atom( radeon
, atom
);
/*
 * radeon_revalidate_bos(): ask libdrm whether the buffer objects
 * referenced by the current command stream still fit
 * (radeon_cs_space_check); RADEON_CS_SPACE_FLUSH signals failure.
 *
 * NOTE(review): the `ret` declaration and both return statements are
 * missing from this extraction -- presumably GL_FALSE on FLUSH and
 * GL_TRUE otherwise; confirm against the pristine file.
 */
1069 static GLboolean
radeon_revalidate_bos(GLcontext
*ctx
)
1071 radeonContextPtr radeon
= RADEON_CONTEXT(ctx
);
1074 ret
= radeon_cs_space_check(radeon
->cmdbuf
.cs
);
1075 if (ret
== RADEON_CS_SPACE_FLUSH
)
1080 void radeonEmitState(radeonContextPtr radeon
)
1082 radeon_print(RADEON_STATE
, RADEON_NORMAL
, "%s\n", __FUNCTION__
);
1084 if (radeon
->vtbl
.pre_emit_state
)
1085 radeon
->vtbl
.pre_emit_state(radeon
);
1087 /* this code used to return here but now it emits zbs */
1088 if (radeon
->cmdbuf
.cs
->cdw
&& !radeon
->hw
.is_dirty
&& !radeon
->hw
.all_dirty
)
1091 if (!radeon
->cmdbuf
.cs
->cdw
) {
1092 if (RADEON_DEBUG
& RADEON_STATE
)
1093 fprintf(stderr
, "Begin reemit state\n");
1095 radeonEmitAtoms(radeon
, GL_TRUE
);
1098 if (RADEON_DEBUG
& RADEON_STATE
)
1099 fprintf(stderr
, "Begin dirty state\n");
1101 radeonEmitAtoms(radeon
, GL_FALSE
);
1104 radeon
->hw
.is_dirty
= GL_FALSE
;
1105 radeon
->hw
.all_dirty
= GL_FALSE
;
1109 void radeonFlush(GLcontext
*ctx
)
1111 radeonContextPtr radeon
= RADEON_CONTEXT(ctx
);
1112 if (RADEON_DEBUG
& RADEON_IOCTL
)
1113 fprintf(stderr
, "%s %d\n", __FUNCTION__
, radeon
->cmdbuf
.cs
->cdw
);
1115 /* okay if we have no cmds in the buffer &&
1116 we have no DMA flush &&
1117 we have no DMA buffer allocated.
1118 then no point flushing anything at all.
1120 if (!radeon
->dma
.flush
&& !radeon
->cmdbuf
.cs
->cdw
&& is_empty_list(&radeon
->dma
.reserved
))
1123 if (radeon
->dma
.flush
)
1124 radeon
->dma
.flush( ctx
);
1126 radeonEmitState(radeon
);
1128 if (radeon
->cmdbuf
.cs
->cdw
)
1129 rcommonFlushCmdBuf(radeon
, __FUNCTION__
);
1131 if ((ctx
->DrawBuffer
->Name
== 0) && radeon
->front_buffer_dirty
) {
1132 __DRIscreen
*const screen
= radeon
->radeonScreen
->driScreen
;
1134 if (screen
->dri2
.loader
&& (screen
->dri2
.loader
->base
.version
>= 2)
1135 && (screen
->dri2
.loader
->flushFrontBuffer
!= NULL
)) {
1136 __DRIdrawablePrivate
* drawable
= radeon_get_drawable(radeon
);
1137 (*screen
->dri2
.loader
->flushFrontBuffer
)(drawable
, drawable
->loaderPrivate
);
1139 /* Only clear the dirty bit if front-buffer rendering is no longer
1140 * enabled. This is done so that the dirty bit can only be set in
1141 * glDrawBuffer. Otherwise the dirty bit would have to be set at
1142 * each of N places that do rendering. This has worse performances,
1143 * but it is much easier to get correct.
1145 if (!radeon
->is_front_buffer_rendering
) {
1146 radeon
->front_buffer_dirty
= GL_FALSE
;
1151 make_empty_list(&radeon
->query
.not_flushed_head
);
1155 /* Make sure all commands have been sent to the hardware and have
1156 * completed processing.
1158 void radeonFinish(GLcontext
* ctx
)
1160 radeonContextPtr radeon
= RADEON_CONTEXT(ctx
);
1161 struct gl_framebuffer
*fb
= ctx
->DrawBuffer
;
1164 if (ctx
->Driver
.Flush
)
1165 ctx
->Driver
.Flush(ctx
); /* +r6/r7 */
1167 if (radeon
->radeonScreen
->kernel_mm
) {
1168 for (i
= 0; i
< fb
->_NumColorDrawBuffers
; i
++) {
1169 struct radeon_renderbuffer
*rrb
;
1170 rrb
= radeon_renderbuffer(fb
->_ColorDrawBuffers
[i
]);
1172 radeon_bo_wait(rrb
->bo
);
1175 struct radeon_renderbuffer
*rrb
;
1176 rrb
= radeon_get_depthbuffer(radeon
);
1178 radeon_bo_wait(rrb
->bo
);
1180 } else if (radeon
->do_irqs
) {
1181 LOCK_HARDWARE(radeon
);
1182 radeonEmitIrqLocked(radeon
);
1183 UNLOCK_HARDWARE(radeon
);
1184 radeonWaitIrq(radeon
);
1186 radeonWaitForIdle(radeon
);
1192 * Send the current command buffer via ioctl to the hardware.
1194 int rcommonFlushCmdBufLocked(radeonContextPtr rmesa
, const char *caller
)
1198 if (rmesa
->cmdbuf
.flushing
) {
1199 fprintf(stderr
, "Recursive call into r300FlushCmdBufLocked!\n");
1202 rmesa
->cmdbuf
.flushing
= 1;
1204 if (RADEON_DEBUG
& RADEON_IOCTL
) {
1205 fprintf(stderr
, "%s from %s - %i cliprects\n",
1206 __FUNCTION__
, caller
, rmesa
->numClipRects
);
1209 radeonEmitQueryEnd(rmesa
->glCtx
);
1211 if (rmesa
->cmdbuf
.cs
->cdw
) {
1212 ret
= radeon_cs_emit(rmesa
->cmdbuf
.cs
);
1213 rmesa
->hw
.all_dirty
= GL_TRUE
;
1215 radeon_cs_erase(rmesa
->cmdbuf
.cs
);
1216 rmesa
->cmdbuf
.flushing
= 0;
1218 if (radeon_revalidate_bos(rmesa
->glCtx
) == GL_FALSE
) {
1219 fprintf(stderr
,"failed to revalidate buffers\n");
/*
 * rcommonFlushCmdBuf(): public command-buffer flush.  Releases DMA
 * regions, then submits the buffer under the hardware lock via
 * rcommonFlushCmdBufLocked(), reporting kernel rejection of the stream.
 *
 * NOTE(review): the `ret` declaration, the `if (ret)` guarding the
 * fprintf, the exit/abort on failure, and the return value are missing
 * from this extraction.
 */
1225 int rcommonFlushCmdBuf(radeonContextPtr rmesa
, const char *caller
)
1229 radeonReleaseDmaRegions(rmesa
);
1231 LOCK_HARDWARE(rmesa
);
1232 ret
= rcommonFlushCmdBufLocked(rmesa
, caller
);
1233 UNLOCK_HARDWARE(rmesa
);
1236 fprintf(stderr
, "drmRadeonCmdBuffer: %d. Kernel failed to "
1237 "parse or rejected command stream. See dmesg "
1238 "for more info.\n", ret
);
1246 * Make sure that enough space is available in the command buffer
1247 * by flushing if necessary.
1249 * \param dwords The number of dwords we need to be free on the command buffer
/*
 * rcommonEnsureCmdBufSpace(): make sure `dwords` (plus 128 dwords of
 * headroom) fit in the command buffer, flushing it when they do not or
 * when libdrm asks for a flush.
 *
 * NOTE(review): the return statements (whether a flush happened) are
 * missing from this extraction -- confirm against the pristine file.
 */
1251 GLboolean
rcommonEnsureCmdBufSpace(radeonContextPtr rmesa
, int dwords
, const char *caller
)
1253 if ((rmesa
->cmdbuf
.cs
->cdw
+ dwords
+ 128) > rmesa
->cmdbuf
.size
1254 || radeon_cs_need_flush(rmesa
->cmdbuf
.cs
)) {
1255 /* If we try to flush empty buffer there is too big rendering operation. */
1256 assert(rmesa
->cmdbuf
.cs
->cdw
);
1257 rcommonFlushCmdBuf(rmesa
, caller
);
1263 void rcommonInitCmdBuf(radeonContextPtr rmesa
)
1266 /* Initialize command buffer */
1267 size
= 256 * driQueryOptioni(&rmesa
->optionCache
,
1268 "command_buffer_size");
1269 if (size
< 2 * rmesa
->hw
.max_state_size
) {
1270 size
= 2 * rmesa
->hw
.max_state_size
+ 65535;
1272 if (size
> 64 * 256)
1275 radeon_print(RADEON_CS
, RADEON_VERBOSE
,
1276 "sizeof(drm_r300_cmd_header_t)=%zd\n", sizeof(drm_r300_cmd_header_t
));
1277 radeon_print(RADEON_CS
, RADEON_VERBOSE
,
1278 "sizeof(drm_radeon_cmd_buffer_t)=%zd\n", sizeof(drm_radeon_cmd_buffer_t
));
1279 radeon_print(RADEON_CS
, RADEON_VERBOSE
,
1280 "Allocating %d bytes command buffer (max state is %d bytes)\n",
1281 size
* 4, rmesa
->hw
.max_state_size
* 4);
1283 if (rmesa
->radeonScreen
->kernel_mm
) {
1284 int fd
= rmesa
->radeonScreen
->driScreen
->fd
;
1285 rmesa
->cmdbuf
.csm
= radeon_cs_manager_gem_ctor(fd
);
1287 rmesa
->cmdbuf
.csm
= radeon_cs_manager_legacy_ctor(rmesa
);
1289 if (rmesa
->cmdbuf
.csm
== NULL
) {
1290 /* FIXME: fatal error */
1293 rmesa
->cmdbuf
.cs
= radeon_cs_create(rmesa
->cmdbuf
.csm
, size
);
1294 assert(rmesa
->cmdbuf
.cs
!= NULL
);
1295 rmesa
->cmdbuf
.size
= size
;
1297 radeon_cs_space_set_flush(rmesa
->cmdbuf
.cs
,
1298 (void (*)(void *))rmesa
->glCtx
->Driver
.Flush
, rmesa
->glCtx
);
1300 if (!rmesa
->radeonScreen
->kernel_mm
) {
1301 radeon_cs_set_limit(rmesa
->cmdbuf
.cs
, RADEON_GEM_DOMAIN_VRAM
, rmesa
->radeonScreen
->texSize
[0]);
1302 radeon_cs_set_limit(rmesa
->cmdbuf
.cs
, RADEON_GEM_DOMAIN_GTT
, rmesa
->radeonScreen
->gartTextures
.size
);
1304 struct drm_radeon_gem_info mminfo
= { 0 };
1306 if (!drmCommandWriteRead(rmesa
->dri
.fd
, DRM_RADEON_GEM_INFO
, &mminfo
, sizeof(mminfo
)))
1308 radeon_cs_set_limit(rmesa
->cmdbuf
.cs
, RADEON_GEM_DOMAIN_VRAM
, mminfo
.vram_visible
);
1309 radeon_cs_set_limit(rmesa
->cmdbuf
.cs
, RADEON_GEM_DOMAIN_GTT
, mminfo
.gart_size
);
1315 * Destroy the command buffer
1317 void rcommonDestroyCmdBuf(radeonContextPtr rmesa
)
1319 radeon_cs_destroy(rmesa
->cmdbuf
.cs
);
1320 if (rmesa
->radeonScreen
->driScreen
->dri2
.enabled
|| rmesa
->radeonScreen
->kernel_mm
) {
1321 radeon_cs_manager_gem_dtor(rmesa
->cmdbuf
.csm
);
1323 radeon_cs_manager_legacy_dtor(rmesa
->cmdbuf
.csm
);
1327 void rcommonBeginBatch(radeonContextPtr rmesa
, int n
,
1330 const char *function
,
1333 if (!rmesa
->cmdbuf
.cs
->cdw
&& dostate
) {
1334 radeon_print(RADEON_STATE
, RADEON_NORMAL
,
1335 "Reemit state after flush (from %s)\n", function
);
1336 radeonEmitState(rmesa
);
1338 radeon_cs_begin(rmesa
->cmdbuf
.cs
, n
, file
, function
, line
);
1340 radeon_print(RADEON_CS
, RADEON_VERBOSE
, "BEGIN_BATCH(%d) at %d, from %s:%i\n",
1341 n
, rmesa
->cmdbuf
.cs
->cdw
, function
, line
);
1345 void radeonUserClear(GLcontext
*ctx
, GLuint mask
)
1347 _mesa_meta_Clear(ctx
, mask
);