1 /**************************************************************************
3 Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
5 The Weather Channel (TM) funded Tungsten Graphics to develop the
6 initial release of the Radeon 8500 driver under the XFree86 license.
7 This notice must be preserved.
9 Permission is hereby granted, free of charge, to any person obtaining
10 a copy of this software and associated documentation files (the
11 "Software"), to deal in the Software without restriction, including
12 without limitation the rights to use, copy, modify, merge, publish,
13 distribute, sublicense, and/or sell copies of the Software, and to
14 permit persons to whom the Software is furnished to do so, subject to
15 the following conditions:
17 The above copyright notice and this permission notice (including the
18 next paragraph) shall be included in all copies or substantial
19 portions of the Software.
21 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
22 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
23 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
24 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
25 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
26 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
27 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
29 **************************************************************************/
33 * Keith Whitwell <keith@tungstengraphics.com>
37 - Scissor implementation
38 - buffer swap/copy ioctls
41 - cmdbuffer management
45 #include "main/glheader.h"
46 #include "main/imports.h"
47 #include "main/context.h"
48 #include "main/enums.h"
49 #include "main/framebuffer.h"
50 #include "main/renderbuffer.h"
51 #include "drivers/common/meta.h"
55 #include "radeon_common.h"
56 #include "radeon_bocs_wrapper.h"
57 #include "radeon_lock.h"
58 #include "radeon_drm.h"
59 #include "radeon_queryobj.h"
62 * Enable verbose debug output for emit code.
 * 2 also print state values
67 #define RADEON_CMDBUF 0
69 /* =============================================================
73 static GLboolean
intersect_rect(drm_clip_rect_t
* out
,
74 drm_clip_rect_t
* a
, drm_clip_rect_t
* b
)
85 if (out
->x1
>= out
->x2
)
87 if (out
->y1
>= out
->y2
)
92 void radeonRecalcScissorRects(radeonContextPtr radeon
)
97 /* Grow cliprect store?
99 if (radeon
->state
.scissor
.numAllocedClipRects
< radeon
->numClipRects
) {
100 while (radeon
->state
.scissor
.numAllocedClipRects
<
101 radeon
->numClipRects
) {
102 radeon
->state
.scissor
.numAllocedClipRects
+= 1; /* zero case */
103 radeon
->state
.scissor
.numAllocedClipRects
*= 2;
106 if (radeon
->state
.scissor
.pClipRects
)
107 FREE(radeon
->state
.scissor
.pClipRects
);
109 radeon
->state
.scissor
.pClipRects
=
110 MALLOC(radeon
->state
.scissor
.numAllocedClipRects
*
111 sizeof(drm_clip_rect_t
));
113 if (radeon
->state
.scissor
.pClipRects
== NULL
) {
114 radeon
->state
.scissor
.numAllocedClipRects
= 0;
119 out
= radeon
->state
.scissor
.pClipRects
;
120 radeon
->state
.scissor
.numClipRects
= 0;
122 for (i
= 0; i
< radeon
->numClipRects
; i
++) {
123 if (intersect_rect(out
,
124 &radeon
->pClipRects
[i
],
125 &radeon
->state
.scissor
.rect
)) {
126 radeon
->state
.scissor
.numClipRects
++;
131 if (radeon
->vtbl
.update_scissor
)
132 radeon
->vtbl
.update_scissor(radeon
->glCtx
);
135 void radeon_get_cliprects(radeonContextPtr radeon
,
136 struct drm_clip_rect
**cliprects
,
137 unsigned int *num_cliprects
,
138 int *x_off
, int *y_off
)
140 __DRIdrawable
*dPriv
= radeon_get_drawable(radeon
);
141 struct radeon_framebuffer
*rfb
= dPriv
->driverPrivate
;
143 if (radeon
->constant_cliprect
) {
144 radeon
->fboRect
.x1
= 0;
145 radeon
->fboRect
.y1
= 0;
146 radeon
->fboRect
.x2
= radeon
->glCtx
->DrawBuffer
->Width
;
147 radeon
->fboRect
.y2
= radeon
->glCtx
->DrawBuffer
->Height
;
149 *cliprects
= &radeon
->fboRect
;
153 } else if (radeon
->front_cliprects
||
154 rfb
->pf_active
|| dPriv
->numBackClipRects
== 0) {
155 *cliprects
= dPriv
->pClipRects
;
156 *num_cliprects
= dPriv
->numClipRects
;
160 *num_cliprects
= dPriv
->numBackClipRects
;
161 *cliprects
= dPriv
->pBackClipRects
;
162 *x_off
= dPriv
->backX
;
163 *y_off
= dPriv
->backY
;
168 * Update cliprects and scissors.
170 void radeonSetCliprects(radeonContextPtr radeon
)
172 __DRIdrawable
*const drawable
= radeon_get_drawable(radeon
);
173 __DRIdrawable
*const readable
= radeon_get_readable(radeon
);
175 if(drawable
== NULL
&& readable
== NULL
)
178 struct radeon_framebuffer
*const draw_rfb
= drawable
->driverPrivate
;
179 struct radeon_framebuffer
*const read_rfb
= readable
->driverPrivate
;
182 radeon_get_cliprects(radeon
, &radeon
->pClipRects
,
183 &radeon
->numClipRects
, &x_off
, &y_off
);
185 if ((draw_rfb
->base
.Width
!= drawable
->w
) ||
186 (draw_rfb
->base
.Height
!= drawable
->h
)) {
187 _mesa_resize_framebuffer(radeon
->glCtx
, &draw_rfb
->base
,
188 drawable
->w
, drawable
->h
);
189 draw_rfb
->base
.Initialized
= GL_TRUE
;
192 if (drawable
!= readable
) {
193 if ((read_rfb
->base
.Width
!= readable
->w
) ||
194 (read_rfb
->base
.Height
!= readable
->h
)) {
195 _mesa_resize_framebuffer(radeon
->glCtx
, &read_rfb
->base
,
196 readable
->w
, readable
->h
);
197 read_rfb
->base
.Initialized
= GL_TRUE
;
201 if (radeon
->state
.scissor
.enabled
)
202 radeonRecalcScissorRects(radeon
);
208 void radeonUpdateScissor( struct gl_context
*ctx
)
210 radeonContextPtr rmesa
= RADEON_CONTEXT(ctx
);
211 GLint x
= ctx
->Scissor
.X
, y
= ctx
->Scissor
.Y
;
212 GLsizei w
= ctx
->Scissor
.Width
, h
= ctx
->Scissor
.Height
;
214 int min_x
, min_y
, max_x
, max_y
;
216 if (!ctx
->DrawBuffer
)
219 max_x
= ctx
->DrawBuffer
->Width
- 1;
220 max_y
= ctx
->DrawBuffer
->Height
- 1;
222 if ( !ctx
->DrawBuffer
->Name
) {
224 y1
= ctx
->DrawBuffer
->Height
- (y
+ h
);
235 rmesa
->state
.scissor
.rect
.x1
= CLAMP(x1
, min_x
, max_x
);
236 rmesa
->state
.scissor
.rect
.y1
= CLAMP(y1
, min_y
, max_y
);
237 rmesa
->state
.scissor
.rect
.x2
= CLAMP(x2
, min_x
, max_x
);
238 rmesa
->state
.scissor
.rect
.y2
= CLAMP(y2
, min_y
, max_y
);
240 radeonRecalcScissorRects( rmesa
);
243 /* =============================================================
247 void radeonScissor(struct gl_context
* ctx
, GLint x
, GLint y
, GLsizei w
, GLsizei h
)
249 radeonContextPtr radeon
= RADEON_CONTEXT(ctx
);
250 if (ctx
->Scissor
.Enabled
) {
251 /* We don't pipeline cliprect changes */
252 radeon_firevertices(radeon
);
253 radeonUpdateScissor(ctx
);
257 /* ================================================================
258 * SwapBuffers with client-side throttling
261 static uint32_t radeonGetLastFrame(radeonContextPtr radeon
)
263 drm_radeon_getparam_t gp
;
267 gp
.param
= RADEON_PARAM_LAST_FRAME
;
268 gp
.value
= (int *)&frame
;
269 ret
= drmCommandWriteRead(radeon
->dri
.fd
, DRM_RADEON_GETPARAM
,
272 fprintf(stderr
, "%s: drmRadeonGetParam: %d\n", __FUNCTION__
,
280 uint32_t radeonGetAge(radeonContextPtr radeon
)
282 drm_radeon_getparam_t gp
;
286 gp
.param
= RADEON_PARAM_LAST_CLEAR
;
287 gp
.value
= (int *)&age
;
288 ret
= drmCommandWriteRead(radeon
->dri
.fd
, DRM_RADEON_GETPARAM
,
291 fprintf(stderr
, "%s: drmRadeonGetParam: %d\n", __FUNCTION__
,
299 static void radeonEmitIrqLocked(radeonContextPtr radeon
)
301 drm_radeon_irq_emit_t ie
;
304 ie
.irq_seq
= &radeon
->iw
.irq_seq
;
305 ret
= drmCommandWriteRead(radeon
->dri
.fd
, DRM_RADEON_IRQ_EMIT
,
308 fprintf(stderr
, "%s: drmRadeonIrqEmit: %d\n", __FUNCTION__
,
314 static void radeonWaitIrq(radeonContextPtr radeon
)
319 ret
= drmCommandWrite(radeon
->dri
.fd
, DRM_RADEON_IRQ_WAIT
,
320 &radeon
->iw
, sizeof(radeon
->iw
));
321 } while (ret
&& (errno
== EINTR
|| errno
== EBUSY
));
324 fprintf(stderr
, "%s: drmRadeonIrqWait: %d\n", __FUNCTION__
,
330 static void radeonWaitForFrameCompletion(radeonContextPtr radeon
)
332 drm_radeon_sarea_t
*sarea
= radeon
->sarea
;
334 if (radeon
->do_irqs
) {
335 if (radeonGetLastFrame(radeon
) < sarea
->last_frame
) {
336 if (!radeon
->irqsEmitted
) {
337 while (radeonGetLastFrame(radeon
) <
340 UNLOCK_HARDWARE(radeon
);
341 radeonWaitIrq(radeon
);
342 LOCK_HARDWARE(radeon
);
344 radeon
->irqsEmitted
= 10;
347 if (radeon
->irqsEmitted
) {
348 radeonEmitIrqLocked(radeon
);
349 radeon
->irqsEmitted
--;
352 while (radeonGetLastFrame(radeon
) < sarea
->last_frame
) {
353 UNLOCK_HARDWARE(radeon
);
354 if (radeon
->do_usleeps
)
356 LOCK_HARDWARE(radeon
);
362 void radeonWaitForIdleLocked(radeonContextPtr radeon
)
368 ret
= drmCommandNone(radeon
->dri
.fd
, DRM_RADEON_CP_IDLE
);
371 } while (ret
&& ++i
< 100);
374 UNLOCK_HARDWARE(radeon
);
375 fprintf(stderr
, "Error: R300 timed out... exiting\n");
380 static void radeon_flip_renderbuffers(struct radeon_framebuffer
*rfb
)
382 int current_page
= rfb
->pf_current_page
;
383 int next_page
= (current_page
+ 1) % rfb
->pf_num_pages
;
384 struct gl_renderbuffer
*tmp_rb
;
386 /* Exchange renderbuffers if necessary but make sure their
387 * reference counts are preserved.
389 if (rfb
->color_rb
[current_page
] &&
390 rfb
->base
.Attachment
[BUFFER_FRONT_LEFT
].Renderbuffer
!=
391 &rfb
->color_rb
[current_page
]->base
) {
393 _mesa_reference_renderbuffer(&tmp_rb
,
394 rfb
->base
.Attachment
[BUFFER_FRONT_LEFT
].Renderbuffer
);
395 tmp_rb
= &rfb
->color_rb
[current_page
]->base
;
396 _mesa_reference_renderbuffer(&rfb
->base
.Attachment
[BUFFER_FRONT_LEFT
].Renderbuffer
, tmp_rb
);
397 _mesa_reference_renderbuffer(&tmp_rb
, NULL
);
400 if (rfb
->color_rb
[next_page
] &&
401 rfb
->base
.Attachment
[BUFFER_BACK_LEFT
].Renderbuffer
!=
402 &rfb
->color_rb
[next_page
]->base
) {
404 _mesa_reference_renderbuffer(&tmp_rb
,
405 rfb
->base
.Attachment
[BUFFER_BACK_LEFT
].Renderbuffer
);
406 tmp_rb
= &rfb
->color_rb
[next_page
]->base
;
407 _mesa_reference_renderbuffer(&rfb
->base
.Attachment
[BUFFER_BACK_LEFT
].Renderbuffer
, tmp_rb
);
408 _mesa_reference_renderbuffer(&tmp_rb
, NULL
);
412 /* Copy the back color buffer to the front color buffer.
414 void radeonCopyBuffer( __DRIdrawable
*dPriv
,
415 const drm_clip_rect_t
*rect
)
417 radeonContextPtr rmesa
;
421 assert(dPriv
->driContextPriv
);
422 assert(dPriv
->driContextPriv
->driverPrivate
);
424 rmesa
= (radeonContextPtr
) dPriv
->driContextPriv
->driverPrivate
;
426 LOCK_HARDWARE(rmesa
);
428 if ( RADEON_DEBUG
& RADEON_IOCTL
) {
429 fprintf( stderr
, "\n%s( %p )\n\n", __FUNCTION__
, (void *) rmesa
->glCtx
);
432 nbox
= dPriv
->numClipRects
; /* must be in locked region */
434 for ( i
= 0 ; i
< nbox
; ) {
435 GLint nr
= MIN2( i
+ RADEON_NR_SAREA_CLIPRECTS
, nbox
);
436 drm_clip_rect_t
*box
= dPriv
->pClipRects
;
437 drm_clip_rect_t
*b
= rmesa
->sarea
->boxes
;
440 for ( ; i
< nr
; i
++ ) {
446 if (rect
->x1
> b
->x1
)
448 if (rect
->y1
> b
->y1
)
450 if (rect
->x2
< b
->x2
)
452 if (rect
->y2
< b
->y2
)
455 if (b
->x1
>= b
->x2
|| b
->y1
>= b
->y2
)
462 rmesa
->sarea
->nbox
= n
;
467 ret
= drmCommandNone( rmesa
->dri
.fd
, DRM_RADEON_SWAP
);
470 fprintf( stderr
, "DRM_RADEON_SWAP_BUFFERS: return = %d\n", ret
);
471 UNLOCK_HARDWARE( rmesa
);
476 UNLOCK_HARDWARE( rmesa
);
479 static int radeonScheduleSwap(__DRIdrawable
*dPriv
, GLboolean
*missed_target
)
481 radeonContextPtr rmesa
;
483 rmesa
= (radeonContextPtr
) dPriv
->driContextPriv
->driverPrivate
;
484 radeon_firevertices(rmesa
);
486 LOCK_HARDWARE( rmesa
);
488 if (!dPriv
->numClipRects
) {
489 UNLOCK_HARDWARE(rmesa
);
490 usleep(10000); /* throttle invisible client 10ms */
494 radeonWaitForFrameCompletion(rmesa
);
496 UNLOCK_HARDWARE(rmesa
);
497 driWaitForVBlank(dPriv
, missed_target
);
502 static GLboolean
radeonPageFlip( __DRIdrawable
*dPriv
)
504 radeonContextPtr radeon
;
506 struct radeon_framebuffer
*rfb
;
509 assert(dPriv
->driContextPriv
);
510 assert(dPriv
->driContextPriv
->driverPrivate
);
512 radeon
= (radeonContextPtr
) dPriv
->driContextPriv
->driverPrivate
;
513 rfb
= dPriv
->driverPrivate
;
515 LOCK_HARDWARE(radeon
);
517 if ( RADEON_DEBUG
& RADEON_IOCTL
) {
518 fprintf(stderr
, "%s: pfCurrentPage: %d %d\n", __FUNCTION__
,
519 radeon
->sarea
->pfCurrentPage
, radeon
->sarea
->pfState
);
521 drm_clip_rect_t
*box
= dPriv
->pClipRects
;
522 drm_clip_rect_t
*b
= radeon
->sarea
->boxes
;
524 radeon
->sarea
->nbox
= 1;
526 ret
= drmCommandNone( radeon
->dri
.fd
, DRM_RADEON_FLIP
);
528 UNLOCK_HARDWARE(radeon
);
531 fprintf( stderr
, "DRM_RADEON_FLIP: return = %d\n", ret
);
538 rfb
->pf_current_page
= radeon
->sarea
->pfCurrentPage
;
539 radeon_flip_renderbuffers(rfb
);
540 radeon_draw_buffer(radeon
->glCtx
, &rfb
->base
);
547 * Swap front and back buffer.
549 void radeonSwapBuffers(__DRIdrawable
* dPriv
)
554 if (dPriv
->driContextPriv
&& dPriv
->driContextPriv
->driverPrivate
) {
555 radeonContextPtr radeon
;
556 struct gl_context
*ctx
;
558 radeon
= (radeonContextPtr
) dPriv
->driContextPriv
->driverPrivate
;
561 if (ctx
->Visual
.doubleBufferMode
) {
562 GLboolean missed_target
;
563 struct radeon_framebuffer
*rfb
= dPriv
->driverPrivate
;
564 _mesa_notifySwapBuffers(ctx
);/* flush pending rendering comands */
566 radeonScheduleSwap(dPriv
, &missed_target
);
568 if (rfb
->pf_active
) {
569 radeonPageFlip(dPriv
);
571 radeonCopyBuffer(dPriv
, NULL
);
574 psp
= dPriv
->driScreenPriv
;
577 (*psp
->systemTime
->getUST
)( & ust
);
578 if ( missed_target
) {
579 rfb
->swap_missed_count
++;
580 rfb
->swap_missed_ust
= ust
- rfb
->swap_ust
;
584 radeon
->hw
.all_dirty
= GL_TRUE
;
587 /* XXX this shouldn't be an error but we can't handle it for now */
588 _mesa_problem(NULL
, "%s: drawable has no context!",
593 void radeonCopySubBuffer(__DRIdrawable
* dPriv
,
594 int x
, int y
, int w
, int h
)
596 if (dPriv
->driContextPriv
&& dPriv
->driContextPriv
->driverPrivate
) {
597 radeonContextPtr radeon
;
598 struct gl_context
*ctx
;
600 radeon
= (radeonContextPtr
) dPriv
->driContextPriv
->driverPrivate
;
603 if (ctx
->Visual
.doubleBufferMode
) {
604 drm_clip_rect_t rect
;
605 rect
.x1
= x
+ dPriv
->x
;
606 rect
.y1
= (dPriv
->h
- y
- h
) + dPriv
->y
;
607 rect
.x2
= rect
.x1
+ w
;
608 rect
.y2
= rect
.y1
+ h
;
609 _mesa_notifySwapBuffers(ctx
); /* flush pending rendering comands */
610 radeonCopyBuffer(dPriv
, &rect
);
613 /* XXX this shouldn't be an error but we can't handle it for now */
614 _mesa_problem(NULL
, "%s: drawable has no context!",
620 * Check if we're about to draw into the front color buffer.
621 * If so, set the intel->front_buffer_dirty field to true.
624 radeon_check_front_buffer_rendering(struct gl_context
*ctx
)
626 radeonContextPtr radeon
= RADEON_CONTEXT(ctx
);
627 const struct gl_framebuffer
*fb
= ctx
->DrawBuffer
;
630 /* drawing to window system buffer */
631 if (fb
->_NumColorDrawBuffers
> 0) {
632 if (fb
->_ColorDrawBufferIndexes
[0] == BUFFER_FRONT_LEFT
) {
633 radeon
->front_buffer_dirty
= GL_TRUE
;
640 void radeon_draw_buffer(struct gl_context
*ctx
, struct gl_framebuffer
*fb
)
642 radeonContextPtr radeon
= RADEON_CONTEXT(ctx
);
643 struct radeon_renderbuffer
*rrbDepth
= NULL
, *rrbStencil
= NULL
,
649 /* this can happen during the initial context initialization */
653 /* radeons only handle 1 color draw so far */
654 if (fb
->_NumColorDrawBuffers
!= 1) {
655 radeon
->vtbl
.fallback(ctx
, RADEON_FALLBACK_DRAW_BUFFER
, GL_TRUE
);
659 /* Do this here, note core Mesa, since this function is called from
660 * many places within the driver.
662 if (ctx
->NewState
& (_NEW_BUFFERS
| _NEW_COLOR
| _NEW_PIXEL
)) {
663 /* this updates the DrawBuffer->_NumColorDrawBuffers fields, etc */
664 _mesa_update_framebuffer(ctx
);
665 /* this updates the DrawBuffer's Width/Height if it's a FBO */
666 _mesa_update_draw_buffer_bounds(ctx
);
669 if (fb
->_Status
!= GL_FRAMEBUFFER_COMPLETE_EXT
) {
670 /* this may occur when we're called by glBindFrameBuffer() during
671 * the process of someone setting up renderbuffers, etc.
673 /*_mesa_debug(ctx, "DrawBuffer: incomplete user FBO\n");*/
678 ;/* do something depthy/stencily TODO */
683 if (fb
->_ColorDrawBufferIndexes
[0] == BUFFER_FRONT_LEFT
) {
684 rrbColor
= radeon_renderbuffer(fb
->Attachment
[BUFFER_FRONT_LEFT
].Renderbuffer
);
685 radeon
->front_cliprects
= GL_TRUE
;
687 rrbColor
= radeon_renderbuffer(fb
->Attachment
[BUFFER_BACK_LEFT
].Renderbuffer
);
688 radeon
->front_cliprects
= GL_FALSE
;
691 /* user FBO in theory */
692 struct radeon_renderbuffer
*rrb
;
693 rrb
= radeon_renderbuffer(fb
->_ColorDrawBuffers
[0]);
695 offset
= rrb
->draw_offset
;
698 radeon
->constant_cliprect
= GL_TRUE
;
701 if (rrbColor
== NULL
)
702 radeon
->vtbl
.fallback(ctx
, RADEON_FALLBACK_DRAW_BUFFER
, GL_TRUE
);
704 radeon
->vtbl
.fallback(ctx
, RADEON_FALLBACK_DRAW_BUFFER
, GL_FALSE
);
707 if (fb
->_DepthBuffer
&& fb
->_DepthBuffer
->Wrapped
) {
708 rrbDepth
= radeon_renderbuffer(fb
->_DepthBuffer
->Wrapped
);
709 if (rrbDepth
&& rrbDepth
->bo
) {
710 radeon
->vtbl
.fallback(ctx
, RADEON_FALLBACK_DEPTH_BUFFER
, GL_FALSE
);
712 radeon
->vtbl
.fallback(ctx
, RADEON_FALLBACK_DEPTH_BUFFER
, GL_TRUE
);
715 radeon
->vtbl
.fallback(ctx
, RADEON_FALLBACK_DEPTH_BUFFER
, GL_FALSE
);
719 if (fb
->_StencilBuffer
&& fb
->_StencilBuffer
->Wrapped
) {
720 rrbStencil
= radeon_renderbuffer(fb
->_StencilBuffer
->Wrapped
);
721 if (rrbStencil
&& rrbStencil
->bo
) {
722 radeon
->vtbl
.fallback(ctx
, RADEON_FALLBACK_STENCIL_BUFFER
, GL_FALSE
);
723 /* need to re-compute stencil hw state */
725 rrbDepth
= rrbStencil
;
727 radeon
->vtbl
.fallback(ctx
, RADEON_FALLBACK_STENCIL_BUFFER
, GL_TRUE
);
730 radeon
->vtbl
.fallback(ctx
, RADEON_FALLBACK_STENCIL_BUFFER
, GL_FALSE
);
731 if (ctx
->Driver
.Enable
!= NULL
)
732 ctx
->Driver
.Enable(ctx
, GL_STENCIL_TEST
, ctx
->Stencil
.Enabled
);
734 ctx
->NewState
|= _NEW_STENCIL
;
737 /* Update culling direction which changes depending on the
738 * orientation of the buffer:
740 if (ctx
->Driver
.FrontFace
)
741 ctx
->Driver
.FrontFace(ctx
, ctx
->Polygon
.FrontFace
);
743 ctx
->NewState
|= _NEW_POLYGON
;
746 * Update depth test state
748 if (ctx
->Driver
.Enable
) {
749 ctx
->Driver
.Enable(ctx
, GL_DEPTH_TEST
,
750 (ctx
->Depth
.Test
&& fb
->Visual
.depthBits
> 0));
751 /* Need to update the derived ctx->Stencil._Enabled first */
752 ctx
->Driver
.Enable(ctx
, GL_STENCIL_TEST
,
753 (ctx
->Stencil
.Enabled
&& fb
->Visual
.stencilBits
> 0));
755 ctx
->NewState
|= (_NEW_DEPTH
| _NEW_STENCIL
);
758 _mesa_reference_renderbuffer(&radeon
->state
.depth
.rb
, &rrbDepth
->base
);
759 _mesa_reference_renderbuffer(&radeon
->state
.color
.rb
, &rrbColor
->base
);
760 radeon
->state
.color
.draw_offset
= offset
;
763 /* update viewport since it depends on window size */
764 if (ctx
->Driver
.Viewport
) {
765 ctx
->Driver
.Viewport(ctx
, ctx
->Viewport
.X
, ctx
->Viewport
.Y
,
766 ctx
->Viewport
.Width
, ctx
->Viewport
.Height
);
771 ctx
->NewState
|= _NEW_VIEWPORT
;
773 /* Set state we know depends on drawable parameters:
775 radeonUpdateScissor(ctx
);
776 radeon
->NewGLState
|= _NEW_SCISSOR
;
778 if (ctx
->Driver
.DepthRange
)
779 ctx
->Driver
.DepthRange(ctx
,
783 /* Update culling direction which changes depending on the
784 * orientation of the buffer:
786 if (ctx
->Driver
.FrontFace
)
787 ctx
->Driver
.FrontFace(ctx
, ctx
->Polygon
.FrontFace
);
789 ctx
->NewState
|= _NEW_POLYGON
;
793 * Called via glDrawBuffer.
795 void radeonDrawBuffer( struct gl_context
*ctx
, GLenum mode
)
797 if (RADEON_DEBUG
& RADEON_DRI
)
798 fprintf(stderr
, "%s %s\n", __FUNCTION__
,
799 _mesa_lookup_enum_by_nr( mode
));
801 if (ctx
->DrawBuffer
->Name
== 0) {
802 radeonContextPtr radeon
= RADEON_CONTEXT(ctx
);
804 const GLboolean was_front_buffer_rendering
=
805 radeon
->is_front_buffer_rendering
;
807 radeon
->is_front_buffer_rendering
= (mode
== GL_FRONT_LEFT
) ||
810 /* If we weren't front-buffer rendering before but we are now, make sure
811 * that the front-buffer has actually been allocated.
813 if (!was_front_buffer_rendering
&& radeon
->is_front_buffer_rendering
) {
814 radeon_update_renderbuffers(radeon
->dri
.context
,
815 radeon
->dri
.context
->driDrawablePriv
, GL_FALSE
);
819 radeon_draw_buffer(ctx
, ctx
->DrawBuffer
);
822 void radeonReadBuffer( struct gl_context
*ctx
, GLenum mode
)
824 if ((ctx
->DrawBuffer
!= NULL
) && (ctx
->DrawBuffer
->Name
== 0)) {
825 struct radeon_context
*const rmesa
= RADEON_CONTEXT(ctx
);
826 const GLboolean was_front_buffer_reading
= rmesa
->is_front_buffer_reading
;
827 rmesa
->is_front_buffer_reading
= (mode
== GL_FRONT_LEFT
)
828 || (mode
== GL_FRONT
);
830 if (!was_front_buffer_reading
&& rmesa
->is_front_buffer_reading
) {
831 radeon_update_renderbuffers(rmesa
->dri
.context
,
832 rmesa
->dri
.context
->driReadablePriv
, GL_FALSE
);
835 /* nothing, until we implement h/w glRead/CopyPixels or CopyTexImage */
836 if (ctx
->ReadBuffer
== ctx
->DrawBuffer
) {
837 /* This will update FBO completeness status.
838 * A framebuffer will be incomplete if the GL_READ_BUFFER setting
839 * refers to a missing renderbuffer. Calling glReadBuffer can set
840 * that straight and can make the drawing buffer complete.
842 radeon_draw_buffer(ctx
, ctx
->DrawBuffer
);
847 /* Turn on/off page flipping according to the flags in the sarea:
849 void radeonUpdatePageFlipping(radeonContextPtr radeon
)
851 struct radeon_framebuffer
*rfb
= radeon_get_drawable(radeon
)->driverPrivate
;
853 rfb
->pf_active
= radeon
->sarea
->pfState
;
854 rfb
->pf_current_page
= radeon
->sarea
->pfCurrentPage
;
855 rfb
->pf_num_pages
= 2;
856 radeon_flip_renderbuffers(rfb
);
857 radeon_draw_buffer(radeon
->glCtx
, radeon
->glCtx
->DrawBuffer
);
860 void radeon_window_moved(radeonContextPtr radeon
)
862 /* Cliprects has to be updated before doing anything else */
863 radeonSetCliprects(radeon
);
864 if (!radeon
->radeonScreen
->driScreen
->dri2
.enabled
) {
865 radeonUpdatePageFlipping(radeon
);
869 void radeon_viewport(struct gl_context
*ctx
, GLint x
, GLint y
, GLsizei width
, GLsizei height
)
871 radeonContextPtr radeon
= RADEON_CONTEXT(ctx
);
872 __DRIcontext
*driContext
= radeon
->dri
.context
;
873 void (*old_viewport
)(struct gl_context
*ctx
, GLint x
, GLint y
,
874 GLsizei w
, GLsizei h
);
876 if (!driContext
->driScreenPriv
->dri2
.enabled
)
879 if (ctx
->DrawBuffer
->Name
== 0) {
880 if (radeon
->is_front_buffer_rendering
) {
881 ctx
->Driver
.Flush(ctx
);
883 radeon_update_renderbuffers(driContext
, driContext
->driDrawablePriv
, GL_FALSE
);
884 if (driContext
->driDrawablePriv
!= driContext
->driReadablePriv
)
885 radeon_update_renderbuffers(driContext
, driContext
->driReadablePriv
, GL_FALSE
);
888 old_viewport
= ctx
->Driver
.Viewport
;
889 ctx
->Driver
.Viewport
= NULL
;
890 radeon_window_moved(radeon
);
891 radeon_draw_buffer(ctx
, radeon
->glCtx
->DrawBuffer
);
892 ctx
->Driver
.Viewport
= old_viewport
;
895 static void radeon_print_state_atom(radeonContextPtr radeon
, struct radeon_state_atom
*state
)
897 int i
, j
, reg
, count
;
900 if (!radeon_is_debug_enabled(RADEON_STATE
, RADEON_VERBOSE
) )
903 dwords
= (*state
->check
) (radeon
->glCtx
, state
);
905 fprintf(stderr
, " emit %s %d/%d\n", state
->name
, dwords
, state
->cmd_size
);
907 if (state
->cmd
&& radeon_is_debug_enabled(RADEON_STATE
, RADEON_TRACE
)) {
908 if (dwords
> state
->cmd_size
)
909 dwords
= state
->cmd_size
;
910 for (i
= 0; i
< dwords
;) {
911 packet0
= state
->cmd
[i
];
912 reg
= (packet0
& 0x1FFF) << 2;
913 count
= ((packet0
& 0x3FFF0000) >> 16) + 1;
914 fprintf(stderr
, " %s[%d]: cmdpacket0 (first reg=0x%04x, count=%d)\n",
915 state
->name
, i
, reg
, count
);
917 for (j
= 0; j
< count
&& i
< dwords
; j
++) {
918 fprintf(stderr
, " %s[%d]: 0x%04x = %08x\n",
919 state
->name
, i
, reg
, state
->cmd
[i
]);
928 * Count total size for next state emit.
930 GLuint
radeonCountStateEmitSize(radeonContextPtr radeon
)
932 struct radeon_state_atom
*atom
;
934 /* check if we are going to emit full state */
936 if (radeon
->cmdbuf
.cs
->cdw
&& !radeon
->hw
.all_dirty
) {
937 if (!radeon
->hw
.is_dirty
)
939 foreach(atom
, &radeon
->hw
.atomlist
) {
941 const GLuint atom_size
= atom
->check(radeon
->glCtx
, atom
);
943 if (RADEON_CMDBUF
&& atom_size
) {
944 radeon_print_state_atom(radeon
, atom
);
949 foreach(atom
, &radeon
->hw
.atomlist
) {
950 const GLuint atom_size
= atom
->check(radeon
->glCtx
, atom
);
952 if (RADEON_CMDBUF
&& atom_size
) {
953 radeon_print_state_atom(radeon
, atom
);
959 radeon_print(RADEON_STATE
, RADEON_NORMAL
, "%s %u\n", __func__
, dwords
);
963 static INLINE
void radeon_emit_atom(radeonContextPtr radeon
, struct radeon_state_atom
*atom
)
965 BATCH_LOCALS(radeon
);
968 dwords
= (*atom
->check
) (radeon
->glCtx
, atom
);
971 radeon_print_state_atom(radeon
, atom
);
974 (*atom
->emit
)(radeon
->glCtx
, atom
);
976 BEGIN_BATCH_NO_AUTOSTATE(dwords
);
977 OUT_BATCH_TABLE(atom
->cmd
, dwords
);
980 atom
->dirty
= GL_FALSE
;
983 radeon_print(RADEON_STATE
, RADEON_VERBOSE
, " skip state %s\n", atom
->name
);
988 static INLINE
void radeonEmitAtoms(radeonContextPtr radeon
, GLboolean emitAll
)
990 struct radeon_state_atom
*atom
;
992 if (radeon
->vtbl
.pre_emit_atoms
)
993 radeon
->vtbl
.pre_emit_atoms(radeon
);
995 /* Emit actual atoms */
996 if (radeon
->hw
.all_dirty
|| emitAll
) {
997 foreach(atom
, &radeon
->hw
.atomlist
)
998 radeon_emit_atom( radeon
, atom
);
1000 foreach(atom
, &radeon
->hw
.atomlist
) {
1002 radeon_emit_atom( radeon
, atom
);
1009 static GLboolean
radeon_revalidate_bos(struct gl_context
*ctx
)
1011 radeonContextPtr radeon
= RADEON_CONTEXT(ctx
);
1014 ret
= radeon_cs_space_check(radeon
->cmdbuf
.cs
);
1015 if (ret
== RADEON_CS_SPACE_FLUSH
)
1020 void radeonEmitState(radeonContextPtr radeon
)
1022 radeon_print(RADEON_STATE
, RADEON_NORMAL
, "%s\n", __FUNCTION__
);
1024 if (radeon
->vtbl
.pre_emit_state
)
1025 radeon
->vtbl
.pre_emit_state(radeon
);
1027 /* this code used to return here but now it emits zbs */
1028 if (radeon
->cmdbuf
.cs
->cdw
&& !radeon
->hw
.is_dirty
&& !radeon
->hw
.all_dirty
)
1031 if (!radeon
->cmdbuf
.cs
->cdw
) {
1032 if (RADEON_DEBUG
& RADEON_STATE
)
1033 fprintf(stderr
, "Begin reemit state\n");
1035 radeonEmitAtoms(radeon
, GL_TRUE
);
1038 if (RADEON_DEBUG
& RADEON_STATE
)
1039 fprintf(stderr
, "Begin dirty state\n");
1041 radeonEmitAtoms(radeon
, GL_FALSE
);
1044 radeon
->hw
.is_dirty
= GL_FALSE
;
1045 radeon
->hw
.all_dirty
= GL_FALSE
;
1049 void radeonFlush(struct gl_context
*ctx
)
1051 radeonContextPtr radeon
= RADEON_CONTEXT(ctx
);
1052 if (RADEON_DEBUG
& RADEON_IOCTL
)
1053 fprintf(stderr
, "%s %d\n", __FUNCTION__
, radeon
->cmdbuf
.cs
->cdw
);
1055 /* okay if we have no cmds in the buffer &&
1056 we have no DMA flush &&
1057 we have no DMA buffer allocated.
1058 then no point flushing anything at all.
1060 if (!radeon
->dma
.flush
&& !radeon
->cmdbuf
.cs
->cdw
&& is_empty_list(&radeon
->dma
.reserved
))
1063 if (radeon
->dma
.flush
)
1064 radeon
->dma
.flush( ctx
);
1066 if (radeon
->cmdbuf
.cs
->cdw
)
1067 rcommonFlushCmdBuf(radeon
, __FUNCTION__
);
1070 if ((ctx
->DrawBuffer
->Name
== 0) && radeon
->front_buffer_dirty
) {
1071 __DRIscreen
*const screen
= radeon
->radeonScreen
->driScreen
;
1073 if (screen
->dri2
.loader
&& (screen
->dri2
.loader
->base
.version
>= 2)
1074 && (screen
->dri2
.loader
->flushFrontBuffer
!= NULL
)) {
1075 __DRIdrawable
* drawable
= radeon_get_drawable(radeon
);
1077 /* We set the dirty bit in radeon_prepare_render() if we're
1078 * front buffer rendering once we get there.
1080 radeon
->front_buffer_dirty
= GL_FALSE
;
1082 (*screen
->dri2
.loader
->flushFrontBuffer
)(drawable
, drawable
->loaderPrivate
);
1087 /* Make sure all commands have been sent to the hardware and have
1088 * completed processing.
1090 void radeonFinish(struct gl_context
* ctx
)
1092 radeonContextPtr radeon
= RADEON_CONTEXT(ctx
);
1093 struct gl_framebuffer
*fb
= ctx
->DrawBuffer
;
1094 struct radeon_renderbuffer
*rrb
;
1097 if (ctx
->Driver
.Flush
)
1098 ctx
->Driver
.Flush(ctx
); /* +r6/r7 */
1100 for (i
= 0; i
< fb
->_NumColorDrawBuffers
; i
++) {
1101 struct radeon_renderbuffer
*rrb
;
1102 rrb
= radeon_renderbuffer(fb
->_ColorDrawBuffers
[i
]);
1104 radeon_bo_wait(rrb
->bo
);
1106 rrb
= radeon_get_depthbuffer(radeon
);
1108 radeon_bo_wait(rrb
->bo
);
1113 * Send the current command buffer via ioctl to the hardware.
1115 int rcommonFlushCmdBufLocked(radeonContextPtr rmesa
, const char *caller
)
1119 if (rmesa
->cmdbuf
.flushing
) {
1120 fprintf(stderr
, "Recursive call into r300FlushCmdBufLocked!\n");
1123 rmesa
->cmdbuf
.flushing
= 1;
1125 if (RADEON_DEBUG
& RADEON_IOCTL
) {
1126 fprintf(stderr
, "%s from %s - %i cliprects\n",
1127 __FUNCTION__
, caller
, rmesa
->numClipRects
);
1130 radeonEmitQueryEnd(rmesa
->glCtx
);
1132 if (rmesa
->cmdbuf
.cs
->cdw
) {
1133 ret
= radeon_cs_emit(rmesa
->cmdbuf
.cs
);
1134 rmesa
->hw
.all_dirty
= GL_TRUE
;
1136 radeon_cs_erase(rmesa
->cmdbuf
.cs
);
1137 rmesa
->cmdbuf
.flushing
= 0;
1139 if (radeon_revalidate_bos(rmesa
->glCtx
) == GL_FALSE
) {
1140 fprintf(stderr
,"failed to revalidate buffers\n");
1146 int rcommonFlushCmdBuf(radeonContextPtr rmesa
, const char *caller
)
1150 radeonReleaseDmaRegions(rmesa
);
1152 LOCK_HARDWARE(rmesa
);
1153 ret
= rcommonFlushCmdBufLocked(rmesa
, caller
);
1154 UNLOCK_HARDWARE(rmesa
);
1157 fprintf(stderr
, "drmRadeonCmdBuffer: %d. Kernel failed to "
1158 "parse or rejected command stream. See dmesg "
1159 "for more info.\n", ret
);
1167 * Make sure that enough space is available in the command buffer
1168 * by flushing if necessary.
1170 * \param dwords The number of dwords we need to be free on the command buffer
1172 GLboolean
rcommonEnsureCmdBufSpace(radeonContextPtr rmesa
, int dwords
, const char *caller
)
1174 if ((rmesa
->cmdbuf
.cs
->cdw
+ dwords
+ 128) > rmesa
->cmdbuf
.size
1175 || radeon_cs_need_flush(rmesa
->cmdbuf
.cs
)) {
1176 /* If we try to flush empty buffer there is too big rendering operation. */
1177 assert(rmesa
->cmdbuf
.cs
->cdw
);
1178 rcommonFlushCmdBuf(rmesa
, caller
);
1184 void rcommonInitCmdBuf(radeonContextPtr rmesa
)
1187 struct drm_radeon_gem_info mminfo
= { 0 };
1189 /* Initialize command buffer */
1190 size
= 256 * driQueryOptioni(&rmesa
->optionCache
,
1191 "command_buffer_size");
1192 if (size
< 2 * rmesa
->hw
.max_state_size
) {
1193 size
= 2 * rmesa
->hw
.max_state_size
+ 65535;
1195 if (size
> 64 * 256)
1198 radeon_print(RADEON_CS
, RADEON_VERBOSE
,
1199 "sizeof(drm_r300_cmd_header_t)=%zd\n", sizeof(drm_r300_cmd_header_t
));
1200 radeon_print(RADEON_CS
, RADEON_VERBOSE
,
1201 "sizeof(drm_radeon_cmd_buffer_t)=%zd\n", sizeof(drm_radeon_cmd_buffer_t
));
1202 radeon_print(RADEON_CS
, RADEON_VERBOSE
,
1203 "Allocating %d bytes command buffer (max state is %d bytes)\n",
1204 size
* 4, rmesa
->hw
.max_state_size
* 4);
1207 radeon_cs_manager_gem_ctor(rmesa
->radeonScreen
->driScreen
->fd
);
1208 if (rmesa
->cmdbuf
.csm
== NULL
) {
1209 /* FIXME: fatal error */
1212 rmesa
->cmdbuf
.cs
= radeon_cs_create(rmesa
->cmdbuf
.csm
, size
);
1213 assert(rmesa
->cmdbuf
.cs
!= NULL
);
1214 rmesa
->cmdbuf
.size
= size
;
1216 radeon_cs_space_set_flush(rmesa
->cmdbuf
.cs
,
1217 (void (*)(void *))rmesa
->glCtx
->Driver
.Flush
, rmesa
->glCtx
);
1220 if (!drmCommandWriteRead(rmesa
->dri
.fd
, DRM_RADEON_GEM_INFO
,
1221 &mminfo
, sizeof(mminfo
))) {
1222 radeon_cs_set_limit(rmesa
->cmdbuf
.cs
, RADEON_GEM_DOMAIN_VRAM
,
1223 mminfo
.vram_visible
);
1224 radeon_cs_set_limit(rmesa
->cmdbuf
.cs
, RADEON_GEM_DOMAIN_GTT
,
1230 * Destroy the command buffer
1232 void rcommonDestroyCmdBuf(radeonContextPtr rmesa
)
1234 radeon_cs_destroy(rmesa
->cmdbuf
.cs
);
1235 radeon_cs_manager_gem_dtor(rmesa
->cmdbuf
.csm
);
1238 void rcommonBeginBatch(radeonContextPtr rmesa
, int n
,
1241 const char *function
,
1244 radeon_cs_begin(rmesa
->cmdbuf
.cs
, n
, file
, function
, line
);
1246 radeon_print(RADEON_CS
, RADEON_VERBOSE
, "BEGIN_BATCH(%d) at %d, from %s:%i\n",
1247 n
, rmesa
->cmdbuf
.cs
->cdw
, function
, line
);
1251 void radeonUserClear(struct gl_context
*ctx
, GLuint mask
)
1253 _mesa_meta_Clear(ctx
, mask
);