1 /**************************************************************************
3 Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
5 The Weather Channel (TM) funded Tungsten Graphics to develop the
6 initial release of the Radeon 8500 driver under the XFree86 license.
7 This notice must be preserved.
9 Permission is hereby granted, free of charge, to any person obtaining
10 a copy of this software and associated documentation files (the
11 "Software"), to deal in the Software without restriction, including
12 without limitation the rights to use, copy, modify, merge, publish,
13 distribute, sublicense, and/or sell copies of the Software, and to
14 permit persons to whom the Software is furnished to do so, subject to
15 the following conditions:
17 The above copyright notice and this permission notice (including the
18 next paragraph) shall be included in all copies or substantial
19 portions of the Software.
21 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
22 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
23 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
24 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
25 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
26 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
27 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
29 **************************************************************************/
33 * Keith Whitwell <keith@tungstengraphics.com>
37 - Scissor implementation
38 - buffer swap/copy ioctls
41 - cmdbuffer management
45 #include "main/glheader.h"
46 #include "main/imports.h"
47 #include "main/context.h"
48 #include "main/api_arrayelt.h"
49 #include "main/enums.h"
50 #include "main/colormac.h"
51 #include "main/light.h"
52 #include "main/framebuffer.h"
53 #include "main/simple_list.h"
54 #include "main/renderbuffer.h"
55 #include "swrast/swrast.h"
58 #include "tnl/t_pipeline.h"
59 #include "swrast_setup/swrast_setup.h"
61 #include "main/blend.h"
62 #include "main/bufferobj.h"
63 #include "main/buffers.h"
64 #include "main/depth.h"
65 #include "main/shaders.h"
66 #include "main/texstate.h"
67 #include "main/varray.h"
68 #include "glapi/dispatch.h"
69 #include "swrast/swrast.h"
70 #include "main/stencil.h"
71 #include "main/matrix.h"
72 #include "main/attrib.h"
73 #include "main/enable.h"
74 #include "main/viewport.h"
79 #include "radeon_common.h"
80 #include "radeon_bocs_wrapper.h"
81 #include "radeon_lock.h"
82 #include "radeon_drm.h"
83 #include "radeon_mipmap_tree.h"
85 #define DEBUG_CMDBUF 0
87 /* =============================================================
91 static GLboolean
intersect_rect(drm_clip_rect_t
* out
,
92 drm_clip_rect_t
* a
, drm_clip_rect_t
* b
)
103 if (out
->x1
>= out
->x2
)
105 if (out
->y1
>= out
->y2
)
110 void radeonRecalcScissorRects(radeonContextPtr radeon
)
112 drm_clip_rect_t
*out
;
115 /* Grow cliprect store?
117 if (radeon
->state
.scissor
.numAllocedClipRects
< radeon
->numClipRects
) {
118 while (radeon
->state
.scissor
.numAllocedClipRects
<
119 radeon
->numClipRects
) {
120 radeon
->state
.scissor
.numAllocedClipRects
+= 1; /* zero case */
121 radeon
->state
.scissor
.numAllocedClipRects
*= 2;
124 if (radeon
->state
.scissor
.pClipRects
)
125 FREE(radeon
->state
.scissor
.pClipRects
);
127 radeon
->state
.scissor
.pClipRects
=
128 MALLOC(radeon
->state
.scissor
.numAllocedClipRects
*
129 sizeof(drm_clip_rect_t
));
131 if (radeon
->state
.scissor
.pClipRects
== NULL
) {
132 radeon
->state
.scissor
.numAllocedClipRects
= 0;
137 out
= radeon
->state
.scissor
.pClipRects
;
138 radeon
->state
.scissor
.numClipRects
= 0;
140 for (i
= 0; i
< radeon
->numClipRects
; i
++) {
141 if (intersect_rect(out
,
142 &radeon
->pClipRects
[i
],
143 &radeon
->state
.scissor
.rect
)) {
144 radeon
->state
.scissor
.numClipRects
++;
150 void radeon_get_cliprects(radeonContextPtr radeon
,
151 struct drm_clip_rect
**cliprects
,
152 unsigned int *num_cliprects
,
153 int *x_off
, int *y_off
)
155 __DRIdrawablePrivate
*dPriv
= radeon
->dri
.drawable
;
156 struct radeon_framebuffer
*rfb
= dPriv
->driverPrivate
;
158 if (radeon
->constant_cliprect
) {
159 radeon
->fboRect
.x1
= 0;
160 radeon
->fboRect
.y1
= 0;
161 radeon
->fboRect
.x2
= radeon
->glCtx
->DrawBuffer
->Width
;
162 radeon
->fboRect
.y2
= radeon
->glCtx
->DrawBuffer
->Height
;
164 *cliprects
= &radeon
->fboRect
;
168 } else if (radeon
->front_cliprects
||
169 rfb
->pf_active
|| dPriv
->numBackClipRects
== 0) {
170 *cliprects
= dPriv
->pClipRects
;
171 *num_cliprects
= dPriv
->numClipRects
;
175 *num_cliprects
= dPriv
->numBackClipRects
;
176 *cliprects
= dPriv
->pBackClipRects
;
177 *x_off
= dPriv
->backX
;
178 *y_off
= dPriv
->backY
;
183 * Update cliprects and scissors.
185 void radeonSetCliprects(radeonContextPtr radeon
)
187 __DRIdrawablePrivate
*const drawable
= radeon
->dri
.drawable
;
188 __DRIdrawablePrivate
*const readable
= radeon
->dri
.readable
;
189 struct radeon_framebuffer
*const draw_rfb
= drawable
->driverPrivate
;
190 struct radeon_framebuffer
*const read_rfb
= readable
->driverPrivate
;
193 radeon_get_cliprects(radeon
, &radeon
->pClipRects
,
194 &radeon
->numClipRects
, &x_off
, &y_off
);
196 if ((draw_rfb
->base
.Width
!= drawable
->w
) ||
197 (draw_rfb
->base
.Height
!= drawable
->h
)) {
198 _mesa_resize_framebuffer(radeon
->glCtx
, &draw_rfb
->base
,
199 drawable
->w
, drawable
->h
);
200 draw_rfb
->base
.Initialized
= GL_TRUE
;
203 if (drawable
!= readable
) {
204 if ((read_rfb
->base
.Width
!= readable
->w
) ||
205 (read_rfb
->base
.Height
!= readable
->h
)) {
206 _mesa_resize_framebuffer(radeon
->glCtx
, &read_rfb
->base
,
207 readable
->w
, readable
->h
);
208 read_rfb
->base
.Initialized
= GL_TRUE
;
212 if (radeon
->state
.scissor
.enabled
)
213 radeonRecalcScissorRects(radeon
);
219 void radeonUpdateScissor( GLcontext
*ctx
)
221 radeonContextPtr rmesa
= RADEON_CONTEXT(ctx
);
223 if ( rmesa
->dri
.drawable
) {
224 __DRIdrawablePrivate
*dPriv
= rmesa
->dri
.drawable
;
226 int x
= ctx
->Scissor
.X
;
227 int y
= dPriv
->h
- ctx
->Scissor
.Y
- ctx
->Scissor
.Height
;
228 int w
= ctx
->Scissor
.X
+ ctx
->Scissor
.Width
- 1;
229 int h
= dPriv
->h
- ctx
->Scissor
.Y
- 1;
231 rmesa
->state
.scissor
.rect
.x1
= x
+ dPriv
->x
;
232 rmesa
->state
.scissor
.rect
.y1
= y
+ dPriv
->y
;
233 rmesa
->state
.scissor
.rect
.x2
= w
+ dPriv
->x
+ 1;
234 rmesa
->state
.scissor
.rect
.y2
= h
+ dPriv
->y
+ 1;
236 radeonRecalcScissorRects( rmesa
);
240 /* =============================================================
244 void radeonScissor(GLcontext
* ctx
, GLint x
, GLint y
, GLsizei w
, GLsizei h
)
246 radeonContextPtr radeon
= RADEON_CONTEXT(ctx
);
247 if (ctx
->Scissor
.Enabled
) {
248 /* We don't pipeline cliprect changes */
249 radeon_firevertices(radeon
);
250 radeonUpdateScissor(ctx
);
255 /* ================================================================
256 * SwapBuffers with client-side throttling
259 static uint32_t radeonGetLastFrame(radeonContextPtr radeon
)
261 drm_radeon_getparam_t gp
;
265 gp
.param
= RADEON_PARAM_LAST_FRAME
;
266 gp
.value
= (int *)&frame
;
267 ret
= drmCommandWriteRead(radeon
->dri
.fd
, DRM_RADEON_GETPARAM
,
270 fprintf(stderr
, "%s: drmRadeonGetParam: %d\n", __FUNCTION__
,
278 uint32_t radeonGetAge(radeonContextPtr radeon
)
280 drm_radeon_getparam_t gp
;
284 gp
.param
= RADEON_PARAM_LAST_CLEAR
;
285 gp
.value
= (int *)&age
;
286 ret
= drmCommandWriteRead(radeon
->dri
.fd
, DRM_RADEON_GETPARAM
,
289 fprintf(stderr
, "%s: drmRadeonGetParam: %d\n", __FUNCTION__
,
297 static void radeonEmitIrqLocked(radeonContextPtr radeon
)
299 drm_radeon_irq_emit_t ie
;
302 ie
.irq_seq
= &radeon
->iw
.irq_seq
;
303 ret
= drmCommandWriteRead(radeon
->dri
.fd
, DRM_RADEON_IRQ_EMIT
,
306 fprintf(stderr
, "%s: drmRadeonIrqEmit: %d\n", __FUNCTION__
,
312 static void radeonWaitIrq(radeonContextPtr radeon
)
317 ret
= drmCommandWrite(radeon
->dri
.fd
, DRM_RADEON_IRQ_WAIT
,
318 &radeon
->iw
, sizeof(radeon
->iw
));
319 } while (ret
&& (errno
== EINTR
|| errno
== EBUSY
));
322 fprintf(stderr
, "%s: drmRadeonIrqWait: %d\n", __FUNCTION__
,
328 static void radeonWaitForFrameCompletion(radeonContextPtr radeon
)
330 drm_radeon_sarea_t
*sarea
= radeon
->sarea
;
332 if (radeon
->do_irqs
) {
333 if (radeonGetLastFrame(radeon
) < sarea
->last_frame
) {
334 if (!radeon
->irqsEmitted
) {
335 while (radeonGetLastFrame(radeon
) <
338 UNLOCK_HARDWARE(radeon
);
339 radeonWaitIrq(radeon
);
340 LOCK_HARDWARE(radeon
);
342 radeon
->irqsEmitted
= 10;
345 if (radeon
->irqsEmitted
) {
346 radeonEmitIrqLocked(radeon
);
347 radeon
->irqsEmitted
--;
350 while (radeonGetLastFrame(radeon
) < sarea
->last_frame
) {
351 UNLOCK_HARDWARE(radeon
);
352 if (radeon
->do_usleeps
)
354 LOCK_HARDWARE(radeon
);
360 void radeonWaitForIdleLocked(radeonContextPtr radeon
)
366 ret
= drmCommandNone(radeon
->dri
.fd
, DRM_RADEON_CP_IDLE
);
369 } while (ret
&& ++i
< 100);
372 UNLOCK_HARDWARE(radeon
);
373 fprintf(stderr
, "Error: R300 timed out... exiting\n");
378 static void radeonWaitForIdle(radeonContextPtr radeon
)
380 if (!radeon
->radeonScreen
->driScreen
->dri2
.enabled
) {
381 LOCK_HARDWARE(radeon
);
382 radeonWaitForIdleLocked(radeon
);
383 UNLOCK_HARDWARE(radeon
);
387 static void radeon_flip_renderbuffers(struct radeon_framebuffer
*rfb
)
389 int current_page
= rfb
->pf_current_page
;
390 int next_page
= (current_page
+ 1) % rfb
->pf_num_pages
;
391 struct gl_renderbuffer
*tmp_rb
;
393 /* Exchange renderbuffers if necessary but make sure their
394 * reference counts are preserved.
396 if (rfb
->color_rb
[current_page
] &&
397 rfb
->base
.Attachment
[BUFFER_FRONT_LEFT
].Renderbuffer
!=
398 &rfb
->color_rb
[current_page
]->base
) {
400 _mesa_reference_renderbuffer(&tmp_rb
,
401 rfb
->base
.Attachment
[BUFFER_FRONT_LEFT
].Renderbuffer
);
402 tmp_rb
= &rfb
->color_rb
[current_page
]->base
;
403 _mesa_reference_renderbuffer(&rfb
->base
.Attachment
[BUFFER_FRONT_LEFT
].Renderbuffer
, tmp_rb
);
404 _mesa_reference_renderbuffer(&tmp_rb
, NULL
);
407 if (rfb
->color_rb
[next_page
] &&
408 rfb
->base
.Attachment
[BUFFER_BACK_LEFT
].Renderbuffer
!=
409 &rfb
->color_rb
[next_page
]->base
) {
411 _mesa_reference_renderbuffer(&tmp_rb
,
412 rfb
->base
.Attachment
[BUFFER_BACK_LEFT
].Renderbuffer
);
413 tmp_rb
= &rfb
->color_rb
[next_page
]->base
;
414 _mesa_reference_renderbuffer(&rfb
->base
.Attachment
[BUFFER_BACK_LEFT
].Renderbuffer
, tmp_rb
);
415 _mesa_reference_renderbuffer(&tmp_rb
, NULL
);
419 /* Copy the back color buffer to the front color buffer.
421 void radeonCopyBuffer( __DRIdrawablePrivate
*dPriv
,
422 const drm_clip_rect_t
*rect
)
424 radeonContextPtr rmesa
;
425 struct radeon_framebuffer
*rfb
;
429 assert(dPriv
->driContextPriv
);
430 assert(dPriv
->driContextPriv
->driverPrivate
);
432 rmesa
= (radeonContextPtr
) dPriv
->driContextPriv
->driverPrivate
;
434 LOCK_HARDWARE(rmesa
);
436 rfb
= dPriv
->driverPrivate
;
438 if ( RADEON_DEBUG
& DEBUG_IOCTL
) {
439 fprintf( stderr
, "\n%s( %p )\n\n", __FUNCTION__
, (void *) rmesa
->glCtx
);
442 nbox
= dPriv
->numClipRects
; /* must be in locked region */
444 for ( i
= 0 ; i
< nbox
; ) {
445 GLint nr
= MIN2( i
+ RADEON_NR_SAREA_CLIPRECTS
, nbox
);
446 drm_clip_rect_t
*box
= dPriv
->pClipRects
;
447 drm_clip_rect_t
*b
= rmesa
->sarea
->boxes
;
450 for ( ; i
< nr
; i
++ ) {
456 if (rect
->x1
> b
->x1
)
458 if (rect
->y1
> b
->y1
)
460 if (rect
->x2
< b
->x2
)
462 if (rect
->y2
< b
->y2
)
465 if (b
->x1
>= b
->x2
|| b
->y1
>= b
->y2
)
472 rmesa
->sarea
->nbox
= n
;
477 ret
= drmCommandNone( rmesa
->dri
.fd
, DRM_RADEON_SWAP
);
480 fprintf( stderr
, "DRM_RADEON_SWAP_BUFFERS: return = %d\n", ret
);
481 UNLOCK_HARDWARE( rmesa
);
486 UNLOCK_HARDWARE( rmesa
);
489 static int radeonScheduleSwap(__DRIdrawablePrivate
*dPriv
, GLboolean
*missed_target
)
491 radeonContextPtr rmesa
;
493 rmesa
= (radeonContextPtr
) dPriv
->driContextPriv
->driverPrivate
;
494 radeon_firevertices(rmesa
);
496 LOCK_HARDWARE( rmesa
);
498 if (!dPriv
->numClipRects
) {
499 UNLOCK_HARDWARE(rmesa
);
500 usleep(10000); /* throttle invisible client 10ms */
504 radeonWaitForFrameCompletion(rmesa
);
506 UNLOCK_HARDWARE(rmesa
);
507 driWaitForVBlank(dPriv
, missed_target
);
512 static GLboolean
radeonPageFlip( __DRIdrawablePrivate
*dPriv
)
514 radeonContextPtr radeon
;
516 __DRIscreenPrivate
*psp
;
517 struct radeon_renderbuffer
*rrb
;
518 struct radeon_framebuffer
*rfb
;
521 assert(dPriv
->driContextPriv
);
522 assert(dPriv
->driContextPriv
->driverPrivate
);
524 radeon
= (radeonContextPtr
) dPriv
->driContextPriv
->driverPrivate
;
525 rfb
= dPriv
->driverPrivate
;
526 rrb
= (void *)rfb
->base
.Attachment
[BUFFER_FRONT_LEFT
].Renderbuffer
;
528 psp
= dPriv
->driScreenPriv
;
530 LOCK_HARDWARE(radeon
);
532 if ( RADEON_DEBUG
& DEBUG_IOCTL
) {
533 fprintf(stderr
, "%s: pfCurrentPage: %d %d\n", __FUNCTION__
,
534 radeon
->sarea
->pfCurrentPage
, radeon
->sarea
->pfState
);
536 drm_clip_rect_t
*box
= dPriv
->pClipRects
;
537 drm_clip_rect_t
*b
= radeon
->sarea
->boxes
;
539 radeon
->sarea
->nbox
= 1;
541 ret
= drmCommandNone( radeon
->dri
.fd
, DRM_RADEON_FLIP
);
543 UNLOCK_HARDWARE(radeon
);
546 fprintf( stderr
, "DRM_RADEON_FLIP: return = %d\n", ret
);
553 rfb
->pf_current_page
= radeon
->sarea
->pfCurrentPage
;
554 radeon_flip_renderbuffers(rfb
);
555 radeon_draw_buffer(radeon
->glCtx
, &rfb
->base
);
562 * Swap front and back buffer.
564 void radeonSwapBuffers(__DRIdrawablePrivate
* dPriv
)
567 __DRIscreenPrivate
*psp
;
569 if (dPriv
->driContextPriv
&& dPriv
->driContextPriv
->driverPrivate
) {
570 radeonContextPtr radeon
;
573 radeon
= (radeonContextPtr
) dPriv
->driContextPriv
->driverPrivate
;
576 if (ctx
->Visual
.doubleBufferMode
) {
577 GLboolean missed_target
;
578 struct radeon_framebuffer
*rfb
= dPriv
->driverPrivate
;
579 _mesa_notifySwapBuffers(ctx
);/* flush pending rendering comands */
581 radeonScheduleSwap(dPriv
, &missed_target
);
583 if (rfb
->pf_active
) {
584 radeonPageFlip(dPriv
);
586 radeonCopyBuffer(dPriv
, NULL
);
589 psp
= dPriv
->driScreenPriv
;
592 (*psp
->systemTime
->getUST
)( & ust
);
593 if ( missed_target
) {
594 rfb
->swap_missed_count
++;
595 rfb
->swap_missed_ust
= ust
- rfb
->swap_ust
;
599 radeon
->hw
.all_dirty
= GL_TRUE
;
602 /* XXX this shouldn't be an error but we can't handle it for now */
603 _mesa_problem(NULL
, "%s: drawable has no context!",
608 void radeonCopySubBuffer(__DRIdrawablePrivate
* dPriv
,
609 int x
, int y
, int w
, int h
)
611 if (dPriv
->driContextPriv
&& dPriv
->driContextPriv
->driverPrivate
) {
612 radeonContextPtr radeon
;
615 radeon
= (radeonContextPtr
) dPriv
->driContextPriv
->driverPrivate
;
618 if (ctx
->Visual
.doubleBufferMode
) {
619 drm_clip_rect_t rect
;
620 rect
.x1
= x
+ dPriv
->x
;
621 rect
.y1
= (dPriv
->h
- y
- h
) + dPriv
->y
;
622 rect
.x2
= rect
.x1
+ w
;
623 rect
.y2
= rect
.y1
+ h
;
624 _mesa_notifySwapBuffers(ctx
); /* flush pending rendering comands */
625 radeonCopyBuffer(dPriv
, &rect
);
628 /* XXX this shouldn't be an error but we can't handle it for now */
629 _mesa_problem(NULL
, "%s: drawable has no context!",
634 void radeon_draw_buffer(GLcontext
*ctx
, struct gl_framebuffer
*fb
)
636 radeonContextPtr radeon
= RADEON_CONTEXT(ctx
);
637 struct radeon_renderbuffer
*rrbDepth
= NULL
, *rrbStencil
= NULL
,
643 /* this can happen during the initial context initialization */
647 /* radeons only handle 1 color draw so far */
648 if (fb
->_NumColorDrawBuffers
!= 1) {
649 radeon
->vtbl
.fallback(ctx
, RADEON_FALLBACK_DRAW_BUFFER
, GL_TRUE
);
653 /* Do this here, note core Mesa, since this function is called from
654 * many places within the driver.
656 if (ctx
->NewState
& (_NEW_BUFFERS
| _NEW_COLOR
| _NEW_PIXEL
)) {
657 /* this updates the DrawBuffer->_NumColorDrawBuffers fields, etc */
658 _mesa_update_framebuffer(ctx
);
659 /* this updates the DrawBuffer's Width/Height if it's a FBO */
660 _mesa_update_draw_buffer_bounds(ctx
);
663 if (fb
->_Status
!= GL_FRAMEBUFFER_COMPLETE_EXT
) {
664 /* this may occur when we're called by glBindFrameBuffer() during
665 * the process of someone setting up renderbuffers, etc.
667 /*_mesa_debug(ctx, "DrawBuffer: incomplete user FBO\n");*/
672 ;/* do something depthy/stencily TODO */
677 if (fb
->_ColorDrawBufferIndexes
[0] == BUFFER_FRONT_LEFT
) {
678 rrbColor
= radeon_renderbuffer(fb
->Attachment
[BUFFER_FRONT_LEFT
].Renderbuffer
);
679 radeon
->front_cliprects
= GL_TRUE
;
681 rrbColor
= radeon_renderbuffer(fb
->Attachment
[BUFFER_BACK_LEFT
].Renderbuffer
);
682 radeon
->front_cliprects
= GL_FALSE
;
685 /* user FBO in theory */
686 struct radeon_renderbuffer
*rrb
;
687 rrb
= radeon_renderbuffer(fb
->_ColorDrawBuffers
[0]);
689 offset
= rrb
->draw_offset
;
692 radeon
->constant_cliprect
= GL_TRUE
;
695 if (rrbColor
== NULL
)
696 radeon
->vtbl
.fallback(ctx
, RADEON_FALLBACK_DRAW_BUFFER
, GL_TRUE
);
698 radeon
->vtbl
.fallback(ctx
, RADEON_FALLBACK_DRAW_BUFFER
, GL_FALSE
);
701 if (fb
->_DepthBuffer
&& fb
->_DepthBuffer
->Wrapped
) {
702 rrbDepth
= radeon_renderbuffer(fb
->_DepthBuffer
->Wrapped
);
703 if (rrbDepth
&& rrbDepth
->bo
) {
704 radeon
->vtbl
.fallback(ctx
, RADEON_FALLBACK_DEPTH_BUFFER
, GL_FALSE
);
706 radeon
->vtbl
.fallback(ctx
, RADEON_FALLBACK_DEPTH_BUFFER
, GL_TRUE
);
709 radeon
->vtbl
.fallback(ctx
, RADEON_FALLBACK_DEPTH_BUFFER
, GL_FALSE
);
713 if (fb
->_StencilBuffer
&& fb
->_StencilBuffer
->Wrapped
) {
714 rrbStencil
= radeon_renderbuffer(fb
->_DepthBuffer
->Wrapped
);
715 if (rrbStencil
&& rrbStencil
->bo
) {
716 radeon
->vtbl
.fallback(ctx
, RADEON_FALLBACK_STENCIL_BUFFER
, GL_FALSE
);
717 /* need to re-compute stencil hw state */
719 rrbDepth
= rrbStencil
;
721 radeon
->vtbl
.fallback(ctx
, RADEON_FALLBACK_STENCIL_BUFFER
, GL_TRUE
);
724 radeon
->vtbl
.fallback(ctx
, RADEON_FALLBACK_STENCIL_BUFFER
, GL_FALSE
);
725 if (ctx
->Driver
.Enable
!= NULL
)
726 ctx
->Driver
.Enable(ctx
, GL_STENCIL_TEST
, ctx
->Stencil
.Enabled
);
728 ctx
->NewState
|= _NEW_STENCIL
;
731 /* Update culling direction which changes depending on the
732 * orientation of the buffer:
734 if (ctx
->Driver
.FrontFace
)
735 ctx
->Driver
.FrontFace(ctx
, ctx
->Polygon
.FrontFace
);
737 ctx
->NewState
|= _NEW_POLYGON
;
740 * Update depth test state
742 if (ctx
->Driver
.Enable
) {
743 ctx
->Driver
.Enable(ctx
, GL_DEPTH_TEST
,
744 (ctx
->Depth
.Test
&& fb
->Visual
.depthBits
> 0));
745 ctx
->Driver
.Enable(ctx
, GL_STENCIL_TEST
,
746 (ctx
->Stencil
._Enabled
&& fb
->Visual
.stencilBits
> 0));
748 ctx
->NewState
|= (_NEW_DEPTH
| _NEW_STENCIL
);
751 radeon
->state
.depth
.rrb
= rrbDepth
;
752 radeon
->state
.color
.rrb
= rrbColor
;
753 radeon
->state
.color
.draw_offset
= offset
;
756 /* update viewport since it depends on window size */
757 if (ctx
->Driver
.Viewport
) {
758 ctx
->Driver
.Viewport(ctx
, ctx
->Viewport
.X
, ctx
->Viewport
.Y
,
759 ctx
->Viewport
.Width
, ctx
->Viewport
.Height
);
764 ctx
->NewState
|= _NEW_VIEWPORT
;
766 /* Set state we know depends on drawable parameters:
768 if (ctx
->Driver
.Scissor
)
769 ctx
->Driver
.Scissor(ctx
, ctx
->Scissor
.X
, ctx
->Scissor
.Y
,
770 ctx
->Scissor
.Width
, ctx
->Scissor
.Height
);
771 radeon
->NewGLState
|= _NEW_SCISSOR
;
773 if (ctx
->Driver
.DepthRange
)
774 ctx
->Driver
.DepthRange(ctx
,
778 /* Update culling direction which changes depending on the
779 * orientation of the buffer:
781 if (ctx
->Driver
.FrontFace
)
782 ctx
->Driver
.FrontFace(ctx
, ctx
->Polygon
.FrontFace
);
784 ctx
->NewState
|= _NEW_POLYGON
;
788 * Called via glDrawBuffer.
790 void radeonDrawBuffer( GLcontext
*ctx
, GLenum mode
)
792 radeonContextPtr radeon
= RADEON_CONTEXT(ctx
);
794 if (RADEON_DEBUG
& DEBUG_DRI
)
795 fprintf(stderr
, "%s %s\n", __FUNCTION__
,
796 _mesa_lookup_enum_by_nr( mode
));
798 radeon_firevertices(radeon
); /* don't pipeline cliprect changes */
800 radeon_draw_buffer(ctx
, ctx
->DrawBuffer
);
803 void radeonReadBuffer( GLcontext
*ctx
, GLenum mode
)
805 /* nothing, until we implement h/w glRead/CopyPixels or CopyTexImage */
806 if (ctx
->ReadBuffer
== ctx
->DrawBuffer
) {
807 /* This will update FBO completeness status.
808 * A framebuffer will be incomplete if the GL_READ_BUFFER setting
809 * refers to a missing renderbuffer. Calling glReadBuffer can set
810 * that straight and can make the drawing buffer complete.
812 radeon_draw_buffer(ctx
, ctx
->DrawBuffer
);
817 /* Turn on/off page flipping according to the flags in the sarea:
819 void radeonUpdatePageFlipping(radeonContextPtr radeon
)
821 struct radeon_framebuffer
*rfb
= radeon
->dri
.drawable
->driverPrivate
;
823 rfb
->pf_active
= radeon
->sarea
->pfState
;
824 rfb
->pf_current_page
= radeon
->sarea
->pfCurrentPage
;
825 rfb
->pf_num_pages
= 2;
826 radeon_flip_renderbuffers(rfb
);
827 radeon_draw_buffer(radeon
->glCtx
, radeon
->glCtx
->DrawBuffer
);
830 void radeon_window_moved(radeonContextPtr radeon
)
832 if (!radeon
->radeonScreen
->driScreen
->dri2
.enabled
) {
833 radeonUpdatePageFlipping(radeon
);
835 radeonSetCliprects(radeon
);
838 void radeon_viewport(GLcontext
*ctx
, GLint x
, GLint y
, GLsizei width
, GLsizei height
)
840 radeonContextPtr radeon
= RADEON_CONTEXT(ctx
);
841 __DRIcontext
*driContext
= radeon
->dri
.context
;
842 void (*old_viewport
)(GLcontext
*ctx
, GLint x
, GLint y
,
843 GLsizei w
, GLsizei h
);
845 if (!driContext
->driScreenPriv
->dri2
.enabled
)
848 radeon_update_renderbuffers(driContext
, driContext
->driDrawablePriv
);
849 if (driContext
->driDrawablePriv
!= driContext
->driReadablePriv
)
850 radeon_update_renderbuffers(driContext
, driContext
->driReadablePriv
);
852 old_viewport
= ctx
->Driver
.Viewport
;
853 ctx
->Driver
.Viewport
= NULL
;
854 radeon
->dri
.drawable
= driContext
->driDrawablePriv
;
855 radeon_window_moved(radeon
);
856 radeon_draw_buffer(ctx
, radeon
->glCtx
->DrawBuffer
);
857 ctx
->Driver
.Viewport
= old_viewport
;
861 static void radeon_print_state_atom(radeonContextPtr radeon
, struct radeon_state_atom
*state
)
864 int dwords
= (*state
->check
)(radeon
->glCtx
, state
);
866 fprintf(stderr
, "emit %s %d/%d\n", state
->name
, state
->cmd_size
, dwords
);
868 if (RADEON_DEBUG
& DEBUG_VERBOSE
)
869 for (i
= 0 ; i
< dwords
; i
++)
870 fprintf(stderr
, "\t%s[%d]: %x\n", state
->name
, i
, state
->cmd
[i
]);
874 static INLINE
void radeonEmitAtoms(radeonContextPtr radeon
, GLboolean dirty
)
876 BATCH_LOCALS(radeon
);
877 struct radeon_state_atom
*atom
;
880 if (radeon
->vtbl
.pre_emit_atoms
)
881 radeon
->vtbl
.pre_emit_atoms(radeon
);
883 /* Emit actual atoms */
884 foreach(atom
, &radeon
->hw
.atomlist
) {
885 if ((atom
->dirty
|| radeon
->hw
.all_dirty
) == dirty
) {
886 dwords
= (*atom
->check
) (radeon
->glCtx
, atom
);
888 if (DEBUG_CMDBUF
&& RADEON_DEBUG
& DEBUG_STATE
) {
889 radeon_print_state_atom(radeon
, atom
);
892 (*atom
->emit
)(radeon
->glCtx
, atom
);
894 BEGIN_BATCH_NO_AUTOSTATE(dwords
);
895 OUT_BATCH_TABLE(atom
->cmd
, dwords
);
898 atom
->dirty
= GL_FALSE
;
900 if (DEBUG_CMDBUF
&& RADEON_DEBUG
& DEBUG_STATE
) {
901 fprintf(stderr
, " skip state %s\n",
911 GLboolean
radeon_revalidate_bos(GLcontext
*ctx
)
913 radeonContextPtr radeon
= RADEON_CONTEXT(ctx
);
917 ret
= radeon_cs_space_check(radeon
->cmdbuf
.cs
, radeon
->state
.bos
, radeon
->state
.validated_bo_count
);
918 if (ret
== RADEON_CS_SPACE_OP_TO_BIG
)
920 if (ret
== RADEON_CS_SPACE_FLUSH
) {
930 void radeon_validate_reset_bos(radeonContextPtr radeon
)
934 for (i
= 0; i
< radeon
->state
.validated_bo_count
; i
++) {
935 radeon
->state
.bos
[i
].bo
= NULL
;
936 radeon
->state
.bos
[i
].read_domains
= 0;
937 radeon
->state
.bos
[i
].write_domain
= 0;
938 radeon
->state
.bos
[i
].new_accounted
= 0;
940 radeon
->state
.validated_bo_count
= 0;
943 void radeon_validate_bo(radeonContextPtr radeon
, struct radeon_bo
*bo
, uint32_t read_domains
, uint32_t write_domain
)
945 radeon
->state
.bos
[radeon
->state
.validated_bo_count
].bo
= bo
;
946 radeon
->state
.bos
[radeon
->state
.validated_bo_count
].read_domains
= read_domains
;
947 radeon
->state
.bos
[radeon
->state
.validated_bo_count
].write_domain
= write_domain
;
948 radeon
->state
.bos
[radeon
->state
.validated_bo_count
].new_accounted
= 0;
949 radeon
->state
.validated_bo_count
++;
951 assert(radeon
->state
.validated_bo_count
< RADEON_MAX_BOS
);
954 void radeonEmitState(radeonContextPtr radeon
)
956 if (RADEON_DEBUG
& (DEBUG_STATE
|DEBUG_PRIMS
))
957 fprintf(stderr
, "%s\n", __FUNCTION__
);
959 if (radeon
->vtbl
.pre_emit_state
)
960 radeon
->vtbl
.pre_emit_state(radeon
);
962 /* this code used to return here but now it emits zbs */
963 if (radeon
->cmdbuf
.cs
->cdw
&& !radeon
->hw
.is_dirty
&& !radeon
->hw
.all_dirty
)
966 /* To avoid going across the entire set of states multiple times, just check
967 * for enough space for the case of emitting all state, and inline the
968 * radeonAllocCmdBuf code here without all the checks.
970 rcommonEnsureCmdBufSpace(radeon
, radeon
->hw
.max_state_size
, __FUNCTION__
);
972 if (!radeon
->cmdbuf
.cs
->cdw
) {
973 if (RADEON_DEBUG
& DEBUG_STATE
)
974 fprintf(stderr
, "Begin reemit state\n");
976 radeonEmitAtoms(radeon
, GL_FALSE
);
979 if (RADEON_DEBUG
& DEBUG_STATE
)
980 fprintf(stderr
, "Begin dirty state\n");
982 radeonEmitAtoms(radeon
, GL_TRUE
);
983 radeon
->hw
.is_dirty
= GL_FALSE
;
984 radeon
->hw
.all_dirty
= GL_FALSE
;
989 void radeonFlush(GLcontext
*ctx
)
991 radeonContextPtr radeon
= RADEON_CONTEXT(ctx
);
992 if (RADEON_DEBUG
& DEBUG_IOCTL
)
993 fprintf(stderr
, "%s %d\n", __FUNCTION__
, radeon
->cmdbuf
.cs
->cdw
);
995 /* okay if we have no cmds in the buffer &&
996 we have no DMA flush &&
997 we have no DMA buffer allocated.
998 then no point flushing anything at all.
1000 if (!radeon
->dma
.flush
&& !radeon
->cmdbuf
.cs
->cdw
&& !radeon
->dma
.current
)
1003 if (radeon
->dma
.flush
)
1004 radeon
->dma
.flush( ctx
);
1006 radeonEmitState(radeon
);
1008 if (radeon
->cmdbuf
.cs
->cdw
)
1009 rcommonFlushCmdBuf(radeon
, __FUNCTION__
);
1012 /* Make sure all commands have been sent to the hardware and have
1013 * completed processing.
1015 void radeonFinish(GLcontext
* ctx
)
1017 radeonContextPtr radeon
= RADEON_CONTEXT(ctx
);
1018 struct gl_framebuffer
*fb
= ctx
->DrawBuffer
;
1023 if (radeon
->radeonScreen
->kernel_mm
) {
1024 for (i
= 0; i
< fb
->_NumColorDrawBuffers
; i
++) {
1025 struct radeon_renderbuffer
*rrb
;
1026 rrb
= radeon_renderbuffer(fb
->_ColorDrawBuffers
[i
]);
1028 radeon_bo_wait(rrb
->bo
);
1031 struct radeon_renderbuffer
*rrb
;
1032 rrb
= radeon_get_depthbuffer(radeon
);
1034 radeon_bo_wait(rrb
->bo
);
1036 } else if (radeon
->do_irqs
) {
1037 LOCK_HARDWARE(radeon
);
1038 radeonEmitIrqLocked(radeon
);
1039 UNLOCK_HARDWARE(radeon
);
1040 radeonWaitIrq(radeon
);
1042 radeonWaitForIdle(radeon
);
1048 * Send the current command buffer via ioctl to the hardware.
1050 int rcommonFlushCmdBufLocked(radeonContextPtr rmesa
, const char *caller
)
1054 if (rmesa
->cmdbuf
.flushing
) {
1055 fprintf(stderr
, "Recursive call into r300FlushCmdBufLocked!\n");
1058 rmesa
->cmdbuf
.flushing
= 1;
1060 if (RADEON_DEBUG
& DEBUG_IOCTL
) {
1061 fprintf(stderr
, "%s from %s - %i cliprects\n",
1062 __FUNCTION__
, caller
, rmesa
->numClipRects
);
1065 if (rmesa
->cmdbuf
.cs
->cdw
) {
1066 ret
= radeon_cs_emit(rmesa
->cmdbuf
.cs
);
1067 rmesa
->hw
.all_dirty
= GL_TRUE
;
1069 radeon_cs_erase(rmesa
->cmdbuf
.cs
);
1070 rmesa
->cmdbuf
.flushing
= 0;
1072 if (radeon_revalidate_bos(rmesa
->glCtx
) == GL_FALSE
) {
1073 fprintf(stderr
,"failed to revalidate buffers\n");
1079 int rcommonFlushCmdBuf(radeonContextPtr rmesa
, const char *caller
)
1083 radeonReleaseDmaRegion(rmesa
);
1085 LOCK_HARDWARE(rmesa
);
1086 ret
= rcommonFlushCmdBufLocked(rmesa
, caller
);
1087 UNLOCK_HARDWARE(rmesa
);
1090 fprintf(stderr
, "drmRadeonCmdBuffer: %d\n", ret
);
1098 * Make sure that enough space is available in the command buffer
1099 * by flushing if necessary.
1101 * \param dwords The number of dwords we need to be free on the command buffer
1103 void rcommonEnsureCmdBufSpace(radeonContextPtr rmesa
, int dwords
, const char *caller
)
1105 if ((rmesa
->cmdbuf
.cs
->cdw
+ dwords
+ 128) > rmesa
->cmdbuf
.size
||
1106 radeon_cs_need_flush(rmesa
->cmdbuf
.cs
)) {
1107 rcommonFlushCmdBuf(rmesa
, caller
);
1111 void rcommonInitCmdBuf(radeonContextPtr rmesa
)
1114 /* Initialize command buffer */
1115 size
= 256 * driQueryOptioni(&rmesa
->optionCache
,
1116 "command_buffer_size");
1117 if (size
< 2 * rmesa
->hw
.max_state_size
) {
1118 size
= 2 * rmesa
->hw
.max_state_size
+ 65535;
1120 if (size
> 64 * 256)
1123 if (RADEON_DEBUG
& (DEBUG_IOCTL
| DEBUG_DMA
)) {
1124 fprintf(stderr
, "sizeof(drm_r300_cmd_header_t)=%zd\n",
1125 sizeof(drm_r300_cmd_header_t
));
1126 fprintf(stderr
, "sizeof(drm_radeon_cmd_buffer_t)=%zd\n",
1127 sizeof(drm_radeon_cmd_buffer_t
));
1129 "Allocating %d bytes command buffer (max state is %d bytes)\n",
1130 size
* 4, rmesa
->hw
.max_state_size
* 4);
1133 if (rmesa
->radeonScreen
->kernel_mm
) {
1134 int fd
= rmesa
->radeonScreen
->driScreen
->fd
;
1135 rmesa
->cmdbuf
.csm
= radeon_cs_manager_gem_ctor(fd
);
1137 rmesa
->cmdbuf
.csm
= radeon_cs_manager_legacy_ctor(rmesa
);
1139 if (rmesa
->cmdbuf
.csm
== NULL
) {
1140 /* FIXME: fatal error */
1143 rmesa
->cmdbuf
.cs
= radeon_cs_create(rmesa
->cmdbuf
.csm
, size
);
1144 assert(rmesa
->cmdbuf
.cs
!= NULL
);
1145 rmesa
->cmdbuf
.size
= size
;
1147 if (!rmesa
->radeonScreen
->kernel_mm
) {
1148 radeon_cs_set_limit(rmesa
->cmdbuf
.cs
, RADEON_GEM_DOMAIN_VRAM
, rmesa
->radeonScreen
->texSize
[0]);
1149 radeon_cs_set_limit(rmesa
->cmdbuf
.cs
, RADEON_GEM_DOMAIN_GTT
, rmesa
->radeonScreen
->gartTextures
.size
);
1151 struct drm_radeon_gem_info mminfo
;
1153 if (!drmCommandWriteRead(rmesa
->dri
.fd
, DRM_RADEON_GEM_INFO
, &mminfo
, sizeof(mminfo
)))
1155 radeon_cs_set_limit(rmesa
->cmdbuf
.cs
, RADEON_GEM_DOMAIN_VRAM
, mminfo
.vram_visible
);
1156 radeon_cs_set_limit(rmesa
->cmdbuf
.cs
, RADEON_GEM_DOMAIN_GTT
, mminfo
.gart_size
);
1162 * Destroy the command buffer
1164 void rcommonDestroyCmdBuf(radeonContextPtr rmesa
)
1166 radeon_cs_destroy(rmesa
->cmdbuf
.cs
);
1167 if (rmesa
->radeonScreen
->driScreen
->dri2
.enabled
|| rmesa
->radeonScreen
->kernel_mm
) {
1168 radeon_cs_manager_gem_dtor(rmesa
->cmdbuf
.csm
);
1170 radeon_cs_manager_legacy_dtor(rmesa
->cmdbuf
.csm
);
1174 void rcommonBeginBatch(radeonContextPtr rmesa
, int n
,
1177 const char *function
,
1180 rcommonEnsureCmdBufSpace(rmesa
, n
, function
);
1181 if (!rmesa
->cmdbuf
.cs
->cdw
&& dostate
) {
1182 if (RADEON_DEBUG
& DEBUG_IOCTL
)
1183 fprintf(stderr
, "Reemit state after flush (from %s)\n", function
);
1184 radeonEmitState(rmesa
);
1186 radeon_cs_begin(rmesa
->cmdbuf
.cs
, n
, file
, function
, line
);
1188 if (DEBUG_CMDBUF
&& RADEON_DEBUG
& DEBUG_IOCTL
)
1189 fprintf(stderr
, "BEGIN_BATCH(%d) at %d, from %s:%i\n",
1190 n
, rmesa
->cmdbuf
.cs
->cdw
, function
, line
);
1197 radeon_meta_set_passthrough_transform(radeonContextPtr radeon
)
1199 GLcontext
*ctx
= radeon
->glCtx
;
1201 radeon
->meta
.saved_vp_x
= ctx
->Viewport
.X
;
1202 radeon
->meta
.saved_vp_y
= ctx
->Viewport
.Y
;
1203 radeon
->meta
.saved_vp_width
= ctx
->Viewport
.Width
;
1204 radeon
->meta
.saved_vp_height
= ctx
->Viewport
.Height
;
1205 radeon
->meta
.saved_matrix_mode
= ctx
->Transform
.MatrixMode
;
1207 _mesa_Viewport(0, 0, ctx
->DrawBuffer
->Width
, ctx
->DrawBuffer
->Height
);
1209 _mesa_MatrixMode(GL_PROJECTION
);
1211 _mesa_LoadIdentity();
1212 _mesa_Ortho(0, ctx
->DrawBuffer
->Width
, 0, ctx
->DrawBuffer
->Height
, 1, -1);
1214 _mesa_MatrixMode(GL_MODELVIEW
);
1216 _mesa_LoadIdentity();
1220 radeon_meta_restore_transform(radeonContextPtr radeon
)
1222 _mesa_MatrixMode(GL_PROJECTION
);
1224 _mesa_MatrixMode(GL_MODELVIEW
);
1227 _mesa_MatrixMode(radeon
->meta
.saved_matrix_mode
);
1229 _mesa_Viewport(radeon
->meta
.saved_vp_x
, radeon
->meta
.saved_vp_y
,
1230 radeon
->meta
.saved_vp_width
, radeon
->meta
.saved_vp_height
);
/**
 * Perform glClear where mask contains only color, depth, and/or stencil.
 *
 * The implementation is based on calling into Mesa to set GL state and
 * performing normal triangle rendering.  The intent of this path is to
 * have as generic a path as possible, so that any driver could make use
 * of it.
 */
1244 void radeon_clear_tris(GLcontext
*ctx
, GLbitfield mask
)
1246 radeonContextPtr rmesa
= RADEON_CONTEXT(ctx
);
1247 GLfloat vertices
[4][3];
1248 GLfloat color
[4][4];
1250 struct gl_framebuffer
*fb
= ctx
->DrawBuffer
;
1252 GLboolean saved_fp_enable
= GL_FALSE
, saved_vp_enable
= GL_FALSE
;
1253 GLboolean saved_shader_program
= 0;
1254 unsigned int saved_active_texture
;
1256 assert((mask
& ~(TRI_CLEAR_COLOR_BITS
| BUFFER_BIT_DEPTH
|
1257 BUFFER_BIT_STENCIL
)) == 0);
1259 _mesa_PushAttrib(GL_COLOR_BUFFER_BIT
|
1261 GL_DEPTH_BUFFER_BIT
|
1263 GL_STENCIL_BUFFER_BIT
|
1266 _mesa_PushClientAttrib(GL_CLIENT_VERTEX_ARRAY_BIT
);
1267 saved_active_texture
= ctx
->Texture
.CurrentUnit
;
1269 /* Disable existing GL state we don't want to apply to a clear. */
1270 _mesa_Disable(GL_ALPHA_TEST
);
1271 _mesa_Disable(GL_BLEND
);
1272 _mesa_Disable(GL_CULL_FACE
);
1273 _mesa_Disable(GL_FOG
);
1274 _mesa_Disable(GL_POLYGON_SMOOTH
);
1275 _mesa_Disable(GL_POLYGON_STIPPLE
);
1276 _mesa_Disable(GL_POLYGON_OFFSET_FILL
);
1277 _mesa_Disable(GL_LIGHTING
);
1278 _mesa_Disable(GL_CLIP_PLANE0
);
1279 _mesa_Disable(GL_CLIP_PLANE1
);
1280 _mesa_Disable(GL_CLIP_PLANE2
);
1281 _mesa_Disable(GL_CLIP_PLANE3
);
1282 _mesa_Disable(GL_CLIP_PLANE4
);
1283 _mesa_Disable(GL_CLIP_PLANE5
);
1284 if (ctx
->Extensions
.ARB_fragment_program
&& ctx
->FragmentProgram
.Enabled
) {
1285 saved_fp_enable
= GL_TRUE
;
1286 _mesa_Disable(GL_FRAGMENT_PROGRAM_ARB
);
1288 if (ctx
->Extensions
.ARB_vertex_program
&& ctx
->VertexProgram
.Enabled
) {
1289 saved_vp_enable
= GL_TRUE
;
1290 _mesa_Disable(GL_VERTEX_PROGRAM_ARB
);
1292 if (ctx
->Extensions
.ARB_shader_objects
&& ctx
->Shader
.CurrentProgram
) {
1293 saved_shader_program
= ctx
->Shader
.CurrentProgram
->Name
;
1294 _mesa_UseProgramObjectARB(0);
1297 if (ctx
->Texture
._EnabledUnits
!= 0) {
1300 for (i
= 0; i
< ctx
->Const
.MaxTextureUnits
; i
++) {
1301 _mesa_ActiveTextureARB(GL_TEXTURE0
+ i
);
1302 _mesa_Disable(GL_TEXTURE_1D
);
1303 _mesa_Disable(GL_TEXTURE_2D
);
1304 _mesa_Disable(GL_TEXTURE_3D
);
1305 if (ctx
->Extensions
.ARB_texture_cube_map
)
1306 _mesa_Disable(GL_TEXTURE_CUBE_MAP_ARB
);
1307 if (ctx
->Extensions
.NV_texture_rectangle
)
1308 _mesa_Disable(GL_TEXTURE_RECTANGLE_NV
);
1309 if (ctx
->Extensions
.MESA_texture_array
) {
1310 _mesa_Disable(GL_TEXTURE_1D_ARRAY_EXT
);
1311 _mesa_Disable(GL_TEXTURE_2D_ARRAY_EXT
);
1316 radeon_meta_set_passthrough_transform(rmesa
);
1318 for (i
= 0; i
< 4; i
++) {
1319 color
[i
][0] = ctx
->Color
.ClearColor
[0];
1320 color
[i
][1] = ctx
->Color
.ClearColor
[1];
1321 color
[i
][2] = ctx
->Color
.ClearColor
[2];
1322 color
[i
][3] = ctx
->Color
.ClearColor
[3];
1325 /* convert clear Z from [0,1] to NDC coord in [-1,1] */
1327 dst_z
= -1.0 + 2.0 * ctx
->Depth
.Clear
;
1328 /* Prepare the vertices, which are the same regardless of which buffer we're
1331 vertices
[0][0] = fb
->_Xmin
;
1332 vertices
[0][1] = fb
->_Ymin
;
1333 vertices
[0][2] = dst_z
;
1334 vertices
[1][0] = fb
->_Xmax
;
1335 vertices
[1][1] = fb
->_Ymin
;
1336 vertices
[1][2] = dst_z
;
1337 vertices
[2][0] = fb
->_Xmax
;
1338 vertices
[2][1] = fb
->_Ymax
;
1339 vertices
[2][2] = dst_z
;
1340 vertices
[3][0] = fb
->_Xmin
;
1341 vertices
[3][1] = fb
->_Ymax
;
1342 vertices
[3][2] = dst_z
;
1344 _mesa_ColorPointer(4, GL_FLOAT
, 4 * sizeof(GLfloat
), &color
);
1345 _mesa_VertexPointer(3, GL_FLOAT
, 3 * sizeof(GLfloat
), &vertices
);
1346 _mesa_Enable(GL_COLOR_ARRAY
);
1347 _mesa_Enable(GL_VERTEX_ARRAY
);
1350 GLuint this_mask
= 0;
1353 color_bit
= _mesa_ffs(mask
& TRI_CLEAR_COLOR_BITS
);
1355 this_mask
|= (1 << (color_bit
- 1));
1357 /* Clear depth/stencil in the same pass as color. */
1358 this_mask
|= (mask
& (BUFFER_BIT_DEPTH
| BUFFER_BIT_STENCIL
));
1360 /* Select the current color buffer and use the color write mask if
1361 * we have one, otherwise don't write any color channels.
1363 if (this_mask
& BUFFER_BIT_FRONT_LEFT
)
1364 _mesa_DrawBuffer(GL_FRONT_LEFT
);
1365 else if (this_mask
& BUFFER_BIT_BACK_LEFT
)
1366 _mesa_DrawBuffer(GL_BACK_LEFT
);
1367 else if (color_bit
!= 0)
1368 _mesa_DrawBuffer(GL_COLOR_ATTACHMENT0
+
1369 (color_bit
- BUFFER_COLOR0
- 1));
1371 _mesa_ColorMask(GL_FALSE
, GL_FALSE
, GL_FALSE
, GL_FALSE
);
1373 /* Control writing of the depth clear value to depth. */
1374 if (this_mask
& BUFFER_BIT_DEPTH
) {
1375 _mesa_DepthFunc(GL_ALWAYS
);
1376 _mesa_DepthMask(GL_TRUE
);
1377 _mesa_Enable(GL_DEPTH_TEST
);
1379 _mesa_Disable(GL_DEPTH_TEST
);
1380 _mesa_DepthMask(GL_FALSE
);
1383 /* Control writing of the stencil clear value to stencil. */
1384 if (this_mask
& BUFFER_BIT_STENCIL
) {
1385 _mesa_Enable(GL_STENCIL_TEST
);
1386 _mesa_StencilOp(GL_REPLACE
, GL_REPLACE
, GL_REPLACE
);
1387 _mesa_StencilFuncSeparate(GL_FRONT
, GL_ALWAYS
, ctx
->Stencil
.Clear
,
1388 ctx
->Stencil
.WriteMask
[0]);
1390 _mesa_Disable(GL_STENCIL_TEST
);
1393 CALL_DrawArrays(ctx
->Exec
, (GL_TRIANGLE_FAN
, 0, 4));
1398 radeon_meta_restore_transform(rmesa
);
1400 _mesa_ActiveTextureARB(GL_TEXTURE0
+ saved_active_texture
);
1401 if (saved_fp_enable
)
1402 _mesa_Enable(GL_FRAGMENT_PROGRAM_ARB
);
1403 if (saved_vp_enable
)
1404 _mesa_Enable(GL_VERTEX_PROGRAM_ARB
);
1406 if (saved_shader_program
)
1407 _mesa_UseProgramObjectARB(saved_shader_program
);
1409 _mesa_PopClientAttrib();