1 /**************************************************************************
3 Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
5 The Weather Channel (TM) funded Tungsten Graphics to develop the
6 initial release of the Radeon 8500 driver under the XFree86 license.
7 This notice must be preserved.
9 Permission is hereby granted, free of charge, to any person obtaining
10 a copy of this software and associated documentation files (the
11 "Software"), to deal in the Software without restriction, including
12 without limitation the rights to use, copy, modify, merge, publish,
13 distribute, sublicense, and/or sell copies of the Software, and to
14 permit persons to whom the Software is furnished to do so, subject to
15 the following conditions:
17 The above copyright notice and this permission notice (including the
18 next paragraph) shall be included in all copies or substantial
19 portions of the Software.
21 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
22 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
23 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
24 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
25 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
26 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
27 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
29 **************************************************************************/
33 * Keith Whitwell <keith@tungstengraphics.com>
37 - Scissor implementation
38 - buffer swap/copy ioctls
41 - cmdbuffer management
45 #include "main/glheader.h"
46 #include "main/imports.h"
47 #include "main/context.h"
48 #include "main/enums.h"
49 #include "main/framebuffer.h"
50 #include "main/renderbuffer.h"
51 #include "drivers/common/meta.h"
53 #include "radeon_common.h"
54 #include "radeon_drm.h"
55 #include "radeon_queryobj.h"
58 * Enable verbose debug output for emit code.
61 * 2 also print state values
63 #define RADEON_CMDBUF 0
65 /* =============================================================
69 static GLboolean
intersect_rect(drm_clip_rect_t
* out
,
70 drm_clip_rect_t
* a
, drm_clip_rect_t
* b
)
81 if (out
->x1
>= out
->x2
)
83 if (out
->y1
>= out
->y2
)
88 void radeonRecalcScissorRects(radeonContextPtr radeon
)
90 struct gl_context
*ctx
= radeon
->glCtx
;
91 drm_clip_rect_t bounds
;
95 bounds
.x2
= ctx
->DrawBuffer
->Width
;
96 bounds
.y2
= ctx
->DrawBuffer
->Height
;
98 if (!radeon
->state
.scissor
.numAllocedClipRects
) {
99 radeon
->state
.scissor
.numAllocedClipRects
= 1;
100 radeon
->state
.scissor
.pClipRects
=
101 MALLOC(sizeof(drm_clip_rect_t
));
103 if (radeon
->state
.scissor
.pClipRects
== NULL
) {
104 radeon
->state
.scissor
.numAllocedClipRects
= 0;
109 radeon
->state
.scissor
.numClipRects
= 0;
110 if (intersect_rect(radeon
->state
.scissor
.pClipRects
,
112 &radeon
->state
.scissor
.rect
)) {
113 radeon
->state
.scissor
.numClipRects
= 1;
116 if (radeon
->vtbl
.update_scissor
)
117 radeon
->vtbl
.update_scissor(radeon
->glCtx
);
121 * Update cliprects and scissors.
123 void radeonSetCliprects(radeonContextPtr radeon
)
125 __DRIdrawable
*const drawable
= radeon_get_drawable(radeon
);
126 __DRIdrawable
*const readable
= radeon_get_readable(radeon
);
128 if(drawable
== NULL
&& readable
== NULL
)
131 struct radeon_framebuffer
*const draw_rfb
= drawable
->driverPrivate
;
132 struct radeon_framebuffer
*const read_rfb
= readable
->driverPrivate
;
134 if ((draw_rfb
->base
.Width
!= drawable
->w
) ||
135 (draw_rfb
->base
.Height
!= drawable
->h
)) {
136 _mesa_resize_framebuffer(radeon
->glCtx
, &draw_rfb
->base
,
137 drawable
->w
, drawable
->h
);
138 draw_rfb
->base
.Initialized
= GL_TRUE
;
141 if (drawable
!= readable
) {
142 if ((read_rfb
->base
.Width
!= readable
->w
) ||
143 (read_rfb
->base
.Height
!= readable
->h
)) {
144 _mesa_resize_framebuffer(radeon
->glCtx
, &read_rfb
->base
,
145 readable
->w
, readable
->h
);
146 read_rfb
->base
.Initialized
= GL_TRUE
;
150 if (radeon
->state
.scissor
.enabled
)
151 radeonRecalcScissorRects(radeon
);
157 void radeonUpdateScissor( struct gl_context
*ctx
)
159 radeonContextPtr rmesa
= RADEON_CONTEXT(ctx
);
160 GLint x
= ctx
->Scissor
.X
, y
= ctx
->Scissor
.Y
;
161 GLsizei w
= ctx
->Scissor
.Width
, h
= ctx
->Scissor
.Height
;
163 int min_x
, min_y
, max_x
, max_y
;
165 if (!ctx
->DrawBuffer
)
168 max_x
= ctx
->DrawBuffer
->Width
- 1;
169 max_y
= ctx
->DrawBuffer
->Height
- 1;
171 if ( !ctx
->DrawBuffer
->Name
) {
173 y1
= ctx
->DrawBuffer
->Height
- (y
+ h
);
184 rmesa
->state
.scissor
.rect
.x1
= CLAMP(x1
, min_x
, max_x
);
185 rmesa
->state
.scissor
.rect
.y1
= CLAMP(y1
, min_y
, max_y
);
186 rmesa
->state
.scissor
.rect
.x2
= CLAMP(x2
, min_x
, max_x
);
187 rmesa
->state
.scissor
.rect
.y2
= CLAMP(y2
, min_y
, max_y
);
189 radeonRecalcScissorRects( rmesa
);
192 /* =============================================================
196 void radeonScissor(struct gl_context
* ctx
, GLint x
, GLint y
, GLsizei w
, GLsizei h
)
198 radeonContextPtr radeon
= RADEON_CONTEXT(ctx
);
199 if (ctx
->Scissor
.Enabled
) {
200 /* We don't pipeline cliprect changes */
201 radeon_firevertices(radeon
);
202 radeonUpdateScissor(ctx
);
206 /* ================================================================
207 * SwapBuffers with client-side throttling
210 uint32_t radeonGetAge(radeonContextPtr radeon
)
212 drm_radeon_getparam_t gp
;
216 gp
.param
= RADEON_PARAM_LAST_CLEAR
;
217 gp
.value
= (int *)&age
;
218 ret
= drmCommandWriteRead(radeon
->dri
.fd
, DRM_RADEON_GETPARAM
,
221 fprintf(stderr
, "%s: drmRadeonGetParam: %d\n", __FUNCTION__
,
230 * Check if we're about to draw into the front color buffer.
231 * If so, set the intel->front_buffer_dirty field to true.
234 radeon_check_front_buffer_rendering(struct gl_context
*ctx
)
236 radeonContextPtr radeon
= RADEON_CONTEXT(ctx
);
237 const struct gl_framebuffer
*fb
= ctx
->DrawBuffer
;
240 /* drawing to window system buffer */
241 if (fb
->_NumColorDrawBuffers
> 0) {
242 if (fb
->_ColorDrawBufferIndexes
[0] == BUFFER_FRONT_LEFT
) {
243 radeon
->front_buffer_dirty
= GL_TRUE
;
250 void radeon_draw_buffer(struct gl_context
*ctx
, struct gl_framebuffer
*fb
)
252 radeonContextPtr radeon
= RADEON_CONTEXT(ctx
);
253 struct radeon_renderbuffer
*rrbDepth
= NULL
, *rrbStencil
= NULL
,
259 /* this can happen during the initial context initialization */
263 /* radeons only handle 1 color draw so far */
264 if (fb
->_NumColorDrawBuffers
!= 1) {
265 radeon
->vtbl
.fallback(ctx
, RADEON_FALLBACK_DRAW_BUFFER
, GL_TRUE
);
269 /* Do this here, note core Mesa, since this function is called from
270 * many places within the driver.
272 if (ctx
->NewState
& (_NEW_BUFFERS
| _NEW_COLOR
| _NEW_PIXEL
)) {
273 /* this updates the DrawBuffer->_NumColorDrawBuffers fields, etc */
274 _mesa_update_framebuffer(ctx
);
275 /* this updates the DrawBuffer's Width/Height if it's a FBO */
276 _mesa_update_draw_buffer_bounds(ctx
);
279 if (fb
->_Status
!= GL_FRAMEBUFFER_COMPLETE_EXT
) {
280 /* this may occur when we're called by glBindFrameBuffer() during
281 * the process of someone setting up renderbuffers, etc.
283 /*_mesa_debug(ctx, "DrawBuffer: incomplete user FBO\n");*/
288 ;/* do something depthy/stencily TODO */
293 if (fb
->_ColorDrawBufferIndexes
[0] == BUFFER_FRONT_LEFT
) {
294 rrbColor
= radeon_renderbuffer(fb
->Attachment
[BUFFER_FRONT_LEFT
].Renderbuffer
);
295 radeon
->front_cliprects
= GL_TRUE
;
297 rrbColor
= radeon_renderbuffer(fb
->Attachment
[BUFFER_BACK_LEFT
].Renderbuffer
);
298 radeon
->front_cliprects
= GL_FALSE
;
301 /* user FBO in theory */
302 struct radeon_renderbuffer
*rrb
;
303 rrb
= radeon_renderbuffer(fb
->_ColorDrawBuffers
[0]);
305 offset
= rrb
->draw_offset
;
308 radeon
->constant_cliprect
= GL_TRUE
;
311 if (rrbColor
== NULL
)
312 radeon
->vtbl
.fallback(ctx
, RADEON_FALLBACK_DRAW_BUFFER
, GL_TRUE
);
314 radeon
->vtbl
.fallback(ctx
, RADEON_FALLBACK_DRAW_BUFFER
, GL_FALSE
);
317 if (fb
->_DepthBuffer
&& fb
->_DepthBuffer
->Wrapped
) {
318 rrbDepth
= radeon_renderbuffer(fb
->_DepthBuffer
->Wrapped
);
319 if (rrbDepth
&& rrbDepth
->bo
) {
320 radeon
->vtbl
.fallback(ctx
, RADEON_FALLBACK_DEPTH_BUFFER
, GL_FALSE
);
322 radeon
->vtbl
.fallback(ctx
, RADEON_FALLBACK_DEPTH_BUFFER
, GL_TRUE
);
325 radeon
->vtbl
.fallback(ctx
, RADEON_FALLBACK_DEPTH_BUFFER
, GL_FALSE
);
329 if (fb
->_StencilBuffer
&& fb
->_StencilBuffer
->Wrapped
) {
330 rrbStencil
= radeon_renderbuffer(fb
->_StencilBuffer
->Wrapped
);
331 if (rrbStencil
&& rrbStencil
->bo
) {
332 radeon
->vtbl
.fallback(ctx
, RADEON_FALLBACK_STENCIL_BUFFER
, GL_FALSE
);
333 /* need to re-compute stencil hw state */
335 rrbDepth
= rrbStencil
;
337 radeon
->vtbl
.fallback(ctx
, RADEON_FALLBACK_STENCIL_BUFFER
, GL_TRUE
);
340 radeon
->vtbl
.fallback(ctx
, RADEON_FALLBACK_STENCIL_BUFFER
, GL_FALSE
);
341 if (ctx
->Driver
.Enable
!= NULL
)
342 ctx
->Driver
.Enable(ctx
, GL_STENCIL_TEST
, ctx
->Stencil
.Enabled
);
344 ctx
->NewState
|= _NEW_STENCIL
;
347 /* Update culling direction which changes depending on the
348 * orientation of the buffer:
350 if (ctx
->Driver
.FrontFace
)
351 ctx
->Driver
.FrontFace(ctx
, ctx
->Polygon
.FrontFace
);
353 ctx
->NewState
|= _NEW_POLYGON
;
356 * Update depth test state
358 if (ctx
->Driver
.Enable
) {
359 ctx
->Driver
.Enable(ctx
, GL_DEPTH_TEST
,
360 (ctx
->Depth
.Test
&& fb
->Visual
.depthBits
> 0));
361 /* Need to update the derived ctx->Stencil._Enabled first */
362 ctx
->Driver
.Enable(ctx
, GL_STENCIL_TEST
,
363 (ctx
->Stencil
.Enabled
&& fb
->Visual
.stencilBits
> 0));
365 ctx
->NewState
|= (_NEW_DEPTH
| _NEW_STENCIL
);
368 _mesa_reference_renderbuffer(&radeon
->state
.depth
.rb
, &rrbDepth
->base
);
369 _mesa_reference_renderbuffer(&radeon
->state
.color
.rb
, &rrbColor
->base
);
370 radeon
->state
.color
.draw_offset
= offset
;
373 /* update viewport since it depends on window size */
374 if (ctx
->Driver
.Viewport
) {
375 ctx
->Driver
.Viewport(ctx
, ctx
->Viewport
.X
, ctx
->Viewport
.Y
,
376 ctx
->Viewport
.Width
, ctx
->Viewport
.Height
);
381 ctx
->NewState
|= _NEW_VIEWPORT
;
383 /* Set state we know depends on drawable parameters:
385 radeonUpdateScissor(ctx
);
386 radeon
->NewGLState
|= _NEW_SCISSOR
;
388 if (ctx
->Driver
.DepthRange
)
389 ctx
->Driver
.DepthRange(ctx
,
393 /* Update culling direction which changes depending on the
394 * orientation of the buffer:
396 if (ctx
->Driver
.FrontFace
)
397 ctx
->Driver
.FrontFace(ctx
, ctx
->Polygon
.FrontFace
);
399 ctx
->NewState
|= _NEW_POLYGON
;
403 * Called via glDrawBuffer.
405 void radeonDrawBuffer( struct gl_context
*ctx
, GLenum mode
)
407 if (RADEON_DEBUG
& RADEON_DRI
)
408 fprintf(stderr
, "%s %s\n", __FUNCTION__
,
409 _mesa_lookup_enum_by_nr( mode
));
411 if (ctx
->DrawBuffer
->Name
== 0) {
412 radeonContextPtr radeon
= RADEON_CONTEXT(ctx
);
414 const GLboolean was_front_buffer_rendering
=
415 radeon
->is_front_buffer_rendering
;
417 radeon
->is_front_buffer_rendering
= (mode
== GL_FRONT_LEFT
) ||
420 /* If we weren't front-buffer rendering before but we are now, make sure
421 * that the front-buffer has actually been allocated.
423 if (!was_front_buffer_rendering
&& radeon
->is_front_buffer_rendering
) {
424 radeon_update_renderbuffers(radeon
->dri
.context
,
425 radeon
->dri
.context
->driDrawablePriv
, GL_FALSE
);
429 radeon_draw_buffer(ctx
, ctx
->DrawBuffer
);
432 void radeonReadBuffer( struct gl_context
*ctx
, GLenum mode
)
434 if ((ctx
->DrawBuffer
!= NULL
) && (ctx
->DrawBuffer
->Name
== 0)) {
435 struct radeon_context
*const rmesa
= RADEON_CONTEXT(ctx
);
436 const GLboolean was_front_buffer_reading
= rmesa
->is_front_buffer_reading
;
437 rmesa
->is_front_buffer_reading
= (mode
== GL_FRONT_LEFT
)
438 || (mode
== GL_FRONT
);
440 if (!was_front_buffer_reading
&& rmesa
->is_front_buffer_reading
) {
441 radeon_update_renderbuffers(rmesa
->dri
.context
,
442 rmesa
->dri
.context
->driReadablePriv
, GL_FALSE
);
445 /* nothing, until we implement h/w glRead/CopyPixels or CopyTexImage */
446 if (ctx
->ReadBuffer
== ctx
->DrawBuffer
) {
447 /* This will update FBO completeness status.
448 * A framebuffer will be incomplete if the GL_READ_BUFFER setting
449 * refers to a missing renderbuffer. Calling glReadBuffer can set
450 * that straight and can make the drawing buffer complete.
452 radeon_draw_buffer(ctx
, ctx
->DrawBuffer
);
456 void radeon_window_moved(radeonContextPtr radeon
)
458 /* Cliprects has to be updated before doing anything else */
459 radeonSetCliprects(radeon
);
462 void radeon_viewport(struct gl_context
*ctx
, GLint x
, GLint y
, GLsizei width
, GLsizei height
)
464 radeonContextPtr radeon
= RADEON_CONTEXT(ctx
);
465 __DRIcontext
*driContext
= radeon
->dri
.context
;
466 void (*old_viewport
)(struct gl_context
*ctx
, GLint x
, GLint y
,
467 GLsizei w
, GLsizei h
);
469 if (ctx
->DrawBuffer
->Name
== 0) {
470 if (radeon
->is_front_buffer_rendering
) {
471 ctx
->Driver
.Flush(ctx
);
473 radeon_update_renderbuffers(driContext
, driContext
->driDrawablePriv
, GL_FALSE
);
474 if (driContext
->driDrawablePriv
!= driContext
->driReadablePriv
)
475 radeon_update_renderbuffers(driContext
, driContext
->driReadablePriv
, GL_FALSE
);
478 old_viewport
= ctx
->Driver
.Viewport
;
479 ctx
->Driver
.Viewport
= NULL
;
480 radeon_window_moved(radeon
);
481 radeon_draw_buffer(ctx
, radeon
->glCtx
->DrawBuffer
);
482 ctx
->Driver
.Viewport
= old_viewport
;
485 static void radeon_print_state_atom(radeonContextPtr radeon
, struct radeon_state_atom
*state
)
487 int i
, j
, reg
, count
;
490 if (!radeon_is_debug_enabled(RADEON_STATE
, RADEON_VERBOSE
) )
493 dwords
= (*state
->check
) (radeon
->glCtx
, state
);
495 fprintf(stderr
, " emit %s %d/%d\n", state
->name
, dwords
, state
->cmd_size
);
497 if (state
->cmd
&& radeon_is_debug_enabled(RADEON_STATE
, RADEON_TRACE
)) {
498 if (dwords
> state
->cmd_size
)
499 dwords
= state
->cmd_size
;
500 for (i
= 0; i
< dwords
;) {
501 packet0
= state
->cmd
[i
];
502 reg
= (packet0
& 0x1FFF) << 2;
503 count
= ((packet0
& 0x3FFF0000) >> 16) + 1;
504 fprintf(stderr
, " %s[%d]: cmdpacket0 (first reg=0x%04x, count=%d)\n",
505 state
->name
, i
, reg
, count
);
507 for (j
= 0; j
< count
&& i
< dwords
; j
++) {
508 fprintf(stderr
, " %s[%d]: 0x%04x = %08x\n",
509 state
->name
, i
, reg
, state
->cmd
[i
]);
518 * Count total size for next state emit.
520 GLuint
radeonCountStateEmitSize(radeonContextPtr radeon
)
522 struct radeon_state_atom
*atom
;
524 /* check if we are going to emit full state */
526 if (radeon
->cmdbuf
.cs
->cdw
&& !radeon
->hw
.all_dirty
) {
527 if (!radeon
->hw
.is_dirty
)
529 foreach(atom
, &radeon
->hw
.atomlist
) {
531 const GLuint atom_size
= atom
->check(radeon
->glCtx
, atom
);
533 if (RADEON_CMDBUF
&& atom_size
) {
534 radeon_print_state_atom(radeon
, atom
);
539 foreach(atom
, &radeon
->hw
.atomlist
) {
540 const GLuint atom_size
= atom
->check(radeon
->glCtx
, atom
);
542 if (RADEON_CMDBUF
&& atom_size
) {
543 radeon_print_state_atom(radeon
, atom
);
549 radeon_print(RADEON_STATE
, RADEON_NORMAL
, "%s %u\n", __func__
, dwords
);
553 static INLINE
void radeon_emit_atom(radeonContextPtr radeon
, struct radeon_state_atom
*atom
)
555 BATCH_LOCALS(radeon
);
558 dwords
= (*atom
->check
) (radeon
->glCtx
, atom
);
561 radeon_print_state_atom(radeon
, atom
);
564 (*atom
->emit
)(radeon
->glCtx
, atom
);
566 BEGIN_BATCH_NO_AUTOSTATE(dwords
);
567 OUT_BATCH_TABLE(atom
->cmd
, dwords
);
570 atom
->dirty
= GL_FALSE
;
573 radeon_print(RADEON_STATE
, RADEON_VERBOSE
, " skip state %s\n", atom
->name
);
578 static INLINE
void radeonEmitAtoms(radeonContextPtr radeon
, GLboolean emitAll
)
580 struct radeon_state_atom
*atom
;
582 if (radeon
->vtbl
.pre_emit_atoms
)
583 radeon
->vtbl
.pre_emit_atoms(radeon
);
585 /* Emit actual atoms */
586 if (radeon
->hw
.all_dirty
|| emitAll
) {
587 foreach(atom
, &radeon
->hw
.atomlist
)
588 radeon_emit_atom( radeon
, atom
);
590 foreach(atom
, &radeon
->hw
.atomlist
) {
592 radeon_emit_atom( radeon
, atom
);
599 static GLboolean
radeon_revalidate_bos(struct gl_context
*ctx
)
601 radeonContextPtr radeon
= RADEON_CONTEXT(ctx
);
604 ret
= radeon_cs_space_check(radeon
->cmdbuf
.cs
);
605 if (ret
== RADEON_CS_SPACE_FLUSH
)
610 void radeonEmitState(radeonContextPtr radeon
)
612 radeon_print(RADEON_STATE
, RADEON_NORMAL
, "%s\n", __FUNCTION__
);
614 if (radeon
->vtbl
.pre_emit_state
)
615 radeon
->vtbl
.pre_emit_state(radeon
);
617 /* this code used to return here but now it emits zbs */
618 if (radeon
->cmdbuf
.cs
->cdw
&& !radeon
->hw
.is_dirty
&& !radeon
->hw
.all_dirty
)
621 if (!radeon
->cmdbuf
.cs
->cdw
) {
622 if (RADEON_DEBUG
& RADEON_STATE
)
623 fprintf(stderr
, "Begin reemit state\n");
625 radeonEmitAtoms(radeon
, GL_TRUE
);
628 if (RADEON_DEBUG
& RADEON_STATE
)
629 fprintf(stderr
, "Begin dirty state\n");
631 radeonEmitAtoms(radeon
, GL_FALSE
);
634 radeon
->hw
.is_dirty
= GL_FALSE
;
635 radeon
->hw
.all_dirty
= GL_FALSE
;
639 void radeonFlush(struct gl_context
*ctx
)
641 radeonContextPtr radeon
= RADEON_CONTEXT(ctx
);
642 if (RADEON_DEBUG
& RADEON_IOCTL
)
643 fprintf(stderr
, "%s %d\n", __FUNCTION__
, radeon
->cmdbuf
.cs
->cdw
);
645 /* okay if we have no cmds in the buffer &&
646 we have no DMA flush &&
647 we have no DMA buffer allocated.
648 then no point flushing anything at all.
650 if (!radeon
->dma
.flush
&& !radeon
->cmdbuf
.cs
->cdw
&& is_empty_list(&radeon
->dma
.reserved
))
653 if (radeon
->dma
.flush
)
654 radeon
->dma
.flush( ctx
);
656 if (radeon
->cmdbuf
.cs
->cdw
)
657 rcommonFlushCmdBuf(radeon
, __FUNCTION__
);
660 if ((ctx
->DrawBuffer
->Name
== 0) && radeon
->front_buffer_dirty
) {
661 __DRIscreen
*const screen
= radeon
->radeonScreen
->driScreen
;
663 if (screen
->dri2
.loader
&& (screen
->dri2
.loader
->base
.version
>= 2)
664 && (screen
->dri2
.loader
->flushFrontBuffer
!= NULL
)) {
665 __DRIdrawable
* drawable
= radeon_get_drawable(radeon
);
667 /* We set the dirty bit in radeon_prepare_render() if we're
668 * front buffer rendering once we get there.
670 radeon
->front_buffer_dirty
= GL_FALSE
;
672 (*screen
->dri2
.loader
->flushFrontBuffer
)(drawable
, drawable
->loaderPrivate
);
677 /* Make sure all commands have been sent to the hardware and have
678 * completed processing.
680 void radeonFinish(struct gl_context
* ctx
)
682 radeonContextPtr radeon
= RADEON_CONTEXT(ctx
);
683 struct gl_framebuffer
*fb
= ctx
->DrawBuffer
;
684 struct radeon_renderbuffer
*rrb
;
687 if (ctx
->Driver
.Flush
)
688 ctx
->Driver
.Flush(ctx
); /* +r6/r7 */
690 for (i
= 0; i
< fb
->_NumColorDrawBuffers
; i
++) {
691 struct radeon_renderbuffer
*rrb
;
692 rrb
= radeon_renderbuffer(fb
->_ColorDrawBuffers
[i
]);
694 radeon_bo_wait(rrb
->bo
);
696 rrb
= radeon_get_depthbuffer(radeon
);
698 radeon_bo_wait(rrb
->bo
);
703 * Send the current command buffer via ioctl to the hardware.
705 int rcommonFlushCmdBufLocked(radeonContextPtr rmesa
, const char *caller
)
709 if (rmesa
->cmdbuf
.flushing
) {
710 fprintf(stderr
, "Recursive call into r300FlushCmdBufLocked!\n");
713 rmesa
->cmdbuf
.flushing
= 1;
715 if (RADEON_DEBUG
& RADEON_IOCTL
) {
716 fprintf(stderr
, "%s from %s\n", __FUNCTION__
, caller
);
719 radeonEmitQueryEnd(rmesa
->glCtx
);
721 if (rmesa
->cmdbuf
.cs
->cdw
) {
722 ret
= radeon_cs_emit(rmesa
->cmdbuf
.cs
);
723 rmesa
->hw
.all_dirty
= GL_TRUE
;
725 radeon_cs_erase(rmesa
->cmdbuf
.cs
);
726 rmesa
->cmdbuf
.flushing
= 0;
728 if (radeon_revalidate_bos(rmesa
->glCtx
) == GL_FALSE
) {
729 fprintf(stderr
,"failed to revalidate buffers\n");
735 int rcommonFlushCmdBuf(radeonContextPtr rmesa
, const char *caller
)
739 radeonReleaseDmaRegions(rmesa
);
741 ret
= rcommonFlushCmdBufLocked(rmesa
, caller
);
744 fprintf(stderr
, "drmRadeonCmdBuffer: %d. Kernel failed to "
745 "parse or rejected command stream. See dmesg "
746 "for more info.\n", ret
);
754 * Make sure that enough space is available in the command buffer
755 * by flushing if necessary.
757 * \param dwords The number of dwords we need to be free on the command buffer
759 GLboolean
rcommonEnsureCmdBufSpace(radeonContextPtr rmesa
, int dwords
, const char *caller
)
761 if ((rmesa
->cmdbuf
.cs
->cdw
+ dwords
+ 128) > rmesa
->cmdbuf
.size
762 || radeon_cs_need_flush(rmesa
->cmdbuf
.cs
)) {
763 /* If we try to flush empty buffer there is too big rendering operation. */
764 assert(rmesa
->cmdbuf
.cs
->cdw
);
765 rcommonFlushCmdBuf(rmesa
, caller
);
771 void rcommonInitCmdBuf(radeonContextPtr rmesa
)
774 struct drm_radeon_gem_info mminfo
= { 0 };
776 /* Initialize command buffer */
777 size
= 256 * driQueryOptioni(&rmesa
->optionCache
,
778 "command_buffer_size");
779 if (size
< 2 * rmesa
->hw
.max_state_size
) {
780 size
= 2 * rmesa
->hw
.max_state_size
+ 65535;
785 radeon_print(RADEON_CS
, RADEON_VERBOSE
,
786 "sizeof(drm_r300_cmd_header_t)=%zd\n", sizeof(drm_r300_cmd_header_t
));
787 radeon_print(RADEON_CS
, RADEON_VERBOSE
,
788 "sizeof(drm_radeon_cmd_buffer_t)=%zd\n", sizeof(drm_radeon_cmd_buffer_t
));
789 radeon_print(RADEON_CS
, RADEON_VERBOSE
,
790 "Allocating %d bytes command buffer (max state is %d bytes)\n",
791 size
* 4, rmesa
->hw
.max_state_size
* 4);
794 radeon_cs_manager_gem_ctor(rmesa
->radeonScreen
->driScreen
->fd
);
795 if (rmesa
->cmdbuf
.csm
== NULL
) {
796 /* FIXME: fatal error */
799 rmesa
->cmdbuf
.cs
= radeon_cs_create(rmesa
->cmdbuf
.csm
, size
);
800 assert(rmesa
->cmdbuf
.cs
!= NULL
);
801 rmesa
->cmdbuf
.size
= size
;
803 radeon_cs_space_set_flush(rmesa
->cmdbuf
.cs
,
804 (void (*)(void *))rmesa
->glCtx
->Driver
.Flush
, rmesa
->glCtx
);
807 if (!drmCommandWriteRead(rmesa
->dri
.fd
, DRM_RADEON_GEM_INFO
,
808 &mminfo
, sizeof(mminfo
))) {
809 radeon_cs_set_limit(rmesa
->cmdbuf
.cs
, RADEON_GEM_DOMAIN_VRAM
,
810 mminfo
.vram_visible
);
811 radeon_cs_set_limit(rmesa
->cmdbuf
.cs
, RADEON_GEM_DOMAIN_GTT
,
817 * Destroy the command buffer
819 void rcommonDestroyCmdBuf(radeonContextPtr rmesa
)
821 radeon_cs_destroy(rmesa
->cmdbuf
.cs
);
822 radeon_cs_manager_gem_dtor(rmesa
->cmdbuf
.csm
);
825 void rcommonBeginBatch(radeonContextPtr rmesa
, int n
,
828 const char *function
,
831 radeon_cs_begin(rmesa
->cmdbuf
.cs
, n
, file
, function
, line
);
833 radeon_print(RADEON_CS
, RADEON_VERBOSE
, "BEGIN_BATCH(%d) at %d, from %s:%i\n",
834 n
, rmesa
->cmdbuf
.cs
->cdw
, function
, line
);
838 void radeonUserClear(struct gl_context
*ctx
, GLuint mask
)
840 _mesa_meta_Clear(ctx
, mask
);