1 /**************************************************************************
3 Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
5 The Weather Channel (TM) funded Tungsten Graphics to develop the
6 initial release of the Radeon 8500 driver under the XFree86 license.
7 This notice must be preserved.
9 Permission is hereby granted, free of charge, to any person obtaining
10 a copy of this software and associated documentation files (the
11 "Software"), to deal in the Software without restriction, including
12 without limitation the rights to use, copy, modify, merge, publish,
13 distribute, sublicense, and/or sell copies of the Software, and to
14 permit persons to whom the Software is furnished to do so, subject to
15 the following conditions:
17 The above copyright notice and this permission notice (including the
18 next paragraph) shall be included in all copies or substantial
19 portions of the Software.
21 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
22 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
23 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
24 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
25 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
26 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
27 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
29 **************************************************************************/
33 * Keith Whitwell <keith@tungstengraphics.com>
37 - Scissor implementation
38 - buffer swap/copy ioctls
41 - cmdbuffer management
45 #include "main/glheader.h"
46 #include "main/imports.h"
47 #include "main/context.h"
48 #include "main/enums.h"
49 #include "main/framebuffer.h"
50 #include "main/renderbuffer.h"
51 #include "drivers/common/meta.h"
55 #include "radeon_common.h"
56 #include "radeon_bocs_wrapper.h"
57 #include "radeon_drm.h"
58 #include "radeon_queryobj.h"
61 * Enable verbose debug output for emit code.
 * 2 also print state values
66 #define RADEON_CMDBUF 0
68 /* =============================================================
72 static GLboolean
intersect_rect(drm_clip_rect_t
* out
,
73 drm_clip_rect_t
* a
, drm_clip_rect_t
* b
)
84 if (out
->x1
>= out
->x2
)
86 if (out
->y1
>= out
->y2
)
91 void radeonRecalcScissorRects(radeonContextPtr radeon
)
93 struct gl_context
*ctx
= radeon
->glCtx
;
94 drm_clip_rect_t bounds
;
98 bounds
.x2
= ctx
->DrawBuffer
->Width
;
99 bounds
.x2
= ctx
->DrawBuffer
->Height
;
101 if (!radeon
->state
.scissor
.numAllocedClipRects
) {
102 radeon
->state
.scissor
.numAllocedClipRects
= 1;
103 radeon
->state
.scissor
.pClipRects
=
104 MALLOC(sizeof(drm_clip_rect_t
));
106 if (radeon
->state
.scissor
.pClipRects
== NULL
) {
107 radeon
->state
.scissor
.numAllocedClipRects
= 0;
112 radeon
->state
.scissor
.numClipRects
= 0;
113 if (intersect_rect(radeon
->state
.scissor
.pClipRects
,
115 &radeon
->state
.scissor
.rect
)) {
116 radeon
->state
.scissor
.numClipRects
= 1;
119 if (radeon
->vtbl
.update_scissor
)
120 radeon
->vtbl
.update_scissor(radeon
->glCtx
);
124 * Update cliprects and scissors.
126 void radeonSetCliprects(radeonContextPtr radeon
)
128 __DRIdrawable
*const drawable
= radeon_get_drawable(radeon
);
129 __DRIdrawable
*const readable
= radeon_get_readable(radeon
);
131 if(drawable
== NULL
&& readable
== NULL
)
134 struct radeon_framebuffer
*const draw_rfb
= drawable
->driverPrivate
;
135 struct radeon_framebuffer
*const read_rfb
= readable
->driverPrivate
;
137 if ((draw_rfb
->base
.Width
!= drawable
->w
) ||
138 (draw_rfb
->base
.Height
!= drawable
->h
)) {
139 _mesa_resize_framebuffer(radeon
->glCtx
, &draw_rfb
->base
,
140 drawable
->w
, drawable
->h
);
141 draw_rfb
->base
.Initialized
= GL_TRUE
;
144 if (drawable
!= readable
) {
145 if ((read_rfb
->base
.Width
!= readable
->w
) ||
146 (read_rfb
->base
.Height
!= readable
->h
)) {
147 _mesa_resize_framebuffer(radeon
->glCtx
, &read_rfb
->base
,
148 readable
->w
, readable
->h
);
149 read_rfb
->base
.Initialized
= GL_TRUE
;
153 if (radeon
->state
.scissor
.enabled
)
154 radeonRecalcScissorRects(radeon
);
160 void radeonUpdateScissor( struct gl_context
*ctx
)
162 radeonContextPtr rmesa
= RADEON_CONTEXT(ctx
);
163 GLint x
= ctx
->Scissor
.X
, y
= ctx
->Scissor
.Y
;
164 GLsizei w
= ctx
->Scissor
.Width
, h
= ctx
->Scissor
.Height
;
166 int min_x
, min_y
, max_x
, max_y
;
168 if (!ctx
->DrawBuffer
)
171 max_x
= ctx
->DrawBuffer
->Width
- 1;
172 max_y
= ctx
->DrawBuffer
->Height
- 1;
174 if ( !ctx
->DrawBuffer
->Name
) {
176 y1
= ctx
->DrawBuffer
->Height
- (y
+ h
);
187 rmesa
->state
.scissor
.rect
.x1
= CLAMP(x1
, min_x
, max_x
);
188 rmesa
->state
.scissor
.rect
.y1
= CLAMP(y1
, min_y
, max_y
);
189 rmesa
->state
.scissor
.rect
.x2
= CLAMP(x2
, min_x
, max_x
);
190 rmesa
->state
.scissor
.rect
.y2
= CLAMP(y2
, min_y
, max_y
);
192 radeonRecalcScissorRects( rmesa
);
195 /* =============================================================
199 void radeonScissor(struct gl_context
* ctx
, GLint x
, GLint y
, GLsizei w
, GLsizei h
)
201 radeonContextPtr radeon
= RADEON_CONTEXT(ctx
);
202 if (ctx
->Scissor
.Enabled
) {
203 /* We don't pipeline cliprect changes */
204 radeon_firevertices(radeon
);
205 radeonUpdateScissor(ctx
);
209 /* ================================================================
210 * SwapBuffers with client-side throttling
213 uint32_t radeonGetAge(radeonContextPtr radeon
)
215 drm_radeon_getparam_t gp
;
219 gp
.param
= RADEON_PARAM_LAST_CLEAR
;
220 gp
.value
= (int *)&age
;
221 ret
= drmCommandWriteRead(radeon
->dri
.fd
, DRM_RADEON_GETPARAM
,
224 fprintf(stderr
, "%s: drmRadeonGetParam: %d\n", __FUNCTION__
,
233 * Check if we're about to draw into the front color buffer.
234 * If so, set the intel->front_buffer_dirty field to true.
237 radeon_check_front_buffer_rendering(struct gl_context
*ctx
)
239 radeonContextPtr radeon
= RADEON_CONTEXT(ctx
);
240 const struct gl_framebuffer
*fb
= ctx
->DrawBuffer
;
243 /* drawing to window system buffer */
244 if (fb
->_NumColorDrawBuffers
> 0) {
245 if (fb
->_ColorDrawBufferIndexes
[0] == BUFFER_FRONT_LEFT
) {
246 radeon
->front_buffer_dirty
= GL_TRUE
;
253 void radeon_draw_buffer(struct gl_context
*ctx
, struct gl_framebuffer
*fb
)
255 radeonContextPtr radeon
= RADEON_CONTEXT(ctx
);
256 struct radeon_renderbuffer
*rrbDepth
= NULL
, *rrbStencil
= NULL
,
262 /* this can happen during the initial context initialization */
266 /* radeons only handle 1 color draw so far */
267 if (fb
->_NumColorDrawBuffers
!= 1) {
268 radeon
->vtbl
.fallback(ctx
, RADEON_FALLBACK_DRAW_BUFFER
, GL_TRUE
);
272 /* Do this here, note core Mesa, since this function is called from
273 * many places within the driver.
275 if (ctx
->NewState
& (_NEW_BUFFERS
| _NEW_COLOR
| _NEW_PIXEL
)) {
276 /* this updates the DrawBuffer->_NumColorDrawBuffers fields, etc */
277 _mesa_update_framebuffer(ctx
);
278 /* this updates the DrawBuffer's Width/Height if it's a FBO */
279 _mesa_update_draw_buffer_bounds(ctx
);
282 if (fb
->_Status
!= GL_FRAMEBUFFER_COMPLETE_EXT
) {
283 /* this may occur when we're called by glBindFrameBuffer() during
284 * the process of someone setting up renderbuffers, etc.
286 /*_mesa_debug(ctx, "DrawBuffer: incomplete user FBO\n");*/
291 ;/* do something depthy/stencily TODO */
296 if (fb
->_ColorDrawBufferIndexes
[0] == BUFFER_FRONT_LEFT
) {
297 rrbColor
= radeon_renderbuffer(fb
->Attachment
[BUFFER_FRONT_LEFT
].Renderbuffer
);
298 radeon
->front_cliprects
= GL_TRUE
;
300 rrbColor
= radeon_renderbuffer(fb
->Attachment
[BUFFER_BACK_LEFT
].Renderbuffer
);
301 radeon
->front_cliprects
= GL_FALSE
;
304 /* user FBO in theory */
305 struct radeon_renderbuffer
*rrb
;
306 rrb
= radeon_renderbuffer(fb
->_ColorDrawBuffers
[0]);
308 offset
= rrb
->draw_offset
;
311 radeon
->constant_cliprect
= GL_TRUE
;
314 if (rrbColor
== NULL
)
315 radeon
->vtbl
.fallback(ctx
, RADEON_FALLBACK_DRAW_BUFFER
, GL_TRUE
);
317 radeon
->vtbl
.fallback(ctx
, RADEON_FALLBACK_DRAW_BUFFER
, GL_FALSE
);
320 if (fb
->_DepthBuffer
&& fb
->_DepthBuffer
->Wrapped
) {
321 rrbDepth
= radeon_renderbuffer(fb
->_DepthBuffer
->Wrapped
);
322 if (rrbDepth
&& rrbDepth
->bo
) {
323 radeon
->vtbl
.fallback(ctx
, RADEON_FALLBACK_DEPTH_BUFFER
, GL_FALSE
);
325 radeon
->vtbl
.fallback(ctx
, RADEON_FALLBACK_DEPTH_BUFFER
, GL_TRUE
);
328 radeon
->vtbl
.fallback(ctx
, RADEON_FALLBACK_DEPTH_BUFFER
, GL_FALSE
);
332 if (fb
->_StencilBuffer
&& fb
->_StencilBuffer
->Wrapped
) {
333 rrbStencil
= radeon_renderbuffer(fb
->_StencilBuffer
->Wrapped
);
334 if (rrbStencil
&& rrbStencil
->bo
) {
335 radeon
->vtbl
.fallback(ctx
, RADEON_FALLBACK_STENCIL_BUFFER
, GL_FALSE
);
336 /* need to re-compute stencil hw state */
338 rrbDepth
= rrbStencil
;
340 radeon
->vtbl
.fallback(ctx
, RADEON_FALLBACK_STENCIL_BUFFER
, GL_TRUE
);
343 radeon
->vtbl
.fallback(ctx
, RADEON_FALLBACK_STENCIL_BUFFER
, GL_FALSE
);
344 if (ctx
->Driver
.Enable
!= NULL
)
345 ctx
->Driver
.Enable(ctx
, GL_STENCIL_TEST
, ctx
->Stencil
.Enabled
);
347 ctx
->NewState
|= _NEW_STENCIL
;
350 /* Update culling direction which changes depending on the
351 * orientation of the buffer:
353 if (ctx
->Driver
.FrontFace
)
354 ctx
->Driver
.FrontFace(ctx
, ctx
->Polygon
.FrontFace
);
356 ctx
->NewState
|= _NEW_POLYGON
;
359 * Update depth test state
361 if (ctx
->Driver
.Enable
) {
362 ctx
->Driver
.Enable(ctx
, GL_DEPTH_TEST
,
363 (ctx
->Depth
.Test
&& fb
->Visual
.depthBits
> 0));
364 /* Need to update the derived ctx->Stencil._Enabled first */
365 ctx
->Driver
.Enable(ctx
, GL_STENCIL_TEST
,
366 (ctx
->Stencil
.Enabled
&& fb
->Visual
.stencilBits
> 0));
368 ctx
->NewState
|= (_NEW_DEPTH
| _NEW_STENCIL
);
371 _mesa_reference_renderbuffer(&radeon
->state
.depth
.rb
, &rrbDepth
->base
);
372 _mesa_reference_renderbuffer(&radeon
->state
.color
.rb
, &rrbColor
->base
);
373 radeon
->state
.color
.draw_offset
= offset
;
376 /* update viewport since it depends on window size */
377 if (ctx
->Driver
.Viewport
) {
378 ctx
->Driver
.Viewport(ctx
, ctx
->Viewport
.X
, ctx
->Viewport
.Y
,
379 ctx
->Viewport
.Width
, ctx
->Viewport
.Height
);
384 ctx
->NewState
|= _NEW_VIEWPORT
;
386 /* Set state we know depends on drawable parameters:
388 radeonUpdateScissor(ctx
);
389 radeon
->NewGLState
|= _NEW_SCISSOR
;
391 if (ctx
->Driver
.DepthRange
)
392 ctx
->Driver
.DepthRange(ctx
,
396 /* Update culling direction which changes depending on the
397 * orientation of the buffer:
399 if (ctx
->Driver
.FrontFace
)
400 ctx
->Driver
.FrontFace(ctx
, ctx
->Polygon
.FrontFace
);
402 ctx
->NewState
|= _NEW_POLYGON
;
406 * Called via glDrawBuffer.
408 void radeonDrawBuffer( struct gl_context
*ctx
, GLenum mode
)
410 if (RADEON_DEBUG
& RADEON_DRI
)
411 fprintf(stderr
, "%s %s\n", __FUNCTION__
,
412 _mesa_lookup_enum_by_nr( mode
));
414 if (ctx
->DrawBuffer
->Name
== 0) {
415 radeonContextPtr radeon
= RADEON_CONTEXT(ctx
);
417 const GLboolean was_front_buffer_rendering
=
418 radeon
->is_front_buffer_rendering
;
420 radeon
->is_front_buffer_rendering
= (mode
== GL_FRONT_LEFT
) ||
423 /* If we weren't front-buffer rendering before but we are now, make sure
424 * that the front-buffer has actually been allocated.
426 if (!was_front_buffer_rendering
&& radeon
->is_front_buffer_rendering
) {
427 radeon_update_renderbuffers(radeon
->dri
.context
,
428 radeon
->dri
.context
->driDrawablePriv
, GL_FALSE
);
432 radeon_draw_buffer(ctx
, ctx
->DrawBuffer
);
435 void radeonReadBuffer( struct gl_context
*ctx
, GLenum mode
)
437 if ((ctx
->DrawBuffer
!= NULL
) && (ctx
->DrawBuffer
->Name
== 0)) {
438 struct radeon_context
*const rmesa
= RADEON_CONTEXT(ctx
);
439 const GLboolean was_front_buffer_reading
= rmesa
->is_front_buffer_reading
;
440 rmesa
->is_front_buffer_reading
= (mode
== GL_FRONT_LEFT
)
441 || (mode
== GL_FRONT
);
443 if (!was_front_buffer_reading
&& rmesa
->is_front_buffer_reading
) {
444 radeon_update_renderbuffers(rmesa
->dri
.context
,
445 rmesa
->dri
.context
->driReadablePriv
, GL_FALSE
);
448 /* nothing, until we implement h/w glRead/CopyPixels or CopyTexImage */
449 if (ctx
->ReadBuffer
== ctx
->DrawBuffer
) {
450 /* This will update FBO completeness status.
451 * A framebuffer will be incomplete if the GL_READ_BUFFER setting
452 * refers to a missing renderbuffer. Calling glReadBuffer can set
453 * that straight and can make the drawing buffer complete.
455 radeon_draw_buffer(ctx
, ctx
->DrawBuffer
);
459 void radeon_window_moved(radeonContextPtr radeon
)
461 /* Cliprects has to be updated before doing anything else */
462 radeonSetCliprects(radeon
);
465 void radeon_viewport(struct gl_context
*ctx
, GLint x
, GLint y
, GLsizei width
, GLsizei height
)
467 radeonContextPtr radeon
= RADEON_CONTEXT(ctx
);
468 __DRIcontext
*driContext
= radeon
->dri
.context
;
469 void (*old_viewport
)(struct gl_context
*ctx
, GLint x
, GLint y
,
470 GLsizei w
, GLsizei h
);
472 if (!driContext
->driScreenPriv
->dri2
.enabled
)
475 if (ctx
->DrawBuffer
->Name
== 0) {
476 if (radeon
->is_front_buffer_rendering
) {
477 ctx
->Driver
.Flush(ctx
);
479 radeon_update_renderbuffers(driContext
, driContext
->driDrawablePriv
, GL_FALSE
);
480 if (driContext
->driDrawablePriv
!= driContext
->driReadablePriv
)
481 radeon_update_renderbuffers(driContext
, driContext
->driReadablePriv
, GL_FALSE
);
484 old_viewport
= ctx
->Driver
.Viewport
;
485 ctx
->Driver
.Viewport
= NULL
;
486 radeon_window_moved(radeon
);
487 radeon_draw_buffer(ctx
, radeon
->glCtx
->DrawBuffer
);
488 ctx
->Driver
.Viewport
= old_viewport
;
491 static void radeon_print_state_atom(radeonContextPtr radeon
, struct radeon_state_atom
*state
)
493 int i
, j
, reg
, count
;
496 if (!radeon_is_debug_enabled(RADEON_STATE
, RADEON_VERBOSE
) )
499 dwords
= (*state
->check
) (radeon
->glCtx
, state
);
501 fprintf(stderr
, " emit %s %d/%d\n", state
->name
, dwords
, state
->cmd_size
);
503 if (state
->cmd
&& radeon_is_debug_enabled(RADEON_STATE
, RADEON_TRACE
)) {
504 if (dwords
> state
->cmd_size
)
505 dwords
= state
->cmd_size
;
506 for (i
= 0; i
< dwords
;) {
507 packet0
= state
->cmd
[i
];
508 reg
= (packet0
& 0x1FFF) << 2;
509 count
= ((packet0
& 0x3FFF0000) >> 16) + 1;
510 fprintf(stderr
, " %s[%d]: cmdpacket0 (first reg=0x%04x, count=%d)\n",
511 state
->name
, i
, reg
, count
);
513 for (j
= 0; j
< count
&& i
< dwords
; j
++) {
514 fprintf(stderr
, " %s[%d]: 0x%04x = %08x\n",
515 state
->name
, i
, reg
, state
->cmd
[i
]);
524 * Count total size for next state emit.
526 GLuint
radeonCountStateEmitSize(radeonContextPtr radeon
)
528 struct radeon_state_atom
*atom
;
530 /* check if we are going to emit full state */
532 if (radeon
->cmdbuf
.cs
->cdw
&& !radeon
->hw
.all_dirty
) {
533 if (!radeon
->hw
.is_dirty
)
535 foreach(atom
, &radeon
->hw
.atomlist
) {
537 const GLuint atom_size
= atom
->check(radeon
->glCtx
, atom
);
539 if (RADEON_CMDBUF
&& atom_size
) {
540 radeon_print_state_atom(radeon
, atom
);
545 foreach(atom
, &radeon
->hw
.atomlist
) {
546 const GLuint atom_size
= atom
->check(radeon
->glCtx
, atom
);
548 if (RADEON_CMDBUF
&& atom_size
) {
549 radeon_print_state_atom(radeon
, atom
);
555 radeon_print(RADEON_STATE
, RADEON_NORMAL
, "%s %u\n", __func__
, dwords
);
559 static INLINE
void radeon_emit_atom(radeonContextPtr radeon
, struct radeon_state_atom
*atom
)
561 BATCH_LOCALS(radeon
);
564 dwords
= (*atom
->check
) (radeon
->glCtx
, atom
);
567 radeon_print_state_atom(radeon
, atom
);
570 (*atom
->emit
)(radeon
->glCtx
, atom
);
572 BEGIN_BATCH_NO_AUTOSTATE(dwords
);
573 OUT_BATCH_TABLE(atom
->cmd
, dwords
);
576 atom
->dirty
= GL_FALSE
;
579 radeon_print(RADEON_STATE
, RADEON_VERBOSE
, " skip state %s\n", atom
->name
);
584 static INLINE
void radeonEmitAtoms(radeonContextPtr radeon
, GLboolean emitAll
)
586 struct radeon_state_atom
*atom
;
588 if (radeon
->vtbl
.pre_emit_atoms
)
589 radeon
->vtbl
.pre_emit_atoms(radeon
);
591 /* Emit actual atoms */
592 if (radeon
->hw
.all_dirty
|| emitAll
) {
593 foreach(atom
, &radeon
->hw
.atomlist
)
594 radeon_emit_atom( radeon
, atom
);
596 foreach(atom
, &radeon
->hw
.atomlist
) {
598 radeon_emit_atom( radeon
, atom
);
605 static GLboolean
radeon_revalidate_bos(struct gl_context
*ctx
)
607 radeonContextPtr radeon
= RADEON_CONTEXT(ctx
);
610 ret
= radeon_cs_space_check(radeon
->cmdbuf
.cs
);
611 if (ret
== RADEON_CS_SPACE_FLUSH
)
616 void radeonEmitState(radeonContextPtr radeon
)
618 radeon_print(RADEON_STATE
, RADEON_NORMAL
, "%s\n", __FUNCTION__
);
620 if (radeon
->vtbl
.pre_emit_state
)
621 radeon
->vtbl
.pre_emit_state(radeon
);
623 /* this code used to return here but now it emits zbs */
624 if (radeon
->cmdbuf
.cs
->cdw
&& !radeon
->hw
.is_dirty
&& !radeon
->hw
.all_dirty
)
627 if (!radeon
->cmdbuf
.cs
->cdw
) {
628 if (RADEON_DEBUG
& RADEON_STATE
)
629 fprintf(stderr
, "Begin reemit state\n");
631 radeonEmitAtoms(radeon
, GL_TRUE
);
634 if (RADEON_DEBUG
& RADEON_STATE
)
635 fprintf(stderr
, "Begin dirty state\n");
637 radeonEmitAtoms(radeon
, GL_FALSE
);
640 radeon
->hw
.is_dirty
= GL_FALSE
;
641 radeon
->hw
.all_dirty
= GL_FALSE
;
645 void radeonFlush(struct gl_context
*ctx
)
647 radeonContextPtr radeon
= RADEON_CONTEXT(ctx
);
648 if (RADEON_DEBUG
& RADEON_IOCTL
)
649 fprintf(stderr
, "%s %d\n", __FUNCTION__
, radeon
->cmdbuf
.cs
->cdw
);
651 /* okay if we have no cmds in the buffer &&
652 we have no DMA flush &&
653 we have no DMA buffer allocated.
654 then no point flushing anything at all.
656 if (!radeon
->dma
.flush
&& !radeon
->cmdbuf
.cs
->cdw
&& is_empty_list(&radeon
->dma
.reserved
))
659 if (radeon
->dma
.flush
)
660 radeon
->dma
.flush( ctx
);
662 if (radeon
->cmdbuf
.cs
->cdw
)
663 rcommonFlushCmdBuf(radeon
, __FUNCTION__
);
666 if ((ctx
->DrawBuffer
->Name
== 0) && radeon
->front_buffer_dirty
) {
667 __DRIscreen
*const screen
= radeon
->radeonScreen
->driScreen
;
669 if (screen
->dri2
.loader
&& (screen
->dri2
.loader
->base
.version
>= 2)
670 && (screen
->dri2
.loader
->flushFrontBuffer
!= NULL
)) {
671 __DRIdrawable
* drawable
= radeon_get_drawable(radeon
);
673 /* We set the dirty bit in radeon_prepare_render() if we're
674 * front buffer rendering once we get there.
676 radeon
->front_buffer_dirty
= GL_FALSE
;
678 (*screen
->dri2
.loader
->flushFrontBuffer
)(drawable
, drawable
->loaderPrivate
);
683 /* Make sure all commands have been sent to the hardware and have
684 * completed processing.
686 void radeonFinish(struct gl_context
* ctx
)
688 radeonContextPtr radeon
= RADEON_CONTEXT(ctx
);
689 struct gl_framebuffer
*fb
= ctx
->DrawBuffer
;
690 struct radeon_renderbuffer
*rrb
;
693 if (ctx
->Driver
.Flush
)
694 ctx
->Driver
.Flush(ctx
); /* +r6/r7 */
696 for (i
= 0; i
< fb
->_NumColorDrawBuffers
; i
++) {
697 struct radeon_renderbuffer
*rrb
;
698 rrb
= radeon_renderbuffer(fb
->_ColorDrawBuffers
[i
]);
700 radeon_bo_wait(rrb
->bo
);
702 rrb
= radeon_get_depthbuffer(radeon
);
704 radeon_bo_wait(rrb
->bo
);
709 * Send the current command buffer via ioctl to the hardware.
711 int rcommonFlushCmdBufLocked(radeonContextPtr rmesa
, const char *caller
)
715 if (rmesa
->cmdbuf
.flushing
) {
716 fprintf(stderr
, "Recursive call into r300FlushCmdBufLocked!\n");
719 rmesa
->cmdbuf
.flushing
= 1;
721 if (RADEON_DEBUG
& RADEON_IOCTL
) {
722 fprintf(stderr
, "%s from %s\n", __FUNCTION__
, caller
);
725 radeonEmitQueryEnd(rmesa
->glCtx
);
727 if (rmesa
->cmdbuf
.cs
->cdw
) {
728 ret
= radeon_cs_emit(rmesa
->cmdbuf
.cs
);
729 rmesa
->hw
.all_dirty
= GL_TRUE
;
731 radeon_cs_erase(rmesa
->cmdbuf
.cs
);
732 rmesa
->cmdbuf
.flushing
= 0;
734 if (radeon_revalidate_bos(rmesa
->glCtx
) == GL_FALSE
) {
735 fprintf(stderr
,"failed to revalidate buffers\n");
741 int rcommonFlushCmdBuf(radeonContextPtr rmesa
, const char *caller
)
745 radeonReleaseDmaRegions(rmesa
);
747 ret
= rcommonFlushCmdBufLocked(rmesa
, caller
);
750 fprintf(stderr
, "drmRadeonCmdBuffer: %d. Kernel failed to "
751 "parse or rejected command stream. See dmesg "
752 "for more info.\n", ret
);
760 * Make sure that enough space is available in the command buffer
761 * by flushing if necessary.
763 * \param dwords The number of dwords we need to be free on the command buffer
765 GLboolean
rcommonEnsureCmdBufSpace(radeonContextPtr rmesa
, int dwords
, const char *caller
)
767 if ((rmesa
->cmdbuf
.cs
->cdw
+ dwords
+ 128) > rmesa
->cmdbuf
.size
768 || radeon_cs_need_flush(rmesa
->cmdbuf
.cs
)) {
769 /* If we try to flush empty buffer there is too big rendering operation. */
770 assert(rmesa
->cmdbuf
.cs
->cdw
);
771 rcommonFlushCmdBuf(rmesa
, caller
);
777 void rcommonInitCmdBuf(radeonContextPtr rmesa
)
780 struct drm_radeon_gem_info mminfo
= { 0 };
782 /* Initialize command buffer */
783 size
= 256 * driQueryOptioni(&rmesa
->optionCache
,
784 "command_buffer_size");
785 if (size
< 2 * rmesa
->hw
.max_state_size
) {
786 size
= 2 * rmesa
->hw
.max_state_size
+ 65535;
791 radeon_print(RADEON_CS
, RADEON_VERBOSE
,
792 "sizeof(drm_r300_cmd_header_t)=%zd\n", sizeof(drm_r300_cmd_header_t
));
793 radeon_print(RADEON_CS
, RADEON_VERBOSE
,
794 "sizeof(drm_radeon_cmd_buffer_t)=%zd\n", sizeof(drm_radeon_cmd_buffer_t
));
795 radeon_print(RADEON_CS
, RADEON_VERBOSE
,
796 "Allocating %d bytes command buffer (max state is %d bytes)\n",
797 size
* 4, rmesa
->hw
.max_state_size
* 4);
800 radeon_cs_manager_gem_ctor(rmesa
->radeonScreen
->driScreen
->fd
);
801 if (rmesa
->cmdbuf
.csm
== NULL
) {
802 /* FIXME: fatal error */
805 rmesa
->cmdbuf
.cs
= radeon_cs_create(rmesa
->cmdbuf
.csm
, size
);
806 assert(rmesa
->cmdbuf
.cs
!= NULL
);
807 rmesa
->cmdbuf
.size
= size
;
809 radeon_cs_space_set_flush(rmesa
->cmdbuf
.cs
,
810 (void (*)(void *))rmesa
->glCtx
->Driver
.Flush
, rmesa
->glCtx
);
813 if (!drmCommandWriteRead(rmesa
->dri
.fd
, DRM_RADEON_GEM_INFO
,
814 &mminfo
, sizeof(mminfo
))) {
815 radeon_cs_set_limit(rmesa
->cmdbuf
.cs
, RADEON_GEM_DOMAIN_VRAM
,
816 mminfo
.vram_visible
);
817 radeon_cs_set_limit(rmesa
->cmdbuf
.cs
, RADEON_GEM_DOMAIN_GTT
,
823 * Destroy the command buffer
825 void rcommonDestroyCmdBuf(radeonContextPtr rmesa
)
827 radeon_cs_destroy(rmesa
->cmdbuf
.cs
);
828 radeon_cs_manager_gem_dtor(rmesa
->cmdbuf
.csm
);
831 void rcommonBeginBatch(radeonContextPtr rmesa
, int n
,
834 const char *function
,
837 radeon_cs_begin(rmesa
->cmdbuf
.cs
, n
, file
, function
, line
);
839 radeon_print(RADEON_CS
, RADEON_VERBOSE
, "BEGIN_BATCH(%d) at %d, from %s:%i\n",
840 n
, rmesa
->cmdbuf
.cs
->cdw
, function
, line
);
844 void radeonUserClear(struct gl_context
*ctx
, GLuint mask
)
846 _mesa_meta_Clear(ctx
, mask
);