1 /**************************************************************************
3 Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
5 The Weather Channel (TM) funded Tungsten Graphics to develop the
6 initial release of the Radeon 8500 driver under the XFree86 license.
7 This notice must be preserved.
9 Permission is hereby granted, free of charge, to any person obtaining
10 a copy of this software and associated documentation files (the
11 "Software"), to deal in the Software without restriction, including
12 without limitation the rights to use, copy, modify, merge, publish,
13 distribute, sublicense, and/or sell copies of the Software, and to
14 permit persons to whom the Software is furnished to do so, subject to
15 the following conditions:
17 The above copyright notice and this permission notice (including the
18 next paragraph) shall be included in all copies or substantial
19 portions of the Software.
21 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
22 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
23 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
24 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
25 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
26 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
27 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
29 **************************************************************************/
33 * Keith Whitwell <keith@tungstengraphics.com>
37 - Scissor implementation
38 - buffer swap/copy ioctls
41 - cmdbuffer management
45 #include "main/glheader.h"
46 #include "main/imports.h"
47 #include "main/context.h"
48 #include "main/enums.h"
49 #include "main/framebuffer.h"
50 #include "main/renderbuffer.h"
51 #include "drivers/common/meta.h"
53 #include "radeon_common.h"
54 #include "radeon_drm.h"
55 #include "radeon_queryobj.h"
58 * Enable verbose debug output for emit code.
61 2 also print state values
63 #define RADEON_CMDBUF 0
65 /* =============================================================
69 static GLboolean
intersect_rect(drm_clip_rect_t
* out
,
70 drm_clip_rect_t
* a
, drm_clip_rect_t
* b
)
81 if (out
->x1
>= out
->x2
)
83 if (out
->y1
>= out
->y2
)
88 void radeonRecalcScissorRects(radeonContextPtr radeon
)
90 struct gl_context
*ctx
= radeon
->glCtx
;
91 drm_clip_rect_t bounds
;
95 bounds
.x2
= ctx
->DrawBuffer
->Width
;
96 bounds
.y2
= ctx
->DrawBuffer
->Height
;
98 if (!radeon
->state
.scissor
.numAllocedClipRects
) {
99 radeon
->state
.scissor
.numAllocedClipRects
= 1;
100 radeon
->state
.scissor
.pClipRects
=
101 MALLOC(sizeof(drm_clip_rect_t
));
103 if (radeon
->state
.scissor
.pClipRects
== NULL
) {
104 radeon
->state
.scissor
.numAllocedClipRects
= 0;
109 radeon
->state
.scissor
.numClipRects
= 0;
110 if (intersect_rect(radeon
->state
.scissor
.pClipRects
,
112 &radeon
->state
.scissor
.rect
)) {
113 radeon
->state
.scissor
.numClipRects
= 1;
116 if (radeon
->vtbl
.update_scissor
)
117 radeon
->vtbl
.update_scissor(radeon
->glCtx
);
121 * Update cliprects and scissors.
123 void radeonSetCliprects(radeonContextPtr radeon
)
125 __DRIdrawable
*const drawable
= radeon_get_drawable(radeon
);
126 __DRIdrawable
*const readable
= radeon_get_readable(radeon
);
128 if(drawable
== NULL
&& readable
== NULL
)
131 struct radeon_framebuffer
*const draw_rfb
= drawable
->driverPrivate
;
132 struct radeon_framebuffer
*const read_rfb
= readable
->driverPrivate
;
134 if ((draw_rfb
->base
.Width
!= drawable
->w
) ||
135 (draw_rfb
->base
.Height
!= drawable
->h
)) {
136 _mesa_resize_framebuffer(radeon
->glCtx
, &draw_rfb
->base
,
137 drawable
->w
, drawable
->h
);
138 draw_rfb
->base
.Initialized
= GL_TRUE
;
141 if (drawable
!= readable
) {
142 if ((read_rfb
->base
.Width
!= readable
->w
) ||
143 (read_rfb
->base
.Height
!= readable
->h
)) {
144 _mesa_resize_framebuffer(radeon
->glCtx
, &read_rfb
->base
,
145 readable
->w
, readable
->h
);
146 read_rfb
->base
.Initialized
= GL_TRUE
;
150 if (radeon
->state
.scissor
.enabled
)
151 radeonRecalcScissorRects(radeon
);
157 void radeonUpdateScissor( struct gl_context
*ctx
)
159 radeonContextPtr rmesa
= RADEON_CONTEXT(ctx
);
160 GLint x
= ctx
->Scissor
.X
, y
= ctx
->Scissor
.Y
;
161 GLsizei w
= ctx
->Scissor
.Width
, h
= ctx
->Scissor
.Height
;
163 int min_x
, min_y
, max_x
, max_y
;
165 if (!ctx
->DrawBuffer
)
168 max_x
= ctx
->DrawBuffer
->Width
- 1;
169 max_y
= ctx
->DrawBuffer
->Height
- 1;
171 if ( !ctx
->DrawBuffer
->Name
) {
173 y1
= ctx
->DrawBuffer
->Height
- (y
+ h
);
184 rmesa
->state
.scissor
.rect
.x1
= CLAMP(x1
, min_x
, max_x
);
185 rmesa
->state
.scissor
.rect
.y1
= CLAMP(y1
, min_y
, max_y
);
186 rmesa
->state
.scissor
.rect
.x2
= CLAMP(x2
, min_x
, max_x
);
187 rmesa
->state
.scissor
.rect
.y2
= CLAMP(y2
, min_y
, max_y
);
189 radeonRecalcScissorRects( rmesa
);
192 /* =============================================================
196 void radeonScissor(struct gl_context
* ctx
, GLint x
, GLint y
, GLsizei w
, GLsizei h
)
198 radeonContextPtr radeon
= RADEON_CONTEXT(ctx
);
199 if (ctx
->Scissor
.Enabled
) {
200 /* We don't pipeline cliprect changes */
201 radeon_firevertices(radeon
);
202 radeonUpdateScissor(ctx
);
206 /* ================================================================
207 * SwapBuffers with client-side throttling
210 uint32_t radeonGetAge(radeonContextPtr radeon
)
212 drm_radeon_getparam_t gp
;
216 gp
.param
= RADEON_PARAM_LAST_CLEAR
;
217 gp
.value
= (int *)&age
;
218 ret
= drmCommandWriteRead(radeon
->dri
.fd
, DRM_RADEON_GETPARAM
,
221 fprintf(stderr
, "%s: drmRadeonGetParam: %d\n", __FUNCTION__
,
230 * Check if we're about to draw into the front color buffer.
231 * If so, set the intel->front_buffer_dirty field to true.
234 radeon_check_front_buffer_rendering(struct gl_context
*ctx
)
236 radeonContextPtr radeon
= RADEON_CONTEXT(ctx
);
237 const struct gl_framebuffer
*fb
= ctx
->DrawBuffer
;
240 /* drawing to window system buffer */
241 if (fb
->_NumColorDrawBuffers
> 0) {
242 if (fb
->_ColorDrawBufferIndexes
[0] == BUFFER_FRONT_LEFT
) {
243 radeon
->front_buffer_dirty
= GL_TRUE
;
250 void radeon_draw_buffer(struct gl_context
*ctx
, struct gl_framebuffer
*fb
)
252 radeonContextPtr radeon
= RADEON_CONTEXT(ctx
);
253 struct radeon_renderbuffer
*rrbDepth
= NULL
, *rrbStencil
= NULL
,
259 /* this can happen during the initial context initialization */
263 /* radeons only handle 1 color draw so far */
264 if (fb
->_NumColorDrawBuffers
!= 1) {
265 radeon
->vtbl
.fallback(ctx
, RADEON_FALLBACK_DRAW_BUFFER
, GL_TRUE
);
269 /* Do this here, note core Mesa, since this function is called from
270 * many places within the driver.
272 if (ctx
->NewState
& (_NEW_BUFFERS
| _NEW_COLOR
| _NEW_PIXEL
)) {
273 /* this updates the DrawBuffer->_NumColorDrawBuffers fields, etc */
274 _mesa_update_framebuffer(ctx
);
275 /* this updates the DrawBuffer's Width/Height if it's a FBO */
276 _mesa_update_draw_buffer_bounds(ctx
);
279 if (fb
->_Status
!= GL_FRAMEBUFFER_COMPLETE_EXT
) {
280 /* this may occur when we're called by glBindFrameBuffer() during
281 * the process of someone setting up renderbuffers, etc.
283 /*_mesa_debug(ctx, "DrawBuffer: incomplete user FBO\n");*/
288 ;/* do something depthy/stencily TODO */
293 if (fb
->_ColorDrawBufferIndexes
[0] == BUFFER_FRONT_LEFT
) {
294 rrbColor
= radeon_renderbuffer(fb
->Attachment
[BUFFER_FRONT_LEFT
].Renderbuffer
);
295 radeon
->front_cliprects
= GL_TRUE
;
297 rrbColor
= radeon_renderbuffer(fb
->Attachment
[BUFFER_BACK_LEFT
].Renderbuffer
);
298 radeon
->front_cliprects
= GL_FALSE
;
301 /* user FBO in theory */
302 struct radeon_renderbuffer
*rrb
;
303 rrb
= radeon_renderbuffer(fb
->_ColorDrawBuffers
[0]);
305 offset
= rrb
->draw_offset
;
310 if (rrbColor
== NULL
)
311 radeon
->vtbl
.fallback(ctx
, RADEON_FALLBACK_DRAW_BUFFER
, GL_TRUE
);
313 radeon
->vtbl
.fallback(ctx
, RADEON_FALLBACK_DRAW_BUFFER
, GL_FALSE
);
316 if (fb
->Attachment
[BUFFER_DEPTH
].Renderbuffer
) {
317 rrbDepth
= radeon_renderbuffer(fb
->Attachment
[BUFFER_DEPTH
].Renderbuffer
);
318 if (rrbDepth
&& rrbDepth
->bo
) {
319 radeon
->vtbl
.fallback(ctx
, RADEON_FALLBACK_DEPTH_BUFFER
, GL_FALSE
);
321 radeon
->vtbl
.fallback(ctx
, RADEON_FALLBACK_DEPTH_BUFFER
, GL_TRUE
);
324 radeon
->vtbl
.fallback(ctx
, RADEON_FALLBACK_DEPTH_BUFFER
, GL_FALSE
);
328 if (fb
->Attachment
[BUFFER_STENCIL
].Renderbuffer
) {
329 rrbStencil
= radeon_renderbuffer(fb
->Attachment
[BUFFER_STENCIL
].Renderbuffer
);
330 if (rrbStencil
&& rrbStencil
->bo
) {
331 radeon
->vtbl
.fallback(ctx
, RADEON_FALLBACK_STENCIL_BUFFER
, GL_FALSE
);
332 /* need to re-compute stencil hw state */
334 rrbDepth
= rrbStencil
;
336 radeon
->vtbl
.fallback(ctx
, RADEON_FALLBACK_STENCIL_BUFFER
, GL_TRUE
);
339 radeon
->vtbl
.fallback(ctx
, RADEON_FALLBACK_STENCIL_BUFFER
, GL_FALSE
);
340 if (ctx
->Driver
.Enable
!= NULL
)
341 ctx
->Driver
.Enable(ctx
, GL_STENCIL_TEST
, ctx
->Stencil
.Enabled
);
343 ctx
->NewState
|= _NEW_STENCIL
;
346 /* Update culling direction which changes depending on the
347 * orientation of the buffer:
349 if (ctx
->Driver
.FrontFace
)
350 ctx
->Driver
.FrontFace(ctx
, ctx
->Polygon
.FrontFace
);
352 ctx
->NewState
|= _NEW_POLYGON
;
355 * Update depth test state
357 if (ctx
->Driver
.Enable
) {
358 ctx
->Driver
.Enable(ctx
, GL_DEPTH_TEST
,
359 (ctx
->Depth
.Test
&& fb
->Visual
.depthBits
> 0));
360 /* Need to update the derived ctx->Stencil._Enabled first */
361 ctx
->Driver
.Enable(ctx
, GL_STENCIL_TEST
,
362 (ctx
->Stencil
.Enabled
&& fb
->Visual
.stencilBits
> 0));
364 ctx
->NewState
|= (_NEW_DEPTH
| _NEW_STENCIL
);
367 _mesa_reference_renderbuffer(&radeon
->state
.depth
.rb
, &rrbDepth
->base
);
368 _mesa_reference_renderbuffer(&radeon
->state
.color
.rb
, &rrbColor
->base
);
369 radeon
->state
.color
.draw_offset
= offset
;
372 /* update viewport since it depends on window size */
373 if (ctx
->Driver
.Viewport
) {
374 ctx
->Driver
.Viewport(ctx
, ctx
->Viewport
.X
, ctx
->Viewport
.Y
,
375 ctx
->Viewport
.Width
, ctx
->Viewport
.Height
);
380 ctx
->NewState
|= _NEW_VIEWPORT
;
382 /* Set state we know depends on drawable parameters:
384 radeonUpdateScissor(ctx
);
385 radeon
->NewGLState
|= _NEW_SCISSOR
;
387 if (ctx
->Driver
.DepthRange
)
388 ctx
->Driver
.DepthRange(ctx
,
392 /* Update culling direction which changes depending on the
393 * orientation of the buffer:
395 if (ctx
->Driver
.FrontFace
)
396 ctx
->Driver
.FrontFace(ctx
, ctx
->Polygon
.FrontFace
);
398 ctx
->NewState
|= _NEW_POLYGON
;
402 * Called via glDrawBuffer.
404 void radeonDrawBuffer( struct gl_context
*ctx
, GLenum mode
)
406 if (RADEON_DEBUG
& RADEON_DRI
)
407 fprintf(stderr
, "%s %s\n", __FUNCTION__
,
408 _mesa_lookup_enum_by_nr( mode
));
410 if (ctx
->DrawBuffer
->Name
== 0) {
411 radeonContextPtr radeon
= RADEON_CONTEXT(ctx
);
413 const GLboolean was_front_buffer_rendering
=
414 radeon
->is_front_buffer_rendering
;
416 radeon
->is_front_buffer_rendering
= (mode
== GL_FRONT_LEFT
) ||
419 /* If we weren't front-buffer rendering before but we are now, make sure
420 * that the front-buffer has actually been allocated.
422 if (!was_front_buffer_rendering
&& radeon
->is_front_buffer_rendering
) {
423 radeon_update_renderbuffers(radeon
->dri
.context
,
424 radeon
->dri
.context
->driDrawablePriv
, GL_FALSE
);
428 radeon_draw_buffer(ctx
, ctx
->DrawBuffer
);
431 void radeonReadBuffer( struct gl_context
*ctx
, GLenum mode
)
433 if ((ctx
->DrawBuffer
!= NULL
) && (ctx
->DrawBuffer
->Name
== 0)) {
434 struct radeon_context
*const rmesa
= RADEON_CONTEXT(ctx
);
435 const GLboolean was_front_buffer_reading
= rmesa
->is_front_buffer_reading
;
436 rmesa
->is_front_buffer_reading
= (mode
== GL_FRONT_LEFT
)
437 || (mode
== GL_FRONT
);
439 if (!was_front_buffer_reading
&& rmesa
->is_front_buffer_reading
) {
440 radeon_update_renderbuffers(rmesa
->dri
.context
,
441 rmesa
->dri
.context
->driReadablePriv
, GL_FALSE
);
444 /* nothing, until we implement h/w glRead/CopyPixels or CopyTexImage */
445 if (ctx
->ReadBuffer
== ctx
->DrawBuffer
) {
446 /* This will update FBO completeness status.
447 * A framebuffer will be incomplete if the GL_READ_BUFFER setting
448 * refers to a missing renderbuffer. Calling glReadBuffer can set
449 * that straight and can make the drawing buffer complete.
451 radeon_draw_buffer(ctx
, ctx
->DrawBuffer
);
455 void radeon_window_moved(radeonContextPtr radeon
)
457 /* Cliprects has to be updated before doing anything else */
458 radeonSetCliprects(radeon
);
461 void radeon_viewport(struct gl_context
*ctx
, GLint x
, GLint y
, GLsizei width
, GLsizei height
)
463 radeonContextPtr radeon
= RADEON_CONTEXT(ctx
);
464 __DRIcontext
*driContext
= radeon
->dri
.context
;
465 void (*old_viewport
)(struct gl_context
*ctx
, GLint x
, GLint y
,
466 GLsizei w
, GLsizei h
);
468 if (ctx
->DrawBuffer
->Name
== 0) {
469 if (radeon
->is_front_buffer_rendering
) {
470 ctx
->Driver
.Flush(ctx
);
472 radeon_update_renderbuffers(driContext
, driContext
->driDrawablePriv
, GL_FALSE
);
473 if (driContext
->driDrawablePriv
!= driContext
->driReadablePriv
)
474 radeon_update_renderbuffers(driContext
, driContext
->driReadablePriv
, GL_FALSE
);
477 old_viewport
= ctx
->Driver
.Viewport
;
478 ctx
->Driver
.Viewport
= NULL
;
479 radeon_window_moved(radeon
);
480 radeon_draw_buffer(ctx
, radeon
->glCtx
->DrawBuffer
);
481 ctx
->Driver
.Viewport
= old_viewport
;
484 static void radeon_print_state_atom(radeonContextPtr radeon
, struct radeon_state_atom
*state
)
486 int i
, j
, reg
, count
;
489 if (!radeon_is_debug_enabled(RADEON_STATE
, RADEON_VERBOSE
) )
492 dwords
= (*state
->check
) (radeon
->glCtx
, state
);
494 fprintf(stderr
, " emit %s %d/%d\n", state
->name
, dwords
, state
->cmd_size
);
496 if (state
->cmd
&& radeon_is_debug_enabled(RADEON_STATE
, RADEON_TRACE
)) {
497 if (dwords
> state
->cmd_size
)
498 dwords
= state
->cmd_size
;
499 for (i
= 0; i
< dwords
;) {
500 packet0
= state
->cmd
[i
];
501 reg
= (packet0
& 0x1FFF) << 2;
502 count
= ((packet0
& 0x3FFF0000) >> 16) + 1;
503 fprintf(stderr
, " %s[%d]: cmdpacket0 (first reg=0x%04x, count=%d)\n",
504 state
->name
, i
, reg
, count
);
506 for (j
= 0; j
< count
&& i
< dwords
; j
++) {
507 fprintf(stderr
, " %s[%d]: 0x%04x = %08x\n",
508 state
->name
, i
, reg
, state
->cmd
[i
]);
517 * Count total size for next state emit.
519 GLuint
radeonCountStateEmitSize(radeonContextPtr radeon
)
521 struct radeon_state_atom
*atom
;
523 /* check if we are going to emit full state */
525 if (radeon
->cmdbuf
.cs
->cdw
&& !radeon
->hw
.all_dirty
) {
526 if (!radeon
->hw
.is_dirty
)
528 foreach(atom
, &radeon
->hw
.atomlist
) {
530 const GLuint atom_size
= atom
->check(radeon
->glCtx
, atom
);
532 if (RADEON_CMDBUF
&& atom_size
) {
533 radeon_print_state_atom(radeon
, atom
);
538 foreach(atom
, &radeon
->hw
.atomlist
) {
539 const GLuint atom_size
= atom
->check(radeon
->glCtx
, atom
);
541 if (RADEON_CMDBUF
&& atom_size
) {
542 radeon_print_state_atom(radeon
, atom
);
548 radeon_print(RADEON_STATE
, RADEON_NORMAL
, "%s %u\n", __func__
, dwords
);
552 static INLINE
void radeon_emit_atom(radeonContextPtr radeon
, struct radeon_state_atom
*atom
)
554 BATCH_LOCALS(radeon
);
557 dwords
= (*atom
->check
) (radeon
->glCtx
, atom
);
560 radeon_print_state_atom(radeon
, atom
);
563 (*atom
->emit
)(radeon
->glCtx
, atom
);
565 BEGIN_BATCH_NO_AUTOSTATE(dwords
);
566 OUT_BATCH_TABLE(atom
->cmd
, dwords
);
569 atom
->dirty
= GL_FALSE
;
572 radeon_print(RADEON_STATE
, RADEON_VERBOSE
, " skip state %s\n", atom
->name
);
577 static INLINE
void radeonEmitAtoms(radeonContextPtr radeon
, GLboolean emitAll
)
579 struct radeon_state_atom
*atom
;
581 if (radeon
->vtbl
.pre_emit_atoms
)
582 radeon
->vtbl
.pre_emit_atoms(radeon
);
584 /* Emit actual atoms */
585 if (radeon
->hw
.all_dirty
|| emitAll
) {
586 foreach(atom
, &radeon
->hw
.atomlist
)
587 radeon_emit_atom( radeon
, atom
);
589 foreach(atom
, &radeon
->hw
.atomlist
) {
591 radeon_emit_atom( radeon
, atom
);
598 static GLboolean
radeon_revalidate_bos(struct gl_context
*ctx
)
600 radeonContextPtr radeon
= RADEON_CONTEXT(ctx
);
603 ret
= radeon_cs_space_check(radeon
->cmdbuf
.cs
);
604 if (ret
== RADEON_CS_SPACE_FLUSH
)
609 void radeonEmitState(radeonContextPtr radeon
)
611 radeon_print(RADEON_STATE
, RADEON_NORMAL
, "%s\n", __FUNCTION__
);
613 if (radeon
->vtbl
.pre_emit_state
)
614 radeon
->vtbl
.pre_emit_state(radeon
);
616 /* this code used to return here but now it emits zbs */
617 if (radeon
->cmdbuf
.cs
->cdw
&& !radeon
->hw
.is_dirty
&& !radeon
->hw
.all_dirty
)
620 if (!radeon
->cmdbuf
.cs
->cdw
) {
621 if (RADEON_DEBUG
& RADEON_STATE
)
622 fprintf(stderr
, "Begin reemit state\n");
624 radeonEmitAtoms(radeon
, GL_TRUE
);
627 if (RADEON_DEBUG
& RADEON_STATE
)
628 fprintf(stderr
, "Begin dirty state\n");
630 radeonEmitAtoms(radeon
, GL_FALSE
);
633 radeon
->hw
.is_dirty
= GL_FALSE
;
634 radeon
->hw
.all_dirty
= GL_FALSE
;
638 void radeonFlush(struct gl_context
*ctx
)
640 radeonContextPtr radeon
= RADEON_CONTEXT(ctx
);
641 if (RADEON_DEBUG
& RADEON_IOCTL
)
642 fprintf(stderr
, "%s %d\n", __FUNCTION__
, radeon
->cmdbuf
.cs
->cdw
);
644 /* okay if we have no cmds in the buffer &&
645 we have no DMA flush &&
646 we have no DMA buffer allocated.
647 then no point flushing anything at all.
649 if (!radeon
->dma
.flush
&& !radeon
->cmdbuf
.cs
->cdw
&& is_empty_list(&radeon
->dma
.reserved
))
652 if (radeon
->dma
.flush
)
653 radeon
->dma
.flush( ctx
);
655 if (radeon
->cmdbuf
.cs
->cdw
)
656 rcommonFlushCmdBuf(radeon
, __FUNCTION__
);
659 if ((ctx
->DrawBuffer
->Name
== 0) && radeon
->front_buffer_dirty
) {
660 __DRIscreen
*const screen
= radeon
->radeonScreen
->driScreen
;
662 if (screen
->dri2
.loader
&& (screen
->dri2
.loader
->base
.version
>= 2)
663 && (screen
->dri2
.loader
->flushFrontBuffer
!= NULL
)) {
664 __DRIdrawable
* drawable
= radeon_get_drawable(radeon
);
666 /* We set the dirty bit in radeon_prepare_render() if we're
667 * front buffer rendering once we get there.
669 radeon
->front_buffer_dirty
= GL_FALSE
;
671 (*screen
->dri2
.loader
->flushFrontBuffer
)(drawable
, drawable
->loaderPrivate
);
676 /* Make sure all commands have been sent to the hardware and have
677 * completed processing.
679 void radeonFinish(struct gl_context
* ctx
)
681 radeonContextPtr radeon
= RADEON_CONTEXT(ctx
);
682 struct gl_framebuffer
*fb
= ctx
->DrawBuffer
;
683 struct radeon_renderbuffer
*rrb
;
686 if (ctx
->Driver
.Flush
)
687 ctx
->Driver
.Flush(ctx
); /* +r6/r7 */
689 for (i
= 0; i
< fb
->_NumColorDrawBuffers
; i
++) {
690 struct radeon_renderbuffer
*rrb
;
691 rrb
= radeon_renderbuffer(fb
->_ColorDrawBuffers
[i
]);
693 radeon_bo_wait(rrb
->bo
);
695 rrb
= radeon_get_depthbuffer(radeon
);
697 radeon_bo_wait(rrb
->bo
);
702 * Send the current command buffer via ioctl to the hardware.
704 int rcommonFlushCmdBufLocked(radeonContextPtr rmesa
, const char *caller
)
708 if (rmesa
->cmdbuf
.flushing
) {
709 fprintf(stderr
, "Recursive call into r300FlushCmdBufLocked!\n");
712 rmesa
->cmdbuf
.flushing
= 1;
714 if (RADEON_DEBUG
& RADEON_IOCTL
) {
715 fprintf(stderr
, "%s from %s\n", __FUNCTION__
, caller
);
718 radeonEmitQueryEnd(rmesa
->glCtx
);
720 if (rmesa
->cmdbuf
.cs
->cdw
) {
721 ret
= radeon_cs_emit(rmesa
->cmdbuf
.cs
);
722 rmesa
->hw
.all_dirty
= GL_TRUE
;
724 radeon_cs_erase(rmesa
->cmdbuf
.cs
);
725 rmesa
->cmdbuf
.flushing
= 0;
727 if (radeon_revalidate_bos(rmesa
->glCtx
) == GL_FALSE
) {
728 fprintf(stderr
,"failed to revalidate buffers\n");
734 int rcommonFlushCmdBuf(radeonContextPtr rmesa
, const char *caller
)
738 radeonReleaseDmaRegions(rmesa
);
740 ret
= rcommonFlushCmdBufLocked(rmesa
, caller
);
743 fprintf(stderr
, "drmRadeonCmdBuffer: %d. Kernel failed to "
744 "parse or rejected command stream. See dmesg "
745 "for more info.\n", ret
);
753 * Make sure that enough space is available in the command buffer
754 * by flushing if necessary.
756 * \param dwords The number of dwords we need to be free on the command buffer
758 GLboolean
rcommonEnsureCmdBufSpace(radeonContextPtr rmesa
, int dwords
, const char *caller
)
760 if ((rmesa
->cmdbuf
.cs
->cdw
+ dwords
+ 128) > rmesa
->cmdbuf
.size
761 || radeon_cs_need_flush(rmesa
->cmdbuf
.cs
)) {
762 /* If we try to flush empty buffer there is too big rendering operation. */
763 assert(rmesa
->cmdbuf
.cs
->cdw
);
764 rcommonFlushCmdBuf(rmesa
, caller
);
770 void rcommonInitCmdBuf(radeonContextPtr rmesa
)
773 struct drm_radeon_gem_info mminfo
= { 0 };
775 /* Initialize command buffer */
776 size
= 256 * driQueryOptioni(&rmesa
->optionCache
,
777 "command_buffer_size");
778 if (size
< 2 * rmesa
->hw
.max_state_size
) {
779 size
= 2 * rmesa
->hw
.max_state_size
+ 65535;
784 radeon_print(RADEON_CS
, RADEON_VERBOSE
,
785 "sizeof(drm_r300_cmd_header_t)=%zd\n", sizeof(drm_r300_cmd_header_t
));
786 radeon_print(RADEON_CS
, RADEON_VERBOSE
,
787 "sizeof(drm_radeon_cmd_buffer_t)=%zd\n", sizeof(drm_radeon_cmd_buffer_t
));
788 radeon_print(RADEON_CS
, RADEON_VERBOSE
,
789 "Allocating %d bytes command buffer (max state is %d bytes)\n",
790 size
* 4, rmesa
->hw
.max_state_size
* 4);
793 radeon_cs_manager_gem_ctor(rmesa
->radeonScreen
->driScreen
->fd
);
794 if (rmesa
->cmdbuf
.csm
== NULL
) {
795 /* FIXME: fatal error */
798 rmesa
->cmdbuf
.cs
= radeon_cs_create(rmesa
->cmdbuf
.csm
, size
);
799 assert(rmesa
->cmdbuf
.cs
!= NULL
);
800 rmesa
->cmdbuf
.size
= size
;
802 radeon_cs_space_set_flush(rmesa
->cmdbuf
.cs
,
803 (void (*)(void *))rmesa
->glCtx
->Driver
.Flush
, rmesa
->glCtx
);
806 if (!drmCommandWriteRead(rmesa
->dri
.fd
, DRM_RADEON_GEM_INFO
,
807 &mminfo
, sizeof(mminfo
))) {
808 radeon_cs_set_limit(rmesa
->cmdbuf
.cs
, RADEON_GEM_DOMAIN_VRAM
,
809 mminfo
.vram_visible
);
810 radeon_cs_set_limit(rmesa
->cmdbuf
.cs
, RADEON_GEM_DOMAIN_GTT
,
816 * Destroy the command buffer
818 void rcommonDestroyCmdBuf(radeonContextPtr rmesa
)
820 radeon_cs_destroy(rmesa
->cmdbuf
.cs
);
821 radeon_cs_manager_gem_dtor(rmesa
->cmdbuf
.csm
);
824 void rcommonBeginBatch(radeonContextPtr rmesa
, int n
,
827 const char *function
,
830 radeon_cs_begin(rmesa
->cmdbuf
.cs
, n
, file
, function
, line
);
832 radeon_print(RADEON_CS
, RADEON_VERBOSE
, "BEGIN_BATCH(%d) at %d, from %s:%i\n",
833 n
, rmesa
->cmdbuf
.cs
->cdw
, function
, line
);
837 void radeonUserClear(struct gl_context
*ctx
, GLuint mask
)
839 _mesa_meta_Clear(ctx
, mask
);