1 /**************************************************************************
3 Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
5 The Weather Channel (TM) funded Tungsten Graphics to develop the
6 initial release of the Radeon 8500 driver under the XFree86 license.
7 This notice must be preserved.
9 Permission is hereby granted, free of charge, to any person obtaining
10 a copy of this software and associated documentation files (the
11 "Software"), to deal in the Software without restriction, including
12 without limitation the rights to use, copy, modify, merge, publish,
13 distribute, sublicense, and/or sell copies of the Software, and to
14 permit persons to whom the Software is furnished to do so, subject to
15 the following conditions:
17 The above copyright notice and this permission notice (including the
18 next paragraph) shall be included in all copies or substantial
19 portions of the Software.
21 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
22 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
23 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
24 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
25 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
26 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
27 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
29 **************************************************************************/
33 * Keith Whitwell <keith@tungstengraphics.com>
37 - Scissor implementation
38 - buffer swap/copy ioctls
41 - cmdbuffer management
45 #include "main/glheader.h"
46 #include "main/imports.h"
47 #include "main/context.h"
48 #include "main/enums.h"
49 #include "main/framebuffer.h"
50 #include "main/renderbuffer.h"
51 #include "drivers/common/meta.h"
55 #include "radeon_common.h"
56 #include "radeon_drm.h"
57 #include "radeon_queryobj.h"
60 * Enable verbose debug output for emit code.
63  *  2 also print state values
65 #define RADEON_CMDBUF 0
67 /* =============================================================
71 static GLboolean
intersect_rect(drm_clip_rect_t
* out
,
72 drm_clip_rect_t
* a
, drm_clip_rect_t
* b
)
83 if (out
->x1
>= out
->x2
)
85 if (out
->y1
>= out
->y2
)
90 void radeonRecalcScissorRects(radeonContextPtr radeon
)
92 struct gl_context
*ctx
= radeon
->glCtx
;
93 drm_clip_rect_t bounds
;
97 bounds
.x2
= ctx
->DrawBuffer
->Width
;
98 bounds
.x2
= ctx
->DrawBuffer
->Height
;
100 if (!radeon
->state
.scissor
.numAllocedClipRects
) {
101 radeon
->state
.scissor
.numAllocedClipRects
= 1;
102 radeon
->state
.scissor
.pClipRects
=
103 MALLOC(sizeof(drm_clip_rect_t
));
105 if (radeon
->state
.scissor
.pClipRects
== NULL
) {
106 radeon
->state
.scissor
.numAllocedClipRects
= 0;
111 radeon
->state
.scissor
.numClipRects
= 0;
112 if (intersect_rect(radeon
->state
.scissor
.pClipRects
,
114 &radeon
->state
.scissor
.rect
)) {
115 radeon
->state
.scissor
.numClipRects
= 1;
118 if (radeon
->vtbl
.update_scissor
)
119 radeon
->vtbl
.update_scissor(radeon
->glCtx
);
123 * Update cliprects and scissors.
125 void radeonSetCliprects(radeonContextPtr radeon
)
127 __DRIdrawable
*const drawable
= radeon_get_drawable(radeon
);
128 __DRIdrawable
*const readable
= radeon_get_readable(radeon
);
130 if(drawable
== NULL
&& readable
== NULL
)
133 struct radeon_framebuffer
*const draw_rfb
= drawable
->driverPrivate
;
134 struct radeon_framebuffer
*const read_rfb
= readable
->driverPrivate
;
136 if ((draw_rfb
->base
.Width
!= drawable
->w
) ||
137 (draw_rfb
->base
.Height
!= drawable
->h
)) {
138 _mesa_resize_framebuffer(radeon
->glCtx
, &draw_rfb
->base
,
139 drawable
->w
, drawable
->h
);
140 draw_rfb
->base
.Initialized
= GL_TRUE
;
143 if (drawable
!= readable
) {
144 if ((read_rfb
->base
.Width
!= readable
->w
) ||
145 (read_rfb
->base
.Height
!= readable
->h
)) {
146 _mesa_resize_framebuffer(radeon
->glCtx
, &read_rfb
->base
,
147 readable
->w
, readable
->h
);
148 read_rfb
->base
.Initialized
= GL_TRUE
;
152 if (radeon
->state
.scissor
.enabled
)
153 radeonRecalcScissorRects(radeon
);
159 void radeonUpdateScissor( struct gl_context
*ctx
)
161 radeonContextPtr rmesa
= RADEON_CONTEXT(ctx
);
162 GLint x
= ctx
->Scissor
.X
, y
= ctx
->Scissor
.Y
;
163 GLsizei w
= ctx
->Scissor
.Width
, h
= ctx
->Scissor
.Height
;
165 int min_x
, min_y
, max_x
, max_y
;
167 if (!ctx
->DrawBuffer
)
170 max_x
= ctx
->DrawBuffer
->Width
- 1;
171 max_y
= ctx
->DrawBuffer
->Height
- 1;
173 if ( !ctx
->DrawBuffer
->Name
) {
175 y1
= ctx
->DrawBuffer
->Height
- (y
+ h
);
186 rmesa
->state
.scissor
.rect
.x1
= CLAMP(x1
, min_x
, max_x
);
187 rmesa
->state
.scissor
.rect
.y1
= CLAMP(y1
, min_y
, max_y
);
188 rmesa
->state
.scissor
.rect
.x2
= CLAMP(x2
, min_x
, max_x
);
189 rmesa
->state
.scissor
.rect
.y2
= CLAMP(y2
, min_y
, max_y
);
191 radeonRecalcScissorRects( rmesa
);
194 /* =============================================================
198 void radeonScissor(struct gl_context
* ctx
, GLint x
, GLint y
, GLsizei w
, GLsizei h
)
200 radeonContextPtr radeon
= RADEON_CONTEXT(ctx
);
201 if (ctx
->Scissor
.Enabled
) {
202 /* We don't pipeline cliprect changes */
203 radeon_firevertices(radeon
);
204 radeonUpdateScissor(ctx
);
208 /* ================================================================
209 * SwapBuffers with client-side throttling
212 uint32_t radeonGetAge(radeonContextPtr radeon
)
214 drm_radeon_getparam_t gp
;
218 gp
.param
= RADEON_PARAM_LAST_CLEAR
;
219 gp
.value
= (int *)&age
;
220 ret
= drmCommandWriteRead(radeon
->dri
.fd
, DRM_RADEON_GETPARAM
,
223 fprintf(stderr
, "%s: drmRadeonGetParam: %d\n", __FUNCTION__
,
232 * Check if we're about to draw into the front color buffer.
233 * If so, set the intel->front_buffer_dirty field to true.
236 radeon_check_front_buffer_rendering(struct gl_context
*ctx
)
238 radeonContextPtr radeon
= RADEON_CONTEXT(ctx
);
239 const struct gl_framebuffer
*fb
= ctx
->DrawBuffer
;
242 /* drawing to window system buffer */
243 if (fb
->_NumColorDrawBuffers
> 0) {
244 if (fb
->_ColorDrawBufferIndexes
[0] == BUFFER_FRONT_LEFT
) {
245 radeon
->front_buffer_dirty
= GL_TRUE
;
252 void radeon_draw_buffer(struct gl_context
*ctx
, struct gl_framebuffer
*fb
)
254 radeonContextPtr radeon
= RADEON_CONTEXT(ctx
);
255 struct radeon_renderbuffer
*rrbDepth
= NULL
, *rrbStencil
= NULL
,
261 /* this can happen during the initial context initialization */
265 /* radeons only handle 1 color draw so far */
266 if (fb
->_NumColorDrawBuffers
!= 1) {
267 radeon
->vtbl
.fallback(ctx
, RADEON_FALLBACK_DRAW_BUFFER
, GL_TRUE
);
271 /* Do this here, note core Mesa, since this function is called from
272 * many places within the driver.
274 if (ctx
->NewState
& (_NEW_BUFFERS
| _NEW_COLOR
| _NEW_PIXEL
)) {
275 /* this updates the DrawBuffer->_NumColorDrawBuffers fields, etc */
276 _mesa_update_framebuffer(ctx
);
277 /* this updates the DrawBuffer's Width/Height if it's a FBO */
278 _mesa_update_draw_buffer_bounds(ctx
);
281 if (fb
->_Status
!= GL_FRAMEBUFFER_COMPLETE_EXT
) {
282 /* this may occur when we're called by glBindFrameBuffer() during
283 * the process of someone setting up renderbuffers, etc.
285 /*_mesa_debug(ctx, "DrawBuffer: incomplete user FBO\n");*/
290 ;/* do something depthy/stencily TODO */
295 if (fb
->_ColorDrawBufferIndexes
[0] == BUFFER_FRONT_LEFT
) {
296 rrbColor
= radeon_renderbuffer(fb
->Attachment
[BUFFER_FRONT_LEFT
].Renderbuffer
);
297 radeon
->front_cliprects
= GL_TRUE
;
299 rrbColor
= radeon_renderbuffer(fb
->Attachment
[BUFFER_BACK_LEFT
].Renderbuffer
);
300 radeon
->front_cliprects
= GL_FALSE
;
303 /* user FBO in theory */
304 struct radeon_renderbuffer
*rrb
;
305 rrb
= radeon_renderbuffer(fb
->_ColorDrawBuffers
[0]);
307 offset
= rrb
->draw_offset
;
310 radeon
->constant_cliprect
= GL_TRUE
;
313 if (rrbColor
== NULL
)
314 radeon
->vtbl
.fallback(ctx
, RADEON_FALLBACK_DRAW_BUFFER
, GL_TRUE
);
316 radeon
->vtbl
.fallback(ctx
, RADEON_FALLBACK_DRAW_BUFFER
, GL_FALSE
);
319 if (fb
->_DepthBuffer
&& fb
->_DepthBuffer
->Wrapped
) {
320 rrbDepth
= radeon_renderbuffer(fb
->_DepthBuffer
->Wrapped
);
321 if (rrbDepth
&& rrbDepth
->bo
) {
322 radeon
->vtbl
.fallback(ctx
, RADEON_FALLBACK_DEPTH_BUFFER
, GL_FALSE
);
324 radeon
->vtbl
.fallback(ctx
, RADEON_FALLBACK_DEPTH_BUFFER
, GL_TRUE
);
327 radeon
->vtbl
.fallback(ctx
, RADEON_FALLBACK_DEPTH_BUFFER
, GL_FALSE
);
331 if (fb
->_StencilBuffer
&& fb
->_StencilBuffer
->Wrapped
) {
332 rrbStencil
= radeon_renderbuffer(fb
->_StencilBuffer
->Wrapped
);
333 if (rrbStencil
&& rrbStencil
->bo
) {
334 radeon
->vtbl
.fallback(ctx
, RADEON_FALLBACK_STENCIL_BUFFER
, GL_FALSE
);
335 /* need to re-compute stencil hw state */
337 rrbDepth
= rrbStencil
;
339 radeon
->vtbl
.fallback(ctx
, RADEON_FALLBACK_STENCIL_BUFFER
, GL_TRUE
);
342 radeon
->vtbl
.fallback(ctx
, RADEON_FALLBACK_STENCIL_BUFFER
, GL_FALSE
);
343 if (ctx
->Driver
.Enable
!= NULL
)
344 ctx
->Driver
.Enable(ctx
, GL_STENCIL_TEST
, ctx
->Stencil
.Enabled
);
346 ctx
->NewState
|= _NEW_STENCIL
;
349 /* Update culling direction which changes depending on the
350 * orientation of the buffer:
352 if (ctx
->Driver
.FrontFace
)
353 ctx
->Driver
.FrontFace(ctx
, ctx
->Polygon
.FrontFace
);
355 ctx
->NewState
|= _NEW_POLYGON
;
358 * Update depth test state
360 if (ctx
->Driver
.Enable
) {
361 ctx
->Driver
.Enable(ctx
, GL_DEPTH_TEST
,
362 (ctx
->Depth
.Test
&& fb
->Visual
.depthBits
> 0));
363 /* Need to update the derived ctx->Stencil._Enabled first */
364 ctx
->Driver
.Enable(ctx
, GL_STENCIL_TEST
,
365 (ctx
->Stencil
.Enabled
&& fb
->Visual
.stencilBits
> 0));
367 ctx
->NewState
|= (_NEW_DEPTH
| _NEW_STENCIL
);
370 _mesa_reference_renderbuffer(&radeon
->state
.depth
.rb
, &rrbDepth
->base
);
371 _mesa_reference_renderbuffer(&radeon
->state
.color
.rb
, &rrbColor
->base
);
372 radeon
->state
.color
.draw_offset
= offset
;
375 /* update viewport since it depends on window size */
376 if (ctx
->Driver
.Viewport
) {
377 ctx
->Driver
.Viewport(ctx
, ctx
->Viewport
.X
, ctx
->Viewport
.Y
,
378 ctx
->Viewport
.Width
, ctx
->Viewport
.Height
);
383 ctx
->NewState
|= _NEW_VIEWPORT
;
385 /* Set state we know depends on drawable parameters:
387 radeonUpdateScissor(ctx
);
388 radeon
->NewGLState
|= _NEW_SCISSOR
;
390 if (ctx
->Driver
.DepthRange
)
391 ctx
->Driver
.DepthRange(ctx
,
395 /* Update culling direction which changes depending on the
396 * orientation of the buffer:
398 if (ctx
->Driver
.FrontFace
)
399 ctx
->Driver
.FrontFace(ctx
, ctx
->Polygon
.FrontFace
);
401 ctx
->NewState
|= _NEW_POLYGON
;
405 * Called via glDrawBuffer.
407 void radeonDrawBuffer( struct gl_context
*ctx
, GLenum mode
)
409 if (RADEON_DEBUG
& RADEON_DRI
)
410 fprintf(stderr
, "%s %s\n", __FUNCTION__
,
411 _mesa_lookup_enum_by_nr( mode
));
413 if (ctx
->DrawBuffer
->Name
== 0) {
414 radeonContextPtr radeon
= RADEON_CONTEXT(ctx
);
416 const GLboolean was_front_buffer_rendering
=
417 radeon
->is_front_buffer_rendering
;
419 radeon
->is_front_buffer_rendering
= (mode
== GL_FRONT_LEFT
) ||
422 /* If we weren't front-buffer rendering before but we are now, make sure
423 * that the front-buffer has actually been allocated.
425 if (!was_front_buffer_rendering
&& radeon
->is_front_buffer_rendering
) {
426 radeon_update_renderbuffers(radeon
->dri
.context
,
427 radeon
->dri
.context
->driDrawablePriv
, GL_FALSE
);
431 radeon_draw_buffer(ctx
, ctx
->DrawBuffer
);
434 void radeonReadBuffer( struct gl_context
*ctx
, GLenum mode
)
436 if ((ctx
->DrawBuffer
!= NULL
) && (ctx
->DrawBuffer
->Name
== 0)) {
437 struct radeon_context
*const rmesa
= RADEON_CONTEXT(ctx
);
438 const GLboolean was_front_buffer_reading
= rmesa
->is_front_buffer_reading
;
439 rmesa
->is_front_buffer_reading
= (mode
== GL_FRONT_LEFT
)
440 || (mode
== GL_FRONT
);
442 if (!was_front_buffer_reading
&& rmesa
->is_front_buffer_reading
) {
443 radeon_update_renderbuffers(rmesa
->dri
.context
,
444 rmesa
->dri
.context
->driReadablePriv
, GL_FALSE
);
447 /* nothing, until we implement h/w glRead/CopyPixels or CopyTexImage */
448 if (ctx
->ReadBuffer
== ctx
->DrawBuffer
) {
449 /* This will update FBO completeness status.
450 * A framebuffer will be incomplete if the GL_READ_BUFFER setting
451 * refers to a missing renderbuffer. Calling glReadBuffer can set
452 * that straight and can make the drawing buffer complete.
454 radeon_draw_buffer(ctx
, ctx
->DrawBuffer
);
458 void radeon_window_moved(radeonContextPtr radeon
)
460 /* Cliprects has to be updated before doing anything else */
461 radeonSetCliprects(radeon
);
464 void radeon_viewport(struct gl_context
*ctx
, GLint x
, GLint y
, GLsizei width
, GLsizei height
)
466 radeonContextPtr radeon
= RADEON_CONTEXT(ctx
);
467 __DRIcontext
*driContext
= radeon
->dri
.context
;
468 void (*old_viewport
)(struct gl_context
*ctx
, GLint x
, GLint y
,
469 GLsizei w
, GLsizei h
);
471 if (!driContext
->driScreenPriv
->dri2
.enabled
)
474 if (ctx
->DrawBuffer
->Name
== 0) {
475 if (radeon
->is_front_buffer_rendering
) {
476 ctx
->Driver
.Flush(ctx
);
478 radeon_update_renderbuffers(driContext
, driContext
->driDrawablePriv
, GL_FALSE
);
479 if (driContext
->driDrawablePriv
!= driContext
->driReadablePriv
)
480 radeon_update_renderbuffers(driContext
, driContext
->driReadablePriv
, GL_FALSE
);
483 old_viewport
= ctx
->Driver
.Viewport
;
484 ctx
->Driver
.Viewport
= NULL
;
485 radeon_window_moved(radeon
);
486 radeon_draw_buffer(ctx
, radeon
->glCtx
->DrawBuffer
);
487 ctx
->Driver
.Viewport
= old_viewport
;
490 static void radeon_print_state_atom(radeonContextPtr radeon
, struct radeon_state_atom
*state
)
492 int i
, j
, reg
, count
;
495 if (!radeon_is_debug_enabled(RADEON_STATE
, RADEON_VERBOSE
) )
498 dwords
= (*state
->check
) (radeon
->glCtx
, state
);
500 fprintf(stderr
, " emit %s %d/%d\n", state
->name
, dwords
, state
->cmd_size
);
502 if (state
->cmd
&& radeon_is_debug_enabled(RADEON_STATE
, RADEON_TRACE
)) {
503 if (dwords
> state
->cmd_size
)
504 dwords
= state
->cmd_size
;
505 for (i
= 0; i
< dwords
;) {
506 packet0
= state
->cmd
[i
];
507 reg
= (packet0
& 0x1FFF) << 2;
508 count
= ((packet0
& 0x3FFF0000) >> 16) + 1;
509 fprintf(stderr
, " %s[%d]: cmdpacket0 (first reg=0x%04x, count=%d)\n",
510 state
->name
, i
, reg
, count
);
512 for (j
= 0; j
< count
&& i
< dwords
; j
++) {
513 fprintf(stderr
, " %s[%d]: 0x%04x = %08x\n",
514 state
->name
, i
, reg
, state
->cmd
[i
]);
523 * Count total size for next state emit.
525 GLuint
radeonCountStateEmitSize(radeonContextPtr radeon
)
527 struct radeon_state_atom
*atom
;
529 /* check if we are going to emit full state */
531 if (radeon
->cmdbuf
.cs
->cdw
&& !radeon
->hw
.all_dirty
) {
532 if (!radeon
->hw
.is_dirty
)
534 foreach(atom
, &radeon
->hw
.atomlist
) {
536 const GLuint atom_size
= atom
->check(radeon
->glCtx
, atom
);
538 if (RADEON_CMDBUF
&& atom_size
) {
539 radeon_print_state_atom(radeon
, atom
);
544 foreach(atom
, &radeon
->hw
.atomlist
) {
545 const GLuint atom_size
= atom
->check(radeon
->glCtx
, atom
);
547 if (RADEON_CMDBUF
&& atom_size
) {
548 radeon_print_state_atom(radeon
, atom
);
554 radeon_print(RADEON_STATE
, RADEON_NORMAL
, "%s %u\n", __func__
, dwords
);
558 static INLINE
void radeon_emit_atom(radeonContextPtr radeon
, struct radeon_state_atom
*atom
)
560 BATCH_LOCALS(radeon
);
563 dwords
= (*atom
->check
) (radeon
->glCtx
, atom
);
566 radeon_print_state_atom(radeon
, atom
);
569 (*atom
->emit
)(radeon
->glCtx
, atom
);
571 BEGIN_BATCH_NO_AUTOSTATE(dwords
);
572 OUT_BATCH_TABLE(atom
->cmd
, dwords
);
575 atom
->dirty
= GL_FALSE
;
578 radeon_print(RADEON_STATE
, RADEON_VERBOSE
, " skip state %s\n", atom
->name
);
583 static INLINE
void radeonEmitAtoms(radeonContextPtr radeon
, GLboolean emitAll
)
585 struct radeon_state_atom
*atom
;
587 if (radeon
->vtbl
.pre_emit_atoms
)
588 radeon
->vtbl
.pre_emit_atoms(radeon
);
590 /* Emit actual atoms */
591 if (radeon
->hw
.all_dirty
|| emitAll
) {
592 foreach(atom
, &radeon
->hw
.atomlist
)
593 radeon_emit_atom( radeon
, atom
);
595 foreach(atom
, &radeon
->hw
.atomlist
) {
597 radeon_emit_atom( radeon
, atom
);
604 static GLboolean
radeon_revalidate_bos(struct gl_context
*ctx
)
606 radeonContextPtr radeon
= RADEON_CONTEXT(ctx
);
609 ret
= radeon_cs_space_check(radeon
->cmdbuf
.cs
);
610 if (ret
== RADEON_CS_SPACE_FLUSH
)
615 void radeonEmitState(radeonContextPtr radeon
)
617 radeon_print(RADEON_STATE
, RADEON_NORMAL
, "%s\n", __FUNCTION__
);
619 if (radeon
->vtbl
.pre_emit_state
)
620 radeon
->vtbl
.pre_emit_state(radeon
);
622 /* this code used to return here but now it emits zbs */
623 if (radeon
->cmdbuf
.cs
->cdw
&& !radeon
->hw
.is_dirty
&& !radeon
->hw
.all_dirty
)
626 if (!radeon
->cmdbuf
.cs
->cdw
) {
627 if (RADEON_DEBUG
& RADEON_STATE
)
628 fprintf(stderr
, "Begin reemit state\n");
630 radeonEmitAtoms(radeon
, GL_TRUE
);
633 if (RADEON_DEBUG
& RADEON_STATE
)
634 fprintf(stderr
, "Begin dirty state\n");
636 radeonEmitAtoms(radeon
, GL_FALSE
);
639 radeon
->hw
.is_dirty
= GL_FALSE
;
640 radeon
->hw
.all_dirty
= GL_FALSE
;
644 void radeonFlush(struct gl_context
*ctx
)
646 radeonContextPtr radeon
= RADEON_CONTEXT(ctx
);
647 if (RADEON_DEBUG
& RADEON_IOCTL
)
648 fprintf(stderr
, "%s %d\n", __FUNCTION__
, radeon
->cmdbuf
.cs
->cdw
);
650 /* okay if we have no cmds in the buffer &&
651 we have no DMA flush &&
652 we have no DMA buffer allocated.
653 then no point flushing anything at all.
655 if (!radeon
->dma
.flush
&& !radeon
->cmdbuf
.cs
->cdw
&& is_empty_list(&radeon
->dma
.reserved
))
658 if (radeon
->dma
.flush
)
659 radeon
->dma
.flush( ctx
);
661 if (radeon
->cmdbuf
.cs
->cdw
)
662 rcommonFlushCmdBuf(radeon
, __FUNCTION__
);
665 if ((ctx
->DrawBuffer
->Name
== 0) && radeon
->front_buffer_dirty
) {
666 __DRIscreen
*const screen
= radeon
->radeonScreen
->driScreen
;
668 if (screen
->dri2
.loader
&& (screen
->dri2
.loader
->base
.version
>= 2)
669 && (screen
->dri2
.loader
->flushFrontBuffer
!= NULL
)) {
670 __DRIdrawable
* drawable
= radeon_get_drawable(radeon
);
672 /* We set the dirty bit in radeon_prepare_render() if we're
673 * front buffer rendering once we get there.
675 radeon
->front_buffer_dirty
= GL_FALSE
;
677 (*screen
->dri2
.loader
->flushFrontBuffer
)(drawable
, drawable
->loaderPrivate
);
682 /* Make sure all commands have been sent to the hardware and have
683 * completed processing.
685 void radeonFinish(struct gl_context
* ctx
)
687 radeonContextPtr radeon
= RADEON_CONTEXT(ctx
);
688 struct gl_framebuffer
*fb
= ctx
->DrawBuffer
;
689 struct radeon_renderbuffer
*rrb
;
692 if (ctx
->Driver
.Flush
)
693 ctx
->Driver
.Flush(ctx
); /* +r6/r7 */
695 for (i
= 0; i
< fb
->_NumColorDrawBuffers
; i
++) {
696 struct radeon_renderbuffer
*rrb
;
697 rrb
= radeon_renderbuffer(fb
->_ColorDrawBuffers
[i
]);
699 radeon_bo_wait(rrb
->bo
);
701 rrb
= radeon_get_depthbuffer(radeon
);
703 radeon_bo_wait(rrb
->bo
);
708 * Send the current command buffer via ioctl to the hardware.
710 int rcommonFlushCmdBufLocked(radeonContextPtr rmesa
, const char *caller
)
714 if (rmesa
->cmdbuf
.flushing
) {
715 fprintf(stderr
, "Recursive call into r300FlushCmdBufLocked!\n");
718 rmesa
->cmdbuf
.flushing
= 1;
720 if (RADEON_DEBUG
& RADEON_IOCTL
) {
721 fprintf(stderr
, "%s from %s\n", __FUNCTION__
, caller
);
724 radeonEmitQueryEnd(rmesa
->glCtx
);
726 if (rmesa
->cmdbuf
.cs
->cdw
) {
727 ret
= radeon_cs_emit(rmesa
->cmdbuf
.cs
);
728 rmesa
->hw
.all_dirty
= GL_TRUE
;
730 radeon_cs_erase(rmesa
->cmdbuf
.cs
);
731 rmesa
->cmdbuf
.flushing
= 0;
733 if (radeon_revalidate_bos(rmesa
->glCtx
) == GL_FALSE
) {
734 fprintf(stderr
,"failed to revalidate buffers\n");
740 int rcommonFlushCmdBuf(radeonContextPtr rmesa
, const char *caller
)
744 radeonReleaseDmaRegions(rmesa
);
746 ret
= rcommonFlushCmdBufLocked(rmesa
, caller
);
749 fprintf(stderr
, "drmRadeonCmdBuffer: %d. Kernel failed to "
750 "parse or rejected command stream. See dmesg "
751 "for more info.\n", ret
);
759 * Make sure that enough space is available in the command buffer
760 * by flushing if necessary.
762 * \param dwords The number of dwords we need to be free on the command buffer
764 GLboolean
rcommonEnsureCmdBufSpace(radeonContextPtr rmesa
, int dwords
, const char *caller
)
766 if ((rmesa
->cmdbuf
.cs
->cdw
+ dwords
+ 128) > rmesa
->cmdbuf
.size
767 || radeon_cs_need_flush(rmesa
->cmdbuf
.cs
)) {
768 /* If we try to flush empty buffer there is too big rendering operation. */
769 assert(rmesa
->cmdbuf
.cs
->cdw
);
770 rcommonFlushCmdBuf(rmesa
, caller
);
776 void rcommonInitCmdBuf(radeonContextPtr rmesa
)
779 struct drm_radeon_gem_info mminfo
= { 0 };
781 /* Initialize command buffer */
782 size
= 256 * driQueryOptioni(&rmesa
->optionCache
,
783 "command_buffer_size");
784 if (size
< 2 * rmesa
->hw
.max_state_size
) {
785 size
= 2 * rmesa
->hw
.max_state_size
+ 65535;
790 radeon_print(RADEON_CS
, RADEON_VERBOSE
,
791 "sizeof(drm_r300_cmd_header_t)=%zd\n", sizeof(drm_r300_cmd_header_t
));
792 radeon_print(RADEON_CS
, RADEON_VERBOSE
,
793 "sizeof(drm_radeon_cmd_buffer_t)=%zd\n", sizeof(drm_radeon_cmd_buffer_t
));
794 radeon_print(RADEON_CS
, RADEON_VERBOSE
,
795 "Allocating %d bytes command buffer (max state is %d bytes)\n",
796 size
* 4, rmesa
->hw
.max_state_size
* 4);
799 radeon_cs_manager_gem_ctor(rmesa
->radeonScreen
->driScreen
->fd
);
800 if (rmesa
->cmdbuf
.csm
== NULL
) {
801 /* FIXME: fatal error */
804 rmesa
->cmdbuf
.cs
= radeon_cs_create(rmesa
->cmdbuf
.csm
, size
);
805 assert(rmesa
->cmdbuf
.cs
!= NULL
);
806 rmesa
->cmdbuf
.size
= size
;
808 radeon_cs_space_set_flush(rmesa
->cmdbuf
.cs
,
809 (void (*)(void *))rmesa
->glCtx
->Driver
.Flush
, rmesa
->glCtx
);
812 if (!drmCommandWriteRead(rmesa
->dri
.fd
, DRM_RADEON_GEM_INFO
,
813 &mminfo
, sizeof(mminfo
))) {
814 radeon_cs_set_limit(rmesa
->cmdbuf
.cs
, RADEON_GEM_DOMAIN_VRAM
,
815 mminfo
.vram_visible
);
816 radeon_cs_set_limit(rmesa
->cmdbuf
.cs
, RADEON_GEM_DOMAIN_GTT
,
822 * Destroy the command buffer
824 void rcommonDestroyCmdBuf(radeonContextPtr rmesa
)
826 radeon_cs_destroy(rmesa
->cmdbuf
.cs
);
827 radeon_cs_manager_gem_dtor(rmesa
->cmdbuf
.csm
);
830 void rcommonBeginBatch(radeonContextPtr rmesa
, int n
,
833 const char *function
,
836 radeon_cs_begin(rmesa
->cmdbuf
.cs
, n
, file
, function
, line
);
838 radeon_print(RADEON_CS
, RADEON_VERBOSE
, "BEGIN_BATCH(%d) at %d, from %s:%i\n",
839 n
, rmesa
->cmdbuf
.cs
->cdw
, function
, line
);
843 void radeonUserClear(struct gl_context
*ctx
, GLuint mask
)
845 _mesa_meta_Clear(ctx
, mask
);