1 /**************************************************************************
3 Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
5 The Weather Channel (TM) funded Tungsten Graphics to develop the
6 initial release of the Radeon 8500 driver under the XFree86 license.
7 This notice must be preserved.
9 Permission is hereby granted, free of charge, to any person obtaining
10 a copy of this software and associated documentation files (the
11 "Software"), to deal in the Software without restriction, including
12 without limitation the rights to use, copy, modify, merge, publish,
13 distribute, sublicense, and/or sell copies of the Software, and to
14 permit persons to whom the Software is furnished to do so, subject to
15 the following conditions:
17 The above copyright notice and this permission notice (including the
18 next paragraph) shall be included in all copies or substantial
19 portions of the Software.
21 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
22 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
23 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
24 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
25 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
26 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
27 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
29 **************************************************************************/
33 * Keith Whitwell <keith@tungstengraphics.com>
37 - Scissor implementation
38 - buffer swap/copy ioctls
41 - cmdbuffer management
45 #include "main/glheader.h"
46 #include "main/imports.h"
47 #include "main/context.h"
48 #include "main/enums.h"
49 #include "main/fbobject.h"
50 #include "main/framebuffer.h"
51 #include "main/renderbuffer.h"
52 #include "drivers/common/meta.h"
54 #include "radeon_common.h"
55 #include "radeon_drm.h"
56 #include "radeon_queryobj.h"
59 * Enable verbose debug output for emit code.
62 * 2 also print state values
64 #define RADEON_CMDBUF 0
66 /* =============================================================
71 * Update cliprects and scissors.
73 void radeonSetCliprects(radeonContextPtr radeon
)
75 __DRIdrawable
*const drawable
= radeon_get_drawable(radeon
);
76 __DRIdrawable
*const readable
= radeon_get_readable(radeon
);
78 if(drawable
== NULL
&& readable
== NULL
)
81 struct radeon_framebuffer
*const draw_rfb
= drawable
->driverPrivate
;
82 struct radeon_framebuffer
*const read_rfb
= readable
->driverPrivate
;
84 if ((draw_rfb
->base
.Width
!= drawable
->w
) ||
85 (draw_rfb
->base
.Height
!= drawable
->h
)) {
86 _mesa_resize_framebuffer(&radeon
->glCtx
, &draw_rfb
->base
,
87 drawable
->w
, drawable
->h
);
90 if (drawable
!= readable
) {
91 if ((read_rfb
->base
.Width
!= readable
->w
) ||
92 (read_rfb
->base
.Height
!= readable
->h
)) {
93 _mesa_resize_framebuffer(&radeon
->glCtx
, &read_rfb
->base
,
94 readable
->w
, readable
->h
);
98 if (radeon
->state
.scissor
.enabled
)
99 radeonUpdateScissor(&radeon
->glCtx
);
105 void radeonUpdateScissor( struct gl_context
*ctx
)
107 radeonContextPtr rmesa
= RADEON_CONTEXT(ctx
);
108 GLint x
= ctx
->Scissor
.X
, y
= ctx
->Scissor
.Y
;
109 GLsizei w
= ctx
->Scissor
.Width
, h
= ctx
->Scissor
.Height
;
111 int min_x
, min_y
, max_x
, max_y
;
113 if (!ctx
->DrawBuffer
)
116 max_x
= ctx
->DrawBuffer
->Width
- 1;
117 max_y
= ctx
->DrawBuffer
->Height
- 1;
119 if (_mesa_is_winsys_fbo(ctx
->DrawBuffer
)) {
121 y1
= ctx
->DrawBuffer
->Height
- (y
+ h
);
132 rmesa
->state
.scissor
.rect
.x1
= CLAMP(x1
, min_x
, max_x
);
133 rmesa
->state
.scissor
.rect
.y1
= CLAMP(y1
, min_y
, max_y
);
134 rmesa
->state
.scissor
.rect
.x2
= CLAMP(x2
, min_x
, max_x
);
135 rmesa
->state
.scissor
.rect
.y2
= CLAMP(y2
, min_y
, max_y
);
137 if (rmesa
->vtbl
.update_scissor
)
138 rmesa
->vtbl
.update_scissor(ctx
);
141 /* =============================================================
145 void radeonScissor(struct gl_context
* ctx
, GLint x
, GLint y
, GLsizei w
, GLsizei h
)
147 radeonContextPtr radeon
= RADEON_CONTEXT(ctx
);
148 if (ctx
->Scissor
.Enabled
) {
149 /* We don't pipeline cliprect changes */
150 radeon_firevertices(radeon
);
151 radeonUpdateScissor(ctx
);
155 /* ================================================================
156 * SwapBuffers with client-side throttling
159 uint32_t radeonGetAge(radeonContextPtr radeon
)
161 drm_radeon_getparam_t gp
;
165 gp
.param
= RADEON_PARAM_LAST_CLEAR
;
166 gp
.value
= (int *)&age
;
167 ret
= drmCommandWriteRead(radeon
->dri
.fd
, DRM_RADEON_GETPARAM
,
170 fprintf(stderr
, "%s: drmRadeonGetParam: %d\n", __FUNCTION__
,
179 * Check if we're about to draw into the front color buffer.
180 * If so, set the intel->front_buffer_dirty field to true.
183 radeon_check_front_buffer_rendering(struct gl_context
*ctx
)
185 radeonContextPtr radeon
= RADEON_CONTEXT(ctx
);
186 const struct gl_framebuffer
*fb
= ctx
->DrawBuffer
;
189 /* drawing to window system buffer */
190 if (fb
->_NumColorDrawBuffers
> 0) {
191 if (fb
->_ColorDrawBufferIndexes
[0] == BUFFER_FRONT_LEFT
) {
192 radeon
->front_buffer_dirty
= GL_TRUE
;
199 void radeon_draw_buffer(struct gl_context
*ctx
, struct gl_framebuffer
*fb
)
201 radeonContextPtr radeon
= RADEON_CONTEXT(ctx
);
202 struct radeon_renderbuffer
*rrbDepth
= NULL
, *rrbStencil
= NULL
,
208 /* this can happen during the initial context initialization */
212 /* radeons only handle 1 color draw so far */
213 if (fb
->_NumColorDrawBuffers
!= 1) {
214 radeon
->vtbl
.fallback(ctx
, RADEON_FALLBACK_DRAW_BUFFER
, GL_TRUE
);
218 /* Do this here, note core Mesa, since this function is called from
219 * many places within the driver.
221 if (ctx
->NewState
& (_NEW_BUFFERS
| _NEW_COLOR
| _NEW_PIXEL
)) {
222 /* this updates the DrawBuffer->_NumColorDrawBuffers fields, etc */
223 _mesa_update_framebuffer(ctx
);
224 /* this updates the DrawBuffer's Width/Height if it's a FBO */
225 _mesa_update_draw_buffer_bounds(ctx
);
228 if (fb
->_Status
!= GL_FRAMEBUFFER_COMPLETE_EXT
) {
229 /* this may occur when we're called by glBindFrameBuffer() during
230 * the process of someone setting up renderbuffers, etc.
232 /*_mesa_debug(ctx, "DrawBuffer: incomplete user FBO\n");*/
237 ;/* do something depthy/stencily TODO */
242 if (fb
->_ColorDrawBufferIndexes
[0] == BUFFER_FRONT_LEFT
) {
243 rrbColor
= radeon_renderbuffer(fb
->Attachment
[BUFFER_FRONT_LEFT
].Renderbuffer
);
244 radeon
->front_cliprects
= GL_TRUE
;
246 rrbColor
= radeon_renderbuffer(fb
->Attachment
[BUFFER_BACK_LEFT
].Renderbuffer
);
247 radeon
->front_cliprects
= GL_FALSE
;
250 /* user FBO in theory */
251 struct radeon_renderbuffer
*rrb
;
252 rrb
= radeon_renderbuffer(fb
->_ColorDrawBuffers
[0]);
254 offset
= rrb
->draw_offset
;
259 if (rrbColor
== NULL
)
260 radeon
->vtbl
.fallback(ctx
, RADEON_FALLBACK_DRAW_BUFFER
, GL_TRUE
);
262 radeon
->vtbl
.fallback(ctx
, RADEON_FALLBACK_DRAW_BUFFER
, GL_FALSE
);
265 if (fb
->Attachment
[BUFFER_DEPTH
].Renderbuffer
) {
266 rrbDepth
= radeon_renderbuffer(fb
->Attachment
[BUFFER_DEPTH
].Renderbuffer
);
267 if (rrbDepth
&& rrbDepth
->bo
) {
268 radeon
->vtbl
.fallback(ctx
, RADEON_FALLBACK_DEPTH_BUFFER
, GL_FALSE
);
270 radeon
->vtbl
.fallback(ctx
, RADEON_FALLBACK_DEPTH_BUFFER
, GL_TRUE
);
273 radeon
->vtbl
.fallback(ctx
, RADEON_FALLBACK_DEPTH_BUFFER
, GL_FALSE
);
277 if (fb
->Attachment
[BUFFER_STENCIL
].Renderbuffer
) {
278 rrbStencil
= radeon_renderbuffer(fb
->Attachment
[BUFFER_STENCIL
].Renderbuffer
);
279 if (rrbStencil
&& rrbStencil
->bo
) {
280 radeon
->vtbl
.fallback(ctx
, RADEON_FALLBACK_STENCIL_BUFFER
, GL_FALSE
);
281 /* need to re-compute stencil hw state */
283 rrbDepth
= rrbStencil
;
285 radeon
->vtbl
.fallback(ctx
, RADEON_FALLBACK_STENCIL_BUFFER
, GL_TRUE
);
288 radeon
->vtbl
.fallback(ctx
, RADEON_FALLBACK_STENCIL_BUFFER
, GL_FALSE
);
289 if (ctx
->Driver
.Enable
!= NULL
)
290 ctx
->Driver
.Enable(ctx
, GL_STENCIL_TEST
, ctx
->Stencil
.Enabled
);
292 ctx
->NewState
|= _NEW_STENCIL
;
295 /* Update culling direction which changes depending on the
296 * orientation of the buffer:
298 if (ctx
->Driver
.FrontFace
)
299 ctx
->Driver
.FrontFace(ctx
, ctx
->Polygon
.FrontFace
);
301 ctx
->NewState
|= _NEW_POLYGON
;
304 * Update depth test state
306 if (ctx
->Driver
.Enable
) {
307 ctx
->Driver
.Enable(ctx
, GL_DEPTH_TEST
,
308 (ctx
->Depth
.Test
&& fb
->Visual
.depthBits
> 0));
309 /* Need to update the derived ctx->Stencil._Enabled first */
310 ctx
->Driver
.Enable(ctx
, GL_STENCIL_TEST
,
311 (ctx
->Stencil
.Enabled
&& fb
->Visual
.stencilBits
> 0));
313 ctx
->NewState
|= (_NEW_DEPTH
| _NEW_STENCIL
);
316 _mesa_reference_renderbuffer(&radeon
->state
.depth
.rb
, &rrbDepth
->base
.Base
);
317 _mesa_reference_renderbuffer(&radeon
->state
.color
.rb
, &rrbColor
->base
.Base
);
318 radeon
->state
.color
.draw_offset
= offset
;
321 /* update viewport since it depends on window size */
322 if (ctx
->Driver
.Viewport
) {
323 ctx
->Driver
.Viewport(ctx
, ctx
->Viewport
.X
, ctx
->Viewport
.Y
,
324 ctx
->Viewport
.Width
, ctx
->Viewport
.Height
);
329 ctx
->NewState
|= _NEW_VIEWPORT
;
331 /* Set state we know depends on drawable parameters:
333 radeonUpdateScissor(ctx
);
334 radeon
->NewGLState
|= _NEW_SCISSOR
;
336 if (ctx
->Driver
.DepthRange
)
337 ctx
->Driver
.DepthRange(ctx
,
341 /* Update culling direction which changes depending on the
342 * orientation of the buffer:
344 if (ctx
->Driver
.FrontFace
)
345 ctx
->Driver
.FrontFace(ctx
, ctx
->Polygon
.FrontFace
);
347 ctx
->NewState
|= _NEW_POLYGON
;
351 * Called via glDrawBuffer.
353 void radeonDrawBuffer( struct gl_context
*ctx
, GLenum mode
)
355 if (RADEON_DEBUG
& RADEON_DRI
)
356 fprintf(stderr
, "%s %s\n", __FUNCTION__
,
357 _mesa_lookup_enum_by_nr( mode
));
359 if (_mesa_is_winsys_fbo(ctx
->DrawBuffer
)) {
360 radeonContextPtr radeon
= RADEON_CONTEXT(ctx
);
362 const GLboolean was_front_buffer_rendering
=
363 radeon
->is_front_buffer_rendering
;
365 radeon
->is_front_buffer_rendering
= (mode
== GL_FRONT_LEFT
) ||
368 /* If we weren't front-buffer rendering before but we are now, make sure
369 * that the front-buffer has actually been allocated.
371 if (!was_front_buffer_rendering
&& radeon
->is_front_buffer_rendering
) {
372 radeon_update_renderbuffers(radeon
->dri
.context
,
373 radeon
->dri
.context
->driDrawablePriv
, GL_FALSE
);
377 radeon_draw_buffer(ctx
, ctx
->DrawBuffer
);
380 void radeonReadBuffer( struct gl_context
*ctx
, GLenum mode
)
382 if (ctx
->DrawBuffer
&& _mesa_is_winsys_fbo(ctx
->DrawBuffer
)) {
383 struct radeon_context
*const rmesa
= RADEON_CONTEXT(ctx
);
384 const GLboolean was_front_buffer_reading
= rmesa
->is_front_buffer_reading
;
385 rmesa
->is_front_buffer_reading
= (mode
== GL_FRONT_LEFT
)
386 || (mode
== GL_FRONT
);
388 if (!was_front_buffer_reading
&& rmesa
->is_front_buffer_reading
) {
389 radeon_update_renderbuffers(rmesa
->dri
.context
,
390 rmesa
->dri
.context
->driReadablePriv
, GL_FALSE
);
393 /* nothing, until we implement h/w glRead/CopyPixels or CopyTexImage */
394 if (ctx
->ReadBuffer
== ctx
->DrawBuffer
) {
395 /* This will update FBO completeness status.
396 * A framebuffer will be incomplete if the GL_READ_BUFFER setting
397 * refers to a missing renderbuffer. Calling glReadBuffer can set
398 * that straight and can make the drawing buffer complete.
400 radeon_draw_buffer(ctx
, ctx
->DrawBuffer
);
404 void radeon_window_moved(radeonContextPtr radeon
)
406 /* Cliprects has to be updated before doing anything else */
407 radeonSetCliprects(radeon
);
410 void radeon_viewport(struct gl_context
*ctx
)
412 radeonContextPtr radeon
= RADEON_CONTEXT(ctx
);
413 __DRIcontext
*driContext
= radeon
->dri
.context
;
414 void (*old_viewport
)(struct gl_context
*ctx
, GLint x
, GLint y
,
415 GLsizei w
, GLsizei h
);
417 if (_mesa_is_winsys_fbo(ctx
->DrawBuffer
)) {
418 if (radeon
->is_front_buffer_rendering
) {
419 ctx
->Driver
.Flush(ctx
);
421 radeon_update_renderbuffers(driContext
, driContext
->driDrawablePriv
, GL_FALSE
);
422 if (driContext
->driDrawablePriv
!= driContext
->driReadablePriv
)
423 radeon_update_renderbuffers(driContext
, driContext
->driReadablePriv
, GL_FALSE
);
426 old_viewport
= ctx
->Driver
.Viewport
;
427 ctx
->Driver
.Viewport
= NULL
;
428 radeon_window_moved(radeon
);
429 radeon_draw_buffer(ctx
, radeon
->glCtx
.DrawBuffer
);
430 ctx
->Driver
.Viewport
= old_viewport
;
433 static void radeon_print_state_atom(radeonContextPtr radeon
, struct radeon_state_atom
*state
)
435 int i
, j
, reg
, count
;
438 if (!radeon_is_debug_enabled(RADEON_STATE
, RADEON_VERBOSE
) )
441 dwords
= (*state
->check
) (&radeon
->glCtx
, state
);
443 fprintf(stderr
, " emit %s %d/%d\n", state
->name
, dwords
, state
->cmd_size
);
445 if (state
->cmd
&& radeon_is_debug_enabled(RADEON_STATE
, RADEON_TRACE
)) {
446 if (dwords
> state
->cmd_size
)
447 dwords
= state
->cmd_size
;
448 for (i
= 0; i
< dwords
;) {
449 packet0
= state
->cmd
[i
];
450 reg
= (packet0
& 0x1FFF) << 2;
451 count
= ((packet0
& 0x3FFF0000) >> 16) + 1;
452 fprintf(stderr
, " %s[%d]: cmdpacket0 (first reg=0x%04x, count=%d)\n",
453 state
->name
, i
, reg
, count
);
455 for (j
= 0; j
< count
&& i
< dwords
; j
++) {
456 fprintf(stderr
, " %s[%d]: 0x%04x = %08x\n",
457 state
->name
, i
, reg
, state
->cmd
[i
]);
466 * Count total size for next state emit.
468 GLuint
radeonCountStateEmitSize(radeonContextPtr radeon
)
470 struct radeon_state_atom
*atom
;
472 /* check if we are going to emit full state */
474 if (radeon
->cmdbuf
.cs
->cdw
&& !radeon
->hw
.all_dirty
) {
475 if (!radeon
->hw
.is_dirty
)
477 foreach(atom
, &radeon
->hw
.atomlist
) {
479 const GLuint atom_size
= atom
->check(&radeon
->glCtx
, atom
);
481 if (RADEON_CMDBUF
&& atom_size
) {
482 radeon_print_state_atom(radeon
, atom
);
487 foreach(atom
, &radeon
->hw
.atomlist
) {
488 const GLuint atom_size
= atom
->check(&radeon
->glCtx
, atom
);
490 if (RADEON_CMDBUF
&& atom_size
) {
491 radeon_print_state_atom(radeon
, atom
);
497 radeon_print(RADEON_STATE
, RADEON_NORMAL
, "%s %u\n", __func__
, dwords
);
501 static INLINE
void radeon_emit_atom(radeonContextPtr radeon
, struct radeon_state_atom
*atom
)
503 BATCH_LOCALS(radeon
);
506 dwords
= (*atom
->check
) (&radeon
->glCtx
, atom
);
509 radeon_print_state_atom(radeon
, atom
);
512 (*atom
->emit
)(&radeon
->glCtx
, atom
);
514 BEGIN_BATCH_NO_AUTOSTATE(dwords
);
515 OUT_BATCH_TABLE(atom
->cmd
, dwords
);
518 atom
->dirty
= GL_FALSE
;
521 radeon_print(RADEON_STATE
, RADEON_VERBOSE
, " skip state %s\n", atom
->name
);
526 static INLINE
void radeonEmitAtoms(radeonContextPtr radeon
, GLboolean emitAll
)
528 struct radeon_state_atom
*atom
;
530 if (radeon
->vtbl
.pre_emit_atoms
)
531 radeon
->vtbl
.pre_emit_atoms(radeon
);
533 /* Emit actual atoms */
534 if (radeon
->hw
.all_dirty
|| emitAll
) {
535 foreach(atom
, &radeon
->hw
.atomlist
)
536 radeon_emit_atom( radeon
, atom
);
538 foreach(atom
, &radeon
->hw
.atomlist
) {
540 radeon_emit_atom( radeon
, atom
);
547 static GLboolean
radeon_revalidate_bos(struct gl_context
*ctx
)
549 radeonContextPtr radeon
= RADEON_CONTEXT(ctx
);
552 ret
= radeon_cs_space_check(radeon
->cmdbuf
.cs
);
553 if (ret
== RADEON_CS_SPACE_FLUSH
)
558 void radeonEmitState(radeonContextPtr radeon
)
560 radeon_print(RADEON_STATE
, RADEON_NORMAL
, "%s\n", __FUNCTION__
);
562 if (radeon
->vtbl
.pre_emit_state
)
563 radeon
->vtbl
.pre_emit_state(radeon
);
565 /* this code used to return here but now it emits zbs */
566 if (radeon
->cmdbuf
.cs
->cdw
&& !radeon
->hw
.is_dirty
&& !radeon
->hw
.all_dirty
)
569 if (!radeon
->cmdbuf
.cs
->cdw
) {
570 if (RADEON_DEBUG
& RADEON_STATE
)
571 fprintf(stderr
, "Begin reemit state\n");
573 radeonEmitAtoms(radeon
, GL_TRUE
);
576 if (RADEON_DEBUG
& RADEON_STATE
)
577 fprintf(stderr
, "Begin dirty state\n");
579 radeonEmitAtoms(radeon
, GL_FALSE
);
582 radeon
->hw
.is_dirty
= GL_FALSE
;
583 radeon
->hw
.all_dirty
= GL_FALSE
;
587 void radeonFlush(struct gl_context
*ctx
)
589 radeonContextPtr radeon
= RADEON_CONTEXT(ctx
);
590 if (RADEON_DEBUG
& RADEON_IOCTL
)
591 fprintf(stderr
, "%s %d\n", __FUNCTION__
, radeon
->cmdbuf
.cs
->cdw
);
593 /* okay if we have no cmds in the buffer &&
594 we have no DMA flush &&
595 we have no DMA buffer allocated.
596 then no point flushing anything at all.
598 if (!radeon
->dma
.flush
&& !radeon
->cmdbuf
.cs
->cdw
&& is_empty_list(&radeon
->dma
.reserved
))
601 if (radeon
->dma
.flush
)
602 radeon
->dma
.flush( ctx
);
604 if (radeon
->cmdbuf
.cs
->cdw
)
605 rcommonFlushCmdBuf(radeon
, __FUNCTION__
);
608 if (_mesa_is_winsys_fbo(ctx
->DrawBuffer
) && radeon
->front_buffer_dirty
) {
609 __DRIscreen
*const screen
= radeon
->radeonScreen
->driScreen
;
611 if (screen
->dri2
.loader
&& (screen
->dri2
.loader
->base
.version
>= 2)
612 && (screen
->dri2
.loader
->flushFrontBuffer
!= NULL
)) {
613 __DRIdrawable
* drawable
= radeon_get_drawable(radeon
);
615 /* We set the dirty bit in radeon_prepare_render() if we're
616 * front buffer rendering once we get there.
618 radeon
->front_buffer_dirty
= GL_FALSE
;
620 (*screen
->dri2
.loader
->flushFrontBuffer
)(drawable
, drawable
->loaderPrivate
);
625 /* Make sure all commands have been sent to the hardware and have
626 * completed processing.
628 void radeonFinish(struct gl_context
* ctx
)
630 radeonContextPtr radeon
= RADEON_CONTEXT(ctx
);
631 struct gl_framebuffer
*fb
= ctx
->DrawBuffer
;
632 struct radeon_renderbuffer
*rrb
;
635 if (ctx
->Driver
.Flush
)
636 ctx
->Driver
.Flush(ctx
); /* +r6/r7 */
638 for (i
= 0; i
< fb
->_NumColorDrawBuffers
; i
++) {
639 struct radeon_renderbuffer
*rrb
;
640 rrb
= radeon_renderbuffer(fb
->_ColorDrawBuffers
[i
]);
642 radeon_bo_wait(rrb
->bo
);
644 rrb
= radeon_get_depthbuffer(radeon
);
646 radeon_bo_wait(rrb
->bo
);
651 * Send the current command buffer via ioctl to the hardware.
653 int rcommonFlushCmdBufLocked(radeonContextPtr rmesa
, const char *caller
)
657 if (rmesa
->cmdbuf
.flushing
) {
658 fprintf(stderr
, "Recursive call into r300FlushCmdBufLocked!\n");
661 rmesa
->cmdbuf
.flushing
= 1;
663 if (RADEON_DEBUG
& RADEON_IOCTL
) {
664 fprintf(stderr
, "%s from %s\n", __FUNCTION__
, caller
);
667 radeonEmitQueryEnd(&rmesa
->glCtx
);
669 if (rmesa
->cmdbuf
.cs
->cdw
) {
670 ret
= radeon_cs_emit(rmesa
->cmdbuf
.cs
);
671 rmesa
->hw
.all_dirty
= GL_TRUE
;
673 radeon_cs_erase(rmesa
->cmdbuf
.cs
);
674 rmesa
->cmdbuf
.flushing
= 0;
676 if (radeon_revalidate_bos(&rmesa
->glCtx
) == GL_FALSE
) {
677 fprintf(stderr
,"failed to revalidate buffers\n");
683 int rcommonFlushCmdBuf(radeonContextPtr rmesa
, const char *caller
)
687 radeonReleaseDmaRegions(rmesa
);
689 ret
= rcommonFlushCmdBufLocked(rmesa
, caller
);
692 fprintf(stderr
, "drmRadeonCmdBuffer: %d. Kernel failed to "
693 "parse or rejected command stream. See dmesg "
694 "for more info.\n", ret
);
702 * Make sure that enough space is available in the command buffer
703 * by flushing if necessary.
705 * \param dwords The number of dwords we need to be free on the command buffer
707 GLboolean
rcommonEnsureCmdBufSpace(radeonContextPtr rmesa
, int dwords
, const char *caller
)
709 if ((rmesa
->cmdbuf
.cs
->cdw
+ dwords
+ 128) > rmesa
->cmdbuf
.size
710 || radeon_cs_need_flush(rmesa
->cmdbuf
.cs
)) {
711 /* If we try to flush empty buffer there is too big rendering operation. */
712 assert(rmesa
->cmdbuf
.cs
->cdw
);
713 rcommonFlushCmdBuf(rmesa
, caller
);
719 void rcommonInitCmdBuf(radeonContextPtr rmesa
)
722 struct drm_radeon_gem_info mminfo
= { 0 };
724 /* Initialize command buffer */
725 size
= 256 * driQueryOptioni(&rmesa
->optionCache
,
726 "command_buffer_size");
727 if (size
< 2 * rmesa
->hw
.max_state_size
) {
728 size
= 2 * rmesa
->hw
.max_state_size
+ 65535;
733 radeon_print(RADEON_CS
, RADEON_VERBOSE
,
734 "sizeof(drm_r300_cmd_header_t)=%zd\n", sizeof(drm_r300_cmd_header_t
));
735 radeon_print(RADEON_CS
, RADEON_VERBOSE
,
736 "sizeof(drm_radeon_cmd_buffer_t)=%zd\n", sizeof(drm_radeon_cmd_buffer_t
));
737 radeon_print(RADEON_CS
, RADEON_VERBOSE
,
738 "Allocating %d bytes command buffer (max state is %d bytes)\n",
739 size
* 4, rmesa
->hw
.max_state_size
* 4);
742 radeon_cs_manager_gem_ctor(rmesa
->radeonScreen
->driScreen
->fd
);
743 if (rmesa
->cmdbuf
.csm
== NULL
) {
744 /* FIXME: fatal error */
747 rmesa
->cmdbuf
.cs
= radeon_cs_create(rmesa
->cmdbuf
.csm
, size
);
748 assert(rmesa
->cmdbuf
.cs
!= NULL
);
749 rmesa
->cmdbuf
.size
= size
;
751 radeon_cs_space_set_flush(rmesa
->cmdbuf
.cs
,
752 (void (*)(void *))rmesa
->glCtx
.Driver
.Flush
, &rmesa
->glCtx
);
755 if (!drmCommandWriteRead(rmesa
->dri
.fd
, DRM_RADEON_GEM_INFO
,
756 &mminfo
, sizeof(mminfo
))) {
757 radeon_cs_set_limit(rmesa
->cmdbuf
.cs
, RADEON_GEM_DOMAIN_VRAM
,
758 mminfo
.vram_visible
);
759 radeon_cs_set_limit(rmesa
->cmdbuf
.cs
, RADEON_GEM_DOMAIN_GTT
,
765 * Destroy the command buffer
767 void rcommonDestroyCmdBuf(radeonContextPtr rmesa
)
769 radeon_cs_destroy(rmesa
->cmdbuf
.cs
);
770 radeon_cs_manager_gem_dtor(rmesa
->cmdbuf
.csm
);
773 void rcommonBeginBatch(radeonContextPtr rmesa
, int n
,
776 const char *function
,
779 radeon_cs_begin(rmesa
->cmdbuf
.cs
, n
, file
, function
, line
);
781 radeon_print(RADEON_CS
, RADEON_VERBOSE
, "BEGIN_BATCH(%d) at %d, from %s:%i\n",
782 n
, rmesa
->cmdbuf
.cs
->cdw
, function
, line
);
786 void radeonUserClear(struct gl_context
*ctx
, GLuint mask
)
788 _mesa_meta_Clear(ctx
, mask
);