1 /**************************************************************************
3 Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
5 The Weather Channel (TM) funded Tungsten Graphics to develop the
6 initial release of the Radeon 8500 driver under the XFree86 license.
7 This notice must be preserved.
9 Permission is hereby granted, free of charge, to any person obtaining
10 a copy of this software and associated documentation files (the
11 "Software"), to deal in the Software without restriction, including
12 without limitation the rights to use, copy, modify, merge, publish,
13 distribute, sublicense, and/or sell copies of the Software, and to
14 permit persons to whom the Software is furnished to do so, subject to
15 the following conditions:
17 The above copyright notice and this permission notice (including the
18 next paragraph) shall be included in all copies or substantial
19 portions of the Software.
21 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
22 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
23 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
24 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
25 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
26 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
27 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
29 **************************************************************************/
33 * Keith Whitwell <keithw@vmware.com>
37 - Scissor implementation
38 - buffer swap/copy ioctls
41 - cmdbuffer management
45 #include "main/glheader.h"
46 #include "main/imports.h"
47 #include "main/context.h"
48 #include "main/enums.h"
49 #include "main/fbobject.h"
50 #include "main/framebuffer.h"
51 #include "main/renderbuffer.h"
52 #include "drivers/common/meta.h"
54 #include "radeon_common.h"
55 #include "radeon_drm.h"
56 #include "radeon_queryobj.h"
59 * Enable verbose debug output for emit code.
62 * 2 also print state values
64 #define RADEON_CMDBUF 0
66 /* =============================================================
71 * Update cliprects and scissors.
73 void radeonSetCliprects(radeonContextPtr radeon
)
75 __DRIdrawable
*const drawable
= radeon_get_drawable(radeon
);
76 __DRIdrawable
*const readable
= radeon_get_readable(radeon
);
78 if(drawable
== NULL
&& readable
== NULL
)
81 struct radeon_framebuffer
*const draw_rfb
= drawable
->driverPrivate
;
82 struct radeon_framebuffer
*const read_rfb
= readable
->driverPrivate
;
84 if ((draw_rfb
->base
.Width
!= drawable
->w
) ||
85 (draw_rfb
->base
.Height
!= drawable
->h
)) {
86 _mesa_resize_framebuffer(&radeon
->glCtx
, &draw_rfb
->base
,
87 drawable
->w
, drawable
->h
);
90 if (drawable
!= readable
) {
91 if ((read_rfb
->base
.Width
!= readable
->w
) ||
92 (read_rfb
->base
.Height
!= readable
->h
)) {
93 _mesa_resize_framebuffer(&radeon
->glCtx
, &read_rfb
->base
,
94 readable
->w
, readable
->h
);
98 if (radeon
->state
.scissor
.enabled
)
99 radeonUpdateScissor(&radeon
->glCtx
);
105 void radeonUpdateScissor( struct gl_context
*ctx
)
107 radeonContextPtr rmesa
= RADEON_CONTEXT(ctx
);
108 GLint x
= ctx
->Scissor
.ScissorArray
[0].X
, y
= ctx
->Scissor
.ScissorArray
[0].Y
;
109 GLsizei w
= ctx
->Scissor
.ScissorArray
[0].Width
, h
= ctx
->Scissor
.ScissorArray
[0].Height
;
111 int min_x
, min_y
, max_x
, max_y
;
113 if (!ctx
->DrawBuffer
)
116 max_x
= ctx
->DrawBuffer
->Width
- 1;
117 max_y
= ctx
->DrawBuffer
->Height
- 1;
119 if (_mesa_is_winsys_fbo(ctx
->DrawBuffer
)) {
121 y1
= ctx
->DrawBuffer
->Height
- (y
+ h
);
132 rmesa
->state
.scissor
.rect
.x1
= CLAMP(x1
, min_x
, max_x
);
133 rmesa
->state
.scissor
.rect
.y1
= CLAMP(y1
, min_y
, max_y
);
134 rmesa
->state
.scissor
.rect
.x2
= CLAMP(x2
, min_x
, max_x
);
135 rmesa
->state
.scissor
.rect
.y2
= CLAMP(y2
, min_y
, max_y
);
137 if (rmesa
->vtbl
.update_scissor
)
138 rmesa
->vtbl
.update_scissor(ctx
);
141 /* =============================================================
145 void radeonScissor(struct gl_context
*ctx
)
147 radeonContextPtr radeon
= RADEON_CONTEXT(ctx
);
148 if (ctx
->Scissor
.EnableFlags
) {
149 /* We don't pipeline cliprect changes */
150 radeon_firevertices(radeon
);
151 radeonUpdateScissor(ctx
);
155 /* ================================================================
156 * SwapBuffers with client-side throttling
159 uint32_t radeonGetAge(radeonContextPtr radeon
)
161 drm_radeon_getparam_t gp
;
165 gp
.param
= RADEON_PARAM_LAST_CLEAR
;
166 gp
.value
= (int *)&age
;
167 ret
= drmCommandWriteRead(radeon
->radeonScreen
->driScreen
->fd
, DRM_RADEON_GETPARAM
,
170 fprintf(stderr
, "%s: drmRadeonGetParam: %d\n", __func__
,
178 void radeon_draw_buffer(struct gl_context
*ctx
, struct gl_framebuffer
*fb
)
180 radeonContextPtr radeon
= RADEON_CONTEXT(ctx
);
181 struct radeon_renderbuffer
*rrbDepth
= NULL
, *rrbStencil
= NULL
,
187 /* this can happen during the initial context initialization */
191 /* radeons only handle 1 color draw so far */
192 if (fb
->_NumColorDrawBuffers
!= 1) {
193 radeon
->vtbl
.fallback(ctx
, RADEON_FALLBACK_DRAW_BUFFER
, GL_TRUE
);
197 /* Do this here, note core Mesa, since this function is called from
198 * many places within the driver.
200 if (ctx
->NewState
& (_NEW_BUFFERS
| _NEW_COLOR
| _NEW_PIXEL
)) {
201 /* this updates the DrawBuffer->_NumColorDrawBuffers fields, etc */
202 _mesa_update_framebuffer(ctx
, ctx
->ReadBuffer
, ctx
->DrawBuffer
);
203 /* this updates the DrawBuffer's Width/Height if it's a FBO */
204 _mesa_update_draw_buffer_bounds(ctx
, ctx
->DrawBuffer
);
207 if (fb
->_Status
!= GL_FRAMEBUFFER_COMPLETE_EXT
) {
208 /* this may occur when we're called by glBindFrameBuffer() during
209 * the process of someone setting up renderbuffers, etc.
211 /*_mesa_debug(ctx, "DrawBuffer: incomplete user FBO\n");*/
216 ;/* do something depthy/stencily TODO */
221 if (fb
->_ColorDrawBufferIndexes
[0] == BUFFER_FRONT_LEFT
) {
222 rrbColor
= radeon_renderbuffer(fb
->Attachment
[BUFFER_FRONT_LEFT
].Renderbuffer
);
223 radeon
->front_cliprects
= GL_TRUE
;
225 rrbColor
= radeon_renderbuffer(fb
->Attachment
[BUFFER_BACK_LEFT
].Renderbuffer
);
226 radeon
->front_cliprects
= GL_FALSE
;
229 /* user FBO in theory */
230 struct radeon_renderbuffer
*rrb
;
231 rrb
= radeon_renderbuffer(fb
->_ColorDrawBuffers
[0]);
233 offset
= rrb
->draw_offset
;
238 if (rrbColor
== NULL
)
239 radeon
->vtbl
.fallback(ctx
, RADEON_FALLBACK_DRAW_BUFFER
, GL_TRUE
);
241 radeon
->vtbl
.fallback(ctx
, RADEON_FALLBACK_DRAW_BUFFER
, GL_FALSE
);
244 if (fb
->Attachment
[BUFFER_DEPTH
].Renderbuffer
) {
245 rrbDepth
= radeon_renderbuffer(fb
->Attachment
[BUFFER_DEPTH
].Renderbuffer
);
246 if (rrbDepth
&& rrbDepth
->bo
) {
247 radeon
->vtbl
.fallback(ctx
, RADEON_FALLBACK_DEPTH_BUFFER
, GL_FALSE
);
249 radeon
->vtbl
.fallback(ctx
, RADEON_FALLBACK_DEPTH_BUFFER
, GL_TRUE
);
252 radeon
->vtbl
.fallback(ctx
, RADEON_FALLBACK_DEPTH_BUFFER
, GL_FALSE
);
256 if (fb
->Attachment
[BUFFER_STENCIL
].Renderbuffer
) {
257 rrbStencil
= radeon_renderbuffer(fb
->Attachment
[BUFFER_STENCIL
].Renderbuffer
);
258 if (rrbStencil
&& rrbStencil
->bo
) {
259 radeon
->vtbl
.fallback(ctx
, RADEON_FALLBACK_STENCIL_BUFFER
, GL_FALSE
);
260 /* need to re-compute stencil hw state */
262 rrbDepth
= rrbStencil
;
264 radeon
->vtbl
.fallback(ctx
, RADEON_FALLBACK_STENCIL_BUFFER
, GL_TRUE
);
267 radeon
->vtbl
.fallback(ctx
, RADEON_FALLBACK_STENCIL_BUFFER
, GL_FALSE
);
268 if (ctx
->Driver
.Enable
!= NULL
)
269 ctx
->Driver
.Enable(ctx
, GL_STENCIL_TEST
, ctx
->Stencil
.Enabled
);
271 ctx
->NewState
|= _NEW_STENCIL
;
274 /* Update culling direction which changes depending on the
275 * orientation of the buffer:
277 if (ctx
->Driver
.FrontFace
)
278 ctx
->Driver
.FrontFace(ctx
, ctx
->Polygon
.FrontFace
);
280 ctx
->NewState
|= _NEW_POLYGON
;
283 * Update depth test state
285 if (ctx
->Driver
.Enable
) {
286 ctx
->Driver
.Enable(ctx
, GL_DEPTH_TEST
,
287 (ctx
->Depth
.Test
&& fb
->Visual
.depthBits
> 0));
288 ctx
->Driver
.Enable(ctx
, GL_STENCIL_TEST
,
289 (ctx
->Stencil
.Enabled
&& fb
->Visual
.stencilBits
> 0));
291 ctx
->NewState
|= (_NEW_DEPTH
| _NEW_STENCIL
);
294 _mesa_reference_renderbuffer(&radeon
->state
.depth
.rb
, &rrbDepth
->base
.Base
);
295 _mesa_reference_renderbuffer(&radeon
->state
.color
.rb
, &rrbColor
->base
.Base
);
296 radeon
->state
.color
.draw_offset
= offset
;
298 ctx
->NewState
|= _NEW_VIEWPORT
;
300 /* Set state we know depends on drawable parameters:
302 radeonUpdateScissor(ctx
);
303 radeon
->NewGLState
|= _NEW_SCISSOR
;
305 if (ctx
->Driver
.DepthRange
)
306 ctx
->Driver
.DepthRange(ctx
);
308 /* Update culling direction which changes depending on the
309 * orientation of the buffer:
311 if (ctx
->Driver
.FrontFace
)
312 ctx
->Driver
.FrontFace(ctx
, ctx
->Polygon
.FrontFace
);
314 ctx
->NewState
|= _NEW_POLYGON
;
318 * Called via glDrawBuffer.
320 void radeonDrawBuffer(struct gl_context
*ctx
)
322 if (RADEON_DEBUG
& RADEON_DRI
)
323 fprintf(stderr
, "%s\n", __func__
);
325 if (_mesa_is_front_buffer_drawing(ctx
->DrawBuffer
)) {
326 radeonContextPtr radeon
= RADEON_CONTEXT(ctx
);
328 /* If we might be front-buffer rendering on this buffer for
329 * the first time, invalidate our DRI drawable so we'll ask
330 * for new buffers (including the fake front) before we start
333 radeon_update_renderbuffers(radeon
->driContext
,
334 radeon
->driContext
->driDrawablePriv
,
338 radeon_draw_buffer(ctx
, ctx
->DrawBuffer
);
341 void radeonReadBuffer( struct gl_context
*ctx
, GLenum mode
)
343 if (_mesa_is_front_buffer_reading(ctx
->ReadBuffer
)) {
344 struct radeon_context
*const rmesa
= RADEON_CONTEXT(ctx
);
345 radeon_update_renderbuffers(rmesa
->driContext
,
346 rmesa
->driContext
->driReadablePriv
, GL_FALSE
);
348 /* nothing, until we implement h/w glRead/CopyPixels or CopyTexImage */
349 if (ctx
->ReadBuffer
== ctx
->DrawBuffer
) {
350 /* This will update FBO completeness status.
351 * A framebuffer will be incomplete if the GL_READ_BUFFER setting
352 * refers to a missing renderbuffer. Calling glReadBuffer can set
353 * that straight and can make the drawing buffer complete.
355 radeon_draw_buffer(ctx
, ctx
->DrawBuffer
);
359 void radeon_window_moved(radeonContextPtr radeon
)
361 /* Cliprects has to be updated before doing anything else */
362 radeonSetCliprects(radeon
);
365 void radeon_viewport(struct gl_context
*ctx
)
367 radeonContextPtr radeon
= RADEON_CONTEXT(ctx
);
368 __DRIcontext
*driContext
= radeon
->driContext
;
369 void (*old_viewport
)(struct gl_context
*ctx
);
371 if (_mesa_is_winsys_fbo(ctx
->DrawBuffer
)) {
372 if (_mesa_is_front_buffer_drawing(ctx
->DrawBuffer
)) {
373 ctx
->Driver
.Flush(ctx
);
375 radeon_update_renderbuffers(driContext
, driContext
->driDrawablePriv
, GL_FALSE
);
376 if (driContext
->driDrawablePriv
!= driContext
->driReadablePriv
)
377 radeon_update_renderbuffers(driContext
, driContext
->driReadablePriv
, GL_FALSE
);
380 old_viewport
= ctx
->Driver
.Viewport
;
381 ctx
->Driver
.Viewport
= NULL
;
382 radeon_window_moved(radeon
);
383 radeon_draw_buffer(ctx
, radeon
->glCtx
.DrawBuffer
);
384 ctx
->Driver
.Viewport
= old_viewport
;
387 static void radeon_print_state_atom(radeonContextPtr radeon
, struct radeon_state_atom
*state
)
389 int i
, j
, reg
, count
;
392 if (!radeon_is_debug_enabled(RADEON_STATE
, RADEON_VERBOSE
) )
395 dwords
= state
->check(&radeon
->glCtx
, state
);
397 fprintf(stderr
, " emit %s %d/%d\n", state
->name
, dwords
, state
->cmd_size
);
399 if (state
->cmd
&& radeon_is_debug_enabled(RADEON_STATE
, RADEON_TRACE
)) {
400 if (dwords
> state
->cmd_size
)
401 dwords
= state
->cmd_size
;
402 for (i
= 0; i
< dwords
;) {
403 packet0
= state
->cmd
[i
];
404 reg
= (packet0
& 0x1FFF) << 2;
405 count
= ((packet0
& 0x3FFF0000) >> 16) + 1;
406 fprintf(stderr
, " %s[%d]: cmdpacket0 (first reg=0x%04x, count=%d)\n",
407 state
->name
, i
, reg
, count
);
409 for (j
= 0; j
< count
&& i
< dwords
; j
++) {
410 fprintf(stderr
, " %s[%d]: 0x%04x = %08x\n",
411 state
->name
, i
, reg
, state
->cmd
[i
]);
420 * Count total size for next state emit.
422 GLuint
radeonCountStateEmitSize(radeonContextPtr radeon
)
424 struct radeon_state_atom
*atom
;
426 /* check if we are going to emit full state */
428 if (radeon
->cmdbuf
.cs
->cdw
&& !radeon
->hw
.all_dirty
) {
429 if (!radeon
->hw
.is_dirty
)
431 foreach(atom
, &radeon
->hw
.atomlist
) {
433 const GLuint atom_size
= atom
->check(&radeon
->glCtx
, atom
);
435 if (RADEON_CMDBUF
&& atom_size
) {
436 radeon_print_state_atom(radeon
, atom
);
441 foreach(atom
, &radeon
->hw
.atomlist
) {
442 const GLuint atom_size
= atom
->check(&radeon
->glCtx
, atom
);
444 if (RADEON_CMDBUF
&& atom_size
) {
445 radeon_print_state_atom(radeon
, atom
);
451 radeon_print(RADEON_STATE
, RADEON_NORMAL
, "%s %u\n", __func__
, dwords
);
455 static inline void radeon_emit_atom(radeonContextPtr radeon
, struct radeon_state_atom
*atom
)
457 BATCH_LOCALS(radeon
);
460 dwords
= atom
->check(&radeon
->glCtx
, atom
);
463 radeon_print_state_atom(radeon
, atom
);
466 atom
->emit(&radeon
->glCtx
, atom
);
469 OUT_BATCH_TABLE(atom
->cmd
, dwords
);
472 atom
->dirty
= GL_FALSE
;
475 radeon_print(RADEON_STATE
, RADEON_VERBOSE
, " skip state %s\n", atom
->name
);
480 static inline void radeonEmitAtoms(radeonContextPtr radeon
, GLboolean emitAll
)
482 struct radeon_state_atom
*atom
;
484 /* Emit actual atoms */
485 if (radeon
->hw
.all_dirty
|| emitAll
) {
486 foreach(atom
, &radeon
->hw
.atomlist
)
487 radeon_emit_atom( radeon
, atom
);
489 foreach(atom
, &radeon
->hw
.atomlist
) {
491 radeon_emit_atom( radeon
, atom
);
498 void radeonEmitState(radeonContextPtr radeon
)
500 radeon_print(RADEON_STATE
, RADEON_NORMAL
, "%s\n", __func__
);
502 if (radeon
->vtbl
.pre_emit_state
)
503 radeon
->vtbl
.pre_emit_state(radeon
);
505 /* this code used to return here but now it emits zbs */
506 if (radeon
->cmdbuf
.cs
->cdw
&& !radeon
->hw
.is_dirty
&& !radeon
->hw
.all_dirty
)
509 if (!radeon
->cmdbuf
.cs
->cdw
) {
510 if (RADEON_DEBUG
& RADEON_STATE
)
511 fprintf(stderr
, "Begin reemit state\n");
513 radeonEmitAtoms(radeon
, GL_TRUE
);
516 if (RADEON_DEBUG
& RADEON_STATE
)
517 fprintf(stderr
, "Begin dirty state\n");
519 radeonEmitAtoms(radeon
, GL_FALSE
);
522 radeon
->hw
.is_dirty
= GL_FALSE
;
523 radeon
->hw
.all_dirty
= GL_FALSE
;
527 void radeonFlush(struct gl_context
*ctx
)
529 radeonContextPtr radeon
= RADEON_CONTEXT(ctx
);
530 if (RADEON_DEBUG
& RADEON_IOCTL
)
531 fprintf(stderr
, "%s %d\n", __func__
, radeon
->cmdbuf
.cs
->cdw
);
533 /* okay if we have no cmds in the buffer &&
534 we have no DMA flush &&
535 we have no DMA buffer allocated.
536 then no point flushing anything at all.
538 if (!radeon
->dma
.flush
&& !radeon
->cmdbuf
.cs
->cdw
&& is_empty_list(&radeon
->dma
.reserved
))
541 if (radeon
->dma
.flush
)
542 radeon
->dma
.flush( ctx
);
544 if (radeon
->cmdbuf
.cs
->cdw
)
545 rcommonFlushCmdBuf(radeon
, __func__
);
548 if (_mesa_is_winsys_fbo(ctx
->DrawBuffer
) && radeon
->front_buffer_dirty
) {
549 __DRIscreen
*const screen
= radeon
->radeonScreen
->driScreen
;
551 if (screen
->dri2
.loader
&& (screen
->dri2
.loader
->base
.version
>= 2)
552 && (screen
->dri2
.loader
->flushFrontBuffer
!= NULL
)) {
553 __DRIdrawable
* drawable
= radeon_get_drawable(radeon
);
555 /* We set the dirty bit in radeon_prepare_render() if we're
556 * front buffer rendering once we get there.
558 radeon
->front_buffer_dirty
= GL_FALSE
;
560 screen
->dri2
.loader
->flushFrontBuffer(drawable
, drawable
->loaderPrivate
);
565 /* Make sure all commands have been sent to the hardware and have
566 * completed processing.
568 void radeonFinish(struct gl_context
* ctx
)
570 radeonContextPtr radeon
= RADEON_CONTEXT(ctx
);
571 struct gl_framebuffer
*fb
= ctx
->DrawBuffer
;
572 struct radeon_renderbuffer
*rrb
;
575 if (ctx
->Driver
.Flush
)
576 ctx
->Driver
.Flush(ctx
); /* +r6/r7 */
578 for (i
= 0; i
< fb
->_NumColorDrawBuffers
; i
++) {
579 struct radeon_renderbuffer
*rrb
;
580 rrb
= radeon_renderbuffer(fb
->_ColorDrawBuffers
[i
]);
582 radeon_bo_wait(rrb
->bo
);
584 rrb
= radeon_get_depthbuffer(radeon
);
586 radeon_bo_wait(rrb
->bo
);
591 * Send the current command buffer via ioctl to the hardware.
593 int rcommonFlushCmdBufLocked(radeonContextPtr rmesa
, const char *caller
)
597 if (rmesa
->cmdbuf
.flushing
) {
598 fprintf(stderr
, "Recursive call into r300FlushCmdBufLocked!\n");
601 rmesa
->cmdbuf
.flushing
= 1;
603 if (RADEON_DEBUG
& RADEON_IOCTL
) {
604 fprintf(stderr
, "%s from %s\n", __func__
, caller
);
607 radeonEmitQueryEnd(&rmesa
->glCtx
);
609 if (rmesa
->cmdbuf
.cs
->cdw
) {
610 ret
= radeon_cs_emit(rmesa
->cmdbuf
.cs
);
611 rmesa
->hw
.all_dirty
= GL_TRUE
;
613 radeon_cs_erase(rmesa
->cmdbuf
.cs
);
614 rmesa
->cmdbuf
.flushing
= 0;
616 if (!rmesa
->vtbl
.revalidate_all_buffers(&rmesa
->glCtx
))
617 fprintf(stderr
,"failed to revalidate buffers\n");
622 int rcommonFlushCmdBuf(radeonContextPtr rmesa
, const char *caller
)
626 radeonReleaseDmaRegions(rmesa
);
628 ret
= rcommonFlushCmdBufLocked(rmesa
, caller
);
631 fprintf(stderr
, "drmRadeonCmdBuffer: %d. Kernel failed to "
632 "parse or rejected command stream. See dmesg "
633 "for more info.\n", ret
);
641 * Make sure that enough space is available in the command buffer
642 * by flushing if necessary.
644 * \param dwords The number of dwords we need to be free on the command buffer
646 GLboolean
rcommonEnsureCmdBufSpace(radeonContextPtr rmesa
, int dwords
, const char *caller
)
648 if ((rmesa
->cmdbuf
.cs
->cdw
+ dwords
+ 128) > rmesa
->cmdbuf
.size
649 || radeon_cs_need_flush(rmesa
->cmdbuf
.cs
)) {
650 /* If we try to flush empty buffer there is too big rendering operation. */
651 assert(rmesa
->cmdbuf
.cs
->cdw
);
652 rcommonFlushCmdBuf(rmesa
, caller
);
658 void rcommonInitCmdBuf(radeonContextPtr rmesa
)
661 struct drm_radeon_gem_info mminfo
= { 0 };
662 int fd
= rmesa
->radeonScreen
->driScreen
->fd
;
664 /* Initialize command buffer */
665 size
= 256 * driQueryOptioni(&rmesa
->optionCache
,
666 "command_buffer_size");
667 if (size
< 2 * rmesa
->hw
.max_state_size
) {
668 size
= 2 * rmesa
->hw
.max_state_size
+ 65535;
673 radeon_print(RADEON_CS
, RADEON_VERBOSE
,
674 "sizeof(drm_r300_cmd_header_t)=%zd\n", sizeof(drm_r300_cmd_header_t
));
675 radeon_print(RADEON_CS
, RADEON_VERBOSE
,
676 "sizeof(drm_radeon_cmd_buffer_t)=%zd\n", sizeof(drm_radeon_cmd_buffer_t
));
677 radeon_print(RADEON_CS
, RADEON_VERBOSE
,
678 "Allocating %d bytes command buffer (max state is %d bytes)\n",
679 size
* 4, rmesa
->hw
.max_state_size
* 4);
681 rmesa
->cmdbuf
.csm
= radeon_cs_manager_gem_ctor(fd
);
682 if (rmesa
->cmdbuf
.csm
== NULL
) {
683 /* FIXME: fatal error */
686 rmesa
->cmdbuf
.cs
= radeon_cs_create(rmesa
->cmdbuf
.csm
, size
);
687 assert(rmesa
->cmdbuf
.cs
!= NULL
);
688 rmesa
->cmdbuf
.size
= size
;
690 radeon_cs_space_set_flush(rmesa
->cmdbuf
.cs
,
691 (void (*)(void *))rmesa
->glCtx
.Driver
.Flush
, &rmesa
->glCtx
);
694 if (!drmCommandWriteRead(fd
, DRM_RADEON_GEM_INFO
,
695 &mminfo
, sizeof(mminfo
))) {
696 radeon_cs_set_limit(rmesa
->cmdbuf
.cs
, RADEON_GEM_DOMAIN_VRAM
,
697 mminfo
.vram_visible
);
698 radeon_cs_set_limit(rmesa
->cmdbuf
.cs
, RADEON_GEM_DOMAIN_GTT
,
704 * Destroy the command buffer
706 void rcommonDestroyCmdBuf(radeonContextPtr rmesa
)
708 radeon_cs_destroy(rmesa
->cmdbuf
.cs
);
709 radeon_cs_manager_gem_dtor(rmesa
->cmdbuf
.csm
);
712 void rcommonBeginBatch(radeonContextPtr rmesa
, int n
,
714 const char *function
,
717 radeon_cs_begin(rmesa
->cmdbuf
.cs
, n
, file
, function
, line
);
719 radeon_print(RADEON_CS
, RADEON_VERBOSE
, "BEGIN_BATCH(%d) at %d, from %s:%i\n",
720 n
, rmesa
->cmdbuf
.cs
->cdw
, function
, line
);
724 void radeonUserClear(struct gl_context
*ctx
, GLuint mask
)
726 _mesa_meta_Clear(ctx
, mask
);