radeon_screen.c \
radeon_screen.h \
radeon_bo_legacy.h \
+ radeon_cs_legacy.h \
radeon_buffer.h \
common_context.h \
common_lock.c \
common_misc.h \
common_misc.c
+
##### TARGETS #####
}
}
- r200UpdateScissor( ctx );
+ radeonUpdateScissor( ctx );
}
case GL_SCISSOR_TEST:
R200_FIREVERTICES( rmesa );
rmesa->radeon.state.scissor.enabled = state;
- r200UpdateScissor( ctx );
+ radeonUpdateScissor( ctx );
break;
case GL_STENCIL_TEST:
*/
#define SPACE_FOR_FLUSHING 4
-/**
- * Send the current command buffer via ioctl to the hardware.
- */
-int r300FlushCmdBufLocked(r300ContextPtr r300, const char *caller)
-{
- int ret = 0;
-
- if (r300->cmdbuf.flushing) {
- fprintf(stderr, "Recursive call into r300FlushCmdBufLocked!\n");
- exit(-1);
- }
- r300->cmdbuf.flushing = 1;
- if (r300->cmdbuf.cs->cdw) {
- ret = radeon_cs_emit(r300->cmdbuf.cs);
- r300->hw.all_dirty = 1;
- }
- radeon_cs_erase(r300->cmdbuf.cs);
- r300->cmdbuf.flushing = 0;
- return ret;
-}
-
-int r300FlushCmdBuf(r300ContextPtr r300, const char *caller)
-{
- int ret;
-
- LOCK_HARDWARE(&r300->radeon);
- ret = r300FlushCmdBufLocked(r300, caller);
- UNLOCK_HARDWARE(&r300->radeon);
-
- if (ret) {
- fprintf(stderr, "drmRadeonCmdBuffer: %d\n", ret);
- _mesa_exit(ret);
- }
-
- return ret;
-}
-
-/**
- * Make sure that enough space is available in the command buffer
- * by flushing if necessary.
- *
- * \param dwords The number of dwords we need to be free on the command buffer
- */
-void r300EnsureCmdBufSpace(r300ContextPtr r300, int dwords, const char *caller)
-{
- if ((r300->cmdbuf.cs->cdw + dwords + 128) > r300->cmdbuf.size ||
- radeon_cs_need_flush(r300->cmdbuf.cs)) {
- r300FlushCmdBuf(r300, caller);
- }
-}
-
void r300BeginBatch(r300ContextPtr r300, int n,
int dostate,
const char *file,
const char *function,
int line)
{
- r300EnsureCmdBufSpace(r300, n, function);
- if (!r300->cmdbuf.cs->cdw && dostate) {
+ rcommonEnsureCmdBufSpace(&r300->radeon, n, function);
+ if (!r300->radeon.cmdbuf.cs->cdw && dostate) {
if (RADEON_DEBUG & DEBUG_IOCTL)
fprintf(stderr, "Reemit state after flush (from %s)\n", function);
r300EmitState(r300);
}
- radeon_cs_begin(r300->cmdbuf.cs, n, file, function, line);
+ radeon_cs_begin(r300->radeon.cmdbuf.cs, n, file, function, line);
}
static void r300PrintStateAtom(r300ContextPtr r300,
if (RADEON_DEBUG & (DEBUG_STATE | DEBUG_PRIMS))
fprintf(stderr, "%s\n", __FUNCTION__);
- if (r300->cmdbuf.cs->cdw && !r300->hw.is_dirty && !r300->hw.all_dirty)
+ if (r300->radeon.cmdbuf.cs->cdw && !r300->hw.is_dirty && !r300->hw.all_dirty)
return;
/* To avoid going across the entire set of states multiple times, just check
* for enough space for the case of emitting all state.
*/
- r300EnsureCmdBufSpace(r300, r300->hw.max_state_size, __FUNCTION__);
+ rcommonEnsureCmdBufSpace(&r300->radeon, r300->hw.max_state_size, __FUNCTION__);
- if (!r300->cmdbuf.cs->cdw) {
+ if (!r300->radeon.cmdbuf.cs->cdw) {
if (RADEON_DEBUG & DEBUG_STATE)
fprintf(stderr, "Begin reemit state\n");
*/
void r300InitCmdBuf(r300ContextPtr r300)
{
- int size, mtu;
+ int mtu;
int has_tcl = 1;
int is_r500 = 0;
int i;
r300->hw.is_dirty = GL_TRUE;
r300->hw.all_dirty = GL_TRUE;
- /* Initialize command buffer */
- size =
- 256 * driQueryOptioni(&r300->radeon.optionCache,
- "command_buffer_size");
- if (size < 2 * r300->hw.max_state_size) {
- size = 2 * r300->hw.max_state_size + 65535;
- }
- if (size > 64 * 256)
- size = 64 * 256;
-
- size = 64 * 1024 / 4;
- if (RADEON_DEBUG & (DEBUG_IOCTL | DEBUG_DMA)) {
- fprintf(stderr, "sizeof(drm_r300_cmd_header_t)=%zd\n",
- sizeof(drm_r300_cmd_header_t));
- fprintf(stderr, "sizeof(drm_radeon_cmd_buffer_t)=%zd\n",
- sizeof(drm_radeon_cmd_buffer_t));
- fprintf(stderr,
- "Allocating %d bytes command buffer (max state is %d bytes)\n",
- size * 4, r300->hw.max_state_size * 4);
- }
-
- if (r300->radeon.radeonScreen->kernel_mm) {
- int fd = r300->radeon.radeonScreen->driScreen->fd;
- r300->cmdbuf.csm = radeon_cs_manager_gem_ctor(fd);
- } else {
- r300->cmdbuf.csm = radeon_cs_manager_legacy_ctor(&r300->radeon);
- }
- if (r300->cmdbuf.csm == NULL) {
- /* FIXME: fatal error */
- return;
- }
- r300->cmdbuf.cs = radeon_cs_create(r300->cmdbuf.csm, size);
- assert(r300->cmdbuf.cs != NULL);
- r300->cmdbuf.size = size;
+ rcommonInitCmdBuf(&r300->radeon, r300->hw.max_state_size);
}
/**
{
struct radeon_state_atom *atom;
- radeon_cs_destroy(r300->cmdbuf.cs);
foreach(atom, &r300->hw.atomlist) {
FREE(atom->cmd);
}
- if (r300->radeon.radeonScreen->driScreen->dri2.enabled || r300->radeon.radeonScreen->kernel_mm) {
- radeon_cs_manager_gem_dtor(r300->cmdbuf.csm);
- } else {
- radeon_cs_manager_legacy_dtor(r300->cmdbuf.csm);
- }
+ rcommonDestroyCmdBuf(&r300->radeon);
+
}
#include "r300_context.h"
#include "radeon_cs.h"
-extern int r300FlushCmdBufLocked(r300ContextPtr r300, const char *caller);
-extern int r300FlushCmdBuf(r300ContextPtr r300, const char *caller);
extern void r300EmitState(r300ContextPtr r300);
extern void r300InitCmdBuf(r300ContextPtr r300);
extern void r300DestroyCmdBuf(r300ContextPtr r300);
-extern void r300EnsureCmdBufSpace(r300ContextPtr r300, int dwords, const char *caller);
void r300BeginBatch(r300ContextPtr r300,
int n,
*/
#define OUT_BATCH(data) \
do { \
- radeon_cs_write_dword(b_l_r300->cmdbuf.cs, data);\
+ radeon_cs_write_dword(b_l_r300->radeon.cmdbuf.cs, data);\
} while(0)
/**
fprintf(stderr, "(%s:%s:%d) offset : %d\n",\
__FILE__, __FUNCTION__, __LINE__, offset);\
}\
- radeon_cs_write_dword(b_l_r300->cmdbuf.cs, offset);\
- radeon_cs_write_reloc(b_l_r300->cmdbuf.cs, \
+ radeon_cs_write_dword(b_l_r300->radeon.cmdbuf.cs, offset);\
+ radeon_cs_write_reloc(b_l_r300->radeon.cmdbuf.cs, \
bo, \
rd, \
wd, \
do { \
int _i; \
for (_i=0; _i < n; _i++) {\
- radeon_cs_write_dword(b_l_r300->cmdbuf.cs, ptr[_i]);\
+ radeon_cs_write_dword(b_l_r300->radeon.cmdbuf.cs, ptr[_i]);\
}\
} while(0)
*/
#define END_BATCH() \
do { \
- radeon_cs_end(b_l_r300->cmdbuf.cs, __FILE__, __FUNCTION__, __LINE__);\
+ radeon_cs_end(b_l_r300->radeon.cmdbuf.cs, __FILE__, __FUNCTION__, __LINE__);\
} while(0)
/**
#include "r300_tex.h"
#include "r300_emit.h"
#include "r300_swtcl.h"
+#include "radeon_bo_legacy.h"
#include "vblank.h"
_vbo_DestroyContext(r300->radeon.glCtx);
_swrast_DestroyContext(r300->radeon.glCtx);
- r300FlushCmdBuf(r300, __FUNCTION__);
+ rcommonFlushCmdBuf(&r300->radeon, __FUNCTION__);
r300DestroyCmdBuf(r300);
if (radeon->state.scissor.pClipRects) {
#include "radeon_drm.h"
#include "dri_util.h"
#include "texmem.h"
+#include "radeon_context.h"
#include "radeon_bo.h"
#include "main/macros.h"
r300TexObj *textures[R300_MAX_TEXTURE_UNITS];
};
-/**
- * This structure holds the command buffer while it is being constructed.
- *
- * The first batch of commands in the buffer is always the state that needs
- * to be re-emitted when the context is lost. This batch can be skipped
- * otherwise.
- */
-struct r300_cmdbuf {
- struct radeon_cs_manager *csm;
- struct radeon_cs *cs;
- int size; /** # of dwords total */
- unsigned int flushing:1; /** whether we're currently in FlushCmdBufLocked */
-};
-
/**
* State cache
*/
struct radeon_context radeon; /* parent class, must be first */
struct r300_hw_state hw;
- struct r300_cmdbuf cmdbuf;
+
struct r300_state state;
struct gl_vertex_program *curr_vp;
struct r300_vertex_program *selected_vp;
}
/* Make sure it fits there. */
- r300EnsureCmdBufSpace(r300, 421 * 3, __FUNCTION__);
+ rcommonEnsureCmdBufSpace(&r300->radeon, 421 * 3, __FUNCTION__);
if (flags || bits)
r300EmitClearState(ctx);
rrbd = (void *)fb->Attachment[BUFFER_DEPTH].Renderbuffer;
rmesa->swtcl.flush(rmesa);
}
- if (rmesa->cmdbuf.cs->cdw) {
- r300FlushCmdBuf(rmesa, __FUNCTION__);
+ if (rmesa->radeon.cmdbuf.cs->cdw) {
+ rcommonFlushCmdBuf(&rmesa->radeon, __FUNCTION__);
}
}
OUT_BATCH(R300_EB_UNK1 | (0 << 16) | R300_EB_UNK2);
OUT_BATCH(rmesa->state.elt_dma_offset);
OUT_BATCH(vertex_count);
- radeon_cs_write_reloc(rmesa->cmdbuf.cs,
+ radeon_cs_write_reloc(rmesa->radeon.cmdbuf.cs,
rmesa->state.elt_dma_bo,
RADEON_GEM_DOMAIN_GTT, 0, 0);
}
for (i = 0; i + 1 < nr; i += 2) {
voffset = rmesa->state.aos[i + 0].offset +
offset * 4 * rmesa->state.aos[i + 0].stride;
- radeon_cs_write_reloc(rmesa->cmdbuf.cs,
+ radeon_cs_write_reloc(rmesa->radeon.cmdbuf.cs,
rmesa->state.aos[i+0].bo,
RADEON_GEM_DOMAIN_GTT,
0, 0);
voffset = rmesa->state.aos[i + 1].offset +
offset * 4 * rmesa->state.aos[i + 1].stride;
- radeon_cs_write_reloc(rmesa->cmdbuf.cs,
+ radeon_cs_write_reloc(rmesa->radeon.cmdbuf.cs,
rmesa->state.aos[i+1].bo,
RADEON_GEM_DOMAIN_GTT,
0, 0);
if (nr & 1) {
voffset = rmesa->state.aos[nr - 1].offset +
offset * 4 * rmesa->state.aos[nr - 1].stride;
- radeon_cs_write_reloc(rmesa->cmdbuf.cs,
+ radeon_cs_write_reloc(rmesa->radeon.cmdbuf.cs,
rmesa->state.aos[nr-1].bo,
RADEON_GEM_DOMAIN_GTT,
0, 0);
* This is supposed to ensure that we can get all rendering
* commands into a single command buffer.
*/
- r300EnsureCmdBufSpace(rmesa, 64, __FUNCTION__);
+ rcommonEnsureCmdBufSpace(&rmesa->radeon, 64, __FUNCTION__);
if (vb->Elts) {
if (num_verts > 65535) {
fprintf(stderr, "%s\n", __FUNCTION__);
rmesa->swtcl.flush = NULL;
radeon_bo_unmap(rmesa->swtcl.bo);
- r300EnsureCmdBufSpace(rmesa,
- rmesa->hw.max_state_size + (12*sizeof(int)),
- __FUNCTION__);
+ rcommonEnsureCmdBufSpace(&rmesa->radeon,
+ rmesa->hw.max_state_size + (12*sizeof(int)),
+ __FUNCTION__);
r300EmitState(rmesa);
r300EmitVertexAOS(rmesa,
rmesa->swtcl.vertex_size,
#include "dri_util.h"
#include "radeon_screen.h"
-#include "common_context.h"
#include "common_misc.h"
#if R200_MERGED
#ifndef __RADEON_LOCK_H__
#define __RADEON_LOCK_H__
-#include "radeon_context.h"
-
-extern void radeonGetLock(radeonContextPtr rmesa, GLuint flags);
-extern void radeonUpdatePageFlipping(radeonContextPtr rmesa);
-
-/* Turn DEBUG_LOCKING on to find locking conflicts.
- */
-#define DEBUG_LOCKING 0
-
-#if DEBUG_LOCKING
-extern char *prevLockFile;
-extern int prevLockLine;
-
-#define DEBUG_LOCK() \
- do { \
- prevLockFile = (__FILE__); \
- prevLockLine = (__LINE__); \
- } while (0)
-
-#define DEBUG_RESET() \
- do { \
- prevLockFile = 0; \
- prevLockLine = 0; \
- } while (0)
-
-#define DEBUG_CHECK_LOCK() \
- do { \
- if (prevLockFile) { \
- fprintf(stderr, \
- "LOCK SET!\n\tPrevious %s:%d\n\tCurrent: %s:%d\n", \
- prevLockFile, prevLockLine, __FILE__, __LINE__); \
- exit(1); \
- } \
- } while (0)
-
-#else
-
-#define DEBUG_LOCK()
-#define DEBUG_RESET()
-#define DEBUG_CHECK_LOCK()
-
-#endif
-
-/*
- * !!! We may want to separate locks from locks with validation. This
- * could be used to improve performance for those things commands that
- * do not do any drawing !!!
- */
-
-/* Lock the hardware and validate our state.
- */
-#define LOCK_HARDWARE( rmesa ) \
- do { \
- char __ret = 0; \
- DEBUG_CHECK_LOCK(); \
- if (!(rmesa)->radeonScreen->driScreen->dri2.enabled) { \
- DRM_CAS((rmesa)->dri.hwLock, (rmesa)->dri.hwContext, \
- (DRM_LOCK_HELD | (rmesa)->dri.hwContext), __ret); \
- if (__ret) \
- radeonGetLock((rmesa), 0); \
- }\
- DEBUG_LOCK(); \
- } while (0)
-
-#define UNLOCK_HARDWARE( rmesa ) \
- do { \
- if (!(rmesa)->radeonScreen->driScreen->dri2.enabled) { \
- DRM_UNLOCK((rmesa)->dri.fd, \
- (rmesa)->dri.hwLock, \
- (rmesa)->dri.hwContext); \
- DEBUG_RESET(); \
- }\
- } while (0)
+#include "common_lock.h"
#endif /* __RADEON_LOCK_H__ */
#ifndef COMMON_CONTEXT_H
#define COMMON_CONTEXT_H
+
+#include "main/mm.h"
+#include "math/m_vector.h"
+#include "texmem.h"
+#include "tnl/t_context.h"
+#include "main/colormac.h"
+
+
/* This union is used to avoid warnings/miscompilation
with float to uint32_t casts due to strict-aliasing */
typedef union { GLfloat f; uint32_t ui32; } float_ui32_type;
typedef struct radeon_context radeonContextRec;
typedef struct radeon_context *radeonContextPtr;
-#include "main/mm.h"
-#include "math/m_vector.h"
-#include "texmem.h"
-#include "tnl/t_context.h"
-
#define TEX_0 0x1
#define TEX_1 0x2
struct radeon_stencilbuffer_state stencil;
};
+/**
+ * This structure holds the command buffer while it is being constructed.
+ *
+ * The first batch of commands in the buffer is always the state that needs
+ * to be re-emitted when the context is lost. This batch can be skipped
+ * otherwise.
+ */
+struct radeon_cmdbuf {
+ struct radeon_cs_manager *csm;
+ struct radeon_cs *cs;
+ int size; /** # of dwords total */
+ unsigned int flushing:1; /** whether we're currently in FlushCmdBufLocked */
+};
+
struct radeon_context {
GLcontext *glCtx;
radeonScreenPtr radeonScreen; /* Screen private DRI data */
*/
driOptionCache optionCache;
+ struct radeon_cmdbuf cmdbuf;
+
struct {
void (*get_lock)(radeonContextPtr radeon);
void (*update_viewport_offset)(GLcontext *ctx);
#else
/* Turn on/off page flipping according to the flags in the sarea:
*/
-static void radeonUpdatePageFlipping(radeonContextPtr rmesa)
+void radeonUpdatePageFlipping(radeonContextPtr rmesa)
{
rmesa->doPageFlip = rmesa->sarea->pfState;
if (rmesa->glCtx->WinSysDrawBuffer) {
#ifndef COMMON_LOCK_H
#define COMMON_LOCK_H
+
+#include "main/colormac.h"
+#include "radeon_screen.h"
+#include "common_context.h"
+
extern void radeonGetLock(radeonContextPtr rmesa, GLuint flags);
/* Turn DEBUG_LOCKING on to find locking conflicts.
do { \
char __ret = 0; \
DEBUG_CHECK_LOCK(); \
- DRM_CAS( (rmesa)->dri.hwLock, (rmesa)->dri.hwContext, \
- (DRM_LOCK_HELD | (rmesa)->dri.hwContext), __ret ); \
- if ( __ret ) \
+ if (!(rmesa)->radeonScreen->driScreen->dri2.enabled) { \
+ DRM_CAS( (rmesa)->dri.hwLock, (rmesa)->dri.hwContext, \
+ (DRM_LOCK_HELD | (rmesa)->dri.hwContext), __ret ); \
+ if ( __ret ) \
radeonGetLock( (rmesa), 0 ); \
+ } \
DEBUG_LOCK(); \
} while (0)
#define UNLOCK_HARDWARE( rmesa ) \
do { \
- DRM_UNLOCK( (rmesa)->dri.fd, \
- (rmesa)->dri.hwLock, \
- (rmesa)->dri.hwContext ); \
- DEBUG_RESET(); \
+ if (!(rmesa)->radeonScreen->driScreen->dri2.enabled) { \
+ DRM_UNLOCK( (rmesa)->dri.fd, \
+ (rmesa)->dri.hwLock, \
+ (rmesa)->dri.hwContext ); \
+ DEBUG_RESET(); \
+ } \
} while (0)
#endif
#include "drirenderbuffer.h"
#include "vblank.h"
-
+#include "radeon_bo.h"
+#include "radeon_cs.h"
+#include "radeon_bo_legacy.h"
+#include "radeon_cs_legacy.h"
+#include "radeon_bo_gem.h"
+#include "radeon_cs_gem.h"
#include "dri_util.h"
#include "radeon_drm.h"
-#include "radeon_screen.h"
#include "radeon_buffer.h"
+#include "radeon_screen.h"
#include "common_context.h"
#include "common_misc.h"
#include "common_lock.h"
}
}
+/* cmdbuffer */
+/**
+ * Send the current command buffer via ioctl to the hardware.
+ */
+int rcommonFlushCmdBufLocked(radeonContextPtr rmesa, const char *caller)
+{
+ int ret = 0;
+
+ if (rmesa->cmdbuf.flushing) {
+ fprintf(stderr, "Recursive call into rcommonFlushCmdBufLocked!\n");
+ exit(-1);
+ }
+ rmesa->cmdbuf.flushing = 1;
+ if (rmesa->cmdbuf.cs->cdw) {
+ ret = radeon_cs_emit(rmesa->cmdbuf.cs);
+ rmesa->vtbl.set_all_dirty(rmesa->glCtx);
+ }
+ radeon_cs_erase(rmesa->cmdbuf.cs);
+ rmesa->cmdbuf.flushing = 0;
+ return ret;
+}
+
+int rcommonFlushCmdBuf(radeonContextPtr rmesa, const char *caller)
+{
+ int ret;
+
+ LOCK_HARDWARE(rmesa);
+ ret = rcommonFlushCmdBufLocked(rmesa, caller);
+ UNLOCK_HARDWARE(rmesa);
+
+ if (ret) {
+ fprintf(stderr, "drmRadeonCmdBuffer: %d\n", ret);
+ _mesa_exit(ret);
+ }
+
+ return ret;
+}
+
+/**
+ * Make sure that enough space is available in the command buffer
+ * by flushing if necessary.
+ *
+ * \param dwords The number of dwords we need to be free on the command buffer
+ */
+void rcommonEnsureCmdBufSpace(radeonContextPtr rmesa, int dwords, const char *caller)
+{
+ if ((rmesa->cmdbuf.cs->cdw + dwords + 128) > rmesa->cmdbuf.size ||
+ radeon_cs_need_flush(rmesa->cmdbuf.cs)) {
+ rcommonFlushCmdBuf(rmesa, caller);
+ }
+}
+
+void rcommonInitCmdBuf(radeonContextPtr rmesa, int max_state_size)
+{
+ GLuint size;
+ /* Initialize command buffer */
+ size = 256 * driQueryOptioni(&rmesa->optionCache,
+ "command_buffer_size");
+ if (size < 2 * max_state_size) {
+ size = 2 * max_state_size + 65535;
+ }
+ if (size > 64 * 256)
+ size = 64 * 256;
+
+ size = 64 * 1024 / 4;
+
+ if (RADEON_DEBUG & (DEBUG_IOCTL | DEBUG_DMA)) {
+ fprintf(stderr, "sizeof(drm_r300_cmd_header_t)=%zd\n",
+ sizeof(drm_r300_cmd_header_t));
+ fprintf(stderr, "sizeof(drm_radeon_cmd_buffer_t)=%zd\n",
+ sizeof(drm_radeon_cmd_buffer_t));
+ fprintf(stderr,
+ "Allocating %d bytes command buffer (max state is %d bytes)\n",
+ size * 4, max_state_size * 4);
+ }
+
+ if (rmesa->radeonScreen->kernel_mm) {
+ int fd = rmesa->radeonScreen->driScreen->fd;
+ rmesa->cmdbuf.csm = radeon_cs_manager_gem_ctor(fd);
+ } else {
+ rmesa->cmdbuf.csm = radeon_cs_manager_legacy_ctor(rmesa);
+ }
+ if (rmesa->cmdbuf.csm == NULL) {
+ /* FIXME: fatal error */
+ return;
+ }
+ rmesa->cmdbuf.cs = radeon_cs_create(rmesa->cmdbuf.csm, size);
+ assert(rmesa->cmdbuf.cs != NULL);
+ rmesa->cmdbuf.size = size;
+
+}
+/**
+ * Destroy the command buffer
+ */
+void rcommonDestroyCmdBuf(radeonContextPtr rmesa)
+{
+ radeon_cs_destroy(rmesa->cmdbuf.cs);
+ if (rmesa->radeonScreen->driScreen->dri2.enabled || rmesa->radeonScreen->kernel_mm) {
+ radeon_cs_manager_gem_dtor(rmesa->cmdbuf.csm);
+ } else {
+ radeon_cs_manager_legacy_dtor(rmesa->cmdbuf.csm);
+ }
+}
#ifndef COMMON_MISC_H
#define COMMON_MISC_H
+#include "common_context.h"
void radeonRecalcScissorRects(radeonContextPtr radeon);
void radeonSetCliprects(radeonContextPtr radeon);
void radeonUpdateScissor( GLcontext *ctx );
void radeonUpdatePageFlipping(radeonContextPtr rmesa);
+void rcommonEnsureCmdBufSpace(radeonContextPtr rmesa, int dwords, const char *caller);
+int rcommonFlushCmdBuf(radeonContextPtr rmesa, const char *caller);
+int rcommonFlushCmdBufLocked(radeonContextPtr rmesa, const char *caller);
+void rcommonInitCmdBuf(radeonContextPtr rmesa, int max_state_size);
+void rcommonDestroyCmdBuf(radeonContextPtr rmesa);
#endif
#define RADEON_CS_LEGACY_H
#include "radeon_cs.h"
-#include "radeon_context.h"
+#include "common_context.h"
struct radeon_cs_manager *radeon_cs_manager_legacy_ctor(struct radeon_context *ctx);
void radeon_cs_manager_legacy_dtor(struct radeon_cs_manager *csm);
#include "radeon_macros.h"
#include "radeon_screen.h"
#include "radeon_buffer.h"
+#include "common_misc.h"
#if !RADEON_COMMON
#include "radeon_context.h"
#include "radeon_span.h"