#ifndef __RADEON_IOCTL_H__
#define __RADEON_IOCTL_H__
-#ifdef GLX_DIRECT_RENDERING
-
#include "simple_list.h"
#include "radeon_lock.h"
extern void radeonWaitForVBlank( radeonContextPtr rmesa );
extern void radeonInitIoctlFuncs( GLcontext *ctx );
extern void radeonGetAllParams( radeonContextPtr rmesa );
+extern void radeonSetUpAtomList( radeonContextPtr rmesa );
/* radeon_compat.c:
 */
extern void radeonCompatEmitPrimitive( radeonContextPtr rmesa,
                                       GLuint vertex_format,
                                       GLuint hw_primitive,
                                       GLuint nrverts );
-
/* ================================================================
* Helper macros:
*/
#define RADEON_STATECHANGE( rmesa, ATOM ) \
do { \
   RADEON_NEWPRIM( rmesa ); \
-   move_to_head( &(rmesa->hw.dirty), &(rmesa->hw.ATOM)); \
+   rmesa->hw.ATOM.dirty = GL_TRUE; \
+   rmesa->hw.is_dirty = GL_TRUE; \
} while (0)
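+/* Usage sketch (illustrative only; the ctx atom and dither bit stand in
+ * for whatever state the caller really touches): mark the atom dirty
+ * before editing its command-buffer image:
+ *
+ *    RADEON_STATECHANGE( rmesa, ctx );
+ *    rmesa->hw.ctx.cmd[CTX_RB3D_CNTL] |= RADEON_DITHER_ENABLE;
+ */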
#define RADEON_DB_STATE( ATOM ) \
   memcpy( rmesa->hw.ATOM.lastcmd, rmesa->hw.ATOM.cmd, \
           rmesa->hw.ATOM.cmd_size * 4)

static __inline int RADEON_DB_STATECHANGE( radeonContextPtr rmesa,
                                           struct radeon_state_atom *atom )
{
   if (memcmp(atom->cmd, atom->lastcmd, atom->cmd_size*4)) {
      int *tmp;
      RADEON_NEWPRIM( rmesa );
-      move_to_head( &(rmesa->hw.dirty), atom );
+      atom->dirty = GL_TRUE;
+      rmesa->hw.is_dirty = GL_TRUE;
      tmp = atom->cmd;
      atom->cmd = atom->lastcmd;
      atom->lastcmd = tmp;
      return 1;
   }
   else
      return 0;
}
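+/* Usage sketch (hypothetical atom name "xyz"): RADEON_DB_STATE snapshots
+ * the atom, new values are written into the copy, and RADEON_DB_STATECHANGE
+ * swaps the buffers and marks the atom dirty only if something changed:
+ *
+ *    RADEON_DB_STATE( xyz );                          // lastcmd <- cmd
+ *    ((int *)rmesa->hw.xyz.lastcmd)[1] = new_value;   // edit the copy
+ *    RADEON_DB_STATECHANGE( rmesa, &rmesa->hw.xyz );  // swap if changed
+ */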
+/* Command lengths.  Note that any time you ensure ELTS_BUFSZ or VBUF_BUFSZ
+ * of space is available, you must also reserve rmesa->hw.max_state_size
+ * bytes, because radeonEmitState is called from within radeonEmitVbufPrim
+ * and radeonFlushElts.
+ */
+#if RADEON_OLD_PACKETS
+#define AOS_BUFSZ(nr) ((3 + ((nr / 2) * 3) + ((nr & 1) * 2)) * sizeof(int))
+#define VERT_AOS_BUFSZ (0)
+#define ELTS_BUFSZ(nr) (24 + nr * 2)
+#define VBUF_BUFSZ (6 * sizeof(int))
+#else
+#define AOS_BUFSZ(nr) ((3 + ((nr / 2) * 3) + ((nr & 1) * 2)) * sizeof(int))
+#define VERT_AOS_BUFSZ (5 * sizeof(int))
+#define ELTS_BUFSZ(nr) (16 + nr * 2)
+#define VBUF_BUFSZ (4 * sizeof(int))
+#endif
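+
+/* Worked example (assuming 4-byte ints): emitting three arrays-of-structs
+ * costs AOS_BUFSZ(3) = (3 + (3/2)*3 + (3&1)*2) * sizeof(int)
+ *                    = (3 + 3 + 2) * 4 = 32 bytes
+ * in either packet format; only ELTS_BUFSZ, VBUF_BUFSZ and VERT_AOS_BUFSZ
+ * differ between the old and new packet styles.
+ */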
+
+/* Ensure that a minimum amount of space is available in the command buffer.
+ * This is used to ensure atomicity of state updates with the rendering requests
+ * that rely on them.
+ *
+ * An alternative would be to implement a "soft lock" such that when the buffer
+ * wraps at an inopportune time, we grab the lock, flush the current buffer,
+ * and hang on to the lock until the critical section is finished and we flush
+ * the buffer again and unlock.
+ */
+static __inline void radeonEnsureCmdBufSpace( radeonContextPtr rmesa,
+ int bytes )
+{
+ if (rmesa->store.cmd_used + bytes > RADEON_CMD_BUF_SZ)
+ radeonFlushCmdBuf( rmesa, __FUNCTION__ );
+ assert( bytes <= RADEON_CMD_BUF_SZ );
+}
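+
+/* Intended call pattern (a sketch; exact call sites live in the .c files):
+ * reserve the worst case for state plus the primitive packet up front, so
+ * the subsequent emits cannot wrap the buffer mid-sequence:
+ *
+ *    radeonEnsureCmdBufSpace( rmesa, AOS_BUFSZ(nr_aos) +
+ *                             rmesa->hw.max_state_size + ELTS_BUFSZ(nr) );
+ */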
+
/* Alloc space in the command buffer
*/
static __inline char *radeonAllocCmdBuf( radeonContextPtr rmesa,
                                         int bytes, const char *where )
{
   char *head;
   if (rmesa->store.cmd_used + bytes > RADEON_CMD_BUF_SZ)
      radeonFlushCmdBuf( rmesa, where );
   head = rmesa->store.cmd_buf + rmesa->store.cmd_used;
   rmesa->store.cmd_used += bytes;
   return head;
}
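+
+/* Usage sketch: reserve space, then write packet dwords through the
+ * returned pointer (header/payload contents here are placeholders):
+ *
+ *    int *cmd = (int *)radeonAllocCmdBuf( rmesa, 2 * sizeof(int),
+ *                                         __FUNCTION__ );
+ *    cmd[0] = header_dword;
+ *    cmd[1] = payload_dword;
+ */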
-
-
-
-#endif
#endif /* __RADEON_IOCTL_H__ */