/**************************************************************************

Copyright 2000, 2001 ATI Technologies Inc., Ontario, Canada, and
                     VA Linux Systems Inc., Fremont, California.

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice (including the
next paragraph) shall be included in all copies or substantial
portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

**************************************************************************/
/*
 * Authors:
 *   Kevin E. Martin <martin@valinux.com>
 *   Gareth Hughes <gareth@valinux.com>
 */
36 #ifndef __RADEON_IOCTL_H__
37 #define __RADEON_IOCTL_H__
39 #include "main/simple_list.h"
40 #include "radeon_lock.h"
43 extern void radeonEmitState( radeonContextPtr rmesa
);
44 extern void radeonEmitVertexAOS( radeonContextPtr rmesa
,
48 extern void radeonEmitVbufPrim( radeonContextPtr rmesa
,
53 extern void radeonFlushElts( radeonContextPtr rmesa
);
55 extern GLushort
*radeonAllocEltsOpenEnded( radeonContextPtr rmesa
,
60 extern void radeonEmitAOS( radeonContextPtr rmesa
,
61 struct radeon_dma_region
**regions
,
65 extern void radeonEmitBlit( radeonContextPtr rmesa
,
71 GLint srcx
, GLint srcy
,
72 GLint dstx
, GLint dsty
,
75 extern void radeonEmitWait( radeonContextPtr rmesa
, GLuint flags
);
77 extern void radeonFlushCmdBuf( radeonContextPtr rmesa
, const char * );
78 extern void radeonRefillCurrentDmaRegion( radeonContextPtr rmesa
);
80 extern void radeonAllocDmaRegion( radeonContextPtr rmesa
,
81 struct radeon_dma_region
*region
,
85 extern void radeonReleaseDmaRegion( radeonContextPtr rmesa
,
86 struct radeon_dma_region
*region
,
89 extern void radeonCopyBuffer( __DRIdrawablePrivate
*drawable
,
90 const drm_clip_rect_t
*rect
);
91 extern void radeonPageFlip( __DRIdrawablePrivate
*drawable
);
92 extern void radeonFlush( GLcontext
*ctx
);
93 extern void radeonFinish( GLcontext
*ctx
);
94 extern void radeonWaitForIdleLocked( radeonContextPtr rmesa
);
95 extern void radeonWaitForVBlank( radeonContextPtr rmesa
);
96 extern void radeonInitIoctlFuncs( GLcontext
*ctx
);
97 extern void radeonGetAllParams( radeonContextPtr rmesa
);
98 extern void radeonSetUpAtomList( radeonContextPtr rmesa
);
/* ================================================================
 * Helper macros:
 */

/* Close off the last primitive, if it exists.
 * NOTE(review): the do/while(0) wrapper was missing from the corrupted
 * source and was restored so the macro is statement-safe.
 */
#define RADEON_NEWPRIM( rmesa )			\
do {						\
   if ( rmesa->dma.flush )			\
      rmesa->dma.flush( rmesa );		\
} while (0)
/* Can accommodate several state changes and primitive changes without
 * actually firing the buffer.
 */
#define RADEON_STATECHANGE( rmesa, ATOM )	\
do {						\
   RADEON_NEWPRIM( rmesa );			\
   rmesa->hw.ATOM.dirty = GL_TRUE;		\
   rmesa->hw.is_dirty = GL_TRUE;		\
} while (0)
/* Snapshot ATOM's current command dwords into its lastcmd shadow copy
 * (cmd_size is in dwords, hence the *4 byte count).  Expects a local
 * 'rmesa' in the expanding scope. */
#define RADEON_DB_STATE( ATOM )			\
   memcpy( rmesa->hw.ATOM.lastcmd, rmesa->hw.ATOM.cmd, \
	   rmesa->hw.ATOM.cmd_size * 4)
126 static INLINE
int RADEON_DB_STATECHANGE(
127 radeonContextPtr rmesa
,
128 struct radeon_state_atom
*atom
)
130 if (memcmp(atom
->cmd
, atom
->lastcmd
, atom
->cmd_size
*4)) {
132 RADEON_NEWPRIM( rmesa
);
133 atom
->dirty
= GL_TRUE
;
134 rmesa
->hw
.is_dirty
= GL_TRUE
;
136 atom
->cmd
= atom
->lastcmd
;
/* Fire the buffered vertices no matter what.
 * NOTE(review): the do/while(0) wrapper was missing from the corrupted
 * source and was restored so the macro is statement-safe.
 */
#define RADEON_FIREVERTICES( rmesa )			\
do {							\
   if ( rmesa->store.cmd_used || rmesa->dma.flush ) {	\
      radeonFlush( rmesa->glCtx );			\
   }							\
} while (0)
/* Command lengths.  Note that any time you ensure ELTS_BUFSZ or VBUF_BUFSZ
 * are available, you will also be adding an rmesa->state.max_state_size because
 * r200EmitState is called from within r200EmitVbufPrim and r200FlushElts.
 * (NOTE(review): the #else/#endif were missing from the corrupted source;
 * the two groups of duplicate defines make the restored structure
 * unambiguous.)
 */
#if RADEON_OLD_PACKETS
#define AOS_BUFSZ(nr)	((3 + ((nr / 2) * 3) + ((nr & 1) * 2)) * sizeof(int))
#define VERT_AOS_BUFSZ	(0)
#define ELTS_BUFSZ(nr)	(24 + nr * 2)
#define VBUF_BUFSZ	(6 * sizeof(int))
#else
#define AOS_BUFSZ(nr)	((3 + ((nr / 2) * 3) + ((nr & 1) * 2)) * sizeof(int))
#define VERT_AOS_BUFSZ	(5 * sizeof(int))
#define ELTS_BUFSZ(nr)	(16 + nr * 2)
#define VBUF_BUFSZ	(4 * sizeof(int))
#endif
170 /* Ensure that a minimum amount of space is available in the command buffer.
171 * This is used to ensure atomicity of state updates with the rendering requests
174 * An alternative would be to implement a "soft lock" such that when the buffer
175 * wraps at an inopportune time, we grab the lock, flush the current buffer,
176 * and hang on to the lock until the critical section is finished and we flush
177 * the buffer again and unlock.
179 static INLINE
void radeonEnsureCmdBufSpace( radeonContextPtr rmesa
,
182 if (rmesa
->store
.cmd_used
+ bytes
> RADEON_CMD_BUF_SZ
)
183 radeonFlushCmdBuf( rmesa
, __FUNCTION__
);
184 assert( bytes
<= RADEON_CMD_BUF_SZ
);
187 /* Alloc space in the command buffer
189 static INLINE
char *radeonAllocCmdBuf( radeonContextPtr rmesa
,
190 int bytes
, const char *where
)
192 if (rmesa
->store
.cmd_used
+ bytes
> RADEON_CMD_BUF_SZ
)
193 radeonFlushCmdBuf( rmesa
, __FUNCTION__
);
196 char *head
= rmesa
->store
.cmd_buf
+ rmesa
->store
.cmd_used
;
197 rmesa
->store
.cmd_used
+= bytes
;
202 #endif /* __RADEON_IOCTL_H__ */