The previous code would emit a full set of state during the first EmitState on
[mesa.git] / src / mesa / drivers / dri / radeon / radeon_ioctl.h
1 /* $XFree86: xc/lib/GL/mesa/src/drv/radeon/radeon_ioctl.h,v 1.6 2002/12/16 16:18:58 dawes Exp $ */
2 /**************************************************************************
3
4 Copyright 2000, 2001 ATI Technologies Inc., Ontario, Canada, and
5 VA Linux Systems Inc., Fremont, California.
6
7 All Rights Reserved.
8
9 Permission is hereby granted, free of charge, to any person obtaining
10 a copy of this software and associated documentation files (the
11 "Software"), to deal in the Software without restriction, including
12 without limitation the rights to use, copy, modify, merge, publish,
13 distribute, sublicense, and/or sell copies of the Software, and to
14 permit persons to whom the Software is furnished to do so, subject to
15 the following conditions:
16
17 The above copyright notice and this permission notice (including the
18 next paragraph) shall be included in all copies or substantial
19 portions of the Software.
20
21 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
22 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
23 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
24 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
25 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
26 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
27 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
28
29 **************************************************************************/
30
31 /*
32 * Authors:
33 * Kevin E. Martin <martin@valinux.com>
34 * Gareth Hughes <gareth@valinux.com>
35 */
36
37 #ifndef __RADEON_IOCTL_H__
38 #define __RADEON_IOCTL_H__
39
40 #ifdef GLX_DIRECT_RENDERING
41
42 #include "simple_list.h"
43 #include "radeon_lock.h"
44
45
/* State and command-buffer emission (implemented in radeon_ioctl.c). */
extern void radeonEmitState( radeonContextPtr rmesa );
extern void radeonEmitVertexAOS( radeonContextPtr rmesa,
				 GLuint vertex_size,
				 GLuint offset );

extern void radeonEmitVbufPrim( radeonContextPtr rmesa,
				GLuint vertex_format,
				GLuint primitive,
				GLuint vertex_nr );

extern void radeonFlushElts( radeonContextPtr rmesa );

extern GLushort *radeonAllocEltsOpenEnded( radeonContextPtr rmesa,
					   GLuint vertex_format,
					   GLuint primitive,
					   GLuint min_nr );

extern void radeonEmitAOS( radeonContextPtr rmesa,
			   struct radeon_dma_region **regions,
			   GLuint n,
			   GLuint offset );

extern void radeonEmitBlit( radeonContextPtr rmesa,
			    GLuint color_fmt,
			    GLuint src_pitch,
			    GLuint src_offset,
			    GLuint dst_pitch,
			    GLuint dst_offset,
			    GLint srcx, GLint srcy,
			    GLint dstx, GLint dsty,
			    GLuint w, GLuint h );

extern void radeonEmitWait( radeonContextPtr rmesa, GLuint flags );

/* The second argument names the caller, for debug output. */
extern void radeonFlushCmdBuf( radeonContextPtr rmesa, const char * );
extern void radeonRefillCurrentDmaRegion( radeonContextPtr rmesa );

/* DMA region management. */
extern void radeonAllocDmaRegion( radeonContextPtr rmesa,
				  struct radeon_dma_region *region,
				  int bytes,
				  int alignment );

extern void radeonAllocDmaRegionVerts( radeonContextPtr rmesa,
				       struct radeon_dma_region *region,
				       int numverts,
				       int vertsize,
				       int alignment );

extern void radeonReleaseDmaRegion( radeonContextPtr rmesa,
				    struct radeon_dma_region *region,
				    const char *caller );

/* Buffer swap, synchronization and driver entry points. */
extern void radeonCopyBuffer( const __DRIdrawablePrivate *drawable );
extern void radeonPageFlip( const __DRIdrawablePrivate *drawable );
extern void radeonFlush( GLcontext *ctx );
extern void radeonFinish( GLcontext *ctx );
extern void radeonWaitForIdleLocked( radeonContextPtr rmesa );
extern void radeonWaitForVBlank( radeonContextPtr rmesa );
extern void radeonInitIoctlFuncs( GLcontext *ctx );
extern void radeonGetAllParams( radeonContextPtr rmesa );

extern void radeonSaveHwState( radeonContextPtr rmesa );
extern void radeonSetUpAtomList( radeonContextPtr rmesa );

/* radeon_compat.c:
 */
extern void radeonCompatEmitPrimitive( radeonContextPtr rmesa,
				       GLuint vertex_format,
				       GLuint hw_primitive,
				       GLuint nrverts );
117 /* ================================================================
118 * Helper macros:
119 */
120
/* Close off the last primitive, if it exists.
 *
 * dma.flush is only non-NULL while a primitive is being accumulated;
 * invoking it emits the pending vertices.  The macro parameter is
 * parenthesized so argument expressions expand safely (CERT PRE01-C).
 */
#define RADEON_NEWPRIM( rmesa )			\
do {						\
   if ( (rmesa)->dma.flush )			\
      (rmesa)->dma.flush( rmesa );		\
} while (0)
128
/* Can accommodate several state changes and primitive changes without
 * actually firing the buffer.
 *
 * Marks both the named atom and the context-wide flag dirty so the next
 * EmitState picks the atom up.  The rmesa parameter is parenthesized so
 * argument expressions expand safely (CERT PRE01-C); ATOM is a member
 * name spliced into the access path and cannot be parenthesized.
 */
#define RADEON_STATECHANGE( rmesa, ATOM )	\
do {						\
   RADEON_NEWPRIM( rmesa );			\
   (rmesa)->hw.ATOM.dirty = GL_TRUE;		\
   (rmesa)->hw.is_dirty = GL_TRUE;		\
} while (0)
138
/* Snapshot an atom's current commands into its double-buffer shadow
 * (lastcmd), the baseline RADEON_DB_STATECHANGE later compares against.
 * NOTE(review): relies on a variable named `rmesa` being in scope at the
 * use site — it is not a macro parameter.  cmd_size is counted in 32-bit
 * words, hence the *4 to get a byte count for memcpy.
 */
#define RADEON_DB_STATE( ATOM )				\
   memcpy( rmesa->hw.ATOM.lastcmd, rmesa->hw.ATOM.cmd,	\
	   rmesa->hw.ATOM.cmd_size * 4)
142
143 static __inline int RADEON_DB_STATECHANGE(
144 radeonContextPtr rmesa,
145 struct radeon_state_atom *atom )
146 {
147 if (memcmp(atom->cmd, atom->lastcmd, atom->cmd_size*4)) {
148 int *tmp;
149 RADEON_NEWPRIM( rmesa );
150 atom->dirty = GL_TRUE;
151 rmesa->hw.is_dirty = GL_TRUE;
152 tmp = atom->cmd;
153 atom->cmd = atom->lastcmd;
154 atom->lastcmd = tmp;
155 return 1;
156 }
157 else
158 return 0;
159 }
160
161
/* Fire the buffered vertices no matter what.
 *
 * Flushes when either buffered commands exist (store.cmd_used) or a
 * primitive is still open (dma.flush set).  The macro parameter is
 * parenthesized so argument expressions expand safely (CERT PRE01-C).
 */
#define RADEON_FIREVERTICES( rmesa )				\
do {								\
   if ( (rmesa)->store.cmd_used || (rmesa)->dma.flush ) {	\
      radeonFlush( (rmesa)->glCtx );				\
   }								\
} while (0)
170
/* Command lengths.  Note that any time you ensure ELTS_BUFSZ or VBUF_BUFSZ
 * are available, you will also be adding an rmesa->state.max_state_size because
 * r200EmitState is called from within r200EmitVbufPrim and r200FlushElts.
 *
 * Fixed: ELTS_BUFSZ previously expanded its parameter bare (24 + nr * 2),
 * so an argument containing an operator (e.g. a + b) was mis-scaled by
 * precedence.  All uses of nr are now parenthesized (CERT PRE01-C), as
 * AOS_BUFSZ already did.
 */
#if RADEON_OLD_PACKETS
#define AOS_BUFSZ(nr)	((3 + (((nr) / 2) * 3) + (((nr) & 1) * 2)) * sizeof(int))
#define VERT_AOS_BUFSZ	(0)
#define ELTS_BUFSZ(nr)	(24 + (nr) * 2)
#define VBUF_BUFSZ	(6 * sizeof(int))
#else
#define AOS_BUFSZ(nr)	((3 + (((nr) / 2) * 3) + (((nr) & 1) * 2)) * sizeof(int))
#define VERT_AOS_BUFSZ	(5 * sizeof(int))
#define ELTS_BUFSZ(nr)	(16 + (nr) * 2)
#define VBUF_BUFSZ	(4 * sizeof(int))
#endif
186
/* Ensure that a minimum amount of space is available in the command buffer.
 * This is used to ensure atomicity of state updates with the rendering requests
 * that rely on them.
 *
 * An alternative would be to implement a "soft lock" such that when the buffer
 * wraps at an inopportune time, we grab the lock, flush the current buffer,
 * and hang on to the lock until the critical section is finished and we flush
 * the buffer again and unlock.
 */
static __inline void radeonEnsureCmdBufSpace( radeonContextPtr rmesa,
					      int bytes )
{
   /* Flush when the request would overflow; presumably radeonFlushCmdBuf
    * resets store.cmd_used so the request then fits — verify in
    * radeon_ioctl.c.
    */
   if (rmesa->store.cmd_used + bytes > RADEON_CMD_BUF_SZ)
      radeonFlushCmdBuf( rmesa, __FUNCTION__ );
   /* A single request larger than the whole buffer can never succeed. */
   assert( bytes <= RADEON_CMD_BUF_SZ );
}
203
204 /* Alloc space in the command buffer
205 */
206 static __inline char *radeonAllocCmdBuf( radeonContextPtr rmesa,
207 int bytes, const char *where )
208 {
209 if (rmesa->store.cmd_used + bytes > RADEON_CMD_BUF_SZ)
210 radeonFlushCmdBuf( rmesa, __FUNCTION__ );
211
212 assert(rmesa->dri.drmMinor >= 3);
213
214 {
215 char *head = rmesa->store.cmd_buf + rmesa->store.cmd_used;
216 rmesa->store.cmd_used += bytes;
217 return head;
218 }
219 }
220
221
222
223
224 #endif
225 #endif /* __RADEON_IOCTL_H__ */