radeon/r200/r300: initial attempt to convert to common context code
[mesa.git] / src / mesa / drivers / dri / radeon / radeon_ioctl.h
1 /**************************************************************************
2
3 Copyright 2000, 2001 ATI Technologies Inc., Ontario, Canada, and
4 VA Linux Systems Inc., Fremont, California.
5
6 All Rights Reserved.
7
8 Permission is hereby granted, free of charge, to any person obtaining
9 a copy of this software and associated documentation files (the
10 "Software"), to deal in the Software without restriction, including
11 without limitation the rights to use, copy, modify, merge, publish,
12 distribute, sublicense, and/or sell copies of the Software, and to
13 permit persons to whom the Software is furnished to do so, subject to
14 the following conditions:
15
16 The above copyright notice and this permission notice (including the
17 next paragraph) shall be included in all copies or substantial
18 portions of the Software.
19
20 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
21 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
22 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
23 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
24 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
25 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
26 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
27
28 **************************************************************************/
29
30 /*
31 * Authors:
32 * Kevin E. Martin <martin@valinux.com>
33 * Gareth Hughes <gareth@valinux.com>
34 */
35
36 #ifndef __RADEON_IOCTL_H__
37 #define __RADEON_IOCTL_H__
38
39 #include "main/simple_list.h"
40 #include "radeon_lock.h"
41
42
/* State emission and vertex / element-buffer submission (r100). */
43 extern void radeonEmitState( r100ContextPtr rmesa );
44 extern void radeonEmitVertexAOS( r100ContextPtr rmesa,
45 GLuint vertex_size,
46 GLuint offset );
47
48 extern void radeonEmitVbufPrim( r100ContextPtr rmesa,
49 GLuint vertex_format,
50 GLuint primitive,
51 GLuint vertex_nr );
52
53 extern void radeonFlushElts( GLcontext *ctx );
54
55
56 extern GLushort *radeonAllocEltsOpenEnded( r100ContextPtr rmesa,
57 GLuint vertex_format,
58 GLuint primitive,
59 GLuint min_nr );
60
61 extern void radeonEmitAOS( r100ContextPtr rmesa,
62 struct radeon_dma_region **regions,
63 GLuint n,
64 GLuint offset );
65
/* 2D blit emission and hardware wait helpers. */
66 extern void radeonEmitBlit( r100ContextPtr rmesa,
67 GLuint color_fmt,
68 GLuint src_pitch,
69 GLuint src_offset,
70 GLuint dst_pitch,
71 GLuint dst_offset,
72 GLint srcx, GLint srcy,
73 GLint dstx, GLint dsty,
74 GLuint w, GLuint h );
75
76 extern void radeonEmitWait( r100ContextPtr rmesa, GLuint flags );
77
/* Command-buffer flushing and DMA region management.  The const char *
 * argument to radeonFlushCmdBuf is a caller name used for debugging. */
78 extern void radeonFlushCmdBuf( r100ContextPtr rmesa, const char * );
79 extern void radeonRefillCurrentDmaRegion( r100ContextPtr rmesa );
80
81 extern void radeonAllocDmaRegion( r100ContextPtr rmesa,
82 struct radeon_dma_region *region,
83 int bytes,
84 int alignment );
85
86 extern void radeonReleaseDmaRegion( r100ContextPtr rmesa,
87 struct radeon_dma_region *region,
88 const char *caller );
89
/* Buffer swap / page flip, flush/finish entry points and context setup. */
90 extern void radeonCopyBuffer( __DRIdrawablePrivate *drawable,
91 const drm_clip_rect_t *rect);
92 extern void radeonPageFlip( __DRIdrawablePrivate *drawable );
93 extern void radeonFlush( GLcontext *ctx );
94 extern void radeonFinish( GLcontext *ctx );
95 extern void radeonWaitForIdleLocked( r100ContextPtr rmesa );
96 extern void radeonWaitForVBlank( r100ContextPtr rmesa );
97 extern void radeonInitIoctlFuncs( GLcontext *ctx );
98 extern void radeonGetAllParams( r100ContextPtr rmesa );
99 extern void radeonSetUpAtomList( r100ContextPtr rmesa );
100
101 /* ================================================================
102 * Helper macros:
103 */
104
105 /* Close off the last primitive, if it exists.
106 */
/* Calls the pending dma.flush hook (if any) on the context's GL context. */
107 #define RADEON_NEWPRIM( rmesa ) \
108 do { \
109 if ( rmesa->dma.flush ) \
110 rmesa->dma.flush( rmesa->radeon.glCtx ); \
111 } while (0)
112
113 /* Can accommodate several state changes and primitive changes without
114 * actually firing the buffer.
115 */
/* Marks the given state atom and the whole hw state dirty after closing
 * the current primitive; the buffer itself is not flushed here. */
116 #define RADEON_STATECHANGE( rmesa, ATOM ) \
117 do { \
118 RADEON_NEWPRIM( rmesa ); \
119 rmesa->hw.ATOM.dirty = GL_TRUE; \
120 rmesa->hw.is_dirty = GL_TRUE; \
121 } while (0)
122
/* Snapshot the atom's current commands into lastcmd (cmd_size is in
 * 32-bit words, hence the *4).  NOTE: expands an unhygienic reference
 * to a local `rmesa` at the use site. */
123 #define RADEON_DB_STATE( ATOM ) \
124 memcpy( rmesa->hw.ATOM.lastcmd, rmesa->hw.ATOM.cmd, \
125 rmesa->hw.ATOM.cmd_size * 4)
126
127 static INLINE int RADEON_DB_STATECHANGE(
128 r100ContextPtr rmesa,
129 struct radeon_state_atom *atom )
130 {
131 if (memcmp(atom->cmd, atom->lastcmd, atom->cmd_size*4)) {
132 GLuint *tmp;
133 RADEON_NEWPRIM( rmesa );
134 atom->dirty = GL_TRUE;
135 rmesa->hw.is_dirty = GL_TRUE;
136 tmp = atom->cmd;
137 atom->cmd = atom->lastcmd;
138 atom->lastcmd = tmp;
139 return 1;
140 }
141 else
142 return 0;
143 }
144
145
146 /* Fire the buffered vertices no matter what.
147 */
/* Flushes when there are either buffered commands (store.cmd_used) or a
 * pending dma flush hook; radeonFlush takes the GL context. */
148 #define RADEON_FIREVERTICES( rmesa ) \
149 do { \
150 if ( rmesa->store.cmd_used || rmesa->dma.flush ) { \
151 radeonFlush( rmesa->radeon.glCtx ); \
152 } \
153 } while (0)
154
155 /* Command lengths. Note that any time you ensure ELTS_BUFSZ or VBUF_BUFSZ
156 * are available, you will also be adding an rmesa->state.max_state_size because
157 * radeonEmitState is called from within radeonEmitVbufPrim and radeonFlushElts.
158 */
/* Sizes are in bytes.  The AOS packet packs array descriptors two per
 * group of three dwords, plus two extra dwords for an odd count. */
159 #if RADEON_OLD_PACKETS
160 #define AOS_BUFSZ(nr) ((3 + ((nr / 2) * 3) + ((nr & 1) * 2)) * sizeof(int))
161 #define VERT_AOS_BUFSZ (0)
162 #define ELTS_BUFSZ(nr) (24 + nr * 2)
163 #define VBUF_BUFSZ (6 * sizeof(int))
164 #else
165 #define AOS_BUFSZ(nr) ((3 + ((nr / 2) * 3) + ((nr & 1) * 2)) * sizeof(int))
166 #define VERT_AOS_BUFSZ (5 * sizeof(int))
167 #define ELTS_BUFSZ(nr) (16 + nr * 2)
168 #define VBUF_BUFSZ (4 * sizeof(int))
169 #endif
170
171 /* Ensure that a minimum amount of space is available in the command buffer.
172 * This is used to ensure atomicity of state updates with the rendering requests
173 * that rely on them.
174 *
175 * An alternative would be to implement a "soft lock" such that when the buffer
176 * wraps at an inopportune time, we grab the lock, flush the current buffer,
177 * and hang on to the lock until the critical section is finished and we flush
178 * the buffer again and unlock.
179 */
180 static INLINE void radeonEnsureCmdBufSpace( r100ContextPtr rmesa,
181 int bytes )
182 {
183 if (rmesa->store.cmd_used + bytes > RADEON_CMD_BUF_SZ)
184 radeonFlushCmdBuf( rmesa, __FUNCTION__ );
185 assert( bytes <= RADEON_CMD_BUF_SZ );
186 }
187
188 /* Alloc space in the command buffer
189 */
190 static INLINE char *radeonAllocCmdBuf( r100ContextPtr rmesa,
191 int bytes, const char *where )
192 {
193 if (rmesa->store.cmd_used + bytes > RADEON_CMD_BUF_SZ)
194 radeonFlushCmdBuf( rmesa, __FUNCTION__ );
195
196 {
197 char *head = rmesa->store.cmd_buf + rmesa->store.cmd_used;
198 rmesa->store.cmd_used += bytes;
199 return head;
200 }
201 }
202
203 #endif /* __RADEON_IOCTL_H__ */