/* $XFree86: xc/lib/GL/mesa/src/drv/radeon/radeon_ioctl.h,v 1.6 2002/12/16 16:18:58 dawes Exp $ */
/**************************************************************************

Copyright 2000, 2001 ATI Technologies Inc., Ontario, Canada, and
                     VA Linux Systems Inc., Fremont, California.

All Rights Reserved.

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice (including the
next paragraph) shall be included in all copies or substantial
portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

**************************************************************************/

/*
 * Authors:
 *   Kevin E. Martin <martin@valinux.com>
 *   Gareth Hughes <gareth@valinux.com>
 */

#ifndef __RADEON_IOCTL_H__
#define __RADEON_IOCTL_H__

#include "simple_list.h"
#include "radeon_lock.h"

extern void radeonEmitState( radeonContextPtr rmesa );
extern void radeonEmitVertexAOS( radeonContextPtr rmesa,
                                 GLuint vertex_size,
                                 GLuint offset );

extern void radeonEmitVbufPrim( radeonContextPtr rmesa,
                                GLuint vertex_format,
                                GLuint primitive,
                                GLuint vertex_nr );

extern void radeonFlushElts( radeonContextPtr rmesa );

extern GLushort *radeonAllocEltsOpenEnded( radeonContextPtr rmesa,
                                           GLuint vertex_format,
                                           GLuint primitive,
                                           GLuint min_nr );

extern void radeonEmitAOS( radeonContextPtr rmesa,
                           struct radeon_dma_region **regions,
                           GLuint n,
                           GLuint offset );

extern void radeonEmitBlit( radeonContextPtr rmesa,
                            GLuint color_fmt,
                            GLuint src_pitch,
                            GLuint src_offset,
                            GLuint dst_pitch,
                            GLuint dst_offset,
                            GLint srcx, GLint srcy,
                            GLint dstx, GLint dsty,
                            GLuint w, GLuint h );

extern void radeonEmitWait( radeonContextPtr rmesa, GLuint flags );

extern void radeonFlushCmdBuf( radeonContextPtr rmesa, const char * );
extern void radeonRefillCurrentDmaRegion( radeonContextPtr rmesa );

extern void radeonAllocDmaRegion( radeonContextPtr rmesa,
                                  struct radeon_dma_region *region,
                                  int bytes,
                                  int alignment );

extern void radeonReleaseDmaRegion( radeonContextPtr rmesa,
                                    struct radeon_dma_region *region,
                                    const char *caller );

extern void radeonCopyBuffer( const __DRIdrawablePrivate *drawable,
                              const drm_clip_rect_t *rect );
extern void radeonPageFlip( const __DRIdrawablePrivate *drawable );
extern void radeonFlush( GLcontext *ctx );
extern void radeonFinish( GLcontext *ctx );
extern void radeonWaitForIdleLocked( radeonContextPtr rmesa );
extern void radeonWaitForVBlank( radeonContextPtr rmesa );
extern void radeonInitIoctlFuncs( GLcontext *ctx );
extern void radeonGetAllParams( radeonContextPtr rmesa );
extern void radeonSetUpAtomList( radeonContextPtr rmesa );

/* ================================================================
 * Helper macros:
 */

/* Close off the last primitive, if it exists.
 */
#define RADEON_NEWPRIM( rmesa )                 \
do {                                            \
   if ( rmesa->dma.flush )                      \
      rmesa->dma.flush( rmesa );                \
} while (0)

/* Can accommodate several state changes and primitive changes without
 * actually firing the buffer.
 */
#define RADEON_STATECHANGE( rmesa, ATOM )       \
do {                                            \
   RADEON_NEWPRIM( rmesa );                     \
   rmesa->hw.ATOM.dirty = GL_TRUE;              \
   rmesa->hw.is_dirty = GL_TRUE;                \
} while (0)
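
/* Minimal usage sketch (the "ctx" atom and SOME_CMD_INDEX are illustrative
 * assumptions, not definitions from this header): a state-setting function
 * marks an atom dirty with RADEON_STATECHANGE before rewriting its command
 * words, so the change is queued without firing the current primitive.
 *
 *    static void radeonUpdateSomeState( radeonContextPtr rmesa, GLuint val )
 *    {
 *       RADEON_STATECHANGE( rmesa, ctx );         // close prim, mark atom dirty
 *       rmesa->hw.ctx.cmd[SOME_CMD_INDEX] = val;  // rewrite buffered register value
 *    }
 */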

#define RADEON_DB_STATE( ATOM )                          \
   memcpy( rmesa->hw.ATOM.lastcmd, rmesa->hw.ATOM.cmd,   \
           rmesa->hw.ATOM.cmd_size * 4)

static __inline int RADEON_DB_STATECHANGE( radeonContextPtr rmesa,
                                           struct radeon_state_atom *atom )
{
   if (memcmp(atom->cmd, atom->lastcmd, atom->cmd_size*4)) {
      int *tmp;
      RADEON_NEWPRIM( rmesa );
      atom->dirty = GL_TRUE;
      rmesa->hw.is_dirty = GL_TRUE;
      tmp = atom->cmd;
      atom->cmd = atom->lastcmd;
      atom->lastcmd = tmp;
      return 1;
   }
   else
      return 0;
}
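
/* Hedged sketch of the double-buffered state pattern these helpers support
 * (the "fog" atom and SOME_INDEX are illustrative assumptions): RADEON_DB_STATE
 * copies the live command words into lastcmd and, being a memcpy, evaluates to
 * that destination; new values are written there, and RADEON_DB_STATECHANGE
 * swaps the buffers and flags the atom dirty only if something really changed.
 *
 *    GLfloat *fcmd = (GLfloat *) RADEON_DB_STATE( fog );     // fcmd -> working copy
 *    fcmd[SOME_INDEX] = new_value;                           // build the new state
 *    (void) RADEON_DB_STATECHANGE( rmesa, &rmesa->hw.fog );  // swap + dirty on change
 */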


/* Fire the buffered vertices no matter what.
 */
#define RADEON_FIREVERTICES( rmesa )                     \
do {                                                     \
   if ( rmesa->store.cmd_used || rmesa->dma.flush ) {    \
      radeonFlush( rmesa->glCtx );                       \
   }                                                     \
} while (0)

/* Command lengths.  Note that any time you ensure ELTS_BUFSZ or VBUF_BUFSZ
 * are available, you will also be adding rmesa->state.max_state_size, because
 * radeonEmitState is called from within radeonEmitVbufPrim and
 * radeonFlushElts.
 */
#if RADEON_OLD_PACKETS
#define AOS_BUFSZ(nr)    ((3 + ((nr / 2) * 3) + ((nr & 1) * 2)) * sizeof(int))
#define VERT_AOS_BUFSZ   (0)
#define ELTS_BUFSZ(nr)   (24 + nr * 2)
#define VBUF_BUFSZ       (6 * sizeof(int))
#else
#define AOS_BUFSZ(nr)    ((3 + ((nr / 2) * 3) + ((nr & 1) * 2)) * sizeof(int))
#define VERT_AOS_BUFSZ   (5 * sizeof(int))
#define ELTS_BUFSZ(nr)   (16 + nr * 2)
#define VBUF_BUFSZ       (4 * sizeof(int))
#endif
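
/* Worked example (non-OLD_PACKETS values, assuming a 4-byte int; the counts
 * are illustrative): emitting three vertex arrays plus an indexed primitive
 * of 100 elements would need AOS_BUFSZ(3) = (3 + 3 + 2) * 4 = 32 bytes and
 * ELTS_BUFSZ(100) = 16 + 200 = 216 bytes, on top of the
 * rmesa->state.max_state_size mentioned above.
 */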

/* Ensure that a minimum amount of space is available in the command buffer.
 * This is used to ensure atomicity of state updates with the rendering
 * requests that rely on them.
 *
 * An alternative would be to implement a "soft lock" such that when the
 * buffer wraps at an inopportune time, we grab the lock, flush the current
 * buffer, and hang on to the lock until the critical section is finished and
 * we flush the buffer again and unlock.
 */
static __inline void radeonEnsureCmdBufSpace( radeonContextPtr rmesa,
                                              int bytes )
{
   if (rmesa->store.cmd_used + bytes > RADEON_CMD_BUF_SZ)
      radeonFlushCmdBuf( rmesa, __FUNCTION__ );
   assert( bytes <= RADEON_CMD_BUF_SZ );
}
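
/* Sketch of the intended call pattern (sizes and ordering are assumptions
 * drawn from the comments above, not a verbatim copy of the emit paths):
 * reserve enough room for the primitive plus any dirty state up front, so the
 * state emission inside radeonEmitVbufPrim and the draw that depends on it
 * cannot be split across a command-buffer flush.
 *
 *    radeonEnsureCmdBufSpace( rmesa,
 *                             VBUF_BUFSZ + rmesa->state.max_state_size );
 *    radeonEmitVbufPrim( rmesa, vertex_format, primitive, vertex_nr );
 */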

/* Allocate space in the command buffer.
 */
static __inline char *radeonAllocCmdBuf( radeonContextPtr rmesa,
                                         int bytes, const char *where )
{
   if (rmesa->store.cmd_used + bytes > RADEON_CMD_BUF_SZ)
      radeonFlushCmdBuf( rmesa, __FUNCTION__ );

   {
      char *head = rmesa->store.cmd_buf + rmesa->store.cmd_used;
      rmesa->store.cmd_used += bytes;
      return head;
   }
}
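
/* Minimal usage sketch (the two-word command shown is purely illustrative,
 * not a real radeon packet): callers reserve bytes from the buffer and then
 * write their command words directly into the returned space.
 *
 *    int *cmd = (int *) radeonAllocCmdBuf( rmesa, 2 * sizeof(int),
 *                                          __FUNCTION__ );
 *    cmd[0] = 0;   // command header word (illustrative)
 *    cmd[1] = 0;   // payload word (illustrative)
 */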

#endif /* __RADEON_IOCTL_H__ */