Added support for ELTS to the _savage_render_stage. Requires at least
[mesa.git] src/mesa/drivers/dri/savage/savageioctl.h
/*
 * Copyright 1998-2003 VIA Technologies, Inc. All Rights Reserved.
 * Copyright 2001-2003 S3 Graphics, Inc. All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * VIA, S3 GRAPHICS, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */


#ifndef SAVAGE_IOCTL_H
#define SAVAGE_IOCTL_H

#include "savagecontext.h"

void savageGetGeneralDmaBufferLocked( savageContextPtr mmesa );

void savageFlushVertices( savageContextPtr mmesa );

void savageFlushGeneralLocked( savageContextPtr imesa );
void savageWaitAgeLocked( savageContextPtr imesa, int age );
void savageWaitAge( savageContextPtr imesa, int age );

unsigned int savageEmitEventLocked( savageContextPtr imesa, unsigned int flags );
unsigned int savageEmitEvent( savageContextPtr imesa, unsigned int flags );
void savageWaitEvent( savageContextPtr imesa, unsigned int event);

void savageFlushCmdBufLocked( savageContextPtr imesa, GLboolean discard );
void savageFlushCmdBuf( savageContextPtr imesa, GLboolean discard );

void savageDmaFinish( savageContextPtr imesa );

void savageRegetLockQuiescent( savageContextPtr imesa );

void savageDDInitIoctlFuncs( GLcontext *ctx );

void savageSwapBuffers( __DRIdrawablePrivate *dPriv );

#define WAIT_IDLE_EMPTY do {                                            \
    savageWaitEvent(imesa,                                              \
                    savageEmitEvent(imesa, SAVAGE_WAIT_3D|SAVAGE_WAIT_2D)); \
} while (0)

#define FLUSH_BATCH(imesa) do {                                         \
    if (SAVAGE_DEBUG & DEBUG_VERBOSE_MSG)                               \
        fprintf (stderr, "FLUSH_BATCH in %s\n", __FUNCTION__);          \
    savageFlushVertices(imesa);                                         \
    savageFlushCmdBuf(imesa, GL_FALSE);                                 \
} while (0)

extern void savageGetDMABuffer( savageContextPtr imesa );

/* Forget the currently set-up indexed vertices (firstElt == -1 means
 * no indexed vertices are available). */
static __inline
void savageReleaseIndexedVerts( savageContextPtr imesa )
{
    imesa->firstElt = -1;
}

/* Check whether indexed vertices have been set up with
 * savageAllocIndexedVerts and not released yet. */
static __inline
GLboolean savageHaveIndexedVerts( savageContextPtr imesa )
{
    return (imesa->firstElt != -1);
}

/* Allocate <words> dwords of vertex storage in the current vertex
 * buffer (DMA or client), flushing and refilling it first if the
 * request would not fit. */
static __inline
u_int32_t *savageAllocVtxBuf( savageContextPtr imesa, GLuint words )
{
    struct savage_vtxbuf_t *buffer = imesa->vtxBuf;
    u_int32_t *head;

    if (buffer == &imesa->dmaVtxBuf) {
        if (!buffer->total) {
            LOCK_HARDWARE(imesa);
            savageGetDMABuffer(imesa);
            UNLOCK_HARDWARE(imesa);
        } else if (buffer->used + words > buffer->total) {
            if (SAVAGE_DEBUG & DEBUG_VERBOSE_MSG)
                fprintf (stderr, "... flushing DMA buffer in %s\n",
                         __FUNCTION__);
            savageReleaseIndexedVerts(imesa);
            savageFlushVertices(imesa);
            LOCK_HARDWARE(imesa);
            savageFlushCmdBufLocked(imesa, GL_TRUE); /* discard DMA buffer */
            savageGetDMABuffer(imesa);
            UNLOCK_HARDWARE(imesa);
        }
    } else if (buffer->used + words > buffer->total) {
        if (SAVAGE_DEBUG & DEBUG_VERBOSE_MSG)
            fprintf (stderr, "... flushing client vertex buffer in %s\n",
                     __FUNCTION__);
        savageReleaseIndexedVerts(imesa);
        savageFlushVertices(imesa);
        LOCK_HARDWARE(imesa);
        savageFlushCmdBufLocked(imesa, GL_FALSE); /* free clientVtxBuf */
        UNLOCK_HARDWARE(imesa);
    }

    head = &buffer->buf[buffer->used];

    buffer->used += words;
    return head;
}

/* Start a new set of indexed vertices: flush any buffered vertices,
 * allocate space for n vertices and remember the hardware index of the
 * first one in imesa->firstElt. */
static __inline
u_int32_t *savageAllocIndexedVerts( savageContextPtr imesa, GLuint n )
{
    u_int32_t *ret;
    savageFlushVertices(imesa);
    ret = savageAllocVtxBuf(imesa, n*imesa->HwVertexSize);
    imesa->firstElt = imesa->vtxBuf->flushed / imesa->HwVertexSize;
    imesa->vtxBuf->flushed = imesa->vtxBuf->used;
    return ret;
}

/* Flush Elts:
 * - Complete the drawing command with the correct number of indices.
 * - Actually allocate entries for the indices in the command buffer.
 *   (This allocation must succeed without wrapping the cmd buffer!)
 */
static __inline
void savageFlushElts( savageContextPtr imesa )
{
    if (imesa->elts.cmd) {
        GLuint qwords = (imesa->elts.n + 3) >> 2;
        assert(imesa->cmdBuf.write - imesa->cmdBuf.base + qwords
               <= imesa->cmdBuf.size);
        imesa->cmdBuf.write += qwords;

        imesa->elts.cmd->idx.count = imesa->elts.n;
        imesa->elts.cmd = NULL;
    }
}
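
/* A minimal illustration (not part of the original header): indices are
 * 16 bits wide, so four of them pack into one 8-byte qword of the command
 * buffer, which is where the (n + 3) >> 2 rounding above comes from.  The
 * helper name is hypothetical and exists only to spell that out. */
static __inline
GLuint savageEltQwordsSketch( GLuint nElts )
{
    /* Same rounding as savageFlushElts: ceil(nElts / 4) qwords. */
    return (nElts + 3) >> 2;
}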

/* Allocate a command buffer entry with <bytes> bytes of arguments:
 * - implies savageFlushElts
 */
static __inline
drm_savage_cmd_header_t *savageAllocCmdBuf( savageContextPtr imesa, GLuint bytes )
{
    drm_savage_cmd_header_t *ret;
    GLuint qwords = ((bytes + 7) >> 3) + 1; /* round up */
    assert (qwords < imesa->cmdBuf.size);

    savageFlushElts(imesa);

    if (imesa->cmdBuf.write - imesa->cmdBuf.base + qwords > imesa->cmdBuf.size)
        savageFlushCmdBuf(imesa, GL_FALSE);

    ret = (drm_savage_cmd_header_t *)imesa->cmdBuf.write;
    imesa->cmdBuf.write += qwords;
    return ret;
}
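
/* Hypothetical sketch (not in the original header): savageAllocCmdBuf
 * reserves one qword for the drm_savage_cmd_header_t itself plus enough
 * qwords for <bytes> bytes of arguments, so the payload starts directly
 * after the header.  The helper below only demonstrates that layout;
 * filling in a real command code is driver/DRM specific and not shown. */
static __inline
u_int32_t *savageCmdBufPayloadSketch( savageContextPtr imesa, GLuint bytes )
{
    drm_savage_cmd_header_t *cmd = savageAllocCmdBuf(imesa, bytes);
    /* The caller still has to initialize *cmd with a valid command header. */
    return (u_int32_t *)(cmd + 1);
}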

/* Allocate Elts:
 * - if it doesn't fit, flush the cmd buffer first
 * - allocates the drawing command on the cmd buffer if there is no
 *   incomplete indexed drawing command yet
 * - increments the number of elts. Final allocation is done in savageFlushElts
 */
static __inline
u_int16_t *savageAllocElts( savageContextPtr imesa, GLuint n )
{
    u_int16_t *ret;
    GLuint qwords;
    assert (savageHaveIndexedVerts(imesa));

    if (imesa->elts.cmd)
        qwords = (imesa->elts.n + n + 3) >> 2;
    else
        qwords = ((n + 3) >> 2) + 1;
    if (imesa->cmdBuf.write - imesa->cmdBuf.base + qwords > imesa->cmdBuf.size)
        savageFlushCmdBuf(imesa, GL_FALSE); /* implies savageFlushElts */

    if (!imesa->elts.cmd) {
        savageFlushVertices(imesa);
        imesa->elts.cmd = savageAllocCmdBuf(imesa, 0);
        imesa->elts.cmd->idx.cmd = (imesa->vtxBuf == &imesa->dmaVtxBuf) ?
            SAVAGE_CMD_DMA_IDX : SAVAGE_CMD_VB_IDX;
        imesa->elts.cmd->idx.prim = imesa->HwPrim;
        imesa->elts.cmd->idx.skip = imesa->skip;
        imesa->elts.n = 0;
    }

    ret = (u_int16_t *)(imesa->elts.cmd+1) + imesa->elts.n;
    imesa->elts.n += n;
    return ret;
}
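
/* Hypothetical usage sketch (not part of the original header): how an
 * indexed rendering path such as the _savage_render_stage mentioned in
 * the commit message might emit one indexed triangle.  It assumes the
 * referenced vertices were uploaded with savageAllocIndexedVerts, so the
 * hardware index of local vertex i is imesa->firstElt + i (an assumption
 * based on how savageAllocIndexedVerts computes firstElt above).  The
 * function name is invented for illustration only. */
static __inline
void savageEmitTriEltsSketch( savageContextPtr imesa,
                              GLuint i0, GLuint i1, GLuint i2 )
{
    u_int16_t *elts = savageAllocElts(imesa, 3);
    elts[0] = (u_int16_t)(imesa->firstElt + i0);
    elts[1] = (u_int16_t)(imesa->firstElt + i1);
    elts[2] = (u_int16_t)(imesa->firstElt + i2);
    /* The index count of the drawing command is filled in later by
     * savageFlushElts, which runs implicitly on the next savageAllocCmdBuf
     * call or command buffer flush. */
}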

#endif