[mesa.git] src/mesa/drivers/dri/i965/intel_batchbuffer.c
/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "imports.h"
#include "intel_batchbuffer.h"
#include "intel_ioctl.h"
#include "bufmgr.h"

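/* Prepare the batch for a new run of commands: realign the write
 * pointer to a 64-byte boundary, and if fewer than BATCH_REFILL bytes
 * remain in the buffer, replace the backing storage with a fresh
 * BATCH_SZ allocation and start again at offset zero.
 */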
static void intel_batchbuffer_reset( struct intel_batchbuffer *batch )
{
   assert(batch->map == NULL);

   batch->offset = (unsigned long)batch->ptr;
   batch->offset = (batch->offset + 63) & ~63;
   batch->ptr = (unsigned char *) batch->offset;

   if (BATCH_SZ - batch->offset < BATCH_REFILL) {
      bmBufferData(batch->intel,
                   batch->buffer,
                   BATCH_SZ,
                   NULL,
                   0);
      batch->offset = 0;
      batch->ptr = NULL;
   }

   batch->flags = 0;
}

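/* Invalidate callback, installed with bmBufferSetInvalidateCB(): the
 * buffer manager invokes this when the buffer's contents are lost, so
 * any partially built batch must be abandoned.
 */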
static void intel_batchbuffer_reset_cb( struct intel_context *intel,
                                        void *ptr )
{
   struct intel_batchbuffer *batch = (struct intel_batchbuffer *)ptr;
   assert(batch->map == NULL);
   batch->flags = 0;
   batch->offset = 0;
   batch->ptr = NULL;
}

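/* While the buffer is unmapped, batch->ptr holds a byte offset within
 * the buffer; mapping rebases it into a CPU pointer by adding the map
 * address, and unmapping subtracts the address back out.
 */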
void intel_batchbuffer_map( struct intel_batchbuffer *batch )
{
   if (!batch->map) {
      batch->map = bmMapBuffer(batch->intel, batch->buffer,
                               BM_MEM_AGP|BM_MEM_LOCAL|BM_CLIENT|BM_WRITE);
      batch->ptr += (unsigned long)batch->map;
   }
   assert(batch->map);
}

void intel_batchbuffer_unmap( struct intel_batchbuffer *batch )
{
   if (batch->map) {
      batch->ptr -= (unsigned long)batch->map;
      batch->map = NULL;
      bmUnmapBuffer(batch->intel, batch->buffer);
   }
}


/*======================================================================
 * Public functions
 */
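/* Allocate a batchbuffer and its backing buffer-manager object, and
 * register the invalidate callback so bookkeeping is cleared whenever
 * the buffer manager discards the contents.
 */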
struct intel_batchbuffer *intel_batchbuffer_alloc( struct intel_context *intel )
{
   struct intel_batchbuffer *batch = calloc(1, sizeof(*batch));

   batch->intel = intel;

   bmGenBuffers(intel, "batch", 1, &batch->buffer);

   bmBufferSetInvalidateCB(intel, batch->buffer,
                           intel_batchbuffer_reset_cb,
                           batch,
                           GL_TRUE);

   bmBufferData(batch->intel,
                batch->buffer,
                BATCH_SZ,
                NULL,
                0);

   return batch;
}

void intel_batchbuffer_free( struct intel_batchbuffer *batch )
{
   if (batch->map)
      bmUnmapBuffer(batch->intel, batch->buffer);

   bmDeleteBuffers(batch->intel, 1, &batch->buffer);
   free(batch);
}

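/* MI command: opcode 0x0A in bits 28:23 terminates a batch buffer. */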
#define MI_BATCH_BUFFER_END (0xA<<23)

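/* Finish the current batch and submit it to the hardware: terminate
 * it with MI_BATCH_BUFFER_END, validate the buffer list to obtain a
 * hardware offset, fire the batchbuffer ioctl, then reset and remap
 * the buffer for the next run of commands.  Must be called with the
 * hardware lock held.
 */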
void intel_batchbuffer_flush( struct intel_batchbuffer *batch )
{
   struct intel_context *intel = batch->intel;
   GLuint used = batch->ptr - (batch->map + batch->offset);
   GLuint offset;
   GLboolean ignore_cliprects = (batch->flags & INTEL_BATCH_CLIPRECTS) ? GL_FALSE : GL_TRUE;

   assert(intel->locked);

   if (used == 0) {
      bmReleaseBuffers( batch->intel );
      return;
   }

   /* Throw away the batch if it cannot have any effect: it requires
    * cliprects but none are currently installed.
    */
   if (intel->numClipRects == 0 && !ignore_cliprects) {
      batch->ptr = batch->map + batch->offset;
      bmReleaseBuffers( batch->intel );
      intel->vtbl.lost_hardware(intel);
      batch->flags = 0;

      UNLOCK_HARDWARE(intel);
      sched_yield();
      LOCK_HARDWARE(intel);

      return;
   }

   /* Terminate the batch with MI_BATCH_BUFFER_END.  The batch must
    * end on a qword (8-byte) boundary, so either the single
    * terminator dword gets it there, or a noop dword is emitted first
    * as padding.
    */
   if (used & 4) {
      ((int *)batch->ptr)[0] = MI_BATCH_BUFFER_END;
      batch->ptr += 4;
      used += 4;
   }
   else {
      ((int *)batch->ptr)[0] = 0;
      ((int *)batch->ptr)[1] = MI_BATCH_BUFFER_END;

      batch->ptr += 8;
      used += 8;
   }

   intel_batchbuffer_unmap(batch);

   /* Get the batch buffer offset: must call bmBufferOffset() before
    * bmValidateBuffers(), otherwise the buffer won't be on the inuse
    * list.
    */
   offset = bmBufferOffset(batch->intel, batch->buffer);

   if (!bmValidateBuffers( batch->intel )) {
      assert(0);
   }

   if (intel->aub_file) {
      /* Send buffered commands to the aubfile as a single packet:
       * temporarily replace the terminating MI_BATCH_BUFFER_END with
       * the vtbl flush command, then restore it afterwards.
       */
      intel_batchbuffer_map(batch);
      ((int *)batch->ptr)[-1] = intel->vtbl.flush_cmd();
      intel->vtbl.aub_commands(intel,
                               offset, /* Fulsim weirdness - don't adjust */
                               batch->map + batch->offset,
                               used);
      ((int *)batch->ptr)[-1] = MI_BATCH_BUFFER_END;
      intel_batchbuffer_unmap(batch);
   }

   /* Fire the batch buffer, which was uploaded above:
    */
   intel_batch_ioctl(batch->intel,
                     offset + batch->offset,
                     used,
                     ignore_cliprects);

   if (intel->aub_file &&
       intel->ctx.DrawBuffer->_ColorDrawBufferMask[0] == BUFFER_BIT_FRONT_LEFT)
      intel->vtbl.aub_dump_bmp( intel, 0 );

   /* Reset the buffer and leave it mapped, ready for the next
    * commands:
    */
   intel_batchbuffer_reset( batch );
   intel_batchbuffer_map( batch );
}

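/* Pad the batch with zero bytes so the write pointer lands on an
 * `align`-byte boundary with at least `sz` bytes of space remaining;
 * if the aligned request does not fit, flush instead (the reset
 * leaves the new batch 64-byte aligned).
 */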
void intel_batchbuffer_align( struct intel_batchbuffer *batch,
                              GLuint align,
                              GLuint sz )
{
   unsigned long ptr = (unsigned long) batch->ptr;
   /* Round up to the next multiple of align; no-op when ptr is
    * already aligned.
    */
   unsigned long aptr = (ptr + align - 1) & ~((unsigned long)align - 1);
   GLuint fixup = aptr - ptr;

   if (intel_batchbuffer_space(batch) < fixup + sz)
      intel_batchbuffer_flush(batch);
   else {
      memset(batch->ptr, 0, fixup);
      batch->ptr += fixup;
   }
}

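/* Copy a dword-multiple blob of command data into the batch after
 * reserving space for it (reserving may flush the current batch).
 */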
void intel_batchbuffer_data(struct intel_batchbuffer *batch,
                            const void *data,
                            GLuint bytes,
                            GLuint flags)
{
   assert((bytes & 3) == 0);
   intel_batchbuffer_require_space(batch, bytes, flags);
   __memcpy(batch->ptr, data, bytes);
   batch->ptr += bytes;
}
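
/* Illustrative usage sketch, not code from this file: with the
 * hardware lock held, a caller could emit a couple of noop dwords
 * (MI_NOOP is an all-zero dword) and kick the batch.  The
 * intel->batch pointer is assumed to be the context's batchbuffer.
 *
 *    static const GLuint noops[2] = { 0, 0 };
 *
 *    LOCK_HARDWARE(intel);
 *    intel_batchbuffer_data(intel->batch, noops, sizeof(noops), 0);
 *    intel_batchbuffer_flush(intel->batch);
 *    UNLOCK_HARDWARE(intel);
 */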