598ce08735dedaf071d3025f64204c8ec92090b0
[mesa.git] / src / mesa / drivers / dri / i965 / intel_batchbuffer.c
1 /**************************************************************************
2 *
3 * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28 #include "imports.h"
29 #include "intel_batchbuffer.h"
30 #include "intel_ioctl.h"
31 #include "bufmgr.h"
32
33
34 static void intel_batchbuffer_reset( struct intel_batchbuffer *batch )
35 {
36 assert(batch->map == NULL);
37
38 batch->offset = (unsigned long)batch->ptr;
39 batch->offset = (batch->offset + 63) & ~63;
40 batch->ptr = (unsigned char *) batch->offset;
41
42 if (BATCH_SZ - batch->offset < BATCH_REFILL) {
43 bmBufferData(batch->intel,
44 batch->buffer,
45 BATCH_SZ,
46 NULL,
47 0);
48 batch->offset = 0;
49 batch->ptr = NULL;
50 }
51
52 batch->flags = 0;
53 }
54
55 static void intel_batchbuffer_reset_cb( struct intel_context *intel,
56 void *ptr )
57 {
58 struct intel_batchbuffer *batch = (struct intel_batchbuffer *)ptr;
59 assert(batch->map == NULL);
60 batch->flags = 0;
61 batch->offset = 0;
62 batch->ptr = NULL;
63 }
64
65 GLubyte *intel_batchbuffer_map( struct intel_batchbuffer *batch )
66 {
67 if (!batch->map) {
68 batch->map = bmMapBuffer(batch->intel, batch->buffer,
69 BM_MEM_AGP|BM_MEM_LOCAL|BM_CLIENT|BM_WRITE);
70 batch->ptr += (unsigned long)batch->map;
71 }
72
73 return batch->map;
74 }
75
76 void intel_batchbuffer_unmap( struct intel_batchbuffer *batch )
77 {
78 if (batch->map) {
79 batch->ptr -= (unsigned long)batch->map;
80 batch->map = NULL;
81 bmUnmapBuffer(batch->intel, batch->buffer);
82 }
83 }
84
85
86
87 /*======================================================================
88 * Public functions
89 */
90 struct intel_batchbuffer *intel_batchbuffer_alloc( struct intel_context *intel )
91 {
92 struct intel_batchbuffer *batch = calloc(sizeof(*batch), 1);
93
94 batch->intel = intel;
95
96 bmGenBuffers(intel, "batch", 1, &batch->buffer, 12);
97
98 bmBufferSetInvalidateCB(intel, batch->buffer,
99 intel_batchbuffer_reset_cb,
100 batch,
101 GL_TRUE);
102
103 bmBufferData(batch->intel,
104 batch->buffer,
105 BATCH_SZ,
106 NULL,
107 0);
108
109
110 return batch;
111 }
112
113 void intel_batchbuffer_free( struct intel_batchbuffer *batch )
114 {
115 if (batch->map)
116 bmUnmapBuffer(batch->intel, batch->buffer);
117
118 bmDeleteBuffers(batch->intel, 1, &batch->buffer);
119 free(batch);
120 }
121
122
/* Command that terminates a batchbuffer when parsed by the hardware. */
#define MI_BATCH_BUFFER_END 	(0xA<<23)


/* Submit the accumulated batch commands to the hardware.
 *
 * Terminates the batch with MI_BATCH_BUFFER_END (padding so the total
 * length is a multiple of 8 bytes), validates the buffer list, fires the
 * batch ioctl, then resets and re-maps the buffer for the next batch.
 * Must be called with the hardware lock held.
 *
 * Returns GL_TRUE on success, GL_FALSE if buffer validation failed (the
 * buffer is still reset/re-mapped on that path via the "out" label).
 */
GLboolean intel_batchbuffer_flush( struct intel_batchbuffer *batch )
{
   struct intel_context *intel = batch->intel;
   /* ptr is absolute while mapped; subtract the map base + start offset
    * to get the number of bytes emitted into this batch.
    */
   GLuint used = batch->ptr - (batch->map + batch->offset);
   GLuint offset;
   GLboolean ignore_cliprects = (batch->flags & INTEL_BATCH_CLIPRECTS) ? GL_FALSE : GL_TRUE;
   GLint retval = GL_TRUE;

   assert(intel->locked);

   if (used == 0) {
      /* Nothing emitted - just drop any buffer references and succeed. */
      bmReleaseBuffers( batch->intel );
      return GL_TRUE;
   }

   /* Throw away non-effective packets.  A cliprect-dependent batch with
    * zero cliprects would draw nothing, so discard it, notify the driver
    * that hardware state is lost, and briefly drop the lock to let other
    * clients run.
    */
   if (intel->numClipRects == 0 && !ignore_cliprects) {
      batch->ptr = batch->map + batch->offset;
      bmReleaseBuffers( batch->intel );
      intel->vtbl.lost_hardware(intel);
      batch->flags = 0;

      UNLOCK_HARDWARE(intel);
      sched_yield();
      LOCK_HARDWARE(intel);

      return GL_TRUE;
   }


   /* Add the MI_BATCH_BUFFER_END.  Always add an MI_FLUSH - this is a
    * performance drain that we would like to avoid.
    *
    * "used" is always a multiple of 4 here; if it is an odd number of
    * dwords, one terminator dword brings the batch to a multiple of 8
    * bytes, otherwise a zero dword (no-op) is emitted first.
    */
   if (used & 4) {
      ((int *)batch->ptr)[0] = MI_BATCH_BUFFER_END;
      batch->ptr += 4;
      used += 4;
   }
   else {
      ((int *)batch->ptr)[0] = 0;
      ((int *)batch->ptr)[1] = MI_BATCH_BUFFER_END;

      batch->ptr += 8;
      used += 8;
   }

   intel_batchbuffer_unmap(batch);

   /* Get the batch buffer offset: Must call bmBufferOffset() before
    * bmValidateBuffers(), otherwise the buffer won't be on the inuse
    * list.
    */
   offset = bmBufferOffset(batch->intel, batch->buffer);

   if (bmValidateBuffers( batch->intel ) != 0) {
      /* Validation failed - skip submission but still reset/re-map the
       * batch at "out" so the caller can continue emitting.
       */
      assert(intel->locked);
      bmReleaseBuffers( batch->intel );
      retval = GL_FALSE;
      goto out;
   }


   if (intel->aub_file) {
      /* Send buffered commands to aubfile as a single packet.
       *
       * The terminator dword is temporarily overwritten with the
       * driver's flush command for the dump, then restored.
       */
      intel_batchbuffer_map(batch);
      ((int *)batch->ptr)[-1] = intel->vtbl.flush_cmd();
      intel->vtbl.aub_commands(intel,
			       offset, /* Fulsim wierdness - don't adjust */
			       batch->map + batch->offset,
			       used);
      ((int *)batch->ptr)[-1] = MI_BATCH_BUFFER_END;
      intel_batchbuffer_unmap(batch);
   }


   /* Fire the batch buffer, which was uploaded above:
    */
   intel_batch_ioctl(batch->intel,
		     offset + batch->offset,
		     used,
		     ignore_cliprects);

   if (intel->aub_file &&
       intel->ctx.DrawBuffer->_ColorDrawBufferMask[0] == BUFFER_BIT_FRONT_LEFT)
      intel->vtbl.aub_dump_bmp( intel, 0 );

   /* Reset the buffer:
    */
 out:
   intel_batchbuffer_reset( batch );
   intel_batchbuffer_map( batch );

   if (!retval)
      DBG("%s failed\n", __FUNCTION__);

   return retval;
}
225
226
227
228
229
230
231
/* Pad the batch with zero bytes so the next command starts on an
 * "align"-byte boundary (align must be a power of two); presumably the
 * zero dwords parse as MI_NOOP - confirm against the hardware docs.
 *
 * NOTE(review): the rounding is (ptr + align) & ~(align-1), which inserts
 * a full "align" bytes of padding even when ptr is already aligned; the
 * usual idiom is (ptr + align - 1) & ~(align-1).  Possibly intentional -
 * confirm before changing.
 *
 * If there is not enough space for the padding plus "sz" bytes, the batch
 * is flushed instead and no explicit fixup is applied on that path -
 * presumably the 64-byte alignment done by the reset inside the flush is
 * sufficient for all callers; verify.
 */
void intel_batchbuffer_align( struct intel_batchbuffer *batch,
			      GLuint align,
			      GLuint sz )
{
   unsigned long ptr = (unsigned long) batch->ptr;
   unsigned long aptr = (ptr + align) & ~((unsigned long)align-1);
   GLuint fixup = aptr - ptr;

   if (intel_batchbuffer_space(batch) < fixup + sz)
      intel_batchbuffer_flush(batch);
   else {
      memset(batch->ptr, 0, fixup);
      batch->ptr += fixup;
   }
}
247
248
249
250
251 void intel_batchbuffer_data(struct intel_batchbuffer *batch,
252 const void *data,
253 GLuint bytes,
254 GLuint flags)
255 {
256 assert((bytes & 3) == 0);
257 intel_batchbuffer_require_space(batch, bytes, flags);
258 __memcpy(batch->ptr, data, bytes);
259 batch->ptr += bytes;
260 }
261