/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "intel_context.h"
#include "intel_batchbuffer.h"
#include "intel_decode.h"
#include "intel_reg.h"
#include "intel_bufmgr.h"
#include "intel_buffers.h"

/* Relocations in kernel space:
 *  - pass dma buffer separately
 *  - memory manager knows how to patch
 *  - pass list of dependent buffers
 *  - pass relocation list
 *
 * Either:
 *  - get back an offset for buffer to fire
 *  - memory manager knows how to fire buffer
 *
 * Really want the buffer to be AGP and pinned.
 *
 */

/* Cliprect fence: The highest fence protecting a dma buffer
 * containing explicit cliprect information.  Like the old drawable
 * lock but irq-driven.  X server must wait for this fence to expire
 * before changing cliprects [and then doing sw rendering?].  For
 * other dma buffers, the scheduler will grab current cliprect info
 * and mix into buffer.  X server must hold the lock while changing
 * cliprects???  Make per-drawable.  Need cliprects in shared memory
 * -- beats storing them with every cmd buffer in the queue.
 *
 * ==> X server must wait for this fence to expire before touching the
 * framebuffer with new cliprects.
 *
 * ==> Cliprect-dependent buffers associated with a
 * cliprect-timestamp.  All of the buffers associated with a timestamp
 * must go to hardware before any buffer with a newer timestamp.
 *
 * ==> Dma should be queued per-drawable for correct X/GL
 * synchronization.  Or can fences be used for this?
 *
 * Applies to: Blit operations, metaops, X server operations -- X
 * server automatically waits on its own dma to complete before
 * modifying cliprects ???
 */

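/* Drop the previous batch BO (if any), allocate a fresh one of
 * maxBatchSize bytes, and point map/ptr at either the malloc'd CPU
 * shadow buffer (uploaded via dri_bo_subdata at flush time) or the
 * mapped BO itself.
 */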
void
intel_batchbuffer_reset(struct intel_batchbuffer *batch)
{
   struct intel_context *intel = batch->intel;

   if (batch->buf != NULL) {
      dri_bo_unreference(batch->buf);
      batch->buf = NULL;
   }

   if (!batch->buffer)
      batch->buffer = malloc (intel->maxBatchSize);

   batch->buf = dri_bo_alloc(intel->bufmgr, "batchbuffer",
                             intel->maxBatchSize, 4096);
   if (batch->buffer)
      batch->map = batch->buffer;
   else {
      dri_bo_map(batch->buf, GL_TRUE);
      batch->map = batch->buf->virtual;
   }
   batch->size = intel->maxBatchSize;
   batch->ptr = batch->map;
   batch->dirty_state = ~0;
}

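/* Allocate a new batchbuffer for the given context and prepare it for use. */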
struct intel_batchbuffer *
intel_batchbuffer_alloc(struct intel_context *intel)
{
   struct intel_batchbuffer *batch = calloc(1, sizeof(*batch));

   batch->intel = intel;
   intel_batchbuffer_reset(batch);

   return batch;
}

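/* Free the CPU shadow buffer (or unmap the BO) and drop the batch's
 * reference to its buffer object.
 */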
void
intel_batchbuffer_free(struct intel_batchbuffer *batch)
{
   if (batch->buffer)
      free (batch->buffer);
   else {
      if (batch->map) {
         dri_bo_unmap(batch->buf);
         batch->map = NULL;
      }
   }
   dri_bo_unreference(batch->buf);
   batch->buf = NULL;
   free(batch);
}


/* TODO: Push this whole function into bufmgr.
 */
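/* Upload the accumulated commands into the batch BO (or unmap it if it was
 * built in place), submit it to the kernel with dri_bo_exec, and optionally
 * decode the batch for debugging.
 */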
static void
do_flush_locked(struct intel_batchbuffer *batch, GLuint used)
{
   struct intel_context *intel = batch->intel;
   int ret = 0;
   int x_off = 0, y_off = 0;

   if (batch->buffer)
      dri_bo_subdata (batch->buf, 0, used, batch->buffer);
   else
      dri_bo_unmap(batch->buf);

   batch->map = NULL;
   batch->ptr = NULL;

   ret = dri_bo_exec(batch->buf, used, NULL, 0,
                     (x_off & 0xffff) | (y_off << 16));

   if (INTEL_DEBUG & DEBUG_BATCH) {
      dri_bo_map(batch->buf, GL_FALSE);
      intel_decode(batch->buf->virtual, used / 4, batch->buf->offset,
                   intel->intelScreen->deviceID);
      dri_bo_unmap(batch->buf);

      if (intel->vtbl.debug_batch != NULL)
         intel->vtbl.debug_batch(intel);
   }

   if (ret != 0) {
      fprintf(stderr, "dri_bo_exec failed: %d\n", ret);
      exit(1);
   }
   intel->vtbl.new_batch(intel);
}

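/* Close out the current batch: pad it, append MI_BATCH_BUFFER_END, submit
 * it to the hardware, and reset the batchbuffer for new commands.
 */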
void
_intel_batchbuffer_flush(struct intel_batchbuffer *batch, const char *file,
                         int line)
{
   struct intel_context *intel = batch->intel;
   GLuint used = batch->ptr - batch->map;

   if (!intel->using_dri2_swapbuffers &&
       intel->first_post_swapbuffers_batch == NULL) {
      intel->first_post_swapbuffers_batch = intel->batch->buf;
      drm_intel_bo_reference(intel->first_post_swapbuffers_batch);
   }

   if (used == 0)
      return;

   if (INTEL_DEBUG & DEBUG_BATCH)
      fprintf(stderr, "%s:%d: Batchbuffer flush with %db used\n", file, line,
              used);

   batch->reserved_space = 0;
   /* Emit a flush if the bufmgr doesn't do it for us. */
   if (intel->always_flush_cache) {
      intel_batchbuffer_emit_mi_flush(batch);
      used = batch->ptr - batch->map;
   }

   /* Pad the batch so that, together with the MI_BATCH_BUFFER_END below,
    * the total usage is a multiple of 2 DWORDs.
    */
   if ((used & 4) == 0) {
      *(GLuint *) (batch->ptr) = 0; /* noop */
      batch->ptr += 4;
      used = batch->ptr - batch->map;
   }

   /* Mark the end of the buffer. */
   *(GLuint *) (batch->ptr) = MI_BATCH_BUFFER_END;
   batch->ptr += 4;
   used = batch->ptr - batch->map;

   /* Workaround for recursive batchbuffer flushing: If the window is
    * moved, we can get into a case where we try to flush during a
    * flush.  What happens is that when we try to grab the lock for
    * the first flush, we detect that the window moved which then
    * causes another flush (from the intel_draw_buffer() call in
    * intelUpdatePageFlipping()).  To work around this we reset the
    * batchbuffer tail pointer before trying to get the lock.  This
    * prevents the nested buffer flush, but a better fix would be to
    * avoid that in the first place. */
   batch->ptr = batch->map;

   if (intel->vtbl.finish_batch)
      intel->vtbl.finish_batch(intel);

   /* Check that we didn't just wrap our batchbuffer at a bad time. */
   assert(!intel->no_batch_wrap);

   batch->reserved_space = BATCH_RESERVED;

   /* TODO: Just pass the relocation list and dma buffer up to the
    * kernel.
    */
   do_flush_locked(batch, used);

   if (INTEL_DEBUG & DEBUG_SYNC) {
      fprintf(stderr, "waiting for idle\n");
      dri_bo_map(batch->buf, GL_TRUE);
      dri_bo_unmap(batch->buf);
   }

   /* Reset the buffer:
    */
   intel_batchbuffer_reset(batch);
}


/* This is the only way buffers get added to the validate list.
 */
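/* read_domains/write_domain are the GEM domain flags describing how the
 * target buffer will be accessed; delta is added to the target buffer's
 * offset when the relocation is resolved.  Callers typically reach this
 * through the OUT_RELOC() macro rather than calling it directly.
 */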
GLboolean
intel_batchbuffer_emit_reloc(struct intel_batchbuffer *batch,
                             dri_bo *buffer,
                             uint32_t read_domains, uint32_t write_domain,
                             uint32_t delta)
{
   int ret;

   if (batch->ptr - batch->map > batch->buf->size)
      _mesa_printf ("bad relocation ptr %p map %p offset %d size %lu\n",
                    batch->ptr, batch->map,
                    (int) (batch->ptr - batch->map),
                    (unsigned long) batch->buf->size);
   ret = dri_bo_emit_reloc(batch->buf, read_domains, write_domain,
                           delta, batch->ptr - batch->map, buffer);

   /*
    * Using the old buffer offset, write in what the right data would be, in
    * case the buffer doesn't move and we can short-circuit the relocation
    * processing in the kernel.
    */
   intel_batchbuffer_emit_dword (batch, buffer->offset + delta);

   return GL_TRUE;
}

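/* Copy a block of pre-built commands/data into the batch.  The size must be
 * a whole number of DWORDs.
 */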
void
intel_batchbuffer_data(struct intel_batchbuffer *batch,
                       const void *data, GLuint bytes)
{
   assert((bytes & 3) == 0);
   intel_batchbuffer_require_space(batch, bytes);
   __memcpy(batch->ptr, data, bytes);
   batch->ptr += bytes;
}

/* Emit a pipelined flush to either flush render and texture cache for
 * reading from a FBO-drawn texture, or flush so that frontbuffer
 * rendering appears on the screen in DRI1.
 *
 * This is also used for the always_flush_cache driconf debug option.
 */
void
intel_batchbuffer_emit_mi_flush(struct intel_batchbuffer *batch)
{
   struct intel_context *intel = batch->intel;

   if (intel->gen >= 4) {
      BEGIN_BATCH(4);
      OUT_BATCH(_3DSTATE_PIPE_CONTROL |
                PIPE_CONTROL_INSTRUCTION_FLUSH |
                PIPE_CONTROL_WRITE_FLUSH |
                PIPE_CONTROL_NO_WRITE);
      OUT_BATCH(0); /* write address */
      OUT_BATCH(0); /* write data */
      OUT_BATCH(0); /* write data */
      ADVANCE_BATCH();
   } else {
      BEGIN_BATCH(1);
      OUT_BATCH(MI_FLUSH);
      ADVANCE_BATCH();
   }
}