/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include "intel_context.h"
#include "intel_batchbuffer.h"
#include "intel_decode.h"
#include "intel_reg.h"
#include "intel_bufmgr.h"
/* Relocations in kernel space:
 *    - pass dma buffer separately
 *    - memory manager knows how to patch
 *    - pass list of dependent buffers
 *    - pass relocation list
 *
 * Either:
 *    - get back an offset for buffer to fire
 *    - memory manager knows how to fire buffer
 *
 * Really want the buffer to be AGP and pinned.
 */
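
/* In the code below, a relocation is expressed through dri_bo_emit_reloc():
 * it records where in the batch the pointer lives (batch->ptr - batch->map),
 * the target buffer, a byte delta into that buffer, and the GEM read/write
 * domains.  See intel_batchbuffer_emit_reloc().
 */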
/* Cliprect fence: The highest fence protecting a dma buffer
 * containing explicit cliprect information.  Like the old drawable
 * lock but irq-driven.  X server must wait for this fence to expire
 * before changing cliprects [and then doing sw rendering?].  For
 * other dma buffers, the scheduler will grab current cliprect info
 * and mix into buffer.  X server must hold the lock while changing
 * cliprects???  Make per-drawable.  Need cliprects in shared memory
 * -- beats storing them with every cmd buffer in the queue.
 *
 * ==> X server must wait for this fence to expire before touching the
 * framebuffer with new cliprects.
 *
 * ==> Cliprect-dependent buffers associated with a
 * cliprect-timestamp.  All of the buffers associated with a timestamp
 * must go to hardware before any buffer with a newer timestamp.
 *
 * ==> Dma should be queued per-drawable for correct X/GL
 * synchronization.  Or can fences be used for this?
 *
 * Applies to: Blit operations, metaops, X server operations -- X
 * server automatically waits on its own dma to complete before
 * modifying cliprects ???
 */
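
/* Drop any previous batch buffer object and allocate a fresh one.  When
 * TTM is available the batch is accumulated in a malloc'ed system-memory
 * buffer and uploaded with dri_bo_subdata() at flush time; otherwise the
 * bo itself is mapped and written directly.
 */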
void
intel_batchbuffer_reset(struct intel_batchbuffer *batch)
{
   struct intel_context *intel = batch->intel;

   if (batch->buf != NULL) {
      dri_bo_unreference(batch->buf);
      batch->buf = NULL;
   }

   if (!batch->buffer && intel->ttm == GL_TRUE)
      batch->buffer = malloc (intel->maxBatchSize);

   batch->buf = dri_bo_alloc(intel->bufmgr, "batchbuffer",
                             intel->maxBatchSize, 4096);
   if (batch->buffer)
      batch->map = batch->buffer;
   else {
      dri_bo_map(batch->buf, GL_TRUE);
      batch->map = batch->buf->virtual;
   }

   batch->size = intel->maxBatchSize;
   batch->ptr = batch->map;
   batch->dirty_state = ~0;
   batch->cliprect_mode = IGNORE_CLIPRECTS;
}
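
/* Allocate a new batchbuffer wrapper for this context and prime it with
 * a fresh buffer object via intel_batchbuffer_reset().
 */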
struct intel_batchbuffer *
intel_batchbuffer_alloc(struct intel_context *intel)
{
   struct intel_batchbuffer *batch = calloc(sizeof(*batch), 1);

   batch->intel = intel;
   intel_batchbuffer_reset(batch);

   return batch;
}
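
/* Release the batchbuffer: free the system-memory shadow buffer if one
 * was in use, otherwise unmap the bo, then drop our reference to it.
 */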
void
intel_batchbuffer_free(struct intel_batchbuffer *batch)
{
   if (batch->buffer)
      free (batch->buffer);
   else {
      if (batch->map) {
         dri_bo_unmap(batch->buf);
         batch->map = NULL;
      }
   }

   dri_bo_unreference(batch->buf);
   batch->buf = NULL;
   free(batch);
}
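
/* Hand the accumulated batch to the hardware while the heavyweight DRI
 * lock is held: upload (or unmap) the batch contents, execute it against
 * the current cliprects, and optionally decode it for debugging.
 */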
/* TODO: Push this whole function into bufmgr.
 */
static void
do_flush_locked(struct intel_batchbuffer *batch,
                GLuint used, GLboolean allow_unlock)
{
   struct intel_context *intel = batch->intel;

   if (batch->buffer)
      dri_bo_subdata (batch->buf, 0, used, batch->buffer);
   else
      dri_bo_unmap(batch->buf);

   batch->map = NULL;
   batch->ptr = NULL;

   /* Throw away non-effective packets.  Won't work once we have
    * hardware contexts which would preserve state changes beyond a
    * single buffer.
    */
   if (!(intel->numClipRects == 0 &&
         batch->cliprect_mode == LOOP_CLIPRECTS) || intel->no_hw) {
      dri_bo_exec(batch->buf, used,
                  intel->pClipRects,
                  batch->cliprect_mode != LOOP_CLIPRECTS ?
                  0 : intel->numClipRects,
                  (((GLuint) intel->drawX) & 0xffff) |
                  (((GLuint) intel->drawY) << 16));
   }

   if (intel->numClipRects == 0 &&
       batch->cliprect_mode == LOOP_CLIPRECTS) {
      if (allow_unlock) {
         /* If we are not doing any actual user-visible rendering,
          * do a sched_yield to keep the app from pegging the cpu while
          * spinning waiting for new buffers to dirty.
          */
         UNLOCK_HARDWARE(intel);
         sched_yield();
         LOCK_HARDWARE(intel);
      }
   }

   if (INTEL_DEBUG & DEBUG_BATCH) {
      dri_bo_map(batch->buf, GL_FALSE);
      intel_decode(batch->buf->virtual, used / 4, batch->buf->offset,
                   intel->intelScreen->deviceID);
      dri_bo_unmap(batch->buf);

      if (intel->vtbl.debug_batch != NULL)
         intel->vtbl.debug_batch(intel);
   }

   if (allow_unlock)
      UNLOCK_HARDWARE(intel);

   if (intel->vtbl.new_batch != NULL)
      intel->vtbl.new_batch(intel);
}
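
/* Close out the current batch: append the flush/end commands, pad to an
 * even number of DWORDs, then hand the buffer to do_flush_locked() under
 * the hardware lock and start a fresh batch.
 */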
void
_intel_batchbuffer_flush(struct intel_batchbuffer *batch, const char *file,
                         int line)
{
   struct intel_context *intel = batch->intel;
   GLuint used = batch->ptr - batch->map;
   GLboolean was_locked = intel->locked;

   if (used == 0)
      return;

   if (INTEL_DEBUG & DEBUG_BATCH)
      fprintf(stderr, "%s:%d: Batchbuffer flush with %db used\n", file, line,
              used);

   /* Emit a flush if the bufmgr doesn't do it for us. */
   if (!intel->ttm) {
      *(GLuint *) (batch->ptr) = intel->vtbl.flush_cmd();
      batch->ptr += 4;
      used = batch->ptr - batch->map;
   }

   /* Round batchbuffer usage to 2 DWORDs. */
   if ((used & 4) == 0) {
      *(GLuint *) (batch->ptr) = 0; /* noop */
      batch->ptr += 4;
      used = batch->ptr - batch->map;
   }

   /* Mark the end of the buffer. */
   *(GLuint *) (batch->ptr) = MI_BATCH_BUFFER_END;
   batch->ptr += 4;
   used = batch->ptr - batch->map;

   /* Workaround for recursive batchbuffer flushing: If the window is
    * moved, we can get into a case where we try to flush during a
    * flush.  What happens is that when we try to grab the lock for
    * the first flush, we detect that the window moved which then
    * causes another flush (from the intel_draw_buffer() call in
    * intelUpdatePageFlipping()).  To work around this we reset the
    * batchbuffer tail pointer before trying to get the lock.  This
    * prevents the nested buffer flush, but a better fix would be to
    * avoid that in the first place. */
   batch->ptr = batch->map;

   if (intel->vtbl.finish_batch)
      intel->vtbl.finish_batch(intel);

   /* TODO: Just pass the relocation list and dma buffer up to the
    * kernel.
    */
   if (!was_locked)
      LOCK_HARDWARE(intel);

   do_flush_locked(batch, used, GL_FALSE);

   if (!was_locked)
      UNLOCK_HARDWARE(intel);

   if (INTEL_DEBUG & DEBUG_SYNC) {
      fprintf(stderr, "waiting for idle\n");
      dri_bo_map(batch->buf, GL_TRUE);
      dri_bo_unmap(batch->buf);
   }

   /* Reset the buffer:
    */
   intel_batchbuffer_reset(batch);
}
/* This is the only way buffers get added to the validate list.
 */
GLboolean
intel_batchbuffer_emit_reloc(struct intel_batchbuffer *batch,
                             dri_bo *buffer,
                             uint32_t read_domains, uint32_t write_domain,
                             uint32_t delta)
{
   int ret;

   if (batch->ptr - batch->map > batch->buf->size)
      _mesa_printf ("bad relocation ptr %p map %p offset %d size %d\n",
                    batch->ptr, batch->map, batch->ptr - batch->map,
                    batch->buf->size);

   ret = dri_bo_emit_reloc(batch->buf, read_domains, write_domain,
                           delta, batch->ptr - batch->map, buffer);

   /*
    * Using the old buffer offset, write in what the right data would be, in case
    * the buffer doesn't move and we can short-circuit the relocation processing
    * in the kernel
    */
   intel_batchbuffer_emit_dword (batch, buffer->offset + delta);

   return GL_TRUE;
}
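
/* Illustrative sketch (not driver code): emitting a small packet that
 * references another buffer, using only the helpers in this file.  The
 * command word, domain choice and dword count are placeholders, not a
 * real hardware packet:
 *
 *    intel_batchbuffer_require_space(batch, 3 * 4, NO_LOOP_CLIPRECTS);
 *    intel_batchbuffer_emit_dword(batch, cmd);
 *    intel_batchbuffer_emit_reloc(batch, target_bo,
 *                                 I915_GEM_DOMAIN_RENDER, 0, 0);
 *    intel_batchbuffer_emit_dword(batch, 0);
 *
 * The reloc dword is provisionally filled with the bo's current offset;
 * the kernel rewrites it only if the bo actually moves.
 */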
void
intel_batchbuffer_data(struct intel_batchbuffer *batch,
                       const void *data, GLuint bytes,
                       enum cliprect_mode cliprect_mode)
{
   assert((bytes & 3) == 0);

   intel_batchbuffer_require_space(batch, bytes, cliprect_mode);
   __memcpy(batch->ptr, data, bytes);
   batch->ptr += bytes;
}
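
/* Illustrative sketch (not driver code): the overall lifecycle of a
 * batchbuffer as implemented above, assuming the usual
 * intel_batchbuffer_flush() wrapper that supplies __FILE__/__LINE__ to
 * _intel_batchbuffer_flush().  Error handling is omitted:
 *
 *    struct intel_batchbuffer *batch = intel_batchbuffer_alloc(intel);
 *    ...emit state and primitives into batch...
 *    intel_batchbuffer_flush(batch);
 *    intel_batchbuffer_free(batch);
 */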