/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <sched.h> /* sched_yield() in do_flush_locked() */

#include "intel_context.h"
#include "intel_batchbuffer.h"
#include "intel_decode.h"
#include "intel_reg.h"
#include "intel_bufmgr.h"
#include "intel_buffers.h"

/* Relocations in kernel space:
 *    - pass dma buffer separately
 *    - memory manager knows how to patch
 *    - pass list of dependent buffers
 *    - pass relocation list
 *
 * Either:
 *    - get back an offset for buffer to fire
 *    - memory manager knows how to fire buffer
 *
 * Really want the buffer to be AGP and pinned.
 */
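
/* For reference, a sketch of the per-fixup record the kernel consumes for
 * the scheme above (field names follow struct drm_i915_gem_relocation_entry
 * in i915_drm.h, but treat this as an illustration, not the authoritative
 * definition):
 *
 *    struct drm_i915_gem_relocation_entry {
 *       __u32 target_handle;    // buffer this fixup points at
 *       __u32 delta;            // value added to the target's offset
 *       __u64 offset;           // byte offset of the dword to patch
 *       __u64 presumed_offset;  // where userspace guessed the target is
 *       __u32 read_domains;     // GPU domains that will read the target
 *       __u32 write_domain;     // GPU domain that will write the target
 *    };
 *
 * The kernel patches the dword at 'offset' only when 'presumed_offset'
 * turns out to be wrong, which is what lets intel_batchbuffer_emit_reloc()
 * below short-circuit most relocations.
 */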

/* Cliprect fence: The highest fence protecting a dma buffer
 * containing explicit cliprect information.  Like the old drawable
 * lock but irq-driven.  X server must wait for this fence to expire
 * before changing cliprects [and then doing sw rendering?].  For
 * other dma buffers, the scheduler will grab current cliprect info
 * and mix into buffer.  X server must hold the lock while changing
 * cliprects???  Make per-drawable.  Need cliprects in shared memory
 * -- beats storing them with every cmd buffer in the queue.
 *
 * ==> X server must wait for this fence to expire before touching the
 * framebuffer with new cliprects.
 *
 * ==> Cliprect-dependent buffers associated with a
 * cliprect-timestamp.  All of the buffers associated with a timestamp
 * must go to hardware before any buffer with a newer timestamp.
 *
 * ==> Dma should be queued per-drawable for correct X/GL
 * synchronization.  Or can fences be used for this?
 *
 * Applies to: Blit operations, metaops, X server operations -- X
 * server automatically waits on its own dma to complete before
 * modifying cliprects ???
 */
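
/* The cliprect handling described above is driven by enum cliprect_mode
 * from intel_batchbuffer.h.  Only two of its values appear in this file:
 *
 *    IGNORE_CLIPRECTS  -- batch contents do not depend on cliprects, so
 *                         the batch is submitted exactly once;
 *    LOOP_CLIPRECTS    -- the kernel must re-execute the batch once per
 *                         cliprect (do_flush_locked() skips submission
 *                         entirely when there are zero cliprects).
 *
 * The header defines further modes for cliprect-sensitive batches; see it
 * for the authoritative list.
 */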

static void
intel_batchbuffer_reset(struct intel_batchbuffer *batch)
{
   struct intel_context *intel = batch->intel;

   if (batch->buf != NULL) {
      dri_bo_unreference(batch->buf);
      batch->buf = NULL;
   }

   /* In TTM mode, assemble the batch in a malloc'd shadow buffer and
    * upload it with dri_bo_subdata() at flush time; otherwise build it
    * directly in the mapped buffer object.
    */
   if (!batch->buffer && intel->ttm == GL_TRUE)
      batch->buffer = malloc (intel->maxBatchSize);

   batch->buf = dri_bo_alloc(intel->bufmgr, "batchbuffer",
                             intel->maxBatchSize, 4096);
   if (batch->buffer)
      batch->map = batch->buffer;
   else {
      dri_bo_map(batch->buf, GL_TRUE);
      batch->map = batch->buf->virtual;
   }
   batch->size = intel->maxBatchSize;
   batch->ptr = batch->map;
   batch->dirty_state = ~0;
   batch->cliprect_mode = IGNORE_CLIPRECTS;
}

struct intel_batchbuffer *
intel_batchbuffer_alloc(struct intel_context *intel)
{
   struct intel_batchbuffer *batch = calloc(sizeof(*batch), 1);

   batch->intel = intel;
   intel_batchbuffer_reset(batch);

   return batch;
}

void
intel_batchbuffer_free(struct intel_batchbuffer *batch)
{
   if (batch->buffer)
      free (batch->buffer);
   else if (batch->map)
      dri_bo_unmap(batch->buf);

   dri_bo_unreference(batch->buf);
   batch->buf = NULL;
   free(batch);
}
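
/* Typical lifecycle, as driven by the context code (an illustrative sketch;
 * the surrounding calls are assumptions, not code from this file):
 *
 *    struct intel_batchbuffer *batch = intel_batchbuffer_alloc(intel);
 *    ...
 *    // emit commands via the BEGIN_BATCH()/OUT_BATCH() macros
 *    intel_batchbuffer_flush(batch);   // submit, then reset for reuse
 *    ...
 *    intel_batchbuffer_free(batch);    // at context teardown
 */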

/* TODO: Push this whole function into bufmgr.
 */
static void
do_flush_locked(struct intel_batchbuffer *batch,
                GLuint used, GLboolean allow_unlock)
{
   struct intel_context *intel = batch->intel;
   int ret = 0;
   unsigned int num_cliprects = 0;
   struct drm_clip_rect *cliprects = NULL;
   int x_off = 0, y_off = 0;

   /* Upload the shadow copy, or hand back our mapping of the buffer. */
   if (batch->buffer)
      dri_bo_subdata (batch->buf, 0, used, batch->buffer);
   else
      dri_bo_unmap(batch->buf);

   batch->map = NULL;
   batch->ptr = NULL;

   if (batch->cliprect_mode == LOOP_CLIPRECTS)
      intel_get_cliprects(intel, &cliprects, &num_cliprects, &x_off, &y_off);

   /* Dispatch the batchbuffer, if it has some effect (nonzero cliprects).
    * Can't short-circuit like this once we have hardware contexts, but we
    * should always be in DRI2 mode by then anyway.
    */
   if ((batch->cliprect_mode != LOOP_CLIPRECTS ||
        num_cliprects != 0) && !intel->no_hw) {
      ret = dri_bo_exec(batch->buf, used, cliprects, num_cliprects,
                        (x_off & 0xffff) | (y_off << 16));
   }

   if (batch->cliprect_mode == LOOP_CLIPRECTS && num_cliprects == 0) {
      if (allow_unlock) {
         /* If we are not doing any actual user-visible rendering,
          * do a sched_yield to keep the app from pegging the cpu while
          * doing nothing.
          */
         UNLOCK_HARDWARE(intel);
         sched_yield();
         LOCK_HARDWARE(intel);
      }
   }

   if (INTEL_DEBUG & DEBUG_BATCH) {
      dri_bo_map(batch->buf, GL_FALSE);
      intel_decode(batch->buf->virtual, used / 4, batch->buf->offset,
                   intel->intelScreen->deviceID);
      dri_bo_unmap(batch->buf);

      if (intel->vtbl.debug_batch != NULL)
         intel->vtbl.debug_batch(intel);
   }

   /* There is no reasonable way to recover from a failed submit;
    * drop the lock and bail.
    */
   if (ret != 0) {
      UNLOCK_HARDWARE(intel);
      exit(1);
   }

   intel->vtbl.new_batch(intel);
}

void
_intel_batchbuffer_flush(struct intel_batchbuffer *batch, const char *file,
                         int line)
{
   struct intel_context *intel = batch->intel;
   GLuint used = batch->ptr - batch->map;
   GLboolean was_locked = intel->locked;

   if (used == 0) {
      batch->cliprect_mode = IGNORE_CLIPRECTS;
      return;
   }

   if (INTEL_DEBUG & DEBUG_BATCH)
      fprintf(stderr, "%s:%d: Batchbuffer flush with %db used\n", file, line,
              used);

   /* Emit a flush if the bufmgr doesn't do it for us. */
   if (intel->always_flush_cache || !intel->ttm) {
      *(GLuint *) (batch->ptr) = intel->vtbl.flush_cmd();
      batch->ptr += 4;
      used = batch->ptr - batch->map;
   }

   /* Round the batch up to an even number of DWORDs: together with the
    * MI_BATCH_BUFFER_END emitted below, the total length stays a
    * multiple of 8 bytes, as the hardware requires.
    */
   if ((used & 4) == 0) {
      *(GLuint *) (batch->ptr) = 0; /* noop */
      batch->ptr += 4;
      used = batch->ptr - batch->map;
   }

   /* Mark the end of the buffer. */
   *(GLuint *) (batch->ptr) = MI_BATCH_BUFFER_END;
   batch->ptr += 4;
   used = batch->ptr - batch->map;

   /* Workaround for recursive batchbuffer flushing: If the window is
    * moved, we can get into a case where we try to flush during a
    * flush.  What happens is that when we try to grab the lock for
    * the first flush, we detect that the window moved which then
    * causes another flush (from the intel_draw_buffer() call in
    * intelUpdatePageFlipping()).  To work around this we reset the
    * batchbuffer tail pointer before trying to get the lock.  This
    * prevents the nested buffer flush, but a better fix would be to
    * avoid that in the first place. */
   batch->ptr = batch->map;

   if (intel->vtbl.finish_batch)
      intel->vtbl.finish_batch(intel);

   /* TODO: Just pass the relocation list and dma buffer up to the
    * kernel.
    */
   if (!was_locked)
      LOCK_HARDWARE(intel);

   do_flush_locked(batch, used, GL_FALSE);

   if (!was_locked)
      UNLOCK_HARDWARE(intel);

   if (INTEL_DEBUG & DEBUG_SYNC) {
      fprintf(stderr, "waiting for idle\n");
      /* Mapping the buffer blocks until the GPU is done with it. */
      dri_bo_map(batch->buf, GL_TRUE);
      dri_bo_unmap(batch->buf);
   }

   /* Reset the buffer so it is ready for new commands. */
   intel_batchbuffer_reset(batch);
}
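
/* Callers normally reach the function above through the
 * intel_batchbuffer_flush() macro, which supplies __FILE__ and __LINE__ so
 * the DEBUG_BATCH message can name the flushing call site.  A sketch of the
 * definition (intel_batchbuffer.h is authoritative):
 *
 *    #define intel_batchbuffer_flush(batch) \
 *       _intel_batchbuffer_flush(batch, __FILE__, __LINE__)
 */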

/* This is the only way buffers get added to the validate list.
 */
GLboolean
intel_batchbuffer_emit_reloc(struct intel_batchbuffer *batch,
                             dri_bo *buffer,
                             uint32_t read_domains, uint32_t write_domain,
                             uint32_t delta)
{
   int ret;

   if (batch->ptr - batch->map > batch->buf->size)
      _mesa_printf ("bad relocation ptr %p map %p offset %d size %d\n",
                    batch->ptr, batch->map, batch->ptr - batch->map,
                    batch->buf->size);

   ret = dri_bo_emit_reloc(batch->buf, read_domains, write_domain,
                           delta, batch->ptr - batch->map, buffer);
   (void) ret;

   /* Using the old buffer offset, write in what the right data would be, in
    * case the buffer doesn't move and we can short-circuit the relocation
    * processing in the kernel.
    */
   intel_batchbuffer_emit_dword (batch, buffer->offset + delta);

   return GL_TRUE;
}
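
/* Illustrative caller, using the BEGIN_BATCH/OUT_RELOC/ADVANCE_BATCH macros
 * from intel_batchbuffer.h (a sketch under assumed macro signatures; the
 * command word is a placeholder, not a real blit):
 *
 *    BEGIN_BATCH(2, NO_LOOP_CLIPRECTS);
 *    OUT_BATCH(cmd);
 *    OUT_RELOC(dst_bo, I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, 0);
 *    ADVANCE_BATCH();
 *
 * OUT_RELOC expands to a call of intel_batchbuffer_emit_reloc(), so the
 * kernel both validates dst_bo and patches the emitted dword if dst_bo's
 * presumed offset was wrong.
 */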

void
intel_batchbuffer_data(struct intel_batchbuffer *batch,
                       const void *data, GLuint bytes,
                       enum cliprect_mode cliprect_mode)
{
   assert((bytes & 3) == 0);

   intel_batchbuffer_require_space(batch, bytes, cliprect_mode);
   __memcpy(batch->ptr, data, bytes);
   batch->ptr += bytes;
}
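
/* Example use (a sketch; the command words are assumed opcodes from
 * intel_reg.h): any pre-built block whose size is a multiple of 4 bytes can
 * be copied into the batch at the current tail.
 *
 *    GLuint cmds[2] = { MI_FLUSH, MI_NOOP };
 *    intel_batchbuffer_data(batch, cmds, sizeof(cmds), IGNORE_CLIPRECTS);
 */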