/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "intel_batchbuffer.h"
#include "intel_ioctl.h"
#include "intel_decode.h"
#include "intel_reg.h"

/* Relocations in kernel space:
 *    - pass dma buffer separately
 *    - memory manager knows how to patch
 *    - pass list of dependent buffers
 *    - pass relocation list
 *
 * Either:
 *    - get back an offset for buffer to fire
 *    - memory manager knows how to fire buffer
 *
 * Really want the buffer to be AGP and pinned.
 */

/* Cliprect fence: The highest fence protecting a dma buffer
 * containing explicit cliprect information.  Like the old drawable
 * lock but irq-driven.  X server must wait for this fence to expire
 * before changing cliprects [and then doing sw rendering?].  For
 * other dma buffers, the scheduler will grab current cliprect info
 * and mix into buffer.  X server must hold the lock while changing
 * cliprects???  Make per-drawable.  Need cliprects in shared memory
 * -- beats storing them with every cmd buffer in the queue.
 *
 * ==> X server must wait for this fence to expire before touching the
 * framebuffer with new cliprects.
 *
 * ==> Cliprect-dependent buffers associated with a
 * cliprect-timestamp.  All of the buffers associated with a timestamp
 * must go to hardware before any buffer with a newer timestamp.
 *
 * ==> Dma should be queued per-drawable for correct X/GL
 * synchronization.  Or can fences be used for this?
 *
 * Applies to: Blit operations, metaops, X server operations -- X
 * server automatically waits on its own dma to complete before
 * modifying cliprects ???
 */
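
/* Drop any previous batch buffer object and start over with a freshly
 * allocated, CPU-mapped one, so that new commands can be written directly
 * through batch->map / batch->ptr.
 */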
void
intel_batchbuffer_reset(struct intel_batchbuffer *batch)
{
   struct intel_context *intel = batch->intel;

   if (batch->buf != NULL) {
      dri_bo_unreference(batch->buf);
      batch->buf = NULL;
   }

   batch->buf = dri_bo_alloc(intel->bufmgr, "batchbuffer",
                             intel->maxBatchSize, 4096,
                             DRM_BO_FLAG_MEM_LOCAL |
                             DRM_BO_FLAG_CACHED |
                             DRM_BO_FLAG_CACHED_MAPPED);
   dri_bo_map(batch->buf, GL_TRUE);
   batch->map = batch->buf->virtual;
   batch->size = intel->maxBatchSize;
   batch->ptr = batch->map;
   batch->dirty_state = ~0;
   batch->cliprect_mode = IGNORE_CLIPRECTS;

   /* account batchbuffer in aperture */
   dri_bufmgr_check_aperture_space(batch->buf);
}
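
/* Create a batchbuffer for the given context and prime it with an initial
 * buffer object via intel_batchbuffer_reset().
 */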
struct intel_batchbuffer *
intel_batchbuffer_alloc(struct intel_context *intel)
{
   struct intel_batchbuffer *batch = calloc(sizeof(*batch), 1);

   batch->intel = intel;
   batch->last_fence = NULL;
   intel_batchbuffer_reset(batch);

   return batch;
}
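
/* Tear down a batchbuffer: wait for and release any outstanding fence,
 * then unmap and unreference the underlying buffer object.
 */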
void
intel_batchbuffer_free(struct intel_batchbuffer *batch)
{
   if (batch->last_fence) {
      dri_fence_wait(batch->last_fence);
      dri_fence_unreference(batch->last_fence);
      batch->last_fence = NULL;
   }
   if (batch->map) {
      dri_bo_unmap(batch->buf);
      batch->map = NULL;
   }
   dri_bo_unreference(batch->buf);
   batch->buf = NULL;
   free(batch);
}
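
/* Submit the batch to the hardware while the heavyweight hardware lock is
 * held: resolve relocations, hand the buffer off through the exec or batch
 * ioctl, and optionally decode/dump it when DEBUG_BATCH is set.
 */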
/* TODO: Push this whole function into bufmgr.
 */
static void
do_flush_locked(struct intel_batchbuffer *batch,
                GLuint used, GLboolean allow_unlock)
{
   struct intel_context *intel = batch->intel;
   void *start;
   GLuint count;

   dri_bo_unmap(batch->buf);
   start = dri_process_relocs(batch->buf, &count);

   /* Throw away non-effective packets.  Won't work once we have
    * hardware contexts which would preserve state changes beyond a
    * single buffer.
    */
   if (!(intel->numClipRects == 0 &&
         batch->cliprect_mode == LOOP_CLIPRECTS)) {
      if (intel->ttm == GL_TRUE) {
         intel_exec_ioctl(batch->intel,
                          used,
                          batch->cliprect_mode != LOOP_CLIPRECTS,
                          allow_unlock,
                          start, count, &batch->last_fence);
      }
      else {
         intel_batch_ioctl(batch->intel,
                           batch->buf->offset,
                           used,
                           batch->cliprect_mode != LOOP_CLIPRECTS,
                           allow_unlock);
      }
   }

   dri_post_submit(batch->buf, &batch->last_fence);

   if (intel->numClipRects == 0 &&
       batch->cliprect_mode == LOOP_CLIPRECTS) {
      if (allow_unlock) {
         /* If we are not doing any actual user-visible rendering,
          * do a sched_yield to keep the app from pegging the cpu while
          * spinning waiting for more buffers to become available.
          */
         UNLOCK_HARDWARE(intel);
         sched_yield();
         LOCK_HARDWARE(intel);
      }
   }

   if (INTEL_DEBUG & DEBUG_BATCH) {
      dri_bo_map(batch->buf, GL_FALSE);
      intel_decode(batch->buf->virtual, used / 4, batch->buf->offset,
                   intel->intelScreen->deviceID);
      dri_bo_unmap(batch->buf);

      if (intel->vtbl.debug_batch != NULL)
         intel->vtbl.debug_batch(intel);
   }

   intel->vtbl.new_batch(intel);
}
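
/* Close out the current batch (MI_FLUSH + MI_BATCH_BUFFER_END), submit it
 * under the hardware lock, and reset the batchbuffer for new commands.
 * The file/line arguments only feed the DEBUG_BATCH output.
 */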
void
_intel_batchbuffer_flush(struct intel_batchbuffer *batch, const char *file,
                         int line)
{
   struct intel_context *intel = batch->intel;
   GLuint used = batch->ptr - batch->map;
   GLboolean was_locked = intel->locked;

   if (used == 0)
      return;

   if (INTEL_DEBUG & DEBUG_BATCH)
      fprintf(stderr, "%s:%d: Batchbuffer flush with %db used\n", file, line,
              used);

   /* Add the MI_BATCH_BUFFER_END.  Always add an MI_FLUSH - this is a
    * performance drain that we would like to avoid.
    *
    * Keep the batch length a multiple of 8 bytes: if the tail is only
    * dword-aligned, emit three dwords, otherwise two.
    */
   if (used & 4) {
      ((int *) batch->ptr)[0] = intel->vtbl.flush_cmd();
      ((int *) batch->ptr)[1] = 0;
      ((int *) batch->ptr)[2] = MI_BATCH_BUFFER_END;
      used += 12;
   }
   else {
      ((int *) batch->ptr)[0] = intel->vtbl.flush_cmd();
      ((int *) batch->ptr)[1] = MI_BATCH_BUFFER_END;
      used += 8;
   }

   /* Workaround for recursive batchbuffer flushing: If the window is
    * moved, we can get into a case where we try to flush during a
    * flush.  What happens is that when we try to grab the lock for
    * the first flush, we detect that the window moved which then
    * causes another flush (from the intel_draw_buffer() call in
    * intelUpdatePageFlipping()).  To work around this we reset the
    * batchbuffer tail pointer before trying to get the lock.  This
    * prevents the nested buffer flush, but a better fix would be to
    * avoid that in the first place. */
   batch->ptr = batch->map;

   /* TODO: Just pass the relocation list and dma buffer up to the
    * kernel.
    */
   if (!was_locked)
      LOCK_HARDWARE(intel);

   do_flush_locked(batch, used, GL_FALSE);

   if (!was_locked)
      UNLOCK_HARDWARE(intel);

   if (INTEL_DEBUG & DEBUG_SYNC) {
      fprintf(stderr, "waiting for idle\n");
      if (batch->last_fence != NULL)
         dri_fence_wait(batch->last_fence);
   }

   /* Reset the buffer:
    */
   intel_batchbuffer_reset(batch);
}
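
/* Flush the batch and then block until its fence has signalled, i.e. until
 * the hardware has finished executing it.
 */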
void
intel_batchbuffer_finish(struct intel_batchbuffer *batch)
{
   intel_batchbuffer_flush(batch);
   if (batch->last_fence != NULL)
      dri_fence_wait(batch->last_fence);
}
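
/* Emit a relocation for 'buffer' at the current batch pointer.  'flags' is
 * passed straight through to dri_emit_reloc() (presumably the DRM_BO_FLAG_*
 * access flags describing how the buffer will be used), and 'delta' is a
 * byte offset added to the buffer's eventual address.
 */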
/* This is the only way buffers get added to the validate list.
 */
GLboolean
intel_batchbuffer_emit_reloc(struct intel_batchbuffer *batch,
                             dri_bo *buffer,
                             GLuint flags, GLuint delta)
{
   int ret;

   ret = dri_emit_reloc(batch->buf, flags, delta, batch->ptr - batch->map,
                        buffer);

   /*
    * Using the old buffer offset, write in what the right data would be, in
    * case the buffer doesn't move and we can short-circuit the relocation
    * processing in the kernel.
    */
   intel_batchbuffer_emit_dword(batch, buffer->offset + delta);

   return GL_TRUE;
}
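
/* Copy a block of pre-built command dwords into the batch, making sure
 * there is enough space (with the requested cliprect handling) first.
 * 'bytes' must be a multiple of 4.
 */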
void
intel_batchbuffer_data(struct intel_batchbuffer *batch,
                       const void *data, GLuint bytes,
                       enum cliprect_mode cliprect_mode)
{
   assert((bytes & 3) == 0);
   intel_batchbuffer_require_space(batch, bytes, cliprect_mode);
   __memcpy(batch->ptr, data, bytes);
   batch->ptr += bytes;
}
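
/* Illustrative usage sketch (not part of this file): a typical caller
 * writes commands through these helpers and then flushes, e.g.
 *
 *    GLuint cmds[2] = { ..., ... };   // already-encoded command dwords
 *    intel_batchbuffer_data(intel->batch, cmds, sizeof(cmds),
 *                           IGNORE_CLIPRECTS);
 *    intel_batchbuffer_flush(intel->batch);
 *
 * intel_batchbuffer_flush() is assumed here to be the wrapper (presumably
 * in intel_batchbuffer.h) that supplies __FILE__/__LINE__ to
 * _intel_batchbuffer_flush(), as used by intel_batchbuffer_finish() above.
 */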