/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include "intel_context.h"
#include "intel_batchbuffer.h"
#include "intel_decode.h"
#include "intel_reg.h"
#include "intel_bufmgr.h"
#include "intel_buffers.h"
/* Relocations in kernel space:
 *    - pass dma buffer separately
 *    - memory manager knows how to patch
 *    - pass list of dependent buffers
 *    - pass relocation list
 *
 * Either:
 *    - get back an offset for buffer to fire
 *    - memory manager knows how to fire buffer
 *
 * Really want the buffer to be AGP and pinned.
 */
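
/* For illustration: a relocation record handed to the kernel looks roughly
 * like the sketch below (modeled on drm_i915_gem_relocation_entry from the
 * DRM uapi; the field names and layout here are an assumption for
 * exposition, not a reference).
 */
#if 0
struct example_reloc_entry {
   uint32_t target_handle;    /* buffer object this reloc points at */
   uint32_t delta;            /* constant added to the target's address */
   uint64_t offset;           /* byte offset within the batch to patch */
   uint64_t presumed_offset;  /* userspace's guess; if the buffer did not
                               * move, the kernel can skip the patch */
   uint32_t read_domains;     /* caches that will read through this pointer */
   uint32_t write_domain;     /* cache that will be written, if any */
};
#endif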
/* Cliprect fence: The highest fence protecting a dma buffer
 * containing explicit cliprect information.  Like the old drawable
 * lock but irq-driven.  X server must wait for this fence to expire
 * before changing cliprects [and then doing sw rendering?].  For
 * other dma buffers, the scheduler will grab current cliprect info
 * and mix into buffer.  X server must hold the lock while changing
 * cliprects???  Make per-drawable.  Need cliprects in shared memory
 * -- beats storing them with every cmd buffer in the queue.
 *
 * ==> X server must wait for this fence to expire before touching the
 * framebuffer with new cliprects.
 *
 * ==> Cliprect-dependent buffers associated with a
 * cliprect-timestamp.  All of the buffers associated with a timestamp
 * must go to hardware before any buffer with a newer timestamp.
 *
 * ==> Dma should be queued per-drawable for correct X/GL
 * synchronization.  Or can fences be used for this?
 *
 * Applies to: Blit operations, metaops, X server operations -- X
 * server automatically waits on its own dma to complete before
 * modifying cliprects ???
 */
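
/* Throw away the current batch contents: drop our reference on the old
 * buffer object, allocate a fresh one, and point batch->map/batch->ptr
 * at writable memory again.
 */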
static void
intel_batchbuffer_reset(struct intel_batchbuffer *batch)
{
   struct intel_context *intel = batch->intel;

   if (batch->buf != NULL) {
      dri_bo_unreference(batch->buf);
      batch->buf = NULL;
   }

   if (!batch->buffer)
      batch->buffer = malloc(intel->maxBatchSize);

   batch->buf = dri_bo_alloc(intel->bufmgr, "batchbuffer",
                             intel->maxBatchSize, 4096);
   if (batch->buffer)
      batch->map = batch->buffer;
   else {
      dri_bo_map(batch->buf, GL_TRUE);
      batch->map = batch->buf->virtual;
   }

   batch->size = intel->maxBatchSize;
   batch->ptr = batch->map;
   batch->dirty_state = ~0;
}
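
/* Create a batchbuffer for the given context; the result comes back
 * already reset and ready for emission.
 */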
struct intel_batchbuffer *
intel_batchbuffer_alloc(struct intel_context *intel)
{
   struct intel_batchbuffer *batch = calloc(sizeof(*batch), 1);

   batch->intel = intel;
   intel_batchbuffer_reset(batch);

   return batch;
}
void
intel_batchbuffer_free(struct intel_batchbuffer *batch)
{
   if (batch->buffer)
      free(batch->buffer);
   else if (batch->map)
      dri_bo_unmap(batch->buf);

   dri_bo_unreference(batch->buf);
   free(batch);
}
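
/* Usage sketch (illustrative only, not called from this file): the typical
 * lifetime of a batchbuffer pairs the entry points above as follows.
 * intel_batchbuffer_flush() is assumed to be the macro wrapper around
 * _intel_batchbuffer_flush() from intel_batchbuffer.h.
 */
#if 0
static void
example_batch_lifecycle(struct intel_context *intel)
{
   struct intel_batchbuffer *batch = intel_batchbuffer_alloc(intel);

   /* ... emit state and commands into batch ... */

   intel_batchbuffer_flush(batch);
   intel_batchbuffer_free(batch);
}
#endif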
/* TODO: Push this whole function into bufmgr.
 */
static void
do_flush_locked(struct intel_batchbuffer *batch, GLuint used)
{
   struct intel_context *intel = batch->intel;
   int x_off = 0, y_off = 0;

   if (batch->buffer)
      dri_bo_subdata(batch->buf, 0, used, batch->buffer);
   else
      dri_bo_unmap(batch->buf);

   dri_bo_exec(batch->buf, used, NULL, 0, (x_off & 0xffff) | (y_off << 16));

   if (INTEL_DEBUG & DEBUG_BATCH) {
      dri_bo_map(batch->buf, GL_FALSE);
      intel_decode(batch->buf->virtual, used / 4, batch->buf->offset,
                   intel->intelScreen->deviceID);
      dri_bo_unmap(batch->buf);

      if (intel->vtbl.debug_batch != NULL)
         intel->vtbl.debug_batch(intel);
   }

   intel->vtbl.new_batch(intel);
}
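
/* Developer note: the decode path above is reached by setting the
 * INTEL_DEBUG environment variable to include the batch flag, e.g.
 * INTEL_DEBUG=batch, which makes every flush disassemble its contents.
 */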
void
_intel_batchbuffer_flush(struct intel_batchbuffer *batch, const char *file,
                         int line)
{
   struct intel_context *intel = batch->intel;
   GLuint used = batch->ptr - batch->map;

   if (!intel->using_dri2_swapbuffers &&
       intel->first_post_swapbuffers_batch == NULL) {
      intel->first_post_swapbuffers_batch = intel->batch->buf;
      drm_intel_bo_reference(intel->first_post_swapbuffers_batch);
   }

   if (used == 0)
      return;

   if (INTEL_DEBUG & DEBUG_BATCH)
      fprintf(stderr, "%s:%d: Batchbuffer flush with %db used\n", file, line,
              used);

   batch->reserved_space = 0;
   /* Emit a flush if the bufmgr doesn't do it for us. */
   if (intel->always_flush_cache) {
      intel_batchbuffer_emit_mi_flush(batch);
      used = batch->ptr - batch->map;
   }

   /* Round batchbuffer usage to 2 DWORDs, counting the
    * MI_BATCH_BUFFER_END emitted below.
    */
   if ((used & 4) == 0) {
      *(GLuint *) (batch->ptr) = 0; /* noop */
      batch->ptr += 4;
      used = batch->ptr - batch->map;
   }

   /* Mark the end of the buffer. */
   *(GLuint *) (batch->ptr) = MI_BATCH_BUFFER_END;
   batch->ptr += 4;
   used = batch->ptr - batch->map;

   /* Workaround for recursive batchbuffer flushing: If the window is
    * moved, we can get into a case where we try to flush during a
    * flush.  What happens is that when we try to grab the lock for
    * the first flush, we detect that the window moved which then
    * causes another flush (from the intel_draw_buffer() call in
    * intelUpdatePageFlipping()).  To work around this we reset the
    * batchbuffer tail pointer before trying to get the lock.  This
    * prevents the nested buffer flush, but a better fix would be to
    * avoid that in the first place. */
   batch->ptr = batch->map;

   if (intel->vtbl.finish_batch)
      intel->vtbl.finish_batch(intel);

   /* Check that we didn't just wrap our batchbuffer at a bad time. */
   assert(!intel->no_batch_wrap);

   batch->reserved_space = BATCH_RESERVED;

   /* TODO: Just pass the relocation list and dma buffer up to the
    * kernel.
    */
   do_flush_locked(batch, used);

   if (INTEL_DEBUG & DEBUG_SYNC) {
      fprintf(stderr, "waiting for idle\n");
      dri_bo_map(batch->buf, GL_TRUE);
      dri_bo_unmap(batch->buf);
   }

   /* Reset the buffer:
    */
   intel_batchbuffer_reset(batch);
}
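
/* Callers normally reach _intel_batchbuffer_flush() through an
 * intel_batchbuffer_flush() macro that records the call site for the
 * DEBUG_BATCH message above; a minimal sketch of that wrapper (assumed
 * to live in intel_batchbuffer.h):
 */
#if 0
#define intel_batchbuffer_flush(batch) \
   _intel_batchbuffer_flush(batch, __FILE__, __LINE__)
#endif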
/* This is the only way buffers get added to the validate list.
 */
GLboolean
intel_batchbuffer_emit_reloc(struct intel_batchbuffer *batch,
                             dri_bo *buffer,
                             uint32_t read_domains, uint32_t write_domain,
                             uint32_t delta)
{
   int ret;

   if (batch->ptr - batch->map > batch->buf->size)
      _mesa_printf("bad relocation ptr %p map %p offset %d size %d\n",
                   batch->ptr, batch->map, batch->ptr - batch->map,
                   batch->buf->size);

   ret = dri_bo_emit_reloc(batch->buf, read_domains, write_domain,
                           delta, batch->ptr - batch->map, buffer);

   /* Using the old buffer offset, write in what the right data would be, in
    * case the buffer doesn't move and we can short-circuit the relocation
    * processing in the kernel.
    */
   intel_batchbuffer_emit_dword(batch, buffer->offset + delta);

   return GL_TRUE;
}
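
/* Usage sketch (hypothetical helper, for exposition only): a command that
 * references a buffer emits its packet dwords and then calls
 * intel_batchbuffer_emit_reloc() at the position where the buffer's
 * address belongs.
 */
#if 0
static void
example_emit_buffer_pointer(struct intel_batchbuffer *batch, dri_bo *bo)
{
   intel_batchbuffer_emit_reloc(batch, bo,
                                I915_GEM_DOMAIN_RENDER, /* read_domains */
                                0,                      /* write_domain */
                                0);                     /* delta */
}
#endif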
void
intel_batchbuffer_data(struct intel_batchbuffer *batch,
                       const void *data, GLuint bytes)
{
   assert((bytes & 3) == 0);
   intel_batchbuffer_require_space(batch, bytes);
   __memcpy(batch->ptr, data, bytes);
   batch->ptr += bytes;
}
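
/* Usage sketch: copy a pre-built run of dwords into the batch in one call
 * instead of emitting them one at a time (the packet contents here are
 * illustrative, not a meaningful command sequence):
 */
#if 0
   static const GLuint example_packet[2] = { MI_FLUSH, MI_NOOP };
   intel_batchbuffer_data(batch, example_packet, sizeof(example_packet));
#endif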
/* Emit a pipelined flush to either flush render and texture cache for
 * reading from a FBO-drawn texture, or flush so that frontbuffer
 * render appears on the screen in DRI1.
 *
 * This is also used for the always_flush_cache driconf debug option.
 */
void
intel_batchbuffer_emit_mi_flush(struct intel_batchbuffer *batch)
{
   struct intel_context *intel = batch->intel;

   if (intel->gen >= 4) {
      BEGIN_BATCH(4);
      OUT_BATCH(_3DSTATE_PIPE_CONTROL |
                PIPE_CONTROL_INSTRUCTION_FLUSH |
                PIPE_CONTROL_WRITE_FLUSH |
                PIPE_CONTROL_NO_WRITE);
      OUT_BATCH(0); /* write address */
      OUT_BATCH(0); /* write data */
      OUT_BATCH(0); /* write data */
      ADVANCE_BATCH();
   }