/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include "intel_batchbuffer.h"
#include "intel_context.h"
#include "intel_screen.h"
#include "intel_reg.h"
/* Relocations in kernel space:
 *  - pass dma buffer separately
 *  - memory manager knows how to patch
 *  - pass list of dependent buffers
 *  - pass relocation list
 *
 * Either:
 *  - get back an offset for buffer to fire
 *  - memory manager knows how to fire buffer
 *
 * Really want the buffer to be AGP and pinned.
 */
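/*
 * A minimal sketch of the patch step described above, not driver code:
 * each relocation records where in the batch a buffer address belongs,
 * so applying it is a single store once the memory manager has assigned
 * the buffer its final offset.  The example_* name is hypothetical; the
 * buffer_reloc fields mirror the ones used later in this file.
 */
#if 0
static void
example_apply_reloc(uint * batch_dwords, const struct buffer_reloc *r)
{
   /* Overwrite the placeholder dword with the real buffer address. */
   batch_dwords[r->offset / 4] = driBOOffset(r->buf) + r->delta;
}
#endif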
/* Cliprect fence: The highest fence protecting a dma buffer
 * containing explicit cliprect information.  Like the old drawable
 * lock but irq-driven.  X server must wait for this fence to expire
 * before changing cliprects [and then doing sw rendering?].  For
 * other dma buffers, the scheduler will grab current cliprect info
 * and mix into buffer.  X server must hold the lock while changing
 * cliprects???  Make per-drawable.  Need cliprects in shared memory
 * -- beats storing them with every cmd buffer in the queue.
 *
 * ==> X server must wait for this fence to expire before touching the
 * framebuffer with new cliprects.
 *
 * ==> Cliprect-dependent buffers associated with a
 * cliprect-timestamp.  All of the buffers associated with a timestamp
 * must go to hardware before any buffer with a newer timestamp.
 *
 * ==> Dma should be queued per-drawable for correct X/GL
 * synchronization.  Or can fences be used for this?
 *
 * Applies to: Blit operations, metaops, X server operations -- X
 * server automatically waits on its own dma to complete before
 * modifying cliprects ???
 */
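/*
 * Hypothetical sketch of the wait described above; the cliprect fence
 * itself is not implemented in this file.  The X server side would
 * block on the newest fence covering cliprect-dependent dma before
 * installing new cliprects (the example_* name is made up).
 */
#if 0
static void
example_change_cliprects(struct _DriFenceObject *cliprect_fence)
{
   /* Wait for all dma that baked in the old cliprects to retire... */
   driFenceFinish(cliprect_fence,
                  DRM_FENCE_TYPE_EXE | DRM_I915_FENCE_TYPE_RW, GL_FALSE);

   /* ...only then is it safe to install new cliprects or touch the
    * framebuffer.
    */
}
#endif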
/* Debug aid: print the batch contents as hex dwords. */
static void
intel_dump_batchbuffer(uint offset, uint * ptr, uint count)
{
   int i;

   printf("\n\n\nSTART BATCH (%d dwords):\n", count / 4);
   for (i = 0; i < count / 4; i += 1)
      printf("\t0x%08x\n", ptr[i]);
   printf("END BATCH\n\n\n");
}
void
intel_batchbuffer_reset(struct intel_batchbuffer *batch)
{
   int i;

   if (batch->map) {
      driBOUnmap(batch->buffer);
      batch->map = NULL;
   }

   /*
    * Get a new, free batchbuffer.
    */
   batch->size = BATCH_SZ;
   driBOData(batch->buffer, batch->size, NULL, 0);

   driBOResetList(&batch->list);

   /*
    * Unreference buffers previously on the relocation list.
    */
   for (i = 0; i < batch->nr_relocs; i++) {
      struct buffer_reloc *r = &batch->reloc[i];
      driBOUnReference(r->buf);
   }

   batch->list_count = 0;
   batch->nr_relocs = 0;

   /*
    * We don't refcount the batchbuffer itself since we can't destroy it
    * while it's on the list.
    */
   driBOAddListItem(&batch->list, batch->buffer,
                    DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_EXE,
                    DRM_BO_MASK_MEM | DRM_BO_FLAG_EXE);

   batch->map = driBOMap(batch->buffer, DRM_BO_FLAG_WRITE, 0);
   batch->ptr = batch->map;
}
/*======================================================================
 * Public functions
 */
struct intel_batchbuffer *
intel_batchbuffer_alloc(struct intel_context *intel)
{
   struct intel_batchbuffer *batch = calloc(sizeof(*batch), 1);

   batch->intel = intel;

   driGenBuffers(intel->intelScreen->batchPool, "batchbuffer", 1,
                 &batch->buffer, 4096,
                 DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_EXE, 0);
   batch->last_fence = NULL;
   driBOCreateList(20, &batch->list);
   intel_batchbuffer_reset(batch);

   return batch;
}
void
intel_batchbuffer_free(struct intel_batchbuffer *batch)
{
   if (batch->last_fence) {
      driFenceFinish(batch->last_fence,
                     DRM_FENCE_TYPE_EXE | DRM_I915_FENCE_TYPE_RW,
                     GL_FALSE);
      driFenceUnReference(batch->last_fence);
      batch->last_fence = NULL;
   }

   if (batch->map) {
      driBOUnmap(batch->buffer);
      batch->map = NULL;
   }

   driBOUnReference(batch->buffer);
   batch->buffer = NULL;
   free(batch);
}
static void
intel_batch_ioctl(struct intel_context *intel,
                  uint start_offset, uint used, boolean allow_unlock)
{
   drmI830BatchBuffer batch;

   batch.start = start_offset;
   batch.used = used;
   batch.cliprects = NULL;      /* unused */
   batch.num_cliprects = 0;
   batch.DR1 = 0;
   batch.DR4 = 0;               /* still need this ? */

   DBG(IOCTL, "%s: 0x%x..0x%x DR4: %x cliprects: %d\n",
       __FUNCTION__,
       batch.start,
       batch.start + batch.used * 4, batch.DR4, batch.num_cliprects);

   if (drmCommandWrite(intel->driFd, DRM_I830_BATCHBUFFER, &batch,
                       sizeof(batch))) {
      printf("DRM_I830_BATCHBUFFER: %d\n", -errno);
      UNLOCK_HARDWARE(intel);
      exit(1);
   }
}
/* TODO: Push this whole function into bufmgr.
 */
static void
do_flush_locked(struct intel_batchbuffer *batch,
                uint used, boolean allow_unlock)
{
   uint *ptr;
   uint i, fenceFlags;
   struct _DriFenceObject *fo;

   driBOValidateList(batch->intel->driFd, &batch->list);

   /* Apply the relocations.  This nasty map indicates to me that the
    * whole task should be done internally by the memory manager, and
    * that dma buffers probably need to be pinned within agp space.
    */
   ptr = (uint *) driBOMap(batch->buffer, DRM_BO_FLAG_WRITE,
                           DRM_BO_HINT_ALLOW_UNFENCED_MAP);

   for (i = 0; i < batch->nr_relocs; i++) {
      struct buffer_reloc *r = &batch->reloc[i];

      ptr[r->offset / 4] = driBOOffset(r->buf) + r->delta;
   }

   /* Debug hook, disabled by default: dump the patched batch. */
   if (0)
      intel_dump_batchbuffer(0, ptr, used);

   driBOUnmap(batch->buffer);
   batch->map = NULL;

   intel_batch_ioctl(batch->intel,
                     driBOOffset(batch->buffer),
                     used, allow_unlock);

   /*
    * Kernel fencing.  The flags tell the kernel that we've
    * programmed an MI_FLUSH.
    */
   fenceFlags = DRM_I915_FENCE_FLAG_FLUSHED;
   fo = driFenceBuffers(batch->intel->driFd, "Batch fence", fenceFlags);

   /*
    * User space fencing.
    */
   driBOFence(batch->buffer, fo);

   if (driFenceType(fo) == DRM_FENCE_TYPE_EXE) {
      /*
       * Oops. We only validated a batch buffer. This means we
       * didn't do any proper rendering. Discard this fence object.
       */
      driFenceUnReference(fo);
   }
   else {
      driFenceUnReference(batch->last_fence);
      batch->last_fence = fo;
      for (i = 0; i < batch->nr_relocs; i++) {
         struct buffer_reloc *r = &batch->reloc[i];
         driBOFence(r->buf, fo);
      }
   }
}
struct _DriFenceObject *
intel_batchbuffer_flush(struct intel_batchbuffer *batch)
{
   struct intel_context *intel = batch->intel;
   uint used = batch->ptr - batch->map;
   const boolean was_locked = intel->locked;

   if (used == 0)
      return batch->last_fence;

#define MI_FLUSH ((0 << 29) | (4 << 23))

   /* Add the MI_BATCH_BUFFER_END.  Always add an MI_FLUSH -- this is a
    * performance drain that we would like to avoid.  The extra padding
    * dword keeps the batch ending quadword-aligned.
    */
   if (used & 4) {
      ((int *) batch->ptr)[0] = MI_FLUSH;
      ((int *) batch->ptr)[1] = 0;
      ((int *) batch->ptr)[2] = MI_BATCH_BUFFER_END;
      used += 12;
   }
   else {
      ((int *) batch->ptr)[0] = MI_FLUSH;
      ((int *) batch->ptr)[1] = MI_BATCH_BUFFER_END;
      used += 8;
   }

   driBOUnmap(batch->buffer);
   batch->ptr = NULL;
   batch->map = NULL;

   /* TODO: Just pass the relocation list and dma buffer up to the
    * kernel.
    */
   if (!was_locked)
      LOCK_HARDWARE(intel);

   do_flush_locked(batch, used, GL_FALSE);

   if (!was_locked)
      UNLOCK_HARDWARE(intel);

   /* Reset the buffer:
    */
   intel_batchbuffer_reset(batch);
   return batch->last_fence;
}
void
intel_batchbuffer_finish(struct intel_batchbuffer *batch)
{
   struct _DriFenceObject *fence = intel_batchbuffer_flush(batch);

   driFenceReference(fence);
   driFenceFinish(fence,
                  DRM_FENCE_TYPE_EXE | DRM_I915_FENCE_TYPE_RW,
                  GL_FALSE);
   driFenceUnReference(fence);
}
/* This is the only way buffers get added to the validate list.
 */
boolean
intel_batchbuffer_emit_reloc(struct intel_batchbuffer *batch,
                             struct _DriBufferObject *buffer,
                             uint flags, uint mask, uint delta)
{
   assert(batch->nr_relocs < MAX_RELOCS);

   driBOAddListItem(&batch->list, buffer, flags, mask);

   {
      struct buffer_reloc *r = &batch->reloc[batch->nr_relocs++];
      driBOReference(buffer);
      r->buf = buffer;
      r->offset = batch->ptr - batch->map;
      r->delta = delta;

      /* Emit a placeholder dword; do_flush_locked() patches in the
       * real buffer offset at flush time.
       */
      *(uint *) batch->ptr = 0x12345678;
   }

   batch->ptr += 4;
   return GL_TRUE;
}
void
intel_batchbuffer_data(struct intel_batchbuffer *batch,
                       const void *data, uint bytes, uint flags)
{
   assert((bytes & 3) == 0);

   intel_batchbuffer_require_space(batch, bytes, flags);
   memcpy(batch->ptr, data, bytes);
   batch->ptr += bytes;
}
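/*
 * Usage sketch (hypothetical caller, not part of this file): emit a
 * two-dword command whose second dword points into another buffer,
 * then submit and wait.  The opcode value and the example_* name are
 * made up; the intel_batchbuffer_* calls are the API defined above.
 */
#if 0
static void
example_emit(struct intel_batchbuffer *batch,
             struct _DriBufferObject *dest)
{
   uint cmd = 0x02000000;       /* illustrative opcode dword only */

   intel_batchbuffer_require_space(batch, 2 * sizeof(uint), 0);

   /* First dword: the command itself. */
   intel_batchbuffer_data(batch, &cmd, sizeof(cmd), 0);

   /* Second dword: an address in dest, patched at flush time. */
   intel_batchbuffer_emit_reloc(batch, dest,
                                DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_WRITE,
                                DRM_BO_MASK_MEM | DRM_BO_FLAG_WRITE, 0);

   /* Submit the batch and block until the hardware is done. */
   intel_batchbuffer_finish(batch);
}
#endif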