1 /**************************************************************************
3 * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 **************************************************************************/
28 #include "intel_batchbuffer.h"
29 #include "intel_context.h"
34 intel_dump_batchbuffer(GLuint offset
, GLuint
* ptr
, GLuint count
)
37 fprintf(stderr
, "\n\n\nSTART BATCH (%d dwords):\n", count
/ 4);
38 for (i
= 0; i
< count
/ 4; i
+= 4)
39 fprintf(stderr
, "0x%x:\t0x%08x 0x%08x 0x%08x 0x%08x\n",
40 offset
+ i
* 4, ptr
[i
], ptr
[i
+ 1], ptr
[i
+ 2], ptr
[i
+ 3]);
41 fprintf(stderr
, "END BATCH\n\n\n");
46 intel_realloc_relocs(struct intel_batchbuffer
*batch
, int num_relocs
)
48 unsigned long size
= num_relocs
* I915_RELOC0_STRIDE
+ I915_RELOC_HEADER
;
50 size
*= sizeof(uint32_t);
51 batch
->reloc
= realloc(batch
->reloc
, size
);
52 batch
->reloc_size
= num_relocs
;
57 intel_batchbuffer_reset(struct intel_batchbuffer
*batch
)
60 * Get a new, free batchbuffer.
63 struct drm_bo_info_req
*req
;
65 driBOUnrefUserList(batch
->list
);
66 driBOResetList(batch
->list
);
68 batch
->size
= 4 * 4096; // ZZZ JB batch->intel->intelScreen->maxBatchSize;
69 driBOData(batch
->buffer
, batch
->size
, NULL
, NULL
, 0);
72 * Add the batchbuffer to the validate list.
75 driBOAddListItem(batch
->list
, batch
->buffer
,
76 DRM_BO_FLAG_EXE
| DRM_BO_FLAG_MEM_TT
,
77 DRM_BO_FLAG_EXE
| DRM_BO_MASK_MEM
,
78 &batch
->dest_location
, &batch
->node
);
80 req
= &batch
->node
->bo_arg
.d
.req
.bo_req
;
83 * Set up information needed for us to make relocations
84 * relative to the underlying drm buffer objects.
87 driReadLockKernelBO();
88 bo
= driBOKernel(batch
->buffer
);
89 req
->presumed_offset
= (uint64_t) bo
->offset
;
90 req
->hint
= DRM_BO_HINT_PRESUMED_OFFSET
;
91 batch
->drmBOVirtual
= (uint8_t *) bo
->virtual;
92 driReadUnlockKernelBO();
95 * Adjust the relocation buffer size.
98 if (batch
->reloc_size
> INTEL_MAX_RELOCS
||
100 intel_realloc_relocs(batch
, INTEL_DEFAULT_RELOCS
);
102 assert(batch
->reloc
!= NULL
);
103 batch
->reloc
[0] = 0; /* No relocs yet. */
104 batch
->reloc
[1] = 1; /* Reloc type 1 */
105 batch
->reloc
[2] = 0; /* Only a single relocation list. */
106 batch
->reloc
[3] = 0; /* Only a single relocation list. */
108 batch
->map
= driBOMap(batch
->buffer
, DRM_BO_FLAG_WRITE
, 0);
109 batch
->poolOffset
= driBOPoolOffset(batch
->buffer
);
110 batch
->ptr
= batch
->map
;
111 batch
->dirty_state
= ~0;
112 batch
->nr_relocs
= 0;
114 batch
->id
= 0;//batch->intel->intelScreen->batch_id++;
/*======================================================================
 * Public functions
 */
120 struct intel_batchbuffer
*
121 intel_batchbuffer_alloc(struct intel_context
*intel
)
123 struct intel_batchbuffer
*batch
= calloc(sizeof(*batch
), 1);
125 batch
->intel
= intel
;
127 driGenBuffers(intel
->intelScreen
->batchPool
, "batchbuffer", 1,
128 &batch
->buffer
, 4096,
129 DRM_BO_FLAG_MEM_TT
| DRM_BO_FLAG_EXE
, 0);
130 batch
->last_fence
= NULL
;
131 batch
->list
= driBOCreateList(20);
133 intel_batchbuffer_reset(batch
);
138 intel_batchbuffer_free(struct intel_batchbuffer
*batch
)
140 if (batch
->last_fence
) {
141 driFenceFinish(batch
->last_fence
,
142 DRM_FENCE_TYPE_EXE
, GL_FALSE
);
143 driFenceUnReference(&batch
->last_fence
);
146 driBOUnmap(batch
->buffer
);
149 driBOUnReference(batch
->buffer
);
150 driBOFreeList(batch
->list
);
153 batch
->buffer
= NULL
;
158 intel_offset_relocation(struct intel_batchbuffer
*batch
,
160 struct _DriBufferObject
*driBO
,
165 struct _drmBONode
*node
;
167 struct drm_bo_info_req
*req
;
169 driBOAddListItem(batch
->list
, driBO
, val_flags
, val_mask
,
171 req
= &node
->bo_arg
.d
.req
.bo_req
;
173 if (!(req
->hint
& DRM_BO_HINT_PRESUMED_OFFSET
)) {
176 * Stop other threads from tampering with the underlying
177 * drmBO while we're reading its offset.
180 driReadLockKernelBO();
181 req
->presumed_offset
= (uint64_t) driBOKernel(driBO
)->offset
;
182 driReadUnlockKernelBO();
183 req
->hint
= DRM_BO_HINT_PRESUMED_OFFSET
;
186 pre_add
+= driBOPoolOffset(driBO
);
188 if (batch
->nr_relocs
== batch
->reloc_size
)
189 intel_realloc_relocs(batch
, batch
->reloc_size
* 2);
191 reloc
= batch
->reloc
+
192 (I915_RELOC_HEADER
+ batch
->nr_relocs
* I915_RELOC0_STRIDE
);
194 reloc
[0] = ((uint8_t *)batch
->ptr
- batch
->drmBOVirtual
);
195 intel_batchbuffer_emit_dword(batch
, req
->presumed_offset
+ pre_add
);
198 reloc
[3] = batch
->dest_location
;
203 i915_drm_copy_reply(const struct drm_bo_info_rep
* rep
, drmBO
* buf
)
205 buf
->handle
= rep
->handle
;
206 buf
->flags
= rep
->flags
;
207 buf
->size
= rep
->size
;
208 buf
->offset
= rep
->offset
;
209 buf
->mapHandle
= rep
->arg_handle
;
210 buf
->proposedFlags
= rep
->proposed_flags
;
211 buf
->start
= rep
->buffer_start
;
212 buf
->fenceFlags
= rep
->fence_flags
;
213 buf
->replyFlags
= rep
->rep_flags
;
214 buf
->pageAlignment
= rep
->page_alignment
;
218 i915_execbuf(struct intel_batchbuffer
*batch
,
220 GLboolean ignore_cliprects
,
222 struct drm_i915_execbuffer
*ea
)
224 struct intel_context
*intel
= batch
->intel
;
227 struct drm_i915_op_arg
*arg
, *first
;
228 struct drm_bo_op_req
*req
;
229 struct drm_bo_info_rep
*rep
;
230 uint64_t *prevNext
= NULL
;
236 for (l
= list
->list
.next
; l
!= &list
->list
; l
= l
->next
) {
237 node
= DRMLISTENTRY(drmBONode
, l
, head
);
246 *prevNext
= (unsigned long)arg
;
248 prevNext
= &arg
->next
;
249 req
->bo_req
.handle
= node
->buf
->handle
;
250 req
->op
= drm_bo_validate
;
251 req
->bo_req
.flags
= node
->arg0
;
252 req
->bo_req
.mask
= node
->arg1
;
253 req
->bo_req
.hint
|= 0;
257 memset(ea
, 0, sizeof(*ea
));
258 ea
->num_buffers
= count
;
259 ea
->batch
.start
= batch
->poolOffset
;
260 ea
->batch
.used
= used
;
261 #if 0 /* ZZZ JB: no cliprects used */
262 ea
->batch
.cliprects
= intel
->pClipRects
;
263 ea
->batch
.num_cliprects
= ignore_cliprects
? 0 : intel
->numClipRects
;
265 ea
->batch
.DR4
= 0;((((GLuint
) intel
->drawX
) & 0xffff) |
266 (((GLuint
) intel
->drawY
) << 16));
268 ea
->batch
.cliprects
= NULL
;
269 ea
->batch
.num_cliprects
= 0;
273 ea
->fence_arg
.flags
= DRM_I915_FENCE_FLAG_FLUSHED
;
274 ea
->ops_list
= (unsigned long) first
;
275 first
->reloc_ptr
= (unsigned long) batch
->reloc
;
276 batch
->reloc
[0] = batch
->nr_relocs
;
280 ret
= drmCommandWriteRead(intel
->driFd
, DRM_I915_EXECBUFFER
, ea
,
282 } while (ret
== -EAGAIN
);
287 for (l
= list
->list
.next
; l
!= &list
->list
; l
= l
->next
) {
288 node
= DRMLISTENTRY(drmBONode
, l
, head
);
290 rep
= &arg
->d
.rep
.bo_info
;
296 return arg
->d
.rep
.ret
;
299 i915_drm_copy_reply(rep
, buf
);
304 /* TODO: Push this whole function into bufmgr.
306 static struct _DriFenceObject
*
307 do_flush_locked(struct intel_batchbuffer
*batch
,
309 GLboolean ignore_cliprects
, GLboolean allow_unlock
)
311 struct intel_context
*intel
= batch
->intel
;
312 struct _DriFenceObject
*fo
;
315 struct drm_i915_execbuffer ea
;
318 driBOValidateUserList(batch
->list
);
319 boList
= driGetdrmBOList(batch
->list
);
321 #if 0 /* ZZZ JB Allways run */
322 if (!(intel
->numClipRects
== 0 && !ignore_cliprects
)) {
326 ret
= i915_execbuf(batch
, used
, ignore_cliprects
, boList
, &ea
);
328 driPutdrmBOList(batch
->list
);
332 driPutdrmBOList(batch
->list
);
336 if (ea
.fence_arg
.error
!= 0) {
339 * The hardware has been idled by the kernel.
340 * Don't fence the driBOs.
343 if (batch
->last_fence
)
344 driFenceUnReference(&batch
->last_fence
);
345 #if 0 /* ZZZ JB: no _mesa_* funcs in gallium */
346 _mesa_printf("fence error\n");
348 batch
->last_fence
= NULL
;
353 fence
.handle
= ea
.fence_arg
.handle
;
354 fence
.fence_class
= ea
.fence_arg
.fence_class
;
355 fence
.type
= ea
.fence_arg
.type
;
356 fence
.flags
= ea
.fence_arg
.flags
;
357 fence
.signaled
= ea
.fence_arg
.signaled
;
359 fo
= driBOFenceUserList(batch
->intel
->intelScreen
->mgr
, batch
->list
,
360 "SuperFence", &fence
);
362 if (driFenceType(fo
) & DRM_I915_FENCE_TYPE_RW
) {
363 if (batch
->last_fence
)
364 driFenceUnReference(&batch
->last_fence
);
366 * FIXME: Context last fence??
368 batch
->last_fence
= fo
;
369 driFenceReference(fo
);
372 #if 0 /* ZZZ JB: fix this */
373 intel
->vtbl
.lost_hardware(intel
);
381 struct _DriFenceObject
*
382 intel_batchbuffer_flush(struct intel_batchbuffer
*batch
)
384 struct intel_context
*intel
= batch
->intel
;
385 GLuint used
= batch
->ptr
- batch
->map
;
386 GLboolean was_locked
= intel
->locked
;
387 struct _DriFenceObject
*fence
;
390 driFenceReference(batch
->last_fence
);
391 return batch
->last_fence
;
394 /* Add the MI_BATCH_BUFFER_END. Always add an MI_FLUSH - this is a
395 * performance drain that we would like to avoid.
397 #if 0 /* ZZZ JB: what should we do here? */
399 ((int *) batch
->ptr
)[0] = intel
->vtbl
.flush_cmd();
400 ((int *) batch
->ptr
)[1] = 0;
401 ((int *) batch
->ptr
)[2] = MI_BATCH_BUFFER_END
;
405 ((int *) batch
->ptr
)[0] = intel
->vtbl
.flush_cmd();
406 ((int *) batch
->ptr
)[1] = MI_BATCH_BUFFER_END
;
411 ((int *) batch
->ptr
)[0] = ((0<<29)|(4<<23)); // MI_FLUSH;
412 ((int *) batch
->ptr
)[1] = 0;
413 ((int *) batch
->ptr
)[2] = (0xA<<23); // MI_BATCH_BUFFER_END;
417 ((int *) batch
->ptr
)[0] = ((0<<29)|(4<<23)); // MI_FLUSH;
418 ((int *) batch
->ptr
)[1] = (0xA<<23); // MI_BATCH_BUFFER_END;
422 driBOUnmap(batch
->buffer
);
426 /* TODO: Just pass the relocation list and dma buffer up to the
430 LOCK_HARDWARE(intel
);
432 fence
= do_flush_locked(batch
, used
, !(batch
->flags
& INTEL_BATCH_CLIPRECTS
),
436 UNLOCK_HARDWARE(intel
);
440 intel_batchbuffer_reset(batch
);
445 intel_batchbuffer_finish(struct intel_batchbuffer
*batch
)
447 struct _DriFenceObject
*fence
= intel_batchbuffer_flush(batch
);
448 driFenceFinish(fence
, driFenceType(fence
), GL_FALSE
);
449 driFenceUnReference(&fence
);
453 intel_batchbuffer_data(struct intel_batchbuffer
*batch
,
454 const void *data
, GLuint bytes
, GLuint flags
)
456 assert((bytes
& 3) == 0);
457 intel_batchbuffer_require_space(batch
, bytes
, flags
);
458 memcpy(batch
->ptr
, data
, bytes
);