#include "intel_be_batchbuffer.h"
#include "intel_be_context.h"
#include "intel_be_device.h"

#include <assert.h>
#include <errno.h>
#include <stdlib.h>
#include <string.h>

#include "xf86drm.h"   /* drmCommandWriteRead() */
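/*
 * Relocation buffer layout, as set up in intel_be_batchbuffer_reset() and
 * consumed by the DRM_I915_EXECBUFFER ioctl below: I915_RELOC_HEADER
 * uint32_t words of header (reloc count, reloc type, list linkage),
 * followed by one I915_RELOC0_STRIDE-word record per relocation.  The
 * count word reloc[0] stays zero until submission time.
 */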
static void
intel_realloc_relocs(struct intel_be_batchbuffer *batch, int num_relocs)
{
   unsigned long size = num_relocs * I915_RELOC0_STRIDE + I915_RELOC_HEADER;

   size *= sizeof(uint32_t);
   batch->reloc = realloc(batch->reloc, size);
   batch->reloc_size = num_relocs;
}
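/*
 * Growing via realloc() keeps any relocation records already written;
 * intel_be_offset_relocation() relies on this when it doubles the buffer
 * mid-batch.
 */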
void
intel_be_batchbuffer_reset(struct intel_be_batchbuffer *batch)
{
   /*
    * Get a new, free batchbuffer.
    */
   drmBO *bo;
   struct drm_bo_info_req *req;

   driBOUnrefUserList(batch->list);
   driBOResetList(batch->list);

   /* base.size is the size available to the i915simple driver */
   batch->base.size = batch->device->max_batch_size - BATCH_RESERVED;
   batch->base.actual_size = batch->device->max_batch_size;
   driBOData(batch->buffer, batch->base.actual_size, NULL, NULL, 0);
   /*
    * Add the batchbuffer to the validate list.
    */

   driBOAddListItem(batch->list, batch->buffer,
                    DRM_BO_FLAG_EXE | DRM_BO_FLAG_MEM_TT,
                    DRM_BO_FLAG_EXE | DRM_BO_MASK_MEM,
                    &batch->dest_location, &batch->node);

   req = &batch->node->bo_arg.d.req.bo_req;

   /*
    * Set up information needed for us to make relocations
    * relative to the underlying drm buffer objects.
    */

   driReadLockKernelBO();
   bo = driBOKernel(batch->buffer);
   req->presumed_offset = (uint64_t) bo->offset;
   req->hint = DRM_BO_HINT_PRESUMED_OFFSET;
   batch->drmBOVirtual = (uint8_t *) bo->virtual;
   driReadUnlockKernelBO();
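   /*
    * drmBOVirtual caches the kernel BO's CPU mapping base so that each
    * relocation record can store the byte offset of its patch site
    * relative to it (see reloc[0] in intel_be_offset_relocation()).
    */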
   /*
    * Adjust the relocation buffer size.
    */

   if (batch->reloc_size > INTEL_MAX_RELOCS ||
       batch->reloc == NULL)
      intel_realloc_relocs(batch, INTEL_DEFAULT_RELOCS);

   assert(batch->reloc != NULL);
   batch->reloc[0] = 0; /* No relocs yet. */
   batch->reloc[1] = 1; /* Reloc type 1 */
   batch->reloc[2] = 0; /* Only a single relocation list. */
   batch->reloc[3] = 0; /* Only a single relocation list. */
   batch->base.map = driBOMap(batch->buffer, DRM_BO_FLAG_WRITE, 0);
   batch->poolOffset = driBOPoolOffset(batch->buffer);
   batch->base.ptr = batch->base.map;
   batch->dirty_state = ~0;
   batch->nr_relocs = 0;
   batch->flags = 0;
   batch->id = 0;//batch->intel->intelScreen->batch_id++;
}
/*======================================================================
 * Public functions
 */
struct intel_be_batchbuffer *
intel_be_batchbuffer_alloc(struct intel_be_context *intel)
{
   struct intel_be_batchbuffer *batch = calloc(sizeof(*batch), 1);

   batch->intel = intel;
   batch->device = intel->device;

   driGenBuffers(intel->device->batchPool, "batchbuffer", 1,
                 &batch->buffer, 4096,
                 DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_EXE, 0);
   batch->last_fence = NULL;
   batch->list = driBOCreateList(20);

   intel_be_batchbuffer_reset(batch);

   return batch;
}
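/*
 * Lifecycle sketch (illustrative, not from the original source): a context
 * allocates one batchbuffer up front, emits into it, flushes, and frees it
 * at teardown.
 *
 *    struct intel_be_batchbuffer *bb = intel_be_batchbuffer_alloc(intel);
 *    ...emit commands...
 *    intel_be_batchbuffer_finish(bb);   // flush and wait for the fence
 *    intel_be_batchbuffer_free(bb);
 */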
void
intel_be_batchbuffer_free(struct intel_be_batchbuffer *batch)
{
   if (batch->last_fence) {
      driFenceFinish(batch->last_fence,
                     DRM_FENCE_TYPE_EXE, FALSE);
      driFenceUnReference(&batch->last_fence);
   }
   if (batch->base.map) {
      driBOUnmap(batch->buffer);
      batch->base.map = NULL;
   }
   driBOUnReference(batch->buffer);
   driBOFreeList(batch->list);
   if (batch->reloc)
      free(batch->reloc);
   batch->buffer = NULL;
   free(batch);
}
void
intel_be_offset_relocation(struct intel_be_batchbuffer *batch,
                           unsigned pre_add,
                           struct _DriBufferObject *driBO,
                           uint64_t val_flags,
                           uint64_t val_mask)
{
   int itemLoc;
   struct _drmBONode *node;
   uint32_t *reloc;
   struct drm_bo_info_req *req;

   driBOAddListItem(batch->list, driBO, val_flags, val_mask,
                    &itemLoc, &node);
   req = &node->bo_arg.d.req.bo_req;

   if (!(req->hint & DRM_BO_HINT_PRESUMED_OFFSET)) {

      /*
       * Stop other threads from tampering with the underlying
       * drmBO while we're reading its offset.
       */

      driReadLockKernelBO();
      req->presumed_offset = (uint64_t) driBOKernel(driBO)->offset;
      driReadUnlockKernelBO();
      req->hint = DRM_BO_HINT_PRESUMED_OFFSET;
   }

   pre_add += driBOPoolOffset(driBO);

   if (batch->nr_relocs == batch->reloc_size)
      intel_realloc_relocs(batch, batch->reloc_size * 2);

   reloc = batch->reloc +
      (I915_RELOC_HEADER + batch->nr_relocs * I915_RELOC0_STRIDE);

   reloc[0] = ((uint8_t *) batch->base.ptr - batch->drmBOVirtual);
   i915_batchbuffer_dword(&batch->base, req->presumed_offset + pre_add);
   reloc[1] = pre_add;
   reloc[2] = itemLoc;
   reloc[3] = batch->dest_location;
   batch->nr_relocs++;
}
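/*
 * Usage sketch (illustrative; the packet contents are hypothetical): the
 * caller emits the fixed dwords of a command itself, then calls
 * intel_be_offset_relocation() for each dword that must hold a
 * buffer-object address.
 *
 *    i915_batchbuffer_dword(&batch->base, some_blit_header);
 *    intel_be_offset_relocation(batch, dst_offset, dst_bo,
 *                               DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_WRITE,
 *                               DRM_BO_MASK_MEM | DRM_BO_FLAG_WRITE);
 */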
static void
i915_drm_copy_reply(const struct drm_bo_info_rep *rep, drmBO *buf)
{
   buf->handle = rep->handle;
   buf->flags = rep->flags;
   buf->size = rep->size;
   buf->offset = rep->offset;
   buf->mapHandle = rep->arg_handle;
   buf->proposedFlags = rep->proposed_flags;
   buf->start = rep->buffer_start;
   buf->fenceFlags = rep->fence_flags;
   buf->replyFlags = rep->rep_flags;
   buf->pageAlignment = rep->page_alignment;
}
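/*
 * i915_execbuf() walks the validate list and chains each node's
 * drm_i915_op_arg into the singly linked list that the DRM_I915_EXECBUFFER
 * ioctl expects: prevNext always points at the previous arg's 'next'
 * field, and 'first' (the list head) additionally carries the pointer to
 * the relocation buffer.
 */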
static int
i915_execbuf(struct intel_be_batchbuffer *batch,
             unsigned int used,
             boolean ignore_cliprects,
             drmBOList *list,
             struct drm_i915_execbuffer *ea)
{
// struct intel_be_context *intel = batch->intel;
   drmBONode *node;
   drmMMListHead *l;
   struct drm_i915_op_arg *arg, *first;
   struct drm_bo_op_req *req;
   struct drm_bo_info_rep *rep;
   uint64_t *prevNext = NULL;
   drmBO *buf;
   int ret = 0;
   uint32_t count = 0;

   first = NULL;
   for (l = list->list.next; l != &list->list; l = l->next) {
      node = DRMLISTENTRY(drmBONode, l, head);

      arg = &node->bo_arg;
      req = &arg->d.req;

      if (!first)
         first = arg;

      if (prevNext)
         *prevNext = (unsigned long) arg;

      prevNext = &arg->next;
      req->bo_req.handle = node->buf->handle;
      req->op = drm_bo_validate;
      req->bo_req.flags = node->arg0;
      req->bo_req.mask = node->arg1;
      req->bo_req.hint |= 0;
      count++;
   }
   memset(ea, 0, sizeof(*ea));
   ea->num_buffers = count;
   ea->batch.start = batch->poolOffset;
   ea->batch.used = used;
#if 0 /* ZZZ JB: no cliprects used */
   ea->batch.cliprects = intel->pClipRects;
   ea->batch.num_cliprects = ignore_cliprects ? 0 : intel->numClipRects;
   ea->batch.DR1 = 0;
   ea->batch.DR4 = 0;((((GLuint) intel->drawX) & 0xffff) |
                      (((GLuint) intel->drawY) << 16));
#else
   ea->batch.cliprects = NULL;
   ea->batch.num_cliprects = 0;
   ea->batch.DR1 = 0;
   ea->batch.DR4 = 0;
#endif
   ea->fence_arg.flags = DRM_I915_FENCE_FLAG_FLUSHED;
   ea->ops_list = (unsigned long) first;
   first->reloc_ptr = (unsigned long) batch->reloc;
   batch->reloc[0] = batch->nr_relocs;
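   /*
    * reloc[0] was initialized to zero in intel_be_batchbuffer_reset();
    * the assignment above fills in the real relocation count just before
    * the ioctl consumes the list.
    */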
   do {
      ret = drmCommandWriteRead(batch->device->fd, DRM_I915_EXECBUFFER, ea,
                                sizeof(*ea));
   } while (ret == -EAGAIN);

   if (ret != 0)
      return ret;

   for (l = list->list.next; l != &list->list; l = l->next) {
      node = DRMLISTENTRY(drmBONode, l, head);
      arg = &node->bo_arg;
      rep = &arg->d.rep.bo_info;

      if (!arg->handled)
         return -EFAULT;
      if (arg->d.rep.ret)
         return arg->d.rep.ret;

      buf = node->buf;
      i915_drm_copy_reply(rep, buf);
   }

   return 0;
}
/* TODO: Push this whole function into bufmgr.
 */
static struct _DriFenceObject *
do_flush_locked(struct intel_be_batchbuffer *batch,
                unsigned int used,
                boolean ignore_cliprects, boolean allow_unlock)
{
   struct intel_be_context *intel = batch->intel;
   struct _DriFenceObject *fo;
   drmFence fence;
   drmBOList *boList;
   struct drm_i915_execbuffer ea;
   int ret = 0;

   driBOValidateUserList(batch->list);
   boList = driGetdrmBOList(batch->list);

#if 0 /* ZZZ JB Allways run */
   if (!(intel->numClipRects == 0 && !ignore_cliprects)) {
#else
   if (1) {
#endif
      ret = i915_execbuf(batch, used, ignore_cliprects, boList, &ea);
   } else {
      driPutdrmBOList(batch->list);
      fo = NULL;
      goto out;
   }
   driPutdrmBOList(batch->list);
   if (ret)
      abort();

   if (ea.fence_arg.error != 0) {

      /*
       * The hardware has been idled by the kernel.
       * Don't fence the driBOs.
       */

      if (batch->last_fence)
         driFenceUnReference(&batch->last_fence);
#if 0 /* ZZZ JB: no _mesa_* funcs in gallium */
      _mesa_printf("fence error\n");
#endif
      batch->last_fence = NULL;
      fo = NULL;
      goto out;
   }

   fence.handle = ea.fence_arg.handle;
   fence.fence_class = ea.fence_arg.fence_class;
   fence.type = ea.fence_arg.type;
   fence.flags = ea.fence_arg.flags;
   fence.signaled = ea.fence_arg.signaled;

   fo = driBOFenceUserList(batch->device->fenceMgr, batch->list,
                           "SuperFence", &fence);

   if (driFenceType(fo) & DRM_I915_FENCE_TYPE_RW) {
      if (batch->last_fence)
         driFenceUnReference(&batch->last_fence);
      /*
       * FIXME: Context last fence??
       */
      batch->last_fence = fo;
      driFenceReference(fo);
   }

 out:
#if 0 /* ZZZ JB: fix this */
   intel->vtbl.lost_hardware(intel);
#else
   (void) intel;
#endif
   return fo;
}
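/*
 * The batch must end on a quadword (8-byte) boundary.  The 'used & 4'
 * test below therefore picks between a three-dword tail (MI_FLUSH, pad,
 * MI_BATCH_BUFFER_END) and a two-dword one (MI_FLUSH,
 * MI_BATCH_BUFFER_END).
 */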
struct _DriFenceObject *
intel_be_batchbuffer_flush(struct intel_be_batchbuffer *batch)
{
   struct intel_be_context *intel = batch->intel;
   unsigned int used = batch->base.ptr - batch->base.map;
   boolean was_locked = batch->intel->hardware_locked(intel);
   struct _DriFenceObject *fence;

   if (used == 0) {
      driFenceReference(batch->last_fence);
      return batch->last_fence;
   }

   /* Add the MI_BATCH_BUFFER_END. Always add an MI_FLUSH - this is a
    * performance drain that we would like to avoid.
    */
#if 0 /* ZZZ JB: what should we do here? */
   if (used & 4) {
      ((int *) batch->base.ptr)[0] = intel->vtbl.flush_cmd();
      ((int *) batch->base.ptr)[1] = 0;
      ((int *) batch->base.ptr)[2] = MI_BATCH_BUFFER_END;
      used += 12;
   } else {
      ((int *) batch->base.ptr)[0] = intel->vtbl.flush_cmd();
      ((int *) batch->base.ptr)[1] = MI_BATCH_BUFFER_END;
      used += 8;
   }
#else
   if (used & 4) {
      ((int *) batch->base.ptr)[0] = ((0<<29)|(4<<23)); // MI_FLUSH;
      ((int *) batch->base.ptr)[1] = 0;
      ((int *) batch->base.ptr)[2] = (0xA<<23); // MI_BATCH_BUFFER_END;
      used += 12;
   } else {
      ((int *) batch->base.ptr)[0] = ((0<<29)|(4<<23)); // MI_FLUSH;
      ((int *) batch->base.ptr)[1] = (0xA<<23); // MI_BATCH_BUFFER_END;
      used += 8;
   }
#endif

   driBOUnmap(batch->buffer);
   batch->base.ptr = NULL;
   batch->base.map = NULL;

   /* TODO: Just pass the relocation list and dma buffer up to the
    * kernel.
    */
   if (!was_locked)
      intel->hardware_lock(intel);

   fence = do_flush_locked(batch, used, !(batch->flags & INTEL_BATCH_CLIPRECTS),
                           FALSE);

   if (!was_locked)
      intel->hardware_unlock(intel);

   /* Reset the buffer:
    */
   intel_be_batchbuffer_reset(batch);

   return fence;
}
void
intel_be_batchbuffer_finish(struct intel_be_batchbuffer *batch)
{
   struct _DriFenceObject *fence = intel_be_batchbuffer_flush(batch);
   driFenceFinish(fence, driFenceType(fence), FALSE);
   driFenceUnReference(&fence);
}
void
intel_be_batchbuffer_data(struct intel_be_batchbuffer *batch,
                          const void *data, unsigned int bytes, unsigned int flags)
{
   assert((bytes & 3) == 0);

   intel_batchbuffer_require_space(batch, bytes, flags);
   memcpy(batch->base.ptr, data, bytes);
   batch->base.ptr += bytes;
}
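/*
 * Usage sketch (illustrative): copy a prebuilt dword-aligned packet into
 * the batch, then flush and drop the returned fence reference.
 *
 *    uint32_t cmds[4] = { ... };
 *    intel_be_batchbuffer_data(batch, cmds, sizeof(cmds), 0);
 *    struct _DriFenceObject *f = intel_be_batchbuffer_flush(batch);
 *    driFenceUnReference(&f);
 */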