COMMON_BM_SOURCES = \
../common/dri_bufmgr.c \
- ../common/dri_bufmgr_ttm.c \
../common/dri_bufmgr_fake.c
return buf->bufmgr->bo_unmap(buf);
}
-int
-dri_bo_validate(dri_bo *buf, unsigned int flags)
-{
- return buf->bufmgr->bo_validate(buf, flags);
-}
-
-dri_fence *
-dri_fence_validated(dri_bufmgr *bufmgr, const char *name, GLboolean flushed)
-{
- return bufmgr->fence_validated(bufmgr, name, flushed);
-}
-
void
dri_fence_wait(dri_fence *fence)
{
batch_buf->bufmgr->emit_reloc(batch_buf, flags, delta, offset, relocatee);
}
-void *dri_process_relocs(dri_bo *batch_buf)
+void *dri_process_relocs(dri_bo *batch_buf, GLuint *count)
{
- return batch_buf->bufmgr->process_relocs(batch_buf);
+ return batch_buf->bufmgr->process_relocs(batch_buf, count);
}
void dri_post_submit(dri_bo *batch_buf, dri_fence **last_fence)
/** Reduces the refcount on the userspace mapping of the buffer object. */
int (*bo_unmap)(dri_bo *buf);
- /**
- * Makes the buffer accessible to the graphics chip.
- *
- * The resulting offset of the buffer within the graphics aperture is then
- * available at buf->offset until the buffer is fenced.
- *
- * Flags should consist of the memory types that the buffer may be validated
- * into and the read/write/exe flags appropriate to the use of the buffer.
- */
- int (*bo_validate)(dri_bo *buf, unsigned int flags);
-
- /**
- * Associates the current set of validated buffers with a fence.
- *
- * Once fenced, the buffer manager will allow the validated buffers to be
- * evicted when the graphics device's execution has passed the fence
- * command.
- *
- * The fence object will have flags for the sum of the read/write/exe flags
- * of the validated buffers associated with it.
- */
- dri_fence * (*fence_validated)(dri_bufmgr *bufmgr, const char *name,
- GLboolean flushed);
-
/** Takes a reference on a fence object */
void (*fence_reference)(dri_fence *fence);
*/
void (*emit_reloc)(dri_bo *batch_buf, GLuint flags, GLuint delta, GLuint offset, dri_bo *relocatee);
- void *(*process_relocs)(dri_bo *batch_buf);
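+ /**
+ * Processes the relocation list of the batch buffer before submission.
+ *
+ * Returns a driver-private pointer to pass to the execution ioctl (may be
+ * NULL) and stores the number of buffers on the validate list in *count.
+ */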
+ void *(*process_relocs)(dri_bo *batch_buf, GLuint *count);
void (*post_submit)(dri_bo *batch_buf, dri_fence **fence);
};
void dri_bo_unreference(dri_bo *bo);
int dri_bo_map(dri_bo *buf, GLboolean write_enable);
int dri_bo_unmap(dri_bo *buf);
-int dri_bo_validate(dri_bo *buf, unsigned int flags);
-dri_fence *dri_fence_validated(dri_bufmgr *bufmgr, const char *name,
- GLboolean flushed);
void dri_fence_wait(dri_fence *fence);
void dri_fence_reference(dri_fence *fence);
void dri_fence_unreference(dri_fence *fence);
unsigned int handle);
void dri_emit_reloc(dri_bo *batch_buf, GLuint flags, GLuint delta, GLuint offset, dri_bo *relocatee);
-void *dri_process_relocs(dri_bo *batch_buf);
+void *dri_process_relocs(dri_bo *batch_buf, GLuint *count);
void dri_post_process_relocs(dri_bo *batch_buf);
void dri_post_submit(dri_bo *batch_buf, dri_fence **last_fence);
#endif
}
static void *
-dri_fake_process_reloc(dri_bo *batch_buf)
+dri_fake_process_relocs(dri_bo *batch_buf, GLuint *count_p)
{
dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)batch_buf->bufmgr;
GLuint i;
GLuint *ptr;
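+ /* Number of relocation targets successfully validated for this batch. */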
+ GLuint count = 0;
assert(batch_buf->virtual != NULL);
ptr = batch_buf->virtual;
/* Validate. If we fail, fence to clear the unfenced list and bail
* out.
*/
- ret = dri_bo_validate(r->buf, validate_flags);
+ ret = dri_fake_bo_validate(r->buf, validate_flags);
if (ret != 0) {
dri_fence *fo;
dri_bo_unmap(batch_buf);
- fo = dri_fence_validated(batch_buf->bufmgr,
- "batchbuffer failure fence", GL_TRUE);
+ fo = dri_fake_fence_validated(batch_buf->bufmgr,
+ "batchbuffer failure fence", GL_TRUE);
dri_fence_unreference(fo);
goto done;
}
+ count++;
}
ptr[r->offset / 4] = r->buf->offset + r->delta;
dri_bo_unreference(r->buf);
}
dri_bo_unmap(batch_buf);
- dri_bo_validate(batch_buf, DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_EXE);
+ dri_fake_bo_validate(batch_buf, DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_EXE);
bufmgr_fake->nr_relocs = 0;
done:
+ *count_p = count;
return NULL;
dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)batch_buf->bufmgr;
dri_fence *fo;
- fo = dri_fence_validated(batch_buf->bufmgr, "Batch fence", GL_TRUE);
+ fo = dri_fake_fence_validated(batch_buf->bufmgr, "Batch fence", GL_TRUE);
if (bufmgr_fake->performed_rendering) {
dri_fence_unreference(*last_fence);
bufmgr_fake->bufmgr.bo_unreference = dri_fake_bo_unreference;
bufmgr_fake->bufmgr.bo_map = dri_fake_bo_map;
bufmgr_fake->bufmgr.bo_unmap = dri_fake_bo_unmap;
- bufmgr_fake->bufmgr.bo_validate = dri_fake_bo_validate;
- bufmgr_fake->bufmgr.fence_validated = dri_fake_fence_validated;
bufmgr_fake->bufmgr.fence_wait = dri_fake_fence_wait;
bufmgr_fake->bufmgr.fence_reference = dri_fake_fence_reference;
bufmgr_fake->bufmgr.fence_unreference = dri_fake_fence_unreference;
bufmgr_fake->bufmgr.destroy = dri_fake_destroy;
bufmgr_fake->bufmgr.emit_reloc = dri_fake_emit_reloc;
- bufmgr_fake->bufmgr.process_relocs = dri_fake_process_reloc;
+ bufmgr_fake->bufmgr.process_relocs = dri_fake_process_relocs;
bufmgr_fake->bufmgr.post_submit = dri_fake_post_submit;
bufmgr_fake->fence_emit = fence_emit;
bufmgr_fake->fence_wait = fence_wait;
{
struct intel_context *intel = batch->intel;
void *start;
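+ /* Number of buffers on the validate list, reported by the buffer manager. */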
+ GLuint count;
- start = dri_process_relocs(batch->buf);
+ start = dri_process_relocs(batch->buf, &count);
batch->map = NULL;
batch->ptr = NULL;
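+ /* start and count are only consumed by the TTM execbuffer path below. */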
if (intel->intelScreen->ttm == GL_TRUE) {
intel_exec_ioctl(batch->intel,
used, ignore_cliprects, allow_unlock,
- start, &batch->last_fence);
+ start, count, &batch->last_fence);
} else {
intel_batch_ioctl(batch->intel,
batch->buf->offset,
static struct drm_i915_op_arg *
-intel_setup_validate_list(int fd, struct intel_bo_list *list, struct intel_bo_list *reloc_list)
+intel_setup_validate_list(int fd, struct intel_bo_list *list, struct intel_bo_list *reloc_list, GLuint *count_p)
{
struct intel_bo_node *node;
struct intel_bo_reloc_node *rl_node;
struct drm_i915_op_arg *arg, *first;
struct drm_bo_op_req *req;
uint64_t *prevNext = NULL;
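+ /* Number of buffer nodes linked into the op-arg list below. */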
+ GLuint count = 0;
first = NULL;
arg->reloc_handle = rl_node->type_list.buf.handle;
}
}
+ count++;
}
+ *count_p = count;
if (!first)
return 0;
return first;
}
cur_type->relocs[0] = (reloc_info->type << 16);
cur_type->relocs[1] = 0;
- // cur->relocs[cur->nr_reloc_lists-1][1] = 0;// TODO ADD HANDLE HERE
-
cur->nr_reloc_types++;
}
}
return drmBOUnmap(bufmgr_ttm->fd, &ttm_buf->drm_bo);
}
-static int
-dri_ttm_validate(dri_bo *buf, unsigned int flags)
-{
- dri_bufmgr_ttm *bufmgr_ttm;
- dri_bo_ttm *ttm_buf = (dri_bo_ttm *)buf;
- unsigned int mask;
- int err;
-
- /* XXX: Sanity-check whether we've already validated this one under
- * different flags. See drmAddValidateItem().
- */
-
- bufmgr_ttm = (dri_bufmgr_ttm *)buf->bufmgr;
-
- /* Calculate the appropriate mask to pass to the DRM. There appears to be
- * be a direct relationship to flags, so it's unnecessary to have it passed
- * in as an argument.
- */
- mask = DRM_BO_MASK_MEM;
- mask |= flags & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE | DRM_BO_FLAG_EXE);
-
- err = drmBOValidate(bufmgr_ttm->fd, &ttm_buf->drm_bo, 0, flags, mask, 0);
-
- if (err == 0) {
- /* XXX: add to fence list for sanity checking */
- } else {
- fprintf(stderr, "failed to validate buffer (%s): %s\n",
- ttm_buf->name, strerror(-err));
- }
-
- buf->offset = ttm_buf->drm_bo.offset;
-
-#if BUFMGR_DEBUG
- fprintf(stderr, "bo_validate: %p (%s)\n", &ttm_buf->bo, ttm_buf->name);
-#endif
-
- return err;
-}
-
/* Returns a dri_bo wrapping the given buffer object handle.
*
* This can be used when one application needs to pass a buffer object
return &ttm_fence->fence;
}
-static dri_fence *
-dri_ttm_fence_validated(dri_bufmgr *bufmgr, const char *name,
- GLboolean flushed)
-{
- return NULL;
-}
static void
dri_ttm_fence_reference(dri_fence *fence)
static void *
-dri_ttm_process_reloc(dri_bo *batch_buf)
+dri_ttm_process_reloc(dri_bo *batch_buf, GLuint *count)
{
dri_bufmgr_ttm *bufmgr_ttm = (dri_bufmgr_ttm *)batch_buf->bufmgr;
void *ptr;
intel_add_validate_buffer(&bufmgr_ttm->list, batch_buf, DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_EXE,
DRM_BO_MASK_MEM | DRM_BO_FLAG_EXE, &itemLoc, NULL);
- ptr = intel_setup_validate_list(bufmgr_ttm->fd, &bufmgr_ttm->list, &bufmgr_ttm->reloc_list);
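+ /* Build the kernel op-arg list; count returns the number of buffers on it. */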
+ ptr = intel_setup_validate_list(bufmgr_ttm->fd, &bufmgr_ttm->list, &bufmgr_ttm->reloc_list, count);
return ptr;
}
bufmgr_ttm->bufmgr.bo_unreference = dri_ttm_bo_unreference;
bufmgr_ttm->bufmgr.bo_map = dri_ttm_bo_map;
bufmgr_ttm->bufmgr.bo_unmap = dri_ttm_bo_unmap;
- bufmgr_ttm->bufmgr.bo_validate = dri_ttm_validate;
- bufmgr_ttm->bufmgr.fence_validated = dri_ttm_fence_validated;
bufmgr_ttm->bufmgr.fence_reference = dri_ttm_fence_reference;
bufmgr_ttm->bufmgr.fence_unreference = dri_ttm_fence_unreference;
bufmgr_ttm->bufmgr.fence_wait = dri_ttm_fence_wait;
intel_exec_ioctl(struct intel_context *intel,
GLuint used,
GLboolean ignore_cliprects, GLboolean allow_unlock,
- void *start, dri_fence **fence)
+ void *start, GLuint count, dri_fence **fence)
{
struct drm_i915_execbuffer execbuf;
dri_fence *fo;
memset(&execbuf, 0, sizeof(execbuf));
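+ /* Number of buffers on the validate list built by dri_process_relocs(). */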
+ execbuf.num_buffers = count;
execbuf.batch.used = used;
execbuf.batch.cliprects = intel->pClipRects;
execbuf.batch.num_cliprects = ignore_cliprects ? 0 : intel->numClipRects;
void intel_exec_ioctl(struct intel_context *intel,
GLuint used,
GLboolean ignore_cliprects, GLboolean allow_unlock,
- void *start, dri_fence **fence);
+ void *start, GLuint count, dri_fence **fence);
#endif