struct intel_batchbuffer *batch = &brw->batch;
struct brw_bufmgr *bufmgr = screen->bufmgr;
+ /* We can't grow buffers when using softpin, so just overallocate them. */
+ if (brw_using_softpin(bufmgr))
+ size *= 2;
+
grow->bo = brw_bo_alloc(bufmgr, name, size, memzone);
grow->bo->kflags |= can_do_exec_capture(screen) ? EXEC_OBJECT_CAPTURE : 0;
grow->partial_bo = NULL;
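/* brw_using_softpin() is defined in the buffer manager and is not part of
 * this hunk.  A minimal sketch of the idea (the initial_kflags field name
 * is an assumption, not taken from this patch): softpin mode means the
 * buffer manager assigns fixed GPU addresses itself and marks every new
 * BO with EXEC_OBJECT_PINNED.
 */
static bool
brw_using_softpin(struct brw_bufmgr *bufmgr)
{
   /* If new BOs default to EXEC_OBJECT_PINNED, we're in softpin mode. */
   return bufmgr->initial_kflags & EXEC_OBJECT_PINNED;
}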
struct brw_bufmgr *bufmgr = brw->bufmgr;
struct brw_bo *bo = grow->bo;
+ /* We can't grow buffers that are softpinned, as the growing mechanism
+ * involves putting a larger buffer at the same gtt_offset...and we've
+ * only allocated the smaller amount of VMA. Without relocations, this
+ * simply won't work. This should never happen, however.
+ */
+ assert(!(bo->kflags & EXEC_OBJECT_PINNED));
+
perf_debug("Growing %s - ran out of space\n", bo->name);
if (grow->partial_bo) {
/* Update brw_bo::gtt_offset */
if (batch->validation_list[i].offset != bo->gtt_offset) {
+ assert(!(bo->kflags & EXEC_OBJECT_PINNED));
DBG("BO %d migrated: 0x%" PRIx64 " -> 0x%llx\n",
bo->gem_handle, bo->gtt_offset,
batch->validation_list[i].offset);
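/* The rest of this loop body is elided in the excerpt; given the comment
 * above, it presumably copies the kernel-reported address back, roughly:
 *
 *    bo->gtt_offset = batch->validation_list[i].offset;
 *
 * With softpin the driver chooses the address and it never changes, which
 * is why a pinned BO reaching this migration path would be a bug.
 */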
{
assert(target != NULL);
+ if (target->kflags & EXEC_OBJECT_PINNED) {
+ brw_use_pinned_bo(batch, target, reloc_flags & RELOC_WRITE);
+ return target->gtt_offset + target_offset;
+ }
+
+ unsigned int index = add_exec_bo(batch, target);
+ struct drm_i915_gem_exec_object2 *entry = &batch->validation_list[index];
+
if (rlist->reloc_count == rlist->reloc_array_size) {
rlist->reloc_array_size *= 2;
rlist->relocs = realloc(rlist->relocs,
                        rlist->reloc_array_size *
                        sizeof(struct drm_i915_gem_relocation_entry));
}
- unsigned int index = add_exec_bo(batch, target);
- struct drm_i915_gem_exec_object2 *entry = &batch->validation_list[index];
-
if (reloc_flags & RELOC_32BIT) {
/* Restrict this buffer to the low 32 bits of the address space.
*
return entry->offset + target_offset;
}
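/* The non-pinned path between the reloc-array growth above and the final
 * return is elided in this excerpt.  Roughly, it records a relocation so
 * the kernel can patch the batch if the target BO is placed elsewhere; a
 * sketch using the uapi struct from i915_drm.h (field values and the
 * surrounding variable names are illustrative, not from this patch):
 *
 *    rlist->relocs[rlist->reloc_count++] =
 *       (struct drm_i915_gem_relocation_entry) {
 *          .offset = batch_offset,            // spot in the batch to patch
 *          .delta = target_offset,            // offset inside the target BO
 *          .target_handle = index,            // execbuf index or GEM handle
 *          .presumed_offset = entry->offset,  // address written optimistically
 *       };
 */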
+void
+brw_use_pinned_bo(struct intel_batchbuffer *batch, struct brw_bo *bo,
+ unsigned writable_flag)
+{
+ assert(bo->kflags & EXEC_OBJECT_PINNED);
+ assert((writable_flag & ~EXEC_OBJECT_WRITE) == 0);
+
+ unsigned int index = add_exec_bo(batch, bo);
+ struct drm_i915_gem_exec_object2 *entry = &batch->validation_list[index];
+ assert(entry->offset == bo->gtt_offset);
+
+ if (writable_flag)
+ entry->flags |= EXEC_OBJECT_WRITE;
+}
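/* A minimal usage sketch (this caller is hypothetical, not part of the
 * patch): with softpin the GPU address is fixed at bo->gtt_offset, so a
 * caller that already knows the buffer is pinned can skip relocations
 * entirely and only needs to track the BO for execbuf.
 */
static uint64_t
use_pinned_address(struct intel_batchbuffer *batch, struct brw_bo *bo,
                   uint32_t offset_in_bo, bool writable)
{
   brw_use_pinned_bo(batch, bo, writable ? EXEC_OBJECT_WRITE : 0);
   return bo->gtt_offset + offset_in_bo;
}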
+
uint64_t
brw_batch_reloc(struct intel_batchbuffer *batch, uint32_t batch_offset,
struct brw_bo *target, uint32_t target_offset,