uint32_t gem_handle;
/**
- * Last seen card virtual address (offset from the beginning of the
- * aperture) for the object. This should be used to fill relocation
- * entries when calling brw_bo_emit_reloc()
+ * Offset of the buffer inside the Graphics Translation Table.
+ *
+ * This is effectively our GPU address for the buffer and we use it
+ * as our base for all state pointers into the buffer. However, since the
+ * kernel may be forced to move it around during the course of the
+ * buffer's lifetime, we can only know where the buffer was on the last
+ * execbuf. We presume, and are usually right, that the buffer will not
+ * move and so we use that last offset for the next batch and by doing
+ * so we can avoid having the kernel perform a relocation fixup pass as
+ * our pointers inside the batch will be using the correct base offset.
+ *
+ * Since we do use it as a base address for the next batch of pointers,
+ * the kernel treats our offset as a request, and if possible will
+ * arrange for the buffer to be placed at that address (trying to balance
+ * the cost of buffer migration versus the cost of performing
+ * relocations). Furthermore, by specifying EXEC_OBJECT_PINNED we can
+ * force the kernel to place the buffer at our chosen offset, or report
+ * a failure if that offset conflicts with another object.
+ *
+ * Note that the GTT may be either per-context or shared globally across the
+ * system. On a shared system, our buffers have to contend for address
+ * space with both aperture mappings and framebuffers and so are more
+ * likely to be moved. On a full ppGTT system, each batch exists in its
+ * own GTT, and so each buffer may have its own offset within each
+ * context.
*/
- uint64_t offset64;
+ uint64_t gtt_offset;
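/*
 * Illustration (not part of the patch): a minimal sketch of the idea the
 * comment above describes -- the last-seen gtt_offset is used as the base
 * for pointers emitted into the next batch, so if the presumed address is
 * still correct the kernel can skip its relocation fixup pass.  The struct
 * and helper names below are hypothetical; only the gtt_offset field itself
 * comes from this patch.
 */
#include <stdint.h>

struct brw_bo_sketch {
   uint32_t gem_handle;
   uint64_t gtt_offset;   /* last-seen GTT address, presumed still valid */
};

/* Write the presumed GPU address of (bo + byte_offset) into the batch. */
static inline void
emit_presumed_pointer(uint64_t *batch_dst, const struct brw_bo_sketch *bo,
                      uint32_t byte_offset)
{
   /* If the kernel later moves the buffer, this address becomes stale and
    * the kernel has to fall back to patching relocations.
    */
   *batch_dst = bo->gtt_offset + byte_offset;
}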
/**
* The validation list index for this buffer, or -1 when not in a batch.
(struct drm_i915_gem_exec_object2) {
.handle = bo->gem_handle,
.alignment = bo->align,
- .offset = bo->offset64,
+ .offset = bo->gtt_offset,
.flags = bo->kflags,
};
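/*
 * Illustration (not part of the patch): a sketch of how EXEC_OBJECT_PINNED
 * could be combined with the presumed offset so the kernel must either
 * honour the requested address or fail the execbuf, as described in the
 * brw_bo::gtt_offset comment above.  EXEC_OBJECT_PINNED and
 * drm_i915_gem_exec_object2 come from the i915 uAPI (the include path may
 * differ, e.g. libdrm's copy of i915_drm.h); the helper itself is only a
 * sketch, not the driver's real code.
 */
#include <stdint.h>
#include <string.h>
#include <drm/i915_drm.h>

static struct drm_i915_gem_exec_object2
make_pinned_exec_object(uint32_t gem_handle, uint64_t gtt_offset)
{
   struct drm_i915_gem_exec_object2 obj;

   memset(&obj, 0, sizeof(obj));
   obj.handle = gem_handle;
   obj.offset = gtt_offset;           /* requested (and now required) address */
   obj.flags  = EXEC_OBJECT_PINNED;   /* kernel must honour obj.offset or fail */
   return obj;
}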
uint32_t *data = map ? map : batch->map;
uint32_t *end = data + USED_BATCH(*batch);
- uint32_t gtt_offset = map ? batch->bo->offset64 : 0;
+ uint32_t gtt_offset = map ? batch->bo->gtt_offset : 0;
int length;
bool color = INTEL_DEBUG & DEBUG_COLOR;
bo->idle = false;
bo->index = -1;
- /* Update brw_bo::offset64 */
- if (batch->validation_list[i].offset != bo->offset64) {
+ /* Update brw_bo::gtt_offset */
+ if (batch->validation_list[i].offset != bo->gtt_offset) {
DBG("BO %d migrated: 0x%" PRIx64 " -> 0x%llx\n",
- bo->gem_handle, bo->offset64, batch->validation_list[i].offset);
- bo->offset64 = batch->validation_list[i].offset;
+ bo->gem_handle, bo->gtt_offset,
+ batch->validation_list[i].offset);
+ bo->gtt_offset = batch->validation_list[i].offset;
}
}
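/*
 * Illustration (not part of the patch): a sketch of the submission that
 * precedes the read-back loop above.  The validation list is handed to
 * DRM_IOCTL_I915_GEM_EXECBUFFER2 with I915_EXEC_NO_RELOC set; on return the
 * kernel has written each object's actual placement into .offset, which the
 * loop then copies back into brw_bo::gtt_offset.  Structure and flag names
 * are from the i915 uAPI and libdrm; the function below is only a sketch,
 * not the driver's real submission path.
 */
#include <stdint.h>
#include <string.h>
#include <drm/i915_drm.h>
#include <xf86drm.h>

static int
submit_no_reloc(int fd, struct drm_i915_gem_exec_object2 *objects,
                uint32_t count, uint32_t batch_len)
{
   struct drm_i915_gem_execbuffer2 execbuf;

   memset(&execbuf, 0, sizeof(execbuf));
   execbuf.buffers_ptr = (uintptr_t) objects;
   execbuf.buffer_count = count;
   execbuf.batch_len = batch_len;
   /* Addresses in the batch are presumed valid, so ask the kernel to skip
    * the relocation fixup pass unless an object actually moved.
    */
   execbuf.flags = I915_EXEC_RENDER | I915_EXEC_NO_RELOC;

   return drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
}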
/* The requirements for using I915_EXEC_NO_RELOC are:
*
* The addresses written in the objects must match the corresponding
* reloc.presumed_offset which in turn must match the corresponding
* execobject.offset.
*
* Any render targets written to in the batch must be flagged with