return true;
}
+bool
+intel_batchbuffer_emit_reloc64(struct brw_context *brw,
+                               drm_intel_bo *buffer,
+                               uint32_t read_domains, uint32_t write_domain,
+                               uint32_t delta)
+{
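+   /* Ask libdrm to record a relocation for `buffer' at the current byte
+    * offset in the batch (batch.used counts dwords).
+    */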
+   int ret = drm_intel_bo_emit_reloc(brw->batch.bo, 4*brw->batch.used,
+                                     buffer, delta,
+                                     read_domains, write_domain);
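+   /* assert() compiles away in release (NDEBUG) builds; the void cast keeps
+    * `ret' from triggering set-but-unused warnings there.
+    */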
+   assert(ret == 0);
+   (void) ret;
+
+   /* Using the old buffer offset, write in what the right data would be, in
+    * case the buffer doesn't move and we can short-circuit the relocation
+    * processing in the kernel.
+    */
+   uint64_t offset = buffer->offset64 + delta;
+   intel_batchbuffer_emit_dword(brw, offset);
+   intel_batchbuffer_emit_dword(brw, offset >> 32);
+
+   return true;
+}
+
void
intel_batchbuffer_data(struct brw_context *brw,
                       const void *data, GLuint bytes, enum brw_gpu_ring ring)
                                  uint32_t read_domains,
                                  uint32_t write_domain,
                                  uint32_t offset);
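+/* Like intel_batchbuffer_emit_reloc, but emits a 64-bit address (two
+ * dwords) for the 48-bit addressing available on Gen8+.
+ */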
+bool intel_batchbuffer_emit_reloc64(struct brw_context *brw,
+                                    drm_intel_bo *buffer,
+                                    uint32_t read_domains,
+                                    uint32_t write_domain,
+                                    uint32_t offset);
void intel_batchbuffer_emit_mi_flush(struct brw_context *brw);
void intel_emit_post_sync_nonzero_flush(struct brw_context *brw);
void intel_emit_depth_stall_flushes(struct brw_context *brw);
                                read_domains, write_domain, delta); \
} while (0)
+/* Handle 48-bit address relocations for Gen8+ */
+#define OUT_RELOC64(buf, read_domains, write_domain, delta) do { \
+   intel_batchbuffer_emit_reloc64(brw, buf, read_domains, write_domain, delta); \
+} while (0)
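+
+/* Usage sketch (illustrative only; the command and its length below are
+ * hypothetical, not part of this patch).  OUT_RELOC64 consumes two dwords
+ * of batch space, so the packet length passed to BEGIN_BATCH must count
+ * both:
+ *
+ *    BEGIN_BATCH(3);
+ *    OUT_BATCH(HYPOTHETICAL_CMD << 16 | (3 - 2));
+ *    OUT_RELOC64(bo, I915_GEM_DOMAIN_RENDER, 0, 0);
+ *    ADVANCE_BATCH();
+ */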
+
#define ADVANCE_BATCH() intel_batchbuffer_advance(brw);
#ifdef __cplusplus