   struct brw_context *brw = batch->driver_batch;
   uint32_t offset = (char *)location - (char *)brw->batch.map;
-   if (brw->gen >= 8) {
-      return intel_batchbuffer_reloc64(&brw->batch, address.buffer, offset,
-                                       address.read_domains,
-                                       address.write_domain,
-                                       address.offset + delta);
-   } else {
-      return intel_batchbuffer_reloc(&brw->batch, address.buffer, offset,
-                                     address.read_domains,
-                                     address.write_domain,
-                                     address.offset + delta);
-   }
+   return intel_batchbuffer_reloc(&brw->batch, address.buffer, offset,
+                                  address.read_domains,
+                                  address.write_domain,
+                                  address.offset + delta);
}
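
Aside (not part of the patch): dropping the gen >= 8 branch above is behavior-preserving. Both removed paths computed the same presumed address, buffer->offset64 + delta; the only difference was that the pre-Gen8 path funnelled it through a uint32_t return type. A minimal standalone sketch of why that round trip is harmless for the sub-4GiB addresses pre-Gen8 hardware uses (values are made up; no Mesa or libdrm code involved):

#include <assert.h>
#include <stdint.h>

int main(void)
{
   uint64_t offset64 = 0x00000000bf000000ull;      /* example pre-Gen8 bo offset */
   uint32_t delta    = 0x80;

   uint64_t wide   = offset64 + delta;             /* new, single code path */
   uint32_t narrow = (uint32_t)(offset64 + delta); /* old gen < 8 code path */

   /* Zero-extending the old 32-bit result gives back the 64-bit value. */
   assert((uint64_t)narrow == wide);
   return 0;
}
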
/* This is the only way buffers get added to the validate list.
 */
-uint32_t
+uint64_t
intel_batchbuffer_reloc(struct intel_batchbuffer *batch,
                        drm_intel_bo *buffer, uint32_t offset,
                        uint32_t read_domains, uint32_t write_domain,
   return buffer->offset64 + delta;
}
-uint64_t
-intel_batchbuffer_reloc64(struct intel_batchbuffer *batch,
-                          drm_intel_bo *buffer, uint32_t offset,
-                          uint32_t read_domains, uint32_t write_domain,
-                          uint32_t delta)
-{
-   int ret = drm_intel_bo_emit_reloc(batch->bo, offset,
-                                     buffer, delta,
-                                     read_domains, write_domain);
-   assert(ret == 0);
-   (void) ret;
-
-   /* Using the old buffer offset, write in what the right data would be, in
-    * case the buffer doesn't move and we can short-circuit the relocation
-    * processing in the kernel
-    */
-   return buffer->offset64 + delta;
-}
-
-
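
For reference, the helper that survives this patch would then read roughly as follows. This is reconstructed from the hunks above, assuming (as the merge implies) that the deleted ..._reloc64 body shown above was identical to the remaining intel_batchbuffer_reloc body apart from the return type; it is not itself part of the diff:

uint64_t
intel_batchbuffer_reloc(struct intel_batchbuffer *batch,
                        drm_intel_bo *buffer, uint32_t offset,
                        uint32_t read_domains, uint32_t write_domain,
                        uint32_t delta)
{
   int ret = drm_intel_bo_emit_reloc(batch->bo, offset,
                                     buffer, delta,
                                     read_domains, write_domain);
   assert(ret == 0);
   (void) ret;

   /* Using the old buffer offset, write in what the right data would be, in
    * case the buffer doesn't move and we can short-circuit the relocation
    * processing in the kernel
    */
   return buffer->offset64 + delta;
}
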
void
intel_batchbuffer_data(struct brw_context *brw,
                       const void *data, GLuint bytes, enum brw_gpu_ring ring)
                            const void *data, GLuint bytes,
                            enum brw_gpu_ring ring);
-uint32_t intel_batchbuffer_reloc(struct intel_batchbuffer *batch,
+uint64_t intel_batchbuffer_reloc(struct intel_batchbuffer *batch,
                                 drm_intel_bo *buffer,
                                 uint32_t offset,
                                 uint32_t read_domains,
                                 uint32_t write_domain,
                                 uint32_t delta);
-uint64_t intel_batchbuffer_reloc64(struct intel_batchbuffer *batch,
-                                   drm_intel_bo *buffer,
-                                   uint32_t offset,
-                                   uint32_t read_domains,
-                                   uint32_t write_domain,
-                                   uint32_t delta);
#define USED_BATCH(batch) ((uintptr_t)((batch).map_next - (batch).map))
#define OUT_BATCH(d) *__map++ = (d)
#define OUT_BATCH_F(f) OUT_BATCH(float_as_int((f)))
-#define OUT_RELOC(buf, read_domains, write_domain, delta) do { \
-   uint32_t __offset = (__map - brw->batch.map) * 4; \
-   OUT_BATCH(intel_batchbuffer_reloc(&brw->batch, (buf), __offset, \
-                                     (read_domains), \
-                                     (write_domain), \
-                                     (delta))); \
+#define OUT_RELOC(buf, read_domains, write_domain, delta) do { \
+   uint32_t __offset = (__map - brw->batch.map) * 4; \
+   uint32_t reloc = \
+      intel_batchbuffer_reloc(&brw->batch, (buf), __offset, \
+                              (read_domains), (write_domain), (delta)); \
+   OUT_BATCH(reloc); \
} while (0)
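
Annotation (not part of the patch): since intel_batchbuffer_reloc now returns uint64_t, the rewritten OUT_RELOC narrows the result into a local uint32_t before emitting it. That narrowing is what keeps the macro's behavior unchanged, because OUT_RELOC is only used for 32-bit address fields, where the presumed address always fits. A standalone sketch of the narrowing step (the address value is made up):

#include <assert.h>
#include <stdint.h>

int main(void)
{
   uint64_t presumed = 0x00000000fefff000ull;  /* fits in 32 bits */
   uint32_t reloc = presumed;                  /* what OUT_BATCH(reloc) emits */

   assert((uint64_t)reloc == presumed);        /* no information lost */
   return 0;
}
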
/* Handle 48-bit address relocations for Gen8+ */
-#define OUT_RELOC64(buf, read_domains, write_domain, delta) do { \
-   uint32_t __offset = (__map - brw->batch.map) * 4; \
-   uint64_t reloc64 = intel_batchbuffer_reloc64(&brw->batch, (buf), __offset, \
-                                                (read_domains), \
-                                                (write_domain), \
-                                                (delta)); \
-   OUT_BATCH(reloc64); \
-   OUT_BATCH(reloc64 >> 32); \
+#define OUT_RELOC64(buf, read_domains, write_domain, delta) do { \
+   uint32_t __offset = (__map - brw->batch.map) * 4; \
+   uint64_t reloc64 = \
+      intel_batchbuffer_reloc(&brw->batch, (buf), __offset, \
+                              (read_domains), (write_domain), (delta)); \
+   OUT_BATCH(reloc64); \
+   OUT_BATCH(reloc64 >> 32); \
} while (0)
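
Annotation (not part of the patch): OUT_RELOC64 keeps both of its OUT_BATCH() calls, so the only change is which helper computes reloc64. The low dword is emitted first and the high dword second, matching the little-endian qword layout of Gen8+ 48-bit address fields. A standalone sketch of that split (the address value is made up):

#include <assert.h>
#include <stdint.h>

int main(void)
{
   uint64_t reloc64 = 0x0000012345678000ull;   /* example 48-bit address */
   uint32_t low  = reloc64;                    /* first OUT_BATCH()  */
   uint32_t high = reloc64 >> 32;              /* second OUT_BATCH() */

   /* Reassembling the two dwords recovers the original address. */
   assert((((uint64_t)high << 32) | low) == reloc64);
   return 0;
}
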
#define ADVANCE_BATCH() \