i965: Combine intel_batchbuffer_reloc and intel_batchbuffer_reloc64
author Kenneth Graunke <kenneth@whitecape.org>
Tue, 28 Mar 2017 21:45:59 +0000 (14:45 -0700)
committer Kenneth Graunke <kenneth@whitecape.org>
Thu, 30 Mar 2017 07:15:28 +0000 (00:15 -0700)
These two functions do exactly the same thing.  One returns a uint64_t,
and the other computes the same uint64_t but truncates it to a uint32_t.

We only need the uint64_t variant - the caller can truncate if it wants.
This patch leaves a single function, intel_batchbuffer_reloc, which
returns the full 64-bit value.

Reviewed-by: Jason Ekstrand <jason@jlekstrand.net>
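
A minimal standalone sketch of the caller-truncates pattern described above
(the helper name emit_reloc and the addresses are hypothetical stand-ins for
intel_batchbuffer_reloc and real buffer offsets, not code from this patch):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for intel_batchbuffer_reloc(): it would return the
 * presumed 64-bit graphics address of the relocation target
 * (buffer->offset64 + delta). */
static uint64_t
emit_reloc(uint64_t presumed_offset64, uint32_t delta)
{
   return presumed_offset64 + delta;
}

int
main(void)
{
   uint64_t addr = emit_reloc(0x1234500000ull, 0x40);

   /* Pre-Gen8 (OUT_RELOC): emit one dword; the caller truncates to 32 bits. */
   uint32_t dword = (uint32_t) addr;

   /* Gen8+ (OUT_RELOC64): emit two dwords, low then high, for a 48-bit
    * address. */
   uint32_t lo = (uint32_t) addr;
   uint32_t hi = (uint32_t) (addr >> 32);

   printf("32-bit: 0x%08" PRIx32 "  64-bit: 0x%08" PRIx32 "%08" PRIx32 "\n",
          dword, hi, lo);
   return 0;
}
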
src/mesa/drivers/dri/i965/genX_blorp_exec.c
src/mesa/drivers/dri/i965/intel_batchbuffer.c
src/mesa/drivers/dri/i965/intel_batchbuffer.h

index 35310fa03df69a353a8f8210058818fc7d0eafad..b8dcf9fd3b540525571f2162d6fb2fe382bb5247 100644 (file)
@@ -55,17 +55,10 @@ blorp_emit_reloc(struct blorp_batch *batch,
    struct brw_context *brw = batch->driver_batch;
 
    uint32_t offset = (char *)location - (char *)brw->batch.map;
-   if (brw->gen >= 8) {
-      return intel_batchbuffer_reloc64(&brw->batch, address.buffer, offset,
-                                       address.read_domains,
-                                       address.write_domain,
-                                       address.offset + delta);
-   } else {
-      return intel_batchbuffer_reloc(&brw->batch, address.buffer, offset,
-                                     address.read_domains,
-                                     address.write_domain,
-                                     address.offset + delta);
-   }
+   return intel_batchbuffer_reloc(&brw->batch, address.buffer, offset,
+                                  address.read_domains,
+                                  address.write_domain,
+                                  address.offset + delta);
 }
 
 static void
index 49a00c12ba2bd9660e50b6a178641312de19923e..1599a2c2a53097c09d0d4535a79c7b521808f8e5 100644 (file)
@@ -576,7 +576,7 @@ _intel_batchbuffer_flush_fence(struct brw_context *brw,
 
 /*  This is the only way buffers get added to the validate list.
  */
-uint32_t
+uint64_t
 intel_batchbuffer_reloc(struct intel_batchbuffer *batch,
                         drm_intel_bo *buffer, uint32_t offset,
                         uint32_t read_domains, uint32_t write_domain,
@@ -597,26 +597,6 @@ intel_batchbuffer_reloc(struct intel_batchbuffer *batch,
    return buffer->offset64 + delta;
 }
 
-uint64_t
-intel_batchbuffer_reloc64(struct intel_batchbuffer *batch,
-                          drm_intel_bo *buffer, uint32_t offset,
-                          uint32_t read_domains, uint32_t write_domain,
-                          uint32_t delta)
-{
-   int ret = drm_intel_bo_emit_reloc(batch->bo, offset,
-                                     buffer, delta,
-                                     read_domains, write_domain);
-   assert(ret == 0);
-   (void) ret;
-
-   /* Using the old buffer offset, write in what the right data would be, in
-    * case the buffer doesn't move and we can short-circuit the relocation
-    * processing in the kernel
-    */
-   return buffer->offset64 + delta;
-}
-
-
 void
 intel_batchbuffer_data(struct brw_context *brw,
                        const void *data, GLuint bytes, enum brw_gpu_ring ring)
index 01d48048a68c29aa7f0382469e85f89b534facc2..cf545ec1886748d3e072cf12f9c06a8f777f8358 100644 (file)
@@ -64,18 +64,12 @@ void intel_batchbuffer_data(struct brw_context *brw,
                             const void *data, GLuint bytes,
                             enum brw_gpu_ring ring);
 
-uint32_t intel_batchbuffer_reloc(struct intel_batchbuffer *batch,
+uint64_t intel_batchbuffer_reloc(struct intel_batchbuffer *batch,
                                  drm_intel_bo *buffer,
                                  uint32_t offset,
                                  uint32_t read_domains,
                                  uint32_t write_domain,
                                  uint32_t delta);
-uint64_t intel_batchbuffer_reloc64(struct intel_batchbuffer *batch,
-                                   drm_intel_bo *buffer,
-                                   uint32_t offset,
-                                   uint32_t read_domains,
-                                   uint32_t write_domain,
-                                   uint32_t delta);
 
 #define USED_BATCH(batch) ((uintptr_t)((batch).map_next - (batch).map))
 
@@ -161,23 +155,22 @@ intel_batchbuffer_advance(struct brw_context *brw)
 #define OUT_BATCH(d) *__map++ = (d)
 #define OUT_BATCH_F(f) OUT_BATCH(float_as_int((f)))
 
-#define OUT_RELOC(buf, read_domains, write_domain, delta) do {    \
-   uint32_t __offset = (__map - brw->batch.map) * 4;              \
-   OUT_BATCH(intel_batchbuffer_reloc(&brw->batch, (buf), __offset, \
-                                     (read_domains),              \
-                                     (write_domain),              \
-                                     (delta)));                   \
+#define OUT_RELOC(buf, read_domains, write_domain, delta) do {          \
+   uint32_t __offset = (__map - brw->batch.map) * 4;                    \
+   uint32_t reloc =                                                     \
+      intel_batchbuffer_reloc(&brw->batch, (buf), __offset,             \
+                              (read_domains), (write_domain), (delta)); \
+   OUT_BATCH(reloc);                                                    \
 } while (0)
 
 /* Handle 48-bit address relocations for Gen8+ */
-#define OUT_RELOC64(buf, read_domains, write_domain, delta) do {      \
-   uint32_t __offset = (__map - brw->batch.map) * 4;                  \
-   uint64_t reloc64 = intel_batchbuffer_reloc64(&brw->batch, (buf), __offset, \
-                                                (read_domains),       \
-                                                (write_domain),       \
-                                                (delta));             \
-   OUT_BATCH(reloc64);                                                \
-   OUT_BATCH(reloc64 >> 32);                                          \
+#define OUT_RELOC64(buf, read_domains, write_domain, delta) do {        \
+   uint32_t __offset = (__map - brw->batch.map) * 4;                    \
+   uint64_t reloc64 =                                                   \
+      intel_batchbuffer_reloc(&brw->batch, (buf), __offset,             \
+                              (read_domains), (write_domain), (delta)); \
+   OUT_BATCH(reloc64);                                                  \
+   OUT_BATCH(reloc64 >> 32);                                            \
 } while (0)
 
 #define ADVANCE_BATCH()                  \