   uint32_t offset = (char *)location - (char *)brw->batch.map;

   if (brw->gen >= 8) {
-      return intel_batchbuffer_reloc64(brw, address.buffer, offset,
+      return intel_batchbuffer_reloc64(&brw->batch, address.buffer, offset,
                                        address.read_domains,
                                        address.write_domain,
                                        address.offset + delta);
   } else {
-      return intel_batchbuffer_reloc(brw, address.buffer, offset,
+      return intel_batchbuffer_reloc(&brw->batch, address.buffer, offset,
                                      address.read_domains,
                                      address.write_domain,
                                      address.offset + delta);
   }
}

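The Gen8+ branch exists because those GPUs use 48-bit graphics addresses, which occupy two dwords in the command stream, while earlier parts use 32-bit addresses that fit in one. A minimal sketch of a hypothetical caller consuming the two return widths (bo, offset, and map are illustrative names, not from this patch):

   if (brw->gen >= 8) {
      /* 48-bit address: two dwords, low half first (little-endian stream) */
      uint64_t addr = intel_batchbuffer_reloc64(&brw->batch, bo, offset,
                                                I915_GEM_DOMAIN_RENDER,
                                                I915_GEM_DOMAIN_RENDER, 0);
      map[0] = addr;        /* bits 31:0  */
      map[1] = addr >> 32;  /* bits 47:32 */
   } else {
      /* 32-bit address: a single dword */
      map[0] = intel_batchbuffer_reloc(&brw->batch, bo, offset,
                                       I915_GEM_DOMAIN_RENDER,
                                       I915_GEM_DOMAIN_RENDER, 0);
   }
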
/* This is the only way buffers get added to the validate list.
*/
uint32_t
-intel_batchbuffer_reloc(struct brw_context *brw,
+intel_batchbuffer_reloc(struct intel_batchbuffer *batch,
                        drm_intel_bo *buffer, uint32_t offset,
                        uint32_t read_domains, uint32_t write_domain,
                        uint32_t delta)
{
   int ret;

-   ret = drm_intel_bo_emit_reloc(brw->batch.bo, offset,
+   ret = drm_intel_bo_emit_reloc(batch->bo, offset,
                                 buffer, delta,
                                 read_domains, write_domain);
   assert(ret == 0);

   /* Using the old buffer offset, write in what the right data would be, in
    * case the buffer doesn't move and we can short-circuit the relocation
    * processing in the kernel.
    */
   return buffer->offset64 + delta;
}

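Because the helper now takes the batchbuffer directly instead of the whole brw_context, code that only holds a struct intel_batchbuffer * can emit relocations too. A minimal sketch, assuming a hypothetical bo and dword_index (the domain flags are the standard i915 ones):

   uint32_t presumed =
      intel_batchbuffer_reloc(batch, bo, dword_index * 4,
                              I915_GEM_DOMAIN_RENDER,  /* read domains */
                              I915_GEM_DOMAIN_RENDER,  /* write domain */
                              0);                      /* delta */
   batch->map[dword_index] = presumed;  /* store the presumed address */
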
uint64_t
-intel_batchbuffer_reloc64(struct brw_context *brw,
+intel_batchbuffer_reloc64(struct intel_batchbuffer *batch,
                          drm_intel_bo *buffer, uint32_t offset,
                          uint32_t read_domains, uint32_t write_domain,
                          uint32_t delta)
{
-   int ret = drm_intel_bo_emit_reloc(brw->batch.bo, offset,
+   int ret = drm_intel_bo_emit_reloc(batch->bo, offset,
                                     buffer, delta,
                                     read_domains, write_domain);
   assert(ret == 0);

   /* As above, return the presumed address so the caller can write it into
    * the batch and the kernel can skip the relocation if the buffer does
    * not move.
    */
   return buffer->offset64 + delta;
}

void intel_batchbuffer_data(struct brw_context *brw,
                            const void *data, GLuint bytes,
                            enum brw_gpu_ring ring);

-uint32_t intel_batchbuffer_reloc(struct brw_context *brw,
+uint32_t intel_batchbuffer_reloc(struct intel_batchbuffer *batch,
                                 drm_intel_bo *buffer,
                                 uint32_t offset,
                                 uint32_t read_domains,
                                 uint32_t write_domain,
                                 uint32_t delta);
-uint64_t intel_batchbuffer_reloc64(struct brw_context *brw,
+uint64_t intel_batchbuffer_reloc64(struct intel_batchbuffer *batch,
                                   drm_intel_bo *buffer,
                                   uint32_t offset,
                                   uint32_t read_domains,
                                   uint32_t write_domain,
                                   uint32_t delta);

#define OUT_BATCH(d) *__map++ = (d)
#define OUT_BATCH_F(f) OUT_BATCH(float_as_int((f)))

-#define OUT_RELOC(buf, read_domains, write_domain, delta) do {            \
-   uint32_t __offset = (__map - brw->batch.map) * 4;                      \
-   OUT_BATCH(intel_batchbuffer_reloc(brw, (buf), __offset,                \
-                                     (read_domains),                      \
-                                     (write_domain),                      \
-                                     (delta)));                           \
+#define OUT_RELOC(buf, read_domains, write_domain, delta) do {            \
+   uint32_t __offset = (__map - brw->batch.map) * 4;                      \
+   OUT_BATCH(intel_batchbuffer_reloc(&brw->batch, (buf), __offset,        \
+                                     (read_domains),                      \
+                                     (write_domain),                      \
+                                     (delta)));                           \
} while (0)
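
Typical use goes through the usual BEGIN_BATCH()/ADVANCE_BATCH() wrappers, which set up __map. A sketch of the pre-Gen8 path, modeled loosely on a store-register-to-memory write (reg, bo, and offset are illustrative):

   BEGIN_BATCH(3);
   OUT_BATCH(MI_STORE_REGISTER_MEM | (3 - 2));   /* 3-dword command */
   OUT_BATCH(reg);                               /* MMIO register to store */
   OUT_RELOC(bo, I915_GEM_DOMAIN_INSTRUCTION,
             I915_GEM_DOMAIN_INSTRUCTION, offset);
   ADVANCE_BATCH();
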
/* Handle 48-bit address relocations for Gen8+ */
#define OUT_RELOC64(buf, read_domains, write_domain, delta) do {              \
   uint32_t __offset = (__map - brw->batch.map) * 4;                          \
-   uint64_t reloc64 = intel_batchbuffer_reloc64(brw, (buf), __offset,         \
+   uint64_t reloc64 = intel_batchbuffer_reloc64(&brw->batch, (buf), __offset, \
                                                (read_domains),                \
                                                (write_domain),                \
                                                (delta));                      \
   OUT_BATCH(reloc64);                                                         \
   OUT_BATCH(reloc64 >> 32);                                                   \
} while (0)
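
On Gen8+ the same command grows by one dword, since OUT_RELOC64() advances __map twice (low dword, then high). A sketch under the same illustrative assumptions as above:

   BEGIN_BATCH(4);
   OUT_BATCH(MI_STORE_REGISTER_MEM | (4 - 2));   /* 4 dwords on Gen8+ */
   OUT_BATCH(reg);
   OUT_RELOC64(bo, I915_GEM_DOMAIN_INSTRUCTION,
               I915_GEM_DOMAIN_INSTRUCTION, offset);
   ADVANCE_BATCH();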