brw_batch_reloc emits a relocation from the batchbuffer to elsewhere.
brw_state_reloc emits a relocation from the statebuffer to elsewhere.
For now, they do the same thing, but when we actually split the two
buffers, we'll change brw_state_reloc to use the state buffer.
Reviewed-by: Matt Turner <mattst88@gmail.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
surf_offset);
isl_surf_fill_state(&brw->isl_dev, state, .surf = &mt->surf, .view = &view,
- .address = brw_emit_reloc(&brw->batch,
- *surf_offset + brw->isl_dev.ss.addr_offset,
- mt->bo, offset, reloc_flags),
+ .address = brw_state_reloc(&brw->batch,
+ *surf_offset + brw->isl_dev.ss.addr_offset,
+ mt->bo, offset, reloc_flags),
.aux_surf = aux_surf, .aux_usage = aux_usage,
.aux_address = aux_offset,
.mocs = mocs, .clear_color = clear_color,
*/
assert((aux_offset & 0xfff) == 0);
uint32_t *aux_addr = state + brw->isl_dev.ss.aux_addr_offset;
- *aux_addr = brw_emit_reloc(&brw->batch,
- *surf_offset +
- brw->isl_dev.ss.aux_addr_offset,
- aux_bo, *aux_addr,
- reloc_flags);
+ *aux_addr = brw_state_reloc(&brw->batch,
+ *surf_offset +
+ brw->isl_dev.ss.aux_addr_offset,
+ aux_bo, *aux_addr,
+ reloc_flags);
}
}
isl_buffer_fill_state(&brw->isl_dev, dw,
.address = !bo ? buffer_offset :
- brw_emit_reloc(&brw->batch,
- *out_offset + brw->isl_dev.ss.addr_offset,
- bo, buffer_offset,
- reloc_flags),
+ brw_state_reloc(&brw->batch,
+ *out_offset + brw->isl_dev.ss.addr_offset,
+ bo, buffer_offset,
+ reloc_flags),
.size = buffer_size,
.format = surface_format,
.stride = pitch,
BRW_SURFACE_MIPMAPLAYOUT_BELOW << BRW_SURFACE_MIPLAYOUT_SHIFT |
surface_format << BRW_SURFACE_FORMAT_SHIFT |
BRW_SURFACE_RC_READ_WRITE;
- surf[1] = brw_emit_reloc(&brw->batch,
- *out_offset + 4, bo, offset_bytes, RELOC_WRITE);
+ surf[1] = brw_state_reloc(&brw->batch,
+ *out_offset + 4, bo, offset_bytes, RELOC_WRITE);
surf[2] = (width << BRW_SURFACE_WIDTH_SHIFT |
height << BRW_SURFACE_HEIGHT_SHIFT);
surf[3] = (depth << BRW_SURFACE_DEPTH_SHIFT |
surf[0] = (BRW_SURFACE_2D << BRW_SURFACE_TYPE_SHIFT |
ISL_FORMAT_B8G8R8A8_UNORM << BRW_SURFACE_FORMAT_SHIFT);
- surf[1] = brw_emit_reloc(&brw->batch, *out_offset + 4,
- brw->wm.multisampled_null_render_target_bo,
- 0, RELOC_WRITE);
+ surf[1] = brw_state_reloc(&brw->batch, *out_offset + 4,
+ brw->wm.multisampled_null_render_target_bo,
+ 0, RELOC_WRITE);
surf[2] = ((width - 1) << BRW_SURFACE_WIDTH_SHIFT |
(height - 1) << BRW_SURFACE_HEIGHT_SHIFT);
/* reloc */
assert(mt->offset % mt->cpp == 0);
- surf[1] = brw_emit_reloc(&brw->batch, offset + 4, mt->bo,
- mt->offset +
- intel_renderbuffer_get_tile_offsets(irb,
- &tile_x,
- &tile_y),
- RELOC_WRITE);
+ surf[1] = brw_state_reloc(&brw->batch, offset + 4, mt->bo,
+ mt->offset +
+ intel_renderbuffer_get_tile_offsets(irb,
+ &tile_x,
+ &tile_y),
+ RELOC_WRITE);
surf[2] = ((rb->Width - 1) << BRW_SURFACE_WIDTH_SHIFT |
(rb->Height - 1) << BRW_SURFACE_HEIGHT_SHIFT);
struct brw_context *brw = batch->driver_batch;
uint32_t offset = (char *)location - (char *)brw->batch.map;
- return brw_emit_reloc(&brw->batch, offset,
- address.buffer, address.offset + delta,
- address.reloc_flags);
+ return brw_batch_reloc(&brw->batch, offset,
+ address.buffer, address.offset + delta,
+ address.reloc_flags);
}
static void
struct brw_bo *bo = address.buffer;
uint64_t reloc_val =
- brw_emit_reloc(&brw->batch, ss_offset, bo, address.offset + delta,
- address.reloc_flags);
+ brw_state_reloc(&brw->batch, ss_offset, bo, address.offset + delta,
+ address.reloc_flags);
void *reloc_ptr = (void *)brw->batch.map + ss_offset;
#if GEN_GEN >= 8
} else {
uint32_t offset = (char *) location - (char *) brw->batch.map;
- return brw_emit_reloc(&brw->batch, offset, address.bo,
- address.offset + delta,
- address.reloc_flags);
+ /* TODO: Use brw_state_reloc for some things on Gen4-5 */
+ return brw_batch_reloc(&brw->batch, offset, address.bo,
+ address.offset + delta,
+ address.reloc_flags);
}
}
/* This is the only way buffers get added to the validate list.
*/
-uint64_t
-brw_emit_reloc(struct intel_batchbuffer *batch, uint32_t batch_offset,
- struct brw_bo *target, uint32_t target_offset,
- unsigned int reloc_flags)
+static uint64_t
+emit_reloc(struct intel_batchbuffer *batch,
+ struct brw_reloc_list *rlist, uint32_t offset,
+ struct brw_bo *target, uint32_t target_offset,
+ unsigned int reloc_flags)
{
- struct brw_reloc_list *rlist = &batch->batch_relocs;
-
assert(target != NULL);
if (rlist->reloc_count == rlist->reloc_array_size) {
sizeof(struct drm_i915_gem_relocation_entry));
}
- /* Check args */
- assert(batch_offset <= batch->bo->size - sizeof(uint32_t));
-
unsigned int index = add_exec_bo(batch, target);
struct drm_i915_gem_exec_object2 *entry = &batch->validation_list[index];
rlist->relocs[rlist->reloc_count++] =
(struct drm_i915_gem_relocation_entry) {
- .offset = batch_offset,
+ .offset = offset,
.delta = target_offset,
.target_handle = batch->use_batch_first ? index : target->gem_handle,
.presumed_offset = entry->offset,
return entry->offset + target_offset;
}
+/* Emit a relocation from the batchbuffer to elsewhere.
+ *
+ * Records the reloc in batch->batch_relocs and returns the presumed
+ * GPU address (entry->offset + target_offset) to write into the batch.
+ */
+uint64_t
+brw_batch_reloc(struct intel_batchbuffer *batch, uint32_t batch_offset,
+               struct brw_bo *target, uint32_t target_offset,
+               unsigned int reloc_flags)
+{
+   assert(batch_offset <= batch->bo->size - sizeof(uint32_t));
+
+   return emit_reloc(batch, &batch->batch_relocs, batch_offset,
+                     target, target_offset, reloc_flags);
+}
+
+/* Emit a relocation from the statebuffer to elsewhere.
+ *
+ * For now this is identical to brw_batch_reloc — it records into
+ * batch->batch_relocs and bounds-checks against the batch BO — because
+ * batch and state still share one buffer.  Once the two buffers are
+ * split, this will target the state buffer instead.
+ */
+uint64_t
+brw_state_reloc(struct intel_batchbuffer *batch, uint32_t state_offset,
+               struct brw_bo *target, uint32_t target_offset,
+               unsigned int reloc_flags)
+{
+   assert(state_offset <= batch->bo->size - sizeof(uint32_t));
+
+   return emit_reloc(batch, &batch->batch_relocs, state_offset,
+                     target, target_offset, reloc_flags);
+}
+
+
uint32_t
brw_state_batch_size(struct brw_context *brw, uint32_t offset)
{
#define RELOC_WRITE EXEC_OBJECT_WRITE
#define RELOC_NEEDS_GGTT EXEC_OBJECT_NEEDS_GTT
-uint64_t brw_emit_reloc(struct intel_batchbuffer *batch,
- uint32_t batch_offset,
- struct brw_bo *target,
- uint32_t target_offset,
- unsigned flags);
+uint64_t brw_batch_reloc(struct intel_batchbuffer *batch,
+ uint32_t batch_offset,
+ struct brw_bo *target,
+ uint32_t target_offset,
+ unsigned flags);
+uint64_t brw_state_reloc(struct intel_batchbuffer *batch,
+ uint32_t batch_offset,
+ struct brw_bo *target,
+ uint32_t target_offset,
+ unsigned flags);
#define USED_BATCH(batch) ((uintptr_t)((batch).map_next - (batch).map))
#define OUT_RELOC(buf, flags, delta) do { \
uint32_t __offset = (__map - brw->batch.map) * 4; \
uint32_t reloc = \
- brw_emit_reloc(&brw->batch, __offset, (buf), (delta), (flags)); \
+ brw_batch_reloc(&brw->batch, __offset, (buf), (delta), (flags)); \
OUT_BATCH(reloc); \
} while (0)
#define OUT_RELOC64(buf, flags, delta) do { \
uint32_t __offset = (__map - brw->batch.map) * 4; \
uint64_t reloc64 = \
- brw_emit_reloc(&brw->batch, __offset, (buf), (delta), (flags)); \
+ brw_batch_reloc(&brw->batch, __offset, (buf), (delta), (flags)); \
OUT_BATCH(reloc64); \
OUT_BATCH(reloc64 >> 32); \
} while (0)