const unsigned entry_size[4]);
void genX(cmd_buffer_so_memcpy)(struct anv_cmd_buffer *cmd_buffer,
- struct anv_bo *dst, uint32_t dst_offset,
- struct anv_bo *src, uint32_t src_offset,
+ struct anv_address dst, struct anv_address src,
uint32_t size);
void genX(cmd_buffer_mi_memcpy)(struct anv_cmd_buffer *cmd_buffer,
- struct anv_bo *dst, uint32_t dst_offset,
- struct anv_bo *src, uint32_t src_offset,
+ struct anv_address dst, struct anv_address src,
uint32_t size);
void genX(blorp_exec)(struct blorp_batch *batch,
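For context, the new entry points take a struct anv_address by value in place of the old (BO, byte offset) pairs. Below is a minimal sketch of the address type and the anv_address_add() helper used throughout the rest of this patch; it assumes the usual definitions in anv_private.h, and the field widths shown are illustrative.

/* Sketch only; the real definitions live in anv_private.h. */
struct anv_address {
   struct anv_bo *bo;
   uint32_t offset;
};

/* Return the same BO with the offset advanced by 'delta' bytes, so call
 * sites can write anv_address_add(src, i) instead of tracking a separate
 * src_offset + i. */
static inline struct anv_address
anv_address_add(struct anv_address addr, uint64_t delta)
{
   addr.offset += delta;
   return addr;
}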
assert(cmd_buffer && image);
assert(image->aspects & VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV);
- struct anv_bo *ss_bo =
- &cmd_buffer->device->surface_state_pool.block_pool.bo;
- uint32_t ss_clear_offset = surface_state.offset +
- cmd_buffer->device->isl_dev.ss.clear_value_offset;
+ struct anv_address ss_clear_addr = {
+ .bo = &cmd_buffer->device->surface_state_pool.block_pool.bo,
+ .offset = surface_state.offset +
+ cmd_buffer->device->isl_dev.ss.clear_value_offset,
+ };
const struct anv_address entry_addr =
anv_image_get_clear_color_addr(cmd_buffer->device, image, aspect);
unsigned copy_size = cmd_buffer->device->isl_dev.ss.clear_value_size;
if (copy_from_surface_state) {
- genX(cmd_buffer_mi_memcpy)(cmd_buffer, entry_addr.bo, entry_addr.offset,
- ss_bo, ss_clear_offset, copy_size);
+ genX(cmd_buffer_mi_memcpy)(cmd_buffer, entry_addr,
+ ss_clear_addr, copy_size);
} else {
- genX(cmd_buffer_mi_memcpy)(cmd_buffer, ss_bo, ss_clear_offset,
- entry_addr.bo, entry_addr.offset, copy_size);
+ genX(cmd_buffer_mi_memcpy)(cmd_buffer, ss_clear_addr,
+ entry_addr, copy_size);
/* Updating a surface state object may require that the state cache be
* invalidated. From the SKL PRM, Shared Functions -> State -> State
struct anv_state dst_state = secondary->state.render_pass_states;
assert(src_state.alloc_size == dst_state.alloc_size);
- genX(cmd_buffer_so_memcpy)(primary, ss_bo, dst_state.offset,
- ss_bo, src_state.offset,
+ genX(cmd_buffer_so_memcpy)(primary,
+ (struct anv_address) {
+ .bo = ss_bo,
+ .offset = dst_state.offset,
+ },
+ (struct anv_address) {
+ .bo = ss_bo,
+ .offset = src_state.offset,
+ },
src_state.alloc_size);
}
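Call sites that used to pass (bo, offset) pairs now build an anv_address up front, either as a compound literal (as above) or in a named variable. A hypothetical caller, not part of this patch, copying a region between two buffers could look like the following; the anv_buffer fields used here (bo, offset) are assumptions about the surrounding driver code.

/* Hypothetical usage sketch, not from this patch. */
static void
copy_buffer_region(struct anv_cmd_buffer *cmd_buffer,
                   struct anv_buffer *dst, uint64_t dst_offset,
                   struct anv_buffer *src, uint64_t src_offset,
                   uint32_t size)
{
   const struct anv_address dst_addr = {
      .bo = dst->bo,
      .offset = dst->offset + dst_offset,
   };
   const struct anv_address src_addr = {
      .bo = src->bo,
      .offset = src->offset + src_offset,
   };

   genX(cmd_buffer_so_memcpy)(cmd_buffer, dst_addr, src_addr, size);
}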
void
genX(cmd_buffer_mi_memcpy)(struct anv_cmd_buffer *cmd_buffer,
- struct anv_bo *dst, uint32_t dst_offset,
- struct anv_bo *src, uint32_t src_offset,
+ struct anv_address dst, struct anv_address src,
uint32_t size)
{
/* This memcpy operates in units of dwords. */
assert(size % 4 == 0);
- assert(dst_offset % 4 == 0);
- assert(src_offset % 4 == 0);
+ assert(dst.offset % 4 == 0);
+ assert(src.offset % 4 == 0);
#if GEN_GEN == 7
/* On gen7, the combination of commands used here (MI_LOAD_REGISTER_MEM
#endif
for (uint32_t i = 0; i < size; i += 4) {
- const struct anv_address src_addr =
- (struct anv_address) { src, src_offset + i};
- const struct anv_address dst_addr =
- (struct anv_address) { dst, dst_offset + i};
#if GEN_GEN >= 8
anv_batch_emit(&cmd_buffer->batch, GENX(MI_COPY_MEM_MEM), cp) {
- cp.DestinationMemoryAddress = dst_addr;
- cp.SourceMemoryAddress = src_addr;
+ cp.DestinationMemoryAddress = anv_address_add(dst, i);
+ cp.SourceMemoryAddress = anv_address_add(src, i);
}
#else
/* IVB does not have a general purpose register for command streamer
#define TEMP_REG 0x2440 /* GEN7_3DPRIM_BASE_VERTEX */
anv_batch_emit(&cmd_buffer->batch, GENX(MI_LOAD_REGISTER_MEM), load) {
load.RegisterAddress = TEMP_REG;
- load.MemoryAddress = src_addr;
+ load.MemoryAddress = anv_address_add(src, i);
}
anv_batch_emit(&cmd_buffer->batch, GENX(MI_STORE_REGISTER_MEM), store) {
store.RegisterAddress = TEMP_REG;
- store.MemoryAddress = dst_addr;
+ store.MemoryAddress = anv_address_add(dst, i);
}
#undef TEMP_REG
#endif
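For the MI-based copy, the dword-granularity requirements asserted above still apply to both offsets and to the size under the new signature. A small illustrative caller, not from this patch, with a 64-byte copy chosen only as an example:

/* Illustrative only: emits size / 4 copies, i.e. one MI_COPY_MEM_MEM per
 * dword on gen8+, or an MI_LOAD_REGISTER_MEM + MI_STORE_REGISTER_MEM pair
 * per dword on gen7. */
static void
example_copy_64_bytes(struct anv_cmd_buffer *cmd_buffer,
                      struct anv_address dst, struct anv_address src)
{
   const uint32_t size = 64; /* must be a multiple of 4 */
   assert(dst.offset % 4 == 0);
   assert(src.offset % 4 == 0);

   genX(cmd_buffer_mi_memcpy)(cmd_buffer, dst, src, size);
}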
void
genX(cmd_buffer_so_memcpy)(struct anv_cmd_buffer *cmd_buffer,
- struct anv_bo *dst, uint32_t dst_offset,
- struct anv_bo *src, uint32_t src_offset,
+ struct anv_address dst, struct anv_address src,
uint32_t size)
{
if (size == 0)
return;
- assert(dst_offset + size <= dst->size);
- assert(src_offset + size <= src->size);
+ assert(dst.offset + size <= dst.bo->size);
+ assert(src.offset + size <= src.bo->size);
/* The maximum copy block size is 4 32-bit components at a time. */
unsigned bs = 16;
&(struct GENX(VERTEX_BUFFER_STATE)) {
.VertexBufferIndex = 32, /* Reserved for this */
.AddressModifyEnable = true,
- .BufferStartingAddress = { src, src_offset },
+ .BufferStartingAddress = src,
.BufferPitch = bs,
#if (GEN_GEN >= 8)
.MemoryObjectControlState = GENX(MOCS),
.BufferSize = size,
#else
.VertexBufferMemoryObjectControlState = GENX(MOCS),
- .EndAddress = { src, src_offset + size - 1 },
+ .EndAddress = anv_address_add(src, size - 1),
#endif
});
anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_SO_BUFFER), sob) {
sob.SOBufferIndex = 0;
sob.SOBufferObjectControlState = GENX(MOCS);
- sob.SurfaceBaseAddress = (struct anv_address) { dst, dst_offset };
+ sob.SurfaceBaseAddress = dst;
#if GEN_GEN >= 8
sob.SOBufferEnable = true;
sob.SurfaceSize = size / 4 - 1;
#else
sob.SurfacePitch = bs;
- sob.SurfaceEndAddress = sob.SurfaceBaseAddress;
- sob.SurfaceEndAddress.offset += size;
+ sob.SurfaceEndAddress = anv_address_add(dst, size);
#endif
#if GEN_GEN >= 8