* Marek Olšák <marek.olsak@amd.com>
*/
#include "../radeon/r600_cs.h"
-#include "radeonsi_pipe.h"
-#include "radeonsi_resource.h"
-#include "radeonsi_shader.h"
+#include "si_pipe.h"
+#include "si_shader.h"
+#include "sid.h"
#include "util/u_memory.h"
+#include "util/u_upload_mgr.h"
#define SI_NUM_CONTEXTS 16
/* Emit a CP DMA packet to do a copy from one buffer to another.
* The size must fit in bits [20:0].
*/
-static void si_emit_cp_dma_copy_buffer(struct r600_context *rctx,
+static void si_emit_cp_dma_copy_buffer(struct si_context *sctx,
uint64_t dst_va, uint64_t src_va,
unsigned size, unsigned flags)
{
- struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
+ struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs;
uint32_t sync_flag = flags & R600_CP_DMA_SYNC ? PKT3_CP_DMA_CP_SYNC : 0;
uint32_t raw_wait = flags & SI_CP_DMA_RAW_WAIT ? PKT3_CP_DMA_CMD_RAW_WAIT : 0;
assert(size);
assert((size & ((1<<21)-1)) == size);
- if (rctx->b.chip_class >= CIK) {
+ if (sctx->b.chip_class >= CIK) {
radeon_emit(cs, PKT3(PKT3_DMA_DATA, 5, 0));
radeon_emit(cs, sync_flag); /* CP_SYNC [31] */
radeon_emit(cs, src_va); /* SRC_ADDR_LO [31:0] */
}
/* Emit a CP DMA packet to clear a buffer. The size must fit in bits [20:0]. */
-static void si_emit_cp_dma_clear_buffer(struct r600_context *rctx,
+static void si_emit_cp_dma_clear_buffer(struct si_context *sctx,
uint64_t dst_va, unsigned size,
uint32_t clear_value, unsigned flags)
{
- struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
+ struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs;
uint32_t sync_flag = flags & R600_CP_DMA_SYNC ? PKT3_CP_DMA_CP_SYNC : 0;
uint32_t raw_wait = flags & SI_CP_DMA_RAW_WAIT ? PKT3_CP_DMA_CMD_RAW_WAIT : 0;
assert(size);
assert((size & ((1<<21)-1)) == size);
- if (rctx->b.chip_class >= CIK) {
+ if (sctx->b.chip_class >= CIK) {
radeon_emit(cs, PKT3(PKT3_DMA_DATA, 5, 0));
radeon_emit(cs, sync_flag | PKT3_CP_DMA_SRC_SEL(2)); /* CP_SYNC [31] | SRC_SEL[30:29] */
radeon_emit(cs, clear_value); /* DATA [31:0] */
}
}
-static void si_init_descriptors(struct r600_context *rctx,
+static void si_init_descriptors(struct si_context *sctx,
struct si_descriptors *desc,
unsigned shader_userdata_reg,
unsigned element_dw_size,
unsigned num_elements,
- void (*emit_func)(struct r600_context *ctx, struct r600_atom *state))
+ void (*emit_func)(struct si_context *ctx, struct r600_atom *state))
{
uint64_t va;
desc->context_size = num_elements * element_dw_size * 4;
desc->buffer = (struct r600_resource*)
- pipe_buffer_create(rctx->b.b.screen, PIPE_BIND_CUSTOM,
- PIPE_USAGE_STATIC,
+ pipe_buffer_create(sctx->b.b.screen, PIPE_BIND_CUSTOM,
+ PIPE_USAGE_DEFAULT,
SI_NUM_CONTEXTS * desc->context_size);
- r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, desc->buffer, RADEON_USAGE_READWRITE);
- va = r600_resource_va(rctx->b.b.screen, &desc->buffer->b.b);
+ r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx, desc->buffer, RADEON_USAGE_READWRITE);
+ va = r600_resource_va(sctx->b.b.screen, &desc->buffer->b.b);
/* We don't check for CS space here, because this should be called
* only once at context initialization. */
- si_emit_cp_dma_clear_buffer(rctx, va, desc->buffer->b.b.width0, 0,
+ si_emit_cp_dma_clear_buffer(sctx, va, desc->buffer->b.b.width0, 0,
R600_CP_DMA_SYNC);
}
pipe_resource_reference((struct pipe_resource**)&desc->buffer, NULL);
}
-static void si_update_descriptors(struct r600_context *rctx,
+static void si_update_descriptors(struct si_context *sctx,
struct si_descriptors *desc)
{
if (desc->dirty_mask) {
7 + /* copy */
(4 + desc->element_dw_size) * util_bitcount(desc->dirty_mask) + /* update */
4; /* pointer update */
+#if HAVE_LLVM >= 0x0305
+ if (desc->shader_userdata_reg >= R_00B130_SPI_SHADER_USER_DATA_VS_0 &&
+ desc->shader_userdata_reg < R_00B230_SPI_SHADER_USER_DATA_GS_0)
+ desc->atom.num_dw += 4; /* second pointer update */
+#endif
desc->atom.dirty = true;
/* The descriptors are read with the K cache. */
- rctx->b.flags |= R600_CONTEXT_INV_CONST_CACHE;
+ sctx->b.flags |= R600_CONTEXT_INV_CONST_CACHE;
} else {
desc->atom.dirty = false;
}
}
-static void si_emit_shader_pointer(struct r600_context *rctx,
+static void si_emit_shader_pointer(struct si_context *sctx,
struct si_descriptors *desc)
{
- struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
- uint64_t va = r600_resource_va(rctx->b.b.screen, &desc->buffer->b.b) +
+ struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs;
+ uint64_t va = r600_resource_va(sctx->b.b.screen, &desc->buffer->b.b) +
desc->current_context_id * desc->context_size;
radeon_emit(cs, PKT3(PKT3_SET_SH_REG, 2, 0));
radeon_emit(cs, (desc->shader_userdata_reg - SI_SH_REG_OFFSET) >> 2);
radeon_emit(cs, va);
radeon_emit(cs, va >> 32);
+
+#if HAVE_LLVM >= 0x0305
+ if (desc->shader_userdata_reg >= R_00B130_SPI_SHADER_USER_DATA_VS_0 &&
+ desc->shader_userdata_reg < R_00B230_SPI_SHADER_USER_DATA_GS_0) {
+ radeon_emit(cs, PKT3(PKT3_SET_SH_REG, 2, 0));
+ radeon_emit(cs, (desc->shader_userdata_reg +
+ (R_00B330_SPI_SHADER_USER_DATA_ES_0 -
+ R_00B130_SPI_SHADER_USER_DATA_VS_0) -
+ SI_SH_REG_OFFSET) >> 2);
+ radeon_emit(cs, va);
+ radeon_emit(cs, va >> 32);
+ }
+#endif
}
-static void si_emit_descriptors(struct r600_context *rctx,
+static void si_emit_descriptors(struct si_context *sctx,
struct si_descriptors *desc,
uint32_t **descriptors)
{
- struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
+ struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs;
uint64_t va_base;
int packet_start;
int packet_size = 0;
assert(dirty_mask);
- va_base = r600_resource_va(rctx->b.b.screen, &desc->buffer->b.b);
+ va_base = r600_resource_va(sctx->b.b.screen, &desc->buffer->b.b);
/* Copy the descriptors to a new context slot. */
/* XXX Consider using TC or L2 for this copy on CIK. */
- si_emit_cp_dma_copy_buffer(rctx,
+ si_emit_cp_dma_copy_buffer(sctx,
va_base + new_context_id * desc->context_size,
va_base + desc->current_context_id * desc->context_size,
desc->context_size, R600_CP_DMA_SYNC);
desc->current_context_id = new_context_id;
/* Now update the shader userdata pointer. */
- si_emit_shader_pointer(rctx, desc);
+ si_emit_shader_pointer(sctx, desc);
}
static unsigned si_get_shader_user_data_base(unsigned shader)
/* SAMPLER VIEWS */
-static void si_emit_sampler_views(struct r600_context *rctx, struct r600_atom *atom)
+static void si_emit_sampler_views(struct si_context *sctx, struct r600_atom *atom)
{
struct si_sampler_views *views = (struct si_sampler_views*)atom;
- si_emit_descriptors(rctx, &views->desc, views->desc_data);
+ si_emit_descriptors(sctx, &views->desc, views->desc_data);
}
-static void si_init_sampler_views(struct r600_context *rctx,
+static void si_init_sampler_views(struct si_context *sctx,
struct si_sampler_views *views,
unsigned shader)
{
- si_init_descriptors(rctx, &views->desc,
+ si_init_descriptors(sctx, &views->desc,
si_get_shader_user_data_base(shader) +
SI_SGPR_RESOURCE * 4,
8, NUM_SAMPLER_VIEWS, si_emit_sampler_views);
si_release_descriptors(&views->desc);
}
-static void si_sampler_views_begin_new_cs(struct r600_context *rctx,
+static void si_sampler_views_begin_new_cs(struct si_context *sctx,
struct si_sampler_views *views)
{
unsigned mask = views->desc.enabled_mask;
struct si_pipe_sampler_view *rview =
(struct si_pipe_sampler_view*)views->views[i];
- r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, rview->resource, RADEON_USAGE_READ);
+ r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx, rview->resource, RADEON_USAGE_READ);
}
- r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, views->desc.buffer, RADEON_USAGE_READWRITE);
+ r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx, views->desc.buffer, RADEON_USAGE_READWRITE);
- si_emit_shader_pointer(rctx, &views->desc);
+ si_emit_shader_pointer(sctx, &views->desc);
}
-void si_set_sampler_view(struct r600_context *rctx, unsigned shader,
+void si_set_sampler_view(struct si_context *sctx, unsigned shader,
unsigned slot, struct pipe_sampler_view *view,
unsigned *view_desc)
{
- struct si_sampler_views *views = &rctx->samplers[shader].views;
+ struct si_sampler_views *views = &sctx->samplers[shader].views;
if (views->views[slot] == view)
return;
struct si_pipe_sampler_view *rview =
(struct si_pipe_sampler_view*)view;
- r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, rview->resource, RADEON_USAGE_READ);
+ r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx, rview->resource, RADEON_USAGE_READ);
pipe_sampler_view_reference(&views->views[slot], view);
views->desc_data[slot] = view_desc;
}
views->desc.dirty_mask |= 1 << slot;
- si_update_descriptors(rctx, &views->desc);
+ si_update_descriptors(sctx, &views->desc);
}
/* BUFFER RESOURCES */
-static void si_emit_buffer_resources(struct r600_context *rctx, struct r600_atom *atom)
+static void si_emit_buffer_resources(struct si_context *sctx, struct r600_atom *atom)
{
struct si_buffer_resources *buffers = (struct si_buffer_resources*)atom;
- si_emit_descriptors(rctx, &buffers->desc, buffers->desc_data);
+ si_emit_descriptors(sctx, &buffers->desc, buffers->desc_data);
}
-static void si_init_buffer_resources(struct r600_context *rctx,
+static void si_init_buffer_resources(struct si_context *sctx,
struct si_buffer_resources *buffers,
unsigned num_buffers, unsigned shader,
unsigned shader_userdata_index,
buffers->desc_data[i] = &buffers->desc_storage[i*4];
}
- si_init_descriptors(rctx, &buffers->desc,
+ si_init_descriptors(sctx, &buffers->desc,
si_get_shader_user_data_base(shader) +
shader_userdata_index*4, 4, num_buffers,
si_emit_buffer_resources);
si_release_descriptors(&buffers->desc);
}
-static void si_buffer_resources_begin_new_cs(struct r600_context *rctx,
+static void si_buffer_resources_begin_new_cs(struct si_context *sctx,
struct si_buffer_resources *buffers)
{
unsigned mask = buffers->desc.enabled_mask;
while (mask) {
int i = u_bit_scan(&mask);
- r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx,
+ r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx,
(struct r600_resource*)buffers->buffers[i],
buffers->shader_usage);
}
- r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx,
+ r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx,
buffers->desc.buffer, RADEON_USAGE_READWRITE);
- si_emit_shader_pointer(rctx, &buffers->desc);
+ si_emit_shader_pointer(sctx, &buffers->desc);
}
/* CONSTANT BUFFERS */
+/* Upload a user-memory constant buffer into GPU-accessible memory via the
+ * context's upload manager.
+ *
+ * On big-endian builds each 32-bit word is byte-swapped into a temporary
+ * buffer first (the GPU consumes little-endian data). *rbuffer receives the
+ * upload buffer and *const_offset the byte offset of the data inside it.
+ * If the swap buffer cannot be allocated, an error is logged and the output
+ * parameters are left untouched.
+ */
+void si_upload_const_buffer(struct si_context *sctx, struct r600_resource **rbuffer,
+ const uint8_t *ptr, unsigned size, uint32_t *const_offset)
+{
+ if (SI_BIG_ENDIAN) {
+ uint32_t *tmpPtr;
+ unsigned i;
+
+ if (!(tmpPtr = malloc(size))) {
+ R600_ERR("Failed to allocate BE swap buffer.\n");
+ return;
+ }
+
+ /* Swap dword-by-dword; assumes size is a multiple of 4 —
+ * any trailing bytes would be dropped. */
+ for (i = 0; i < size / 4; ++i) {
+ tmpPtr[i] = util_bswap32(((uint32_t *)ptr)[i]);
+ }
+
+ u_upload_data(sctx->b.uploader, 0, size, tmpPtr, const_offset,
+ (struct pipe_resource**)rbuffer);
+
+ free(tmpPtr);
+ } else {
+ u_upload_data(sctx->b.uploader, 0, size, ptr, const_offset,
+ (struct pipe_resource**)rbuffer);
+ }
+}
+
static void si_set_constant_buffer(struct pipe_context *ctx, uint shader, uint slot,
struct pipe_constant_buffer *input)
{
- struct r600_context *rctx = (struct r600_context *)ctx;
- struct si_buffer_resources *buffers = &rctx->const_buffers[shader];
+ struct si_context *sctx = (struct si_context *)ctx;
+ struct si_buffer_resources *buffers = &sctx->const_buffers[shader];
if (shader >= SI_NUM_SHADERS)
return;
/* CIK cannot unbind a constant buffer (S_BUFFER_LOAD is buggy
* with a NULL buffer). We need to use a dummy buffer instead. */
- if (rctx->b.chip_class == CIK &&
+ if (sctx->b.chip_class == CIK &&
(!input || (!input->buffer && !input->user_buffer)))
- input = &rctx->null_const_buf;
+ input = &sctx->null_const_buf;
if (input && (input->buffer || input->user_buffer)) {
struct pipe_resource *buffer = NULL;
if (input->user_buffer) {
unsigned buffer_offset;
- r600_upload_const_buffer(rctx,
- (struct r600_resource**)&buffer, input->user_buffer,
- input->buffer_size, &buffer_offset);
+ si_upload_const_buffer(sctx,
+ (struct r600_resource**)&buffer, input->user_buffer,
+ input->buffer_size, &buffer_offset);
va = r600_resource_va(ctx->screen, buffer) + buffer_offset;
} else {
pipe_resource_reference(&buffer, input->buffer);
S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
buffers->buffers[slot] = buffer;
- r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx,
+ r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx,
(struct r600_resource*)buffer, buffers->shader_usage);
buffers->desc.enabled_mask |= 1 << slot;
} else {
}
buffers->desc.dirty_mask |= 1 << slot;
- si_update_descriptors(rctx, &buffers->desc);
+ si_update_descriptors(sctx, &buffers->desc);
+}
+
+/* RING BUFFERS */
+
+/* Bind (or unbind, when input is NULL/empty) a read-write ring buffer in
+ * the given shader stage's RW-buffer descriptor slot.
+ *
+ * stride/num_records/add_tid/swizzle and the element_size / index_stride
+ * parameters are packed into the 4-dword SI buffer resource descriptor;
+ * element_size and index_stride arrive in byte units and are converted
+ * below to the 2-bit hardware encodings. The descriptor is marked dirty so
+ * it gets re-emitted on the next descriptor update.
+ */
+void si_set_ring_buffer(struct pipe_context *ctx, uint shader, uint slot,
+ struct pipe_constant_buffer *input,
+ unsigned stride, unsigned num_records,
+ bool add_tid, bool swizzle,
+ unsigned element_size, unsigned index_stride)
+{
+ struct si_context *sctx = (struct si_context *)ctx;
+ struct si_buffer_resources *buffers = &sctx->rw_buffers[shader];
+
+ if (shader >= SI_NUM_SHADERS)
+ return;
+
+ /* The stride field in the resource descriptor has 14 bits */
+ assert(stride < (1 << 14));
+
+ assert(slot < buffers->num_buffers);
+ pipe_resource_reference(&buffers->buffers[slot], NULL);
+
+ if (input && input->buffer) {
+ uint64_t va;
+
+ va = r600_resource_va(ctx->screen, input->buffer);
+
+ /* Convert the element size in bytes to the 2-bit hardware
+ * encoding. The default case intentionally falls through:
+ * an unsupported size asserts in debug builds and degrades
+ * to encoding 0 in release builds. */
+ switch (element_size) {
+ default:
+ assert(!"Unsupported ring buffer element size");
+ /* fallthrough */
+ case 0:
+ case 2:
+ element_size = 0;
+ break;
+ case 4:
+ element_size = 1;
+ break;
+ case 8:
+ element_size = 2;
+ break;
+ case 16:
+ element_size = 3;
+ break;
+ }
+
+ /* Same conversion for the index stride (bytes -> 2-bit
+ * encoding); default falls through as above. */
+ switch (index_stride) {
+ default:
+ assert(!"Unsupported ring buffer index stride");
+ /* fallthrough */
+ case 0:
+ case 8:
+ index_stride = 0;
+ break;
+ case 16:
+ index_stride = 1;
+ break;
+ case 32:
+ index_stride = 2;
+ break;
+ case 64:
+ index_stride = 3;
+ break;
+ }
+
+ /* Set the descriptor. */
+ uint32_t *desc = buffers->desc_data[slot];
+ desc[0] = va;
+ desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) |
+ S_008F04_STRIDE(stride) |
+ S_008F04_SWIZZLE_ENABLE(swizzle);
+ desc[2] = num_records;
+ desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
+ S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
+ S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
+ S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
+ S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
+ S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32) |
+ S_008F0C_ELEMENT_SIZE(element_size) |
+ S_008F0C_INDEX_STRIDE(index_stride) |
+ S_008F0C_ADD_TID_ENABLE(add_tid);
+
+ /* Hold a reference and add a CS relocation so the buffer
+ * stays resident while the command stream uses it. */
+ pipe_resource_reference(&buffers->buffers[slot], input->buffer);
+ r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx,
+ (struct r600_resource*)input->buffer,
+ buffers->shader_usage);
+ buffers->desc.enabled_mask |= 1 << slot;
+ } else {
+ /* Clear the descriptor. */
+ memset(buffers->desc_data[slot], 0, sizeof(uint32_t) * 4);
+ buffers->desc.enabled_mask &= ~(1 << slot);
+ }
+
+ buffers->desc.dirty_mask |= 1 << slot;
+ si_update_descriptors(sctx, &buffers->desc);
+}
/* STREAMOUT BUFFERS */
struct pipe_stream_output_target **targets,
unsigned append_bitmask)
{
- struct r600_context *rctx = (struct r600_context *)ctx;
- struct si_buffer_resources *buffers = &rctx->streamout_buffers;
- unsigned old_num_targets = rctx->b.streamout.num_targets;
- unsigned i;
+ struct si_context *sctx = (struct si_context *)ctx;
+ struct si_buffer_resources *buffers = &sctx->rw_buffers[PIPE_SHADER_VERTEX];
+ unsigned old_num_targets = sctx->b.streamout.num_targets;
+ unsigned i, bufidx;
/* Streamout buffers must be bound in 2 places:
* 1) in VGT by setting the VGT_STRMOUT registers
/* Set the shader resources.*/
for (i = 0; i < num_targets; i++) {
+ bufidx = SI_RW_SO + i;
+
if (targets[i]) {
struct pipe_resource *buffer = targets[i]->buffer;
uint64_t va = r600_resource_va(ctx->screen, buffer);
/* Set the descriptor. */
- uint32_t *desc = buffers->desc_data[i];
+ uint32_t *desc = buffers->desc_data[bufidx];
desc[0] = va;
desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32);
desc[2] = 0xffffffff;
S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W);
/* Set the resource. */
- pipe_resource_reference(&buffers->buffers[i], buffer);
- r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx,
+ pipe_resource_reference(&buffers->buffers[bufidx],
+ buffer);
+ r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx,
(struct r600_resource*)buffer,
buffers->shader_usage);
- buffers->desc.enabled_mask |= 1 << i;
+ buffers->desc.enabled_mask |= 1 << bufidx;
} else {
/* Clear the descriptor and unset the resource. */
- memset(buffers->desc_data[i], 0, sizeof(uint32_t) * 4);
- pipe_resource_reference(&buffers->buffers[i], NULL);
- buffers->desc.enabled_mask &= ~(1 << i);
+ memset(buffers->desc_data[bufidx], 0,
+ sizeof(uint32_t) * 4);
+ pipe_resource_reference(&buffers->buffers[bufidx],
+ NULL);
+ buffers->desc.enabled_mask &= ~(1 << bufidx);
}
- buffers->desc.dirty_mask |= 1 << i;
+ buffers->desc.dirty_mask |= 1 << bufidx;
}
for (; i < old_num_targets; i++) {
+ bufidx = SI_RW_SO + i;
/* Clear the descriptor and unset the resource. */
- memset(buffers->desc_data[i], 0, sizeof(uint32_t) * 4);
- pipe_resource_reference(&buffers->buffers[i], NULL);
- buffers->desc.enabled_mask &= ~(1 << i);
- buffers->desc.dirty_mask |= 1 << i;
+ memset(buffers->desc_data[bufidx], 0, sizeof(uint32_t) * 4);
+ pipe_resource_reference(&buffers->buffers[bufidx], NULL);
+ buffers->desc.enabled_mask &= ~(1 << bufidx);
+ buffers->desc.dirty_mask |= 1 << bufidx;
+ }
+
+ si_update_descriptors(sctx, &buffers->desc);
+}
+
+/* Rewrite the base address in a 4-dword buffer resource descriptor after
+ * its backing buffer has been reallocated.
+ *
+ * The binding's offset within the old buffer is recovered from the
+ * descriptor itself (desc[0] plus BASE_ADDRESS_HI from desc[1]) and
+ * re-applied on top of the new buffer's GPU virtual address. Only the
+ * address fields of the descriptor are touched.
+ */
+static void si_desc_reset_buffer_offset(struct pipe_context *ctx,
+ uint32_t *desc, uint64_t old_buf_va,
+ struct pipe_resource *new_buf)
+{
+ /* Retrieve the buffer offset from the descriptor. */
+ uint64_t old_desc_va =
+ desc[0] | ((uint64_t)G_008F04_BASE_ADDRESS_HI(desc[1]) << 32);
+
+ assert(old_buf_va <= old_desc_va);
+ uint64_t offset_within_buffer = old_desc_va - old_buf_va;
+
+ /* Update the descriptor. */
+ uint64_t va = r600_resource_va(ctx->screen, new_buf) + offset_within_buffer;
+
+ desc[0] = va;
+ desc[1] = (desc[1] & C_008F04_BASE_ADDRESS_HI) |
+ S_008F04_BASE_ADDRESS_HI(va >> 32);
+}
+
+/* BUFFER DISCARD/INVALIDATION */
+
+/* Reallocate a buffer and update all resource bindings where the buffer is
+ * bound.
+ *
+ * This is used to avoid CPU-GPU synchronizations, because it makes the buffer
+ * idle by discarding its contents. Apps usually tell us when to do this using
+ * map_buffer flags, for example.
+ */
+static void si_invalidate_buffer(struct pipe_context *ctx, struct pipe_resource *buf)
+{
+ struct si_context *sctx = (struct si_context*)ctx;
+ struct r600_resource *rbuffer = r600_resource(buf);
+ unsigned i, shader, alignment = rbuffer->buf->alignment;
+ uint64_t old_va = r600_resource_va(ctx->screen, buf);
+
+ /* Discard the buffer. */
+ pb_reference(&rbuffer->buf, NULL);
+
+ /* Create a new one in the same pipe_resource. */
+ r600_init_resource(&sctx->screen->b, rbuffer, rbuffer->b.b.width0,
+ alignment, TRUE);
+
+ /* We changed the buffer, now we need to bind it where the old one
+ * was bound. This consists of 2 things:
+ * 1) Updating the resource descriptor and dirtying it.
+ * 2) Adding a relocation to the CS, so that it's usable.
+ */
+
+ /* Vertex buffers. */
+ /* Nothing to do. Vertex buffer bindings are updated before every draw call. */
+
+ /* Read/Write buffers. */
+ for (shader = 0; shader < SI_NUM_SHADERS; shader++) {
+ struct si_buffer_resources *buffers = &sctx->rw_buffers[shader];
+ /* "found" batches the descriptor update: run it once per
+ * stage, after all slots referencing buf are patched. */
+ bool found = false;
+ uint32_t mask = buffers->desc.enabled_mask;
+
+ while (mask) {
+ i = u_bit_scan(&mask);
+ if (buffers->buffers[i] == buf) {
+ si_desc_reset_buffer_offset(ctx, buffers->desc_data[i],
+ old_va, buf);
+
+ r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx,
+ rbuffer, buffers->shader_usage);
+
+ buffers->desc.dirty_mask |= 1 << i;
+ found = true;
+
+ /* Slots >= SI_RW_SO on the VS stage hold
+ * streamout targets, so streamout state
+ * must be restarted against the new BO. */
+ if (i >= SI_RW_SO && shader == PIPE_SHADER_VERTEX) {
+ /* Update the streamout state. */
+ if (sctx->b.streamout.begin_emitted) {
+ r600_emit_streamout_end(&sctx->b);
+ }
+ sctx->b.streamout.append_bitmask =
+ sctx->b.streamout.enabled_mask;
+ r600_streamout_buffers_dirty(&sctx->b);
+ }
+ }
+ }
+ if (found) {
+ si_update_descriptors(sctx, &buffers->desc);
+ }
 }
- si_update_descriptors(rctx, &buffers->desc);
+ /* Constant buffers. */
+ for (shader = 0; shader < SI_NUM_SHADERS; shader++) {
+ struct si_buffer_resources *buffers = &sctx->const_buffers[shader];
+ bool found = false;
+ uint32_t mask = buffers->desc.enabled_mask;
+
+ while (mask) {
+ unsigned i = u_bit_scan(&mask);
+ if (buffers->buffers[i] == buf) {
+ si_desc_reset_buffer_offset(ctx, buffers->desc_data[i],
+ old_va, buf);
+
+ r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx,
+ rbuffer, buffers->shader_usage);
+
+ buffers->desc.dirty_mask |= 1 << i;
+ found = true;
+ }
+ }
+ if (found) {
+ si_update_descriptors(sctx, &buffers->desc);
+ }
+ }
+
+ /* Texture buffers. */
+ for (shader = 0; shader < SI_NUM_SHADERS; shader++) {
+ struct si_sampler_views *views = &sctx->samplers[shader].views;
+ bool found = false;
+ uint32_t mask = views->desc.enabled_mask;
+
+ while (mask) {
+ unsigned i = u_bit_scan(&mask);
+ if (views->views[i]->texture == buf) {
+ /* This updates the sampler view directly. */
+ si_desc_reset_buffer_offset(ctx, views->desc_data[i],
+ old_va, buf);
+
+ r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx,
+ rbuffer, RADEON_USAGE_READ);
+
+ views->desc.dirty_mask |= 1 << i;
+ found = true;
+ }
+ }
+ if (found) {
+ si_update_descriptors(sctx, &views->desc);
+ }
+ }
 }
/* CP DMA */
static void si_clear_buffer(struct pipe_context *ctx, struct pipe_resource *dst,
unsigned offset, unsigned size, unsigned value)
{
- struct r600_context *rctx = (struct r600_context*)ctx;
+ struct si_context *sctx = (struct si_context*)ctx;
if (!size)
return;
+ /* Mark the buffer range of destination as valid (initialized),
+ * so that transfer_map knows it should wait for the GPU when mapping
+ * that range. */
+ util_range_add(&r600_resource(dst)->valid_buffer_range, offset,
+ offset + size);
+
/* Fallback for unaligned clears. */
if (offset % 4 != 0 || size % 4 != 0) {
- uint32_t *map = rctx->b.ws->buffer_map(r600_resource(dst)->cs_buf,
- rctx->b.rings.gfx.cs,
+ uint32_t *map = sctx->b.ws->buffer_map(r600_resource(dst)->cs_buf,
+ sctx->b.rings.gfx.cs,
PIPE_TRANSFER_WRITE);
size /= 4;
for (unsigned i = 0; i < size; i++)
*map++ = value;
-
- util_range_add(&r600_resource(dst)->valid_buffer_range, offset,
- offset + size);
return;
}
- uint64_t va = r600_resource_va(&rctx->screen->b.b, dst) + offset;
+ uint64_t va = r600_resource_va(&sctx->screen->b.b, dst) + offset;
/* Flush the caches where the resource is bound. */
/* XXX only flush the caches where the buffer is bound. */
- rctx->b.flags |= R600_CONTEXT_INV_TEX_CACHE |
+ sctx->b.flags |= R600_CONTEXT_INV_TEX_CACHE |
R600_CONTEXT_INV_CONST_CACHE |
R600_CONTEXT_FLUSH_AND_INV_CB |
R600_CONTEXT_FLUSH_AND_INV_DB |
R600_CONTEXT_FLUSH_AND_INV_CB_META |
R600_CONTEXT_FLUSH_AND_INV_DB_META;
- rctx->b.flags |= R600_CONTEXT_WAIT_3D_IDLE;
+ sctx->b.flags |= R600_CONTEXT_WAIT_3D_IDLE;
while (size) {
unsigned byte_count = MIN2(size, CP_DMA_MAX_BYTE_COUNT);
unsigned dma_flags = 0;
- si_need_cs_space(rctx, 7 + (rctx->b.flags ? rctx->cache_flush.num_dw : 0),
+ si_need_cs_space(sctx, 7 + (sctx->b.flags ? sctx->cache_flush.num_dw : 0),
FALSE);
/* This must be done after need_cs_space. */
- r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx,
+ r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx,
(struct r600_resource*)dst, RADEON_USAGE_WRITE);
/* Flush the caches for the first copy only.
* Also wait for the previous CP DMA operations. */
- if (rctx->b.flags) {
- si_emit_cache_flush(&rctx->b, NULL);
+ if (sctx->b.flags) {
+ si_emit_cache_flush(&sctx->b, NULL);
dma_flags |= SI_CP_DMA_RAW_WAIT; /* same as WAIT_UNTIL=CP_DMA_IDLE */
}
dma_flags |= R600_CP_DMA_SYNC;
/* Emit the clear packet. */
- si_emit_cp_dma_clear_buffer(rctx, va, byte_count, value, dma_flags);
+ si_emit_cp_dma_clear_buffer(sctx, va, byte_count, value, dma_flags);
size -= byte_count;
va += byte_count;
/* Flush the caches again in case the 3D engine has been prefetching
* the resource. */
/* XXX only flush the caches where the buffer is bound. */
- rctx->b.flags |= R600_CONTEXT_INV_TEX_CACHE |
+ sctx->b.flags |= R600_CONTEXT_INV_TEX_CACHE |
R600_CONTEXT_INV_CONST_CACHE |
R600_CONTEXT_FLUSH_AND_INV_CB |
R600_CONTEXT_FLUSH_AND_INV_DB |
R600_CONTEXT_FLUSH_AND_INV_CB_META |
R600_CONTEXT_FLUSH_AND_INV_DB_META;
-
- util_range_add(&r600_resource(dst)->valid_buffer_range, offset,
- offset + size);
}
-void si_copy_buffer(struct r600_context *rctx,
+void si_copy_buffer(struct si_context *sctx,
struct pipe_resource *dst, struct pipe_resource *src,
uint64_t dst_offset, uint64_t src_offset, unsigned size)
{
if (!size)
return;
- dst_offset += r600_resource_va(&rctx->screen->b.b, dst);
- src_offset += r600_resource_va(&rctx->screen->b.b, src);
+ /* Mark the buffer range of destination as valid (initialized),
+ * so that transfer_map knows it should wait for the GPU when mapping
+ * that range. */
+ util_range_add(&r600_resource(dst)->valid_buffer_range, dst_offset,
+ dst_offset + size);
+
+ dst_offset += r600_resource_va(&sctx->screen->b.b, dst);
+ src_offset += r600_resource_va(&sctx->screen->b.b, src);
/* Flush the caches where the resource is bound. */
- rctx->b.flags |= R600_CONTEXT_INV_TEX_CACHE |
+ sctx->b.flags |= R600_CONTEXT_INV_TEX_CACHE |
R600_CONTEXT_INV_CONST_CACHE |
R600_CONTEXT_FLUSH_AND_INV_CB |
R600_CONTEXT_FLUSH_AND_INV_DB |
unsigned sync_flags = 0;
unsigned byte_count = MIN2(size, CP_DMA_MAX_BYTE_COUNT);
- si_need_cs_space(rctx, 7 + (rctx->b.flags ? rctx->cache_flush.num_dw : 0), FALSE);
+ si_need_cs_space(sctx, 7 + (sctx->b.flags ? sctx->cache_flush.num_dw : 0), FALSE);
/* Flush the caches for the first copy only. Also wait for old CP DMA packets to complete. */
- if (rctx->b.flags) {
- si_emit_cache_flush(&rctx->b, NULL);
+ if (sctx->b.flags) {
+ si_emit_cache_flush(&sctx->b, NULL);
sync_flags |= SI_CP_DMA_RAW_WAIT;
}
}
/* This must be done after r600_need_cs_space. */
- r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, (struct r600_resource*)src, RADEON_USAGE_READ);
- r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, (struct r600_resource*)dst, RADEON_USAGE_WRITE);
+ r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx, (struct r600_resource*)src, RADEON_USAGE_READ);
+ r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx, (struct r600_resource*)dst, RADEON_USAGE_WRITE);
- si_emit_cp_dma_copy_buffer(rctx, dst_offset, src_offset, byte_count, sync_flags);
+ si_emit_cp_dma_copy_buffer(sctx, dst_offset, src_offset, byte_count, sync_flags);
size -= byte_count;
src_offset += byte_count;
dst_offset += byte_count;
}
- rctx->b.flags |= R600_CONTEXT_INV_TEX_CACHE |
+ sctx->b.flags |= R600_CONTEXT_INV_TEX_CACHE |
R600_CONTEXT_INV_CONST_CACHE |
R600_CONTEXT_FLUSH_AND_INV_CB |
R600_CONTEXT_FLUSH_AND_INV_DB |
R600_CONTEXT_FLUSH_AND_INV_CB_META |
R600_CONTEXT_FLUSH_AND_INV_DB_META;
-
- util_range_add(&r600_resource(dst)->valid_buffer_range, dst_offset,
- dst_offset + size);
}
/* INIT/DEINIT */
-void si_init_all_descriptors(struct r600_context *rctx)
+void si_init_all_descriptors(struct si_context *sctx)
{
int i;
for (i = 0; i < SI_NUM_SHADERS; i++) {
- si_init_buffer_resources(rctx, &rctx->const_buffers[i],
+ si_init_buffer_resources(sctx, &sctx->const_buffers[i],
NUM_CONST_BUFFERS, i, SI_SGPR_CONST,
RADEON_USAGE_READ);
+ si_init_buffer_resources(sctx, &sctx->rw_buffers[i],
+ i == PIPE_SHADER_VERTEX ?
+ SI_RW_SO + 4 : SI_RW_SO,
+ i, SI_SGPR_RW_BUFFERS,
+ RADEON_USAGE_READWRITE);
- si_init_sampler_views(rctx, &rctx->samplers[i].views, i);
+ si_init_sampler_views(sctx, &sctx->samplers[i].views, i);
- rctx->atoms.const_buffers[i] = &rctx->const_buffers[i].desc.atom;
- rctx->atoms.sampler_views[i] = &rctx->samplers[i].views.desc.atom;
+ sctx->atoms.const_buffers[i] = &sctx->const_buffers[i].desc.atom;
+ sctx->atoms.rw_buffers[i] = &sctx->rw_buffers[i].desc.atom;
+ sctx->atoms.sampler_views[i] = &sctx->samplers[i].views.desc.atom;
}
- si_init_buffer_resources(rctx, &rctx->streamout_buffers, 4, PIPE_SHADER_VERTEX,
- SI_SGPR_SO_BUFFER, RADEON_USAGE_WRITE);
- rctx->atoms.streamout_buffers = &rctx->streamout_buffers.desc.atom;
/* Set pipe_context functions. */
- rctx->b.b.set_constant_buffer = si_set_constant_buffer;
- rctx->b.b.set_stream_output_targets = si_set_streamout_targets;
- rctx->b.clear_buffer = si_clear_buffer;
+ sctx->b.b.set_constant_buffer = si_set_constant_buffer;
+ sctx->b.b.set_stream_output_targets = si_set_streamout_targets;
+ sctx->b.clear_buffer = si_clear_buffer;
+ sctx->b.invalidate_buffer = si_invalidate_buffer;
}
-void si_release_all_descriptors(struct r600_context *rctx)
+void si_release_all_descriptors(struct si_context *sctx)
{
int i;
for (i = 0; i < SI_NUM_SHADERS; i++) {
- si_release_buffer_resources(&rctx->const_buffers[i]);
- si_release_sampler_views(&rctx->samplers[i].views);
+ si_release_buffer_resources(&sctx->const_buffers[i]);
+ si_release_buffer_resources(&sctx->rw_buffers[i]);
+ si_release_sampler_views(&sctx->samplers[i].views);
}
- si_release_buffer_resources(&rctx->streamout_buffers);
}
-void si_all_descriptors_begin_new_cs(struct r600_context *rctx)
+void si_all_descriptors_begin_new_cs(struct si_context *sctx)
{
int i;
for (i = 0; i < SI_NUM_SHADERS; i++) {
- si_buffer_resources_begin_new_cs(rctx, &rctx->const_buffers[i]);
- si_sampler_views_begin_new_cs(rctx, &rctx->samplers[i].views);
+ si_buffer_resources_begin_new_cs(sctx, &sctx->const_buffers[i]);
+ si_buffer_resources_begin_new_cs(sctx, &sctx->rw_buffers[i]);
+ si_sampler_views_begin_new_cs(sctx, &sctx->samplers[i].views);
}
- si_buffer_resources_begin_new_cs(rctx, &rctx->streamout_buffers);
}