rbuffer->b.b.screen = screen;
rbuffer->b.vtbl = &r600_buffer_vtbl;
rbuffer->buf = NULL;
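+ /* No bind points have been recorded for this buffer yet. */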
+ rbuffer->bind_history = 0;
rbuffer->TC_L2_dirty = false;
rbuffer->is_shared = false;
util_range_init(&rbuffer->valid_buffer_range);
unsigned bo_alignment;
enum radeon_bo_domain domains;
enum radeon_bo_flag flags;
+ unsigned bind_history; /* bitmask of PIPE_BIND_xxx */
/* The buffer range which is initialized (with a write transfer,
* streamout, DMA, or as a random access target). The rest of
pipe_sampler_view_reference(&views->views[slot], view);
memcpy(desc, rview->state, 8*4);
- if (rtex->resource.b.b.target != PIPE_BUFFER) {
+ if (rtex->resource.b.b.target == PIPE_BUFFER) {
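+ /* Record that the buffer has been bound as a sampler view. */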
+ rtex->resource.bind_history |= PIPE_BIND_SAMPLER_VIEW;
+ } else {
bool is_separate_stencil =
rtex->db_compatible &&
rview->is_stencil_sampler;
view->u.buf.size,
descs->list + slot * 8);
images->compressed_colortex_mask &= ~(1 << slot);
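+ /* Record that the buffer has been bound as a shader image. */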
+ res->bind_history |= PIPE_BIND_SHADER_IMAGE;
} else {
static const unsigned char swizzle[4] = { 0, 1, 2, 3 };
struct r600_texture *tex = (struct r600_texture *)res;
} else {
pipe_resource_reference(&buffer, input->buffer);
va = r600_resource(buffer)->gpu_address + input->buffer_offset;
+ /* Only track usage for non-user buffers. */
+ r600_resource(buffer)->bind_history |= PIPE_BIND_CONSTANT_BUFFER;
}
/* Set the descriptor. */
radeon_add_to_buffer_list_check_mem(&sctx->b, &sctx->b.gfx, buf,
buffers->shader_usage,
buffers->priority, true);
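+ /* Record that the buffer has been bound as a shader buffer. */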
+ buf->bind_history |= PIPE_BIND_SHADER_BUFFER;
+
buffers->enabled_mask |= 1u << slot;
descs->dirty_mask |= 1u << slot;
sctx->descriptors_dirty |=
buffers->shader_usage,
RADEON_PRIO_SHADER_RW_BUFFER,
true);
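+ /* Record the streamout binding on the underlying buffer. */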
+ r600_resource(buffer)->bind_history |= PIPE_BIND_STREAM_OUTPUT;
+
buffers->enabled_mask |= 1u << bufidx;
} else {
/* Clear the descriptor and unset the resource. */
for (i = 0; i < count; i++) {
const struct pipe_vertex_buffer *src = buffers + i;
struct pipe_vertex_buffer *dsti = dst + i;
+ struct pipe_resource *buf = src->buffer;
- pipe_resource_reference(&dsti->buffer, src->buffer);
+ pipe_resource_reference(&dsti->buffer, buf);
dsti->buffer_offset = src->buffer_offset;
dsti->stride = src->stride;
- r600_context_add_resource_size(ctx, src->buffer);
+ r600_context_add_resource_size(ctx, buf);
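+ /* buf may be NULL, e.g. for user vertex buffers. */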
+ if (buf)
+ r600_resource(buf)->bind_history |= PIPE_BIND_VERTEX_BUFFER;
}
} else {
for (i = 0; i < count; i++) {
struct si_context *sctx = (struct si_context *)ctx;
if (ib) {
- pipe_resource_reference(&sctx->index_buffer.buffer, ib->buffer);
+ struct pipe_resource *buf = ib->buffer;
+
+ pipe_resource_reference(&sctx->index_buffer.buffer, buf);
memcpy(&sctx->index_buffer, ib, sizeof(*ib));
- r600_context_add_resource_size(ctx, ib->buffer);
+ r600_context_add_resource_size(ctx, buf);
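+ /* buf may be NULL, e.g. for user index buffers. */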
+ if (buf)
+ r600_resource(buf)->bind_history |= PIPE_BIND_INDEX_BUFFER;
} else {
pipe_resource_reference(&sctx->index_buffer.buffer, NULL);
}
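
Taken together, these hunks make bind_history a cumulative record of every
PIPE_BIND_* point a buffer has ever been bound to. A minimal sketch of the
intended query pattern (illustrative only, not part of this patch; the helper
name is hypothetical):

	static bool si_buffer_was_bound_as(struct r600_resource *rbuffer,
					   unsigned pipe_bind_flag)
	{
		/* bind_history accumulates PIPE_BIND_* bits on every bind,
		 * so a consumer can cheaply skip binding points the buffer
		 * was never attached to. */
		return (rbuffer->bind_history & pipe_bind_flag) != 0;
	}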