}
static struct pipe_sampler_view *
-texture_buffer_sampler_view(struct r600_pipe_sampler_view *view,
+texture_buffer_sampler_view(struct r600_context *rctx,
+ struct r600_pipe_sampler_view *view,
 unsigned width0, unsigned height0)
{
view->tex_resource_words[4] = 0;
view->tex_resource_words[5] = view->tex_resource_words[6] = 0;
view->tex_resource_words[7] = S_03001C_TYPE(V_03001C_SQ_TEX_VTX_VALID_BUFFER);
+
+ /* Track this buffer view on the context-wide list so that
+  * r600_invalidate_buffer() can patch the descriptor's GPU address
+  * when the underlying buffer is reallocated.
+  * NOTE(review): `tmp` is not declared in this hunk — presumably
+  * `(struct r600_texture *)view->base.texture` in elided context;
+  * confirm. Views without a GPU address are deliberately not listed,
+  * and r600_sampler_view_destroy() mirrors this same condition. */
+ if (tmp->resource.gpu_address)
+ LIST_ADDTAIL(&view->list, &rctx->b.texture_buffers);
return &view->base;
}
unsigned width0, unsigned height0,
unsigned force_level)
{
+ struct r600_context *rctx = (struct r600_context*)ctx;
struct r600_screen *rscreen = (struct r600_screen*)ctx->screen;
struct r600_pipe_sampler_view *view = CALLOC_STRUCT(r600_pipe_sampler_view);
struct r600_texture *tmp = (struct r600_texture*)texture;
view->base.context = ctx;
if (texture->target == PIPE_BUFFER)
- return texture_buffer_sampler_view(view, width0, height0);
+ return texture_buffer_sampler_view(rctx, view, width0, height0);
swizzle[0] = state->swizzle_r;
swizzle[1] = state->swizzle_g;
static void r600_sampler_view_destroy(struct pipe_context *ctx,
struct pipe_sampler_view *state)
{
- struct r600_pipe_sampler_view *resource = (struct r600_pipe_sampler_view *)state;
+ struct r600_pipe_sampler_view *view = (struct r600_pipe_sampler_view *)state;
+
+ /* Buffer views with a GPU address were appended to
+  * rctx->b.texture_buffers when they were created; unlink this view
+  * before freeing it so the list never holds a dangling entry.
+  * The condition must match the one used at creation time, otherwise
+  * LIST_DELINIT would run on a node that was never LIST_ADDTAIL'd. */
+ if (view->tex_resource->gpu_address &&
+ view->tex_resource->b.b.target == PIPE_BUFFER)
+ LIST_DELINIT(&view->list);
pipe_resource_reference(&state->texture, NULL);
- FREE(view);
+ FREE(view);
}
void r600_sampler_states_dirty(struct r600_context *rctx,
struct r600_context *rctx = (struct r600_context*)ctx;
struct r600_resource *rbuffer = r600_resource(buf);
unsigned i, shader, mask, alignment = rbuffer->buf->alignment;
+ struct r600_pipe_sampler_view *view;
/* Reallocate the buffer in the same pipe_resource. */
r600_init_resource(&rctx->screen->b, rbuffer, rbuffer->b.b.width0,
}
}
- /* XXX TODO: texture buffer objects */
+ /* Texture buffer objects - update the virtual addresses in descriptors.
+  * Every buffer sampler view of this context sits on the
+  * texture_buffers list; rewrite the base address of those that
+  * reference the reallocated buffer. */
+ LIST_FOR_EACH_ENTRY(view, &rctx->b.texture_buffers, list) {
+ if (view->base.texture == &rbuffer->b.b) {
+ unsigned stride = util_format_get_blocksize(view->base.format);
+ uint64_t offset = (uint64_t)view->base.u.buf.first_element * stride;
+ uint64_t va = rbuffer->gpu_address + offset;
+
+ /* Word 0 holds the low 32 bits of the base address, the
+  * high bits live in the BASE_ADDRESS_HI field of word 2. */
+ view->tex_resource_words[0] = va;
+ view->tex_resource_words[2] &= C_038008_BASE_ADDRESS_HI;
+ view->tex_resource_words[2] |= S_038008_BASE_ADDRESS_HI(va >> 32);
+ }
+ }
+ /* Texture buffer objects - make bindings dirty if needed, so the
+  * updated descriptors are re-emitted on the next draw. */
+ for (shader = 0; shader < PIPE_SHADER_TYPES; shader++) {
+ struct r600_samplerview_state *state = &rctx->samplers[shader].views;
+ bool found = false;
+ /* Named bound_mask/slot (not mask/i) to avoid shadowing the
+  * outer declarations above (-Wshadow). */
+ uint32_t bound_mask = state->enabled_mask;
+
+ while (bound_mask) {
+ unsigned slot = u_bit_scan(&bound_mask);
+ if (state->views[slot]->base.texture == &rbuffer->b.b) {
+ found = true;
+ /* 1u: unsigned shift — 1 << 31 would be signed-overflow UB. */
+ state->dirty_mask |= 1u << slot;
+ }
+ }
+ if (found) {
+ r600_sampler_views_dirty(rctx, state);
+ }
+ }
}
static void r600_set_occlusion_query_state(struct pipe_context *ctx, bool enable)
float sample_locations_8x[8][2];
float sample_locations_16x[16][2];
+ /* The list of all texture buffer objects in this context.
+ * This list is walked when a buffer is invalidated/reallocated and
+ * the GPU addresses are updated. */
+ struct list_head texture_buffers;
+
/* Copy one resource to another using async DMA. */
void (*dma_copy)(struct pipe_context *ctx,
struct pipe_resource *dst,