#include "sid.h"
#include "gfx9d.h"
+#include "util/hash_table.h"
#include "util/u_format.h"
#include "util/u_memory.h"
#include "util/u_upload_mgr.h"
* descriptor */
};
+static void si_init_descriptor_list(uint32_t *desc_list,
+ unsigned element_dw_size,
+ unsigned num_elements,
+ const uint32_t *null_descriptor)
+{
+ int i;
+
+ /* Initialize the array with null descriptors, if one is provided;
+ * the element size must then be a multiple of 8 dwords. */
+ if (null_descriptor) {
+ assert(element_dw_size % 8 == 0);
+ for (i = 0; i < num_elements * element_dw_size / 8; i++)
+ memcpy(desc_list + i * 8, null_descriptor, 8 * 4);
+ }
+}
+
static void si_init_descriptors(struct si_context *sctx,
struct si_descriptors *desc,
unsigned shader_userdata_index,
if (sscreen->b.chip_class <= VI)
meta_va += base_level_info->dcc_offset;
- } else if (tex->tc_compatible_htile) {
+ } else if (tex->tc_compatible_htile && first_level == 0) {
meta_va = tex->resource.gpu_address + tex->htile_offset;
}
}
}
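+/* A sampler-view descriptor list is 16 dwords:
+ *   [0:7]  the image (or buffer) resource descriptor,
+ *   [8:15] the FMASK descriptor when FMASK is used, otherwise
+ *   [8:11] a null descriptor and [12:15] the sampler state.
+ */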
+static void si_set_sampler_view_desc(struct si_context *sctx,
+ struct si_sampler_view *sview,
+ struct si_sampler_state *sstate,
+ uint32_t *desc)
+{
+ struct pipe_sampler_view *view = &sview->base;
+ struct r600_texture *rtex = (struct r600_texture *)view->texture;
+ bool is_buffer;
+
+ assert(rtex); /* views with texture == NULL aren't supported */
+ is_buffer = rtex->resource.b.b.target == PIPE_BUFFER;
+
+ if (unlikely(!is_buffer && sview->dcc_incompatible)) {
+ if (vi_dcc_enabled(rtex, view->u.tex.first_level))
+ if (!r600_texture_disable_dcc(&sctx->b, rtex))
+ sctx->b.decompress_dcc(&sctx->b.b, rtex);
+
+ sview->dcc_incompatible = false;
+ }
+
+ memcpy(desc, sview->state, 8*4);
+
+ if (is_buffer) {
+ si_set_buf_desc_address(&rtex->resource,
+ sview->base.u.buf.offset,
+ desc + 4);
+ } else {
+ bool is_separate_stencil = rtex->db_compatible &&
+ sview->is_stencil_sampler;
+
+ si_set_mutable_tex_desc_fields(sctx->screen, rtex,
+ sview->base_level_info,
+ sview->base_level,
+ sview->base.u.tex.first_level,
+ sview->block_width,
+ is_separate_stencil,
+ desc);
+ }
+
+ if (!is_buffer && rtex->fmask.size) {
+ memcpy(desc + 8, sview->fmask_state, 8*4);
+ } else {
+ /* Disable FMASK and bind sampler state in [12:15]. */
+ memcpy(desc + 8, null_texture_descriptor, 4*4);
+
+ if (sstate)
+ memcpy(desc + 12, sstate->val, 4*4);
+ }
+}
+
static void si_set_sampler_view(struct si_context *sctx,
unsigned shader,
unsigned slot, struct pipe_sampler_view *view,
if (view) {
struct r600_texture *rtex = (struct r600_texture *)view->texture;
- bool is_buffer = rtex->resource.b.b.target == PIPE_BUFFER;
-
- if (unlikely(!is_buffer && rview->dcc_incompatible)) {
- if (vi_dcc_enabled(rtex, view->u.tex.first_level))
- if (!r600_texture_disable_dcc(&sctx->b, rtex))
- sctx->b.decompress_dcc(&sctx->b.b, rtex);
- rview->dcc_incompatible = false;
- }
-
- assert(rtex); /* views with texture == NULL aren't supported */
- pipe_sampler_view_reference(&views->views[slot], view);
- memcpy(desc, rview->state, 8*4);
+ si_set_sampler_view_desc(sctx, rview,
+ views->sampler_states[slot], desc);
- if (is_buffer) {
+ if (rtex->resource.b.b.target == PIPE_BUFFER)
rtex->resource.bind_history |= PIPE_BIND_SAMPLER_VIEW;
- si_set_buf_desc_address(&rtex->resource,
- view->u.buf.offset,
- desc + 4);
- } else {
- bool is_separate_stencil =
- rtex->db_compatible &&
- rview->is_stencil_sampler;
-
- si_set_mutable_tex_desc_fields(sctx->screen, rtex,
- rview->base_level_info,
- rview->base_level,
- rview->base.u.tex.first_level,
- rview->block_width,
- is_separate_stencil,
- desc);
- }
-
- if (!is_buffer && rtex->fmask.size) {
- memcpy(desc + 8,
- rview->fmask_state, 8*4);
- } else {
- /* Disable FMASK and bind sampler state in [12:15]. */
- memcpy(desc + 8,
- null_texture_descriptor, 4*4);
-
- if (views->sampler_states[slot])
- memcpy(desc + 12,
- views->sampler_states[slot]->val, 4*4);
- }
-
+ pipe_sampler_view_reference(&views->views[slot], view);
views->enabled_mask |= 1u << slot;
/* Since this can flush, it must be done after enabled_mask is
(rtex->cmask.size || rtex->dcc_offset));
}
-static bool depth_needs_decompression(struct r600_texture *rtex,
- struct si_sampler_view *sview)
+static bool depth_needs_decompression(struct r600_texture *rtex)
{
- return rtex->db_compatible &&
- (!rtex->tc_compatible_htile ||
- !r600_can_sample_zs(rtex, sview->is_stencil_sampler));
+ /* If the depth/stencil texture is TC-compatible, no decompression
+ * will be done. The decompression function will only flush DB caches
+ * to make the texture coherent with shaders. That's necessary because
+ * the driver doesn't flush DB caches in any other case.
+ */
+ return rtex->db_compatible;
}
-static void si_update_compressed_tex_shader_mask(struct si_context *sctx,
- unsigned shader)
+static void si_update_shader_needs_decompress_mask(struct si_context *sctx,
+ unsigned shader)
{
struct si_textures_info *samplers = &sctx->samplers[shader];
unsigned shader_bit = 1 << shader;
if (samplers->needs_depth_decompress_mask ||
samplers->needs_color_decompress_mask ||
sctx->images[shader].needs_color_decompress_mask)
- sctx->compressed_tex_shader_mask |= shader_bit;
+ sctx->shader_needs_decompress_mask |= shader_bit;
else
- sctx->compressed_tex_shader_mask &= ~shader_bit;
+ sctx->shader_needs_decompress_mask &= ~shader_bit;
}
static void si_set_sampler_views(struct pipe_context *ctx,
if (views[i]->texture && views[i]->texture->target != PIPE_BUFFER) {
struct r600_texture *rtex =
(struct r600_texture*)views[i]->texture;
- struct si_sampler_view *rview = (struct si_sampler_view *)views[i];
- if (depth_needs_decompression(rtex, rview)) {
+ if (depth_needs_decompression(rtex)) {
samplers->needs_depth_decompress_mask |= 1u << slot;
} else {
samplers->needs_depth_decompress_mask &= ~(1u << slot);
}
}
- si_update_compressed_tex_shader_mask(sctx, shader);
+ si_update_shader_needs_decompress_mask(sctx, shader);
}
static void
-si_samplers_update_compressed_colortex_mask(struct si_textures_info *samplers)
+si_samplers_update_needs_color_decompress_mask(struct si_textures_info *samplers)
{
unsigned mask = samplers->views.enabled_mask;
view->u.buf.offset + view->u.buf.size);
}
-static void si_set_shader_image(struct si_context *ctx,
- unsigned shader,
- unsigned slot, const struct pipe_image_view *view,
- bool skip_decompress)
+static void si_set_shader_image_desc(struct si_context *ctx,
+ const struct pipe_image_view *view,
+ bool skip_decompress,
+ uint32_t *desc)
{
struct si_screen *screen = ctx->screen;
- struct si_images_info *images = &ctx->images[shader];
- struct si_descriptors *descs = si_sampler_and_image_descriptors(ctx, shader);
struct r600_resource *res;
- unsigned desc_slot = si_get_image_slot(slot);
- uint32_t *desc = descs->list + desc_slot * 8;
-
- if (!view || !view->resource) {
- si_disable_shader_image(ctx, shader, slot);
- return;
- }
res = (struct r600_resource *)view->resource;
- if (&images->views[slot] != view)
- util_copy_image_view(&images->views[slot], view);
-
if (res->b.b.target == PIPE_BUFFER) {
if (view->access & PIPE_IMAGE_ACCESS_WRITE)
si_mark_image_range_valid(view);
view->u.buf.offset,
view->u.buf.size, desc);
si_set_buf_desc_address(res, view->u.buf.offset, desc + 4);
-
- images->needs_color_decompress_mask &= ~(1 << slot);
- res->bind_history |= PIPE_BIND_SHADER_IMAGE;
} else {
static const unsigned char swizzle[4] = { 0, 1, 2, 3 };
struct r600_texture *tex = (struct r600_texture *)res;
* The decompression is relatively cheap if the surface
* has been decompressed already.
*/
- if (r600_texture_disable_dcc(&ctx->b, tex))
- uses_dcc = false;
- else
+ if (!r600_texture_disable_dcc(&ctx->b, tex))
ctx->b.decompress_dcc(&ctx->b.b, tex);
}
- if (color_needs_decompression(tex)) {
- images->needs_color_decompress_mask |= 1 << slot;
- } else {
- images->needs_color_decompress_mask &= ~(1 << slot);
- }
-
- if (uses_dcc &&
- p_atomic_read(&tex->framebuffers_bound))
- ctx->need_check_render_feedback = true;
-
if (ctx->b.chip_class >= GFX9) {
/* Always set the base address. The swizzle modes don't
* allow setting mipmap level offsets as the base.
util_format_get_blockwidth(view->format),
false, desc);
}
+}
+
+static void si_set_shader_image(struct si_context *ctx,
+ unsigned shader,
+ unsigned slot, const struct pipe_image_view *view,
+ bool skip_decompress)
+{
+ struct si_images_info *images = &ctx->images[shader];
+ struct si_descriptors *descs = si_sampler_and_image_descriptors(ctx, shader);
+ struct r600_resource *res;
+ unsigned desc_slot = si_get_image_slot(slot);
+ uint32_t *desc = descs->list + desc_slot * 8;
+
+ if (!view || !view->resource) {
+ si_disable_shader_image(ctx, shader, slot);
+ return;
+ }
+
+ res = (struct r600_resource *)view->resource;
+
+ if (&images->views[slot] != view)
+ util_copy_image_view(&images->views[slot], view);
+
+ si_set_shader_image_desc(ctx, view, skip_decompress, desc);
+
+ if (res->b.b.target == PIPE_BUFFER) {
+ images->needs_color_decompress_mask &= ~(1 << slot);
+ res->bind_history |= PIPE_BIND_SHADER_IMAGE;
+ } else {
+ struct r600_texture *tex = (struct r600_texture *)res;
+ unsigned level = view->u.tex.level;
+
+ if (color_needs_decompression(tex)) {
+ images->needs_color_decompress_mask |= 1 << slot;
+ } else {
+ images->needs_color_decompress_mask &= ~(1 << slot);
+ }
+
+ if (vi_dcc_enabled(tex, level) &&
+ p_atomic_read(&tex->framebuffers_bound))
+ ctx->need_check_render_feedback = true;
+ }
images->enabled_mask |= 1u << slot;
/* two 8-byte images share one 16-byte slot */
/* Since this can flush, it must be done after enabled_mask is updated. */
si_sampler_view_add_buffer(ctx, &res->b.b,
- RADEON_USAGE_READWRITE, false, true);
+ (view->access & PIPE_IMAGE_ACCESS_WRITE) ?
+ RADEON_USAGE_READWRITE : RADEON_USAGE_READ,
+ false, true);
}
static void
si_set_shader_image(ctx, shader, slot, NULL, false);
}
- si_update_compressed_tex_shader_mask(ctx, shader);
+ si_update_shader_needs_decompress_mask(ctx, shader);
}
static void
-si_images_update_compressed_colortex_mask(struct si_images_info *images)
+si_images_update_needs_color_decompress_mask(struct si_images_info *images)
{
unsigned mask = images->enabled_mask;
int i;
for (i = 0; i < count; i++) {
- int vb = sctx->vertex_elements->elements[i].vertex_buffer_index;
+ int vb = sctx->vertex_elements->vertex_buffer_index[i];
if (vb >= ARRAY_SIZE(sctx->vertex_buffer))
continue;
bool si_upload_vertex_buffer_descriptors(struct si_context *sctx)
{
- struct si_vertex_element *velems = sctx->vertex_elements;
+ struct si_vertex_elements *velems = sctx->vertex_elements;
struct si_descriptors *desc = &sctx->vertex_buffers;
unsigned i, count;
unsigned desc_list_byte_size;
assert(count <= SI_MAX_ATTRIBS);
for (i = 0; i < count; i++) {
- struct pipe_vertex_element *ve = &velems->elements[i];
struct pipe_vertex_buffer *vb;
struct r600_resource *rbuffer;
unsigned offset;
- unsigned vbo_index = ve->vertex_buffer_index;
+ unsigned vbo_index = velems->vertex_buffer_index[i];
uint32_t *desc = &ptr[i*4];
vb = &sctx->vertex_buffer[vbo_index];
continue;
}
- offset = vb->buffer_offset + ve->src_offset;
+ offset = vb->buffer_offset + velems->src_offset[i];
va = rbuffer->gpu_address + offset;
/* Fill in T# buffer resource description */
/* TEXTURE METADATA ENABLE/DISABLE */
+static void
+si_resident_handles_update_needs_color_decompress(struct si_context *sctx)
+{
+ util_dynarray_clear(&sctx->resident_tex_needs_color_decompress);
+ util_dynarray_clear(&sctx->resident_img_needs_color_decompress);
+
+ util_dynarray_foreach(&sctx->resident_tex_handles,
+ struct si_texture_handle *, tex_handle) {
+ struct pipe_resource *res = (*tex_handle)->view->texture;
+ struct r600_texture *rtex;
+
+ if (!res || res->target == PIPE_BUFFER)
+ continue;
+
+ rtex = (struct r600_texture *)res;
+ if (!color_needs_decompression(rtex))
+ continue;
+
+ util_dynarray_append(&sctx->resident_tex_needs_color_decompress,
+ struct si_texture_handle *, *tex_handle);
+ }
+
+ util_dynarray_foreach(&sctx->resident_img_handles,
+ struct si_image_handle *, img_handle) {
+ struct pipe_image_view *view = &(*img_handle)->view;
+ struct pipe_resource *res = view->resource;
+ struct r600_texture *rtex;
+
+ if (!res || res->target == PIPE_BUFFER)
+ continue;
+
+ rtex = (struct r600_texture *)res;
+ if (!color_needs_decompression(rtex))
+ continue;
+
+ util_dynarray_append(&sctx->resident_img_needs_color_decompress,
+ struct si_image_handle *, *img_handle);
+ }
+}
+
/* CMASK can be enabled (for fast clear) and disabled (for texture export)
* while the texture is bound, possibly by a different context. In that case,
- * call this function to update compressed_colortex_masks.
+ * call this function to update needs_*_decompress_masks.
*/
-void si_update_compressed_colortex_masks(struct si_context *sctx)
+void si_update_needs_color_decompress_masks(struct si_context *sctx)
{
for (int i = 0; i < SI_NUM_SHADERS; ++i) {
- si_samplers_update_compressed_colortex_mask(&sctx->samplers[i]);
- si_images_update_compressed_colortex_mask(&sctx->images[i]);
- si_update_compressed_tex_shader_mask(sctx, i);
+ si_samplers_update_needs_color_decompress_mask(&sctx->samplers[i]);
+ si_images_update_needs_color_decompress_mask(&sctx->images[i]);
+ si_update_shader_needs_decompress_mask(sctx, i);
}
+
+ si_resident_handles_update_needs_color_decompress(sctx);
}
/* BUFFER DISCARD/INVALIDATION */
/* Vertex buffers. */
if (rbuffer->bind_history & PIPE_BIND_VERTEX_BUFFER) {
for (i = 0; i < num_elems; i++) {
- int vb = sctx->vertex_elements->elements[i].vertex_buffer_index;
+ int vb = sctx->vertex_elements->vertex_buffer_index[i];
if (vb >= ARRAY_SIZE(sctx->vertex_buffer))
continue;
}
}
}
+
+ /* Bindless texture handles */
+ if (rbuffer->texture_handle_allocated) {
+ util_dynarray_foreach(&sctx->resident_tex_handles,
+ struct si_texture_handle *, tex_handle) {
+ struct pipe_sampler_view *view = (*tex_handle)->view;
+ struct si_bindless_descriptor *desc = (*tex_handle)->desc;
+
+ if (view->texture == buf) {
+ si_set_buf_desc_address(rbuffer,
+ view->u.buf.offset,
+ &desc->desc_list[4]);
+ desc->dirty = true;
+ sctx->bindless_descriptors_dirty = true;
+
+ radeon_add_to_buffer_list_check_mem(
+ &sctx->b, &sctx->b.gfx, rbuffer,
+ RADEON_USAGE_READ,
+ RADEON_PRIO_SAMPLER_BUFFER, true);
+ }
+ }
+ }
+
+ /* Bindless image handles */
+ if (rbuffer->image_handle_allocated) {
+ util_dynarray_foreach(&sctx->resident_img_handles,
+ struct si_image_handle *, img_handle) {
+ struct pipe_image_view *view = &(*img_handle)->view;
+ struct si_bindless_descriptor *desc = (*img_handle)->desc;
+
+ if (view->resource == buf) {
+ if (view->access & PIPE_IMAGE_ACCESS_WRITE)
+ si_mark_image_range_valid(view);
+
+ si_set_buf_desc_address(rbuffer,
+ view->u.buf.offset,
+ &desc->desc_list[4]);
+ desc->dirty = true;
+ sctx->bindless_descriptors_dirty = true;
+
+ radeon_add_to_buffer_list_check_mem(
+ &sctx->b, &sctx->b.gfx, rbuffer,
+ RADEON_USAGE_READWRITE,
+ RADEON_PRIO_SAMPLER_BUFFER, true);
+ }
+ }
+ }
}
/* Reallocate a buffer and update all resource bindings where the buffer is
si_rebind_buffer(ctx, buf, old_va);
}
+static void si_upload_bindless_descriptor(struct si_context *sctx,
+ struct si_bindless_descriptor *desc)
+{
+ struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+ uint64_t va = desc->buffer->gpu_address + desc->offset;
+ unsigned num_dwords = sizeof(desc->desc_list) / 4;
+
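+ /* Write the descriptor through the CP into L2 (DST_SEL = TC_L2);
+ * WR_CONFIRM makes the ME wait until the write has landed before
+ * continuing. */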
+ radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 2 + num_dwords, 0));
+ radeon_emit(cs, S_370_DST_SEL(V_370_TC_L2) |
+ S_370_WR_CONFIRM(1) |
+ S_370_ENGINE_SEL(V_370_ME));
+ radeon_emit(cs, va);
+ radeon_emit(cs, va >> 32);
+ radeon_emit_array(cs, desc->desc_list, num_dwords);
+}
+
+static void si_upload_bindless_descriptors(struct si_context *sctx)
+{
+ if (!sctx->bindless_descriptors_dirty)
+ return;
+
+ /* Wait for graphics/compute to be idle before updating the resident
+ * descriptors directly in memory, in case the GPU is using them.
+ */
+ sctx->b.flags |= SI_CONTEXT_PS_PARTIAL_FLUSH |
+ SI_CONTEXT_CS_PARTIAL_FLUSH;
+ si_emit_cache_flush(sctx);
+
+ util_dynarray_foreach(&sctx->resident_tex_handles,
+ struct si_texture_handle *, tex_handle) {
+ struct si_bindless_descriptor *desc = (*tex_handle)->desc;
+
+ if (!desc->dirty)
+ continue;
+
+ si_upload_bindless_descriptor(sctx, desc);
+ desc->dirty = false;
+ }
+
+ util_dynarray_foreach(&sctx->resident_img_handles,
+ struct si_image_handle *, img_handle) {
+ struct si_bindless_descriptor *desc = (*img_handle)->desc;
+
+ if (!desc->dirty)
+ continue;
+
+ si_upload_bindless_descriptor(sctx, desc);
+ desc->dirty = false;
+ }
+
+ /* Invalidate L1 because it doesn't know that L2 changed. */
+ sctx->b.flags |= SI_CONTEXT_INV_SMEM_L1;
+ si_emit_cache_flush(sctx);
+
+ sctx->bindless_descriptors_dirty = false;
+}
+
+/* Update mutable descriptor fields of all resident textures and images. */
+static void si_update_all_resident_texture_descriptors(struct si_context *sctx)
+{
+ util_dynarray_foreach(&sctx->resident_tex_handles,
+ struct si_texture_handle *, tex_handle) {
+ struct si_bindless_descriptor *desc = (*tex_handle)->desc;
+ struct si_sampler_view *sview =
+ (struct si_sampler_view *)(*tex_handle)->view;
+ uint32_t desc_list[16];
+
+ if (sview->base.texture->target == PIPE_BUFFER)
+ continue;
+
+ memcpy(desc_list, desc->desc_list, sizeof(desc_list));
+ si_set_sampler_view_desc(sctx, sview, &(*tex_handle)->sstate,
+ &desc->desc_list[0]);
+
+ if (memcmp(desc_list, desc->desc_list, sizeof(desc_list))) {
+ desc->dirty = true;
+ sctx->bindless_descriptors_dirty = true;
+ }
+ }
+
+ util_dynarray_foreach(&sctx->resident_img_handles,
+ struct si_image_handle *, img_handle) {
+ struct si_bindless_descriptor *desc = (*img_handle)->desc;
+ struct pipe_image_view *view = &(*img_handle)->view;
+ uint32_t desc_list[16];
+
+ if (view->resource->target == PIPE_BUFFER)
+ continue;
+
+ memcpy(desc_list, desc->desc_list, sizeof(desc_list));
+ si_set_shader_image_desc(sctx, view, true,
+ &desc->desc_list[0]);
+
+ if (memcmp(desc_list, desc->desc_list, sizeof(desc_list))) {
+ desc->dirty = true;
+ sctx->bindless_descriptors_dirty = true;
+ }
+ }
+
+ si_upload_bindless_descriptors(sctx);
+}
+
/* Update mutable image descriptor fields of all bound textures. */
void si_update_all_texture_descriptors(struct si_context *sctx)
{
samplers->views[i], true);
}
- si_update_compressed_tex_shader_mask(sctx, shader);
+ si_update_shader_needs_decompress_mask(sctx, shader);
}
+
+ si_update_all_resident_texture_descriptors(sctx);
}
/* SHADER USER DATA */
R_00B330_SPI_SHADER_USER_DATA_ES_0);
si_emit_shader_pointer(sctx, descs,
R_00B430_SPI_SHADER_USER_DATA_HS_0);
+ si_emit_shader_pointer(sctx, descs,
+ R_00B530_SPI_SHADER_USER_DATA_LS_0);
}
}
sctx->shader_pointers_dirty &= ~compute_mask;
}
+/* BINDLESS */
+
+struct si_bindless_descriptor_slab
+{
+ struct pb_slab base;
+ struct r600_resource *buffer;
+ struct si_bindless_descriptor *entries;
+};
+
+bool si_bindless_descriptor_can_reclaim_slab(void *priv,
+ struct pb_slab_entry *entry)
+{
+ /* Do not allow reclaiming any bindless descriptors for now, because
+ * the GPU might still be using them. This should be improved later on.
+ */
+ return false;
+}
+
+struct pb_slab *si_bindless_descriptor_slab_alloc(void *priv, unsigned heap,
+ unsigned entry_size,
+ unsigned group_index)
+{
+ struct si_context *sctx = priv;
+ struct si_screen *sscreen = sctx->screen;
+ struct si_bindless_descriptor_slab *slab;
+
+ slab = CALLOC_STRUCT(si_bindless_descriptor_slab);
+ if (!slab)
+ return NULL;
+
+ /* Create a buffer in VRAM for 1024 bindless descriptors. */
+ slab->buffer = (struct r600_resource *)
+ pipe_buffer_create(&sscreen->b.b, 0,
+ PIPE_USAGE_DEFAULT, 64 * 1024);
+ if (!slab->buffer)
+ goto fail;
+
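+ /* With the 64-byte entry size used by si_create_bindless_descriptor,
+ * this yields the 1024 descriptors mentioned above. */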
+ slab->base.num_entries = slab->buffer->bo_size / entry_size;
+ slab->base.num_free = slab->base.num_entries;
+ slab->entries = CALLOC(slab->base.num_entries, sizeof(*slab->entries));
+ if (!slab->entries)
+ goto fail_buffer;
+
+ LIST_INITHEAD(&slab->base.free);
+
+ for (unsigned i = 0; i < slab->base.num_entries; ++i) {
+ struct si_bindless_descriptor *desc = &slab->entries[i];
+
+ desc->entry.slab = &slab->base;
+ desc->entry.group_index = group_index;
+ desc->buffer = slab->buffer;
+ desc->offset = i * entry_size;
+
+ LIST_ADDTAIL(&desc->entry.head, &slab->base.free);
+ }
+
+ /* Add the descriptor buffer to the per-context list. */
+ util_dynarray_append(&sctx->bindless_descriptors,
+ struct r600_resource *, slab->buffer);
+
+ return &slab->base;
+
+fail_buffer:
+ r600_resource_reference(&slab->buffer, NULL);
+fail:
+ FREE(slab);
+ return NULL;
+}
+
+void si_bindless_descriptor_slab_free(void *priv, struct pb_slab *pslab)
+{
+ struct si_context *sctx = priv;
+ struct si_bindless_descriptor_slab *slab =
+ (struct si_bindless_descriptor_slab *)pslab;
+
+ /* Remove the descriptor buffer from the per-context list. */
+ util_dynarray_delete_unordered(&sctx->bindless_descriptors,
+ struct r600_resource *, slab->buffer);
+
+ r600_resource_reference(&slab->buffer, NULL);
+ FREE(slab->entries);
+ FREE(slab);
+}
+
+static struct si_bindless_descriptor *
+si_create_bindless_descriptor(struct si_context *sctx, uint32_t *desc_list,
+ unsigned size)
+{
+ struct si_screen *sscreen = sctx->screen;
+ struct si_bindless_descriptor *desc;
+ struct pb_slab_entry *entry;
+ void *ptr;
+
+ /* Sub-allocate the bindless descriptor from a slab to avoid dealing
+ * with a ton of buffers and to reduce the winsys overhead.
+ */
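+ /* 64 bytes is exactly one 16-dword descriptor list. */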
+ entry = pb_slab_alloc(&sctx->bindless_descriptor_slabs, 64, 0);
+ if (!entry)
+ return NULL;
+
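+ /* container_of() takes a typed sample pointer here, hence the NULL
+ * initialization. */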
+ desc = NULL;
+ desc = container_of(entry, desc, entry);
+
+ /* Upload the descriptor directly into VRAM. Because the slabs are
+ * currently never reclaimed, the map doesn't need to be synchronized.
+ */
+ ptr = sscreen->b.ws->buffer_map(desc->buffer->buf, NULL,
+ PIPE_TRANSFER_WRITE |
+ PIPE_TRANSFER_UNSYNCHRONIZED);
+ if (!ptr) {
+ pb_slab_free(&sctx->bindless_descriptor_slabs, entry);
+ return NULL;
+ }
+ util_memcpy_cpu_to_le32(ptr + desc->offset, desc_list, size);
+
+ /* Keep a CPU copy of the initial descriptor, in particular for buffer
+ * invalidation, because we might need to know the previous address.
+ */
+ memcpy(desc->desc_list, desc_list, sizeof(desc->desc_list));
+
+ return desc;
+}
+
+static void si_invalidate_bindless_buf_desc(struct si_context *sctx,
+ struct si_bindless_descriptor *desc,
+ struct pipe_resource *resource,
+ uint64_t offset)
+{
+ struct r600_resource *buf = r600_resource(resource);
+ uint32_t *desc_list = desc->desc_list + 4;
+ uint64_t old_desc_va;
+
+ assert(resource->target == PIPE_BUFFER);
+
+ /* Retrieve the old buffer address from the descriptor: dword 0 holds
+ * the low 32 bits, BASE_ADDRESS_HI in dword 1 the high bits. */
+ old_desc_va = desc_list[0];
+ old_desc_va |= ((uint64_t)G_008F04_BASE_ADDRESS_HI(desc_list[1]) << 32);
+
+ if (old_desc_va != buf->gpu_address + offset) {
+ /* The buffer has been invalidated while the handle wasn't
+ * resident; update the descriptor and the dirty flags.
+ */
+ si_set_buf_desc_address(buf, offset, &desc_list[0]);
+
+ desc->dirty = true;
+ sctx->bindless_descriptors_dirty = true;
+ }
+}
+
+static uint64_t si_create_texture_handle(struct pipe_context *ctx,
+ struct pipe_sampler_view *view,
+ const struct pipe_sampler_state *state)
+{
+ struct si_sampler_view *sview = (struct si_sampler_view *)view;
+ struct si_context *sctx = (struct si_context *)ctx;
+ struct si_texture_handle *tex_handle;
+ struct si_sampler_state *sstate;
+ uint32_t desc_list[16];
+ uint64_t handle;
+
+ tex_handle = CALLOC_STRUCT(si_texture_handle);
+ if (!tex_handle)
+ return 0;
+
+ memset(desc_list, 0, sizeof(desc_list));
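+ /* One 16-dword element: both 8-dword halves start out as null
+ * texture descriptors. */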
+ si_init_descriptor_list(&desc_list[0], 16, 1, null_texture_descriptor);
+
+ sstate = ctx->create_sampler_state(ctx, state);
+ if (!sstate) {
+ FREE(tex_handle);
+ return 0;
+ }
+
+ si_set_sampler_view_desc(sctx, sview, sstate, &desc_list[0]);
+ memcpy(&tex_handle->sstate, sstate, sizeof(*sstate));
+ ctx->delete_sampler_state(ctx, sstate);
+
+ tex_handle->desc = si_create_bindless_descriptor(sctx, desc_list,
+ sizeof(desc_list));
+ if (!tex_handle->desc) {
+ FREE(tex_handle);
+ return 0;
+ }
+
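+ /* The bindless handle is simply the GPU VA of the descriptor. */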
+ handle = tex_handle->desc->buffer->gpu_address +
+ tex_handle->desc->offset;
+
+ if (!_mesa_hash_table_insert(sctx->tex_handles, (void *)handle,
+ tex_handle)) {
+ pb_slab_free(&sctx->bindless_descriptor_slabs,
+ &tex_handle->desc->entry);
+ FREE(tex_handle);
+ return 0;
+ }
+
+ pipe_sampler_view_reference(&tex_handle->view, view);
+
+ r600_resource(sview->base.texture)->texture_handle_allocated = true;
+
+ return handle;
+}
+
+static void si_delete_texture_handle(struct pipe_context *ctx, uint64_t handle)
+{
+ struct si_context *sctx = (struct si_context *)ctx;
+ struct si_texture_handle *tex_handle;
+ struct hash_entry *entry;
+
+ entry = _mesa_hash_table_search(sctx->tex_handles, (void *)handle);
+ if (!entry)
+ return;
+
+ tex_handle = (struct si_texture_handle *)entry->data;
+
+ pipe_sampler_view_reference(&tex_handle->view, NULL);
+ _mesa_hash_table_remove(sctx->tex_handles, entry);
+ pb_slab_free(&sctx->bindless_descriptor_slabs,
+ &tex_handle->desc->entry);
+ FREE(tex_handle);
+}
+
+static void si_make_texture_handle_resident(struct pipe_context *ctx,
+ uint64_t handle, bool resident)
+{
+ struct si_context *sctx = (struct si_context *)ctx;
+ struct si_texture_handle *tex_handle;
+ struct si_sampler_view *sview;
+ struct hash_entry *entry;
+
+ entry = _mesa_hash_table_search(sctx->tex_handles, (void *)handle);
+ if (!entry)
+ return;
+
+ tex_handle = (struct si_texture_handle *)entry->data;
+ sview = (struct si_sampler_view *)tex_handle->view;
+
+ if (resident) {
+ if (sview->base.texture->target != PIPE_BUFFER) {
+ struct r600_texture *rtex =
+ (struct r600_texture *)sview->base.texture;
+
+ if (depth_needs_decompression(rtex)) {
+ util_dynarray_append(
+ &sctx->resident_tex_needs_depth_decompress,
+ struct si_texture_handle *,
+ tex_handle);
+ }
+
+ if (color_needs_decompression(rtex)) {
+ util_dynarray_append(
+ &sctx->resident_tex_needs_color_decompress,
+ struct si_texture_handle *,
+ tex_handle);
+ }
+
+ if (rtex->dcc_offset &&
+ p_atomic_read(&rtex->framebuffers_bound))
+ sctx->need_check_render_feedback = true;
+ } else {
+ si_invalidate_bindless_buf_desc(sctx, tex_handle->desc,
+ sview->base.texture,
+ sview->base.u.buf.offset);
+ }
+
+ /* Add the texture handle to the per-context list. */
+ util_dynarray_append(&sctx->resident_tex_handles,
+ struct si_texture_handle *, tex_handle);
+
+ /* Add the buffers to the current CS in case si_begin_new_cs()
+ * is not going to be called.
+ */
+ radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
+ tex_handle->desc->buffer,
+ RADEON_USAGE_READWRITE,
+ RADEON_PRIO_DESCRIPTORS);
+
+ si_sampler_view_add_buffer(sctx, sview->base.texture,
+ RADEON_USAGE_READ,
+ sview->is_stencil_sampler, false);
+ } else {
+ /* Remove the texture handle from the per-context list. */
+ util_dynarray_delete_unordered(&sctx->resident_tex_handles,
+ struct si_texture_handle *,
+ tex_handle);
+
+ if (sview->base.texture->target != PIPE_BUFFER) {
+ util_dynarray_delete_unordered(
+ &sctx->resident_tex_needs_depth_decompress,
+ struct si_texture_handle *, tex_handle);
+
+ util_dynarray_delete_unordered(
+ &sctx->resident_tex_needs_color_decompress,
+ struct si_texture_handle *, tex_handle);
+ }
+ }
+}
+
+static uint64_t si_create_image_handle(struct pipe_context *ctx,
+ const struct pipe_image_view *view)
+{
+ struct si_context *sctx = (struct si_context *)ctx;
+ struct si_image_handle *img_handle;
+ uint32_t desc_list[16];
+ uint64_t handle;
+
+ if (!view || !view->resource)
+ return 0;
+
+ img_handle = CALLOC_STRUCT(si_image_handle);
+ if (!img_handle)
+ return 0;
+
+ memset(desc_list, 0, sizeof(desc_list));
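+ /* Only dwords [0:7] are initialized to the null image descriptor;
+ * the memset above leaves the rest zeroed. */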
+ si_init_descriptor_list(&desc_list[0], 8, 1, null_image_descriptor);
+
+ si_set_shader_image_desc(sctx, view, false, &desc_list[0]);
+
+ img_handle->desc = si_create_bindless_descriptor(sctx, desc_list,
+ sizeof(desc_list));
+ if (!img_handle->desc) {
+ FREE(img_handle);
+ return 0;
+ }
+
+ handle = img_handle->desc->buffer->gpu_address +
+ img_handle->desc->offset;
+
+ if (!_mesa_hash_table_insert(sctx->img_handles, (void *)handle,
+ img_handle)) {
+ pb_slab_free(&sctx->bindless_descriptor_slabs,
+ &img_handle->desc->entry);
+ FREE(img_handle);
+ return 0;
+ }
+
+ util_copy_image_view(&img_handle->view, view);
+
+ r600_resource(view->resource)->image_handle_allocated = true;
+
+ return handle;
+}
+
+static void si_delete_image_handle(struct pipe_context *ctx, uint64_t handle)
+{
+ struct si_context *sctx = (struct si_context *)ctx;
+ struct si_image_handle *img_handle;
+ struct hash_entry *entry;
+
+ entry = _mesa_hash_table_search(sctx->img_handles, (void *)handle);
+ if (!entry)
+ return;
+
+ img_handle = (struct si_image_handle *)entry->data;
+
+ util_copy_image_view(&img_handle->view, NULL);
+ _mesa_hash_table_remove(sctx->img_handles, entry);
+ pb_slab_free(&sctx->bindless_descriptor_slabs,
+ &img_handle->desc->entry);
+ FREE(img_handle);
+}
+
+static void si_make_image_handle_resident(struct pipe_context *ctx,
+ uint64_t handle, unsigned access,
+ bool resident)
+{
+ struct si_context *sctx = (struct si_context *)ctx;
+ struct si_image_handle *img_handle;
+ struct pipe_image_view *view;
+ struct r600_resource *res;
+ struct hash_entry *entry;
+
+ entry = _mesa_hash_table_search(sctx->img_handles, (void *)handle);
+ if (!entry)
+ return;
+
+ img_handle = (struct si_image_handle *)entry->data;
+ view = &img_handle->view;
+ res = (struct r600_resource *)view->resource;
+
+ if (resident) {
+ if (res->b.b.target != PIPE_BUFFER) {
+ struct r600_texture *rtex = (struct r600_texture *)res;
+ unsigned level = view->u.tex.level;
+
+ if (color_needs_decompression(rtex)) {
+ util_dynarray_append(
+ &sctx->resident_img_needs_color_decompress,
+ struct si_image_handle *,
+ img_handle);
+ }
+
+ if (vi_dcc_enabled(rtex, level) &&
+ p_atomic_read(&rtex->framebuffers_bound))
+ sctx->need_check_render_feedback = true;
+ } else {
+ si_invalidate_bindless_buf_desc(sctx, img_handle->desc,
+ view->resource,
+ view->u.buf.offset);
+ }
+
+ /* Add the image handle to the per-context list. */
+ util_dynarray_append(&sctx->resident_img_handles,
+ struct si_image_handle *, img_handle);
+
+ /* Add the buffers to the current CS in case si_begin_new_cs()
+ * is not going to be called.
+ */
+ radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
+ img_handle->desc->buffer,
+ RADEON_USAGE_READWRITE,
+ RADEON_PRIO_DESCRIPTORS);
+
+ si_sampler_view_add_buffer(sctx, view->resource,
+ (access & PIPE_IMAGE_ACCESS_WRITE) ?
+ RADEON_USAGE_READWRITE :
+ RADEON_USAGE_READ, false, false);
+ } else {
+ /* Remove the image handle from the per-context list. */
+ util_dynarray_delete_unordered(&sctx->resident_img_handles,
+ struct si_image_handle *,
+ img_handle);
+
+ if (res->b.b.target != PIPE_BUFFER) {
+ util_dynarray_delete_unordered(
+ &sctx->resident_img_needs_color_decompress,
+ struct si_image_handle *,
+ img_handle);
+ }
+ }
+}
+
+void si_all_resident_buffers_begin_new_cs(struct si_context *sctx)
+{
+ unsigned num_resident_tex_handles, num_resident_img_handles;
+
+ num_resident_tex_handles = sctx->resident_tex_handles.size /
+ sizeof(struct si_texture_handle *);
+ num_resident_img_handles = sctx->resident_img_handles.size /
+ sizeof(struct si_image_handle *);
+
+ /* Skip adding the bindless descriptors when no handles are resident. */
+ if (!num_resident_tex_handles && !num_resident_img_handles)
+ return;
+
+ /* Add all bindless descriptors. */
+ util_dynarray_foreach(&sctx->bindless_descriptors,
+ struct r600_resource *, desc) {
+ radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, *desc,
+ RADEON_USAGE_READWRITE,
+ RADEON_PRIO_DESCRIPTORS);
+ }
+
+ /* Add all resident texture handles. */
+ util_dynarray_foreach(&sctx->resident_tex_handles,
+ struct si_texture_handle *, tex_handle) {
+ struct si_sampler_view *sview =
+ (struct si_sampler_view *)(*tex_handle)->view;
+
+ si_sampler_view_add_buffer(sctx, sview->base.texture,
+ RADEON_USAGE_READ,
+ sview->is_stencil_sampler, false);
+ }
+
+ /* Add all resident image handles. */
+ util_dynarray_foreach(&sctx->resident_img_handles,
+ struct si_image_handle *, img_handle) {
+ struct pipe_image_view *view = &(*img_handle)->view;
+
+ si_sampler_view_add_buffer(sctx, view->resource,
+ RADEON_USAGE_READWRITE,
+ false, false);
+ }
+
+ sctx->b.num_resident_handles += num_resident_tex_handles +
+ num_resident_img_handles;
+}
+
/* INIT/DEINIT/UPLOAD */
/* GFX9 has only 4KB of CE, while previous chips had 32KB. In order
sctx->b.b.set_shader_buffers = si_set_shader_buffers;
sctx->b.b.set_sampler_views = si_set_sampler_views;
sctx->b.b.set_stream_output_targets = si_set_streamout_targets;
+ sctx->b.b.create_texture_handle = si_create_texture_handle;
+ sctx->b.b.delete_texture_handle = si_delete_texture_handle;
+ sctx->b.b.make_texture_handle_resident = si_make_texture_handle_resident;
+ sctx->b.b.create_image_handle = si_create_image_handle;
+ sctx->b.b.delete_image_handle = si_delete_image_handle;
+ sctx->b.b.make_image_handle_resident = si_make_image_handle_resident;
sctx->b.invalidate_buffer = si_invalidate_buffer;
sctx->b.rebind_buffer = si_rebind_buffer;
}
sctx->descriptors_dirty &= ~mask;
+
+ si_upload_bindless_descriptors(sctx);
+
return true;
}
sctx->descriptors_dirty &= ~mask;
+ si_upload_bindless_descriptors(sctx);
+
return true;
}
}
si_release_buffer_resources(&sctx->rw_buffers,
&sctx->descriptors[SI_DESCS_RW_BUFFERS]);
+ for (i = 0; i < SI_NUM_VERTEX_BUFFERS; i++)
+ pipe_vertex_buffer_unreference(&sctx->vertex_buffer[i]);
for (i = 0; i < SI_NUM_DESCS; ++i)
si_release_descriptors(&sctx->descriptors[i]);