#include "radeon/r600_cs.h"
#include "si_pipe.h"
-#include "si_shader.h"
#include "sid.h"
+#include "gfx9d.h"
+#include "util/hash_table.h"
#include "util/u_format.h"
-#include "util/u_math.h"
#include "util/u_memory.h"
-#include "util/u_suballoc.h"
#include "util/u_upload_mgr.h"
* descriptor */
};
-static void si_init_descriptors(struct si_descriptors *desc,
+static void si_init_descriptor_list(uint32_t *desc_list,
+ unsigned element_dw_size,
+ unsigned num_elements,
+ const uint32_t *null_descriptor)
+{
+ int i;
+
+ /* Initialize the array to NULL descriptors if the element size is a multiple of 8 dwords. */
+ if (null_descriptor) {
+ assert(element_dw_size % 8 == 0);
+ for (i = 0; i < num_elements * element_dw_size / 8; i++)
+ memcpy(desc_list + i * 8, null_descriptor, 8 * 4);
+ }
+}
+
+static void si_init_descriptors(struct si_context *sctx,
+ struct si_descriptors *desc,
unsigned shader_userdata_index,
unsigned element_dw_size,
unsigned num_elements,
- const uint32_t *null_descriptor,
+ unsigned first_ce_slot,
+ unsigned num_ce_slots,
unsigned *ce_offset)
{
- int i;
-
assert(num_elements <= sizeof(desc->dirty_mask)*8);
desc->list = CALLOC(num_elements, element_dw_size * 4);
desc->element_dw_size = element_dw_size;
desc->num_elements = num_elements;
- desc->dirty_mask = num_elements == 32 ? ~0u : (1u << num_elements) - 1;
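+ /* CE slots are only usable when a constant engine IB exists; without
+ * one, num_ce_slots stays 0 and the descriptors always go through the
+ * regular upload path.
+ */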
+ desc->first_ce_slot = sctx->ce_ib ? first_ce_slot : 0;
+ desc->num_ce_slots = sctx->ce_ib ? num_ce_slots : 0;
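+ /* The dirty mask is 64-bit because merged descriptor lists (samplers
+ * and images share one list) can have more than 32 slots.
+ */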
+ desc->dirty_mask = u_bit_consecutive64(0, num_elements);
desc->shader_userdata_offset = shader_userdata_index * 4;
- if (ce_offset) {
+ if (desc->num_ce_slots) {
+ desc->uses_ce = true;
desc->ce_offset = *ce_offset;
- /* make sure that ce_offset stays 32 byte aligned */
- *ce_offset += align(element_dw_size * num_elements * 4, 32);
- }
-
- /* Initialize the array to NULL descriptors if the element size is 8. */
- if (null_descriptor) {
- assert(element_dw_size % 8 == 0);
- for (i = 0; i < num_elements * element_dw_size / 8; i++)
- memcpy(desc->list + i * 8, null_descriptor,
- 8 * 4);
+ *ce_offset += element_dw_size * desc->num_ce_slots * 4;
}
}
}
static bool si_ce_upload(struct si_context *sctx, unsigned ce_offset, unsigned size,
- unsigned *out_offset, struct r600_resource **out_buf) {
+ unsigned *out_offset, struct r600_resource **out_buf)
+{
uint64_t va;
- u_suballocator_alloc(sctx->ce_suballocator, size, 64, out_offset,
+ u_suballocator_alloc(sctx->ce_suballocator, size,
+ si_optimal_tcc_alignment(sctx, size),
+ out_offset,
(struct pipe_resource**)out_buf);
if (!out_buf)
return false;
return true;
}
-static void si_ce_reinitialize_descriptors(struct si_context *sctx,
- struct si_descriptors *desc)
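+/* CE RAM contents cannot be relied upon to survive across IB submissions,
+ * so dump the whole CE RAM into a suballocated buffer at the end of the
+ * current IB and reload it with LOAD_CONST_RAM at the start of the next one.
+ */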
+void si_ce_save_all_descriptors_at_ib_end(struct si_context* sctx)
{
- if (desc->buffer) {
- struct r600_resource *buffer = (struct r600_resource*)desc->buffer;
- unsigned list_size = desc->num_elements * desc->element_dw_size * 4;
- uint64_t va = buffer->gpu_address + desc->buffer_offset;
- struct radeon_winsys_cs *ib = sctx->ce_preamble_ib;
-
- if (!ib)
- ib = sctx->ce_ib;
+ bool success = si_ce_upload(sctx, 0, sctx->total_ce_ram_allocated,
+ &sctx->ce_ram_saved_offset,
+ &sctx->ce_ram_saved_buffer);
+ (void)success;
+ assert(success);
+}
- list_size = align(list_size, 32);
+void si_ce_restore_all_descriptors_at_ib_start(struct si_context *sctx)
+{
+ if (!sctx->ce_ram_saved_buffer)
+ return;
- radeon_emit(ib, PKT3(PKT3_LOAD_CONST_RAM, 3, 0));
- radeon_emit(ib, va);
- radeon_emit(ib, va >> 32);
- radeon_emit(ib, list_size / 4);
- radeon_emit(ib, desc->ce_offset);
+ struct radeon_winsys_cs *ib = sctx->ce_preamble_ib;
+ if (!ib)
+ ib = sctx->ce_ib;
- radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, desc->buffer,
- RADEON_USAGE_READ, RADEON_PRIO_DESCRIPTORS);
- }
- desc->ce_ram_dirty = false;
-}
+ uint64_t va = sctx->ce_ram_saved_buffer->gpu_address +
+ sctx->ce_ram_saved_offset;
-void si_ce_reinitialize_all_descriptors(struct si_context *sctx)
-{
- int i;
+ radeon_emit(ib, PKT3(PKT3_LOAD_CONST_RAM, 3, 0));
+ radeon_emit(ib, va);
+ radeon_emit(ib, va >> 32);
+ radeon_emit(ib, sctx->total_ce_ram_allocated / 4);
+ radeon_emit(ib, 0);
- for (i = 0; i < SI_NUM_DESCS; ++i)
- si_ce_reinitialize_descriptors(sctx, &sctx->descriptors[i]);
+ radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
+ sctx->ce_ram_saved_buffer,
+ RADEON_USAGE_READ, RADEON_PRIO_DESCRIPTORS);
}
void si_ce_enable_loads(struct radeon_winsys_cs *ib)
struct si_descriptors *desc,
struct r600_atom * atom)
{
- unsigned list_size = desc->num_elements * desc->element_dw_size * 4;
+ unsigned slot_size = desc->element_dw_size * 4;
+ unsigned first_slot_offset = desc->first_active_slot * slot_size;
+ unsigned upload_size = desc->num_active_slots * slot_size;
- if (!desc->dirty_mask)
+ /* Skip the upload if no shader is using the descriptors. dirty_mask
+ * will stay dirty and the descriptors will be uploaded when there is
+ * a shader using them.
+ */
+ if (!upload_size)
return true;
- if (sctx->ce_ib) {
- uint32_t const* list = (uint32_t const*)desc->list;
+ if (desc->uses_ce) {
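+ /* Only the slots backed by CE RAM are written through the CE IB;
+ * shift the dirty mask so that bit 0 matches the first CE slot.
+ */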
+ const uint32_t *list = desc->list +
+ desc->first_ce_slot * desc->element_dw_size;
+ uint64_t mask = (desc->dirty_mask >> desc->first_ce_slot) &
+ u_bit_consecutive64(0, desc->num_ce_slots);
- if (desc->ce_ram_dirty)
- si_ce_reinitialize_descriptors(sctx, desc);
- while(desc->dirty_mask) {
+ while (mask) {
int begin, count;
- u_bit_scan_consecutive_range(&desc->dirty_mask, &begin,
- &count);
+ u_bit_scan_consecutive_range64(&mask, &begin, &count);
begin *= desc->element_dw_size;
count *= desc->element_dw_size;
radeon_emit_array(sctx->ce_ib, list + begin, count);
}
- if (!si_ce_upload(sctx, desc->ce_offset, list_size,
- &desc->buffer_offset, &desc->buffer))
+ if (!si_ce_upload(sctx,
+ desc->ce_offset +
+ (first_slot_offset - desc->first_ce_slot * slot_size),
+ upload_size, (unsigned*)&desc->buffer_offset,
+ &desc->buffer))
return false;
} else {
- void *ptr;
+ uint32_t *ptr;
- u_upload_alloc(sctx->b.uploader, 0, list_size, 256,
- &desc->buffer_offset,
- (struct pipe_resource**)&desc->buffer, &ptr);
+ u_upload_alloc(sctx->b.b.const_uploader, 0, upload_size,
+ si_optimal_tcc_alignment(sctx, upload_size),
+ (unsigned*)&desc->buffer_offset,
+ (struct pipe_resource**)&desc->buffer,
+ (void**)&ptr);
if (!desc->buffer)
return false; /* skip the draw call */
- util_memcpy_cpu_to_le32(ptr, desc->list, list_size);
+ util_memcpy_cpu_to_le32(ptr, (char*)desc->list + first_slot_offset,
+ upload_size);
+ desc->gpu_list = ptr - first_slot_offset / 4;
radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, desc->buffer,
RADEON_USAGE_READ, RADEON_PRIO_DESCRIPTORS);
}
- desc->pointer_dirty = true;
+
+ /* The shader pointer should point to slot 0. */
+ desc->buffer_offset -= first_slot_offset;
+
desc->dirty_mask = 0;
if (atom)
static void
si_descriptors_begin_new_cs(struct si_context *sctx, struct si_descriptors *desc)
{
- desc->ce_ram_dirty = true;
-
if (!desc->buffer)
return;
/* SAMPLER VIEWS */
static unsigned
-si_sampler_descriptors_idx(unsigned shader)
+si_sampler_and_image_descriptors_idx(unsigned shader)
{
return SI_DESCS_FIRST_SHADER + shader * SI_NUM_SHADER_DESCS +
- SI_SHADER_DESCS_SAMPLERS;
+ SI_SHADER_DESCS_SAMPLERS_AND_IMAGES;
}
static struct si_descriptors *
-si_sampler_descriptors(struct si_context *sctx, unsigned shader)
+si_sampler_and_image_descriptors(struct si_context *sctx, unsigned shader)
{
- return &sctx->descriptors[si_sampler_descriptors_idx(shader)];
+ return &sctx->descriptors[si_sampler_and_image_descriptors_idx(shader)];
}
static void si_release_sampler_views(struct si_sampler_views *views)
if (resource->target == PIPE_BUFFER)
return;
- /* Now add separate DCC if it's present. */
+ /* Now add separate DCC or HTILE. */
rtex = (struct r600_texture*)resource;
- if (!rtex->dcc_separate_buffer)
- return;
-
- radeon_add_to_buffer_list_check_mem(&sctx->b, &sctx->b.gfx,
- rtex->dcc_separate_buffer, usage,
- RADEON_PRIO_DCC, check_mem);
+ if (rtex->dcc_separate_buffer) {
+ radeon_add_to_buffer_list_check_mem(&sctx->b, &sctx->b.gfx,
+ rtex->dcc_separate_buffer, usage,
+ RADEON_PRIO_DCC, check_mem);
+ }
}
static void si_sampler_views_begin_new_cs(struct si_context *sctx,
}
}
+/* Set buffer descriptor fields that can be changed by reallocations. */
+static void si_set_buf_desc_address(struct r600_resource *buf,
+ uint64_t offset, uint32_t *state)
+{
+ uint64_t va = buf->gpu_address + offset;
+
+ state[0] = va;
+ state[1] &= C_008F04_BASE_ADDRESS_HI;
+ state[1] |= S_008F04_BASE_ADDRESS_HI(va >> 32);
+}
+
/* Set texture descriptor fields that can be changed by reallocations.
*
* \param tex texture
* \param is_stencil select between separate Z & Stencil
* \param state descriptor to update
*/
-void si_set_mutable_tex_desc_fields(struct r600_texture *tex,
- const struct radeon_surf_level *base_level_info,
+void si_set_mutable_tex_desc_fields(struct si_screen *sscreen,
+ struct r600_texture *tex,
+ const struct legacy_surf_level *base_level_info,
unsigned base_level, unsigned first_level,
unsigned block_width, bool is_stencil,
uint32_t *state)
{
- uint64_t va;
- unsigned pitch = base_level_info->nblk_x * block_width;
+ uint64_t va, meta_va = 0;
if (tex->is_depth && !r600_can_sample_zs(tex, is_stencil)) {
tex = tex->flushed_depth_texture;
is_stencil = false;
}
- va = tex->resource.gpu_address + base_level_info->offset;
+ va = tex->resource.gpu_address;
- state[1] &= C_008F14_BASE_ADDRESS_HI;
- state[3] &= C_008F1C_TILING_INDEX;
- state[4] &= C_008F20_PITCH;
- state[6] &= C_008F28_COMPRESSION_EN;
+ if (sscreen->b.chip_class >= GFX9) {
+ /* Only stencil_offset needs to be added here. */
+ if (is_stencil)
+ va += tex->surface.u.gfx9.stencil_offset;
+ else
+ va += tex->surface.u.gfx9.surf_offset;
+ } else {
+ va += base_level_info->offset;
+ }
state[0] = va >> 8;
+ state[1] &= C_008F14_BASE_ADDRESS_HI;
state[1] |= S_008F14_BASE_ADDRESS_HI(va >> 40);
- state[3] |= S_008F1C_TILING_INDEX(si_tile_mode_index(tex, base_level,
- is_stencil));
- state[4] |= S_008F20_PITCH(pitch - 1);
- if (tex->dcc_offset && tex->surface.level[first_level].dcc_enabled) {
- state[6] |= S_008F28_COMPRESSION_EN(1);
- state[7] = ((!tex->dcc_separate_buffer ? tex->resource.gpu_address : 0) +
- tex->dcc_offset +
- base_level_info->dcc_offset) >> 8;
+ if (sscreen->b.chip_class >= VI) {
+ state[6] &= C_008F28_COMPRESSION_EN;
+ state[7] = 0;
+
+ if (vi_dcc_enabled(tex, first_level)) {
+ meta_va = (!tex->dcc_separate_buffer ? tex->resource.gpu_address : 0) +
+ tex->dcc_offset;
+
+ if (sscreen->b.chip_class <= VI)
+ meta_va += base_level_info->dcc_offset;
+ } else if (tex->tc_compatible_htile && first_level == 0) {
+ meta_va = tex->resource.gpu_address + tex->htile_offset;
+ }
+
+ if (meta_va) {
+ state[6] |= S_008F28_COMPRESSION_EN(1);
+ state[7] = meta_va >> 8;
+ }
+ }
+
+ if (sscreen->b.chip_class >= GFX9) {
+ state[3] &= C_008F1C_SW_MODE;
+ state[4] &= C_008F20_PITCH_GFX9;
+
+ if (is_stencil) {
+ state[3] |= S_008F1C_SW_MODE(tex->surface.u.gfx9.stencil.swizzle_mode);
+ state[4] |= S_008F20_PITCH_GFX9(tex->surface.u.gfx9.stencil.epitch);
+ } else {
+ state[3] |= S_008F1C_SW_MODE(tex->surface.u.gfx9.surf.swizzle_mode);
+ state[4] |= S_008F20_PITCH_GFX9(tex->surface.u.gfx9.surf.epitch);
+ }
+
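+ /* Word 5 holds the high bits of the DCC/HTILE metadata address and
+ * its pipe/RB alignment flags on GFX9.
+ */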
+ state[5] &= C_008F24_META_DATA_ADDRESS &
+ C_008F24_META_PIPE_ALIGNED &
+ C_008F24_META_RB_ALIGNED;
+ if (meta_va) {
+ struct gfx9_surf_meta_flags meta;
+
+ if (tex->dcc_offset)
+ meta = tex->surface.u.gfx9.dcc;
+ else
+ meta = tex->surface.u.gfx9.htile;
+
+ state[5] |= S_008F24_META_DATA_ADDRESS(meta_va >> 40) |
+ S_008F24_META_PIPE_ALIGNED(meta.pipe_aligned) |
+ S_008F24_META_RB_ALIGNED(meta.rb_aligned);
+ }
+ } else {
+ /* SI-CI-VI */
+ unsigned pitch = base_level_info->nblk_x * block_width;
+ unsigned index = si_tile_mode_index(tex, base_level, is_stencil);
+
+ state[3] &= C_008F1C_TILING_INDEX;
+ state[3] |= S_008F1C_TILING_INDEX(index);
+ state[4] &= C_008F20_PITCH_GFX6;
+ state[4] |= S_008F20_PITCH_GFX6(pitch - 1);
+ }
+}
+
+static void si_set_sampler_view_desc(struct si_context *sctx,
+ struct si_sampler_view *sview,
+ struct si_sampler_state *sstate,
+ uint32_t *desc)
+{
+ struct pipe_sampler_view *view = &sview->base;
+ struct r600_texture *rtex = (struct r600_texture *)view->texture;
+ bool is_buffer = rtex->resource.b.b.target == PIPE_BUFFER;
+
+ if (unlikely(!is_buffer && sview->dcc_incompatible)) {
+ if (vi_dcc_enabled(rtex, view->u.tex.first_level))
+ if (!r600_texture_disable_dcc(&sctx->b, rtex))
+ sctx->b.decompress_dcc(&sctx->b.b, rtex);
+
+ sview->dcc_incompatible = false;
+ }
+
+ assert(rtex); /* views with texture == NULL aren't supported */
+ memcpy(desc, sview->state, 8*4);
+
+ if (is_buffer) {
+ si_set_buf_desc_address(&rtex->resource,
+ sview->base.u.buf.offset,
+ desc + 4);
+ } else {
+ bool is_separate_stencil = rtex->db_compatible &&
+ sview->is_stencil_sampler;
+
+ si_set_mutable_tex_desc_fields(sctx->screen, rtex,
+ sview->base_level_info,
+ sview->base_level,
+ sview->base.u.tex.first_level,
+ sview->block_width,
+ is_separate_stencil,
+ desc);
+ }
+
+ if (!is_buffer && rtex->fmask.size) {
+ memcpy(desc + 8, sview->fmask_state, 8*4);
+ } else {
+ /* Disable FMASK and bind sampler state in [12:15]. */
+ memcpy(desc + 8, null_texture_descriptor, 4*4);
+
+ if (sstate)
+ memcpy(desc + 12, sstate->val, 4*4);
}
}
{
struct si_sampler_views *views = &sctx->samplers[shader].views;
struct si_sampler_view *rview = (struct si_sampler_view*)view;
- struct si_descriptors *descs = si_sampler_descriptors(sctx, shader);
+ struct si_descriptors *descs = si_sampler_and_image_descriptors(sctx, shader);
+ unsigned desc_slot = si_get_sampler_slot(slot);
+ uint32_t *desc = descs->list + desc_slot * 16;
if (views->views[slot] == view && !disallow_early_out)
return;
if (view) {
struct r600_texture *rtex = (struct r600_texture *)view->texture;
- uint32_t *desc = descs->list + slot * 16;
-
- si_sampler_view_add_buffer(sctx, view->texture,
- RADEON_USAGE_READ,
- rview->is_stencil_sampler, true);
-
- pipe_sampler_view_reference(&views->views[slot], view);
- memcpy(desc, rview->state, 8*4);
-
- if (view->texture && view->texture->target != PIPE_BUFFER) {
- bool is_separate_stencil =
- rtex->db_compatible &&
- rview->is_stencil_sampler;
-
- si_set_mutable_tex_desc_fields(rtex,
- rview->base_level_info,
- rview->base_level,
- rview->base.u.tex.first_level,
- rview->block_width,
- is_separate_stencil,
- desc);
- }
- if (view->texture && view->texture->target != PIPE_BUFFER &&
- rtex->fmask.size) {
- memcpy(desc + 8,
- rview->fmask_state, 8*4);
- } else {
- /* Disable FMASK and bind sampler state in [12:15]. */
- memcpy(desc + 8,
- null_texture_descriptor, 4*4);
+ si_set_sampler_view_desc(sctx, rview,
+ views->sampler_states[slot], desc);
- if (views->sampler_states[slot])
- memcpy(desc + 12,
- views->sampler_states[slot], 4*4);
- }
+ if (rtex->resource.b.b.target == PIPE_BUFFER)
+ rtex->resource.bind_history |= PIPE_BIND_SAMPLER_VIEW;
+ pipe_sampler_view_reference(&views->views[slot], view);
views->enabled_mask |= 1u << slot;
+
+ /* Since this can flush, it must be done after enabled_mask is
+ * updated. */
+ si_sampler_view_add_buffer(sctx, view->texture,
+ RADEON_USAGE_READ,
+ rview->is_stencil_sampler, true);
} else {
pipe_sampler_view_reference(&views->views[slot], NULL);
- memcpy(descs->list + slot*16, null_texture_descriptor, 8*4);
+ memcpy(desc, null_texture_descriptor, 8*4);
/* Only clear the lower dwords of FMASK. */
- memcpy(descs->list + slot*16 + 8, null_texture_descriptor, 4*4);
+ memcpy(desc + 8, null_texture_descriptor, 4*4);
+ /* Re-set the sampler state if we are transitioning from FMASK. */
+ if (views->sampler_states[slot])
+ memcpy(desc + 12,
+ views->sampler_states[slot]->val, 4*4);
+
views->enabled_mask &= ~(1u << slot);
}
- descs->dirty_mask |= 1u << slot;
- sctx->descriptors_dirty |= 1u << si_sampler_descriptors_idx(shader);
+ descs->dirty_mask |= 1ull << desc_slot;
+ sctx->descriptors_dirty |= 1u << si_sampler_and_image_descriptors_idx(shader);
+}
+
+static bool color_needs_decompression(struct r600_texture *rtex)
+{
+ return rtex->fmask.size ||
+ (rtex->dirty_level_mask &&
+ (rtex->cmask.size || rtex->dcc_offset));
+}
+
+static bool depth_needs_decompression(struct r600_texture *rtex)
+{
+ /* If the depth/stencil texture is TC-compatible, no decompression
+ * will be done. The decompression function will only flush DB caches
+ * to make it coherent with shaders. That's necessary because the driver
+ * doesn't flush DB caches in any other case.
+ */
+ return rtex->db_compatible;
}
-static bool is_compressed_colortex(struct r600_texture *rtex)
+static void si_update_shader_needs_decompress_mask(struct si_context *sctx,
+ unsigned shader)
{
- return rtex->cmask.size || rtex->fmask.size ||
- (rtex->dcc_offset && rtex->dirty_level_mask);
+ struct si_textures_info *samplers = &sctx->samplers[shader];
+ unsigned shader_bit = 1 << shader;
+
+ if (samplers->needs_depth_decompress_mask ||
+ samplers->needs_color_decompress_mask ||
+ sctx->images[shader].needs_color_decompress_mask)
+ sctx->shader_needs_decompress_mask |= shader_bit;
+ else
+ sctx->shader_needs_decompress_mask &= ~shader_bit;
}
static void si_set_sampler_views(struct pipe_context *ctx,
- unsigned shader, unsigned start,
+ enum pipe_shader_type shader, unsigned start,
unsigned count,
struct pipe_sampler_view **views)
{
unsigned slot = start + i;
if (!views || !views[i]) {
- samplers->depth_texture_mask &= ~(1u << slot);
- samplers->compressed_colortex_mask &= ~(1u << slot);
+ samplers->needs_depth_decompress_mask &= ~(1u << slot);
+ samplers->needs_color_decompress_mask &= ~(1u << slot);
si_set_sampler_view(sctx, shader, slot, NULL, false);
continue;
}
struct r600_texture *rtex =
(struct r600_texture*)views[i]->texture;
- if (rtex->db_compatible) {
- samplers->depth_texture_mask |= 1u << slot;
+ if (depth_needs_decompression(rtex)) {
+ samplers->needs_depth_decompress_mask |= 1u << slot;
} else {
- samplers->depth_texture_mask &= ~(1u << slot);
+ samplers->needs_depth_decompress_mask &= ~(1u << slot);
}
- if (is_compressed_colortex(rtex)) {
- samplers->compressed_colortex_mask |= 1u << slot;
+ if (color_needs_decompression(rtex)) {
+ samplers->needs_color_decompress_mask |= 1u << slot;
} else {
- samplers->compressed_colortex_mask &= ~(1u << slot);
+ samplers->needs_color_decompress_mask &= ~(1u << slot);
}
if (rtex->dcc_offset &&
p_atomic_read(&rtex->framebuffers_bound))
sctx->need_check_render_feedback = true;
} else {
- samplers->depth_texture_mask &= ~(1u << slot);
- samplers->compressed_colortex_mask &= ~(1u << slot);
+ samplers->needs_depth_decompress_mask &= ~(1u << slot);
+ samplers->needs_color_decompress_mask &= ~(1u << slot);
}
}
+
+ si_update_shader_needs_decompress_mask(sctx, shader);
}
static void
-si_samplers_update_compressed_colortex_mask(struct si_textures_info *samplers)
+si_samplers_update_needs_color_decompress_mask(struct si_textures_info *samplers)
{
unsigned mask = samplers->views.enabled_mask;
if (res && res->target != PIPE_BUFFER) {
struct r600_texture *rtex = (struct r600_texture *)res;
- if (is_compressed_colortex(rtex)) {
- samplers->compressed_colortex_mask |= 1u << i;
+ if (color_needs_decompression(rtex)) {
+ samplers->needs_color_decompress_mask |= 1u << i;
} else {
- samplers->compressed_colortex_mask &= ~(1u << i);
+ samplers->needs_color_decompress_mask &= ~(1u << i);
}
}
}
/* IMAGE VIEWS */
-static unsigned
-si_image_descriptors_idx(unsigned shader)
-{
- return SI_DESCS_FIRST_SHADER + shader * SI_NUM_SHADER_DESCS +
- SI_SHADER_DESCS_IMAGES;
-}
-
-static struct si_descriptors*
-si_image_descriptors(struct si_context *sctx, unsigned shader)
-{
- return &sctx->descriptors[si_image_descriptors_idx(shader)];
-}
-
static void
si_release_image_views(struct si_images_info *images)
{
struct si_images_info *images = &ctx->images[shader];
if (images->enabled_mask & (1u << slot)) {
- struct si_descriptors *descs = si_image_descriptors(ctx, shader);
+ struct si_descriptors *descs = si_sampler_and_image_descriptors(ctx, shader);
+ unsigned desc_slot = si_get_image_slot(slot);
pipe_resource_reference(&images->views[slot].resource, NULL);
- images->compressed_colortex_mask &= ~(1 << slot);
+ images->needs_color_decompress_mask &= ~(1 << slot);
- memcpy(descs->list + slot*8, null_image_descriptor, 8*4);
+ memcpy(descs->list + desc_slot*8, null_image_descriptor, 8*4);
images->enabled_mask &= ~(1u << slot);
- descs->dirty_mask |= 1u << slot;
- ctx->descriptors_dirty |= 1u << si_image_descriptors_idx(shader);
+ /* two 8-byte images share one 16-byte slot */
+ descs->dirty_mask |= 1u << (desc_slot / 2);
+ ctx->descriptors_dirty |= 1u << si_sampler_and_image_descriptors_idx(shader);
}
}
view->u.buf.offset + view->u.buf.size);
}
-static void si_set_shader_image(struct si_context *ctx,
- unsigned shader,
- unsigned slot, const struct pipe_image_view *view)
+static void si_set_shader_image_desc(struct si_context *ctx,
+ const struct pipe_image_view *view,
+ bool skip_decompress,
+ uint32_t *desc)
{
struct si_screen *screen = ctx->screen;
- struct si_images_info *images = &ctx->images[shader];
- struct si_descriptors *descs = si_image_descriptors(ctx, shader);
struct r600_resource *res;
- if (!view || !view->resource) {
- si_disable_shader_image(ctx, shader, slot);
- return;
- }
-
res = (struct r600_resource *)view->resource;
- if (&images->views[slot] != view)
- util_copy_image_view(&images->views[slot], view);
-
- si_sampler_view_add_buffer(ctx, &res->b.b,
- RADEON_USAGE_READWRITE, false, true);
-
if (res->b.b.target == PIPE_BUFFER) {
if (view->access & PIPE_IMAGE_ACCESS_WRITE)
si_mark_image_range_valid(view);
si_make_buffer_descriptor(screen, res,
view->format,
view->u.buf.offset,
- view->u.buf.size,
- descs->list + slot * 8);
- images->compressed_colortex_mask &= ~(1 << slot);
+ view->u.buf.size, desc);
+ si_set_buf_desc_address(res, view->u.buf.offset, desc + 4);
} else {
static const unsigned char swizzle[4] = { 0, 1, 2, 3 };
struct r600_texture *tex = (struct r600_texture *)res;
unsigned level = view->u.tex.level;
- unsigned width, height, depth;
- uint32_t *desc = descs->list + slot * 8;
- bool uses_dcc = tex->dcc_offset &&
- tex->surface.level[level].dcc_enabled;
+ unsigned width, height, depth, hw_level;
+ bool uses_dcc = vi_dcc_enabled(tex, level);
assert(!tex->is_depth);
assert(tex->fmask.size == 0);
- if (uses_dcc &&
- view->access & PIPE_IMAGE_ACCESS_WRITE) {
+ if (uses_dcc && !skip_decompress &&
+ (view->access & PIPE_IMAGE_ACCESS_WRITE ||
+ !vi_dcc_formats_compatible(res->b.b.format, view->format))) {
/* If DCC can't be disabled, at least decompress it.
* The decompression is relatively cheap if the surface
* has been decompressed already.
*/
- if (r600_texture_disable_dcc(&ctx->b, tex))
- uses_dcc = false;
- else
+ if (!r600_texture_disable_dcc(&ctx->b, tex))
ctx->b.decompress_dcc(&ctx->b.b, tex);
}
- if (is_compressed_colortex(tex)) {
- images->compressed_colortex_mask |= 1 << slot;
+ if (ctx->b.chip_class >= GFX9) {
+ /* Always set the base address. The swizzle modes don't
+ * allow setting mipmap level offsets as the base.
+ */
+ width = res->b.b.width0;
+ height = res->b.b.height0;
+ depth = res->b.b.depth0;
+ hw_level = level;
} else {
- images->compressed_colortex_mask &= ~(1 << slot);
+ /* Always force the base level to the selected level.
+ *
+ * This is required for 3D textures, where otherwise
+ * selecting a single slice for non-layered bindings
+ * fails. It doesn't hurt the other targets.
+ */
+ width = u_minify(res->b.b.width0, level);
+ height = u_minify(res->b.b.height0, level);
+ depth = u_minify(res->b.b.depth0, level);
+ hw_level = 0;
}
- if (uses_dcc &&
- p_atomic_read(&tex->framebuffers_bound))
- ctx->need_check_render_feedback = true;
-
- /* Always force the base level to the selected level.
- *
- * This is required for 3D textures, where otherwise
- * selecting a single slice for non-layered bindings
- * fails. It doesn't hurt the other targets.
- */
- width = u_minify(res->b.b.width0, level);
- height = u_minify(res->b.b.height0, level);
- depth = u_minify(res->b.b.depth0, level);
-
si_make_texture_descriptor(screen, tex,
false, res->b.b.target,
view->format, swizzle,
- 0, 0,
+ hw_level, hw_level,
view->u.tex.first_layer,
view->u.tex.last_layer,
width, height, depth,
desc, NULL);
- si_set_mutable_tex_desc_fields(tex, &tex->surface.level[level],
+ si_set_mutable_tex_desc_fields(screen, tex,
+ &tex->surface.u.legacy.level[level],
level, level,
util_format_get_blockwidth(view->format),
false, desc);
}
+}
+
+static void si_set_shader_image(struct si_context *ctx,
+ unsigned shader,
+ unsigned slot, const struct pipe_image_view *view,
+ bool skip_decompress)
+{
+ struct si_images_info *images = &ctx->images[shader];
+ struct si_descriptors *descs = si_sampler_and_image_descriptors(ctx, shader);
+ struct r600_resource *res;
+ unsigned desc_slot = si_get_image_slot(slot);
+ uint32_t *desc = descs->list + desc_slot * 8;
+
+ if (!view || !view->resource) {
+ si_disable_shader_image(ctx, shader, slot);
+ return;
+ }
+
+ res = (struct r600_resource *)view->resource;
+
+ if (&images->views[slot] != view)
+ util_copy_image_view(&images->views[slot], view);
+
+ si_set_shader_image_desc(ctx, view, skip_decompress, desc);
+
+ if (res->b.b.target == PIPE_BUFFER) {
+ images->needs_color_decompress_mask &= ~(1 << slot);
+ res->bind_history |= PIPE_BIND_SHADER_IMAGE;
+ } else {
+ struct r600_texture *tex = (struct r600_texture *)res;
+ unsigned level = view->u.tex.level;
+
+ if (color_needs_decompression(tex)) {
+ images->needs_color_decompress_mask |= 1 << slot;
+ } else {
+ images->needs_color_decompress_mask &= ~(1 << slot);
+ }
+
+ if (vi_dcc_enabled(tex, level) &&
+ p_atomic_read(&tex->framebuffers_bound))
+ ctx->need_check_render_feedback = true;
+ }
images->enabled_mask |= 1u << slot;
- descs->dirty_mask |= 1u << slot;
- ctx->descriptors_dirty |= 1u << si_image_descriptors_idx(shader);
+ /* two 8-byte images share one 16-byte slot */
+ descs->dirty_mask |= 1u << (desc_slot / 2);
+ ctx->descriptors_dirty |= 1u << si_sampler_and_image_descriptors_idx(shader);
+
+ /* Since this can flush, it must be done after enabled_mask is updated. */
+ si_sampler_view_add_buffer(ctx, &res->b.b,
+ (view->access & PIPE_IMAGE_ACCESS_WRITE) ?
+ RADEON_USAGE_READWRITE : RADEON_USAGE_READ,
+ false, true);
}
static void
-si_set_shader_images(struct pipe_context *pipe, unsigned shader,
+si_set_shader_images(struct pipe_context *pipe,
+ enum pipe_shader_type shader,
unsigned start_slot, unsigned count,
const struct pipe_image_view *views)
{
if (views) {
for (i = 0, slot = start_slot; i < count; ++i, ++slot)
- si_set_shader_image(ctx, shader, slot, &views[i]);
+ si_set_shader_image(ctx, shader, slot, &views[i], false);
} else {
for (i = 0, slot = start_slot; i < count; ++i, ++slot)
- si_set_shader_image(ctx, shader, slot, NULL);
+ si_set_shader_image(ctx, shader, slot, NULL, false);
}
+
+ si_update_shader_needs_decompress_mask(ctx, shader);
}
static void
-si_images_update_compressed_colortex_mask(struct si_images_info *images)
+si_images_update_needs_color_decompress_mask(struct si_images_info *images)
{
unsigned mask = images->enabled_mask;
if (res && res->target != PIPE_BUFFER) {
struct r600_texture *rtex = (struct r600_texture *)res;
- if (is_compressed_colortex(rtex)) {
- images->compressed_colortex_mask |= 1 << i;
+ if (color_needs_decompression(rtex)) {
+ images->needs_color_decompress_mask |= 1 << i;
} else {
- images->compressed_colortex_mask &= ~(1 << i);
+ images->needs_color_decompress_mask &= ~(1 << i);
}
}
}
{
struct si_context *sctx = (struct si_context *)ctx;
struct si_textures_info *samplers = &sctx->samplers[shader];
- struct si_descriptors *desc = si_sampler_descriptors(sctx, shader);
+ struct si_descriptors *desc = si_sampler_and_image_descriptors(sctx, shader);
struct si_sampler_state **sstates = (struct si_sampler_state**)states;
int i;
for (i = 0; i < count; i++) {
unsigned slot = start + i;
+ unsigned desc_slot = si_get_sampler_slot(slot);
if (!sstates[i] ||
sstates[i] == samplers->views.sampler_states[slot])
continue;
+#ifdef DEBUG
+ assert(sstates[i]->magic == SI_SAMPLER_STATE_MAGIC);
+#endif
samplers->views.sampler_states[slot] = sstates[i];
/* If FMASK is bound, don't overwrite it.
* The sampler state will be set after FMASK is unbound.
*/
- if (samplers->views.views[i] &&
- samplers->views.views[i]->texture &&
- samplers->views.views[i]->texture->target != PIPE_BUFFER &&
- ((struct r600_texture*)samplers->views.views[i]->texture)->fmask.size)
+ if (samplers->views.views[slot] &&
+ samplers->views.views[slot]->texture &&
+ samplers->views.views[slot]->texture->target != PIPE_BUFFER &&
+ ((struct r600_texture*)samplers->views.views[slot]->texture)->fmask.size)
continue;
- memcpy(desc->list + slot * 16 + 12, sstates[i]->val, 4*4);
- desc->dirty_mask |= 1u << slot;
- sctx->descriptors_dirty |= 1u << si_sampler_descriptors_idx(shader);
+ memcpy(desc->list + desc_slot * 16 + 12, sstates[i]->val, 4*4);
+ desc->dirty_mask |= 1ull << desc_slot;
+ sctx->descriptors_dirty |= 1u << si_sampler_and_image_descriptors_idx(shader);
}
}
/* BUFFER RESOURCES */
-static void si_init_buffer_resources(struct si_buffer_resources *buffers,
+static void si_init_buffer_resources(struct si_context *sctx,
+ struct si_buffer_resources *buffers,
struct si_descriptors *descs,
unsigned num_buffers,
+ unsigned first_ce_slot,
+ unsigned num_ce_slots,
unsigned shader_userdata_index,
enum radeon_bo_usage shader_usage,
+ enum radeon_bo_usage shader_usage_constbuf,
enum radeon_bo_priority priority,
+ enum radeon_bo_priority priority_constbuf,
unsigned *ce_offset)
{
buffers->shader_usage = shader_usage;
+ buffers->shader_usage_constbuf = shader_usage_constbuf;
buffers->priority = priority;
+ buffers->priority_constbuf = priority_constbuf;
buffers->buffers = CALLOC(num_buffers, sizeof(struct pipe_resource*));
- si_init_descriptors(descs, shader_userdata_index, 4,
- num_buffers, NULL, ce_offset);
+ si_init_descriptors(sctx, descs, shader_userdata_index, 4, num_buffers,
+ first_ce_slot, num_ce_slots, ce_offset);
}
static void si_release_buffer_resources(struct si_buffer_resources *buffers,
int i = u_bit_scan(&mask);
radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
- (struct r600_resource*)buffers->buffers[i],
- buffers->shader_usage, buffers->priority);
+ r600_resource(buffers->buffers[i]),
+ i < SI_NUM_SHADER_BUFFERS ? buffers->shader_usage :
+ buffers->shader_usage_constbuf,
+ i < SI_NUM_SHADER_BUFFERS ? buffers->priority :
+ buffers->priority_constbuf);
+ }
+}
+
+static void si_get_buffer_from_descriptors(struct si_buffer_resources *buffers,
+ struct si_descriptors *descs,
+ unsigned idx, struct pipe_resource **buf,
+ unsigned *offset, unsigned *size)
+{
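+ /* Reconstruct the bound buffer, offset and size from the descriptor:
+ * desc[0..1] hold the 48-bit virtual address and desc[2] holds
+ * NUM_RECORDS, which is the size in bytes when the stride is 0.
+ */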
+ pipe_resource_reference(buf, buffers->buffers[idx]);
+ if (*buf) {
+ struct r600_resource *res = r600_resource(*buf);
+ const uint32_t *desc = descs->list + idx * 4;
+ uint64_t va;
+
+ *size = desc[2];
+
+ assert(G_008F04_STRIDE(desc[1]) == 0);
+ va = ((uint64_t)desc[1] << 32) | desc[0];
+
+ assert(va >= res->gpu_address && va + *size <= res->gpu_address + res->bo_size);
+ *offset = va - res->gpu_address;
}
}
int i;
for (i = 0; i < count; i++) {
- int vb = sctx->vertex_elements->elements[i].vertex_buffer_index;
+ int vb = sctx->vertex_elements->vertex_buffer_index[i];
if (vb >= ARRAY_SIZE(sctx->vertex_buffer))
continue;
- if (!sctx->vertex_buffer[vb].buffer)
+ if (!sctx->vertex_buffer[vb].buffer.resource)
continue;
radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
- (struct r600_resource*)sctx->vertex_buffer[vb].buffer,
+ (struct r600_resource*)sctx->vertex_buffer[vb].buffer.resource,
RADEON_USAGE_READ, RADEON_PRIO_VERTEX_BUFFER);
}
bool si_upload_vertex_buffer_descriptors(struct si_context *sctx)
{
+ struct si_vertex_elements *velems = sctx->vertex_elements;
struct si_descriptors *desc = &sctx->vertex_buffers;
- bool bound[SI_NUM_VERTEX_BUFFERS] = {};
- unsigned i, count = sctx->vertex_elements->count;
+ unsigned i, count;
+ unsigned desc_list_byte_size;
+ unsigned first_vb_use_mask;
uint64_t va;
uint32_t *ptr;
- if (!sctx->vertex_buffers_dirty)
+ if (!sctx->vertex_buffers_dirty || !velems)
return true;
- if (!count || !sctx->vertex_elements)
+
+ count = velems->count;
+
+ if (!count)
return true;
+ desc_list_byte_size = velems->desc_list_byte_size;
+ first_vb_use_mask = velems->first_vb_use_mask;
+
/* Vertex buffer descriptors are the only ones which are uploaded
* directly through a staging buffer and don't go through
* the fine-grained upload path.
*/
- u_upload_alloc(sctx->b.uploader, 0, count * 16, 256, &desc->buffer_offset,
+ u_upload_alloc(sctx->b.b.const_uploader, 0,
+ desc_list_byte_size,
+ si_optimal_tcc_alignment(sctx, desc_list_byte_size),
+ (unsigned*)&desc->buffer_offset,
(struct pipe_resource**)&desc->buffer, (void**)&ptr);
if (!desc->buffer)
return false;
desc->buffer, RADEON_USAGE_READ,
RADEON_PRIO_DESCRIPTORS);
- assert(count <= SI_NUM_VERTEX_BUFFERS);
+ assert(count <= SI_MAX_ATTRIBS);
for (i = 0; i < count; i++) {
- struct pipe_vertex_element *ve = &sctx->vertex_elements->elements[i];
struct pipe_vertex_buffer *vb;
struct r600_resource *rbuffer;
unsigned offset;
+ unsigned vbo_index = velems->vertex_buffer_index[i];
uint32_t *desc = &ptr[i*4];
- if (ve->vertex_buffer_index >= ARRAY_SIZE(sctx->vertex_buffer)) {
- memset(desc, 0, 16);
- continue;
- }
-
- vb = &sctx->vertex_buffer[ve->vertex_buffer_index];
- rbuffer = (struct r600_resource*)vb->buffer;
+ vb = &sctx->vertex_buffer[vbo_index];
+ rbuffer = (struct r600_resource*)vb->buffer.resource;
if (!rbuffer) {
memset(desc, 0, 16);
continue;
}
- offset = vb->buffer_offset + ve->src_offset;
+ offset = vb->buffer_offset + velems->src_offset[i];
va = rbuffer->gpu_address + offset;
/* Fill in T# buffer resource description */
desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) |
S_008F04_STRIDE(vb->stride);
- if (sctx->b.chip_class <= CIK && vb->stride)
+ if (sctx->b.chip_class != VI && vb->stride) {
/* Round up by rounding down and adding 1 */
- desc[2] = (vb->buffer->width0 - offset -
- sctx->vertex_elements->format_size[i]) /
+ desc[2] = (vb->buffer.resource->width0 - offset -
+ velems->format_size[i]) /
vb->stride + 1;
- else
- desc[2] = vb->buffer->width0 - offset;
+ } else {
+ desc[2] = vb->buffer.resource->width0 - offset;
+ }
- desc[3] = sctx->vertex_elements->rsrc_word3[i];
+ desc[3] = velems->rsrc_word3[i];
- if (!bound[ve->vertex_buffer_index]) {
+ if (first_vb_use_mask & (1 << i)) {
radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
- (struct r600_resource*)vb->buffer,
+ (struct r600_resource*)vb->buffer.resource,
RADEON_USAGE_READ, RADEON_PRIO_VERTEX_BUFFER);
- bound[ve->vertex_buffer_index] = true;
}
}
* on performance (confirmed by testing). New descriptors are always
* uploaded to a fresh new buffer, so I don't think flushing the const
* cache is needed. */
- desc->pointer_dirty = true;
si_mark_atom_dirty(sctx, &sctx->shader_userdata.atom);
+ if (sctx->b.chip_class >= CIK)
+ si_mark_atom_dirty(sctx, &sctx->prefetch_L2);
sctx->vertex_buffers_dirty = false;
+ sctx->vertex_buffer_pointer_dirty = true;
return true;
}
/* CONSTANT BUFFERS */
static unsigned
-si_const_buffer_descriptors_idx(unsigned shader)
+si_const_and_shader_buffer_descriptors_idx(unsigned shader)
{
return SI_DESCS_FIRST_SHADER + shader * SI_NUM_SHADER_DESCS +
- SI_SHADER_DESCS_CONST_BUFFERS;
+ SI_SHADER_DESCS_CONST_AND_SHADER_BUFFERS;
}
static struct si_descriptors *
-si_const_buffer_descriptors(struct si_context *sctx, unsigned shader)
+si_const_and_shader_buffer_descriptors(struct si_context *sctx, unsigned shader)
{
- return &sctx->descriptors[si_const_buffer_descriptors_idx(shader)];
+ return &sctx->descriptors[si_const_and_shader_buffer_descriptors_idx(shader)];
}
void si_upload_const_buffer(struct si_context *sctx, struct r600_resource **rbuffer,
{
void *tmp;
- u_upload_alloc(sctx->b.uploader, 0, size, 256, const_offset,
+ u_upload_alloc(sctx->b.b.const_uploader, 0, size,
+ si_optimal_tcc_alignment(sctx, size),
+ const_offset,
(struct pipe_resource**)rbuffer, &tmp);
if (*rbuffer)
util_memcpy_cpu_to_le32(tmp, ptr, size);
} else {
pipe_resource_reference(&buffer, input->buffer);
va = r600_resource(buffer)->gpu_address + input->buffer_offset;
+ /* Only track usage for non-user buffers. */
+ r600_resource(buffer)->bind_history |= PIPE_BIND_CONSTANT_BUFFER;
}
/* Set the descriptor. */
buffers->buffers[slot] = buffer;
radeon_add_to_buffer_list_check_mem(&sctx->b, &sctx->b.gfx,
(struct r600_resource*)buffer,
- buffers->shader_usage,
- buffers->priority, true);
+ buffers->shader_usage_constbuf,
+ buffers->priority_constbuf, true);
buffers->enabled_mask |= 1u << slot;
} else {
/* Clear the descriptor. */
}
static void si_pipe_set_constant_buffer(struct pipe_context *ctx,
- uint shader, uint slot,
+ enum pipe_shader_type shader, uint slot,
const struct pipe_constant_buffer *input)
{
struct si_context *sctx = (struct si_context *)ctx;
if (shader >= SI_NUM_SHADERS)
return;
- si_set_constant_buffer(sctx, &sctx->const_buffers[shader],
- si_const_buffer_descriptors_idx(shader),
+ slot = si_get_constbuf_slot(slot);
+ si_set_constant_buffer(sctx, &sctx->const_and_shader_buffers[shader],
+ si_const_and_shader_buffer_descriptors_idx(shader),
slot, input);
}
-/* SHADER BUFFERS */
-
-static unsigned
-si_shader_buffer_descriptors_idx(unsigned shader)
+void si_get_pipe_constant_buffer(struct si_context *sctx, uint shader,
+ uint slot, struct pipe_constant_buffer *cbuf)
{
- return SI_DESCS_FIRST_SHADER + shader * SI_NUM_SHADER_DESCS +
- SI_SHADER_DESCS_SHADER_BUFFERS;
+ cbuf->user_buffer = NULL;
+ si_get_buffer_from_descriptors(
+ &sctx->const_and_shader_buffers[shader],
+ si_const_and_shader_buffer_descriptors(sctx, shader),
+ si_get_constbuf_slot(slot),
+ &cbuf->buffer, &cbuf->buffer_offset, &cbuf->buffer_size);
}
-static struct si_descriptors *
-si_shader_buffer_descriptors(struct si_context *sctx, unsigned shader)
-{
- return &sctx->descriptors[si_shader_buffer_descriptors_idx(shader)];
-}
+/* SHADER BUFFERS */
-static void si_set_shader_buffers(struct pipe_context *ctx, unsigned shader,
+static void si_set_shader_buffers(struct pipe_context *ctx,
+ enum pipe_shader_type shader,
unsigned start_slot, unsigned count,
const struct pipe_shader_buffer *sbuffers)
{
struct si_context *sctx = (struct si_context *)ctx;
- struct si_buffer_resources *buffers = &sctx->shader_buffers[shader];
- struct si_descriptors *descs = si_shader_buffer_descriptors(sctx, shader);
+ struct si_buffer_resources *buffers = &sctx->const_and_shader_buffers[shader];
+ struct si_descriptors *descs = si_const_and_shader_buffer_descriptors(sctx, shader);
unsigned i;
assert(start_slot + count <= SI_NUM_SHADER_BUFFERS);
for (i = 0; i < count; ++i) {
const struct pipe_shader_buffer *sbuffer = sbuffers ? &sbuffers[i] : NULL;
struct r600_resource *buf;
- unsigned slot = start_slot + i;
+ unsigned slot = si_get_shaderbuf_slot(start_slot + i);
uint32_t *desc = descs->list + slot * 4;
uint64_t va;
buffers->enabled_mask &= ~(1u << slot);
descs->dirty_mask |= 1u << slot;
sctx->descriptors_dirty |=
- 1u << si_shader_buffer_descriptors_idx(shader);
+ 1u << si_const_and_shader_buffer_descriptors_idx(shader);
continue;
}
radeon_add_to_buffer_list_check_mem(&sctx->b, &sctx->b.gfx, buf,
buffers->shader_usage,
buffers->priority, true);
+ buf->bind_history |= PIPE_BIND_SHADER_BUFFER;
+
buffers->enabled_mask |= 1u << slot;
descs->dirty_mask |= 1u << slot;
sctx->descriptors_dirty |=
- 1u << si_shader_buffer_descriptors_idx(shader);
+ 1u << si_const_and_shader_buffer_descriptors_idx(shader);
+
+ util_range_add(&buf->valid_buffer_range, sbuffer->buffer_offset,
+ sbuffer->buffer_offset + sbuffer->buffer_size);
+ }
+}
+
+void si_get_shader_buffers(struct si_context *sctx,
+ enum pipe_shader_type shader,
+ uint start_slot, uint count,
+ struct pipe_shader_buffer *sbuf)
+{
+ struct si_buffer_resources *buffers = &sctx->const_and_shader_buffers[shader];
+ struct si_descriptors *descs = si_const_and_shader_buffer_descriptors(sctx, shader);
+
+ for (unsigned i = 0; i < count; ++i) {
+ si_get_buffer_from_descriptors(
+ buffers, descs,
+ si_get_shaderbuf_slot(start_slot + i),
+ &sbuf[i].buffer, &sbuf[i].buffer_offset,
+ &sbuf[i].buffer_size);
}
}
S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32) |
- S_008F0C_ELEMENT_SIZE(element_size) |
S_008F0C_INDEX_STRIDE(index_stride) |
S_008F0C_ADD_TID_ENABLE(add_tid);
+ if (sctx->b.chip_class >= GFX9)
+ assert(!swizzle || element_size == 1); /* always 4 bytes on GFX9 */
+ else
+ desc[3] |= S_008F0C_ELEMENT_SIZE(element_size);
+
pipe_resource_reference(&buffers->buffers[slot], buffer);
radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
(struct r600_resource*)buffer,
buffers->shader_usage,
RADEON_PRIO_SHADER_RW_BUFFER,
true);
+ r600_resource(buffer)->bind_history |= PIPE_BIND_STREAM_OUTPUT;
+
buffers->enabled_mask |= 1u << bufidx;
} else {
/* Clear the descriptor and unset the resource. */
uint64_t offset_within_buffer = old_desc_va - old_buf_va;
/* Update the descriptor. */
- uint64_t va = r600_resource(new_buf)->gpu_address + offset_within_buffer;
-
- desc[0] = va;
- desc[1] = (desc[1] & C_008F04_BASE_ADDRESS_HI) |
- S_008F04_BASE_ADDRESS_HI(va >> 32);
+ si_set_buf_desc_address(r600_resource(new_buf), offset_within_buffer,
+ desc);
}
/* INTERNAL CONST BUFFERS */
/* TEXTURE METADATA ENABLE/DISABLE */
+static void
+si_resident_handles_update_needs_color_decompress(struct si_context *sctx)
+{
+ util_dynarray_clear(&sctx->resident_tex_needs_color_decompress);
+ util_dynarray_clear(&sctx->resident_img_needs_color_decompress);
+
+ util_dynarray_foreach(&sctx->resident_tex_handles,
+ struct si_texture_handle *, tex_handle) {
+ struct pipe_resource *res = (*tex_handle)->view->texture;
+ struct r600_texture *rtex;
+
+ if (!res || res->target == PIPE_BUFFER)
+ continue;
+
+ rtex = (struct r600_texture *)res;
+ if (!color_needs_decompression(rtex))
+ continue;
+
+ util_dynarray_append(&sctx->resident_tex_needs_color_decompress,
+ struct si_texture_handle *, *tex_handle);
+ }
+
+ util_dynarray_foreach(&sctx->resident_img_handles,
+ struct si_image_handle *, img_handle) {
+ struct pipe_image_view *view = &(*img_handle)->view;
+ struct pipe_resource *res = view->resource;
+ struct r600_texture *rtex;
+
+ if (!res || res->target == PIPE_BUFFER)
+ continue;
+
+ rtex = (struct r600_texture *)res;
+ if (!color_needs_decompression(rtex))
+ continue;
+
+ util_dynarray_append(&sctx->resident_img_needs_color_decompress,
+ struct si_image_handle *, *img_handle);
+ }
+}
+
/* CMASK can be enabled (for fast clear) and disabled (for texture export)
* while the texture is bound, possibly by a different context. In that case,
- * call this function to update compressed_colortex_masks.
+ * call this function to update needs_*_decompress_masks.
*/
-void si_update_compressed_colortex_masks(struct si_context *sctx)
+void si_update_needs_color_decompress_masks(struct si_context *sctx)
{
for (int i = 0; i < SI_NUM_SHADERS; ++i) {
- si_samplers_update_compressed_colortex_mask(&sctx->samplers[i]);
- si_images_update_compressed_colortex_mask(&sctx->images[i]);
+ si_samplers_update_needs_color_decompress_mask(&sctx->samplers[i]);
+ si_images_update_needs_color_decompress_mask(&sctx->images[i]);
+ si_update_shader_needs_decompress_mask(sctx, i);
}
+
+ si_resident_handles_update_needs_color_decompress(sctx);
}
/* BUFFER DISCARD/INVALIDATION */
static void si_reset_buffer_resources(struct si_context *sctx,
struct si_buffer_resources *buffers,
unsigned descriptors_idx,
+ unsigned slot_mask,
struct pipe_resource *buf,
- uint64_t old_va)
+ uint64_t old_va,
+ enum radeon_bo_usage usage,
+ enum radeon_bo_priority priority)
{
struct si_descriptors *descs = &sctx->descriptors[descriptors_idx];
- unsigned mask = buffers->enabled_mask;
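+ /* Constant buffers and shader buffers share one descriptor list now,
+ * so restrict the scan to the slot range selected by the caller.
+ */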
+ unsigned mask = buffers->enabled_mask & slot_mask;
while (mask) {
unsigned i = u_bit_scan(&mask);
radeon_add_to_buffer_list_check_mem(&sctx->b, &sctx->b.gfx,
(struct r600_resource *)buf,
- buffers->shader_usage,
- buffers->priority, true);
+ usage, priority, true);
}
}
}
-/* Reallocate a buffer a update all resource bindings where the buffer is
- * bound.
- *
- * This is used to avoid CPU-GPU synchronizations, because it makes the buffer
- * idle by discarding its contents. Apps usually tell us when to do this using
- * map_buffer flags, for example.
- */
-static void si_invalidate_buffer(struct pipe_context *ctx, struct pipe_resource *buf)
+static void si_rebind_buffer(struct pipe_context *ctx, struct pipe_resource *buf,
+ uint64_t old_va)
{
struct si_context *sctx = (struct si_context*)ctx;
struct r600_resource *rbuffer = r600_resource(buf);
- unsigned i, shader, alignment = rbuffer->buf->alignment;
- uint64_t old_va = rbuffer->gpu_address;
+ unsigned i, shader;
unsigned num_elems = sctx->vertex_elements ?
sctx->vertex_elements->count : 0;
- struct si_sampler_view *view;
-
- /* Reallocate the buffer in the same pipe_resource. */
- r600_init_resource(&sctx->screen->b, rbuffer, rbuffer->b.b.width0,
- alignment);
/* We changed the buffer, now we need to bind it where the old one
* was bound. This consists of 2 things:
*/
/* Vertex buffers. */
- for (i = 0; i < num_elems; i++) {
- int vb = sctx->vertex_elements->elements[i].vertex_buffer_index;
+ if (rbuffer->bind_history & PIPE_BIND_VERTEX_BUFFER) {
+ for (i = 0; i < num_elems; i++) {
+ int vb = sctx->vertex_elements->vertex_buffer_index[i];
- if (vb >= ARRAY_SIZE(sctx->vertex_buffer))
- continue;
- if (!sctx->vertex_buffer[vb].buffer)
- continue;
+ if (vb >= ARRAY_SIZE(sctx->vertex_buffer))
+ continue;
+ if (!sctx->vertex_buffer[vb].buffer.resource)
+ continue;
- if (sctx->vertex_buffer[vb].buffer == buf) {
- sctx->vertex_buffers_dirty = true;
- break;
+ if (sctx->vertex_buffer[vb].buffer.resource == buf) {
+ sctx->vertex_buffers_dirty = true;
+ break;
+ }
}
}
/* Streamout buffers. (other internal buffers can't be invalidated) */
- for (i = SI_VS_STREAMOUT_BUF0; i <= SI_VS_STREAMOUT_BUF3; i++) {
- struct si_buffer_resources *buffers = &sctx->rw_buffers;
- struct si_descriptors *descs =
- &sctx->descriptors[SI_DESCS_RW_BUFFERS];
-
- if (buffers->buffers[i] != buf)
- continue;
+ if (rbuffer->bind_history & PIPE_BIND_STREAM_OUTPUT) {
+ for (i = SI_VS_STREAMOUT_BUF0; i <= SI_VS_STREAMOUT_BUF3; i++) {
+ struct si_buffer_resources *buffers = &sctx->rw_buffers;
+ struct si_descriptors *descs =
+ &sctx->descriptors[SI_DESCS_RW_BUFFERS];
- si_desc_reset_buffer_offset(ctx, descs->list + i*4,
- old_va, buf);
- descs->dirty_mask |= 1u << i;
- sctx->descriptors_dirty |= 1u << SI_DESCS_RW_BUFFERS;
+ if (buffers->buffers[i] != buf)
+ continue;
- radeon_add_to_buffer_list_check_mem(&sctx->b, &sctx->b.gfx,
- rbuffer, buffers->shader_usage,
- RADEON_PRIO_SHADER_RW_BUFFER,
- true);
+ si_desc_reset_buffer_offset(ctx, descs->list + i*4,
+ old_va, buf);
+ descs->dirty_mask |= 1u << i;
+ sctx->descriptors_dirty |= 1u << SI_DESCS_RW_BUFFERS;
- /* Update the streamout state. */
- if (sctx->b.streamout.begin_emitted)
- r600_emit_streamout_end(&sctx->b);
- sctx->b.streamout.append_bitmask =
- sctx->b.streamout.enabled_mask;
- r600_streamout_buffers_dirty(&sctx->b);
+ radeon_add_to_buffer_list_check_mem(&sctx->b, &sctx->b.gfx,
+ rbuffer, buffers->shader_usage,
+ RADEON_PRIO_SHADER_RW_BUFFER,
+ true);
+
+ /* Update the streamout state. */
+ if (sctx->b.streamout.begin_emitted)
+ r600_emit_streamout_end(&sctx->b);
+ sctx->b.streamout.append_bitmask =
+ sctx->b.streamout.enabled_mask;
+ r600_streamout_buffers_dirty(&sctx->b);
+ }
}
/* Constant and shader buffers. */
- for (shader = 0; shader < SI_NUM_SHADERS; shader++) {
- si_reset_buffer_resources(sctx, &sctx->const_buffers[shader],
- si_const_buffer_descriptors_idx(shader),
- buf, old_va);
- si_reset_buffer_resources(sctx, &sctx->shader_buffers[shader],
- si_shader_buffer_descriptors_idx(shader),
- buf, old_va);
- }
-
- /* Texture buffers - update virtual addresses in sampler view descriptors. */
- LIST_FOR_EACH_ENTRY(view, &sctx->b.texture_buffers, list) {
- if (view->base.texture == buf) {
- si_desc_reset_buffer_offset(ctx, &view->state[4], old_va, buf);
- }
+ if (rbuffer->bind_history & PIPE_BIND_CONSTANT_BUFFER) {
+ for (shader = 0; shader < SI_NUM_SHADERS; shader++)
+ si_reset_buffer_resources(sctx, &sctx->const_and_shader_buffers[shader],
+ si_const_and_shader_buffer_descriptors_idx(shader),
+ u_bit_consecutive(SI_NUM_SHADER_BUFFERS, SI_NUM_CONST_BUFFERS),
+ buf, old_va,
+ sctx->const_and_shader_buffers[shader].shader_usage_constbuf,
+ sctx->const_and_shader_buffers[shader].priority_constbuf);
}
- /* Texture buffers - update bindings. */
- for (shader = 0; shader < SI_NUM_SHADERS; shader++) {
- struct si_sampler_views *views = &sctx->samplers[shader].views;
- struct si_descriptors *descs =
- si_sampler_descriptors(sctx, shader);
- unsigned mask = views->enabled_mask;
- while (mask) {
- unsigned i = u_bit_scan(&mask);
- if (views->views[i]->texture == buf) {
- si_desc_reset_buffer_offset(ctx,
- descs->list +
- i * 16 + 4,
- old_va, buf);
- descs->dirty_mask |= 1u << i;
- sctx->descriptors_dirty |=
- 1u << si_sampler_descriptors_idx(shader);
-
- radeon_add_to_buffer_list_check_mem(&sctx->b, &sctx->b.gfx,
- rbuffer, RADEON_USAGE_READ,
- RADEON_PRIO_SAMPLER_BUFFER,
- true);
+ if (rbuffer->bind_history & PIPE_BIND_SHADER_BUFFER) {
+ for (shader = 0; shader < SI_NUM_SHADERS; shader++)
+ si_reset_buffer_resources(sctx, &sctx->const_and_shader_buffers[shader],
+ si_const_and_shader_buffer_descriptors_idx(shader),
+ u_bit_consecutive(0, SI_NUM_SHADER_BUFFERS),
+ buf, old_va,
+ sctx->const_and_shader_buffers[shader].shader_usage,
+ sctx->const_and_shader_buffers[shader].priority);
+ }
+
+ if (rbuffer->bind_history & PIPE_BIND_SAMPLER_VIEW) {
+ /* Texture buffers - update bindings. */
+ for (shader = 0; shader < SI_NUM_SHADERS; shader++) {
+ struct si_sampler_views *views = &sctx->samplers[shader].views;
+ struct si_descriptors *descs =
+ si_sampler_and_image_descriptors(sctx, shader);
+ unsigned mask = views->enabled_mask;
+
+ while (mask) {
+ unsigned i = u_bit_scan(&mask);
+ if (views->views[i]->texture == buf) {
+ unsigned desc_slot = si_get_sampler_slot(i);
+
+ si_desc_reset_buffer_offset(ctx,
+ descs->list +
+ desc_slot * 16 + 4,
+ old_va, buf);
+ descs->dirty_mask |= 1ull << desc_slot;
+ sctx->descriptors_dirty |=
+ 1u << si_sampler_and_image_descriptors_idx(shader);
+
+ radeon_add_to_buffer_list_check_mem(&sctx->b, &sctx->b.gfx,
+ rbuffer, RADEON_USAGE_READ,
+ RADEON_PRIO_SAMPLER_BUFFER,
+ true);
+ }
}
}
}
/* Shader images */
- for (shader = 0; shader < SI_NUM_SHADERS; ++shader) {
- struct si_images_info *images = &sctx->images[shader];
- struct si_descriptors *descs =
- si_image_descriptors(sctx, shader);
- unsigned mask = images->enabled_mask;
+ if (rbuffer->bind_history & PIPE_BIND_SHADER_IMAGE) {
+ for (shader = 0; shader < SI_NUM_SHADERS; ++shader) {
+ struct si_images_info *images = &sctx->images[shader];
+ struct si_descriptors *descs =
+ si_sampler_and_image_descriptors(sctx, shader);
+ unsigned mask = images->enabled_mask;
+
+ while (mask) {
+ unsigned i = u_bit_scan(&mask);
+
+ if (images->views[i].resource == buf) {
+ unsigned desc_slot = si_get_image_slot(i);
+
+ if (images->views[i].access & PIPE_IMAGE_ACCESS_WRITE)
+ si_mark_image_range_valid(&images->views[i]);
+
+ si_desc_reset_buffer_offset(
+ ctx, descs->list + desc_slot * 8 + 4,
+ old_va, buf);
+ /* two 8-byte images share one 16-byte slot */
+ descs->dirty_mask |= 1u << (desc_slot / 2);
+ sctx->descriptors_dirty |=
+ 1u << si_sampler_and_image_descriptors_idx(shader);
+
+ radeon_add_to_buffer_list_check_mem(
+ &sctx->b, &sctx->b.gfx, rbuffer,
+ RADEON_USAGE_READWRITE,
+ RADEON_PRIO_SAMPLER_BUFFER, true);
+ }
+ }
+ }
+ }
- while (mask) {
- unsigned i = u_bit_scan(&mask);
+ /* Bindless texture handles */
+ if (rbuffer->texture_handle_allocated) {
+ util_dynarray_foreach(&sctx->resident_tex_handles,
+ struct si_texture_handle *, tex_handle) {
+ struct pipe_sampler_view *view = (*tex_handle)->view;
+ struct si_bindless_descriptor *desc = (*tex_handle)->desc;
- if (images->views[i].resource == buf) {
- if (images->views[i].access & PIPE_IMAGE_ACCESS_WRITE)
- si_mark_image_range_valid(&images->views[i]);
+ if (view->texture == buf) {
+ si_set_buf_desc_address(rbuffer,
+ view->u.buf.offset,
+ &desc->desc_list[4]);
+ desc->dirty = true;
+ sctx->bindless_descriptors_dirty = true;
- si_desc_reset_buffer_offset(
- ctx, descs->list + i * 8 + 4,
- old_va, buf);
- descs->dirty_mask |= 1u << i;
- sctx->descriptors_dirty |=
- 1u << si_image_descriptors_idx(shader);
+ radeon_add_to_buffer_list_check_mem(
+ &sctx->b, &sctx->b.gfx, rbuffer,
+ RADEON_USAGE_READ,
+ RADEON_PRIO_SAMPLER_BUFFER, true);
+ }
+ }
+ }
+
+ /* Bindless image handles */
+ if (rbuffer->image_handle_allocated) {
+ util_dynarray_foreach(&sctx->resident_img_handles,
+ struct si_image_handle *, img_handle) {
+ struct pipe_image_view *view = &(*img_handle)->view;
+ struct si_bindless_descriptor *desc = (*img_handle)->desc;
+
+ if (view->resource == buf) {
+ if (view->access & PIPE_IMAGE_ACCESS_WRITE)
+ si_mark_image_range_valid(view);
+
+ si_set_buf_desc_address(rbuffer,
+ view->u.buf.offset,
+ &desc->desc_list[4]);
+ desc->dirty = true;
+ sctx->bindless_descriptors_dirty = true;
radeon_add_to_buffer_list_check_mem(
&sctx->b, &sctx->b.gfx, rbuffer,
}
}
+/* Reallocate a buffer and update all resource bindings where the buffer is
+ * bound.
+ *
+ * This is used to avoid CPU-GPU synchronizations, because it makes the buffer
+ * idle by discarding its contents. Apps usually tell us when to do this using
+ * map_buffer flags, for example.
+ */
+static void si_invalidate_buffer(struct pipe_context *ctx, struct pipe_resource *buf)
+{
+ struct si_context *sctx = (struct si_context*)ctx;
+ struct r600_resource *rbuffer = r600_resource(buf);
+ uint64_t old_va = rbuffer->gpu_address;
+
+ /* Reallocate the buffer in the same pipe_resource. */
+ r600_alloc_resource(&sctx->screen->b, rbuffer);
+
+ si_rebind_buffer(ctx, buf, old_va);
+}
+
+static void si_upload_bindless_descriptor(struct si_context *sctx,
+ struct si_bindless_descriptor *desc)
+{
+ struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+ uint64_t va = desc->buffer->gpu_address + desc->offset;
+ unsigned num_dwords = sizeof(desc->desc_list) / 4;
+
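+ /* Update the resident descriptor in place in its GPU buffer by writing
+ * it through the CP (WRITE_DATA targeting the TC L2 cache).
+ */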
+ radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 2 + num_dwords, 0));
+ radeon_emit(cs, S_370_DST_SEL(V_370_TC_L2) |
+ S_370_WR_CONFIRM(1) |
+ S_370_ENGINE_SEL(V_370_ME));
+ radeon_emit(cs, va);
+ radeon_emit(cs, va >> 32);
+ radeon_emit_array(cs, desc->desc_list, num_dwords);
+}
+
+static void si_upload_bindless_descriptors(struct si_context *sctx)
+{
+ if (!sctx->bindless_descriptors_dirty)
+ return;
+
+ /* Wait for graphics/compute to be idle before updating the resident
+ * descriptors directly in memory, in case the GPU is using them.
+ */
+ sctx->b.flags |= SI_CONTEXT_PS_PARTIAL_FLUSH |
+ SI_CONTEXT_CS_PARTIAL_FLUSH;
+ si_emit_cache_flush(sctx);
+
+ util_dynarray_foreach(&sctx->resident_tex_handles,
+ struct si_texture_handle *, tex_handle) {
+ struct si_bindless_descriptor *desc = (*tex_handle)->desc;
+
+ if (!desc->dirty)
+ continue;
+
+ si_upload_bindless_descriptor(sctx, desc);
+ desc->dirty = false;
+ }
+
+ util_dynarray_foreach(&sctx->resident_img_handles,
+ struct si_image_handle *, img_handle) {
+ struct si_bindless_descriptor *desc = (*img_handle)->desc;
+
+ if (!desc->dirty)
+ continue;
+
+ si_upload_bindless_descriptor(sctx, desc);
+ desc->dirty = false;
+ }
+
+ /* Invalidate L1 because it doesn't know that L2 changed. */
+ sctx->b.flags |= SI_CONTEXT_INV_SMEM_L1;
+ si_emit_cache_flush(sctx);
+
+ sctx->bindless_descriptors_dirty = false;
+}
+
+/* Update mutable image descriptor fields of all resident textures. */
+static void si_update_all_resident_texture_descriptors(struct si_context *sctx)
+{
+ util_dynarray_foreach(&sctx->resident_tex_handles,
+ struct si_texture_handle *, tex_handle) {
+ struct si_bindless_descriptor *desc = (*tex_handle)->desc;
+ struct si_sampler_view *sview =
+ (struct si_sampler_view *)(*tex_handle)->view;
+ uint32_t desc_list[16];
+
+ if (sview->base.texture->target == PIPE_BUFFER)
+ continue;
+
+ memcpy(desc_list, desc->desc_list, sizeof(desc_list));
+ si_set_sampler_view_desc(sctx, sview, &(*tex_handle)->sstate,
+ &desc->desc_list[0]);
+
+ if (memcmp(desc_list, desc->desc_list, sizeof(desc_list))) {
+ desc->dirty = true;
+ sctx->bindless_descriptors_dirty = true;
+ }
+ }
+
+ util_dynarray_foreach(&sctx->resident_img_handles,
+ struct si_image_handle *, img_handle) {
+ struct si_bindless_descriptor *desc = (*img_handle)->desc;
+ struct pipe_image_view *view = &(*img_handle)->view;
+ uint32_t desc_list[16];
+
+ if (view->resource->target == PIPE_BUFFER)
+ continue;
+
+ memcpy(desc_list, desc->desc_list, sizeof(desc_list));
+ si_set_shader_image_desc(sctx, view, true,
+ &desc->desc_list[0]);
+
+ if (memcmp(desc_list, desc->desc_list, sizeof(desc_list))) {
+ desc->dirty = true;
+ sctx->bindless_descriptors_dirty = true;
+ }
+ }
+
+ si_upload_bindless_descriptors(sctx);
+}
+
/* Update mutable image descriptor fields of all bound textures. */
void si_update_all_texture_descriptors(struct si_context *sctx)
{
view->resource->target == PIPE_BUFFER)
continue;
- si_set_shader_image(sctx, shader, i, view);
+ si_set_shader_image(sctx, shader, i, view, true);
}
/* Sampler views. */
si_set_sampler_view(sctx, shader, i,
samplers->views[i], true);
}
+
+ si_update_shader_needs_decompress_mask(sctx, shader);
}
+
+ si_update_all_resident_texture_descriptors(sctx);
}
/* SHADER USER DATA */
static void si_mark_shader_pointers_dirty(struct si_context *sctx,
unsigned shader)
{
- struct si_descriptors *descs =
- &sctx->descriptors[SI_DESCS_FIRST_SHADER + shader * SI_NUM_SHADER_DESCS];
-
- for (unsigned i = 0; i < SI_NUM_SHADER_DESCS; ++i, ++descs)
- descs->pointer_dirty = true;
+ sctx->shader_pointers_dirty |=
+ u_bit_consecutive(SI_DESCS_FIRST_SHADER + shader * SI_NUM_SHADER_DESCS,
+ SI_NUM_SHADER_DESCS);
if (shader == PIPE_SHADER_VERTEX)
- sctx->vertex_buffers.pointer_dirty = true;
+ sctx->vertex_buffer_pointer_dirty = sctx->vertex_buffers.buffer != NULL;
si_mark_atom_dirty(sctx, &sctx->shader_userdata.atom);
}
static void si_shader_userdata_begin_new_cs(struct si_context *sctx)
{
- int i;
-
- for (i = 0; i < SI_NUM_SHADERS; i++) {
- si_mark_shader_pointers_dirty(sctx, i);
- }
- sctx->descriptors[SI_DESCS_RW_BUFFERS].pointer_dirty = true;
+ sctx->shader_pointers_dirty = u_bit_consecutive(0, SI_NUM_DESCS);
+ sctx->vertex_buffer_pointer_dirty = sctx->vertex_buffers.buffer != NULL;
+ si_mark_atom_dirty(sctx, &sctx->shader_userdata.atom);
}
/* Set a base register address for user data constants in the given shader.
if (*base != new_base) {
*base = new_base;
- if (new_base)
+ if (new_base) {
si_mark_shader_pointers_dirty(sctx, shader);
+
+ if (shader == PIPE_SHADER_VERTEX)
+ sctx->last_vs_state = ~0;
+ }
}
}
void si_shader_change_notify(struct si_context *sctx)
{
/* VS can be bound as VS, ES, or LS. */
- if (sctx->tes_shader.cso)
- si_set_user_data_base(sctx, PIPE_SHADER_VERTEX,
- R_00B530_SPI_SHADER_USER_DATA_LS_0);
- else if (sctx->gs_shader.cso)
+ if (sctx->tes_shader.cso) {
+ if (sctx->b.chip_class >= GFX9) {
+ si_set_user_data_base(sctx, PIPE_SHADER_VERTEX,
+ R_00B430_SPI_SHADER_USER_DATA_LS_0);
+ } else {
+ si_set_user_data_base(sctx, PIPE_SHADER_VERTEX,
+ R_00B530_SPI_SHADER_USER_DATA_LS_0);
+ }
+ } else if (sctx->gs_shader.cso) {
si_set_user_data_base(sctx, PIPE_SHADER_VERTEX,
R_00B330_SPI_SHADER_USER_DATA_ES_0);
- else
+ } else {
si_set_user_data_base(sctx, PIPE_SHADER_VERTEX,
R_00B130_SPI_SHADER_USER_DATA_VS_0);
+ }
/* TES can be bound as ES, VS, or not bound. */
if (sctx->tes_shader.cso) {
static void si_emit_shader_pointer(struct si_context *sctx,
struct si_descriptors *desc,
- unsigned sh_base, bool keep_dirty)
+ unsigned sh_base)
{
struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
uint64_t va;
- if (!desc->pointer_dirty || !desc->buffer)
- return;
+ if (!desc->buffer)
+ return; /* the pointer is not used by current shaders */
va = desc->buffer->gpu_address +
desc->buffer_offset;
radeon_emit(cs, (sh_base + desc->shader_userdata_offset - SI_SH_REG_OFFSET) >> 2);
radeon_emit(cs, va);
radeon_emit(cs, va >> 32);
-
- desc->pointer_dirty = keep_dirty;
}
void si_emit_graphics_shader_userdata(struct si_context *sctx,
struct r600_atom *atom)
{
- unsigned shader;
+ unsigned mask;
uint32_t *sh_base = sctx->shader_userdata.sh_base;
struct si_descriptors *descs;
descs = &sctx->descriptors[SI_DESCS_RW_BUFFERS];
- if (descs->pointer_dirty) {
- si_emit_shader_pointer(sctx, descs,
- R_00B030_SPI_SHADER_USER_DATA_PS_0, true);
+ if (sctx->shader_pointers_dirty & (1 << SI_DESCS_RW_BUFFERS)) {
si_emit_shader_pointer(sctx, descs,
- R_00B130_SPI_SHADER_USER_DATA_VS_0, true);
+ R_00B030_SPI_SHADER_USER_DATA_PS_0);
si_emit_shader_pointer(sctx, descs,
- R_00B230_SPI_SHADER_USER_DATA_GS_0, true);
- si_emit_shader_pointer(sctx, descs,
- R_00B330_SPI_SHADER_USER_DATA_ES_0, true);
- si_emit_shader_pointer(sctx, descs,
- R_00B430_SPI_SHADER_USER_DATA_HS_0, true);
- descs->pointer_dirty = false;
+ R_00B130_SPI_SHADER_USER_DATA_VS_0);
+
+ if (sctx->b.chip_class >= GFX9) {
+ /* GFX9 merged LS-HS and ES-GS.
+ * Set RW_BUFFERS in the special registers, so that
+ * it's preloaded into s[0:1] instead of s[8:9].
+ */
+ si_emit_shader_pointer(sctx, descs,
+ R_00B208_SPI_SHADER_USER_DATA_ADDR_LO_GS);
+ si_emit_shader_pointer(sctx, descs,
+ R_00B408_SPI_SHADER_USER_DATA_ADDR_LO_HS);
+ } else {
+ si_emit_shader_pointer(sctx, descs,
+ R_00B230_SPI_SHADER_USER_DATA_GS_0);
+ si_emit_shader_pointer(sctx, descs,
+ R_00B330_SPI_SHADER_USER_DATA_ES_0);
+ si_emit_shader_pointer(sctx, descs,
+ R_00B430_SPI_SHADER_USER_DATA_HS_0);
+ si_emit_shader_pointer(sctx, descs,
+ R_00B530_SPI_SHADER_USER_DATA_LS_0);
+ }
}
- descs = &sctx->descriptors[SI_DESCS_FIRST_SHADER];
+ mask = sctx->shader_pointers_dirty &
+ u_bit_consecutive(SI_DESCS_FIRST_SHADER,
+ SI_DESCS_FIRST_COMPUTE - SI_DESCS_FIRST_SHADER);
- for (shader = 0; shader < SI_NUM_GRAPHICS_SHADERS; shader++) {
+ while (mask) {
+ unsigned i = u_bit_scan(&mask);
+ unsigned shader = (i - SI_DESCS_FIRST_SHADER) / SI_NUM_SHADER_DESCS;
unsigned base = sh_base[shader];
- unsigned i;
- if (!base)
- continue;
+ if (base)
+ si_emit_shader_pointer(sctx, descs + i, base);
+ }
+ sctx->shader_pointers_dirty &=
+ ~u_bit_consecutive(SI_DESCS_RW_BUFFERS, SI_DESCS_FIRST_COMPUTE);
- for (i = 0; i < SI_NUM_SHADER_DESCS; i++, descs++)
- si_emit_shader_pointer(sctx, descs, base, false);
+ if (sctx->vertex_buffer_pointer_dirty) {
+ si_emit_shader_pointer(sctx, &sctx->vertex_buffers,
+ sh_base[PIPE_SHADER_VERTEX]);
+ sctx->vertex_buffer_pointer_dirty = false;
}
- si_emit_shader_pointer(sctx, &sctx->vertex_buffers, sh_base[PIPE_SHADER_VERTEX], false);
}
void si_emit_compute_shader_userdata(struct si_context *sctx)
{
unsigned base = R_00B900_COMPUTE_USER_DATA_0;
- struct si_descriptors *descs = &sctx->descriptors[SI_DESCS_FIRST_COMPUTE];
+ struct si_descriptors *descs = sctx->descriptors;
+ unsigned compute_mask =
+ u_bit_consecutive(SI_DESCS_FIRST_COMPUTE, SI_NUM_SHADER_DESCS);
+ unsigned mask = sctx->shader_pointers_dirty & compute_mask;
- for (unsigned i = 0; i < SI_NUM_SHADER_DESCS; ++i, ++descs)
- si_emit_shader_pointer(sctx, descs, base, false);
+ while (mask) {
+ unsigned i = u_bit_scan(&mask);
+
+ si_emit_shader_pointer(sctx, descs + i, base);
+ }
+ sctx->shader_pointers_dirty &= ~compute_mask;
+}
+
+/* BINDLESS */
+
+struct si_bindless_descriptor_slab
+{
+ struct pb_slab base;
+ struct r600_resource *buffer;
+ struct si_bindless_descriptor *entries;
+};
+
+bool si_bindless_descriptor_can_reclaim_slab(void *priv,
+ struct pb_slab_entry *entry)
+{
+ /* Do not allow reclaiming any bindless descriptors for now because the
+ * GPU might be using them. This should be improved later on.
+ */
+ return false;
+}
+
+struct pb_slab *si_bindless_descriptor_slab_alloc(void *priv, unsigned heap,
+ unsigned entry_size,
+ unsigned group_index)
+{
+ struct si_context *sctx = priv;
+ struct si_screen *sscreen = sctx->screen;
+ struct si_bindless_descriptor_slab *slab;
+
+ slab = CALLOC_STRUCT(si_bindless_descriptor_slab);
+ if (!slab)
+ return NULL;
+
+ /* Create a buffer in VRAM for 1024 bindless descriptors. */
+ slab->buffer = (struct r600_resource *)
+ pipe_buffer_create(&sscreen->b.b, 0,
+ PIPE_USAGE_DEFAULT, 64 * 1024);
+ if (!slab->buffer)
+ goto fail;
+
+ slab->base.num_entries = slab->buffer->bo_size / entry_size;
+ slab->base.num_free = slab->base.num_entries;
+ slab->entries = CALLOC(slab->base.num_entries, sizeof(*slab->entries));
+ if (!slab->entries)
+ goto fail_buffer;
+
+ LIST_INITHEAD(&slab->base.free);
+
+ for (unsigned i = 0; i < slab->base.num_entries; ++i) {
+ struct si_bindless_descriptor *desc = &slab->entries[i];
+
+ desc->entry.slab = &slab->base;
+ desc->entry.group_index = group_index;
+ desc->buffer = slab->buffer;
+ desc->offset = i * entry_size;
+
+ LIST_ADDTAIL(&desc->entry.head, &slab->base.free);
+ }
+
+ /* Add the descriptor buffer to the per-context list. */
+ util_dynarray_append(&sctx->bindless_descriptors,
+ struct r600_resource *, slab->buffer);
+
+ return &slab->base;
+
+fail_buffer:
+ r600_resource_reference(&slab->buffer, NULL);
+fail:
+ FREE(slab);
+ return NULL;
+}
+
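+/* Sizing sketch for the slab above (derived from the constants used in this
+ * file, not new behaviour): si_create_bindless_descriptor() requests 64-byte
+ * entries from pb_slab_alloc(), so a 64 KB slab buffer yields
+ *
+ *    65536 / 64 = 1024 descriptors per slab,
+ *
+ * which is where the "1024 bindless descriptors" figure above comes from.
+ */
+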
+void si_bindless_descriptor_slab_free(void *priv, struct pb_slab *pslab)
+{
+ struct si_context *sctx = priv;
+ struct si_bindless_descriptor_slab *slab =
+ (struct si_bindless_descriptor_slab *)pslab;
+
+ /* Remove the descriptor buffer from the per-context list. */
+ util_dynarray_delete_unordered(&sctx->bindless_descriptors,
+ struct r600_resource *, slab->buffer);
+
+ r600_resource_reference(&slab->buffer, NULL);
+ FREE(slab->entries);
+ FREE(slab);
+}
+
+static struct si_bindless_descriptor *
+si_create_bindless_descriptor(struct si_context *sctx, uint32_t *desc_list,
+ unsigned size)
+{
+ struct si_screen *sscreen = sctx->screen;
+ struct si_bindless_descriptor *desc;
+ struct pb_slab_entry *entry;
+ void *ptr;
+
+ /* Sub-allocate the bindless descriptor from a slab to avoid dealing
+ * with a ton of buffers and to reduce the winsys overhead.
+ */
+ entry = pb_slab_alloc(&sctx->bindless_descriptor_slabs, 64, 0);
+ if (!entry)
+ return NULL;
+
+ desc = NULL;
+ desc = container_of(entry, desc, entry);
+
+ /* Upload the descriptor directly into VRAM. Because the slabs are
+ * currently never reclaimed, we don't need to synchronize the
+ * operation.
+ */
+ ptr = sscreen->b.ws->buffer_map(desc->buffer->buf, NULL,
+ PIPE_TRANSFER_WRITE |
+ PIPE_TRANSFER_UNSYNCHRONIZED);
+ util_memcpy_cpu_to_le32(ptr + desc->offset, desc_list, size);
+
+ /* Keep a copy of the initial descriptor, especially for buffer
+ * invalidation, because we might need to know the previous address.
+ */
+ memcpy(desc->desc_list, desc_list, sizeof(desc->desc_list));
+
+ return desc;
+}
+
+static void si_invalidate_bindless_buf_desc(struct si_context *sctx,
+ struct si_bindless_descriptor *desc,
+ struct pipe_resource *resource,
+ uint64_t offset)
+{
+ struct r600_resource *buf = r600_resource(resource);
+ uint32_t *desc_list = desc->desc_list + 4;
+ uint64_t old_desc_va;
+
+ assert(resource->target == PIPE_BUFFER);
+
+ /* Retrieve the old buffer address from the descriptor. */
+ old_desc_va = desc_list[0];
+ old_desc_va |= ((uint64_t)G_008F04_BASE_ADDRESS_HI(desc_list[1]) << 32);
+
+ if (old_desc_va != buf->gpu_address + offset) {
+ /* The buffer has been invalidated while the handle wasn't
+ * resident; update the descriptor and the dirty flag.
+ */
+ si_set_buf_desc_address(buf, offset, &desc_list[0]);
+
+ desc->dirty = true;
+ sctx->bindless_descriptors_dirty = true;
+ }
+}
+
+static uint64_t si_create_texture_handle(struct pipe_context *ctx,
+ struct pipe_sampler_view *view,
+ const struct pipe_sampler_state *state)
+{
+ struct si_sampler_view *sview = (struct si_sampler_view *)view;
+ struct si_context *sctx = (struct si_context *)ctx;
+ struct si_texture_handle *tex_handle;
+ struct si_sampler_state *sstate;
+ uint32_t desc_list[16];
+ uint64_t handle;
+
+ tex_handle = CALLOC_STRUCT(si_texture_handle);
+ if (!tex_handle)
+ return 0;
+
+ memset(desc_list, 0, sizeof(desc_list));
+ si_init_descriptor_list(&desc_list[0], 16, 1, null_texture_descriptor);
+
+ sstate = ctx->create_sampler_state(ctx, state);
+ if (!sstate) {
+ FREE(tex_handle);
+ return 0;
+ }
+
+ si_set_sampler_view_desc(sctx, sview, sstate, &desc_list[0]);
+ memcpy(&tex_handle->sstate, sstate, sizeof(*sstate));
+ ctx->delete_sampler_state(ctx, sstate);
+
+ tex_handle->desc = si_create_bindless_descriptor(sctx, desc_list,
+ sizeof(desc_list));
+ if (!tex_handle->desc) {
+ FREE(tex_handle);
+ return 0;
+ }
+
+ handle = tex_handle->desc->buffer->gpu_address +
+ tex_handle->desc->offset;
+
+ if (!_mesa_hash_table_insert(sctx->tex_handles, (void *)handle,
+ tex_handle)) {
+ pb_slab_free(&sctx->bindless_descriptor_slabs,
+ &tex_handle->desc->entry);
+ FREE(tex_handle);
+ return 0;
+ }
+
+ pipe_sampler_view_reference(&tex_handle->view, view);
+
+ r600_resource(sview->base.texture)->texture_handle_allocated = true;
+
+ return handle;
+}
+
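+/* For reference (hypothetical application-side flow, assuming the standard
+ * ARB_bindless_texture entry points; not part of this change):
+ *
+ *    GLuint64 handle = glGetTextureHandleARB(tex);
+ *    glMakeTextureHandleResidentARB(handle);
+ *
+ * ends up in the create_texture_handle() and make_texture_handle_resident()
+ * hooks implemented in this file. The value returned to the application is
+ * the GPU virtual address of the bindless descriptor, so shaders can fetch
+ * the descriptor with a plain memory load.
+ */
+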
+static void si_delete_texture_handle(struct pipe_context *ctx, uint64_t handle)
+{
+ struct si_context *sctx = (struct si_context *)ctx;
+ struct si_texture_handle *tex_handle;
+ struct hash_entry *entry;
+
+ entry = _mesa_hash_table_search(sctx->tex_handles, (void *)handle);
+ if (!entry)
+ return;
+
+ tex_handle = (struct si_texture_handle *)entry->data;
+
+ pipe_sampler_view_reference(&tex_handle->view, NULL);
+ _mesa_hash_table_remove(sctx->tex_handles, entry);
+ pb_slab_free(&sctx->bindless_descriptor_slabs,
+ &tex_handle->desc->entry);
+ FREE(tex_handle);
+}
+
+static void si_make_texture_handle_resident(struct pipe_context *ctx,
+ uint64_t handle, bool resident)
+{
+ struct si_context *sctx = (struct si_context *)ctx;
+ struct si_texture_handle *tex_handle;
+ struct si_sampler_view *sview;
+ struct hash_entry *entry;
+
+ entry = _mesa_hash_table_search(sctx->tex_handles, (void *)handle);
+ if (!entry)
+ return;
+
+ tex_handle = (struct si_texture_handle *)entry->data;
+ sview = (struct si_sampler_view *)tex_handle->view;
+
+ if (resident) {
+ if (sview->base.texture->target != PIPE_BUFFER) {
+ struct r600_texture *rtex =
+ (struct r600_texture *)sview->base.texture;
+
+ if (depth_needs_decompression(rtex)) {
+ util_dynarray_append(
+ &sctx->resident_tex_needs_depth_decompress,
+ struct si_texture_handle *,
+ tex_handle);
+ }
+
+ if (color_needs_decompression(rtex)) {
+ util_dynarray_append(
+ &sctx->resident_tex_needs_color_decompress,
+ struct si_texture_handle *,
+ tex_handle);
+ }
+
+ if (rtex->dcc_offset &&
+ p_atomic_read(&rtex->framebuffers_bound))
+ sctx->need_check_render_feedback = true;
+ } else {
+ si_invalidate_bindless_buf_desc(sctx, tex_handle->desc,
+ sview->base.texture,
+ sview->base.u.buf.offset);
+ }
+
+ /* Add the texture handle to the per-context list. */
+ util_dynarray_append(&sctx->resident_tex_handles,
+ struct si_texture_handle *, tex_handle);
+
+ /* Add the buffers to the current CS in case si_begin_new_cs()
+ * is not going to be called.
+ */
+ radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
+ tex_handle->desc->buffer,
+ RADEON_USAGE_READWRITE,
+ RADEON_PRIO_DESCRIPTORS);
+
+ si_sampler_view_add_buffer(sctx, sview->base.texture,
+ RADEON_USAGE_READ,
+ sview->is_stencil_sampler, false);
+ } else {
+ /* Remove the texture handle from the per-context list. */
+ util_dynarray_delete_unordered(&sctx->resident_tex_handles,
+ struct si_texture_handle *,
+ tex_handle);
+
+ if (sview->base.texture->target != PIPE_BUFFER) {
+ util_dynarray_delete_unordered(
+ &sctx->resident_tex_needs_depth_decompress,
+ struct si_texture_handle *, tex_handle);
+
+ util_dynarray_delete_unordered(
+ &sctx->resident_tex_needs_color_decompress,
+ struct si_texture_handle *, tex_handle);
+ }
+ }
+}
+
+static uint64_t si_create_image_handle(struct pipe_context *ctx,
+ const struct pipe_image_view *view)
+{
+ struct si_context *sctx = (struct si_context *)ctx;
+ struct si_image_handle *img_handle;
+ uint32_t desc_list[16];
+ uint64_t handle;
+
+ if (!view || !view->resource)
+ return 0;
+
+ img_handle = CALLOC_STRUCT(si_image_handle);
+ if (!img_handle)
+ return 0;
+
+ memset(desc_list, 0, sizeof(desc_list));
+ si_init_descriptor_list(&desc_list[0], 8, 1, null_image_descriptor);
+
+ si_set_shader_image_desc(sctx, view, false, &desc_list[0]);
+
+ img_handle->desc = si_create_bindless_descriptor(sctx, desc_list,
+ sizeof(desc_list));
+ if (!img_handle->desc) {
+ FREE(img_handle);
+ return 0;
+ }
+
+ handle = img_handle->desc->buffer->gpu_address +
+ img_handle->desc->offset;
+
+ if (!_mesa_hash_table_insert(sctx->img_handles, (void *)handle,
+ img_handle)) {
+ pb_slab_free(&sctx->bindless_descriptor_slabs,
+ &img_handle->desc->entry);
+ FREE(img_handle);
+ return 0;
+ }
+
+ util_copy_image_view(&img_handle->view, view);
+
+ r600_resource(view->resource)->image_handle_allocated = true;
+
+ return handle;
+}
+
+static void si_delete_image_handle(struct pipe_context *ctx, uint64_t handle)
+{
+ struct si_context *sctx = (struct si_context *)ctx;
+ struct si_image_handle *img_handle;
+ struct hash_entry *entry;
+
+ entry = _mesa_hash_table_search(sctx->img_handles, (void *)handle);
+ if (!entry)
+ return;
+
+ img_handle = (struct si_image_handle *)entry->data;
+
+ util_copy_image_view(&img_handle->view, NULL);
+ _mesa_hash_table_remove(sctx->img_handles, entry);
+ pb_slab_free(&sctx->bindless_descriptor_slabs,
+ &img_handle->desc->entry);
+ FREE(img_handle);
+}
+
+static void si_make_image_handle_resident(struct pipe_context *ctx,
+ uint64_t handle, unsigned access,
+ bool resident)
+{
+ struct si_context *sctx = (struct si_context *)ctx;
+ struct si_image_handle *img_handle;
+ struct pipe_image_view *view;
+ struct r600_resource *res;
+ struct hash_entry *entry;
+
+ entry = _mesa_hash_table_search(sctx->img_handles, (void *)handle);
+ if (!entry)
+ return;
+
+ img_handle = (struct si_image_handle *)entry->data;
+ view = &img_handle->view;
+ res = (struct r600_resource *)view->resource;
+
+ if (resident) {
+ if (res->b.b.target != PIPE_BUFFER) {
+ struct r600_texture *rtex = (struct r600_texture *)res;
+ unsigned level = view->u.tex.level;
+
+ if (color_needs_decompression(rtex)) {
+ util_dynarray_append(
+ &sctx->resident_img_needs_color_decompress,
+ struct si_image_handle *,
+ img_handle);
+ }
+
+ if (vi_dcc_enabled(rtex, level) &&
+ p_atomic_read(&rtex->framebuffers_bound))
+ sctx->need_check_render_feedback = true;
+ } else {
+ si_invalidate_bindless_buf_desc(sctx, img_handle->desc,
+ view->resource,
+ view->u.buf.offset);
+ }
+
+ /* Add the image handle to the per-context list. */
+ util_dynarray_append(&sctx->resident_img_handles,
+ struct si_image_handle *, img_handle);
+
+ /* Add the buffers to the current CS in case si_begin_new_cs()
+ * is not going to be called.
+ */
+ radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
+ img_handle->desc->buffer,
+ RADEON_USAGE_READWRITE,
+ RADEON_PRIO_DESCRIPTORS);
+
+ si_sampler_view_add_buffer(sctx, view->resource,
+ (access & PIPE_IMAGE_ACCESS_WRITE) ?
+ RADEON_USAGE_READWRITE :
+ RADEON_USAGE_READ, false, false);
+ } else {
+ /* Remove the image handle from the per-context list. */
+ util_dynarray_delete_unordered(&sctx->resident_img_handles,
+ struct si_image_handle *,
+ img_handle);
+
+ if (res->b.b.target != PIPE_BUFFER) {
+ util_dynarray_delete_unordered(
+ &sctx->resident_img_needs_color_decompress,
+ struct si_image_handle *,
+ img_handle);
+ }
+ }
+}
+
+void si_all_resident_buffers_begin_new_cs(struct si_context *sctx)
+{
+ unsigned num_resident_tex_handles, num_resident_img_handles;
+
+ num_resident_tex_handles = sctx->resident_tex_handles.size /
+ sizeof(struct si_texture_handle *);
+ num_resident_img_handles = sctx->resident_img_handles.size /
+ sizeof(struct si_image_handle *);
+
+ /* Skip adding the bindless descriptors when no handles are resident. */
+ if (!num_resident_tex_handles && !num_resident_img_handles)
+ return;
+
+ /* Add all bindless descriptors. */
+ util_dynarray_foreach(&sctx->bindless_descriptors,
+ struct r600_resource *, desc) {
+
+ radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, *desc,
+ RADEON_USAGE_READWRITE,
+ RADEON_PRIO_DESCRIPTORS);
+ }
+
+ /* Add all resident texture handles. */
+ util_dynarray_foreach(&sctx->resident_tex_handles,
+ struct si_texture_handle *, tex_handle) {
+ struct si_sampler_view *sview =
+ (struct si_sampler_view *)(*tex_handle)->view;
+
+ si_sampler_view_add_buffer(sctx, sview->base.texture,
+ RADEON_USAGE_READ,
+ sview->is_stencil_sampler, false);
+ }
+
+ /* Add all resident image handles. */
+ util_dynarray_foreach(&sctx->resident_img_handles,
+ struct si_image_handle *, img_handle) {
+ struct pipe_image_view *view = &(*img_handle)->view;
+
+ si_sampler_view_add_buffer(sctx, view->resource,
+ RADEON_USAGE_READWRITE,
+ false, false);
+ }
+
+ sctx->b.num_resident_handles += num_resident_tex_handles +
+ num_resident_img_handles;
}
/* INIT/DEINIT/UPLOAD */
+/* GFX9 has only 4 KB of CE RAM, while previous chips had 32 KB. In order
+ * to make CE RAM as useful as possible, this defines limits for the number
+ * of slots that can be in CE RAM on GFX9. If a shader uses more,
+ * descriptors will be uploaded to memory directly and CE won't be used.
+ *
+ * These numbers are based on shader-db.
+ */
+static unsigned gfx9_max_ce_samplers[SI_NUM_SHADERS] = {
+ [PIPE_SHADER_VERTEX] = 0,
+ [PIPE_SHADER_TESS_CTRL] = 0,
+ [PIPE_SHADER_TESS_EVAL] = 1,
+ [PIPE_SHADER_GEOMETRY] = 0,
+ [PIPE_SHADER_FRAGMENT] = 24,
+ [PIPE_SHADER_COMPUTE] = 16,
+};
+static unsigned gfx9_max_ce_images[SI_NUM_SHADERS] = {
+ /* these must be even due to slot alignment */
+ [PIPE_SHADER_VERTEX] = 0,
+ [PIPE_SHADER_TESS_CTRL] = 0,
+ [PIPE_SHADER_TESS_EVAL] = 0,
+ [PIPE_SHADER_GEOMETRY] = 0,
+ [PIPE_SHADER_FRAGMENT] = 2,
+ [PIPE_SHADER_COMPUTE] = 8,
+};
+static unsigned gfx9_max_ce_const_buffers[SI_NUM_SHADERS] = {
+ [PIPE_SHADER_VERTEX] = 9,
+ [PIPE_SHADER_TESS_CTRL] = 3,
+ [PIPE_SHADER_TESS_EVAL] = 5,
+ [PIPE_SHADER_GEOMETRY] = 0,
+ [PIPE_SHADER_FRAGMENT] = 8,
+ [PIPE_SHADER_COMPUTE] = 6,
+};
+static unsigned gfx9_max_ce_shader_buffers[SI_NUM_SHADERS] = {
+ [PIPE_SHADER_VERTEX] = 0,
+ [PIPE_SHADER_TESS_CTRL] = 0,
+ [PIPE_SHADER_TESS_EVAL] = 0,
+ [PIPE_SHADER_GEOMETRY] = 0,
+ [PIPE_SHADER_FRAGMENT] = 12,
+ [PIPE_SHADER_COMPUTE] = 13,
+};
+
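+/* Rough CE RAM budget sketch for the limits above (illustrative only; it
+ * assumes 4-dword buffer descriptors, which are set up elsewhere, and the
+ * 16-dword combined sampler+image slots used below). For the biggest
+ * consumer, the fragment shader:
+ *
+ *    (12 + 8) buffer slots * 16 B + (2/2 + 24) sampler/image slots * 64 B
+ *    = 320 B + 1600 B = 1920 B
+ *
+ * which leaves room in the 4096-byte GFX9 CE RAM for the other stages and
+ * the RW buffer descriptors (see the ce_offset assertion in
+ * si_init_all_descriptors).
+ */
+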
void si_init_all_descriptors(struct si_context *sctx)
{
int i;
unsigned ce_offset = 0;
+ STATIC_ASSERT(GFX9_SGPR_TCS_CONST_AND_SHADER_BUFFERS % 2 == 0);
+ STATIC_ASSERT(GFX9_SGPR_GS_CONST_AND_SHADER_BUFFERS % 2 == 0);
+
for (i = 0; i < SI_NUM_SHADERS; i++) {
- si_init_buffer_resources(&sctx->const_buffers[i],
- si_const_buffer_descriptors(sctx, i),
- SI_NUM_CONST_BUFFERS, SI_SGPR_CONST_BUFFERS,
- RADEON_USAGE_READ, RADEON_PRIO_CONST_BUFFER,
- &ce_offset);
- si_init_buffer_resources(&sctx->shader_buffers[i],
- si_shader_buffer_descriptors(sctx, i),
- SI_NUM_SHADER_BUFFERS, SI_SGPR_SHADER_BUFFERS,
- RADEON_USAGE_READWRITE, RADEON_PRIO_SHADER_RW_BUFFER,
- &ce_offset);
+ bool gfx9_tcs = false;
+ bool gfx9_gs = false;
+ unsigned num_sampler_slots = SI_NUM_IMAGES / 2 + SI_NUM_SAMPLERS;
+ unsigned num_buffer_slots = SI_NUM_SHADER_BUFFERS + SI_NUM_CONST_BUFFERS;
+
+ unsigned first_sampler_ce_slot = 0;
+ unsigned num_sampler_ce_slots = num_sampler_slots;
+
+ unsigned first_buffer_ce_slot = 0;
+ unsigned num_buffer_ce_slots = num_buffer_slots;
+
+ /* Adjust CE slot ranges based on GFX9 CE RAM limits. */
+ if (sctx->b.chip_class >= GFX9) {
+ gfx9_tcs = i == PIPE_SHADER_TESS_CTRL;
+ gfx9_gs = i == PIPE_SHADER_GEOMETRY;
+
+ first_sampler_ce_slot =
+ si_get_image_slot(gfx9_max_ce_images[i] - 1) / 2;
+ num_sampler_ce_slots = gfx9_max_ce_images[i] / 2 +
+ gfx9_max_ce_samplers[i];
+
+ first_buffer_ce_slot =
+ si_get_shaderbuf_slot(gfx9_max_ce_shader_buffers[i] - 1);
+ num_buffer_ce_slots = gfx9_max_ce_shader_buffers[i] +
+ gfx9_max_ce_const_buffers[i];
+ }
- si_init_descriptors(si_sampler_descriptors(sctx, i),
- SI_SGPR_SAMPLERS, 16, SI_NUM_SAMPLERS,
- null_texture_descriptor, &ce_offset);
+ si_init_buffer_resources(sctx, &sctx->const_and_shader_buffers[i],
+ si_const_and_shader_buffer_descriptors(sctx, i),
+ num_buffer_slots,
+ first_buffer_ce_slot, num_buffer_ce_slots,
+ gfx9_tcs ? GFX9_SGPR_TCS_CONST_AND_SHADER_BUFFERS :
+ gfx9_gs ? GFX9_SGPR_GS_CONST_AND_SHADER_BUFFERS :
+ SI_SGPR_CONST_AND_SHADER_BUFFERS,
+ RADEON_USAGE_READWRITE,
+ RADEON_USAGE_READ,
+ RADEON_PRIO_SHADER_RW_BUFFER,
+ RADEON_PRIO_CONST_BUFFER,
+ &ce_offset);
- si_init_descriptors(si_image_descriptors(sctx, i),
- SI_SGPR_IMAGES, 8, SI_NUM_IMAGES,
- null_image_descriptor, &ce_offset);
+ struct si_descriptors *desc = si_sampler_and_image_descriptors(sctx, i);
+ si_init_descriptors(sctx, desc,
+ gfx9_tcs ? GFX9_SGPR_TCS_SAMPLERS_AND_IMAGES :
+ gfx9_gs ? GFX9_SGPR_GS_SAMPLERS_AND_IMAGES :
+ SI_SGPR_SAMPLERS_AND_IMAGES,
+ 16, num_sampler_slots,
+ first_sampler_ce_slot, num_sampler_ce_slots,
+ &ce_offset);
+
+ int j;
+ for (j = 0; j < SI_NUM_IMAGES; j++)
+ memcpy(desc->list + j * 8, null_image_descriptor, 8 * 4);
+ for (; j < SI_NUM_IMAGES + SI_NUM_SAMPLERS * 2; j++)
+ memcpy(desc->list + j * 8, null_texture_descriptor, 8 * 4);
}
- si_init_buffer_resources(&sctx->rw_buffers,
+ si_init_buffer_resources(sctx, &sctx->rw_buffers,
&sctx->descriptors[SI_DESCS_RW_BUFFERS],
- SI_NUM_RW_BUFFERS, SI_SGPR_RW_BUFFERS,
- RADEON_USAGE_READWRITE, RADEON_PRIO_SHADER_RINGS,
+ SI_NUM_RW_BUFFERS, 0, SI_NUM_RW_BUFFERS,
+ SI_SGPR_RW_BUFFERS,
+ /* The second set of usage/priority is used by
+ * const buffers in RW buffer slots. */
+ RADEON_USAGE_READWRITE, RADEON_USAGE_READ,
+ RADEON_PRIO_SHADER_RINGS, RADEON_PRIO_CONST_BUFFER,
&ce_offset);
- si_init_descriptors(&sctx->vertex_buffers, SI_SGPR_VERTEX_BUFFERS,
- 4, SI_NUM_VERTEX_BUFFERS, NULL, NULL);
+ sctx->descriptors[SI_DESCS_RW_BUFFERS].num_active_slots = SI_NUM_RW_BUFFERS;
+
+ si_init_descriptors(sctx, &sctx->vertex_buffers, SI_SGPR_VERTEX_BUFFERS,
+ 4, SI_NUM_VERTEX_BUFFERS, 0, 0, NULL);
sctx->descriptors_dirty = u_bit_consecutive(0, SI_NUM_DESCS);
+ sctx->total_ce_ram_allocated = ce_offset;
- assert(ce_offset <= 32768);
+ if (sctx->b.chip_class >= GFX9)
+ assert(ce_offset <= 4096);
+ else
+ assert(ce_offset <= 32768);
/* Set pipe_context functions. */
sctx->b.b.bind_sampler_states = si_bind_sampler_states;
sctx->b.b.set_shader_buffers = si_set_shader_buffers;
sctx->b.b.set_sampler_views = si_set_sampler_views;
sctx->b.b.set_stream_output_targets = si_set_streamout_targets;
+ sctx->b.b.create_texture_handle = si_create_texture_handle;
+ sctx->b.b.delete_texture_handle = si_delete_texture_handle;
+ sctx->b.b.make_texture_handle_resident = si_make_texture_handle_resident;
+ sctx->b.b.create_image_handle = si_create_image_handle;
+ sctx->b.b.delete_image_handle = si_delete_image_handle;
+ sctx->b.b.make_image_handle_resident = si_make_image_handle_resident;
sctx->b.invalidate_buffer = si_invalidate_buffer;
+ sctx->b.rebind_buffer = si_rebind_buffer;
/* Shader user data. */
si_init_atom(sctx, &sctx->shader_userdata.atom, &sctx->atoms.s.shader_userdata,
/* Set default and immutable mappings. */
si_set_user_data_base(sctx, PIPE_SHADER_VERTEX, R_00B130_SPI_SHADER_USER_DATA_VS_0);
- si_set_user_data_base(sctx, PIPE_SHADER_TESS_CTRL, R_00B430_SPI_SHADER_USER_DATA_HS_0);
- si_set_user_data_base(sctx, PIPE_SHADER_GEOMETRY, R_00B230_SPI_SHADER_USER_DATA_GS_0);
+
+ if (sctx->b.chip_class >= GFX9) {
+ si_set_user_data_base(sctx, PIPE_SHADER_TESS_CTRL,
+ R_00B430_SPI_SHADER_USER_DATA_LS_0);
+ si_set_user_data_base(sctx, PIPE_SHADER_GEOMETRY,
+ R_00B330_SPI_SHADER_USER_DATA_ES_0);
+ } else {
+ si_set_user_data_base(sctx, PIPE_SHADER_TESS_CTRL,
+ R_00B430_SPI_SHADER_USER_DATA_HS_0);
+ si_set_user_data_base(sctx, PIPE_SHADER_GEOMETRY,
+ R_00B230_SPI_SHADER_USER_DATA_GS_0);
+ }
si_set_user_data_base(sctx, PIPE_SHADER_FRAGMENT, R_00B030_SPI_SHADER_USER_DATA_PS_0);
}
const unsigned mask = u_bit_consecutive(0, SI_DESCS_FIRST_COMPUTE);
unsigned dirty = sctx->descriptors_dirty & mask;
+ /* Assume nothing will go wrong: */
+ sctx->shader_pointers_dirty |= dirty;
+
while (dirty) {
unsigned i = u_bit_scan(&dirty);
}
sctx->descriptors_dirty &= ~mask;
+
+ si_upload_bindless_descriptors(sctx);
+
return true;
}
SI_NUM_DESCS - SI_DESCS_FIRST_COMPUTE);
unsigned dirty = sctx->descriptors_dirty & mask;
+ /* Assume nothing will go wrong: */
+ sctx->shader_pointers_dirty |= dirty;
+
while (dirty) {
unsigned i = u_bit_scan(&dirty);
sctx->descriptors_dirty &= ~mask;
+ si_upload_bindless_descriptors(sctx);
+
return true;
}
int i;
for (i = 0; i < SI_NUM_SHADERS; i++) {
- si_release_buffer_resources(&sctx->const_buffers[i],
- si_const_buffer_descriptors(sctx, i));
- si_release_buffer_resources(&sctx->shader_buffers[i],
- si_shader_buffer_descriptors(sctx, i));
+ si_release_buffer_resources(&sctx->const_and_shader_buffers[i],
+ si_const_and_shader_buffer_descriptors(sctx, i));
si_release_sampler_views(&sctx->samplers[i].views);
si_release_image_views(&sctx->images[i]);
}
si_release_buffer_resources(&sctx->rw_buffers,
&sctx->descriptors[SI_DESCS_RW_BUFFERS]);
+ for (i = 0; i < SI_NUM_VERTEX_BUFFERS; i++)
+ pipe_vertex_buffer_unreference(&sctx->vertex_buffer[i]);
for (i = 0; i < SI_NUM_DESCS; ++i)
si_release_descriptors(&sctx->descriptors[i]);
int i;
for (i = 0; i < SI_NUM_SHADERS; i++) {
- si_buffer_resources_begin_new_cs(sctx, &sctx->const_buffers[i]);
- si_buffer_resources_begin_new_cs(sctx, &sctx->shader_buffers[i]);
+ si_buffer_resources_begin_new_cs(sctx, &sctx->const_and_shader_buffers[i]);
si_sampler_views_begin_new_cs(sctx, &sctx->samplers[i].views);
si_image_views_begin_new_cs(sctx, &sctx->images[i]);
}
si_shader_userdata_begin_new_cs(sctx);
}
+
+void si_set_active_descriptors(struct si_context *sctx, unsigned desc_idx,
+ uint64_t new_active_mask)
+{
+ struct si_descriptors *desc = &sctx->descriptors[desc_idx];
+
+ /* Ignore no-op updates and updates that disable all slots. */
+ if (!new_active_mask ||
+ new_active_mask == u_bit_consecutive64(desc->first_active_slot,
+ desc->num_active_slots))
+ return;
+
+ int first, count;
+ u_bit_scan_consecutive_range64(&new_active_mask, &first, &count);
+ assert(new_active_mask == 0);
+
+ /* Upload/dump descriptors if slots are being enabled. */
+ if (first < desc->first_active_slot ||
+ first + count > desc->first_active_slot + desc->num_active_slots)
+ sctx->descriptors_dirty |= 1u << desc_idx;
+
+ /* Enable or disable CE for this descriptor array. */
+ bool used_ce = desc->uses_ce;
+ desc->uses_ce = desc->first_ce_slot <= first &&
+ desc->first_ce_slot + desc->num_ce_slots >= first + count;
+
+ if (desc->uses_ce != used_ce) {
+ /* Upload or dump descriptors if we're disabling or enabling CE,
+ * respectively. */
+ sctx->descriptors_dirty |= 1u << desc_idx;
+
+ /* If we're enabling CE, re-upload all descriptors to CE RAM.
+ * When CE was disabled, uploads to CE RAM stopped.
+ */
+ if (desc->uses_ce) {
+ desc->dirty_mask |=
+ u_bit_consecutive64(desc->first_ce_slot,
+ desc->num_ce_slots);
+ }
+ }
+
+ desc->first_active_slot = first;
+ desc->num_active_slots = count;
+}
+
+void si_set_active_descriptors_for_shader(struct si_context *sctx,
+ struct si_shader_selector *sel)
+{
+ if (!sel)
+ return;
+
+ si_set_active_descriptors(sctx,
+ si_const_and_shader_buffer_descriptors_idx(sel->type),
+ sel->active_const_and_shader_buffers);
+ si_set_active_descriptors(sctx,
+ si_sampler_and_image_descriptors_idx(sel->type),
+ sel->active_samplers_and_images);
+}