* Marek Olšák <marek.olsak@amd.com>
*/
-/* Resource binding slots and sampler states (each described with 8 or 4 dwords)
- * live in memory on SI.
+/* Resource binding slots and sampler states (each described with 8 or
+ * 4 dwords) are stored in lists in memory that shaders access with
+ * scalar load instructions.
*
- * This file is responsible for managing lists of resources and sampler states
- * in memory and binding them, which means updating those structures in memory.
+ * This file is responsible for managing such lists. It keeps a copy of all
+ * descriptors in CPU memory and re-uploads a whole list if some slots have
+ * been changed.
*
- * There is also code for updating shader pointers to resources and sampler
- * states. CP DMA functions are here too.
+ * This code is also responsible for updating shader pointers to those lists.
+ *
+ * Note that CP DMA can't be used for updating the lists, because a GPU hang
+ * could leave the list in a mid-IB state; the next IB would then read wrong
+ * descriptors and the whole context would be unusable at that point.
+ * (Register shadowing can't be used for the same reason.)
+ *
+ * Also, uploading descriptors to newly allocated memory doesn't require
+ * a KCACHE flush.
+ *
+ *
+ * Possible scenarios for one 16 dword image+sampler slot:
+ *
+ *         | Image        | w/ FMASK   | Buffer       | NULL
+ * [ 0: 3]   Image[0:3]   | Image[0:3] | Null[0:3]    | Null[0:3]
+ * [ 4: 7]   Image[4:7]   | Image[4:7] | Buffer[0:3]  | 0
+ * [ 8:11]   Null[0:3]    | Fmask[0:3] | Null[0:3]    | Null[0:3]
+ * [12:15]   Sampler[0:3] | Fmask[4:7] | Sampler[0:3] | Sampler[0:3]
+ *
+ * FMASK implies MSAA, therefore no sampler state.
+ * Sampler states are never unbound except when FMASK is bound.
*/
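The dword offsets used throughout the binding code below follow directly from the table above. As a minimal sketch (the helper names are purely illustrative; the real code simply indexes desc->list with these offsets):

static inline uint32_t *slot_image(uint32_t *list, unsigned slot)
{
	return list + slot * 16;	/* image descriptor, dwords [0:7] */
}

static inline uint32_t *slot_fmask(uint32_t *list, unsigned slot)
{
	return list + slot * 16 + 8;	/* FMASK descriptor, dwords [8:15] */
}

static inline uint32_t *slot_sampler(uint32_t *list, unsigned slot)
{
	return list + slot * 16 + 12;	/* sampler state, dwords [12:15] */
}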
#include "radeon/r600_cs.h"
#include "util/u_memory.h"
#include "util/u_upload_mgr.h"
-#define SI_NUM_CONTEXTS 16
-/* NULL image and buffer descriptor.
+/* NULL image and buffer descriptor for textures (alpha = 1) and images
+ * (alpha = 0).
*
* For images, all fields must be zero except for the swizzle, which
* supports arbitrary combinations of 0s and 1s. The texture type must be
*
* This is the only reason why the buffer descriptor must be in words [4:7].
*/
-static uint32_t null_descriptor[8] = {
+static uint32_t null_texture_descriptor[8] = {
0,
0,
0,
* descriptor */
};
-/* Set this if you want the 3D engine to wait until CP DMA is done.
- * It should be set on the last CP DMA packet. */
-#define R600_CP_DMA_SYNC (1 << 0) /* R600+ */
-
-/* Set this if the source data was used as a destination in a previous CP DMA
- * packet. It's for preventing a read-after-write (RAW) hazard between two
- * CP DMA packets. */
-#define SI_CP_DMA_RAW_WAIT (1 << 1) /* SI+ */
-#define CIK_CP_DMA_USE_L2 (1 << 2)
-
-/* Emit a CP DMA packet to do a copy from one buffer to another.
- * The size must fit in bits [20:0].
- */
-static void si_emit_cp_dma_copy_buffer(struct si_context *sctx,
- uint64_t dst_va, uint64_t src_va,
- unsigned size, unsigned flags)
-{
- struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs;
- uint32_t sync_flag = flags & R600_CP_DMA_SYNC ? PKT3_CP_DMA_CP_SYNC : 0;
- uint32_t raw_wait = flags & SI_CP_DMA_RAW_WAIT ? PKT3_CP_DMA_CMD_RAW_WAIT : 0;
- uint32_t sel = flags & CIK_CP_DMA_USE_L2 ?
- PKT3_CP_DMA_SRC_SEL(3) | PKT3_CP_DMA_DST_SEL(3) : 0;
-
- assert(size);
- assert((size & ((1<<21)-1)) == size);
-
- if (sctx->b.chip_class >= CIK) {
- radeon_emit(cs, PKT3(PKT3_DMA_DATA, 5, 0));
- radeon_emit(cs, sync_flag | sel); /* CP_SYNC [31] */
- radeon_emit(cs, src_va); /* SRC_ADDR_LO [31:0] */
- radeon_emit(cs, src_va >> 32); /* SRC_ADDR_HI [31:0] */
- radeon_emit(cs, dst_va); /* DST_ADDR_LO [31:0] */
- radeon_emit(cs, dst_va >> 32); /* DST_ADDR_HI [31:0] */
- radeon_emit(cs, size | raw_wait); /* COMMAND [29:22] | BYTE_COUNT [20:0] */
- } else {
- radeon_emit(cs, PKT3(PKT3_CP_DMA, 4, 0));
- radeon_emit(cs, src_va); /* SRC_ADDR_LO [31:0] */
- radeon_emit(cs, sync_flag | ((src_va >> 32) & 0xffff)); /* CP_SYNC [31] | SRC_ADDR_HI [15:0] */
- radeon_emit(cs, dst_va); /* DST_ADDR_LO [31:0] */
- radeon_emit(cs, (dst_va >> 32) & 0xffff); /* DST_ADDR_HI [15:0] */
- radeon_emit(cs, size | raw_wait); /* COMMAND [29:22] | BYTE_COUNT [20:0] */
- }
-}
-
-/* Emit a CP DMA packet to clear a buffer. The size must fit in bits [20:0]. */
-static void si_emit_cp_dma_clear_buffer(struct si_context *sctx,
- uint64_t dst_va, unsigned size,
- uint32_t clear_value, unsigned flags)
-{
- struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs;
- uint32_t sync_flag = flags & R600_CP_DMA_SYNC ? PKT3_CP_DMA_CP_SYNC : 0;
- uint32_t raw_wait = flags & SI_CP_DMA_RAW_WAIT ? PKT3_CP_DMA_CMD_RAW_WAIT : 0;
- uint32_t dst_sel = flags & CIK_CP_DMA_USE_L2 ? PKT3_CP_DMA_DST_SEL(3) : 0;
-
- assert(size);
- assert((size & ((1<<21)-1)) == size);
-
- if (sctx->b.chip_class >= CIK) {
- radeon_emit(cs, PKT3(PKT3_DMA_DATA, 5, 0));
- radeon_emit(cs, sync_flag | dst_sel | PKT3_CP_DMA_SRC_SEL(2)); /* CP_SYNC [31] | SRC_SEL[30:29] */
- radeon_emit(cs, clear_value); /* DATA [31:0] */
- radeon_emit(cs, 0);
- radeon_emit(cs, dst_va); /* DST_ADDR_LO [31:0] */
- radeon_emit(cs, dst_va >> 32); /* DST_ADDR_HI [15:0] */
- radeon_emit(cs, size | raw_wait); /* COMMAND [29:22] | BYTE_COUNT [20:0] */
- } else {
- radeon_emit(cs, PKT3(PKT3_CP_DMA, 4, 0));
- radeon_emit(cs, clear_value); /* DATA [31:0] */
- radeon_emit(cs, sync_flag | PKT3_CP_DMA_SRC_SEL(2)); /* CP_SYNC [31] | SRC_SEL[30:29] */
- radeon_emit(cs, dst_va); /* DST_ADDR_LO [31:0] */
- radeon_emit(cs, (dst_va >> 32) & 0xffff); /* DST_ADDR_HI [15:0] */
- radeon_emit(cs, size | raw_wait); /* COMMAND [29:22] | BYTE_COUNT [20:0] */
- }
-}
+static uint32_t null_image_descriptor[8] = {
+ 0,
+ 0,
+ 0,
+ S_008F1C_TYPE(V_008F1C_SQ_RSRC_IMG_1D)
+ /* the rest must contain zeros, which is also used by the buffer
+ * descriptor */
+};
-static void si_init_descriptors(struct si_context *sctx,
- struct si_descriptors *desc,
+static void si_init_descriptors(struct si_descriptors *desc,
unsigned shader_userdata_index,
unsigned element_dw_size,
unsigned num_elements,
- void (*emit_func)(struct si_context *ctx, struct r600_atom *state))
+ const uint32_t *null_descriptor)
{
+ int i;
+
assert(num_elements <= sizeof(desc->enabled_mask)*8);
- assert(num_elements <= sizeof(desc->dirty_mask)*8);
- desc->atom.emit = (void*)emit_func;
- desc->shader_userdata_offset = shader_userdata_index * 4;
+ desc->list = CALLOC(num_elements, element_dw_size * 4);
desc->element_dw_size = element_dw_size;
desc->num_elements = num_elements;
- desc->context_size = num_elements * element_dw_size * 4;
-
- desc->buffer = (struct r600_resource*)
- pipe_buffer_create(sctx->b.b.screen, PIPE_BIND_CUSTOM,
- PIPE_USAGE_DEFAULT,
- SI_NUM_CONTEXTS * desc->context_size);
-
- r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx, desc->buffer,
- RADEON_USAGE_READWRITE, RADEON_PRIO_SHADER_DATA);
+ desc->list_dirty = true; /* upload the list before the next draw */
+ desc->shader_userdata_offset = shader_userdata_index * 4;
- /* We don't check for CS space here, because this should be called
- * only once at context initialization. */
- si_emit_cp_dma_clear_buffer(sctx, desc->buffer->gpu_address,
- desc->buffer->b.b.width0, 0,
- R600_CP_DMA_SYNC | CIK_CP_DMA_USE_L2);
+ /* Initialize the array to NULL descriptors if the element size is a multiple of 8. */
+ if (null_descriptor) {
+ assert(element_dw_size % 8 == 0);
+ for (i = 0; i < num_elements * element_dw_size / 8; i++)
+ memcpy(desc->list + i * 8, null_descriptor,
+ 8 * 4);
+ }
}
static void si_release_descriptors(struct si_descriptors *desc)
{
pipe_resource_reference((struct pipe_resource**)&desc->buffer, NULL);
+ FREE(desc->list);
}
-static void si_update_descriptors(struct si_context *sctx,
+static bool si_upload_descriptors(struct si_context *sctx,
struct si_descriptors *desc)
{
- if (desc->dirty_mask) {
- desc->atom.num_dw =
- 7 + /* copy */
- (4 + desc->element_dw_size) * util_bitcount(desc->dirty_mask); /* update */
-
- desc->atom.dirty = true;
- desc->pointer_dirty = true;
- sctx->shader_userdata.atom.dirty = true;
-
- /* TODO: Investigate if these flushes can be removed after
- * adding CE support. */
-
- /* The descriptors are read with the K cache. */
- sctx->b.flags |= SI_CONTEXT_INV_KCACHE;
-
- /* Since SI uses uncached CP DMA to update descriptors,
- * we have to flush TC L2, which is used to fetch constants
- * along with KCACHE. */
- if (sctx->b.chip_class == SI)
- sctx->b.flags |= SI_CONTEXT_INV_TC_L2;
- } else {
- desc->atom.dirty = false;
- }
-}
+ unsigned list_size = desc->num_elements * desc->element_dw_size * 4;
+ void *ptr;
-static void si_emit_descriptors(struct si_context *sctx,
- struct si_descriptors *desc,
- uint32_t **descriptors)
-{
- struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs;
- uint64_t va_base;
- int packet_start = 0;
- int packet_size = 0;
- int last_index = desc->num_elements; /* point to a non-existing element */
- uint64_t dirty_mask = desc->dirty_mask;
- unsigned new_context_id = (desc->current_context_id + 1) % SI_NUM_CONTEXTS;
-
- assert(dirty_mask);
-
- va_base = desc->buffer->gpu_address;
-
- /* Copy the descriptors to a new context slot. */
- si_emit_cp_dma_copy_buffer(sctx,
- va_base + new_context_id * desc->context_size,
- va_base + desc->current_context_id * desc->context_size,
- desc->context_size, R600_CP_DMA_SYNC | CIK_CP_DMA_USE_L2);
-
- va_base += new_context_id * desc->context_size;
-
- /* Update the descriptors.
- * Updates of consecutive descriptors are merged to one WRITE_DATA packet.
- *
- * XXX When unbinding lots of resources, consider clearing the memory
- * with CP DMA instead of emitting zeros.
- */
- while (dirty_mask) {
- int i = u_bit_scan64(&dirty_mask);
+ if (!desc->list_dirty)
+ return true;
- assert(i < desc->num_elements);
+ u_upload_alloc(sctx->b.uploader, 0, list_size, 256,
+ &desc->buffer_offset,
+ (struct pipe_resource**)&desc->buffer, &ptr);
+ if (!desc->buffer)
+ return false; /* skip the draw call */
- if (last_index+1 == i && packet_size) {
- /* Append new data at the end of the last packet. */
- packet_size += desc->element_dw_size;
- cs->buf[packet_start] = PKT3(PKT3_WRITE_DATA, packet_size, 0);
- } else {
- /* Start a new packet. */
- uint64_t va = va_base + i * desc->element_dw_size * 4;
-
- packet_start = cs->cdw;
- packet_size = 2 + desc->element_dw_size;
-
- radeon_emit(cs, PKT3(PKT3_WRITE_DATA, packet_size, 0));
- radeon_emit(cs, PKT3_WRITE_DATA_DST_SEL(sctx->b.chip_class == SI ?
- PKT3_WRITE_DATA_DST_SEL_MEM_SYNC :
- PKT3_WRITE_DATA_DST_SEL_TC_L2) |
- PKT3_WRITE_DATA_WR_CONFIRM |
- PKT3_WRITE_DATA_ENGINE_SEL(PKT3_WRITE_DATA_ENGINE_SEL_ME));
- radeon_emit(cs, va & 0xFFFFFFFFUL);
- radeon_emit(cs, (va >> 32UL) & 0xFFFFFFFFUL);
- }
+ util_memcpy_cpu_to_le32(ptr, desc->list, list_size);
- radeon_emit_array(cs, descriptors[i], desc->element_dw_size);
-
- last_index = i;
- }
+ radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, desc->buffer,
+ RADEON_USAGE_READ, RADEON_PRIO_DESCRIPTORS);
- desc->dirty_mask = 0;
- desc->current_context_id = new_context_id;
+ desc->list_dirty = false;
+ desc->pointer_dirty = true;
+ si_mark_atom_dirty(sctx, &sctx->shader_userdata.atom);
+ return true;
}
/* SAMPLER VIEWS */
-static void si_emit_sampler_views(struct si_context *sctx, struct r600_atom *atom)
-{
- struct si_sampler_views *views = (struct si_sampler_views*)atom;
-
- si_emit_descriptors(sctx, &views->desc, views->desc_data);
-}
-
-static void si_init_sampler_views(struct si_context *sctx,
- struct si_sampler_views *views)
-{
- int i;
-
- si_init_descriptors(sctx, &views->desc, SI_SGPR_RESOURCE,
- 8, SI_NUM_SAMPLER_VIEWS, si_emit_sampler_views);
-
- for (i = 0; i < views->desc.num_elements; i++) {
- views->desc_data[i] = null_descriptor;
- views->desc.dirty_mask |= 1llu << i;
- }
- si_update_descriptors(sctx, &views->desc);
-}
-
static void si_release_sampler_views(struct si_sampler_views *views)
{
int i;
si_release_descriptors(&views->desc);
}
-static enum radeon_bo_priority si_get_resource_ro_priority(struct r600_resource *res)
+static void si_sampler_view_add_buffer(struct si_context *sctx,
+ struct pipe_resource *resource)
{
- if (res->b.b.target == PIPE_BUFFER)
- return RADEON_PRIO_SHADER_BUFFER_RO;
+ struct r600_resource *rres = (struct r600_resource*)resource;
- if (res->b.b.nr_samples > 1)
- return RADEON_PRIO_SHADER_TEXTURE_MSAA;
+ if (!resource)
+ return;
- return RADEON_PRIO_SHADER_TEXTURE_RO;
+ radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, rres,
+ RADEON_USAGE_READ,
+ r600_get_sampler_view_priority(rres));
}
static void si_sampler_views_begin_new_cs(struct si_context *sctx,
{
uint64_t mask = views->desc.enabled_mask;
- /* Add relocations to the CS. */
+ /* Add buffers to the CS. */
while (mask) {
int i = u_bit_scan64(&mask);
- struct si_sampler_view *rview =
- (struct si_sampler_view*)views->views[i];
- if (!rview->resource)
- continue;
-
- r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx,
- rview->resource, RADEON_USAGE_READ,
- si_get_resource_ro_priority(rview->resource));
+ si_sampler_view_add_buffer(sctx, views->views[i]->texture);
}
- r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx, views->desc.buffer,
- RADEON_USAGE_READWRITE, RADEON_PRIO_SHADER_DATA);
+ if (!views->desc.buffer)
+ return;
+ radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, views->desc.buffer,
+ RADEON_USAGE_READWRITE, RADEON_PRIO_DESCRIPTORS);
}
-static void si_set_sampler_view(struct si_context *sctx, unsigned shader,
- unsigned slot, struct pipe_sampler_view *view,
- unsigned *view_desc)
+static void si_set_sampler_view(struct si_context *sctx,
+ struct si_sampler_views *views,
+ unsigned slot, struct pipe_sampler_view *view)
{
- struct si_sampler_views *views = &sctx->samplers[shader].views;
-
- if (views->views[slot] == view)
+ struct si_sampler_view *rview = (struct si_sampler_view*)view;
+
+ if (view && view->texture && view->texture->target != PIPE_BUFFER &&
+ G_008F28_COMPRESSION_EN(rview->state[6]) &&
+ ((struct r600_texture*)view->texture)->dcc_offset == 0) {
+ rview->state[6] &= C_008F28_COMPRESSION_EN &
+ C_008F28_ALPHA_IS_ON_MSB;
+ } else if (views->views[slot] == view)
return;
if (view) {
- struct si_sampler_view *rview =
- (struct si_sampler_view*)view;
-
- if (rview->resource)
- r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx,
- rview->resource, RADEON_USAGE_READ,
- si_get_resource_ro_priority(rview->resource));
+ struct r600_texture *rtex = (struct r600_texture *)view->texture;
+ si_sampler_view_add_buffer(sctx, view->texture);
pipe_sampler_view_reference(&views->views[slot], view);
- views->desc_data[slot] = view_desc;
+ memcpy(views->desc.list + slot * 16, rview->state, 8*4);
+
+ if (view->texture && view->texture->target != PIPE_BUFFER &&
+ rtex->fmask.size) {
+ memcpy(views->desc.list + slot*16 + 8,
+ rview->fmask_state, 8*4);
+ } else {
+ /* Disable FMASK and bind sampler state in [12:15]. */
+ memcpy(views->desc.list + slot*16 + 8,
+ null_texture_descriptor, 4*4);
+
+ if (views->sampler_states[slot])
+ memcpy(views->desc.list + slot*16 + 12,
+ views->sampler_states[slot], 4*4);
+ }
+
views->desc.enabled_mask |= 1llu << slot;
} else {
pipe_sampler_view_reference(&views->views[slot], NULL);
- views->desc_data[slot] = null_descriptor;
+ memcpy(views->desc.list + slot*16, null_texture_descriptor, 8*4);
+ /* Only clear the lower dwords of FMASK. */
+ memcpy(views->desc.list + slot*16 + 8, null_texture_descriptor, 4*4);
views->desc.enabled_mask &= ~(1llu << slot);
}
- views->desc.dirty_mask |= 1llu << slot;
+ views->desc.list_dirty = true;
+}
+
+static bool is_compressed_colortex(struct r600_texture *rtex)
+{
+ return rtex->cmask.size || rtex->fmask.size ||
+ (rtex->dcc_offset && rtex->dirty_level_mask);
}
static void si_set_sampler_views(struct pipe_context *ctx,
{
struct si_context *sctx = (struct si_context *)ctx;
struct si_textures_info *samplers = &sctx->samplers[shader];
- struct si_sampler_view **rviews = (struct si_sampler_view **)views;
int i;
if (!count || shader >= SI_NUM_SHADERS)
if (!views || !views[i]) {
samplers->depth_texture_mask &= ~(1 << slot);
samplers->compressed_colortex_mask &= ~(1 << slot);
- si_set_sampler_view(sctx, shader, slot, NULL, NULL);
- si_set_sampler_view(sctx, shader, SI_FMASK_TEX_OFFSET + slot,
- NULL, NULL);
+ si_set_sampler_view(sctx, &samplers->views, slot, NULL);
continue;
}
- si_set_sampler_view(sctx, shader, slot, views[i], rviews[i]->state);
+ si_set_sampler_view(sctx, &samplers->views, slot, views[i]);
if (views[i]->texture && views[i]->texture->target != PIPE_BUFFER) {
struct r600_texture *rtex =
} else {
samplers->depth_texture_mask &= ~(1 << slot);
}
- if (rtex->cmask.size || rtex->fmask.size) {
+ if (is_compressed_colortex(rtex)) {
samplers->compressed_colortex_mask |= 1 << slot;
} else {
samplers->compressed_colortex_mask &= ~(1 << slot);
}
-
- if (rtex->fmask.size) {
- si_set_sampler_view(sctx, shader, SI_FMASK_TEX_OFFSET + slot,
- views[i], rviews[i]->fmask_state);
- } else {
- si_set_sampler_view(sctx, shader, SI_FMASK_TEX_OFFSET + slot,
- NULL, NULL);
- }
} else {
samplers->depth_texture_mask &= ~(1 << slot);
samplers->compressed_colortex_mask &= ~(1 << slot);
- si_set_sampler_view(sctx, shader, SI_FMASK_TEX_OFFSET + slot,
- NULL, NULL);
}
}
+}
+
+static void
+si_samplers_update_compressed_colortex_mask(struct si_textures_info *samplers)
+{
+ uint64_t mask = samplers->views.desc.enabled_mask;
- si_update_descriptors(sctx, &samplers->views.desc);
+ while (mask) {
+ int i = u_bit_scan64(&mask);
+ struct pipe_resource *res = samplers->views.views[i]->texture;
+
+ if (res && res->target != PIPE_BUFFER) {
+ struct r600_texture *rtex = (struct r600_texture *)res;
+
+ if (is_compressed_colortex(rtex)) {
+ samplers->compressed_colortex_mask |= 1 << i;
+ } else {
+ samplers->compressed_colortex_mask &= ~(1 << i);
+ }
+ }
+ }
}
-/* SAMPLER STATES */
+/* IMAGE VIEWS */
+
+static void
+si_release_image_views(struct si_images_info *images)
+{
+ unsigned i;
+
+ for (i = 0; i < SI_NUM_IMAGES; ++i) {
+ struct pipe_image_view *view = &images->views[i];
+
+ pipe_resource_reference(&view->resource, NULL);
+ }
+
+ si_release_descriptors(&images->desc);
+}
-static void si_emit_sampler_states(struct si_context *sctx, struct r600_atom *atom)
+static void
+si_image_views_begin_new_cs(struct si_context *sctx, struct si_images_info *images)
{
- struct si_sampler_states *states = (struct si_sampler_states*)atom;
+ uint mask = images->desc.enabled_mask;
+
+ /* Add buffers to the CS. */
+ while (mask) {
+ int i = u_bit_scan(&mask);
+ struct pipe_image_view *view = &images->views[i];
+
+ assert(view->resource);
+
+ si_sampler_view_add_buffer(sctx, view->resource);
+ }
- si_emit_descriptors(sctx, &states->desc, states->desc_data);
+ if (images->desc.buffer) {
+ radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
+ images->desc.buffer,
+ RADEON_USAGE_READ,
+ RADEON_PRIO_DESCRIPTORS);
+ }
}
-static void si_sampler_states_begin_new_cs(struct si_context *sctx,
- struct si_sampler_states *states)
+static void
+si_disable_shader_image(struct si_images_info *images, unsigned slot)
{
- r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx, states->desc.buffer,
- RADEON_USAGE_READWRITE, RADEON_PRIO_SHADER_DATA);
+ if (images->desc.enabled_mask & (1llu << slot)) {
+ pipe_resource_reference(&images->views[slot].resource, NULL);
+ images->compressed_colortex_mask &= ~(1 << slot);
+
+ memcpy(images->desc.list + slot*8, null_image_descriptor, 8*4);
+ images->desc.enabled_mask &= ~(1llu << slot);
+ images->desc.list_dirty = true;
+ }
}
-void si_set_sampler_descriptors(struct si_context *sctx, unsigned shader,
- unsigned start, unsigned count, void **states)
+static void
+si_set_shader_images(struct pipe_context *pipe, unsigned shader,
+ unsigned start_slot, unsigned count,
+ struct pipe_image_view *views)
{
- struct si_sampler_states *samplers = &sctx->samplers[shader].states;
- struct si_sampler_state **sstates = (struct si_sampler_state**)states;
- int i;
+ struct si_context *ctx = (struct si_context *)pipe;
+ struct si_screen *screen = ctx->screen;
+ struct si_images_info *images = &ctx->images[shader];
+ unsigned i, slot;
- if (start == 0)
- samplers->saved_states[0] = states[0];
- if (start == 1)
- samplers->saved_states[1] = states[0];
- else if (start == 0 && count >= 2)
- samplers->saved_states[1] = states[1];
+ assert(shader < SI_NUM_SHADERS);
- for (i = 0; i < count; i++) {
- unsigned slot = start + i;
+ if (!count)
+ return;
+
+ assert(start_slot + count <= SI_NUM_IMAGES);
- if (!sstates[i]) {
- samplers->desc.dirty_mask &= ~(1llu << slot);
+ for (i = 0, slot = start_slot; i < count; ++i, ++slot) {
+ struct r600_resource *res;
+
+ if (!views || !views[i].resource) {
+ si_disable_shader_image(images, slot);
continue;
}
- samplers->desc_data[slot] = sstates[i]->val;
- samplers->desc.dirty_mask |= 1llu << slot;
+ res = (struct r600_resource *)views[i].resource;
+ util_copy_image_view(&images->views[slot], &views[i]);
+
+ si_sampler_view_add_buffer(ctx, &res->b.b);
+
+ if (res->b.b.target == PIPE_BUFFER) {
+ si_make_buffer_descriptor(screen, res,
+ views[i].format,
+ views[i].u.buf.first_element,
+ views[i].u.buf.last_element,
+ images->desc.list + slot * 8);
+ images->compressed_colortex_mask &= ~(1 << slot);
+ } else {
+ static const unsigned char swizzle[4] = { 0, 1, 2, 3 };
+ struct r600_texture *tex = (struct r600_texture *)res;
+ unsigned level;
+ unsigned width, height, depth;
+
+ assert(!tex->is_depth);
+ assert(tex->fmask.size == 0);
+
+ if (tex->dcc_offset &&
+ views[i].access & PIPE_IMAGE_ACCESS_WRITE)
+ r600_texture_disable_dcc(&screen->b, tex);
+
+ if (is_compressed_colortex(tex)) {
+ images->compressed_colortex_mask |= 1 << slot;
+ } else {
+ images->compressed_colortex_mask &= ~(1 << slot);
+ }
+
+ /* Always force the base level to the selected level.
+ *
+ * This is required for 3D textures, where otherwise
+ * selecting a single slice for non-layered bindings
+ * fails. It doesn't hurt the other targets.
+ */
+ level = views[i].u.tex.level;
+ width = u_minify(res->b.b.width0, level);
+ height = u_minify(res->b.b.height0, level);
+ depth = u_minify(res->b.b.depth0, level);
+
+ si_make_texture_descriptor(screen, tex, false, res->b.b.target,
+ views[i].format, swizzle,
+ level, 0, 0,
+ views[i].u.tex.first_layer, views[i].u.tex.last_layer,
+ width, height, depth,
+ images->desc.list + slot * 8,
+ NULL);
+ }
+
+ images->desc.enabled_mask |= 1llu << slot;
+ images->desc.list_dirty = true;
}
+}
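To make the comment about forcing the base level concrete: with a 64x64x32 3D texture and views[i].u.tex.level = 2, the width, height and depth passed to si_make_texture_descriptor above become u_minify(64, 2) = 16, u_minify(64, 2) = 16 and u_minify(32, 2) = 8, i.e. the sizes of the selected level rather than those of level 0.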
+
+static void
+si_images_update_compressed_colortex_mask(struct si_images_info *images)
+{
+ uint64_t mask = images->desc.enabled_mask;
+
+ while (mask) {
+ int i = u_bit_scan64(&mask);
+ struct pipe_resource *res = images->views[i].resource;
- si_update_descriptors(sctx, &samplers->desc);
+ if (res && res->target != PIPE_BUFFER) {
+ struct r600_texture *rtex = (struct r600_texture *)res;
+
+ if (is_compressed_colortex(rtex)) {
+ images->compressed_colortex_mask |= 1 << i;
+ } else {
+ images->compressed_colortex_mask &= ~(1 << i);
+ }
+ }
+ }
}
-/* BUFFER RESOURCES */
+/* SAMPLER STATES */
-static void si_emit_buffer_resources(struct si_context *sctx, struct r600_atom *atom)
+static void si_bind_sampler_states(struct pipe_context *ctx, unsigned shader,
+ unsigned start, unsigned count, void **states)
{
- struct si_buffer_resources *buffers = (struct si_buffer_resources*)atom;
+ struct si_context *sctx = (struct si_context *)ctx;
+ struct si_textures_info *samplers = &sctx->samplers[shader];
+ struct si_descriptors *desc = &samplers->views.desc;
+ struct si_sampler_state **sstates = (struct si_sampler_state**)states;
+ int i;
+
+ if (!count || shader >= SI_NUM_SHADERS)
+ return;
+
+ for (i = 0; i < count; i++) {
+ unsigned slot = start + i;
+
+ if (!sstates[i] ||
+ sstates[i] == samplers->views.sampler_states[slot])
+ continue;
+
+ samplers->views.sampler_states[slot] = sstates[i];
+
+ /* If FMASK is bound, don't overwrite it.
+ * The sampler state will be set after FMASK is unbound.
+ */
+ if (samplers->views.views[slot] &&
+ samplers->views.views[slot]->texture &&
+ samplers->views.views[slot]->texture->target != PIPE_BUFFER &&
+ ((struct r600_texture*)samplers->views.views[slot]->texture)->fmask.size)
+ continue;
- si_emit_descriptors(sctx, &buffers->desc, buffers->desc_data);
+ memcpy(desc->list + slot * 16 + 12, sstates[i]->val, 4*4);
+ desc->list_dirty = true;
+ }
}
-static void si_init_buffer_resources(struct si_context *sctx,
- struct si_buffer_resources *buffers,
+/* BUFFER RESOURCES */
+
+static void si_init_buffer_resources(struct si_buffer_resources *buffers,
unsigned num_buffers,
unsigned shader_userdata_index,
enum radeon_bo_usage shader_usage,
enum radeon_bo_priority priority)
{
- int i;
-
- buffers->num_buffers = num_buffers;
buffers->shader_usage = shader_usage;
buffers->priority = priority;
buffers->buffers = CALLOC(num_buffers, sizeof(struct pipe_resource*));
- buffers->desc_storage = CALLOC(num_buffers, sizeof(uint32_t) * 4);
-
- /* si_emit_descriptors only accepts an array of arrays.
- * This adds such an array. */
- buffers->desc_data = CALLOC(num_buffers, sizeof(uint32_t*));
- for (i = 0; i < num_buffers; i++) {
- buffers->desc_data[i] = &buffers->desc_storage[i*4];
- }
- si_init_descriptors(sctx, &buffers->desc, shader_userdata_index, 4,
- num_buffers, si_emit_buffer_resources);
+ si_init_descriptors(&buffers->desc, shader_userdata_index, 4,
+ num_buffers, NULL);
}
static void si_release_buffer_resources(struct si_buffer_resources *buffers)
{
int i;
- for (i = 0; i < buffers->num_buffers; i++) {
+ for (i = 0; i < buffers->desc.num_elements; i++) {
pipe_resource_reference(&buffers->buffers[i], NULL);
}
FREE(buffers->buffers);
- FREE(buffers->desc_storage);
- FREE(buffers->desc_data);
si_release_descriptors(&buffers->desc);
}
{
uint64_t mask = buffers->desc.enabled_mask;
- /* Add relocations to the CS. */
+ /* Add buffers to the CS. */
while (mask) {
int i = u_bit_scan64(&mask);
- r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx,
+ radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
(struct r600_resource*)buffers->buffers[i],
buffers->shader_usage, buffers->priority);
}
- r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx,
+ if (!buffers->desc.buffer)
+ return;
+ radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
buffers->desc.buffer, RADEON_USAGE_READWRITE,
- RADEON_PRIO_SHADER_DATA);
+ RADEON_PRIO_DESCRIPTORS);
}
/* VERTEX BUFFERS */
if (!sctx->vertex_buffer[vb].buffer)
continue;
- r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx,
+ radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
(struct r600_resource*)sctx->vertex_buffer[vb].buffer,
- RADEON_USAGE_READ, RADEON_PRIO_SHADER_BUFFER_RO);
+ RADEON_USAGE_READ, RADEON_PRIO_VERTEX_BUFFER);
}
- r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx,
+
+ if (!desc->buffer)
+ return;
+ radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
desc->buffer, RADEON_USAGE_READ,
- RADEON_PRIO_SHADER_DATA);
+ RADEON_PRIO_DESCRIPTORS);
}
-void si_update_vertex_buffers(struct si_context *sctx)
+static bool si_upload_vertex_buffer_descriptors(struct si_context *sctx)
{
struct si_descriptors *desc = &sctx->vertex_buffers;
bool bound[SI_NUM_VERTEX_BUFFERS] = {};
uint64_t va;
uint32_t *ptr;
+ if (!sctx->vertex_buffers_dirty)
+ return true;
if (!count || !sctx->vertex_elements)
- return;
+ return true;
/* Vertex buffer descriptors are the only ones which are uploaded
* directly through a staging buffer and don't go through
* the fine-grained upload path.
*/
- u_upload_alloc(sctx->b.uploader, 0, count * 16, &desc->buffer_offset,
+ u_upload_alloc(sctx->b.uploader, 0, count * 16, 256, &desc->buffer_offset,
(struct pipe_resource**)&desc->buffer, (void**)&ptr);
+ if (!desc->buffer)
+ return false;
- r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx,
+ radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
desc->buffer, RADEON_USAGE_READ,
- RADEON_PRIO_SHADER_DATA);
+ RADEON_PRIO_DESCRIPTORS);
assert(count <= SI_NUM_VERTEX_BUFFERS);
- assert(desc->current_context_id == 0);
for (i = 0; i < count; i++) {
struct pipe_vertex_element *ve = &sctx->vertex_elements->elements[i];
vb = &sctx->vertex_buffer[ve->vertex_buffer_index];
rbuffer = (struct r600_resource*)vb->buffer;
- if (rbuffer == NULL) {
+ if (!rbuffer) {
memset(desc, 0, 16);
continue;
}
va = rbuffer->gpu_address + offset;
/* Fill in T# buffer resource description */
- desc[0] = va & 0xFFFFFFFF;
+ desc[0] = va;
desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) |
S_008F04_STRIDE(vb->stride);
- if (vb->stride)
+
+ if (sctx->b.chip_class <= CIK && vb->stride)
/* Round up by rounding down and adding 1 */
desc[2] = (vb->buffer->width0 - offset -
sctx->vertex_elements->format_size[i]) /
desc[3] = sctx->vertex_elements->rsrc_word3[i];
if (!bound[ve->vertex_buffer_index]) {
- r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx,
+ radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
(struct r600_resource*)vb->buffer,
- RADEON_USAGE_READ, RADEON_PRIO_SHADER_BUFFER_RO);
+ RADEON_USAGE_READ, RADEON_PRIO_VERTEX_BUFFER);
bound[ve->vertex_buffer_index] = true;
}
}
* uploaded to a fresh new buffer, so I don't think flushing the const
* cache is needed. */
desc->pointer_dirty = true;
- sctx->shader_userdata.atom.dirty = true;
+ si_mark_atom_dirty(sctx, &sctx->shader_userdata.atom);
+ sctx->vertex_buffers_dirty = false;
+ return true;
}
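A rough worked example of the CIK record-count computation in the loop above, assuming the elided tail of that statement divides by vb->stride and adds 1, as the "round up by rounding down and adding 1" comment describes: with width0 = 72, offset = 0, format_size = 8 and stride = 16, it yields (72 - 8) / 16 + 1 = 5 records, whereas a plain 72 / 16 would give only 4 and drop the last, partially-strided vertex. A hypothetical helper mirroring that branch (it assumes the buffer holds at least one whole element):

static unsigned cik_vb_num_records(unsigned width0, unsigned offset,
				   unsigned format_size, unsigned stride)
{
	/* Illustrative only; mirrors the CIK branch above. */
	return (width0 - offset - format_size) / stride + 1;
}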
{
void *tmp;
- u_upload_alloc(sctx->b.uploader, 0, size, const_offset,
+ u_upload_alloc(sctx->b.uploader, 0, size, 256, const_offset,
(struct pipe_resource**)rbuffer, &tmp);
- util_memcpy_cpu_to_le32(tmp, ptr, size);
+ if (*rbuffer)
+ util_memcpy_cpu_to_le32(tmp, ptr, size);
}
static void si_set_constant_buffer(struct pipe_context *ctx, uint shader, uint slot,
if (shader >= SI_NUM_SHADERS)
return;
- assert(slot < buffers->num_buffers);
+ assert(slot < buffers->desc.num_elements);
pipe_resource_reference(&buffers->buffers[slot], NULL);
/* CIK cannot unbind a constant buffer (S_BUFFER_LOAD is buggy
si_upload_const_buffer(sctx,
(struct r600_resource**)&buffer, input->user_buffer,
input->buffer_size, &buffer_offset);
+ if (!buffer) {
+ /* Just unbind on failure. */
+ si_set_constant_buffer(ctx, shader, slot, NULL);
+ return;
+ }
va = r600_resource(buffer)->gpu_address + buffer_offset;
} else {
pipe_resource_reference(&buffer, input->buffer);
}
/* Set the descriptor. */
- uint32_t *desc = buffers->desc_data[slot];
+ uint32_t *desc = buffers->desc.list + slot*4;
desc[0] = va;
desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) |
S_008F04_STRIDE(0);
S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
buffers->buffers[slot] = buffer;
- r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx,
+ radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
(struct r600_resource*)buffer,
buffers->shader_usage, buffers->priority);
buffers->desc.enabled_mask |= 1llu << slot;
} else {
/* Clear the descriptor. */
- memset(buffers->desc_data[slot], 0, sizeof(uint32_t) * 4);
+ memset(buffers->desc.list + slot*4, 0, sizeof(uint32_t) * 4);
buffers->desc.enabled_mask &= ~(1llu << slot);
}
- buffers->desc.dirty_mask |= 1llu << slot;
- si_update_descriptors(sctx, &buffers->desc);
+ buffers->desc.list_dirty = true;
}
/* RING BUFFERS */
struct pipe_resource *buffer,
unsigned stride, unsigned num_records,
bool add_tid, bool swizzle,
- unsigned element_size, unsigned index_stride)
+ unsigned element_size, unsigned index_stride, uint64_t offset)
{
struct si_context *sctx = (struct si_context *)ctx;
struct si_buffer_resources *buffers = &sctx->rw_buffers[shader];
/* The stride field in the resource descriptor has 14 bits */
assert(stride < (1 << 14));
- assert(slot < buffers->num_buffers);
+ assert(slot < buffers->desc.num_elements);
pipe_resource_reference(&buffers->buffers[slot], NULL);
if (buffer) {
uint64_t va;
- va = r600_resource(buffer)->gpu_address;
+ va = r600_resource(buffer)->gpu_address + offset;
switch (element_size) {
default:
break;
}
+ if (sctx->b.chip_class >= VI && stride)
+ num_records *= stride;
+
/* Set the descriptor. */
- uint32_t *desc = buffers->desc_data[slot];
+ uint32_t *desc = buffers->desc.list + slot*4;
desc[0] = va;
desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) |
S_008F04_STRIDE(stride) |
S_008F0C_ADD_TID_ENABLE(add_tid);
pipe_resource_reference(&buffers->buffers[slot], buffer);
- r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx,
+ radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
(struct r600_resource*)buffer,
buffers->shader_usage, buffers->priority);
buffers->desc.enabled_mask |= 1llu << slot;
} else {
/* Clear the descriptor. */
- memset(buffers->desc_data[slot], 0, sizeof(uint32_t) * 4);
+ memset(buffers->desc.list + slot*4, 0, sizeof(uint32_t) * 4);
buffers->desc.enabled_mask &= ~(1llu << slot);
}
- buffers->desc.dirty_mask |= 1llu << slot;
- si_update_descriptors(sctx, &buffers->desc);
+ buffers->desc.list_dirty = true;
}
/* STREAMOUT BUFFERS */
* VS_PARTIAL_FLUSH is required if the buffers are going to be
* used as an input immediately.
*/
- sctx->b.flags |= SI_CONTEXT_INV_KCACHE |
- SI_CONTEXT_INV_TC_L1 |
+ sctx->b.flags |= SI_CONTEXT_INV_SMEM_L1 |
+ SI_CONTEXT_INV_VMEM_L1 |
SI_CONTEXT_VS_PARTIAL_FLUSH;
}
struct pipe_resource *buffer = targets[i]->buffer;
uint64_t va = r600_resource(buffer)->gpu_address;
- /* Set the descriptor. */
- uint32_t *desc = buffers->desc_data[bufidx];
+ /* Set the descriptor.
+ *
+ * On VI, the format must be non-INVALID, otherwise
+ * the buffer will be considered not bound and store
+ * instructions will be no-ops.
+ */
+ uint32_t *desc = buffers->desc.list + bufidx*4;
desc[0] = va;
desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32);
desc[2] = 0xffffffff;
desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
- S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W);
+ S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
+ S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
/* Set the resource. */
pipe_resource_reference(&buffers->buffers[bufidx],
buffer);
- r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx,
+ radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
(struct r600_resource*)buffer,
buffers->shader_usage, buffers->priority);
buffers->desc.enabled_mask |= 1llu << bufidx;
} else {
/* Clear the descriptor and unset the resource. */
- memset(buffers->desc_data[bufidx], 0,
+ memset(buffers->desc.list + bufidx*4, 0,
sizeof(uint32_t) * 4);
pipe_resource_reference(&buffers->buffers[bufidx],
NULL);
buffers->desc.enabled_mask &= ~(1llu << bufidx);
}
- buffers->desc.dirty_mask |= 1llu << bufidx;
}
for (; i < old_num_targets; i++) {
bufidx = SI_SO_BUF_OFFSET + i;
/* Clear the descriptor and unset the resource. */
- memset(buffers->desc_data[bufidx], 0, sizeof(uint32_t) * 4);
+ memset(buffers->desc.list + bufidx*4, 0, sizeof(uint32_t) * 4);
pipe_resource_reference(&buffers->buffers[bufidx], NULL);
buffers->desc.enabled_mask &= ~(1llu << bufidx);
- buffers->desc.dirty_mask |= 1llu << bufidx;
}
- si_update_descriptors(sctx, &buffers->desc);
+ buffers->desc.list_dirty = true;
}
static void si_desc_reset_buffer_offset(struct pipe_context *ctx,
S_008F04_BASE_ADDRESS_HI(va >> 32);
}
+/* TEXTURE METADATA ENABLE/DISABLE */
+
+/* CMASK can be enabled (for fast clear) and disabled (for texture export)
+ * while the texture is bound, possibly by a different context. In that case,
+ * call this function to update compressed_colortex_masks.
+ */
+void si_update_compressed_colortex_masks(struct si_context *sctx)
+{
+ for (int i = 0; i < SI_NUM_SHADERS; ++i) {
+ si_samplers_update_compressed_colortex_mask(&sctx->samplers[i]);
+ si_images_update_compressed_colortex_mask(&sctx->images[i]);
+ }
+}
+
/* BUFFER DISCARD/INVALIDATION */
/* Reallocate a buffer and update all resource bindings where the buffer is
/* Read/Write buffers. */
for (shader = 0; shader < SI_NUM_SHADERS; shader++) {
struct si_buffer_resources *buffers = &sctx->rw_buffers[shader];
- bool found = false;
uint64_t mask = buffers->desc.enabled_mask;
while (mask) {
i = u_bit_scan64(&mask);
if (buffers->buffers[i] == buf) {
- si_desc_reset_buffer_offset(ctx, buffers->desc_data[i],
+ si_desc_reset_buffer_offset(ctx, buffers->desc.list + i*4,
old_va, buf);
+ buffers->desc.list_dirty = true;
- r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx,
+ radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
rbuffer, buffers->shader_usage,
buffers->priority);
- buffers->desc.dirty_mask |= 1llu << i;
- found = true;
-
if (i >= SI_SO_BUF_OFFSET && shader == PIPE_SHADER_VERTEX) {
/* Update the streamout state. */
if (sctx->b.streamout.begin_emitted) {
}
}
}
- if (found) {
- si_update_descriptors(sctx, &buffers->desc);
- }
}
/* Constant buffers. */
for (shader = 0; shader < SI_NUM_SHADERS; shader++) {
struct si_buffer_resources *buffers = &sctx->const_buffers[shader];
- bool found = false;
uint64_t mask = buffers->desc.enabled_mask;
while (mask) {
unsigned i = u_bit_scan64(&mask);
if (buffers->buffers[i] == buf) {
- si_desc_reset_buffer_offset(ctx, buffers->desc_data[i],
+ si_desc_reset_buffer_offset(ctx, buffers->desc.list + i*4,
old_va, buf);
+ buffers->desc.list_dirty = true;
- r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx,
+ radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
rbuffer, buffers->shader_usage,
buffers->priority);
-
- buffers->desc.dirty_mask |= 1llu << i;
- found = true;
}
}
- if (found) {
- si_update_descriptors(sctx, &buffers->desc);
- }
}
/* Texture buffers - update virtual addresses in sampler view descriptors. */
/* Texture buffers - update bindings. */
for (shader = 0; shader < SI_NUM_SHADERS; shader++) {
struct si_sampler_views *views = &sctx->samplers[shader].views;
- bool found = false;
uint64_t mask = views->desc.enabled_mask;
while (mask) {
unsigned i = u_bit_scan64(&mask);
if (views->views[i]->texture == buf) {
- r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx,
- rbuffer, RADEON_USAGE_READ,
- RADEON_PRIO_SHADER_BUFFER_RO);
+ si_desc_reset_buffer_offset(ctx,
+ views->desc.list +
+ i * 16 + 4,
+ old_va, buf);
+ views->desc.list_dirty = true;
- views->desc.dirty_mask |= 1llu << i;
- found = true;
+ radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
+ rbuffer, RADEON_USAGE_READ,
+ RADEON_PRIO_SAMPLER_BUFFER);
}
}
- if (found) {
- si_update_descriptors(sctx, &views->desc);
- }
- }
-}
-
-/* CP DMA */
-
-/* The max number of bytes to copy per packet. */
-#define CP_DMA_MAX_BYTE_COUNT ((1 << 21) - 8)
-
-static void si_clear_buffer(struct pipe_context *ctx, struct pipe_resource *dst,
- unsigned offset, unsigned size, unsigned value,
- bool is_framebuffer)
-{
- struct si_context *sctx = (struct si_context*)ctx;
- unsigned flush_flags, tc_l2_flag;
-
- if (!size)
- return;
-
- /* Mark the buffer range of destination as valid (initialized),
- * so that transfer_map knows it should wait for the GPU when mapping
- * that range. */
- util_range_add(&r600_resource(dst)->valid_buffer_range, offset,
- offset + size);
-
- /* Fallback for unaligned clears. */
- if (offset % 4 != 0 || size % 4 != 0) {
- uint32_t *map = sctx->b.ws->buffer_map(r600_resource(dst)->cs_buf,
- sctx->b.rings.gfx.cs,
- PIPE_TRANSFER_WRITE);
- size /= 4;
- for (unsigned i = 0; i < size; i++)
- *map++ = value;
- return;
- }
-
- uint64_t va = r600_resource(dst)->gpu_address + offset;
-
- /* Flush the caches where the resource is bound. */
- if (is_framebuffer) {
- flush_flags = SI_CONTEXT_FLUSH_AND_INV_FRAMEBUFFER;
- tc_l2_flag = 0;
- } else {
- flush_flags = SI_CONTEXT_INV_TC_L1 |
- (sctx->b.chip_class == SI ? SI_CONTEXT_INV_TC_L2 : 0) |
- SI_CONTEXT_INV_KCACHE;
- tc_l2_flag = sctx->b.chip_class == SI ? 0 : CIK_CP_DMA_USE_L2;
- }
-
- sctx->b.flags |= SI_CONTEXT_PS_PARTIAL_FLUSH |
- flush_flags;
-
- while (size) {
- unsigned byte_count = MIN2(size, CP_DMA_MAX_BYTE_COUNT);
- unsigned dma_flags = tc_l2_flag;
-
- si_need_cs_space(sctx, 7 + (sctx->b.flags ? sctx->cache_flush.num_dw : 0),
- FALSE);
-
- /* This must be done after need_cs_space. */
- r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx,
- (struct r600_resource*)dst, RADEON_USAGE_WRITE,
- RADEON_PRIO_MIN);
-
- /* Flush the caches for the first copy only.
- * Also wait for the previous CP DMA operations. */
- if (sctx->b.flags) {
- si_emit_cache_flush(&sctx->b, NULL);
- dma_flags |= SI_CP_DMA_RAW_WAIT; /* same as WAIT_UNTIL=CP_DMA_IDLE */
- }
-
- /* Do the synchronization after the last copy, so that all data is written to memory. */
- if (size == byte_count)
- dma_flags |= R600_CP_DMA_SYNC;
-
- /* Emit the clear packet. */
- si_emit_cp_dma_clear_buffer(sctx, va, byte_count, value, dma_flags);
-
- size -= byte_count;
- va += byte_count;
- }
-
- /* Flush the caches again in case the 3D engine has been prefetching
- * the resource. */
- sctx->b.flags |= flush_flags;
-
- if (tc_l2_flag)
- r600_resource(dst)->TC_L2_dirty = true;
-}
-
-void si_copy_buffer(struct si_context *sctx,
- struct pipe_resource *dst, struct pipe_resource *src,
- uint64_t dst_offset, uint64_t src_offset, unsigned size,
- bool is_framebuffer)
-{
- unsigned flush_flags, tc_l2_flag;
-
- if (!size)
- return;
-
- /* Mark the buffer range of destination as valid (initialized),
- * so that transfer_map knows it should wait for the GPU when mapping
- * that range. */
- util_range_add(&r600_resource(dst)->valid_buffer_range, dst_offset,
- dst_offset + size);
-
- dst_offset += r600_resource(dst)->gpu_address;
- src_offset += r600_resource(src)->gpu_address;
-
- /* Flush the caches where the resource is bound. */
- if (is_framebuffer) {
- flush_flags = SI_CONTEXT_FLUSH_AND_INV_FRAMEBUFFER;
- tc_l2_flag = 0;
- } else {
- flush_flags = SI_CONTEXT_INV_TC_L1 |
- (sctx->b.chip_class == SI ? SI_CONTEXT_INV_TC_L2 : 0) |
- SI_CONTEXT_INV_KCACHE;
- tc_l2_flag = sctx->b.chip_class == SI ? 0 : CIK_CP_DMA_USE_L2;
}
- sctx->b.flags |= SI_CONTEXT_PS_PARTIAL_FLUSH |
- flush_flags;
+ /* Shader images */
+ for (shader = 0; shader < SI_NUM_SHADERS; ++shader) {
+ struct si_images_info *images = &sctx->images[shader];
+ unsigned mask = images->desc.enabled_mask;
- while (size) {
- unsigned sync_flags = tc_l2_flag;
- unsigned byte_count = MIN2(size, CP_DMA_MAX_BYTE_COUNT);
-
- si_need_cs_space(sctx, 7 + (sctx->b.flags ? sctx->cache_flush.num_dw : 0), FALSE);
-
- /* Flush the caches for the first copy only. Also wait for old CP DMA packets to complete. */
- if (sctx->b.flags) {
- si_emit_cache_flush(&sctx->b, NULL);
- sync_flags |= SI_CP_DMA_RAW_WAIT;
- }
-
- /* Do the synchronization after the last copy, so that all data is written to memory. */
- if (size == byte_count) {
- sync_flags |= R600_CP_DMA_SYNC;
+ while (mask) {
+ unsigned i = u_bit_scan(&mask);
+
+ if (images->views[i].resource == buf) {
+ si_desc_reset_buffer_offset(
+ ctx, images->desc.list + i * 8 + 4,
+ old_va, buf);
+ images->desc.list_dirty = true;
+
+ radeon_add_to_buffer_list(
+ &sctx->b, &sctx->b.gfx, rbuffer,
+ RADEON_USAGE_READWRITE,
+ RADEON_PRIO_SAMPLER_BUFFER);
+ }
}
-
- /* This must be done after r600_need_cs_space. */
- r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx, (struct r600_resource*)src,
- RADEON_USAGE_READ, RADEON_PRIO_MIN);
- r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx, (struct r600_resource*)dst,
- RADEON_USAGE_WRITE, RADEON_PRIO_MIN);
-
- si_emit_cp_dma_copy_buffer(sctx, dst_offset, src_offset, byte_count, sync_flags);
-
- size -= byte_count;
- src_offset += byte_count;
- dst_offset += byte_count;
}
-
- /* Flush the caches again in case the 3D engine has been prefetching
- * the resource. */
- sctx->b.flags |= flush_flags;
-
- if (tc_l2_flag)
- r600_resource(dst)->TC_L2_dirty = true;
}
/* SHADER USER DATA */
sctx->const_buffers[shader].desc.pointer_dirty = true;
sctx->rw_buffers[shader].desc.pointer_dirty = true;
sctx->samplers[shader].views.desc.pointer_dirty = true;
- sctx->samplers[shader].states.desc.pointer_dirty = true;
if (shader == PIPE_SHADER_VERTEX)
sctx->vertex_buffers.pointer_dirty = true;
- sctx->shader_userdata.atom.dirty = true;
+ si_mark_atom_dirty(sctx, &sctx->shader_userdata.atom);
}
static void si_shader_userdata_begin_new_cs(struct si_context *sctx)
void si_shader_change_notify(struct si_context *sctx)
{
/* VS can be bound as VS, ES, or LS. */
- if (sctx->tes_shader)
+ if (sctx->tes_shader.cso)
si_set_user_data_base(sctx, PIPE_SHADER_VERTEX,
R_00B530_SPI_SHADER_USER_DATA_LS_0);
- else if (sctx->gs_shader)
+ else if (sctx->gs_shader.cso)
si_set_user_data_base(sctx, PIPE_SHADER_VERTEX,
R_00B330_SPI_SHADER_USER_DATA_ES_0);
else
R_00B130_SPI_SHADER_USER_DATA_VS_0);
/* TES can be bound as ES, VS, or not bound. */
- if (sctx->tes_shader) {
- if (sctx->gs_shader)
+ if (sctx->tes_shader.cso) {
+ if (sctx->gs_shader.cso)
si_set_user_data_base(sctx, PIPE_SHADER_TESS_EVAL,
R_00B330_SPI_SHADER_USER_DATA_ES_0);
else
struct si_descriptors *desc,
unsigned sh_base, bool keep_dirty)
{
- struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs;
+ struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
uint64_t va;
- if (!desc->pointer_dirty)
+ if (!desc->pointer_dirty || !desc->buffer)
return;
va = desc->buffer->gpu_address +
- desc->current_context_id * desc->context_size +
desc->buffer_offset;
radeon_emit(cs, PKT3(PKT3_SET_SH_REG, 2, 0));
desc->pointer_dirty = keep_dirty;
}
-static void si_emit_shader_userdata(struct si_context *sctx,
- struct r600_atom *atom)
+void si_emit_shader_userdata(struct si_context *sctx, struct r600_atom *atom)
{
unsigned i;
uint32_t *sh_base = sctx->shader_userdata.sh_base;
- if (sctx->gs_shader) {
+ if (sctx->gs_shader.cso) {
/* The VS copy shader needs these for clipping, streamout, and rings. */
unsigned vs_base = R_00B130_SPI_SHADER_USER_DATA_VS_0;
unsigned i = PIPE_SHADER_VERTEX;
si_emit_shader_pointer(sctx, &sctx->const_buffers[i].desc, vs_base, true);
si_emit_shader_pointer(sctx, &sctx->rw_buffers[i].desc, vs_base, true);
- /* The TESSEVAL shader needs this for the ESGS ring buffer. */
- si_emit_shader_pointer(sctx, &sctx->rw_buffers[i].desc,
- R_00B330_SPI_SHADER_USER_DATA_ES_0, true);
- } else if (sctx->tes_shader) {
+ if (sctx->tes_shader.cso) {
+ /* The TESSEVAL shader needs this for the ESGS ring buffer. */
+ si_emit_shader_pointer(sctx, &sctx->rw_buffers[i].desc,
+ R_00B330_SPI_SHADER_USER_DATA_ES_0, true);
+ }
+ } else if (sctx->tes_shader.cso) {
/* The TESSEVAL shader needs this for streamout. */
si_emit_shader_pointer(sctx, &sctx->rw_buffers[PIPE_SHADER_VERTEX].desc,
R_00B130_SPI_SHADER_USER_DATA_VS_0, true);
si_emit_shader_pointer(sctx, &sctx->const_buffers[i].desc, base, false);
si_emit_shader_pointer(sctx, &sctx->samplers[i].views.desc, base, false);
- si_emit_shader_pointer(sctx, &sctx->samplers[i].states.desc, base, false);
+ si_emit_shader_pointer(sctx, &sctx->images[i].desc, base, false);
}
si_emit_shader_pointer(sctx, &sctx->vertex_buffers, sh_base[PIPE_SHADER_VERTEX], false);
}
-/* INIT/DEINIT */
+/* INIT/DEINIT/UPLOAD */
void si_init_all_descriptors(struct si_context *sctx)
{
int i;
for (i = 0; i < SI_NUM_SHADERS; i++) {
- si_init_buffer_resources(sctx, &sctx->const_buffers[i],
- SI_NUM_CONST_BUFFERS, SI_SGPR_CONST,
- RADEON_USAGE_READ, RADEON_PRIO_SHADER_BUFFER_RO);
- si_init_buffer_resources(sctx, &sctx->rw_buffers[i],
+ si_init_buffer_resources(&sctx->const_buffers[i],
+ SI_NUM_CONST_BUFFERS, SI_SGPR_CONST_BUFFERS,
+ RADEON_USAGE_READ, RADEON_PRIO_CONST_BUFFER);
+ si_init_buffer_resources(&sctx->rw_buffers[i],
SI_NUM_RW_BUFFERS, SI_SGPR_RW_BUFFERS,
- RADEON_USAGE_READWRITE, RADEON_PRIO_SHADER_RESOURCE_RW);
-
- si_init_sampler_views(sctx, &sctx->samplers[i].views);
+ RADEON_USAGE_READWRITE, RADEON_PRIO_RINGS_STREAMOUT);
- si_init_descriptors(sctx, &sctx->samplers[i].states.desc,
- SI_SGPR_SAMPLER, 4, SI_NUM_SAMPLER_STATES,
- si_emit_sampler_states);
+ si_init_descriptors(&sctx->samplers[i].views.desc,
+ SI_SGPR_SAMPLERS, 16, SI_NUM_SAMPLERS,
+ null_texture_descriptor);
- sctx->atoms.s.const_buffers[i] = &sctx->const_buffers[i].desc.atom;
- sctx->atoms.s.rw_buffers[i] = &sctx->rw_buffers[i].desc.atom;
- sctx->atoms.s.sampler_views[i] = &sctx->samplers[i].views.desc.atom;
- sctx->atoms.s.sampler_states[i] = &sctx->samplers[i].states.desc.atom;
+ si_init_descriptors(&sctx->images[i].desc,
+ SI_SGPR_IMAGES, 8, SI_NUM_IMAGES,
+ null_image_descriptor);
}
- si_init_descriptors(sctx, &sctx->vertex_buffers, SI_SGPR_VERTEX_BUFFER,
+ si_init_descriptors(&sctx->vertex_buffers, SI_SGPR_VERTEX_BUFFERS,
4, SI_NUM_VERTEX_BUFFERS, NULL);
/* Set pipe_context functions. */
+ sctx->b.b.bind_sampler_states = si_bind_sampler_states;
+ sctx->b.b.set_shader_images = si_set_shader_images;
sctx->b.b.set_constant_buffer = si_set_constant_buffer;
sctx->b.b.set_sampler_views = si_set_sampler_views;
sctx->b.b.set_stream_output_targets = si_set_streamout_targets;
- sctx->b.clear_buffer = si_clear_buffer;
sctx->b.invalidate_buffer = si_invalidate_buffer;
/* Shader user data. */
- sctx->atoms.s.shader_userdata = &sctx->shader_userdata.atom;
- sctx->shader_userdata.atom.emit = (void*)si_emit_shader_userdata;
-
- /* Upper bound, 4 pointers per shader, +1 for vertex buffers, +2 for the VS copy shader. */
- sctx->shader_userdata.atom.num_dw = (SI_NUM_SHADERS * 4 + 1 + 2) * 4;
+ si_init_atom(sctx, &sctx->shader_userdata.atom, &sctx->atoms.s.shader_userdata,
+ si_emit_shader_userdata);
/* Set default and immutable mappings. */
si_set_user_data_base(sctx, PIPE_SHADER_VERTEX, R_00B130_SPI_SHADER_USER_DATA_VS_0);
si_set_user_data_base(sctx, PIPE_SHADER_FRAGMENT, R_00B030_SPI_SHADER_USER_DATA_PS_0);
}
+bool si_upload_shader_descriptors(struct si_context *sctx)
+{
+ int i;
+
+ for (i = 0; i < SI_NUM_SHADERS; i++) {
+ if (!si_upload_descriptors(sctx, &sctx->const_buffers[i].desc) ||
+ !si_upload_descriptors(sctx, &sctx->rw_buffers[i].desc) ||
+ !si_upload_descriptors(sctx, &sctx->samplers[i].views.desc) ||
+ !si_upload_descriptors(sctx, &sctx->images[i].desc))
+ return false;
+ }
+ return si_upload_vertex_buffer_descriptors(sctx);
+}
+
void si_release_all_descriptors(struct si_context *sctx)
{
int i;
si_release_buffer_resources(&sctx->const_buffers[i]);
si_release_buffer_resources(&sctx->rw_buffers[i]);
si_release_sampler_views(&sctx->samplers[i].views);
- si_release_descriptors(&sctx->samplers[i].states.desc);
+ si_release_image_views(&sctx->images[i]);
}
si_release_descriptors(&sctx->vertex_buffers);
}
si_buffer_resources_begin_new_cs(sctx, &sctx->const_buffers[i]);
si_buffer_resources_begin_new_cs(sctx, &sctx->rw_buffers[i]);
si_sampler_views_begin_new_cs(sctx, &sctx->samplers[i].views);
- si_sampler_states_begin_new_cs(sctx, &sctx->samplers[i].states);
+ si_image_views_begin_new_cs(sctx, &sctx->images[i]);
}
si_vertex_buffers_begin_new_cs(sctx);
si_shader_userdata_begin_new_cs(sctx);