if (vi_dcc_enabled(tex, first_level)) {
meta_va = (!tex->dcc_separate_buffer ? tex->buffer.gpu_address : 0) +
- tex->dcc_offset;
+ tex->surface.dcc_offset;
if (sscreen->info.chip_class == GFX8) {
meta_va += base_level_info->dcc_offset;
}

unsigned dcc_tile_swizzle = tex->surface.tile_swizzle << 8;
dcc_tile_swizzle &= tex->surface.dcc_alignment - 1;
meta_va |= dcc_tile_swizzle;
- } else if (vi_tc_compat_htile_enabled(tex, first_level)) {
- meta_va = tex->buffer.gpu_address + tex->htile_offset;
+ } else if (vi_tc_compat_htile_enabled(tex, first_level,
+ is_stencil ? PIPE_MASK_S : PIPE_MASK_Z)) {
+ meta_va = tex->buffer.gpu_address + tex->surface.htile_offset;
}
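/* The block above composes meta_va: the base of the DCC (color) or
* HTILE (depth) metadata. On GFX8 the per-level DCC offset is added
* on top, and on all chips the surface tile swizzle is OR'ed into
* the low address bits (shifted up by 8 and masked so it stays below
* dcc_alignment).
*/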
if (meta_va) {
struct gfx9_surf_meta_flags meta;
- if (tex->dcc_offset)
+ if (tex->surface.dcc_offset)
meta = tex->surface.u.gfx9.dcc;
else
meta = tex->surface.u.gfx9.htile;
}
state[7] = meta_va >> 16;
- } else if (sscreen->info.chip_class >= GFX9) {
+ } else if (sscreen->info.chip_class == GFX9) {
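/* GFX10 now has its own descriptor path above, so this branch must
* match GFX9 exactly rather than "GFX9 and newer". */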
state[3] &= C_008F1C_SW_MODE;
state[4] &= C_008F20_PITCH;
if (meta_va) {
struct gfx9_surf_meta_flags meta;
- if (tex->dcc_offset)
+ if (tex->surface.dcc_offset)
meta = tex->surface.u.gfx9.dcc;
else
meta = tex->surface.u.gfx9.htile;
static bool color_needs_decompression(struct si_texture *tex)
{
return tex->surface.fmask_size ||
(tex->dirty_level_mask &&
- (tex->cmask_buffer || tex->dcc_offset));
+ (tex->cmask_buffer || tex->surface.dcc_offset));
}
static bool depth_needs_decompression(struct si_texture *tex)
samplers->needs_color_decompress_mask &= ~(1u << slot);
}
- if (tex->dcc_offset &&
+ if (tex->surface.dcc_offset &&
p_atomic_read(&tex->framebuffers_bound))
sctx->need_check_render_feedback = true;
}
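/* The check above guards against render feedback loops: a texture
* that still has DCC while also being bound as a color buffer needs
* the context to re-check feedback state on the next draw. */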
if (res->b.b.target != PIPE_BUFFER)
return;
- util_range_add(&res->valid_buffer_range,
+ util_range_add(&res->b.b, &res->valid_buffer_range,
view->u.buf.offset,
view->u.buf.offset + view->u.buf.size);
}
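/* util_range_add() now takes the owning pipe_resource as its first
* argument; the range itself still just tracks which bytes of the
* buffer hold valid data. A minimal sketch of the updated call,
* assuming a buffer "res" with at least 64 bytes:
*
*   util_range_add(&res->b.b, &res->valid_buffer_range, 0, 64);
*   // bytes [0, 64) are now marked valid
*/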
bool uses_dcc = vi_dcc_enabled(tex, level);
unsigned access = view->access;
- /* Clear the write flag when writes can't occur.
- * Note that DCC_DECOMPRESS for MSAA doesn't work in some cases,
- * so we don't wanna trigger it.
- */
- if (tex->is_depth ||
- (!fmask_desc && tex->surface.fmask_size != 0)) {
- assert(!"Z/S and MSAA image stores are not supported");
- access &= ~PIPE_IMAGE_ACCESS_WRITE;
- }
-
assert(!tex->is_depth);
- assert(fmask_desc || tex->surface.fmask_size == 0);
+ assert(fmask_desc || tex->surface.fmask_offset == 0);
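/* The removed block above silently stripped the write flag for
* unsupported Z/S and MSAA image stores; callers are now expected to
* never bind such images, so the workaround became hard asserts. */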
if (uses_dcc && !skip_decompress &&
- (view->access & PIPE_IMAGE_ACCESS_WRITE ||
- !vi_dcc_formats_compatible(res->b.b.format, view->format))) {
+ (access & PIPE_IMAGE_ACCESS_WRITE ||
+ !vi_dcc_formats_compatible(screen, res->b.b.format, view->format))) {
/* If DCC can't be disabled, at least decompress it.
* The decompression is relatively cheap if the surface
* has been decompressed already.
*/
struct si_images *images = &ctx->images[shader];
struct si_descriptors *descs = si_sampler_and_image_descriptors(ctx, shader);
struct si_resource *res;
- unsigned desc_slot = si_get_image_slot(slot);
- uint32_t *desc = descs->list + desc_slot * 8;
if (!view || !view->resource) {
si_disable_shader_image(ctx, shader, slot);
if (&images->views[slot] != view)
util_copy_image_view(&images->views[slot], view);
- si_set_shader_image_desc(ctx, view, skip_decompress, desc, NULL);
+ si_set_shader_image_desc(ctx, view, skip_decompress,
+ descs->list + si_get_image_slot(slot) * 8,
+ descs->list + si_get_image_slot(slot + SI_NUM_IMAGES) * 8);
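/* Each image now occupies two 8-dword descriptor slots: the image
* view in the first, and the FMASK descriptor for MSAA resources in
* a second slot SI_NUM_IMAGES entries away, replacing the old single
* slot with a NULL fmask pointer. */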
if (res->b.b.target == PIPE_BUFFER ||
view->shader_access & SI_IMAGE_ACCESS_AS_BUFFER) {
/* See whether FBFETCH is used and color buffer 0 is set. */
if (sctx->ps_shader.cso &&
- sctx->ps_shader.cso->info.opcode_count[TGSI_OPCODE_FBFETCH] &&
+ sctx->ps_shader.cso->info.uses_fbfetch &&
sctx->framebuffer.state.nr_cbufs &&
sctx->framebuffer.state.cbufs[0])
surf = sctx->framebuffer.state.cbufs[0];
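/* uses_fbfetch replaces counting TGSI FBFETCH opcodes, presumably so
* the check no longer depends on the shader IR being TGSI. */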
int64_t offset = (int64_t)((int)vb->buffer_offset) +
velems->src_offset[i];
+
+ if (offset >= buf->b.b.width0) {
+ assert(offset < buf->b.b.width0);
+ memset(desc, 0, 16);
+ continue;
+ }
+
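/* The guard above intentionally asserts the opposite of its branch
* condition: debug builds crash loudly on an out-of-range
* buffer_offset, while release builds write a null descriptor and
* skip the slot so num_records below can't go negative. */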
uint64_t va = buf->gpu_address + offset;
int64_t num_records = (int64_t)buf->b.b.width0 - offset;
buffers->enabled_mask |= 1u << slot;
sctx->descriptors_dirty |= 1u << descriptors_idx;
- util_range_add(&buf->valid_buffer_range, sbuffer->buffer_offset,
+ util_range_add(&buf->b.b, &buf->valid_buffer_range, sbuffer->buffer_offset,
sbuffer->buffer_offset + sbuffer->buffer_size);
}
struct si_descriptors *desc = &sctx->bindless_descriptors;
unsigned desc_slot_offset = img_handle->desc_slot * 16;
struct pipe_image_view *view = &img_handle->view;
- uint32_t desc_list[8];
+ struct pipe_resource *res = view->resource;
+ uint32_t image_desc[16];
+ unsigned desc_size = (res->nr_samples >= 2 ? 16 : 8) * 4;
- if (view->resource->target == PIPE_BUFFER)
+ if (res->target == PIPE_BUFFER)
return;
- memcpy(desc_list, desc->list + desc_slot_offset,
- sizeof(desc_list));
+ memcpy(image_desc, desc->list + desc_slot_offset, desc_size);
si_set_shader_image_desc(sctx, view, true,
- desc->list + desc_slot_offset, NULL);
+ desc->list + desc_slot_offset,
+ desc->list + desc_slot_offset + 8);
- if (memcmp(desc_list, desc->list + desc_slot_offset,
- sizeof(desc_list))) {
+ if (memcmp(image_desc, desc->list + desc_slot_offset, desc_size)) {
img_handle->desc_dirty = true;
sctx->bindless_descriptors_dirty = true;
}
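/* MSAA images carry an extra 8-dword FMASK descriptor, so 16 dwords
* are snapshotted and compared for multisampled resources and 8
* otherwise; desc_size is in bytes, hence the "* 4". */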
if (sctx->chip_class >= GFX10) {
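/* Both register names encode offset 0xB430; GFX10 calls it HS_0
* where GFX9 called it LS_0, which is also why the branch below
* matches chip_class == GFX9 exactly. */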
si_set_user_data_base(sctx, PIPE_SHADER_VERTEX,
R_00B430_SPI_SHADER_USER_DATA_HS_0);
- } else if (sctx->chip_class >= GFX9) {
+ } else if (sctx->chip_class == GFX9) {
si_set_user_data_base(sctx, PIPE_SHADER_VERTEX,
R_00B430_SPI_SHADER_USER_DATA_LS_0);
} else {
short shader_userdata_rel_index,
unsigned num_elements)
{
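/* desc_slot is only consumed by an assert (not visible in this
* hunk); ASSERTED appears to be the newer spelling of MAYBE_UNUSED
* for such variables, silencing unused-variable warnings in release
* builds. */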
- MAYBE_UNUSED unsigned desc_slot;
+ ASSERTED unsigned desc_slot;
si_init_descriptors(desc, shader_userdata_rel_index, 16, num_elements);
sctx->bindless_descriptors.num_active_slots = num_elements;
tex_handle);
}
- if (tex->dcc_offset &&
+ if (tex->surface.dcc_offset &&
p_atomic_read(&tex->framebuffers_bound))
sctx->need_check_render_feedback = true;
{
struct si_context *sctx = (struct si_context *)ctx;
struct si_image_handle *img_handle;
- uint32_t desc_list[8];
+ uint32_t desc_list[16];
uint64_t handle;
if (!view || !view->resource)
return 0;
memset(desc_list, 0, sizeof(desc_list));
- si_init_descriptor_list(&desc_list[0], 8, 1, null_image_descriptor);
+ si_init_descriptor_list(&desc_list[0], 8, 2, null_image_descriptor);
- si_set_shader_image_desc(sctx, view, false, &desc_list[0], NULL);
+ si_set_shader_image_desc(sctx, view, false, &desc_list[0], &desc_list[8]);
img_handle->desc_slot = si_create_bindless_descriptor(sctx, desc_list,
sizeof(desc_list));
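/* Bindless image handles likewise grow to a 16-dword descriptor:
* two 8-dword null image descriptors up front, then the image view
* written to dwords 0-7 and the FMASK view to dwords 8-15. */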
bool is_2nd = sctx->chip_class >= GFX9 &&
(i == PIPE_SHADER_TESS_CTRL ||
i == PIPE_SHADER_GEOMETRY);
- unsigned num_sampler_slots = SI_NUM_IMAGES / 2 + SI_NUM_SAMPLERS;
+ unsigned num_sampler_slots = SI_NUM_IMAGE_SLOTS / 2 + SI_NUM_SAMPLERS;
unsigned num_buffer_slots = SI_NUM_SHADER_BUFFERS + SI_NUM_CONST_BUFFERS;
int rel_dw_offset;
struct si_descriptors *desc;
si_init_descriptors(desc, rel_dw_offset, 16, num_sampler_slots);
int j;
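/* Expected layout, assuming si_init_descriptors() takes the element
* size in dwords: SI_NUM_IMAGE_SLOTS 8-dword image slots first, then
* SI_NUM_SAMPLERS 16-dword sampler slots; the image slots therefore
* count as SI_NUM_IMAGE_SLOTS / 2 of the 16-dword elements in
* num_sampler_slots above. */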
- for (j = 0; j < SI_NUM_IMAGES; j++)
+ for (j = 0; j < SI_NUM_IMAGE_SLOTS; j++)
memcpy(desc->list + j * 8, null_image_descriptor, 8 * 4);
- for (; j < SI_NUM_IMAGES + SI_NUM_SAMPLERS * 2; j++)
+ for (; j < SI_NUM_IMAGE_SLOTS + SI_NUM_SAMPLERS * 2; j++)
memcpy(desc->list + j * 8, null_texture_descriptor, 8 * 4);
}