}
/* use htile only for first level */
- if (rtex->htile_buffer && !level) {
- uint64_t va = rtex->htile_buffer->gpu_address;
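+ /* HTILE now lives in the texture's backing buffer at htile_offset,
+  * so its address is the texture VA plus that offset. */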
+ if (rtex->htile_offset && !level) {
+ uint64_t va = rtex->resource.gpu_address + rtex->htile_offset;
surf->db_htile_data_base = va >> 8;
surf->db_htile_surface = S_028ABC_HTILE_WIDTH(1) |
S_028ABC_HTILE_HEIGHT(1) |
radeon_set_context_reg(cs, R_028ABC_DB_HTILE_SURFACE, a->rsurf->db_htile_surface);
radeon_set_context_reg(cs, R_028AC8_DB_PRELOAD_CONTROL, a->rsurf->db_preload_control);
radeon_set_context_reg(cs, R_028014_DB_HTILE_DATA_BASE, a->rsurf->db_htile_data_base);
- reloc_idx = radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, rtex->htile_buffer,
+ reloc_idx = radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, &rtex->resource,
RADEON_USAGE_READWRITE, RADEON_PRIO_HTILE);
radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
radeon_emit(cs, reloc_idx);
* disable fast clear for texture array.
*/
/* Only use htile for first level */
- if (rtex->htile_buffer && !level &&
+ if (rtex->htile_offset && !level &&
fb->zsbuf->u.tex.first_layer == 0 &&
fb->zsbuf->u.tex.last_layer == util_max_layer(&rtex->resource.b.b, level)) {
if (rtex->depth_clear_value != depth) {
surf->db_prefetch_limit = (rtex->surface.u.legacy.level[level].nblk_y / 8) - 1;
/* use htile only for first level */
- if (rtex->htile_buffer && !level) {
- surf->db_htile_data_base = 0;
+ if (rtex->htile_offset && !level) {
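+ /* Only the offset is programmed here; the buffer's base address
+  * is applied through the relocation emitted with this state. */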
+ surf->db_htile_data_base = rtex->htile_offset >> 8;
surf->db_htile_surface = S_028D24_HTILE_WIDTH(1) |
S_028D24_HTILE_HEIGHT(1) |
S_028D24_FULL_CACHE(1);
radeon_set_context_reg(cs, R_02802C_DB_DEPTH_CLEAR, fui(rtex->depth_clear_value));
radeon_set_context_reg(cs, R_028D24_DB_HTILE_SURFACE, a->rsurf->db_htile_surface);
radeon_set_context_reg(cs, R_028014_DB_HTILE_DATA_BASE, a->rsurf->db_htile_data_base);
- reloc_idx = radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, rtex->htile_buffer,
+ reloc_idx = radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, &rtex->resource,
RADEON_USAGE_READWRITE, RADEON_PRIO_HTILE);
radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
radeon_emit(cs, reloc_idx);
unsigned last_msaa_resolve_target_micro_mode;
/* Depth buffer compression and fast clear. */
- struct r600_resource *htile_buffer;
+ uint64_t htile_offset; /* 0 if HTILE is not allocated */
bool tc_compatible_htile;
bool depth_cleared; /* if it was cleared at least once */
float depth_clear_value;
rtex->cb_color_info = new_tex->cb_color_info;
rtex->cmask = new_tex->cmask; /* needed even without CMASK */
- assert(!rtex->htile_buffer);
+ assert(!rtex->htile_offset);
assert(!rtex->cmask.size);
assert(!rtex->fmask.size);
assert(!rtex->dcc_offset);
r600_texture_reference(&rtex->flushed_depth_texture, NULL);
- r600_resource_reference(&rtex->htile_buffer, NULL);
if (rtex->cmask_buffer != &rtex->resource) {
r600_resource_reference(&rtex->cmask_buffer, NULL);
}
static void r600_texture_allocate_htile(struct r600_common_screen *rscreen,
struct r600_texture *rtex)
{
- uint32_t clear_value;
-
- if (rscreen->chip_class >= GFX9 || rtex->tc_compatible_htile) {
- clear_value = 0x0000030F;
- } else {
+ if (rscreen->chip_class <= VI && !rtex->tc_compatible_htile)
r600_texture_get_htile_size(rscreen, rtex);
- clear_value = 0;
- }
if (!rtex->surface.htile_size)
return;
- rtex->htile_buffer = (struct r600_resource*)
- r600_aligned_buffer_create(&rscreen->b,
- R600_RESOURCE_FLAG_UNMAPPABLE,
- PIPE_USAGE_DEFAULT,
- rtex->surface.htile_size,
- rtex->surface.htile_alignment);
- if (rtex->htile_buffer == NULL) {
- /* this is not a fatal error as we can still keep rendering
- * without htile buffer */
- R600_ERR("Failed to create buffer object for htile buffer.\n");
- } else {
- r600_screen_clear_buffer(rscreen, &rtex->htile_buffer->b.b,
- 0, rtex->surface.htile_size,
- clear_value);
- }
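+ /* Suballocate HTILE at the end of the texture buffer instead of
+  * creating a separate buffer object for it. */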
+ rtex->htile_offset = align64(rtex->size, rtex->surface.htile_alignment);
+ rtex->size = rtex->htile_offset + rtex->surface.htile_size;
}
void r600_print_texture_info(struct r600_common_screen *rscreen,
rtex->surface.u.gfx9.cmask.pipe_aligned);
}
- if (rtex->htile_buffer) {
- fprintf(f, " HTile: size=%u, alignment=%u, "
+ if (rtex->htile_offset) {
+ fprintf(f, " HTile: offset=%"PRIu64", size=%"PRIu64", alignment=%u, "
"rb_aligned=%u, pipe_aligned=%u\n",
- rtex->htile_buffer->b.b.width0,
- rtex->htile_buffer->buf->alignment,
+ rtex->htile_offset,
+ rtex->surface.htile_size,
+ rtex->surface.htile_alignment,
rtex->surface.u.gfx9.htile.rb_aligned,
rtex->surface.u.gfx9.htile.pipe_aligned);
}
rtex->cmask.offset, rtex->cmask.size, rtex->cmask.alignment,
rtex->cmask.slice_tile_max);
- if (rtex->htile_buffer)
- fprintf(f, " HTile: size=%u, alignment=%u, TC_compatible = %u\n",
- rtex->htile_buffer->b.b.width0,
- rtex->htile_buffer->buf->alignment,
+ if (rtex->htile_offset)
+ fprintf(f, " HTile: offset=%"PRIu64", size=%"PRIu64", "
+ "alignment=%u, TC_compatible = %u\n",
+ rtex->htile_offset, rtex->surface.htile_size,
+ rtex->surface.htile_alignment,
rtex->tc_compatible_htile);
if (rtex->dcc_offset) {
rtex->cmask.offset, rtex->cmask.size,
0xCCCCCCCC);
}
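+ /* HTILE shares the texture buffer, so initialize it here as well. */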
+ if (rtex->htile_offset) {
+ uint32_t clear_value = 0;
+
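+ /* 0x0000030F is the fully expanded HTILE state, which GFX9 and
+  * TC-compatible HTILE expect after allocation. */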
+ if (rscreen->chip_class >= GFX9 || rtex->tc_compatible_htile)
+ clear_value = 0x0000030F;
+
+ r600_screen_clear_buffer(rscreen, &rtex->resource.b.b,
+ rtex->htile_offset,
+ rtex->surface.htile_size,
+ clear_value);
+ }
/* Initialize DCC only if the texture is not being imported. */
if (!buf && rtex->dcc_offset) {
}
}
- if (zstex && zstex->htile_buffer &&
+ if (zstex && zstex->htile_offset &&
zsbuf->u.tex.level == 0 &&
zsbuf->u.tex.first_layer == 0 &&
zsbuf->u.tex.last_layer == util_max_layer(&zstex->resource.b.b, 0)) {
rtex->dcc_separate_buffer, usage,
RADEON_PRIO_DCC, check_mem);
}
-
- if (rtex->htile_buffer &&
- rtex->tc_compatible_htile) {
- radeon_add_to_buffer_list_check_mem(&sctx->b, &sctx->b.gfx,
- rtex->htile_buffer, usage,
- RADEON_PRIO_HTILE, check_mem);
- }
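+ /* No separate relocation is needed for TC-compatible HTILE anymore:
+  * it shares the texture's buffer. */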
}
static void si_sampler_views_begin_new_cs(struct si_context *sctx,
if (sscreen->b.chip_class <= VI)
meta_va += base_level_info->dcc_offset;
} else if (tex->tc_compatible_htile) {
- meta_va = tex->htile_buffer->gpu_address;
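+ /* TC-compatible HTILE is inside the texture buffer as well. */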
+ meta_va = tex->resource.gpu_address + tex->htile_offset;
}
if (meta_va) {
S_02801C_Y_MAX(rtex->resource.b.b.height0 - 1);
/* Only use HTILE for the first level. */
- if (rtex->htile_buffer && !level) {
+ if (rtex->htile_offset && !level) {
z_info |= S_028038_TILE_SURFACE_ENABLE(1) |
S_028038_ALLOW_EXPCLEAR(1);
s_info |= S_02803C_TILE_STENCIL_DISABLE(1);
}
- surf->db_htile_data_base = rtex->htile_buffer->gpu_address >> 8;
+ surf->db_htile_data_base = (rtex->resource.gpu_address +
+ rtex->htile_offset) >> 8;
surf->db_htile_surface = S_028ABC_FULL_CACHE(1) |
S_028ABC_PIPE_ALIGNED(rtex->surface.u.gfx9.htile.pipe_aligned) |
S_028ABC_RB_ALIGNED(rtex->surface.u.gfx9.htile.rb_aligned);
levelinfo->nblk_y) / 64 - 1);
/* Only use HTILE for the first level. */
- if (rtex->htile_buffer && !level) {
+ if (rtex->htile_offset && !level) {
z_info |= S_028040_TILE_SURFACE_ENABLE(1) |
S_028040_ALLOW_EXPCLEAR(1);
s_info |= S_028044_TILE_STENCIL_DISABLE(1);
}
- surf->db_htile_data_base = rtex->htile_buffer->gpu_address >> 8;
+ surf->db_htile_data_base = (rtex->resource.gpu_address +
+ rtex->htile_offset) >> 8;
surf->db_htile_surface = S_028ABC_FULL_CACHE(1);
if (rtex->tc_compatible_htile) {
RADEON_PRIO_DEPTH_BUFFER_MSAA :
RADEON_PRIO_DEPTH_BUFFER);
- if (zb->db_htile_data_base) {
- radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
- rtex->htile_buffer, RADEON_USAGE_READWRITE,
- RADEON_PRIO_HTILE);
- }
-
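+ /* The depth buffer relocation above covers HTILE, which now shares
+  * the same buffer. */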
if (sctx->b.chip_class >= GFX9) {
radeon_set_context_reg_seq(cs, R_028014_DB_HTILE_DATA_BASE, 3);
radeon_emit(cs, zb->db_htile_data_base); /* DB_HTILE_DATA_BASE */