radeonsi: rename r600_texture::resource to buffer

r600_resource could be renamed to si_buffer.
Reviewed-by: Nicolai Hähnle <nicolai.haehnle@amd.com>
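For context, the entire patch is this one field rename; the struct hunk appears further down, next to r600_texture_reference. A minimal sketch of the shape of the change (real fields elided, placeholder struct assumed, not part of this patch):

    /* Sketch only; the real definitions live in the radeonsi headers. */
    struct r600_resource { unsigned placeholder; /* winsys buffer, GPU address, ... */ };

    /* Before: the base object was reached through rtex->resource. */
    struct r600_texture_old { struct r600_resource resource; };

    /* After: the field is named for what it actually is, the backing buffer. */
    struct r600_texture_new { struct r600_resource buffer; };

Every rtex->resource.X access in the hunks below becomes rtex->buffer.X; no behavior changes.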
if (dec->ctx.res)
decode->hw_ctxt_size = dec->ctx.res->buf->size;
- return luma->resource.buf;
+ return luma->buffer.buf;
}
static void rvcn_dec_message_destroy(struct radeon_decoder *dec)
struct r600_texture *rsrc = (struct r600_texture*)src;
struct r600_texture *rdst = (struct r600_texture*)dst;
unsigned bpp = rdst->surface.bpe;
- uint64_t dst_address = rdst->resource.gpu_address +
+ uint64_t dst_address = rdst->buffer.gpu_address +
rdst->surface.u.legacy.level[dst_level].offset;
- uint64_t src_address = rsrc->resource.gpu_address +
+ uint64_t src_address = rsrc->buffer.gpu_address +
rsrc->surface.u.legacy.level[src_level].offset;
unsigned dst_mode = rdst->surface.u.legacy.level[dst_level].mode;
unsigned src_mode = rsrc->surface.u.legacy.level[src_level].mode;
unsigned src_pitch = rsrc->surface.u.legacy.level[src_level].nblk_x;
uint64_t dst_slice_pitch = ((uint64_t)rdst->surface.u.legacy.level[dst_level].slice_size_dw * 4) / bpp;
uint64_t src_slice_pitch = ((uint64_t)rsrc->surface.u.legacy.level[src_level].slice_size_dw * 4) / bpp;
- unsigned dst_width = minify_as_blocks(rdst->resource.b.b.width0,
+ unsigned dst_width = minify_as_blocks(rdst->buffer.b.b.width0,
dst_level, rdst->surface.blk_w);
- unsigned src_width = minify_as_blocks(rsrc->resource.b.b.width0,
+ unsigned src_width = minify_as_blocks(rsrc->buffer.b.b.width0,
src_level, rsrc->surface.blk_w);
- unsigned dst_height = minify_as_blocks(rdst->resource.b.b.height0,
+ unsigned dst_height = minify_as_blocks(rdst->buffer.b.b.height0,
dst_level, rdst->surface.blk_h);
- unsigned src_height = minify_as_blocks(rsrc->resource.b.b.height0,
+ unsigned src_height = minify_as_blocks(rsrc->buffer.b.b.height0,
src_level, rsrc->surface.blk_h);
unsigned srcx = src_box->x / rsrc->surface.blk_w;
unsigned srcy = src_box->y / rsrc->surface.blk_h;
assert(dst_level <= dst->last_level);
assert(rdst->surface.u.legacy.level[dst_level].offset +
dst_slice_pitch * bpp * (dstz + src_box->depth) <=
- rdst->resource.buf->size);
+ rdst->buffer.buf->size);
assert(rsrc->surface.u.legacy.level[src_level].offset +
src_slice_pitch * bpp * (srcz + src_box->depth) <=
- rsrc->resource.buf->size);
+ rsrc->buffer.buf->size);
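minify_as_blocks, used for the width/height computations above, presumably minifies a mip dimension and rounds up to whole compressed blocks; a sketch consistent with its call sites (u_minify and DIV_ROUND_UP are Mesa util helpers):

    static unsigned minify_as_blocks(unsigned width, unsigned level, unsigned blk_w)
    {
            /* Shift down to the requested mip level, then round up to whole blocks. */
            width = u_minify(width, level);
            return DIV_ROUND_UP(width, blk_w);
    }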
if (!si_prepare_for_dma_blit(sctx, rdst, dst_level, dstx, dsty,
dstz, rsrc, src_level, src_box))
srcy + copy_height != (1 << 14)))) {
struct radeon_winsys_cs *cs = sctx->dma_cs;
- si_need_dma_space(sctx, 13, &rdst->resource, &rsrc->resource);
+ si_need_dma_space(sctx, 13, &rdst->buffer, &rsrc->buffer);
radeon_emit(cs, CIK_SDMA_PACKET(CIK_SDMA_OPCODE_COPY,
CIK_SDMA_COPY_SUB_OPCODE_LINEAR_SUB_WINDOW, 0) |
struct radeon_winsys_cs *cs = sctx->dma_cs;
uint32_t direction = linear == rdst ? 1u << 31 : 0;
- si_need_dma_space(sctx, 14, &rdst->resource, &rsrc->resource);
+ si_need_dma_space(sctx, 14, &rdst->buffer, &rsrc->buffer);
radeon_emit(cs, CIK_SDMA_PACKET(CIK_SDMA_OPCODE_COPY,
CIK_SDMA_COPY_SUB_OPCODE_TILED_SUB_WINDOW, 0) |
dstx + copy_width != (1 << 14)))) {
struct radeon_winsys_cs *cs = sctx->dma_cs;
- si_need_dma_space(sctx, 15, &rdst->resource, &rsrc->resource);
+ si_need_dma_space(sctx, 15, &rdst->buffer, &rsrc->buffer);
radeon_emit(cs, CIK_SDMA_PACKET(CIK_SDMA_OPCODE_COPY,
CIK_SDMA_COPY_SUB_OPCODE_T2T_SUB_WINDOW, 0));
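All three copies emit the same COPY opcode with a different sub-opcode (linear, tiled, and tiled-to-tiled sub-window); the 13/14/15 passed to si_need_dma_space are the respective packet sizes in dwords. The header macro presumably packs opcode, sub-opcode, and a count into one dword, along these lines (sketch from memory, not part of this patch):

    #define CIK_SDMA_PACKET(op, sub_op, n) ((((n) & 0xFFFF) << 16) | \
                                            (((sub_op) & 0xFF) << 8) | \
                                            (((op) & 0xFF) << 0))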
/* The smaller the mipmap level, the fewer layers there are
* as far as 3D textures are concerned. */
- max_layer = util_max_layer(&src->resource.b.b, level);
+ max_layer = util_max_layer(&src->buffer.b.b, level);
checked_last_layer = MIN2(last_layer, max_layer);
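util_max_layer returns the index of the last layer at a given mip level: a 3D texture's depth (and therefore its layer count) minifies per level, which is what the comment above refers to, while array sizes do not. A sketch matching Gallium's u_inlines.h helper:

    static inline unsigned
    util_max_layer(const struct pipe_resource *r, unsigned level)
    {
            switch (r->target) {
            case PIPE_TEXTURE_3D:
                    return u_minify(r->depth0, level) - 1; /* depth minifies */
            case PIPE_TEXTURE_CUBE:
            case PIPE_TEXTURE_1D_ARRAY:
            case PIPE_TEXTURE_2D_ARRAY:
            case PIPE_TEXTURE_CUBE_ARRAY:
                    return r->array_size - 1;              /* layers do not */
            default:
                    return 0;
            }
    }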
surf_tmpl.u.tex.level = level;
for (layer = first_layer; layer <= checked_last_layer; layer++) {
struct pipe_surface *zsurf, *cbsurf;
- surf_tmpl.format = src->resource.b.b.format;
+ surf_tmpl.format = src->buffer.b.b.format;
surf_tmpl.u.tex.first_layer = layer;
surf_tmpl.u.tex.last_layer = layer;
- zsurf = sctx->b.create_surface(&sctx->b, &src->resource.b.b, &surf_tmpl);
+ zsurf = sctx->b.create_surface(&sctx->b, &src->buffer.b.b, &surf_tmpl);
- surf_tmpl.format = dst->resource.b.b.format;
- cbsurf = sctx->b.create_surface(&sctx->b, &dst->resource.b.b, &surf_tmpl);
+ surf_tmpl.format = dst->buffer.b.b.format;
+ cbsurf = sctx->b.create_surface(&sctx->b, &dst->buffer.b.b, &surf_tmpl);
for (sample = first_sample; sample <= last_sample; sample++) {
if (sample != sctx->dbcb_copy_sample) {
}
if (first_layer == 0 && last_layer >= max_layer &&
- first_sample == 0 && last_sample >= u_max_sample(&src->resource.b.b))
+ first_sample == 0 && last_sample >= u_max_sample(&src->buffer.b.b))
fully_copied_levels |= 1u << level;
}
assert(staging != NULL && "use si_blit_decompress_zs_in_place instead");
- desc = util_format_description(staging->resource.b.b.format);
+ desc = util_format_description(staging->buffer.b.b.format);
if (util_format_has_depth(desc))
planes |= PIPE_MASK_Z;
sctx->db_flush_depth_inplace = true;
si_mark_atom_dirty(sctx, &sctx->atoms.s.db_render_state);
- surf_tmpl.format = texture->resource.b.b.format;
+ surf_tmpl.format = texture->buffer.b.b.format;
sctx->decompression_enabled = true;
/* The smaller the mipmap level, the fewer layers there are
* as far as 3D textures are concerned. */
- max_layer = util_max_layer(&texture->resource.b.b, level);
+ max_layer = util_max_layer(&texture->buffer.b.b, level);
checked_last_layer = MIN2(last_layer, max_layer);
for (layer = first_layer; layer <= checked_last_layer; layer++) {
surf_tmpl.u.tex.first_layer = layer;
surf_tmpl.u.tex.last_layer = layer;
- zsurf = sctx->b.create_surface(&sctx->b, &texture->resource.b.b, &surf_tmpl);
+ zsurf = sctx->b.create_surface(&sctx->b, &texture->buffer.b.b, &surf_tmpl);
si_blitter_begin(sctx, SI_DECOMPRESS);
util_blitter_custom_depth_stencil(sctx->blitter, zsurf, NULL, ~0,
*/
if (copy_planes &&
(tex->flushed_depth_texture ||
- si_init_flushed_depth_texture(&sctx->b, &tex->resource.b.b, NULL))) {
+ si_init_flushed_depth_texture(&sctx->b, &tex->buffer.b.b, NULL))) {
struct r600_texture *dst = tex->flushed_depth_texture;
unsigned fully_copied_levels;
unsigned levels = 0;
assert(tex->flushed_depth_texture);
- if (util_format_is_depth_and_stencil(dst->resource.b.b.format))
+ if (util_format_is_depth_and_stencil(dst->buffer.b.b.format))
copy_planes = PIPE_MASK_Z | PIPE_MASK_S;
if (copy_planes & PIPE_MASK_Z) {
fully_copied_levels = si_blit_dbcb_copy(
sctx, tex, dst, copy_planes, levels,
first_layer, last_layer,
- 0, u_max_sample(&tex->resource.b.b));
+ 0, u_max_sample(&tex->buffer.b.b));
if (copy_planes & PIPE_MASK_Z)
tex->dirty_level_mask &= ~fully_copied_levels;
/* Only in-place decompression needs to flush DB caches, or
* when we don't decompress but TC-compatible planes are dirty.
*/
- si_make_DB_shader_coherent(sctx, tex->resource.b.b.nr_samples,
+ si_make_DB_shader_coherent(sctx, tex->buffer.b.b.nr_samples,
inplace_planes & PIPE_MASK_S,
tc_compat_htile);
}
/* set_framebuffer_state takes care of coherency for single-sample.
* The DB->CB copy uses CB for the final writes.
*/
- if (copy_planes && tex->resource.b.b.nr_samples > 1)
- si_make_CB_shader_coherent(sctx, tex->resource.b.b.nr_samples,
+ if (copy_planes && tex->buffer.b.b.nr_samples > 1)
+ si_make_CB_shader_coherent(sctx, tex->buffer.b.b.nr_samples,
false);
}
si_decompress_depth(sctx, tex,
sview->is_stencil_sampler ? PIPE_MASK_S : PIPE_MASK_Z,
view->u.tex.first_level, view->u.tex.last_level,
- 0, util_max_layer(&tex->resource.b.b, view->u.tex.first_level));
+ 0, util_max_layer(&tex->buffer.b.b, view->u.tex.first_level));
}
}
/* The smaller the mipmap level, the fewer layers there are
* as far as 3D textures are concerned. */
- max_layer = util_max_layer(&rtex->resource.b.b, level);
+ max_layer = util_max_layer(&rtex->buffer.b.b, level);
checked_last_layer = MIN2(last_layer, max_layer);
for (layer = first_layer; layer <= checked_last_layer; layer++) {
struct pipe_surface *cbsurf, surf_tmpl;
- surf_tmpl.format = rtex->resource.b.b.format;
+ surf_tmpl.format = rtex->buffer.b.b.format;
surf_tmpl.u.tex.level = level;
surf_tmpl.u.tex.first_layer = layer;
surf_tmpl.u.tex.last_layer = layer;
- cbsurf = sctx->b.create_surface(&sctx->b, &rtex->resource.b.b, &surf_tmpl);
+ cbsurf = sctx->b.create_surface(&sctx->b, &rtex->buffer.b.b, &surf_tmpl);
/* Required before and after FMASK and DCC_DECOMPRESS. */
if (custom_blend == sctx->custom_blend_fmask_decompress ||
}
sctx->decompression_enabled = false;
- si_make_CB_shader_coherent(sctx, rtex->resource.b.b.nr_samples,
+ si_make_CB_shader_coherent(sctx, rtex->buffer.b.b.nr_samples,
vi_dcc_enabled(rtex, first_level));
}
return;
si_blit_decompress_color(sctx, tex, first_level, last_level, 0,
- util_max_layer(&tex->resource.b.b, first_level),
+ util_max_layer(&tex->buffer.b.b, first_level),
false);
}
si_decompress_depth(sctx, tex,
sview->is_stencil_sampler ? PIPE_MASK_S : PIPE_MASK_Z,
view->u.tex.first_level, view->u.tex.last_level,
- 0, util_max_layer(&tex->resource.b.b, view->u.tex.first_level));
+ 0, util_max_layer(&tex->buffer.b.b, view->u.tex.first_level));
}
}
if (!rtex->dcc_offset)
return;
- si_blit_decompress_color(sctx, rtex, 0, rtex->resource.b.b.last_level,
- 0, util_max_layer(&rtex->resource.b.b, 0),
+ si_blit_decompress_color(sctx, rtex, 0, rtex->buffer.b.b.last_level,
+ 0, util_max_layer(&rtex->buffer.b.b, 0),
true);
}
dcc_buffer = &rtex->dcc_separate_buffer->b.b;
dcc_offset = 0;
} else {
- dcc_buffer = &rtex->resource.b.b;
+ dcc_buffer = &rtex->buffer.b.b;
dcc_offset = rtex->dcc_offset;
}
if (sctx->chip_class >= GFX9) {
/* Mipmap level clears aren't implemented. */
- assert(rtex->resource.b.b.last_level == 0);
+ assert(rtex->buffer.b.b.last_level == 0);
/* 4x and 8x MSAA need a sophisticated compute shader for
* the clear. See AMDVLK. */
- assert(rtex->resource.b.b.nr_samples <= 2);
+ assert(rtex->buffer.b.b.nr_samples <= 2);
clear_size = rtex->surface.dcc_size;
} else {
- unsigned num_layers = util_num_layers(&rtex->resource.b.b, level);
+ unsigned num_layers = util_num_layers(&rtex->buffer.b.b, level);
/* If this is 0, fast clear isn't possible. (can occur with MSAA) */
assert(rtex->surface.u.legacy.level[level].dcc_fast_clear_size);
* dcc_fast_clear_size bytes for each layer. A compute shader
* would be more efficient than separate per-layer clear operations.
*/
- assert(rtex->resource.b.b.nr_samples <= 2 || num_layers == 1);
+ assert(rtex->buffer.b.b.nr_samples <= 2 || num_layers == 1);
dcc_offset += rtex->surface.u.legacy.level[level].dcc_offset;
clear_size = rtex->surface.u.legacy.level[level].dcc_fast_clear_size *
static void si_set_optimal_micro_tile_mode(struct si_screen *sscreen,
struct r600_texture *rtex)
{
- if (rtex->resource.b.is_shared ||
- rtex->resource.b.b.nr_samples <= 1 ||
+ if (rtex->buffer.b.is_shared ||
+ rtex->buffer.b.b.nr_samples <= 1 ||
rtex->surface.micro_tile_mode == rtex->last_msaa_resolve_target_micro_mode)
return;
assert(sscreen->info.chip_class >= GFX9 ||
rtex->surface.u.legacy.level[0].mode == RADEON_SURF_MODE_2D);
- assert(rtex->resource.b.b.last_level == 0);
+ assert(rtex->buffer.b.b.last_level == 0);
if (sscreen->info.chip_class >= GFX9) {
/* 4K or larger tiles only. 0 is linear. 1-3 are 256B tiles. */
* organized in a 2D plane).
*/
if (sctx->chip_class >= GFX9 &&
- tex->resource.b.b.last_level > 0)
+ tex->buffer.b.b.last_level > 0)
continue;
/* the clear is allowed if all layers are bound */
if (fb->cbufs[i]->u.tex.first_layer != 0 ||
- fb->cbufs[i]->u.tex.last_layer != util_max_layer(&tex->resource.b.b, 0)) {
+ fb->cbufs[i]->u.tex.last_layer != util_max_layer(&tex->buffer.b.b, 0)) {
continue;
}
* because there is no way to communicate the clear color among
* all clients
*/
- if (tex->resource.b.is_shared &&
- !(tex->resource.external_usage & PIPE_HANDLE_USAGE_EXPLICIT_FLUSH))
+ if (tex->buffer.b.is_shared &&
+ !(tex->buffer.external_usage & PIPE_HANDLE_USAGE_EXPLICIT_FLUSH))
continue;
/* fast color clear with 1D tiling doesn't work on old kernels and CIK */
*
* This helps on both dGPUs and APUs, even small APUs like Mullins.
*/
- bool too_small = tex->resource.b.b.nr_samples <= 1 &&
- tex->resource.b.b.width0 *
- tex->resource.b.b.height0 <= 512 * 512;
+ bool too_small = tex->buffer.b.b.nr_samples <= 1 &&
+ tex->buffer.b.b.width0 *
+ tex->buffer.b.b.height0 <= 512 * 512;
/* Try to clear DCC first, otherwise try CMASK. */
if (vi_dcc_enabled(tex, 0)) {
!tex->surface.u.legacy.level[level].dcc_fast_clear_size)
continue;
- if (!vi_get_fast_clear_parameters(tex->resource.b.b.format,
+ if (!vi_get_fast_clear_parameters(tex->buffer.b.b.format,
fb->cbufs[i]->format,
color, &reset_value,
&eliminate_needed))
continue;
/* DCC fast clear with MSAA should clear CMASK to 0xC. */
- if (tex->resource.b.b.nr_samples >= 2 && tex->cmask.size) {
+ if (tex->buffer.b.b.nr_samples >= 2 && tex->cmask.size) {
/* TODO: This doesn't work with MSAA. */
if (eliminate_needed)
continue;
if (zstex &&
si_htile_enabled(zstex, zsbuf->u.tex.level) &&
zsbuf->u.tex.first_layer == 0 &&
- zsbuf->u.tex.last_layer == util_max_layer(&zstex->resource.b.b, 0)) {
+ zsbuf->u.tex.last_layer == util_max_layer(&zstex->buffer.b.b, 0)) {
/* TC-compatible HTILE only supports depth clears to 0 or 1. */
if (buffers & PIPE_CLEAR_DEPTH &&
(!zstex->tc_compatible_htile ||
struct r600_texture *tex = (struct r600_texture*)resource;
if (tex->is_depth && !si_can_sample_zs(tex, is_stencil_sampler))
- resource = &tex->flushed_depth_texture->resource.b.b;
+ resource = &tex->flushed_depth_texture->buffer.b.b;
}
rres = r600_resource(resource);
is_stencil = false;
}
- va = tex->resource.gpu_address;
+ va = tex->buffer.gpu_address;
if (sscreen->info.chip_class >= GFX9) {
/* Only stencil_offset needs to be added here. */
state[7] = 0;
if (vi_dcc_enabled(tex, first_level)) {
- meta_va = (!tex->dcc_separate_buffer ? tex->resource.gpu_address : 0) +
+ meta_va = (!tex->dcc_separate_buffer ? tex->buffer.gpu_address : 0) +
tex->dcc_offset;
if (sscreen->info.chip_class == VI) {
meta_va |= (uint32_t)tex->surface.tile_swizzle << 8;
} else if (vi_tc_compat_htile_enabled(tex, first_level)) {
- meta_va = tex->resource.gpu_address + tex->htile_offset;
+ meta_va = tex->buffer.gpu_address + tex->htile_offset;
}
if (meta_va) {
{
struct pipe_sampler_view *view = &sview->base;
struct r600_texture *rtex = (struct r600_texture *)view->texture;
- bool is_buffer = rtex->resource.b.b.target == PIPE_BUFFER;
+ bool is_buffer = rtex->buffer.b.b.target == PIPE_BUFFER;
if (unlikely(!is_buffer && sview->dcc_incompatible)) {
if (vi_dcc_enabled(rtex, view->u.tex.first_level))
memcpy(desc, sview->state, 8*4);
if (is_buffer) {
- si_set_buf_desc_address(&rtex->resource,
+ si_set_buf_desc_address(&rtex->buffer,
sview->base.u.buf.offset,
desc + 4);
} else {
si_set_sampler_view_desc(sctx, rview,
samplers->sampler_states[slot], desc);
- if (rtex->resource.b.b.target == PIPE_BUFFER) {
- rtex->resource.bind_history |= PIPE_BIND_SAMPLER_VIEW;
+ if (rtex->buffer.b.b.target == PIPE_BUFFER) {
+ rtex->buffer.bind_history |= PIPE_BIND_SAMPLER_VIEW;
samplers->needs_depth_decompress_mask &= ~(1u << slot);
samplers->needs_color_decompress_mask &= ~(1u << slot);
} else {
*/
si_texture_disable_dcc(sctx, tex);
- if (tex->resource.b.b.nr_samples <= 1 && tex->cmask_buffer) {
+ if (tex->buffer.b.b.nr_samples <= 1 && tex->cmask_buffer) {
/* Disable CMASK. */
- assert(tex->cmask_buffer != &tex->resource);
+ assert(tex->cmask_buffer != &tex->buffer);
si_eliminate_fast_color_clear(sctx, tex);
si_texture_discard_cmask(sctx->screen, tex);
}
memset(desc, 0, 16 * 4);
si_set_shader_image_desc(sctx, &view, true, desc, desc + 8);
- pipe_resource_reference(&buffers->buffers[slot], &tex->resource.b.b);
+ pipe_resource_reference(&buffers->buffers[slot], &tex->buffer.b.b);
radeon_add_to_buffer_list(sctx, sctx->gfx_cs,
- &tex->resource, RADEON_USAGE_READ,
+ &tex->buffer, RADEON_USAGE_READ,
RADEON_PRIO_SHADER_RW_IMAGE);
buffers->enabled_mask |= 1u << slot;
} else {
tiled_y = detile ? src_y : dst_y;
tiled_z = detile ? src_z : dst_z;
- assert(!util_format_is_depth_and_stencil(rtiled->resource.b.b.format));
+ assert(!util_format_is_depth_and_stencil(rtiled->buffer.b.b.format));
array_mode = G_009910_ARRAY_MODE(tile_mode);
slice_tile_max = (rtiled->surface.u.legacy.level[tiled_lvl].nblk_x *
/* Non-depth modes don't have TILE_SPLIT set. */
tile_split = util_logbase2(rtiled->surface.u.legacy.tile_split >> 6);
nbanks = G_009910_NUM_BANKS(tile_mode);
- base += rtiled->resource.gpu_address;
- addr += rlinear->resource.gpu_address;
+ base += rtiled->buffer.gpu_address;
+ addr += rlinear->buffer.gpu_address;
pipe_config = G_009910_PIPE_CONFIG(tile_mode);
mt = G_009910_MICRO_TILE_MODE(tile_mode);
size = copy_height * pitch;
ncopy = DIV_ROUND_UP(size, SI_DMA_COPY_MAX_DWORD_ALIGNED_SIZE);
- si_need_dma_space(ctx, ncopy * 9, &rdst->resource, &rsrc->resource);
+ si_need_dma_space(ctx, ncopy * 9, &rdst->buffer, &rsrc->buffer);
for (i = 0; i < ncopy; i++) {
cheight = copy_height;
bpp = rdst->surface.bpe;
dst_pitch = rdst->surface.u.legacy.level[dst_level].nblk_x * rdst->surface.bpe;
src_pitch = rsrc->surface.u.legacy.level[src_level].nblk_x * rsrc->surface.bpe;
- src_w = u_minify(rsrc->resource.b.b.width0, src_level);
- dst_w = u_minify(rdst->resource.b.b.width0, dst_level);
+ src_w = u_minify(rsrc->buffer.b.b.width0, src_level);
+ dst_w = u_minify(rdst->buffer.b.b.width0, dst_level);
dst_mode = rdst->surface.u.legacy.level[dst_level].mode;
src_mode = rsrc->surface.u.legacy.level[src_level].mode;
if (src_pitch != dst_pitch || src_box->x || dst_x || src_w != dst_w ||
src_box->width != src_w ||
- src_box->height != u_minify(rsrc->resource.b.b.height0, src_level) ||
- src_box->height != u_minify(rdst->resource.b.b.height0, dst_level) ||
+ src_box->height != u_minify(rsrc->buffer.b.b.height0, src_level) ||
+ src_box->height != u_minify(rdst->buffer.b.b.height0, dst_level) ||
rsrc->surface.u.legacy.level[src_level].nblk_y !=
rdst->surface.u.legacy.level[dst_level].nblk_y) {
/* FIXME si can do partial blit */
};
struct r600_texture {
- struct r600_resource resource;
+ struct r600_resource buffer;
struct radeon_surf surface;
uint64_t size;
static inline void
r600_texture_reference(struct r600_texture **ptr, struct r600_texture *res)
{
- pipe_resource_reference((struct pipe_resource **)ptr, &res->resource.b.b);
+ pipe_resource_reference((struct pipe_resource **)ptr, &res->buffer.b.b);
}
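The helper forwards to pipe_resource_reference on the base object, so it follows the usual Gallium refcounting pattern; a hypothetical usage (some_tex is a stand-in):

    struct r600_texture *tex = NULL;
    r600_texture_reference(&tex, some_tex); /* takes a reference on some_tex */
    /* ... use tex ... */
    r600_texture_reference(&tex, NULL);     /* drops it; the last drop destroys the texture */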
static inline bool
static unsigned si_tex_dim(struct si_screen *sscreen, struct r600_texture *rtex,
unsigned view_target, unsigned nr_samples)
{
- unsigned res_target = rtex->resource.b.b.target;
+ unsigned res_target = rtex->buffer.b.b.target;
if (view_target == PIPE_TEXTURE_CUBE ||
view_target == PIPE_TEXTURE_CUBE_ARRAY)
color_attrib = S_028C74_FORCE_DST_ALPHA_1(desc->swizzle[3] == PIPE_SWIZZLE_1 ||
util_format_is_intensity(surf->base.format));
- if (rtex->resource.b.b.nr_samples > 1) {
- unsigned log_samples = util_logbase2(rtex->resource.b.b.nr_samples);
+ if (rtex->buffer.b.b.nr_samples > 1) {
+ unsigned log_samples = util_logbase2(rtex->buffer.b.b.nr_samples);
color_attrib |= S_028C74_NUM_SAMPLES(log_samples) |
S_028C74_NUM_FRAGMENTS(log_samples);
if (!sctx->screen->info.has_dedicated_vram)
min_compressed_block_size = V_028C78_MIN_BLOCK_SIZE_64B;
- if (rtex->resource.b.b.nr_samples > 1) {
+ if (rtex->buffer.b.b.nr_samples > 1) {
if (rtex->surface.bpe == 1)
max_uncompressed_block_size = V_028C78_MAX_BLOCK_SIZE_64B;
else if (rtex->surface.bpe == 2)
S_028C6C_SLICE_MAX(surf->base.u.tex.last_layer);
if (sctx->chip_class >= GFX9) {
- unsigned mip0_depth = util_max_layer(&rtex->resource.b.b, 0);
+ unsigned mip0_depth = util_max_layer(&rtex->buffer.b.b, 0);
color_view |= S_028C6C_MIP_LEVEL(surf->base.u.tex.level);
color_attrib |= S_028C74_MIP0_DEPTH(mip0_depth) |
S_028C74_RESOURCE_TYPE(rtex->surface.u.gfx9.resource_type);
surf->cb_color_attrib2 = S_028C68_MIP0_WIDTH(surf->width0 - 1) |
S_028C68_MIP0_HEIGHT(surf->height0 - 1) |
- S_028C68_MAX_MIP(rtex->resource.b.b.last_level);
+ S_028C68_MAX_MIP(rtex->buffer.b.b.last_level);
}
surf->cb_color_view = color_view;
assert(format != V_028040_Z_INVALID);
if (format == V_028040_Z_INVALID)
- PRINT_ERR("Invalid DB format: %d, disabling DB.\n", rtex->resource.b.b.format);
+ PRINT_ERR("Invalid DB format: %d, disabling DB.\n", rtex->buffer.b.b.format);
surf->db_depth_view = S_028008_SLICE_START(surf->base.u.tex.first_layer) |
S_028008_SLICE_MAX(surf->base.u.tex.last_layer);
if (sctx->chip_class >= GFX9) {
assert(rtex->surface.u.gfx9.surf_offset == 0);
- surf->db_depth_base = rtex->resource.gpu_address >> 8;
- surf->db_stencil_base = (rtex->resource.gpu_address +
+ surf->db_depth_base = rtex->buffer.gpu_address >> 8;
+ surf->db_stencil_base = (rtex->buffer.gpu_address +
rtex->surface.u.gfx9.stencil_offset) >> 8;
z_info = S_028038_FORMAT(format) |
- S_028038_NUM_SAMPLES(util_logbase2(rtex->resource.b.b.nr_samples)) |
+ S_028038_NUM_SAMPLES(util_logbase2(rtex->buffer.b.b.nr_samples)) |
S_028038_SW_MODE(rtex->surface.u.gfx9.surf.swizzle_mode) |
- S_028038_MAXMIP(rtex->resource.b.b.last_level);
+ S_028038_MAXMIP(rtex->buffer.b.b.last_level);
s_info = S_02803C_FORMAT(stencil_format) |
S_02803C_SW_MODE(rtex->surface.u.gfx9.stencil.swizzle_mode);
surf->db_z_info2 = S_028068_EPITCH(rtex->surface.u.gfx9.surf.epitch);
surf->db_stencil_info2 = S_02806C_EPITCH(rtex->surface.u.gfx9.stencil.epitch);
surf->db_depth_view |= S_028008_MIPID(level);
- surf->db_depth_size = S_02801C_X_MAX(rtex->resource.b.b.width0 - 1) |
- S_02801C_Y_MAX(rtex->resource.b.b.height0 - 1);
+ surf->db_depth_size = S_02801C_X_MAX(rtex->buffer.b.b.width0 - 1) |
+ S_02801C_Y_MAX(rtex->buffer.b.b.height0 - 1);
if (si_htile_enabled(rtex, level)) {
z_info |= S_028038_TILE_SURFACE_ENABLE(1) |
unsigned max_zplanes = 4;
if (rtex->db_render_format == PIPE_FORMAT_Z16_UNORM &&
- rtex->resource.b.b.nr_samples > 1)
+ rtex->buffer.b.b.nr_samples > 1)
max_zplanes = 2;
z_info |= S_028038_DECOMPRESS_ON_N_ZPLANES(max_zplanes + 1) |
/* Stencil buffer workaround ported from the SI-CI-VI code.
* See that for explanation.
*/
- s_info |= S_02803C_ALLOW_EXPCLEAR(rtex->resource.b.b.nr_samples <= 1);
+ s_info |= S_02803C_ALLOW_EXPCLEAR(rtex->buffer.b.b.nr_samples <= 1);
} else {
/* Use all HTILE for depth if there's no stencil. */
s_info |= S_02803C_TILE_STENCIL_DISABLE(1);
}
- surf->db_htile_data_base = (rtex->resource.gpu_address +
+ surf->db_htile_data_base = (rtex->buffer.gpu_address +
rtex->htile_offset) >> 8;
surf->db_htile_surface = S_028ABC_FULL_CACHE(1) |
S_028ABC_PIPE_ALIGNED(rtex->surface.u.gfx9.htile.pipe_aligned) |
assert(levelinfo->nblk_x % 8 == 0 && levelinfo->nblk_y % 8 == 0);
- surf->db_depth_base = (rtex->resource.gpu_address +
+ surf->db_depth_base = (rtex->buffer.gpu_address +
rtex->surface.u.legacy.level[level].offset) >> 8;
- surf->db_stencil_base = (rtex->resource.gpu_address +
+ surf->db_stencil_base = (rtex->buffer.gpu_address +
rtex->surface.u.legacy.stencil_level[level].offset) >> 8;
z_info = S_028040_FORMAT(format) |
- S_028040_NUM_SAMPLES(util_logbase2(rtex->resource.b.b.nr_samples));
+ S_028040_NUM_SAMPLES(util_logbase2(rtex->buffer.b.b.nr_samples));
s_info = S_028044_FORMAT(stencil_format);
surf->db_depth_info = S_02803C_ADDR5_SWIZZLE_MASK(!rtex->tc_compatible_htile);
* Check piglit's arb_texture_multisample-stencil-clear
* test if you want to try changing this.
*/
- if (rtex->resource.b.b.nr_samples <= 1)
+ if (rtex->buffer.b.b.nr_samples <= 1)
s_info |= S_028044_ALLOW_EXPCLEAR(1);
} else if (!rtex->tc_compatible_htile) {
/* Use all of the htile_buffer for depth if there's no stencil.
s_info |= S_028044_TILE_STENCIL_DISABLE(1);
}
- surf->db_htile_data_base = (rtex->resource.gpu_address +
+ surf->db_htile_data_base = (rtex->buffer.gpu_address +
rtex->htile_offset) >> 8;
surf->db_htile_surface = S_028ABC_FULL_CACHE(1);
if (rtex->tc_compatible_htile) {
surf->db_htile_surface |= S_028ABC_TC_COMPATIBLE(1);
- if (rtex->resource.b.b.nr_samples <= 1)
+ if (rtex->buffer.b.b.nr_samples <= 1)
z_info |= S_028040_DECOMPRESS_ON_N_ZPLANES(5);
- else if (rtex->resource.b.b.nr_samples <= 4)
+ else if (rtex->buffer.b.b.nr_samples <= 4)
z_info |= S_028040_DECOMPRESS_ON_N_ZPLANES(3);
else
z_info |= S_028040_DECOMPRESS_ON_N_ZPLANES(2);
tex = (struct r600_texture *)cb->base.texture;
radeon_add_to_buffer_list(sctx, sctx->gfx_cs,
- &tex->resource, RADEON_USAGE_READWRITE,
- tex->resource.b.b.nr_samples > 1 ?
+ &tex->buffer, RADEON_USAGE_READWRITE,
+ tex->buffer.b.b.nr_samples > 1 ?
RADEON_PRIO_COLOR_BUFFER_MSAA :
RADEON_PRIO_COLOR_BUFFER);
- if (tex->cmask_buffer && tex->cmask_buffer != &tex->resource) {
+ if (tex->cmask_buffer && tex->cmask_buffer != &tex->buffer) {
radeon_add_to_buffer_list(sctx, sctx->gfx_cs,
tex->cmask_buffer, RADEON_USAGE_READWRITE,
RADEON_PRIO_CMASK);
RADEON_PRIO_DCC);
/* Compute mutable surface parameters. */
- cb_color_base = tex->resource.gpu_address >> 8;
+ cb_color_base = tex->buffer.gpu_address >> 8;
cb_color_fmask = 0;
cb_color_cmask = tex->cmask.base_address_reg;
cb_dcc_base = 0;
cb_color_info &= C_028C70_FAST_CLEAR;
if (tex->fmask.size) {
- cb_color_fmask = (tex->resource.gpu_address + tex->fmask.offset) >> 8;
+ cb_color_fmask = (tex->buffer.gpu_address + tex->fmask.offset) >> 8;
cb_color_fmask |= tex->fmask.tile_swizzle;
}
if (!is_msaa_resolve_dst)
cb_color_info |= S_028C70_DCC_ENABLE(1);
- cb_dcc_base = ((!tex->dcc_separate_buffer ? tex->resource.gpu_address : 0) +
+ cb_dcc_base = ((!tex->dcc_separate_buffer ? tex->buffer.gpu_address : 0) +
tex->dcc_offset) >> 8;
cb_dcc_base |= tex->surface.tile_swizzle;
}
struct r600_texture *rtex = (struct r600_texture*)zb->base.texture;
radeon_add_to_buffer_list(sctx, sctx->gfx_cs,
- &rtex->resource, RADEON_USAGE_READWRITE,
+ &rtex->buffer, RADEON_USAGE_READWRITE,
zb->base.texture->nr_samples > 1 ?
RADEON_PRIO_DEPTH_BUFFER_MSAA :
RADEON_PRIO_DEPTH_BUFFER);
uint32_t *state,
uint32_t *fmask_state)
{
- struct pipe_resource *res = &tex->resource.b.b;
+ struct pipe_resource *res = &tex->buffer.b.b;
const struct util_format_description *desc;
unsigned char swizzle[4];
int first_non_void;
state[4] |= S_008F20_BC_SWIZZLE(bc_swizzle);
state[5] |= S_008F24_MAX_MIP(res->nr_samples > 1 ?
util_logbase2(res->nr_samples) :
- tex->resource.b.b.last_level);
+ tex->buffer.b.b.last_level);
} else {
state[3] |= S_008F1C_POW2_PAD(res->last_level > 0);
state[4] |= S_008F20_DEPTH(depth - 1);
if (tex->fmask.size) {
uint32_t data_format, num_format;
- va = tex->resource.gpu_address + tex->fmask.offset;
+ va = tex->buffer.gpu_address + tex->fmask.offset;
if (screen->info.chip_class >= GFX9) {
data_format = V_008F14_IMG_DATA_FORMAT_FMASK;
/* Override format for the case where the flushed texture
* contains only Z or only S.
*/
- if (tmp->flushed_depth_texture->resource.b.b.format != tmp->resource.b.b.format)
- pipe_format = tmp->flushed_depth_texture->resource.b.b.format;
+ if (tmp->flushed_depth_texture->buffer.b.b.format != tmp->buffer.b.b.format)
+ pipe_format = tmp->flushed_depth_texture->buffer.b.b.format;
tmp = tmp->flushed_depth_texture;
}
return false;
/* MSAA: Blits don't exist in the real world. */
- if (rsrc->resource.b.b.nr_samples > 1 ||
- rdst->resource.b.b.nr_samples > 1)
+ if (rsrc->buffer.b.b.nr_samples > 1 ||
+ rdst->buffer.b.b.nr_samples > 1)
return false;
/* Depth-stencil surfaces:
if (rdst->cmask.size && rdst->dirty_level_mask & (1 << dst_level)) {
/* The CMASK clear is only enabled for the first level. */
assert(dst_level == 0);
- if (!util_texrange_covers_whole_level(&rdst->resource.b.b, dst_level,
+ if (!util_texrange_covers_whole_level(&rdst->buffer.b.b, dst_level,
dstx, dsty, dstz, src_box->width,
src_box->height, src_box->depth))
return false;
/* All requirements are met. Prepare textures for SDMA. */
if (rsrc->cmask.size && rsrc->dirty_level_mask & (1 << src_level))
- sctx->b.flush_resource(&sctx->b, &rsrc->resource.b.b);
+ sctx->b.flush_resource(&sctx->b, &rsrc->buffer.b.b);
assert(!(rsrc->dirty_level_mask & (1 << src_level)));
assert(!(rdst->dirty_level_mask & (1 << dst_level)));
mtx_lock(&sscreen->aux_context_lock);
unsigned n = sctx->num_decompress_calls;
- ctx->flush_resource(ctx, &rtex->resource.b.b);
+ ctx->flush_resource(ctx, &rtex->buffer.b.b);
/* Flush only if any fast clear elimination took place. */
if (n != sctx->num_decompress_calls)
if (!rtex->cmask.size)
return;
- assert(rtex->resource.b.b.nr_samples <= 1);
+ assert(rtex->buffer.b.b.nr_samples <= 1);
/* Disable CMASK. */
memset(&rtex->cmask, 0, sizeof(rtex->cmask));
- rtex->cmask.base_address_reg = rtex->resource.gpu_address >> 8;
+ rtex->cmask.base_address_reg = rtex->buffer.gpu_address >> 8;
rtex->dirty_level_mask = 0;
rtex->cb_color_info &= ~S_028C70_FAST_CLEAR(1);
- if (rtex->cmask_buffer != &rtex->resource)
+ if (rtex->cmask_buffer != &rtex->buffer)
r600_resource_reference(&rtex->cmask_buffer, NULL);
/* Notify all contexts about the change. */
{
/* We can't disable DCC if it can be written by another process. */
return rtex->dcc_offset &&
- (!rtex->resource.b.is_shared ||
- !(rtex->resource.external_usage & PIPE_HANDLE_USAGE_WRITE));
+ (!rtex->buffer.b.is_shared ||
+ !(rtex->buffer.external_usage & PIPE_HANDLE_USAGE_WRITE));
}
static bool si_texture_discard_dcc(struct si_screen *sscreen,
{
struct pipe_screen *screen = sctx->b.screen;
struct r600_texture *new_tex;
- struct pipe_resource templ = rtex->resource.b.b;
+ struct pipe_resource templ = rtex->buffer.b.b;
unsigned i;
templ.bind |= new_bind_flag;
- if (rtex->resource.b.is_shared)
+ if (rtex->buffer.b.is_shared)
return;
if (new_bind_flag == PIPE_BIND_LINEAR) {
u_minify(templ.width0, i), u_minify(templ.height0, i),
util_num_layers(&templ, i), &box);
- sctx->dma_copy(&sctx->b, &new_tex->resource.b.b, i, 0, 0, 0,
- &rtex->resource.b.b, i, &box);
+ sctx->dma_copy(&sctx->b, &new_tex->buffer.b.b, i, 0, 0, 0,
+ &rtex->buffer.b.b, i, &box);
}
}
}
/* Replace the structure fields of rtex. */
- rtex->resource.b.b.bind = templ.bind;
- pb_reference(&rtex->resource.buf, new_tex->resource.buf);
- rtex->resource.gpu_address = new_tex->resource.gpu_address;
- rtex->resource.vram_usage = new_tex->resource.vram_usage;
- rtex->resource.gart_usage = new_tex->resource.gart_usage;
- rtex->resource.bo_size = new_tex->resource.bo_size;
- rtex->resource.bo_alignment = new_tex->resource.bo_alignment;
- rtex->resource.domains = new_tex->resource.domains;
- rtex->resource.flags = new_tex->resource.flags;
+ rtex->buffer.b.b.bind = templ.bind;
+ pb_reference(&rtex->buffer.buf, new_tex->buffer.buf);
+ rtex->buffer.gpu_address = new_tex->buffer.gpu_address;
+ rtex->buffer.vram_usage = new_tex->buffer.vram_usage;
+ rtex->buffer.gart_usage = new_tex->buffer.gart_usage;
+ rtex->buffer.bo_size = new_tex->buffer.bo_size;
+ rtex->buffer.bo_alignment = new_tex->buffer.bo_alignment;
+ rtex->buffer.domains = new_tex->buffer.domains;
+ rtex->buffer.flags = new_tex->buffer.flags;
rtex->size = new_tex->size;
rtex->db_render_format = new_tex->db_render_format;
rtex->db_compatible = new_tex->db_compatible;
struct r600_texture *rtex,
struct radeon_bo_metadata *md)
{
- struct pipe_resource *res = &rtex->resource.b.b;
+ struct pipe_resource *res = &rtex->buffer.b.b;
static const unsigned char swizzle[] = {
PIPE_SWIZZLE_X,
PIPE_SWIZZLE_Y,
/* Move a suballocated texture into a non-suballocated allocation. */
if (sscreen->ws->buffer_is_suballocated(res->buf) ||
rtex->surface.tile_swizzle ||
- (rtex->resource.flags & RADEON_FLAG_NO_INTERPROCESS_SHARING &&
+ (rtex->buffer.flags & RADEON_FLAG_NO_INTERPROCESS_SHARING &&
sscreen->info.has_local_buffers &&
whandle->type != DRM_API_HANDLE_TYPE_KMS)) {
assert(!res->b.is_shared);
/* Move a suballocated buffer into a non-suballocated allocation. */
if (sscreen->ws->buffer_is_suballocated(res->buf) ||
/* A DMABUF export always fails if the BO is local. */
- (rtex->resource.flags & RADEON_FLAG_NO_INTERPROCESS_SHARING &&
+ (rtex->buffer.flags & RADEON_FLAG_NO_INTERPROCESS_SHARING &&
sscreen->info.has_local_buffers)) {
assert(!res->b.is_shared);
struct pipe_resource *ptex)
{
struct r600_texture *rtex = (struct r600_texture*)ptex;
- struct r600_resource *resource = &rtex->resource;
+ struct r600_resource *resource = &rtex->buffer;
r600_texture_reference(&rtex->flushed_depth_texture, NULL);
- if (rtex->cmask_buffer != &rtex->resource) {
+ if (rtex->cmask_buffer != &rtex->buffer) {
r600_resource_reference(&rtex->cmask_buffer, NULL);
}
pb_reference(&resource->buf, NULL);
struct r600_fmask_info *out)
{
/* FMASK is allocated like an ordinary texture. */
- struct pipe_resource templ = rtex->resource.b.b;
+ struct pipe_resource templ = rtex->buffer.b.b;
struct radeon_surf fmask = {};
unsigned flags, bpe;
struct r600_texture *rtex)
{
si_texture_get_fmask_info(sscreen, rtex,
- rtex->resource.b.b.nr_samples, &rtex->fmask);
+ rtex->buffer.b.b.nr_samples, &rtex->fmask);
rtex->fmask.offset = align64(rtex->size, rtex->fmask.alignment);
rtex->size = rtex->fmask.offset + rtex->fmask.size;
unsigned base_align = num_pipes * pipe_interleave_bytes;
- unsigned width = align(rtex->resource.b.b.width0, cl_width*8);
- unsigned height = align(rtex->resource.b.b.height0, cl_height*8);
+ unsigned width = align(rtex->buffer.b.b.width0, cl_width*8);
+ unsigned height = align(rtex->buffer.b.b.height0, cl_height*8);
unsigned slice_elements = (width * height) / (8*8);
/* Each element of CMASK is a nibble. */
out->slice_tile_max -= 1;
out->alignment = MAX2(256, base_align);
- out->size = util_num_layers(&rtex->resource.b.b, 0) *
+ out->size = util_num_layers(&rtex->buffer.b.b, 0) *
align(slice_bytes, base_align);
}
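Each 8x8-pixel block gets one 4-bit CMASK element, so a slice takes elements/2 bytes before alignment; a worked example with hypothetical, already cl-aligned dimensions:

    unsigned width = 2048, height = 2048;                  /* hypothetical, cl-aligned */
    unsigned slice_elements = (width * height) / (8 * 8); /* 65536 elements */
    unsigned slice_bytes = slice_elements / 2;             /* one nibble each: 32768 bytes */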
return;
}
- width = align(rtex->resource.b.b.width0, cl_width * 8);
- height = align(rtex->resource.b.b.height0, cl_height * 8);
+ width = align(rtex->buffer.b.b.width0, cl_width * 8);
+ height = align(rtex->buffer.b.b.height0, cl_height * 8);
slice_elements = (width * height) / (8 * 8);
slice_bytes = slice_elements * 4;
rtex->surface.htile_alignment = base_align;
rtex->surface.htile_size =
- util_num_layers(&rtex->resource.b.b, 0) *
+ util_num_layers(&rtex->buffer.b.b, 0) *
align(slice_bytes, base_align);
}
u_log_printf(log, " Info: npix_x=%u, npix_y=%u, npix_z=%u, blk_w=%u, "
"blk_h=%u, array_size=%u, last_level=%u, "
"bpe=%u, nsamples=%u, flags=0x%x, %s\n",
- rtex->resource.b.b.width0, rtex->resource.b.b.height0,
- rtex->resource.b.b.depth0, rtex->surface.blk_w,
+ rtex->buffer.b.b.width0, rtex->buffer.b.b.height0,
+ rtex->buffer.b.b.depth0, rtex->surface.blk_w,
rtex->surface.blk_h,
- rtex->resource.b.b.array_size, rtex->resource.b.b.last_level,
- rtex->surface.bpe, rtex->resource.b.b.nr_samples,
- rtex->surface.flags, util_format_short_name(rtex->resource.b.b.format));
+ rtex->buffer.b.b.array_size, rtex->buffer.b.b.last_level,
+ rtex->surface.bpe, rtex->buffer.b.b.nr_samples,
+ rtex->surface.flags, util_format_short_name(rtex->buffer.b.b.format));
if (sscreen->info.chip_class >= GFX9) {
u_log_printf(log, " Surf: size=%"PRIu64", slice_size=%"PRIu64", "
u_log_printf(log, " DCC: offset=%"PRIu64", size=%u, alignment=%u\n",
rtex->dcc_offset, rtex->surface.dcc_size,
rtex->surface.dcc_alignment);
- for (i = 0; i <= rtex->resource.b.b.last_level; i++)
+ for (i = 0; i <= rtex->buffer.b.b.last_level; i++)
u_log_printf(log, " DCCLevel[%i]: enabled=%u, offset=%u, "
"fast_clear_size=%u\n",
i, i < rtex->surface.num_dcc_levels,
rtex->surface.u.legacy.level[i].dcc_fast_clear_size);
}
- for (i = 0; i <= rtex->resource.b.b.last_level; i++)
+ for (i = 0; i <= rtex->buffer.b.b.last_level; i++)
u_log_printf(log, " Level[%i]: offset=%"PRIu64", slice_size=%"PRIu64", "
"npix_x=%u, npix_y=%u, npix_z=%u, nblk_x=%u, nblk_y=%u, "
"mode=%u, tiling_index = %u\n",
i, rtex->surface.u.legacy.level[i].offset,
(uint64_t)rtex->surface.u.legacy.level[i].slice_size_dw * 4,
- u_minify(rtex->resource.b.b.width0, i),
- u_minify(rtex->resource.b.b.height0, i),
- u_minify(rtex->resource.b.b.depth0, i),
+ u_minify(rtex->buffer.b.b.width0, i),
+ u_minify(rtex->buffer.b.b.height0, i),
+ u_minify(rtex->buffer.b.b.depth0, i),
rtex->surface.u.legacy.level[i].nblk_x,
rtex->surface.u.legacy.level[i].nblk_y,
rtex->surface.u.legacy.level[i].mode,
if (rtex->surface.has_stencil) {
u_log_printf(log, " StencilLayout: tilesplit=%u\n",
rtex->surface.u.legacy.stencil_tile_split);
- for (i = 0; i <= rtex->resource.b.b.last_level; i++) {
+ for (i = 0; i <= rtex->buffer.b.b.last_level; i++) {
u_log_printf(log, " StencilLevel[%i]: offset=%"PRIu64", "
"slice_size=%"PRIu64", npix_x=%u, "
"npix_y=%u, npix_z=%u, nblk_x=%u, nblk_y=%u, "
"mode=%u, tiling_index = %u\n",
i, rtex->surface.u.legacy.stencil_level[i].offset,
(uint64_t)rtex->surface.u.legacy.stencil_level[i].slice_size_dw * 4,
- u_minify(rtex->resource.b.b.width0, i),
- u_minify(rtex->resource.b.b.height0, i),
- u_minify(rtex->resource.b.b.depth0, i),
+ u_minify(rtex->buffer.b.b.width0, i),
+ u_minify(rtex->buffer.b.b.height0, i),
+ u_minify(rtex->buffer.b.b.depth0, i),
rtex->surface.u.legacy.stencil_level[i].nblk_x,
rtex->surface.u.legacy.stencil_level[i].nblk_y,
rtex->surface.u.legacy.stencil_level[i].mode,
if (!rtex)
return NULL;
- resource = &rtex->resource;
+ resource = &rtex->buffer;
resource->b.b = *base;
resource->b.b.next = NULL;
resource->b.vtbl = &si_texture_vtbl;
resource->b.b.screen = screen;
/* don't include stencil-only formats which we don't support for rendering */
- rtex->is_depth = util_format_has_depth(util_format_description(rtex->resource.b.b.format));
+ rtex->is_depth = util_format_has_depth(util_format_description(rtex->buffer.b.b.format));
rtex->surface = *surface;
rtex->size = rtex->surface.surf_size;
!(sscreen->debug_flags & DBG(NO_FMASK))) {
si_texture_allocate_fmask(sscreen, rtex);
si_texture_allocate_cmask(sscreen, rtex);
- rtex->cmask_buffer = &rtex->resource;
+ rtex->cmask_buffer = &rtex->buffer;
if (!rtex->fmask.size || !rtex->cmask.size) {
FREE(rtex);
if (sscreen->info.chip_class >= GFX9 || rtex->tc_compatible_htile)
clear_value = 0x0000030F;
- si_screen_clear_buffer(sscreen, &rtex->resource.b.b,
+ si_screen_clear_buffer(sscreen, &rtex->buffer.b.b,
rtex->htile_offset,
rtex->surface.htile_size,
clear_value);
/* Initialize DCC only if the texture is not being imported. */
if (!buf && rtex->dcc_offset) {
- si_screen_clear_buffer(sscreen, &rtex->resource.b.b,
+ si_screen_clear_buffer(sscreen, &rtex->buffer.b.b,
rtex->dcc_offset,
rtex->surface.dcc_size,
0xFFFFFFFF);
/* Initialize the CMASK base register value. */
rtex->cmask.base_address_reg =
- (rtex->resource.gpu_address + rtex->cmask.offset) >> 8;
+ (rtex->buffer.gpu_address + rtex->cmask.offset) >> 8;
if (sscreen->debug_flags & DBG(VM)) {
fprintf(stderr, "VM start=0x%"PRIX64" end=0x%"PRIX64" | Texture %ix%ix%i, %i levels, %i samples, %s\n",
- rtex->resource.gpu_address,
- rtex->resource.gpu_address + rtex->resource.buf->size,
+ rtex->buffer.gpu_address,
+ rtex->buffer.gpu_address + rtex->buffer.buf->size,
base->width0, base->height0, util_num_layers(base, 0), base->last_level+1,
base->nr_samples ? base->nr_samples : 1, util_format_short_name(base->format));
}
if (!rtex)
return NULL;
- rtex->resource.b.is_shared = true;
- rtex->resource.external_usage = usage;
+ rtex->buffer.b.is_shared = true;
+ rtex->buffer.external_usage = usage;
si_apply_opaque_metadata(sscreen, rtex, &metadata);
assert(rtex->surface.tile_swizzle == 0);
- return &rtex->resource.b.b;
+ return &rtex->buffer.b.b;
}
bool si_init_flushed_depth_texture(struct pipe_context *ctx,
unsigned transfer_usage,
const struct pipe_box *box)
{
- return !rtex->resource.b.is_shared &&
+ return !rtex->buffer.b.is_shared &&
!(transfer_usage & PIPE_TRANSFER_READ) &&
- rtex->resource.b.b.last_level == 0 &&
- util_texrange_covers_whole_level(&rtex->resource.b.b, 0,
+ rtex->buffer.b.b.last_level == 0 &&
+ util_texrange_covers_whole_level(&rtex->buffer.b.b, 0,
box->x, box->y, box->z,
box->width, box->height,
box->depth);
assert(rtex->surface.is_linear);
/* Reallocate the buffer in the same pipe_resource. */
- si_alloc_resource(sscreen, &rtex->resource);
+ si_alloc_resource(sscreen, &rtex->buffer);
/* Initialize the CMASK base address (needed even without CMASK). */
rtex->cmask.base_address_reg =
- (rtex->resource.gpu_address + rtex->cmask.offset) >> 8;
+ (rtex->buffer.gpu_address + rtex->cmask.offset) >> 8;
p_atomic_inc(&sscreen->dirty_tex_counter);
use_staging_texture = true;
else if (usage & PIPE_TRANSFER_READ)
use_staging_texture =
- rtex->resource.domains & RADEON_DOMAIN_VRAM ||
- rtex->resource.flags & RADEON_FLAG_GTT_WC;
+ rtex->buffer.domains & RADEON_DOMAIN_VRAM ||
+ rtex->buffer.flags & RADEON_FLAG_GTT_WC;
/* Write & linear only: */
- else if (si_rings_is_buffer_referenced(sctx, rtex->resource.buf,
+ else if (si_rings_is_buffer_referenced(sctx, rtex->buffer.buf,
RADEON_USAGE_READWRITE) ||
- !sctx->ws->buffer_wait(rtex->resource.buf, 0,
+ !sctx->ws->buffer_wait(rtex->buffer.buf, 0,
RADEON_USAGE_READWRITE)) {
/* It's busy. */
if (si_can_invalidate_texture(sctx->screen, rtex,
if (rtex->is_depth) {
struct r600_texture *staging_depth;
- if (rtex->resource.b.b.nr_samples > 1) {
+ if (rtex->buffer.b.b.nr_samples > 1) {
/* MSAA depth buffers need to be converted to single sample buffers.
*
* Mapping MSAA depth buffers can occur if ReadPixels is called
&trans->b.b.layer_stride);
}
- trans->staging = &staging_depth->resource;
+ trans->staging = &staging_depth->buffer;
buf = trans->staging;
} else if (use_staging_texture) {
struct pipe_resource resource;
PRINT_ERR("failed to create temporary texture to hold untiled copy\n");
goto fail_trans;
}
- trans->staging = &staging->resource;
+ trans->staging = &staging->buffer;
/* Just get the strides. */
si_texture_get_offset(sctx->screen, staging, 0, NULL,
offset = si_texture_get_offset(sctx->screen, rtex, level, box,
&trans->b.b.stride,
&trans->b.b.layer_stride);
- buf = &rtex->resource;
+ buf = &rtex->buffer;
}
if (!(map = si_buffer_map_sync_with_rings(sctx, buf, usage)))
struct r600_texture *rtex = (struct r600_texture*)texture;
if ((transfer->usage & PIPE_TRANSFER_WRITE) && rtransfer->staging) {
- if (rtex->is_depth && rtex->resource.b.b.nr_samples <= 1) {
+ if (rtex->is_depth && rtex->buffer.b.b.nr_samples <= 1) {
ctx->resource_copy_region(ctx, texture, transfer->level,
transfer->box.x, transfer->box.y, transfer->box.z,
&rtransfer->staging->b.b, transfer->level,
/* Remove zombie textures (textures kept alive by this array only). */
for (i = 0; i < ARRAY_SIZE(sctx->dcc_stats); i++)
if (sctx->dcc_stats[i].tex &&
- sctx->dcc_stats[i].tex->resource.b.b.reference.count == 1)
+ sctx->dcc_stats[i].tex->buffer.b.b.reference.count == 1)
vi_dcc_clean_up_context_slot(sctx, i);
/* Find the texture. */
/* The intent is to use this with shared displayable back buffers,
* but it's not strictly limited only to them.
*/
- if (!tex->resource.b.is_shared ||
- !(tex->resource.external_usage & PIPE_HANDLE_USAGE_EXPLICIT_FLUSH) ||
- tex->resource.b.b.target != PIPE_TEXTURE_2D ||
- tex->resource.b.b.last_level > 0 ||
+ if (!tex->buffer.b.is_shared ||
+ !(tex->buffer.external_usage & PIPE_HANDLE_USAGE_EXPLICIT_FLUSH) ||
+ tex->buffer.b.b.target != PIPE_TEXTURE_2D ||
+ tex->buffer.b.b.last_level > 0 ||
!tex->surface.dcc_size)
return;
/* Compute the approximate number of fullscreen draws. */
tex->ps_draw_ratio =
result.pipeline_statistics.ps_invocations /
- (tex->resource.b.b.width0 * tex->resource.b.b.height0);
+ (tex->buffer.b.b.width0 * tex->buffer.b.b.height0);
sctx->last_tex_ps_draw_ratio = tex->ps_draw_ratio;
disable = tex->dcc_separate_buffer &&
*/
pb_reference(&buf, memobj->buf);
- rtex->resource.b.is_shared = true;
- rtex->resource.external_usage = PIPE_HANDLE_USAGE_READ_WRITE;
+ rtex->buffer.b.is_shared = true;
+ rtex->buffer.external_usage = PIPE_HANDLE_USAGE_READ_WRITE;
si_apply_opaque_metadata(sscreen, rtex, &metadata);
- return &rtex->resource.b.b;
+ return &rtex->buffer.b.b;
}
static bool si_check_resource_capability(struct pipe_screen *screen,
continue;
surfaces[i] = & resources[i]->surface;
- pbs[i] = &resources[i]->resource.buf;
+ pbs[i] = &resources[i]->buffer.buf;
}
si_vid_join_surfaces(ctx, pbs, surfaces);
continue;
/* reset the address */
- resources[i]->resource.gpu_address = ctx->ws->buffer_get_virtual_address(
- resources[i]->resource.buf);
+ resources[i]->buffer.gpu_address = ctx->ws->buffer_get_virtual_address(
+ resources[i]->buffer.buf);
}
vidtemplate.height *= array_size;
si_uvd_set_dt_surfaces(msg, &luma->surface, (chroma) ? &chroma->surface : NULL, type);
- return luma->resource.buf;
+ return luma->buffer.buf;
}
/* get the radeon resources for VCE */
struct r600_texture *res = (struct r600_texture *)resource;
if (handle)
- *handle = res->resource.buf;
+ *handle = res->buffer.buf;
if (surface)
*surface = &res->surface;