/*
* Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ * All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
*/
#include "radeonsi/si_pipe.h"
-#include "r600_cs.h"
#include "r600_query.h"
#include "util/u_format.h"
#include "util/u_log.h"
#include "amd/common/sid.h"
static enum radeon_surf_mode
-r600_choose_tiling(struct si_screen *sscreen,
- const struct pipe_resource *templ);
+si_choose_tiling(struct si_screen *sscreen,
+ const struct pipe_resource *templ);
-bool si_prepare_for_dma_blit(struct r600_common_context *rctx,
+bool si_prepare_for_dma_blit(struct si_context *sctx,
struct r600_texture *rdst,
unsigned dst_level, unsigned dstx,
unsigned dsty, unsigned dstz,
unsigned src_level,
const struct pipe_box *src_box)
{
- if (!rctx->dma.cs)
+ if (!sctx->b.dma_cs)
return false;
if (rdst->surface.bpe != rsrc->surface.bpe)
src_box->height, src_box->depth))
return false;
- si_texture_discard_cmask(rctx->screen, rdst);
+ si_texture_discard_cmask(sctx->screen, rdst);
}
/* All requirements are met. Prepare textures for SDMA. */
if (rsrc->cmask.size && rsrc->dirty_level_mask & (1 << src_level))
- rctx->b.flush_resource(&rctx->b, &rsrc->resource.b.b);
+ sctx->b.b.flush_resource(&sctx->b.b, &rsrc->resource.b.b);
assert(!(rsrc->dirty_level_mask & (1 << src_level)));
assert(!(rdst->dirty_level_mask & (1 << dst_level)));
}
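/* Summary of the checks above: an SDMA blit is attempted only when a DMA
 * command stream exists and both surfaces agree on bytes-per-element; any
 * fast-clear metadata on the copied levels is dealt with first (CMASK
 * discarded on the destination, a decompressing flush_resource on the
 * source), which the two asserts then double-check. */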
/* Same as resource_copy_region, except that both upsampling and downsampling are allowed. */
-static void r600_copy_region_with_blit(struct pipe_context *pipe,
- struct pipe_resource *dst,
- unsigned dst_level,
- unsigned dstx, unsigned dsty, unsigned dstz,
- struct pipe_resource *src,
- unsigned src_level,
- const struct pipe_box *src_box)
+static void si_copy_region_with_blit(struct pipe_context *pipe,
+ struct pipe_resource *dst,
+ unsigned dst_level,
+ unsigned dstx, unsigned dsty, unsigned dstz,
+ struct pipe_resource *src,
+ unsigned src_level,
+ const struct pipe_box *src_box)
{
struct pipe_blit_info blit;
}
/* Copy from a full GPU texture to a transfer's staging one. */
-static void r600_copy_to_staging_texture(struct pipe_context *ctx, struct r600_transfer *rtransfer)
+static void si_copy_to_staging_texture(struct pipe_context *ctx, struct r600_transfer *rtransfer)
{
- struct r600_common_context *rctx = (struct r600_common_context*)ctx;
+ struct si_context *sctx = (struct si_context*)ctx;
struct pipe_transfer *transfer = (struct pipe_transfer*)rtransfer;
struct pipe_resource *dst = &rtransfer->staging->b.b;
struct pipe_resource *src = transfer->resource;
if (src->nr_samples > 1) {
- r600_copy_region_with_blit(ctx, dst, 0, 0, 0, 0,
+ si_copy_region_with_blit(ctx, dst, 0, 0, 0, 0,
src, transfer->level, &transfer->box);
return;
}
- rctx->dma_copy(ctx, dst, 0, 0, 0, 0, src, transfer->level,
+ sctx->b.dma_copy(ctx, dst, 0, 0, 0, 0, src, transfer->level,
&transfer->box);
}
/* Copy from a transfer's staging texture to a full GPU one. */
-static void r600_copy_from_staging_texture(struct pipe_context *ctx, struct r600_transfer *rtransfer)
+static void si_copy_from_staging_texture(struct pipe_context *ctx, struct r600_transfer *rtransfer)
{
- struct r600_common_context *rctx = (struct r600_common_context*)ctx;
+ struct si_context *sctx = (struct si_context*)ctx;
struct pipe_transfer *transfer = (struct pipe_transfer*)rtransfer;
struct pipe_resource *dst = transfer->resource;
struct pipe_resource *src = &rtransfer->staging->b.b;
u_box_3d(0, 0, 0, transfer->box.width, transfer->box.height, transfer->box.depth, &sbox);
if (dst->nr_samples > 1) {
- r600_copy_region_with_blit(ctx, dst, transfer->level,
+ si_copy_region_with_blit(ctx, dst, transfer->level,
transfer->box.x, transfer->box.y, transfer->box.z,
src, 0, &sbox);
return;
}
- rctx->dma_copy(ctx, dst, transfer->level,
+ sctx->b.dma_copy(ctx, dst, transfer->level,
transfer->box.x, transfer->box.y, transfer->box.z,
src, 0, &sbox);
}
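/* Both staging helpers special-case nr_samples > 1: the SDMA engine
 * presumably cannot read or write MSAA surfaces, so multisampled copies
 * take the blit path (si_copy_region_with_blit), which per its comment
 * also handles up/downsampling. */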
-static unsigned r600_texture_get_offset(struct si_screen *sscreen,
- struct r600_texture *rtex, unsigned level,
- const struct pipe_box *box,
- unsigned *stride,
- unsigned *layer_stride)
+static unsigned si_texture_get_offset(struct si_screen *sscreen,
+ struct r600_texture *rtex, unsigned level,
+ const struct pipe_box *box,
+ unsigned *stride,
+ unsigned *layer_stride)
{
if (sscreen->info.chip_class >= GFX9) {
*stride = rtex->surface.u.gfx9.surf_pitch * rtex->surface.bpe;
}
}
-static int r600_init_surface(struct si_screen *sscreen,
- struct radeon_surf *surface,
- const struct pipe_resource *ptex,
- enum radeon_surf_mode array_mode,
- unsigned pitch_in_bytes_override,
- unsigned offset,
- bool is_imported,
- bool is_scanout,
- bool is_flushed_depth,
- bool tc_compatible_htile)
+static int si_init_surface(struct si_screen *sscreen,
+ struct radeon_surf *surface,
+ const struct pipe_resource *ptex,
+ enum radeon_surf_mode array_mode,
+ unsigned pitch_in_bytes_override,
+ unsigned offset,
+ bool is_imported,
+ bool is_scanout,
+ bool is_flushed_depth,
+ bool tc_compatible_htile)
{
const struct util_format_description *desc =
util_format_description(ptex->format);
-bpe = 4; /* stencil is allocated separately on evergreen */
+bpe = 4; /* stencil is allocated separately */
} else {
bpe = util_format_get_blocksize(ptex->format);
- assert(util_is_power_of_two(bpe));
+ assert(util_is_power_of_two_or_zero(bpe));
}
if (!is_flushed_depth && is_depth) {
}
if (sscreen->info.chip_class >= VI &&
- (ptex->flags & R600_RESOURCE_FLAG_DISABLE_DCC ||
+ (ptex->flags & SI_RESOURCE_FLAG_DISABLE_DCC ||
ptex->format == PIPE_FORMAT_R9G9B9E5_FLOAT ||
/* DCC MSAA array textures are disallowed due to incomplete clear impl. */
(ptex->nr_samples >= 2 &&
flags |= RADEON_SURF_SHAREABLE;
if (is_imported)
flags |= RADEON_SURF_IMPORTED | RADEON_SURF_SHAREABLE;
- if (!(ptex->flags & R600_RESOURCE_FLAG_FORCE_TILING))
+ if (!(ptex->flags & SI_RESOURCE_FLAG_FORCE_TILING))
flags |= RADEON_SURF_OPTIMIZE_FOR_SPACE;
r = sscreen->ws->surface_init(sscreen->ws, ptex, flags, bpe,
return 0;
}
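/* Flag summary from the hunks above: DCC is disabled up front on VI+ for
 * cases the driver cannot yet handle (explicit SI_RESOURCE_FLAG_DISABLE_DCC,
 * R9G9B9E5, and MSAA array textures with the incomplete clear path);
 * imported BOs get RADEON_SURF_IMPORTED | RADEON_SURF_SHAREABLE; and
 * OPTIMIZE_FOR_SPACE is requested unless tiling is forced. */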
-static void r600_texture_init_metadata(struct si_screen *sscreen,
- struct r600_texture *rtex,
- struct radeon_bo_metadata *metadata)
+static void si_texture_init_metadata(struct si_screen *sscreen,
+ struct r600_texture *rtex,
+ struct radeon_bo_metadata *metadata)
{
struct radeon_surf *surface = &rtex->surface;
}
}
-static void r600_surface_import_metadata(struct si_screen *sscreen,
- struct radeon_surf *surf,
- struct radeon_bo_metadata *metadata,
- enum radeon_surf_mode *array_mode,
- bool *is_scanout)
+static void si_surface_import_metadata(struct si_screen *sscreen,
+ struct radeon_surf *surf,
+ struct radeon_bo_metadata *metadata,
+ enum radeon_surf_mode *array_mode,
+ bool *is_scanout)
{
if (sscreen->info.chip_class >= GFX9) {
if (metadata->u.gfx9.swizzle_mode > 0)
}
}
-void si_eliminate_fast_color_clear(struct r600_common_context *rctx,
+void si_eliminate_fast_color_clear(struct si_context *sctx,
struct r600_texture *rtex)
{
- struct si_screen *sscreen = rctx->screen;
- struct pipe_context *ctx = &rctx->b;
+ struct si_screen *sscreen = sctx->screen;
+ struct pipe_context *ctx = &sctx->b.b;
if (ctx == sscreen->aux_context)
mtx_lock(&sscreen->aux_context_lock);
- unsigned n = rctx->num_decompress_calls;
+ unsigned n = sctx->b.num_decompress_calls;
ctx->flush_resource(ctx, &rtex->resource.b.b);
/* Flush only if any fast clear elimination took place. */
- if (n != rctx->num_decompress_calls)
+ if (n != sctx->b.num_decompress_calls)
ctx->flush(ctx, NULL, 0);
if (ctx == sscreen->aux_context)
p_atomic_inc(&sscreen->compressed_colortex_counter);
}
-static bool r600_can_disable_dcc(struct r600_texture *rtex)
+static bool si_can_disable_dcc(struct r600_texture *rtex)
{
/* We can't disable DCC if it can be written by another process. */
return rtex->dcc_offset &&
!(rtex->resource.external_usage & PIPE_HANDLE_USAGE_WRITE));
}
-static bool r600_texture_discard_dcc(struct si_screen *sscreen,
- struct r600_texture *rtex)
+static bool si_texture_discard_dcc(struct si_screen *sscreen,
+ struct r600_texture *rtex)
{
- if (!r600_can_disable_dcc(rtex))
+ if (!si_can_disable_dcc(rtex))
return false;
assert(rtex->dcc_separate_buffer == NULL);
* context 1 & 2 read garbage, because DCC is disabled, yet there are
* compressed tiled
*
- * \param rctx the current context if you have one, or rscreen->aux_context
+ * \param sctx the current context if you have one, or sscreen->aux_context
* if you don't.
*/
-bool si_texture_disable_dcc(struct r600_common_context *rctx,
+bool si_texture_disable_dcc(struct si_context *sctx,
struct r600_texture *rtex)
{
- struct si_screen *sscreen = rctx->screen;
+ struct si_screen *sscreen = sctx->screen;
- if (!r600_can_disable_dcc(rtex))
+ if (!si_can_disable_dcc(rtex))
return false;
- if (&rctx->b == sscreen->aux_context)
+ if (&sctx->b.b == sscreen->aux_context)
mtx_lock(&sscreen->aux_context_lock);
/* Decompress DCC. */
- rctx->decompress_dcc(&rctx->b, rtex);
- rctx->b.flush(&rctx->b, NULL, 0);
+ si_decompress_dcc(sctx, rtex);
+ sctx->b.b.flush(&sctx->b.b, NULL, 0);
- if (&rctx->b == sscreen->aux_context)
+ if (&sctx->b.b == sscreen->aux_context)
mtx_unlock(&sscreen->aux_context_lock);
- return r600_texture_discard_dcc(sscreen, rtex);
+ return si_texture_discard_dcc(sscreen, rtex);
}
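/* Ordering matters here: DCC is decompressed and the command stream is
 * flushed before the metadata is actually discarded, so that (per the
 * comment fragment above) other contexts never observe DCC-disabled
 * metadata while compressed tiles are still in flight. */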
-static void r600_reallocate_texture_inplace(struct r600_common_context *rctx,
- struct r600_texture *rtex,
- unsigned new_bind_flag,
- bool invalidate_storage)
+static void si_reallocate_texture_inplace(struct si_context *sctx,
+ struct r600_texture *rtex,
+ unsigned new_bind_flag,
+ bool invalidate_storage)
{
- struct pipe_screen *screen = rctx->b.screen;
+ struct pipe_screen *screen = sctx->b.b.screen;
struct r600_texture *new_tex;
struct pipe_resource templ = rtex->resource.b.b;
unsigned i;
return;
/* This fails with MSAA, depth, and compressed textures. */
- if (r600_choose_tiling(rctx->screen, &templ) !=
+ if (si_choose_tiling(sctx->screen, &templ) !=
RADEON_SURF_MODE_LINEAR_ALIGNED)
return;
}
u_minify(templ.width0, i), u_minify(templ.height0, i),
util_num_layers(&templ, i), &box);
- rctx->dma_copy(&rctx->b, &new_tex->resource.b.b, i, 0, 0, 0,
+ sctx->b.dma_copy(&sctx->b.b, &new_tex->resource.b.b, i, 0, 0, 0,
&rtex->resource.b.b, i, &box);
}
}
if (new_bind_flag == PIPE_BIND_LINEAR) {
- si_texture_discard_cmask(rctx->screen, rtex);
- r600_texture_discard_dcc(rctx->screen, rtex);
+ si_texture_discard_cmask(sctx->screen, rtex);
+ si_texture_discard_dcc(sctx->screen, rtex);
}
/* Replace the structure fields of rtex. */
r600_texture_reference(&new_tex, NULL);
- p_atomic_inc(&rctx->screen->dirty_tex_counter);
+ p_atomic_inc(&sctx->screen->dirty_tex_counter);
}
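/* Rough shape of si_reallocate_texture_inplace: create a new texture from
 * the same template plus new_bind_flag, SDMA-copy every mip level over
 * unless invalidate_storage says the contents may be dropped, discard
 * CMASK/DCC when the new layout is linear, then splice the new texture's
 * fields into rtex so existing pipe_resource references remain valid. */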
static uint32_t si_get_bo_metadata_word1(struct si_screen *sscreen)
rtex->dcc_offset = 0;
}
-static boolean r600_texture_get_handle(struct pipe_screen* screen,
- struct pipe_context *ctx,
- struct pipe_resource *resource,
- struct winsys_handle *whandle,
- unsigned usage)
+static boolean si_texture_get_handle(struct pipe_screen* screen,
+ struct pipe_context *ctx,
+ struct pipe_resource *resource,
+ struct winsys_handle *whandle,
+ unsigned usage)
{
struct si_screen *sscreen = (struct si_screen*)screen;
- struct r600_common_context *rctx;
+ struct si_context *sctx;
struct r600_resource *res = (struct r600_resource*)resource;
struct r600_texture *rtex = (struct r600_texture*)resource;
struct radeon_bo_metadata metadata;
bool flush = false;
ctx = threaded_context_unwrap_sync(ctx);
- rctx = (struct r600_common_context*)(ctx ? ctx : sscreen->aux_context);
+ sctx = (struct si_context*)(ctx ? ctx : sscreen->aux_context);
if (resource->target != PIPE_BUFFER) {
/* This is not supported now, but it might be required for OpenCL
if (sscreen->ws->buffer_is_suballocated(res->buf) ||
rtex->surface.tile_swizzle ||
(rtex->resource.flags & RADEON_FLAG_NO_INTERPROCESS_SHARING &&
+ sscreen->info.has_local_buffers &&
whandle->type != DRM_API_HANDLE_TYPE_KMS)) {
assert(!res->b.is_shared);
- r600_reallocate_texture_inplace(rctx, rtex,
+ si_reallocate_texture_inplace(sctx, rtex,
PIPE_BIND_SHARED, false);
flush = true;
assert(res->b.b.bind & PIPE_BIND_SHARED);
* access.
*/
if (usage & PIPE_HANDLE_USAGE_WRITE && rtex->dcc_offset) {
- if (si_texture_disable_dcc(rctx, rtex)) {
+ if (si_texture_disable_dcc(sctx, rtex)) {
update_metadata = true;
/* si_texture_disable_dcc flushes the context */
flush = false;
if (!(usage & PIPE_HANDLE_USAGE_EXPLICIT_FLUSH) &&
(rtex->cmask.size || rtex->dcc_offset)) {
/* Eliminate fast clear (both CMASK and DCC) */
- si_eliminate_fast_color_clear(rctx, rtex);
+ si_eliminate_fast_color_clear(sctx, rtex);
-/* eliminate_fast_color_clear flushes the context */
+/* si_eliminate_fast_color_clear flushes the context */
flush = false;
/* Set metadata. */
if (!res->b.is_shared || update_metadata) {
- r600_texture_init_metadata(sscreen, rtex, &metadata);
+ si_texture_init_metadata(sscreen, rtex, &metadata);
si_query_opaque_metadata(sscreen, rtex, &metadata);
sscreen->ws->buffer_set_metadata(res->buf, &metadata);
/* Move a suballocated buffer into a non-suballocated allocation. */
if (sscreen->ws->buffer_is_suballocated(res->buf) ||
/* A DMABUF export always fails if the BO is local. */
- rtex->resource.flags & RADEON_FLAG_NO_INTERPROCESS_SHARING) {
+ (rtex->resource.flags & RADEON_FLAG_NO_INTERPROCESS_SHARING &&
+ sscreen->info.has_local_buffers)) {
assert(!res->b.is_shared);
/* Allocate a new buffer with PIPE_BIND_SHARED. */
/* Copy the old buffer contents to the new one. */
struct pipe_box box;
u_box_1d(0, newb->width0, &box);
- rctx->b.resource_copy_region(&rctx->b, newb, 0, 0, 0, 0,
+ sctx->b.b.resource_copy_region(&sctx->b.b, newb, 0, 0, 0, 0,
&res->b.b, 0, &box);
flush = true;
/* Move the new buffer storage to the old pipe_resource. */
- si_replace_buffer_storage(&rctx->b, &res->b.b, newb);
+ si_replace_buffer_storage(&sctx->b.b, &res->b.b, newb);
pipe_resource_reference(&newb, NULL);
assert(res->b.b.bind & PIPE_BIND_SHARED);
}
if (flush)
- rctx->b.flush(&rctx->b, NULL, 0);
+ sctx->b.b.flush(&sctx->b.b, NULL, 0);
if (res->b.is_shared) {
/* USAGE_EXPLICIT_FLUSH must be cleared if at least one user
slice_size, whandle);
}
-static void r600_texture_destroy(struct pipe_screen *screen,
- struct pipe_resource *ptex)
+static void si_texture_destroy(struct pipe_screen *screen,
+ struct pipe_resource *ptex)
{
struct r600_texture *rtex = (struct r600_texture*)ptex;
struct r600_resource *resource = &rtex->resource;
FREE(rtex);
}
-static const struct u_resource_vtbl r600_texture_vtbl;
+static const struct u_resource_vtbl si_texture_vtbl;
/* The number of samples can be specified independently of the texture. */
void si_texture_get_fmask_info(struct si_screen *sscreen,
if (sscreen->info.chip_class >= GFX9) {
out->alignment = rtex->surface.u.gfx9.fmask_alignment;
out->size = rtex->surface.u.gfx9.fmask_size;
+ out->tile_swizzle = rtex->surface.u.gfx9.fmask_tile_swizzle;
return;
}
bpe = 4;
break;
default:
- R600_ERR("Invalid sample count for FMASK allocation.\n");
+ PRINT_ERR("Invalid sample count for FMASK allocation.\n");
return;
}
if (sscreen->ws->surface_init(sscreen->ws, &templ, flags, bpe,
RADEON_SURF_MODE_2D, &fmask)) {
- R600_ERR("Got error in surface_init while allocating FMASK.\n");
+ PRINT_ERR("Got error in surface_init while allocating FMASK.\n");
return;
}
out->size = fmask.surf_size;
}
-static void r600_texture_allocate_fmask(struct si_screen *sscreen,
- struct r600_texture *rtex)
+static void si_texture_allocate_fmask(struct si_screen *sscreen,
+ struct r600_texture *rtex)
{
si_texture_get_fmask_info(sscreen, rtex,
rtex->resource.b.b.nr_samples, &rtex->fmask);
align(slice_bytes, base_align);
}
-static void r600_texture_allocate_cmask(struct si_screen *sscreen,
- struct r600_texture *rtex)
+static void si_texture_allocate_cmask(struct si_screen *sscreen,
+ struct r600_texture *rtex)
{
si_texture_get_cmask_info(sscreen, rtex, &rtex->cmask);
rtex->cb_color_info |= S_028C70_FAST_CLEAR(1);
}
-static void r600_texture_get_htile_size(struct si_screen *sscreen,
- struct r600_texture *rtex)
+static void si_texture_get_htile_size(struct si_screen *sscreen,
+ struct r600_texture *rtex)
{
unsigned cl_width, cl_height, width, height;
unsigned slice_elements, slice_bytes, pipe_interleave_bytes, base_align;
align(slice_bytes, base_align);
}
-static void r600_texture_allocate_htile(struct si_screen *sscreen,
- struct r600_texture *rtex)
+static void si_texture_allocate_htile(struct si_screen *sscreen,
+ struct r600_texture *rtex)
{
if (sscreen->info.chip_class <= VI && !rtex->tc_compatible_htile)
- r600_texture_get_htile_size(sscreen, rtex);
+ si_texture_get_htile_size(sscreen, rtex);
if (!rtex->surface.htile_size)
return;
-/* Common processing for r600_texture_create and r600_texture_from_handle */
+/* Common processing for si_texture_create and si_texture_from_handle */
static struct r600_texture *
-r600_texture_create_object(struct pipe_screen *screen,
- const struct pipe_resource *base,
- struct pb_buffer *buf,
- struct radeon_surf *surface)
+si_texture_create_object(struct pipe_screen *screen,
+ const struct pipe_resource *base,
+ struct pb_buffer *buf,
+ struct radeon_surf *surface)
{
struct r600_texture *rtex;
struct r600_resource *resource;
resource = &rtex->resource;
resource->b.b = *base;
resource->b.b.next = NULL;
- resource->b.vtbl = &r600_texture_vtbl;
+ resource->b.vtbl = &si_texture_vtbl;
pipe_reference_init(&resource->b.b.reference, 1);
resource->b.b.screen = screen;
rtex->can_sample_s = !rtex->surface.u.legacy.stencil_adjusted;
}
- if (!(base->flags & (R600_RESOURCE_FLAG_TRANSFER |
- R600_RESOURCE_FLAG_FLUSHED_DEPTH))) {
+ if (!(base->flags & (SI_RESOURCE_FLAG_TRANSFER |
+ SI_RESOURCE_FLAG_FLUSHED_DEPTH))) {
rtex->db_compatible = true;
if (!(sscreen->debug_flags & DBG(NO_HYPERZ)))
- r600_texture_allocate_htile(sscreen, rtex);
+ si_texture_allocate_htile(sscreen, rtex);
}
} else {
- if (base->nr_samples > 1) {
- if (!buf) {
- r600_texture_allocate_fmask(sscreen, rtex);
- r600_texture_allocate_cmask(sscreen, rtex);
- rtex->cmask_buffer = &rtex->resource;
- }
+ if (base->nr_samples > 1 &&
+ !buf &&
+ !(sscreen->debug_flags & DBG(NO_FMASK))) {
+ si_texture_allocate_fmask(sscreen, rtex);
+ si_texture_allocate_cmask(sscreen, rtex);
+ rtex->cmask_buffer = &rtex->resource;
+
if (!rtex->fmask.size || !rtex->cmask.size) {
FREE(rtex);
return NULL;
}
static enum radeon_surf_mode
-r600_choose_tiling(struct si_screen *sscreen,
+si_choose_tiling(struct si_screen *sscreen,
const struct pipe_resource *templ)
{
const struct util_format_description *desc = util_format_description(templ->format);
- bool force_tiling = templ->flags & R600_RESOURCE_FLAG_FORCE_TILING;
+ bool force_tiling = templ->flags & SI_RESOURCE_FLAG_FORCE_TILING;
bool is_depth_stencil = util_format_is_depth_or_stencil(templ->format) &&
- !(templ->flags & R600_RESOURCE_FLAG_FLUSHED_DEPTH);
+ !(templ->flags & SI_RESOURCE_FLAG_FLUSHED_DEPTH);
/* MSAA resources must be 2D tiled. */
if (templ->nr_samples > 1)
return RADEON_SURF_MODE_2D;
/* Transfer resources should be linear. */
- if (templ->flags & R600_RESOURCE_FLAG_TRANSFER)
+ if (templ->flags & SI_RESOURCE_FLAG_TRANSFER)
return RADEON_SURF_MODE_LINEAR_ALIGNED;
/* Avoid Z/S decompress blits by forcing TC-compatible HTILE on VI,
{
struct si_screen *sscreen = (struct si_screen*)screen;
struct radeon_surf surface = {0};
- bool is_flushed_depth = templ->flags & R600_RESOURCE_FLAG_FLUSHED_DEPTH;
+ bool is_flushed_depth = templ->flags & SI_RESOURCE_FLAG_FLUSHED_DEPTH;
bool tc_compatible_htile =
sscreen->info.chip_class >= VI &&
(templ->flags & PIPE_RESOURCE_FLAG_TEXTURING_MORE_LIKELY) &&
int r;
- r = r600_init_surface(sscreen, &surface, templ,
- r600_choose_tiling(sscreen, templ), 0, 0,
+ r = si_init_surface(sscreen, &surface, templ,
+ si_choose_tiling(sscreen, templ), 0, 0,
false, false, is_flushed_depth,
tc_compatible_htile);
if (r) {
}
return (struct pipe_resource *)
- r600_texture_create_object(screen, templ, NULL, &surface);
+ si_texture_create_object(screen, templ, NULL, &surface);
}
-static struct pipe_resource *r600_texture_from_handle(struct pipe_screen *screen,
- const struct pipe_resource *templ,
- struct winsys_handle *whandle,
- unsigned usage)
+static struct pipe_resource *si_texture_from_handle(struct pipe_screen *screen,
+ const struct pipe_resource *templ,
+ struct winsys_handle *whandle,
+ unsigned usage)
{
struct si_screen *sscreen = (struct si_screen*)screen;
struct pb_buffer *buf = NULL;
return NULL;
sscreen->ws->buffer_get_metadata(buf, &metadata);
- r600_surface_import_metadata(sscreen, &surface, &metadata,
+ si_surface_import_metadata(sscreen, &surface, &metadata,
&array_mode, &is_scanout);
- r = r600_init_surface(sscreen, &surface, templ, array_mode, stride,
+ r = si_init_surface(sscreen, &surface, templ, array_mode, stride,
offset, true, is_scanout, false, false);
if (r) {
return NULL;
}
- rtex = r600_texture_create_object(screen, templ, buf, &surface);
+ rtex = si_texture_create_object(screen, templ, buf, &surface);
if (!rtex)
return NULL;
resource.nr_samples = texture->nr_samples;
resource.usage = staging ? PIPE_USAGE_STAGING : PIPE_USAGE_DEFAULT;
resource.bind = texture->bind & ~PIPE_BIND_DEPTH_STENCIL;
- resource.flags = texture->flags | R600_RESOURCE_FLAG_FLUSHED_DEPTH;
+ resource.flags = texture->flags | SI_RESOURCE_FLAG_FLUSHED_DEPTH;
if (staging)
- resource.flags |= R600_RESOURCE_FLAG_TRANSFER;
+ resource.flags |= SI_RESOURCE_FLAG_TRANSFER;
*flushed_depth_texture = (struct r600_texture *)ctx->screen->resource_create(ctx->screen, &resource);
if (*flushed_depth_texture == NULL) {
- R600_ERR("failed to create temporary texture to hold flushed depth\n");
+ PRINT_ERR("failed to create temporary texture to hold flushed depth\n");
return false;
}
return true;
* which is supposed to hold a subregion of the texture "orig" at the given
* mipmap level.
*/
-static void r600_init_temp_resource_from_box(struct pipe_resource *res,
- struct pipe_resource *orig,
- const struct pipe_box *box,
- unsigned level, unsigned flags)
+static void si_init_temp_resource_from_box(struct pipe_resource *res,
+ struct pipe_resource *orig,
+ const struct pipe_box *box,
+ unsigned level, unsigned flags)
{
memset(res, 0, sizeof(*res));
res->format = orig->format;
res->height0 = box->height;
res->depth0 = 1;
res->array_size = 1;
- res->usage = flags & R600_RESOURCE_FLAG_TRANSFER ? PIPE_USAGE_STAGING : PIPE_USAGE_DEFAULT;
+ res->usage = flags & SI_RESOURCE_FLAG_TRANSFER ? PIPE_USAGE_STAGING : PIPE_USAGE_DEFAULT;
res->flags = flags;
/* We must set the correct texture target and dimensions for a 3D box. */
}
}
-static bool r600_can_invalidate_texture(struct si_screen *sscreen,
- struct r600_texture *rtex,
- unsigned transfer_usage,
- const struct pipe_box *box)
+static bool si_can_invalidate_texture(struct si_screen *sscreen,
+ struct r600_texture *rtex,
+ unsigned transfer_usage,
+ const struct pipe_box *box)
{
return !rtex->resource.b.is_shared &&
!(transfer_usage & PIPE_TRANSFER_READ) &&
box->depth);
}
-static void r600_texture_invalidate_storage(struct r600_common_context *rctx,
- struct r600_texture *rtex)
+static void si_texture_invalidate_storage(struct si_context *sctx,
+ struct r600_texture *rtex)
{
- struct si_screen *sscreen = rctx->screen;
+ struct si_screen *sscreen = sctx->screen;
/* There is no point in discarding depth and tiled buffers. */
assert(!rtex->is_depth);
p_atomic_inc(&sscreen->dirty_tex_counter);
- rctx->num_alloc_tex_transfer_bytes += rtex->size;
+ sctx->b.num_alloc_tex_transfer_bytes += rtex->size;
}
-static void *r600_texture_transfer_map(struct pipe_context *ctx,
- struct pipe_resource *texture,
- unsigned level,
- unsigned usage,
- const struct pipe_box *box,
- struct pipe_transfer **ptransfer)
+static void *si_texture_transfer_map(struct pipe_context *ctx,
+ struct pipe_resource *texture,
+ unsigned level,
+ unsigned usage,
+ const struct pipe_box *box,
+ struct pipe_transfer **ptransfer)
{
- struct r600_common_context *rctx = (struct r600_common_context*)ctx;
+ struct si_context *sctx = (struct si_context*)ctx;
struct r600_texture *rtex = (struct r600_texture*)texture;
struct r600_transfer *trans;
struct r600_resource *buf;
char *map;
bool use_staging_texture = false;
- assert(!(texture->flags & R600_RESOURCE_FLAG_TRANSFER));
+ assert(!(texture->flags & SI_RESOURCE_FLAG_TRANSFER));
assert(box->width && box->height && box->depth);
/* Depth textures use staging unconditionally. */
* On dGPUs, the staging texture is always faster.
* Only count uploads that are at least 4x4 pixels large.
*/
- if (!rctx->screen->info.has_dedicated_vram &&
+ if (!sctx->screen->info.has_dedicated_vram &&
level == 0 &&
box->width >= 4 && box->height >= 4 &&
p_atomic_inc_return(&rtex->num_level0_transfers) == 10) {
bool can_invalidate =
- r600_can_invalidate_texture(rctx->screen, rtex,
+ si_can_invalidate_texture(sctx->screen, rtex,
usage, box);
- r600_reallocate_texture_inplace(rctx, rtex,
+ si_reallocate_texture_inplace(sctx, rtex,
PIPE_BIND_LINEAR,
can_invalidate);
}
rtex->resource.domains & RADEON_DOMAIN_VRAM ||
rtex->resource.flags & RADEON_FLAG_GTT_WC;
/* Write & linear only: */
- else if (si_rings_is_buffer_referenced(rctx, rtex->resource.buf,
- RADEON_USAGE_READWRITE) ||
- !rctx->ws->buffer_wait(rtex->resource.buf, 0,
+ else if (si_rings_is_buffer_referenced(sctx, rtex->resource.buf,
+ RADEON_USAGE_READWRITE) ||
+ !sctx->b.ws->buffer_wait(rtex->resource.buf, 0,
RADEON_USAGE_READWRITE)) {
/* It's busy. */
- if (r600_can_invalidate_texture(rctx->screen, rtex,
+ if (si_can_invalidate_texture(sctx->screen, rtex,
usage, box))
- r600_texture_invalidate_storage(rctx, rtex);
+ si_texture_invalidate_storage(sctx, rtex);
else
use_staging_texture = true;
}
*/
struct pipe_resource resource;
- r600_init_temp_resource_from_box(&resource, texture, box, level, 0);
+ si_init_temp_resource_from_box(&resource, texture, box, level, 0);
if (!si_init_flushed_depth_texture(ctx, &resource, &staging_depth)) {
- R600_ERR("failed to create temporary texture to hold untiled copy\n");
+ PRINT_ERR("failed to create temporary texture to hold untiled copy\n");
FREE(trans);
return NULL;
}
if (usage & PIPE_TRANSFER_READ) {
struct pipe_resource *temp = ctx->screen->resource_create(ctx->screen, &resource);
if (!temp) {
- R600_ERR("failed to create a temporary depth texture\n");
+ PRINT_ERR("failed to create a temporary depth texture\n");
FREE(trans);
return NULL;
}
- r600_copy_region_with_blit(ctx, temp, 0, 0, 0, 0, texture, level, box);
- rctx->blit_decompress_depth(ctx, (struct r600_texture*)temp, staging_depth,
- 0, 0, 0, box->depth, 0, 0);
+ si_copy_region_with_blit(ctx, temp, 0, 0, 0, 0, texture, level, box);
+ si_blit_decompress_depth(ctx, (struct r600_texture*)temp, staging_depth,
+ 0, 0, 0, box->depth, 0, 0);
pipe_resource_reference(&temp, NULL);
}
/* Just get the strides. */
- r600_texture_get_offset(rctx->screen, staging_depth, level, NULL,
+ si_texture_get_offset(sctx->screen, staging_depth, level, NULL,
&trans->b.b.stride,
&trans->b.b.layer_stride);
} else {
/* XXX: only readback the rectangle which is being mapped? */
/* XXX: when discard is true, no need to read back from depth texture */
if (!si_init_flushed_depth_texture(ctx, texture, &staging_depth)) {
- R600_ERR("failed to create temporary texture to hold untiled copy\n");
+ PRINT_ERR("failed to create temporary texture to hold untiled copy\n");
FREE(trans);
return NULL;
}
- rctx->blit_decompress_depth(ctx, rtex, staging_depth,
- level, level,
- box->z, box->z + box->depth - 1,
- 0, 0);
+ si_blit_decompress_depth(ctx, rtex, staging_depth,
+ level, level,
+ box->z, box->z + box->depth - 1,
+ 0, 0);
- offset = r600_texture_get_offset(rctx->screen, staging_depth,
+ offset = si_texture_get_offset(sctx->screen, staging_depth,
level, box,
&trans->b.b.stride,
&trans->b.b.layer_stride);
struct pipe_resource resource;
struct r600_texture *staging;
- r600_init_temp_resource_from_box(&resource, texture, box, level,
- R600_RESOURCE_FLAG_TRANSFER);
+ si_init_temp_resource_from_box(&resource, texture, box, level,
+ SI_RESOURCE_FLAG_TRANSFER);
resource.usage = (usage & PIPE_TRANSFER_READ) ?
PIPE_USAGE_STAGING : PIPE_USAGE_STREAM;
/* Create the temporary texture. */
staging = (struct r600_texture*)ctx->screen->resource_create(ctx->screen, &resource);
if (!staging) {
- R600_ERR("failed to create temporary texture to hold untiled copy\n");
+ PRINT_ERR("failed to create temporary texture to hold untiled copy\n");
FREE(trans);
return NULL;
}
trans->staging = &staging->resource;
/* Just get the strides. */
- r600_texture_get_offset(rctx->screen, staging, 0, NULL,
+ si_texture_get_offset(sctx->screen, staging, 0, NULL,
&trans->b.b.stride,
&trans->b.b.layer_stride);
if (usage & PIPE_TRANSFER_READ)
- r600_copy_to_staging_texture(ctx, trans);
+ si_copy_to_staging_texture(ctx, trans);
else
usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
buf = trans->staging;
} else {
/* the resource is mapped directly */
- offset = r600_texture_get_offset(rctx->screen, rtex, level, box,
+ offset = si_texture_get_offset(sctx->screen, rtex, level, box,
&trans->b.b.stride,
&trans->b.b.layer_stride);
buf = &rtex->resource;
}
- if (!(map = si_buffer_map_sync_with_rings(rctx, buf, usage))) {
+ if (!(map = si_buffer_map_sync_with_rings(sctx, buf, usage))) {
r600_resource_reference(&trans->staging, NULL);
FREE(trans);
return NULL;
return map + offset;
}
-static void r600_texture_transfer_unmap(struct pipe_context *ctx,
- struct pipe_transfer* transfer)
+static void si_texture_transfer_unmap(struct pipe_context *ctx,
+ struct pipe_transfer* transfer)
{
- struct r600_common_context *rctx = (struct r600_common_context*)ctx;
+ struct si_context *sctx = (struct si_context*)ctx;
struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;
struct pipe_resource *texture = transfer->resource;
struct r600_texture *rtex = (struct r600_texture*)texture;
&rtransfer->staging->b.b, transfer->level,
&transfer->box);
} else {
- r600_copy_from_staging_texture(ctx, rtransfer);
+ si_copy_from_staging_texture(ctx, rtransfer);
}
}
if (rtransfer->staging) {
- rctx->num_alloc_tex_transfer_bytes += rtransfer->staging->buf->size;
+ sctx->b.num_alloc_tex_transfer_bytes += rtransfer->staging->buf->size;
r600_resource_reference(&rtransfer->staging, NULL);
}
*
* The result is that the kernel memory manager is never a bottleneck.
*/
- if (rctx->num_alloc_tex_transfer_bytes > rctx->screen->info.gart_size / 4) {
- rctx->gfx.flush(rctx, PIPE_FLUSH_ASYNC, NULL);
- rctx->num_alloc_tex_transfer_bytes = 0;
+ if (sctx->b.num_alloc_tex_transfer_bytes > sctx->screen->info.gart_size / 4) {
+ si_flush_gfx_cs(sctx, PIPE_FLUSH_ASYNC, NULL);
+ sctx->b.num_alloc_tex_transfer_bytes = 0;
}
pipe_resource_reference(&transfer->resource, NULL);
FREE(transfer);
}
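/* The gart_size / 4 threshold above implements the comment's promise: once
 * a quarter of GART worth of transfer allocations has accumulated, an async
 * gfx flush lets the kernel reclaim the staging memory before it becomes a
 * bottleneck. */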
-static const struct u_resource_vtbl r600_texture_vtbl =
+static const struct u_resource_vtbl si_texture_vtbl =
{
NULL, /* get_handle */
- r600_texture_destroy, /* resource_destroy */
- r600_texture_transfer_map, /* transfer_map */
+ si_texture_destroy, /* resource_destroy */
+ si_texture_transfer_map, /* transfer_map */
u_default_transfer_flush_region, /* transfer_flush_region */
- r600_texture_transfer_unmap, /* transfer_unmap */
+ si_texture_transfer_unmap, /* transfer_unmap */
};
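/* get_handle is NULL in this vtbl because export is handled at the screen
 * level instead: si_init_screen_texture_functions below installs
 * si_texture_get_handle as sscreen->b.resource_get_handle. */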
/* DCC channel type categories within which formats can be reinterpreted
* while keeping the same DCC encoding. The swizzle must also match. */
enum dcc_channel_type {
- dcc_channel_float32,
- dcc_channel_uint32,
- dcc_channel_sint32,
- dcc_channel_float16,
- dcc_channel_uint16,
- dcc_channel_sint16,
+ dcc_channel_float,
+ /* uint and sint can be merged if we never use TC-compatible DCC clear
+ * encoding with the clear value of 1. */
+ dcc_channel_uint,
+ dcc_channel_sint,
dcc_channel_uint_10_10_10_2,
- dcc_channel_uint8,
- dcc_channel_sint8,
dcc_channel_incompatible,
};
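/* Net effect of the merge: channel width (32/16/8 bits) no longer splits
 * the category, so e.g. 8-bit and 16-bit unsigned formats now land in the
 * same dcc_channel_uint bucket.  What still distinguishes formats is
 * float vs. uint vs. sint, the 10_10_10_2 layout, and (per the comment
 * above) the swizzle. */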
switch (desc->channel[i].size) {
case 32:
- if (desc->channel[i].type == UTIL_FORMAT_TYPE_FLOAT)
- return dcc_channel_float32;
- if (desc->channel[i].type == UTIL_FORMAT_TYPE_UNSIGNED)
- return dcc_channel_uint32;
- return dcc_channel_sint32;
case 16:
+ case 8:
if (desc->channel[i].type == UTIL_FORMAT_TYPE_FLOAT)
- return dcc_channel_float16;
+ return dcc_channel_float;
if (desc->channel[i].type == UTIL_FORMAT_TYPE_UNSIGNED)
- return dcc_channel_uint16;
- return dcc_channel_sint16;
+ return dcc_channel_uint;
+ return dcc_channel_sint;
case 10:
return dcc_channel_uint_10_10_10_2;
- case 8:
- if (desc->channel[i].type == UTIL_FORMAT_TYPE_UNSIGNED)
- return dcc_channel_uint8;
- return dcc_channel_sint8;
default:
return dcc_channel_incompatible;
}
/* This can't be merged with the above function, because
* vi_dcc_formats_compatible should be called only when DCC is enabled. */
-void vi_disable_dcc_if_incompatible_format(struct r600_common_context *rctx,
+void vi_disable_dcc_if_incompatible_format(struct si_context *sctx,
struct pipe_resource *tex,
unsigned level,
enum pipe_format view_format)
struct r600_texture *rtex = (struct r600_texture *)tex;
if (vi_dcc_formats_are_incompatible(tex, level, view_format))
- if (!si_texture_disable_dcc(rctx, (struct r600_texture*)tex))
- rctx->decompress_dcc(&rctx->b, rtex);
+ if (!si_texture_disable_dcc(sctx, (struct r600_texture*)tex))
+ si_decompress_dcc(sctx, rtex);
}
struct pipe_surface *si_create_surface_custom(struct pipe_context *pipe,
return &surface->base;
}
-static struct pipe_surface *r600_create_surface(struct pipe_context *pipe,
- struct pipe_resource *tex,
- const struct pipe_surface *templ)
+static struct pipe_surface *si_create_surface(struct pipe_context *pipe,
+ struct pipe_resource *tex,
+ const struct pipe_surface *templ)
{
unsigned level = templ->u.tex.level;
unsigned width = u_minify(tex->width0, level);
width, height);
}
-static void r600_surface_destroy(struct pipe_context *pipe,
- struct pipe_surface *surface)
+static void si_surface_destroy(struct pipe_context *pipe,
+ struct pipe_surface *surface)
{
pipe_resource_reference(&surface->texture, NULL);
FREE(surface);
/* PIPELINE_STAT-BASED DCC ENABLEMENT FOR DISPLAYABLE SURFACES */
-static void vi_dcc_clean_up_context_slot(struct r600_common_context *rctx,
+static void vi_dcc_clean_up_context_slot(struct si_context *sctx,
int slot)
{
int i;
- if (rctx->dcc_stats[slot].query_active)
- vi_separate_dcc_stop_query(&rctx->b,
- rctx->dcc_stats[slot].tex);
+ if (sctx->b.dcc_stats[slot].query_active)
+ vi_separate_dcc_stop_query(sctx,
+ sctx->b.dcc_stats[slot].tex);
- for (i = 0; i < ARRAY_SIZE(rctx->dcc_stats[slot].ps_stats); i++)
- if (rctx->dcc_stats[slot].ps_stats[i]) {
- rctx->b.destroy_query(&rctx->b,
- rctx->dcc_stats[slot].ps_stats[i]);
- rctx->dcc_stats[slot].ps_stats[i] = NULL;
+ for (i = 0; i < ARRAY_SIZE(sctx->b.dcc_stats[slot].ps_stats); i++)
+ if (sctx->b.dcc_stats[slot].ps_stats[i]) {
+ sctx->b.b.destroy_query(&sctx->b.b,
+ sctx->b.dcc_stats[slot].ps_stats[i]);
+ sctx->b.dcc_stats[slot].ps_stats[i] = NULL;
}
- r600_texture_reference(&rctx->dcc_stats[slot].tex, NULL);
+ r600_texture_reference(&sctx->b.dcc_stats[slot].tex, NULL);
}
/**
* Return the per-context slot where DCC statistics queries for the texture live.
*/
-static unsigned vi_get_context_dcc_stats_index(struct r600_common_context *rctx,
+static unsigned vi_get_context_dcc_stats_index(struct si_context *sctx,
struct r600_texture *tex)
{
int i, empty_slot = -1;
/* Remove zombie textures (textures kept alive by this array only). */
- for (i = 0; i < ARRAY_SIZE(rctx->dcc_stats); i++)
- if (rctx->dcc_stats[i].tex &&
- rctx->dcc_stats[i].tex->resource.b.b.reference.count == 1)
- vi_dcc_clean_up_context_slot(rctx, i);
+ for (i = 0; i < ARRAY_SIZE(sctx->b.dcc_stats); i++)
+ if (sctx->b.dcc_stats[i].tex &&
+ sctx->b.dcc_stats[i].tex->resource.b.b.reference.count == 1)
+ vi_dcc_clean_up_context_slot(sctx, i);
/* Find the texture. */
- for (i = 0; i < ARRAY_SIZE(rctx->dcc_stats); i++) {
+ for (i = 0; i < ARRAY_SIZE(sctx->b.dcc_stats); i++) {
/* Return if found. */
- if (rctx->dcc_stats[i].tex == tex) {
- rctx->dcc_stats[i].last_use_timestamp = os_time_get();
+ if (sctx->b.dcc_stats[i].tex == tex) {
+ sctx->b.dcc_stats[i].last_use_timestamp = os_time_get();
return i;
}
/* Record the first seen empty slot. */
- if (empty_slot == -1 && !rctx->dcc_stats[i].tex)
+ if (empty_slot == -1 && !sctx->b.dcc_stats[i].tex)
empty_slot = i;
}
int oldest_slot = 0;
/* Find the oldest slot. */
- for (i = 1; i < ARRAY_SIZE(rctx->dcc_stats); i++)
- if (rctx->dcc_stats[oldest_slot].last_use_timestamp >
- rctx->dcc_stats[i].last_use_timestamp)
+ for (i = 1; i < ARRAY_SIZE(sctx->b.dcc_stats); i++)
+ if (sctx->b.dcc_stats[oldest_slot].last_use_timestamp >
+ sctx->b.dcc_stats[i].last_use_timestamp)
oldest_slot = i;
/* Clean up the oldest slot. */
- vi_dcc_clean_up_context_slot(rctx, oldest_slot);
+ vi_dcc_clean_up_context_slot(sctx, oldest_slot);
empty_slot = oldest_slot;
}
/* Add the texture to the new slot. */
- r600_texture_reference(&rctx->dcc_stats[empty_slot].tex, tex);
- rctx->dcc_stats[empty_slot].last_use_timestamp = os_time_get();
+ r600_texture_reference(&sctx->b.dcc_stats[empty_slot].tex, tex);
+ sctx->b.dcc_stats[empty_slot].last_use_timestamp = os_time_get();
return empty_slot;
}
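/* Slot selection is effectively a small LRU cache: zombie entries
 * (textures kept alive only by this array) are reaped first, then a
 * matching or empty slot is preferred, and otherwise the slot with the
 * oldest last_use_timestamp is evicted. */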
static struct pipe_query *
-vi_create_resuming_pipestats_query(struct pipe_context *ctx)
+vi_create_resuming_pipestats_query(struct si_context *sctx)
{
- struct r600_query_hw *query = (struct r600_query_hw*)
- ctx->create_query(ctx, PIPE_QUERY_PIPELINE_STATISTICS, 0);
+ struct si_query_hw *query = (struct si_query_hw*)
+ sctx->b.b.create_query(&sctx->b.b, PIPE_QUERY_PIPELINE_STATISTICS, 0);
- query->flags |= R600_QUERY_HW_FLAG_BEGIN_RESUMES;
+ query->flags |= SI_QUERY_HW_FLAG_BEGIN_RESUMES;
return (struct pipe_query*)query;
}
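/* SI_QUERY_HW_FLAG_BEGIN_RESUMES presumably tells begin_query to resume
 * the pipeline-statistics counters rather than reset them, which is what
 * lets the start/stop helpers below pause a query when a color buffer is
 * unbound and continue it on the next bind. */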
/**
* Called when binding a color buffer.
*/
-void vi_separate_dcc_start_query(struct pipe_context *ctx,
+void vi_separate_dcc_start_query(struct si_context *sctx,
struct r600_texture *tex)
{
- struct r600_common_context *rctx = (struct r600_common_context*)ctx;
- unsigned i = vi_get_context_dcc_stats_index(rctx, tex);
+ unsigned i = vi_get_context_dcc_stats_index(sctx, tex);
- assert(!rctx->dcc_stats[i].query_active);
+ assert(!sctx->b.dcc_stats[i].query_active);
- if (!rctx->dcc_stats[i].ps_stats[0])
- rctx->dcc_stats[i].ps_stats[0] = vi_create_resuming_pipestats_query(ctx);
+ if (!sctx->b.dcc_stats[i].ps_stats[0])
+ sctx->b.dcc_stats[i].ps_stats[0] = vi_create_resuming_pipestats_query(sctx);
/* begin or resume the query */
- ctx->begin_query(ctx, rctx->dcc_stats[i].ps_stats[0]);
- rctx->dcc_stats[i].query_active = true;
+ sctx->b.b.begin_query(&sctx->b.b, sctx->b.dcc_stats[i].ps_stats[0]);
+ sctx->b.dcc_stats[i].query_active = true;
}
/**
* Called when unbinding a color buffer.
*/
-void vi_separate_dcc_stop_query(struct pipe_context *ctx,
+void vi_separate_dcc_stop_query(struct si_context *sctx,
struct r600_texture *tex)
{
- struct r600_common_context *rctx = (struct r600_common_context*)ctx;
- unsigned i = vi_get_context_dcc_stats_index(rctx, tex);
+ unsigned i = vi_get_context_dcc_stats_index(sctx, tex);
- assert(rctx->dcc_stats[i].query_active);
- assert(rctx->dcc_stats[i].ps_stats[0]);
+ assert(sctx->b.dcc_stats[i].query_active);
+ assert(sctx->b.dcc_stats[i].ps_stats[0]);
/* pause or end the query */
- ctx->end_query(ctx, rctx->dcc_stats[i].ps_stats[0]);
- rctx->dcc_stats[i].query_active = false;
+ sctx->b.b.end_query(&sctx->b.b, sctx->b.dcc_stats[i].ps_stats[0]);
+ sctx->b.dcc_stats[i].query_active = false;
}
static bool vi_should_enable_separate_dcc(struct r600_texture *tex)
}
/* Called by fast clear. */
-void vi_separate_dcc_try_enable(struct r600_common_context *rctx,
+void vi_separate_dcc_try_enable(struct si_context *sctx,
struct r600_texture *tex)
{
/* The intent is to use this with shared displayable back buffers,
/* Enable the DCC stat gathering. */
if (!tex->dcc_gather_statistics) {
tex->dcc_gather_statistics = true;
- vi_separate_dcc_start_query(&rctx->b, tex);
+ vi_separate_dcc_start_query(sctx, tex);
}
if (!vi_should_enable_separate_dcc(tex))
assert(tex->surface.num_dcc_levels);
assert(!tex->dcc_separate_buffer);
- si_texture_discard_cmask(rctx->screen, tex);
+ si_texture_discard_cmask(sctx->screen, tex);
/* Get a DCC buffer. */
if (tex->last_dcc_separate_buffer) {
tex->last_dcc_separate_buffer = NULL;
} else {
tex->dcc_separate_buffer = (struct r600_resource*)
- si_aligned_buffer_create(rctx->b.screen,
- R600_RESOURCE_FLAG_UNMAPPABLE,
+ si_aligned_buffer_create(sctx->b.b.screen,
+ SI_RESOURCE_FLAG_UNMAPPABLE,
PIPE_USAGE_DEFAULT,
tex->surface.dcc_size,
tex->surface.dcc_alignment);
void vi_separate_dcc_process_and_reset_stats(struct pipe_context *ctx,
struct r600_texture *tex)
{
- struct r600_common_context *rctx = (struct r600_common_context*)ctx;
+ struct si_context *sctx = (struct si_context*)ctx;
struct pipe_query *tmp;
- unsigned i = vi_get_context_dcc_stats_index(rctx, tex);
- bool query_active = rctx->dcc_stats[i].query_active;
+ unsigned i = vi_get_context_dcc_stats_index(sctx, tex);
+ bool query_active = sctx->b.dcc_stats[i].query_active;
bool disable = false;
- if (rctx->dcc_stats[i].ps_stats[2]) {
+ if (sctx->b.dcc_stats[i].ps_stats[2]) {
union pipe_query_result result;
/* Read the results. */
- ctx->get_query_result(ctx, rctx->dcc_stats[i].ps_stats[2],
+ ctx->get_query_result(ctx, sctx->b.dcc_stats[i].ps_stats[2],
true, &result);
- si_query_hw_reset_buffers(rctx,
- (struct r600_query_hw*)
- rctx->dcc_stats[i].ps_stats[2]);
+ si_query_hw_reset_buffers(sctx,
+ (struct si_query_hw*)
+ sctx->b.dcc_stats[i].ps_stats[2]);
/* Compute the approximate number of fullscreen draws. */
tex->ps_draw_ratio =
result.pipeline_statistics.ps_invocations /
(tex->resource.b.b.width0 * tex->resource.b.b.height0);
- rctx->last_tex_ps_draw_ratio = tex->ps_draw_ratio;
+ sctx->b.last_tex_ps_draw_ratio = tex->ps_draw_ratio;
disable = tex->dcc_separate_buffer &&
!vi_should_enable_separate_dcc(tex);
/* stop the statistics query for ps_stats[0] */
if (query_active)
- vi_separate_dcc_stop_query(ctx, tex);
+ vi_separate_dcc_stop_query(sctx, tex);
/* Move the queries in the queue by one. */
- tmp = rctx->dcc_stats[i].ps_stats[2];
- rctx->dcc_stats[i].ps_stats[2] = rctx->dcc_stats[i].ps_stats[1];
- rctx->dcc_stats[i].ps_stats[1] = rctx->dcc_stats[i].ps_stats[0];
- rctx->dcc_stats[i].ps_stats[0] = tmp;
+ tmp = sctx->b.dcc_stats[i].ps_stats[2];
+ sctx->b.dcc_stats[i].ps_stats[2] = sctx->b.dcc_stats[i].ps_stats[1];
+ sctx->b.dcc_stats[i].ps_stats[1] = sctx->b.dcc_stats[i].ps_stats[0];
+ sctx->b.dcc_stats[i].ps_stats[0] = tmp;
/* create and start a new query as ps_stats[0] */
if (query_active)
- vi_separate_dcc_start_query(ctx, tex);
+ vi_separate_dcc_start_query(sctx, tex);
if (disable) {
assert(!tex->last_dcc_separate_buffer);
}
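/* ps_stats[] acts as a three-entry query ring: [0] is accumulating,
 * [1] finished the previous cycle, and [2] is old enough that the blocking
 * get_query_result() above rarely stalls.  The rotation moves each query
 * down one slot per call, so a query's results are consumed two cycles
 * after it was issued. */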
static struct pipe_memory_object *
-r600_memobj_from_handle(struct pipe_screen *screen,
- struct winsys_handle *whandle,
- bool dedicated)
+si_memobj_from_handle(struct pipe_screen *screen,
+ struct winsys_handle *whandle,
+ bool dedicated)
{
struct si_screen *sscreen = (struct si_screen*)screen;
struct r600_memory_object *memobj = CALLOC_STRUCT(r600_memory_object);
}
static void
-r600_memobj_destroy(struct pipe_screen *screen,
- struct pipe_memory_object *_memobj)
+si_memobj_destroy(struct pipe_screen *screen,
+ struct pipe_memory_object *_memobj)
{
struct r600_memory_object *memobj = (struct r600_memory_object *)_memobj;
}
static struct pipe_resource *
-r600_texture_from_memobj(struct pipe_screen *screen,
- const struct pipe_resource *templ,
- struct pipe_memory_object *_memobj,
- uint64_t offset)
+si_texture_from_memobj(struct pipe_screen *screen,
+ const struct pipe_resource *templ,
+ struct pipe_memory_object *_memobj,
+ uint64_t offset)
{
int r;
struct si_screen *sscreen = (struct si_screen*)screen;
if (memobj->b.dedicated) {
sscreen->ws->buffer_get_metadata(memobj->buf, &metadata);
- r600_surface_import_metadata(sscreen, &surface, &metadata,
+ si_surface_import_metadata(sscreen, &surface, &metadata,
&array_mode, &is_scanout);
} else {
/**
}
- r = r600_init_surface(sscreen, &surface, templ,
+ r = si_init_surface(sscreen, &surface, templ,
array_mode, memobj->stride,
offset, true, is_scanout,
false, false);
if (r)
return NULL;
- rtex = r600_texture_create_object(screen, templ, memobj->buf, &surface);
+ rtex = si_texture_create_object(screen, templ, memobj->buf, &surface);
if (!rtex)
return NULL;
void si_init_screen_texture_functions(struct si_screen *sscreen)
{
- sscreen->b.resource_from_handle = r600_texture_from_handle;
- sscreen->b.resource_get_handle = r600_texture_get_handle;
- sscreen->b.resource_from_memobj = r600_texture_from_memobj;
- sscreen->b.memobj_create_from_handle = r600_memobj_from_handle;
- sscreen->b.memobj_destroy = r600_memobj_destroy;
+ sscreen->b.resource_from_handle = si_texture_from_handle;
+ sscreen->b.resource_get_handle = si_texture_get_handle;
+ sscreen->b.resource_from_memobj = si_texture_from_memobj;
+ sscreen->b.memobj_create_from_handle = si_memobj_from_handle;
+ sscreen->b.memobj_destroy = si_memobj_destroy;
sscreen->b.check_resource_capability = si_check_resource_capability;
}
-void si_init_context_texture_functions(struct r600_common_context *rctx)
+void si_init_context_texture_functions(struct si_context *sctx)
{
- rctx->b.create_surface = r600_create_surface;
- rctx->b.surface_destroy = r600_surface_destroy;
+ sctx->b.b.create_surface = si_create_surface;
+ sctx->b.b.surface_destroy = si_surface_destroy;
}