return true;
}
-static void r600_buffer_destroy(struct pipe_screen *screen,
- struct pipe_resource *buf)
+static void si_buffer_destroy(struct pipe_screen *screen,
+ struct pipe_resource *buf)
{
struct r600_resource *rbuffer = r600_resource(buf);
(void)si_invalidate_buffer(sctx, rbuffer);
}
-static void *r600_buffer_get_transfer(struct pipe_context *ctx,
- struct pipe_resource *resource,
- unsigned usage,
- const struct pipe_box *box,
- struct pipe_transfer **ptransfer,
- void *data, struct r600_resource *staging,
- unsigned offset)
+static void *si_buffer_get_transfer(struct pipe_context *ctx,
+ struct pipe_resource *resource,
+ unsigned usage,
+ const struct pipe_box *box,
+ struct pipe_transfer **ptransfer,
+ void *data, struct r600_resource *staging,
+ unsigned offset)
{
struct si_context *sctx = (struct si_context*)ctx;
struct r600_transfer *transfer;
return data;
}
-static void *r600_buffer_transfer_map(struct pipe_context *ctx,
- struct pipe_resource *resource,
- unsigned level,
- unsigned usage,
- const struct pipe_box *box,
- struct pipe_transfer **ptransfer)
+static void *si_buffer_transfer_map(struct pipe_context *ctx,
+ struct pipe_resource *resource,
+ unsigned level,
+ unsigned usage,
+ const struct pipe_box *box,
+ struct pipe_transfer **ptransfer)
{
struct si_context *sctx = (struct si_context*)ctx;
struct r600_resource *rbuffer = r600_resource(resource);
if (staging) {
data += box->x % R600_MAP_BUFFER_ALIGNMENT;
- return r600_buffer_get_transfer(ctx, resource, usage, box,
+ return si_buffer_get_transfer(ctx, resource, usage, box,
ptransfer, data, staging, offset);
} else if (rbuffer->flags & RADEON_FLAG_SPARSE) {
return NULL;
}
data += box->x % R600_MAP_BUFFER_ALIGNMENT;
- return r600_buffer_get_transfer(ctx, resource, usage, box,
+ return si_buffer_get_transfer(ctx, resource, usage, box,
ptransfer, data, staging, 0);
} else if (rbuffer->flags & RADEON_FLAG_SPARSE) {
return NULL;
}
data += box->x;
- return r600_buffer_get_transfer(ctx, resource, usage, box,
+ return si_buffer_get_transfer(ctx, resource, usage, box,
ptransfer, data, NULL, 0);
}
-static void r600_buffer_do_flush_region(struct pipe_context *ctx,
- struct pipe_transfer *transfer,
- const struct pipe_box *box)
+static void si_buffer_do_flush_region(struct pipe_context *ctx,
+ struct pipe_transfer *transfer,
+ const struct pipe_box *box)
{
struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;
struct r600_resource *rbuffer = r600_resource(transfer->resource);
box->x + box->width);
}
-static void r600_buffer_flush_region(struct pipe_context *ctx,
- struct pipe_transfer *transfer,
- const struct pipe_box *rel_box)
+static void si_buffer_flush_region(struct pipe_context *ctx,
+ struct pipe_transfer *transfer,
+ const struct pipe_box *rel_box)
{
unsigned required_usage = PIPE_TRANSFER_WRITE |
PIPE_TRANSFER_FLUSH_EXPLICIT;
struct pipe_box box;
u_box_1d(transfer->box.x + rel_box->x, rel_box->width, &box);
- r600_buffer_do_flush_region(ctx, transfer, &box);
+ si_buffer_do_flush_region(ctx, transfer, &box);
}
}
-static void r600_buffer_transfer_unmap(struct pipe_context *ctx,
- struct pipe_transfer *transfer)
+static void si_buffer_transfer_unmap(struct pipe_context *ctx,
+ struct pipe_transfer *transfer)
{
struct si_context *sctx = (struct si_context*)ctx;
struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;
if (transfer->usage & PIPE_TRANSFER_WRITE &&
!(transfer->usage & PIPE_TRANSFER_FLUSH_EXPLICIT))
- r600_buffer_do_flush_region(ctx, transfer, &transfer->box);
+ si_buffer_do_flush_region(ctx, transfer, &transfer->box);
r600_resource_reference(&rtransfer->staging, NULL);
assert(rtransfer->b.staging == NULL); /* for threaded context only */
uint8_t *map = NULL;
u_box_1d(offset, size, &box);
- map = r600_buffer_transfer_map(ctx, buffer, 0,
+ map = si_buffer_transfer_map(ctx, buffer, 0,
PIPE_TRANSFER_WRITE |
PIPE_TRANSFER_DISCARD_RANGE |
usage,
return;
memcpy(map, data, size);
- r600_buffer_transfer_unmap(ctx, transfer);
+ si_buffer_transfer_unmap(ctx, transfer);
}
-static const struct u_resource_vtbl r600_buffer_vtbl =
+static const struct u_resource_vtbl si_buffer_vtbl =
{
NULL, /* get_handle */
- r600_buffer_destroy, /* resource_destroy */
- r600_buffer_transfer_map, /* transfer_map */
- r600_buffer_flush_region, /* transfer_flush_region */
- r600_buffer_transfer_unmap, /* transfer_unmap */
+ si_buffer_destroy, /* resource_destroy */
+ si_buffer_transfer_map, /* transfer_map */
+ si_buffer_flush_region, /* transfer_flush_region */
+ si_buffer_transfer_unmap, /* transfer_unmap */
};
static struct r600_resource *
-r600_alloc_buffer_struct(struct pipe_screen *screen,
- const struct pipe_resource *templ)
+si_alloc_buffer_struct(struct pipe_screen *screen,
+ const struct pipe_resource *templ)
{
struct r600_resource *rbuffer;
pipe_reference_init(&rbuffer->b.b.reference, 1);
rbuffer->b.b.screen = screen;
- rbuffer->b.vtbl = &r600_buffer_vtbl;
+ rbuffer->b.vtbl = &si_buffer_vtbl;
threaded_resource_init(&rbuffer->b.b);
rbuffer->buf = NULL;
unsigned alignment)
{
struct si_screen *sscreen = (struct si_screen*)screen;
- struct r600_resource *rbuffer = r600_alloc_buffer_struct(screen, templ);
+ struct r600_resource *rbuffer = si_alloc_buffer_struct(screen, templ);
if (templ->flags & PIPE_RESOURCE_FLAG_SPARSE)
rbuffer->b.b.flags |= R600_RESOURCE_FLAG_UNMAPPABLE;
{
struct si_screen *sscreen = (struct si_screen*)screen;
struct radeon_winsys *ws = sscreen->ws;
- struct r600_resource *rbuffer = r600_alloc_buffer_struct(screen, templ);
+ struct r600_resource *rbuffer = si_alloc_buffer_struct(screen, templ);
rbuffer->domains = RADEON_DOMAIN_GTT;
rbuffer->flags = 0;
*
* When this SHOULD NOT be used:
*
- * - if r600_context_add_resource_size has been called for the buffer
+ * - if si_context_add_resource_size has been called for the buffer
* followed by *_need_cs_space for checking the memory usage
*
- * - if r600_need_dma_space has been called for the buffer
+ * - if si_need_dma_space has been called for the buffer
*
* - when emitting state packets and draw packets (because preceding packets
* can't be re-emitted at that point)
p_atomic_inc(&counters->named.field.idle); \
} while (0)
-static void r600_update_mmio_counters(struct si_screen *sscreen,
- union r600_mmio_counters *counters)
+static void si_update_mmio_counters(struct si_screen *sscreen,
+ union si_mmio_counters *counters)
{
uint32_t value = 0;
bool gui_busy, sdma_busy = false;
#undef UPDATE_COUNTER
static int
-r600_gpu_load_thread(void *param)
+si_gpu_load_thread(void *param)
{
struct si_screen *sscreen = (struct si_screen*)param;
const int period_us = 1000000 / SAMPLES_PER_SEC;
last_time = cur_time;
/* Update the counters. */
- r600_update_mmio_counters(sscreen, &sscreen->mmio_counters);
+ si_update_mmio_counters(sscreen, &sscreen->mmio_counters);
}
p_atomic_dec(&sscreen->gpu_load_stop_thread);
return 0;
sscreen->gpu_load_thread = 0;
}
-static uint64_t r600_read_mmio_counter(struct si_screen *sscreen,
- unsigned busy_index)
+static uint64_t si_read_mmio_counter(struct si_screen *sscreen,
+ unsigned busy_index)
{
/* Start the thread if needed. */
if (!sscreen->gpu_load_thread) {
/* Check again inside the mutex. */
if (!sscreen->gpu_load_thread)
sscreen->gpu_load_thread =
- u_thread_create(r600_gpu_load_thread, sscreen);
+ u_thread_create(si_gpu_load_thread, sscreen);
mtx_unlock(&sscreen->gpu_load_mutex);
}
return busy | ((uint64_t)idle << 32);
}
-static unsigned r600_end_mmio_counter(struct si_screen *sscreen,
- uint64_t begin, unsigned busy_index)
+static unsigned si_end_mmio_counter(struct si_screen *sscreen,
+ uint64_t begin, unsigned busy_index)
{
- uint64_t end = r600_read_mmio_counter(sscreen, busy_index);
+ uint64_t end = si_read_mmio_counter(sscreen, busy_index);
unsigned busy = (end & 0xffffffff) - (begin & 0xffffffff);
unsigned idle = (end >> 32) - (begin >> 32);
if (idle || busy) {
return busy*100 / (busy + idle);
} else {
- union r600_mmio_counters counters;
+ union si_mmio_counters counters;
memset(&counters, 0, sizeof(counters));
- r600_update_mmio_counters(sscreen, &counters);
+ si_update_mmio_counters(sscreen, &counters);
return counters.array[busy_index] ? 100 : 0;
}
}
uint64_t si_begin_counter(struct si_screen *sscreen, unsigned type)
{
unsigned busy_index = busy_index_from_type(sscreen, type);
- return r600_read_mmio_counter(sscreen, busy_index);
+ return si_read_mmio_counter(sscreen, busy_index);
}
unsigned si_end_counter(struct si_screen *sscreen, unsigned type,
uint64_t begin)
{
unsigned busy_index = busy_index_from_type(sscreen, type);
- return r600_end_mmio_counter(sscreen, begin, busy_index);
+ return si_end_mmio_counter(sscreen, begin, busy_index);
}
/* Max counters per HW block */
#define R600_QUERY_MAX_COUNTERS 16
-static struct r600_perfcounter_block *
-lookup_counter(struct r600_perfcounters *pc, unsigned index,
+static struct si_perfcounter_block *
+lookup_counter(struct si_perfcounters *pc, unsigned index,
unsigned *base_gid, unsigned *sub_index)
{
- struct r600_perfcounter_block *block = pc->blocks;
+ struct si_perfcounter_block *block = pc->blocks;
unsigned bid;
*base_gid = 0;
return NULL;
}
-static struct r600_perfcounter_block *
-lookup_group(struct r600_perfcounters *pc, unsigned *index)
+static struct si_perfcounter_block *
+lookup_group(struct si_perfcounters *pc, unsigned *index)
{
unsigned bid;
- struct r600_perfcounter_block *block = pc->blocks;
+ struct si_perfcounter_block *block = pc->blocks;
for (bid = 0; bid < pc->num_blocks; ++bid, ++block) {
if (*index < block->num_groups)
return NULL;
}
-struct r600_pc_group {
- struct r600_pc_group *next;
- struct r600_perfcounter_block *block;
+struct si_pc_group {
+ struct si_pc_group *next;
+ struct si_perfcounter_block *block;
unsigned sub_gid; /* only used during init */
unsigned result_base; /* only used during init */
int se;
unsigned selectors[R600_QUERY_MAX_COUNTERS];
};
-struct r600_pc_counter {
+struct si_pc_counter {
unsigned base;
unsigned qwords;
unsigned stride; /* in uint64s */
};
-#define R600_PC_SHADERS_WINDOWING (1 << 31)
+#define SI_PC_SHADERS_WINDOWING (1 << 31)
-struct r600_query_pc {
- struct r600_query_hw b;
+struct si_query_pc {
+ struct si_query_hw b;
unsigned shaders;
unsigned num_counters;
- struct r600_pc_counter *counters;
- struct r600_pc_group *groups;
+ struct si_pc_counter *counters;
+ struct si_pc_group *groups;
};
-static void r600_pc_query_destroy(struct si_screen *sscreen,
- struct r600_query *rquery)
+static void si_pc_query_destroy(struct si_screen *sscreen,
+ struct si_query *rquery)
{
- struct r600_query_pc *query = (struct r600_query_pc *)rquery;
+ struct si_query_pc *query = (struct si_query_pc *)rquery;
while (query->groups) {
- struct r600_pc_group *group = query->groups;
+ struct si_pc_group *group = query->groups;
query->groups = group->next;
FREE(group);
}
si_query_hw_destroy(sscreen, rquery);
}
-static bool r600_pc_query_prepare_buffer(struct si_screen *screen,
- struct r600_query_hw *hwquery,
- struct r600_resource *buffer)
+static bool si_pc_query_prepare_buffer(struct si_screen *screen,
+ struct si_query_hw *hwquery,
+ struct r600_resource *buffer)
{
/* no-op */
return true;
}
-static void r600_pc_query_emit_start(struct si_context *sctx,
- struct r600_query_hw *hwquery,
- struct r600_resource *buffer, uint64_t va)
+static void si_pc_query_emit_start(struct si_context *sctx,
+ struct si_query_hw *hwquery,
+ struct r600_resource *buffer, uint64_t va)
{
- struct r600_perfcounters *pc = sctx->screen->perfcounters;
- struct r600_query_pc *query = (struct r600_query_pc *)hwquery;
- struct r600_pc_group *group;
+ struct si_perfcounters *pc = sctx->screen->perfcounters;
+ struct si_query_pc *query = (struct si_query_pc *)hwquery;
+ struct si_pc_group *group;
int current_se = -1;
int current_instance = -1;
pc->emit_shaders(sctx, query->shaders);
for (group = query->groups; group; group = group->next) {
- struct r600_perfcounter_block *block = group->block;
+ struct si_perfcounter_block *block = group->block;
if (group->se != current_se || group->instance != current_instance) {
current_se = group->se;
pc->emit_start(sctx, buffer, va);
}
-static void r600_pc_query_emit_stop(struct si_context *sctx,
- struct r600_query_hw *hwquery,
- struct r600_resource *buffer, uint64_t va)
+static void si_pc_query_emit_stop(struct si_context *sctx,
+ struct si_query_hw *hwquery,
+ struct r600_resource *buffer, uint64_t va)
{
- struct r600_perfcounters *pc = sctx->screen->perfcounters;
- struct r600_query_pc *query = (struct r600_query_pc *)hwquery;
- struct r600_pc_group *group;
+ struct si_perfcounters *pc = sctx->screen->perfcounters;
+ struct si_query_pc *query = (struct si_query_pc *)hwquery;
+ struct si_pc_group *group;
pc->emit_stop(sctx, buffer, va);
for (group = query->groups; group; group = group->next) {
- struct r600_perfcounter_block *block = group->block;
+ struct si_perfcounter_block *block = group->block;
unsigned se = group->se >= 0 ? group->se : 0;
unsigned se_end = se + 1;
pc->emit_instance(sctx, -1, -1);
}
-static void r600_pc_query_clear_result(struct r600_query_hw *hwquery,
- union pipe_query_result *result)
+static void si_pc_query_clear_result(struct si_query_hw *hwquery,
+ union pipe_query_result *result)
{
- struct r600_query_pc *query = (struct r600_query_pc *)hwquery;
+ struct si_query_pc *query = (struct si_query_pc *)hwquery;
memset(result, 0, sizeof(result->batch[0]) * query->num_counters);
}
-static void r600_pc_query_add_result(struct si_screen *sscreen,
- struct r600_query_hw *hwquery,
- void *buffer,
- union pipe_query_result *result)
+static void si_pc_query_add_result(struct si_screen *sscreen,
+ struct si_query_hw *hwquery,
+ void *buffer,
+ union pipe_query_result *result)
{
- struct r600_query_pc *query = (struct r600_query_pc *)hwquery;
+ struct si_query_pc *query = (struct si_query_pc *)hwquery;
uint64_t *results = buffer;
unsigned i, j;
for (i = 0; i < query->num_counters; ++i) {
- struct r600_pc_counter *counter = &query->counters[i];
+ struct si_pc_counter *counter = &query->counters[i];
for (j = 0; j < counter->qwords; ++j) {
uint32_t value = results[counter->base + j * counter->stride];
}
}
-static struct r600_query_ops batch_query_ops = {
- .destroy = r600_pc_query_destroy,
+static struct si_query_ops batch_query_ops = {
+ .destroy = si_pc_query_destroy,
.begin = si_query_hw_begin,
.end = si_query_hw_end,
.get_result = si_query_hw_get_result
};
-static struct r600_query_hw_ops batch_query_hw_ops = {
- .prepare_buffer = r600_pc_query_prepare_buffer,
- .emit_start = r600_pc_query_emit_start,
- .emit_stop = r600_pc_query_emit_stop,
- .clear_result = r600_pc_query_clear_result,
- .add_result = r600_pc_query_add_result,
+static struct si_query_hw_ops batch_query_hw_ops = {
+ .prepare_buffer = si_pc_query_prepare_buffer,
+ .emit_start = si_pc_query_emit_start,
+ .emit_stop = si_pc_query_emit_stop,
+ .clear_result = si_pc_query_clear_result,
+ .add_result = si_pc_query_add_result,
};
-static struct r600_pc_group *get_group_state(struct si_screen *screen,
- struct r600_query_pc *query,
- struct r600_perfcounter_block *block,
+static struct si_pc_group *get_group_state(struct si_screen *screen,
+ struct si_query_pc *query,
+ struct si_perfcounter_block *block,
unsigned sub_gid)
{
- struct r600_pc_group *group = query->groups;
+ struct si_pc_group *group = query->groups;
while (group) {
if (group->block == block && group->sub_gid == sub_gid)
group = group->next;
}
- group = CALLOC_STRUCT(r600_pc_group);
+ group = CALLOC_STRUCT(si_pc_group);
if (!group)
return NULL;
shaders = screen->perfcounters->shader_type_bits[shader_id];
- query_shaders = query->shaders & ~R600_PC_SHADERS_WINDOWING;
+ query_shaders = query->shaders & ~SI_PC_SHADERS_WINDOWING;
if (query_shaders && query_shaders != shaders) {
- fprintf(stderr, "r600_perfcounter: incompatible shader groups\n");
+ fprintf(stderr, "si_perfcounter: incompatible shader groups\n");
FREE(group);
return NULL;
}
if (block->flags & R600_PC_BLOCK_SHADER_WINDOWED && !query->shaders) {
// A non-zero value in query->shaders ensures that the shader
// masking is reset unless the user explicitly requests one.
- query->shaders = R600_PC_SHADERS_WINDOWING;
+ query->shaders = SI_PC_SHADERS_WINDOWING;
}
if (block->flags & R600_PC_BLOCK_SE_GROUPS) {
{
struct si_screen *screen =
(struct si_screen *)ctx->screen;
- struct r600_perfcounters *pc = screen->perfcounters;
- struct r600_perfcounter_block *block;
- struct r600_pc_group *group;
- struct r600_query_pc *query;
+ struct si_perfcounters *pc = screen->perfcounters;
+ struct si_perfcounter_block *block;
+ struct si_pc_group *group;
+ struct si_query_pc *query;
unsigned base_gid, sub_gid, sub_index;
unsigned i, j;
if (!pc)
return NULL;
- query = CALLOC_STRUCT(r600_query_pc);
+ query = CALLOC_STRUCT(si_query_pc);
if (!query)
return NULL;
i = 0;
for (group = query->groups; group; group = group->next) {
- struct r600_perfcounter_block *block = group->block;
+ struct si_perfcounter_block *block = group->block;
unsigned read_dw;
unsigned instances = 1;
}
if (query->shaders) {
- if (query->shaders == R600_PC_SHADERS_WINDOWING)
+ if (query->shaders == SI_PC_SHADERS_WINDOWING)
query->shaders = 0xffffffff;
}
/* Map user-supplied query array to result indices */
query->counters = CALLOC(num_queries, sizeof(*query->counters));
for (i = 0; i < num_queries; ++i) {
- struct r600_pc_counter *counter = &query->counters[i];
- struct r600_perfcounter_block *block;
+ struct si_pc_counter *counter = &query->counters[i];
+ struct si_perfcounter_block *block;
block = lookup_counter(pc, query_types[i] - R600_QUERY_FIRST_PERFCOUNTER,
&base_gid, &sub_index);
return (struct pipe_query *)query;
error:
- r600_pc_query_destroy(screen, &query->b.b);
+ si_pc_query_destroy(screen, &query->b.b);
return NULL;
}
-static bool r600_init_block_names(struct si_screen *screen,
- struct r600_perfcounter_block *block)
+static bool si_init_block_names(struct si_screen *screen,
+ struct si_perfcounter_block *block)
{
unsigned i, j, k;
unsigned groups_shader = 1, groups_se = 1, groups_instance = 1;
unsigned index,
struct pipe_driver_query_info *info)
{
- struct r600_perfcounters *pc = screen->perfcounters;
- struct r600_perfcounter_block *block;
+ struct si_perfcounters *pc = screen->perfcounters;
+ struct si_perfcounter_block *block;
unsigned base_gid, sub;
if (!pc)
return 0;
if (!block->selector_names) {
- if (!r600_init_block_names(screen, block))
+ if (!si_init_block_names(screen, block))
return 0;
}
info->name = block->selector_names + sub * block->selector_name_stride;
unsigned index,
struct pipe_driver_query_group_info *info)
{
- struct r600_perfcounters *pc = screen->perfcounters;
- struct r600_perfcounter_block *block;
+ struct si_perfcounters *pc = screen->perfcounters;
+ struct si_perfcounter_block *block;
if (!pc)
return 0;
return 0;
if (!block->group_names) {
- if (!r600_init_block_names(screen, block))
+ if (!si_init_block_names(screen, block))
return 0;
}
info->name = block->group_names + index * block->group_name_stride;
sscreen->perfcounters->cleanup(sscreen);
}
-bool si_perfcounters_init(struct r600_perfcounters *pc,
+bool si_perfcounters_init(struct si_perfcounters *pc,
unsigned num_blocks)
{
- pc->blocks = CALLOC(num_blocks, sizeof(struct r600_perfcounter_block));
+ pc->blocks = CALLOC(num_blocks, sizeof(struct si_perfcounter_block));
if (!pc->blocks)
return false;
}
void si_perfcounters_add_block(struct si_screen *sscreen,
- struct r600_perfcounters *pc,
+ struct si_perfcounters *pc,
const char *name, unsigned flags,
unsigned counters, unsigned selectors,
unsigned instances, void *data)
{
- struct r600_perfcounter_block *block = &pc->blocks[pc->num_blocks];
+ struct si_perfcounter_block *block = &pc->blocks[pc->num_blocks];
assert(counters <= R600_QUERY_MAX_COUNTERS);
pc->num_groups += block->num_groups;
}
-void si_perfcounters_do_destroy(struct r600_perfcounters *pc)
+void si_perfcounters_do_destroy(struct si_perfcounters *pc)
{
unsigned i;
* pipe_context
*/
-static enum pipe_reset_status r600_get_reset_status(struct pipe_context *ctx)
+static enum pipe_reset_status si_get_reset_status(struct pipe_context *ctx)
{
struct si_context *sctx = (struct si_context *)ctx;
unsigned latest = sctx->b.ws->query_value(sctx->b.ws,
return PIPE_UNKNOWN_CONTEXT_RESET;
}
-static void r600_set_device_reset_callback(struct pipe_context *ctx,
+static void si_set_device_reset_callback(struct pipe_context *ctx,
const struct pipe_device_reset_callback *cb)
{
struct si_context *sctx = (struct si_context *)ctx;
return true;
}
-static bool r600_resource_commit(struct pipe_context *pctx,
- struct pipe_resource *resource,
- unsigned level, struct pipe_box *box,
- bool commit)
+static bool si_resource_commit(struct pipe_context *pctx,
+ struct pipe_resource *resource,
+ unsigned level, struct pipe_box *box,
+ bool commit)
{
struct si_context *ctx = (struct si_context *)pctx;
struct r600_resource *res = r600_resource(resource);
sctx->b.family = sscreen->info.family;
sctx->b.chip_class = sscreen->info.chip_class;
- sctx->b.b.resource_commit = r600_resource_commit;
+ sctx->b.b.resource_commit = si_resource_commit;
if (sscreen->info.drm_major == 2 && sscreen->info.drm_minor >= 43) {
- sctx->b.b.get_device_reset_status = r600_get_reset_status;
+ sctx->b.b.get_device_reset_status = si_get_reset_status;
sctx->b.gpu_reset_counter =
sctx->b.ws->query_value(sctx->b.ws,
RADEON_GPU_RESET_COUNTER);
}
- sctx->b.b.set_device_reset_callback = r600_set_device_reset_callback;
+ sctx->b.b.set_device_reset_callback = si_set_device_reset_callback;
si_init_context_texture_functions(sctx);
si_init_query_functions(sctx);
#define SI_MAX_VARIABLE_THREADS_PER_BLOCK 1024
-struct r600_perfcounters;
+struct si_perfcounters;
struct tgsi_shader_info;
-struct r600_qbo_state;
+struct si_qbo_state;
/* Only 32-bit buffer allocations are supported, gallium doesn't support more
* at the moment.
unsigned db_htile_surface;
};
-struct r600_mmio_counter {
+struct si_mmio_counter {
unsigned busy;
unsigned idle;
};
-union r600_mmio_counters {
+union si_mmio_counters {
struct {
/* For global GPU load including SDMA. */
- struct r600_mmio_counter gpu;
+ struct si_mmio_counter gpu;
/* GRBM_STATUS */
- struct r600_mmio_counter spi;
- struct r600_mmio_counter gui;
- struct r600_mmio_counter ta;
- struct r600_mmio_counter gds;
- struct r600_mmio_counter vgt;
- struct r600_mmio_counter ia;
- struct r600_mmio_counter sx;
- struct r600_mmio_counter wd;
- struct r600_mmio_counter bci;
- struct r600_mmio_counter sc;
- struct r600_mmio_counter pa;
- struct r600_mmio_counter db;
- struct r600_mmio_counter cp;
- struct r600_mmio_counter cb;
+ struct si_mmio_counter spi;
+ struct si_mmio_counter gui;
+ struct si_mmio_counter ta;
+ struct si_mmio_counter gds;
+ struct si_mmio_counter vgt;
+ struct si_mmio_counter ia;
+ struct si_mmio_counter sx;
+ struct si_mmio_counter wd;
+ struct si_mmio_counter bci;
+ struct si_mmio_counter sc;
+ struct si_mmio_counter pa;
+ struct si_mmio_counter db;
+ struct si_mmio_counter cp;
+ struct si_mmio_counter cb;
/* SRBM_STATUS2 */
- struct r600_mmio_counter sdma;
+ struct si_mmio_counter sdma;
/* CP_STAT */
- struct r600_mmio_counter pfp;
- struct r600_mmio_counter meq;
- struct r600_mmio_counter me;
- struct r600_mmio_counter surf_sync;
- struct r600_mmio_counter cp_dma;
- struct r600_mmio_counter scratch_ram;
+ struct si_mmio_counter pfp;
+ struct si_mmio_counter meq;
+ struct si_mmio_counter me;
+ struct si_mmio_counter surf_sync;
+ struct si_mmio_counter cp_dma;
+ struct si_mmio_counter scratch_ram;
} named;
unsigned array[0];
};
#include "tgsi/tgsi_text.h"
#include "amd/common/sid.h"
-#define R600_MAX_STREAMS 4
+#define SI_MAX_STREAMS 4
-struct r600_hw_query_params {
+struct si_hw_query_params {
unsigned start_offset;
unsigned end_offset;
unsigned fence_offset;
};
/* Queries without buffer handling or suspend/resume. */
-struct r600_query_sw {
- struct r600_query b;
+struct si_query_sw {
+ struct si_query b;
uint64_t begin_result;
uint64_t end_result;
struct pipe_fence_handle *fence;
};
-static void r600_query_sw_destroy(struct si_screen *sscreen,
- struct r600_query *rquery)
+static void si_query_sw_destroy(struct si_screen *sscreen,
+ struct si_query *rquery)
{
- struct r600_query_sw *query = (struct r600_query_sw *)rquery;
+ struct si_query_sw *query = (struct si_query_sw *)rquery;
sscreen->b.fence_reference(&sscreen->b, &query->fence, NULL);
FREE(query);
}
}
-static bool r600_query_sw_begin(struct si_context *sctx,
- struct r600_query *rquery)
+static bool si_query_sw_begin(struct si_context *sctx,
+ struct si_query *rquery)
{
- struct r600_query_sw *query = (struct r600_query_sw *)rquery;
+ struct si_query_sw *query = (struct si_query_sw *)rquery;
enum radeon_value_id ws_id;
switch(query->b.type) {
case R600_QUERY_GPIN_NUM_SE:
break;
default:
- unreachable("r600_query_sw_begin: bad query type");
+ unreachable("si_query_sw_begin: bad query type");
}
return true;
}
-static bool r600_query_sw_end(struct si_context *sctx,
- struct r600_query *rquery)
+static bool si_query_sw_end(struct si_context *sctx,
+ struct si_query *rquery)
{
- struct r600_query_sw *query = (struct r600_query_sw *)rquery;
+ struct si_query_sw *query = (struct si_query_sw *)rquery;
enum radeon_value_id ws_id;
switch(query->b.type) {
case R600_QUERY_GPIN_NUM_SE:
break;
default:
- unreachable("r600_query_sw_end: bad query type");
+ unreachable("si_query_sw_end: bad query type");
}
return true;
}
-static bool r600_query_sw_get_result(struct si_context *sctx,
- struct r600_query *rquery,
- bool wait,
- union pipe_query_result *result)
+static bool si_query_sw_get_result(struct si_context *sctx,
+ struct si_query *rquery,
+ bool wait,
+ union pipe_query_result *result)
{
- struct r600_query_sw *query = (struct r600_query_sw *)rquery;
+ struct si_query_sw *query = (struct si_query_sw *)rquery;
switch (query->b.type) {
case PIPE_QUERY_TIMESTAMP_DISJOINT:
}
-static struct r600_query_ops sw_query_ops = {
- .destroy = r600_query_sw_destroy,
- .begin = r600_query_sw_begin,
- .end = r600_query_sw_end,
- .get_result = r600_query_sw_get_result,
+static struct si_query_ops sw_query_ops = {
+ .destroy = si_query_sw_destroy,
+ .begin = si_query_sw_begin,
+ .end = si_query_sw_end,
+ .get_result = si_query_sw_get_result,
.get_result_resource = NULL
};
-static struct pipe_query *r600_query_sw_create(unsigned query_type)
+static struct pipe_query *si_query_sw_create(unsigned query_type)
{
- struct r600_query_sw *query;
+ struct si_query_sw *query;
- query = CALLOC_STRUCT(r600_query_sw);
+ query = CALLOC_STRUCT(si_query_sw);
if (!query)
return NULL;
}
void si_query_hw_destroy(struct si_screen *sscreen,
- struct r600_query *rquery)
+ struct si_query *rquery)
{
- struct r600_query_hw *query = (struct r600_query_hw *)rquery;
- struct r600_query_buffer *prev = query->buffer.previous;
+ struct si_query_hw *query = (struct si_query_hw *)rquery;
+ struct si_query_buffer *prev = query->buffer.previous;
/* Release all query buffers. */
while (prev) {
- struct r600_query_buffer *qbuf = prev;
+ struct si_query_buffer *qbuf = prev;
prev = prev->previous;
r600_resource_reference(&qbuf->buf, NULL);
FREE(qbuf);
FREE(rquery);
}
-static struct r600_resource *r600_new_query_buffer(struct si_screen *sscreen,
- struct r600_query_hw *query)
+static struct r600_resource *si_new_query_buffer(struct si_screen *sscreen,
+ struct si_query_hw *query)
{
unsigned buf_size = MAX2(query->result_size,
sscreen->info.min_alloc_size);
return buf;
}
-static bool r600_query_hw_prepare_buffer(struct si_screen *sscreen,
- struct r600_query_hw *query,
- struct r600_resource *buffer)
+static bool si_query_hw_prepare_buffer(struct si_screen *sscreen,
+ struct si_query_hw *query,
+ struct r600_resource *buffer)
{
/* Callers ensure that the buffer is currently unused by the GPU. */
uint32_t *results = sscreen->ws->buffer_map(buffer->buf, NULL,
return true;
}
-static void r600_query_hw_get_result_resource(struct si_context *sctx,
- struct r600_query *rquery,
- bool wait,
- enum pipe_query_value_type result_type,
- int index,
- struct pipe_resource *resource,
- unsigned offset);
+static void si_query_hw_get_result_resource(struct si_context *sctx,
+ struct si_query *rquery,
+ bool wait,
+ enum pipe_query_value_type result_type,
+ int index,
+ struct pipe_resource *resource,
+ unsigned offset);
-static struct r600_query_ops query_hw_ops = {
+static struct si_query_ops query_hw_ops = {
.destroy = si_query_hw_destroy,
.begin = si_query_hw_begin,
.end = si_query_hw_end,
.get_result = si_query_hw_get_result,
- .get_result_resource = r600_query_hw_get_result_resource,
+ .get_result_resource = si_query_hw_get_result_resource,
};
-static void r600_query_hw_do_emit_start(struct si_context *sctx,
- struct r600_query_hw *query,
- struct r600_resource *buffer,
- uint64_t va);
-static void r600_query_hw_do_emit_stop(struct si_context *sctx,
- struct r600_query_hw *query,
- struct r600_resource *buffer,
- uint64_t va);
-static void r600_query_hw_add_result(struct si_screen *sscreen,
- struct r600_query_hw *, void *buffer,
- union pipe_query_result *result);
-static void r600_query_hw_clear_result(struct r600_query_hw *,
- union pipe_query_result *);
-
-static struct r600_query_hw_ops query_hw_default_hw_ops = {
- .prepare_buffer = r600_query_hw_prepare_buffer,
- .emit_start = r600_query_hw_do_emit_start,
- .emit_stop = r600_query_hw_do_emit_stop,
- .clear_result = r600_query_hw_clear_result,
- .add_result = r600_query_hw_add_result,
+static void si_query_hw_do_emit_start(struct si_context *sctx,
+ struct si_query_hw *query,
+ struct r600_resource *buffer,
+ uint64_t va);
+static void si_query_hw_do_emit_stop(struct si_context *sctx,
+ struct si_query_hw *query,
+ struct r600_resource *buffer,
+ uint64_t va);
+static void si_query_hw_add_result(struct si_screen *sscreen,
+ struct si_query_hw *, void *buffer,
+ union pipe_query_result *result);
+static void si_query_hw_clear_result(struct si_query_hw *,
+ union pipe_query_result *);
+
+static struct si_query_hw_ops query_hw_default_hw_ops = {
+ .prepare_buffer = si_query_hw_prepare_buffer,
+ .emit_start = si_query_hw_do_emit_start,
+ .emit_stop = si_query_hw_do_emit_stop,
+ .clear_result = si_query_hw_clear_result,
+ .add_result = si_query_hw_add_result,
};
bool si_query_hw_init(struct si_screen *sscreen,
- struct r600_query_hw *query)
+ struct si_query_hw *query)
{
- query->buffer.buf = r600_new_query_buffer(sscreen, query);
+ query->buffer.buf = si_new_query_buffer(sscreen, query);
if (!query->buffer.buf)
return false;
return true;
}
-static struct pipe_query *r600_query_hw_create(struct si_screen *sscreen,
- unsigned query_type,
- unsigned index)
+static struct pipe_query *si_query_hw_create(struct si_screen *sscreen,
+ unsigned query_type,
+ unsigned index)
{
- struct r600_query_hw *query = CALLOC_STRUCT(r600_query_hw);
+ struct si_query_hw *query = CALLOC_STRUCT(si_query_hw);
if (!query)
return NULL;
break;
case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
/* NumPrimitivesWritten, PrimitiveStorageNeeded. */
- query->result_size = 32 * R600_MAX_STREAMS;
- query->num_cs_dw_end = 6 * R600_MAX_STREAMS;
+ query->result_size = 32 * SI_MAX_STREAMS;
+ query->num_cs_dw_end = 6 * SI_MAX_STREAMS;
break;
case PIPE_QUERY_PIPELINE_STATISTICS:
/* 11 values on GCN. */
return (struct pipe_query *)query;
}
-static void r600_update_occlusion_query_state(struct si_context *sctx,
- unsigned type, int diff)
+static void si_update_occlusion_query_state(struct si_context *sctx,
+ unsigned type, int diff)
{
if (type == PIPE_QUERY_OCCLUSION_COUNTER ||
type == PIPE_QUERY_OCCLUSION_PREDICATE ||
radeon_emit(cs, va >> 32);
}
-static void r600_query_hw_do_emit_start(struct si_context *sctx,
- struct r600_query_hw *query,
+static void si_query_hw_do_emit_start(struct si_context *sctx,
+ struct si_query_hw *query,
struct r600_resource *buffer,
uint64_t va)
{
emit_sample_streamout(cs, va, query->stream);
break;
case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
- for (unsigned stream = 0; stream < R600_MAX_STREAMS; ++stream)
+ for (unsigned stream = 0; stream < SI_MAX_STREAMS; ++stream)
emit_sample_streamout(cs, va + 32 * stream, stream);
break;
case PIPE_QUERY_TIME_ELAPSED:
RADEON_PRIO_QUERY);
}
-static void r600_query_hw_emit_start(struct si_context *sctx,
- struct r600_query_hw *query)
+static void si_query_hw_emit_start(struct si_context *sctx,
+ struct si_query_hw *query)
{
uint64_t va;
if (!query->buffer.buf)
return; // previous buffer allocation failure
- r600_update_occlusion_query_state(sctx, query->b.type, 1);
+ si_update_occlusion_query_state(sctx, query->b.type, 1);
si_update_prims_generated_query_state(sctx, query->b.type, 1);
si_need_gfx_cs_space(sctx);
/* Get a new query buffer if needed. */
if (query->buffer.results_end + query->result_size > query->buffer.buf->b.b.width0) {
- struct r600_query_buffer *qbuf = MALLOC_STRUCT(r600_query_buffer);
+ struct si_query_buffer *qbuf = MALLOC_STRUCT(si_query_buffer);
*qbuf = query->buffer;
query->buffer.results_end = 0;
query->buffer.previous = qbuf;
- query->buffer.buf = r600_new_query_buffer(sctx->screen, query);
+ query->buffer.buf = si_new_query_buffer(sctx->screen, query);
if (!query->buffer.buf)
return;
}
sctx->b.num_cs_dw_queries_suspend += query->num_cs_dw_end;
}
-static void r600_query_hw_do_emit_stop(struct si_context *sctx,
- struct r600_query_hw *query,
+static void si_query_hw_do_emit_stop(struct si_context *sctx,
+ struct si_query_hw *query,
struct r600_resource *buffer,
uint64_t va)
{
break;
case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
va += 16;
- for (unsigned stream = 0; stream < R600_MAX_STREAMS; ++stream)
+ for (unsigned stream = 0; stream < SI_MAX_STREAMS; ++stream)
emit_sample_streamout(cs, va + 32 * stream, stream);
break;
case PIPE_QUERY_TIME_ELAPSED:
query->b.type);
}
-static void r600_query_hw_emit_stop(struct si_context *sctx,
- struct r600_query_hw *query)
+static void si_query_hw_emit_stop(struct si_context *sctx,
+ struct si_query_hw *query)
{
uint64_t va;
if (!(query->flags & R600_QUERY_HW_FLAG_NO_START))
sctx->b.num_cs_dw_queries_suspend -= query->num_cs_dw_end;
- r600_update_occlusion_query_state(sctx, query->b.type, -1);
+ si_update_occlusion_query_state(sctx, query->b.type, -1);
si_update_prims_generated_query_state(sctx, query->b.type, -1);
}
RADEON_PRIO_QUERY);
}
-static void r600_emit_query_predication(struct si_context *ctx,
- struct r600_atom *atom)
+static void si_emit_query_predication(struct si_context *ctx,
+ struct r600_atom *atom)
{
- struct r600_query_hw *query = (struct r600_query_hw *)ctx->b.render_cond;
- struct r600_query_buffer *qbuf;
+ struct si_query_hw *query = (struct si_query_hw *)ctx->b.render_cond;
+ struct si_query_buffer *qbuf;
uint32_t op;
bool flag_wait, invert;
uint64_t va = va_base + results_base;
if (query->b.type == PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE) {
- for (unsigned stream = 0; stream < R600_MAX_STREAMS; ++stream) {
+ for (unsigned stream = 0; stream < SI_MAX_STREAMS; ++stream) {
emit_set_predicate(ctx, qbuf->buf, va + 32 * stream, op);
/* set CONTINUE bit for all packets except the first */
}
}
-static struct pipe_query *r600_create_query(struct pipe_context *ctx, unsigned query_type, unsigned index)
+static struct pipe_query *si_create_query(struct pipe_context *ctx, unsigned query_type, unsigned index)
{
struct si_screen *sscreen =
(struct si_screen *)ctx->screen;
if (query_type == PIPE_QUERY_TIMESTAMP_DISJOINT ||
query_type == PIPE_QUERY_GPU_FINISHED ||
query_type >= PIPE_QUERY_DRIVER_SPECIFIC)
- return r600_query_sw_create(query_type);
+ return si_query_sw_create(query_type);
- return r600_query_hw_create(sscreen, query_type, index);
+ return si_query_hw_create(sscreen, query_type, index);
}
-static void r600_destroy_query(struct pipe_context *ctx, struct pipe_query *query)
+static void si_destroy_query(struct pipe_context *ctx, struct pipe_query *query)
{
struct si_context *sctx = (struct si_context *)ctx;
- struct r600_query *rquery = (struct r600_query *)query;
+ struct si_query *rquery = (struct si_query *)query;
rquery->ops->destroy(sctx->screen, rquery);
}
-static boolean r600_begin_query(struct pipe_context *ctx,
+static boolean si_begin_query(struct pipe_context *ctx,
struct pipe_query *query)
{
struct si_context *sctx = (struct si_context *)ctx;
- struct r600_query *rquery = (struct r600_query *)query;
+ struct si_query *rquery = (struct si_query *)query;
return rquery->ops->begin(sctx, rquery);
}
void si_query_hw_reset_buffers(struct si_context *sctx,
- struct r600_query_hw *query)
+ struct si_query_hw *query)
{
- struct r600_query_buffer *prev = query->buffer.previous;
+ struct si_query_buffer *prev = query->buffer.previous;
/* Discard the old query buffers. */
while (prev) {
- struct r600_query_buffer *qbuf = prev;
+ struct si_query_buffer *qbuf = prev;
prev = prev->previous;
r600_resource_reference(&qbuf->buf, NULL);
FREE(qbuf);
if (si_rings_is_buffer_referenced(sctx, query->buffer.buf->buf, RADEON_USAGE_READWRITE) ||
!sctx->b.ws->buffer_wait(query->buffer.buf->buf, 0, RADEON_USAGE_READWRITE)) {
r600_resource_reference(&query->buffer.buf, NULL);
- query->buffer.buf = r600_new_query_buffer(sctx->screen, query);
+ query->buffer.buf = si_new_query_buffer(sctx->screen, query);
} else {
if (!query->ops->prepare_buffer(sctx->screen, query, query->buffer.buf))
r600_resource_reference(&query->buffer.buf, NULL);
}
bool si_query_hw_begin(struct si_context *sctx,
- struct r600_query *rquery)
+ struct si_query *rquery)
{
- struct r600_query_hw *query = (struct r600_query_hw *)rquery;
+ struct si_query_hw *query = (struct si_query_hw *)rquery;
if (query->flags & R600_QUERY_HW_FLAG_NO_START) {
assert(0);
r600_resource_reference(&query->workaround_buf, NULL);
- r600_query_hw_emit_start(sctx, query);
+ si_query_hw_emit_start(sctx, query);
if (!query->buffer.buf)
return false;
return true;
}
-static bool r600_end_query(struct pipe_context *ctx, struct pipe_query *query)
+static bool si_end_query(struct pipe_context *ctx, struct pipe_query *query)
{
struct si_context *sctx = (struct si_context *)ctx;
- struct r600_query *rquery = (struct r600_query *)query;
+ struct si_query *rquery = (struct si_query *)query;
return rquery->ops->end(sctx, rquery);
}
bool si_query_hw_end(struct si_context *sctx,
- struct r600_query *rquery)
+ struct si_query *rquery)
{
- struct r600_query_hw *query = (struct r600_query_hw *)rquery;
+ struct si_query_hw *query = (struct si_query_hw *)rquery;
if (query->flags & R600_QUERY_HW_FLAG_NO_START)
si_query_hw_reset_buffers(sctx, query);
- r600_query_hw_emit_stop(sctx, query);
+ si_query_hw_emit_stop(sctx, query);
if (!(query->flags & R600_QUERY_HW_FLAG_NO_START))
LIST_DELINIT(&query->list);
return true;
}
-static void r600_get_hw_query_params(struct si_context *sctx,
- struct r600_query_hw *rquery, int index,
- struct r600_hw_query_params *params)
+static void si_get_hw_query_params(struct si_context *sctx,
+ struct si_query_hw *rquery, int index,
+ struct si_hw_query_params *params)
{
unsigned max_rbs = sctx->screen->info.num_render_backends;
params->fence_offset = params->end_offset + 4;
break;
case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
- params->pair_count = R600_MAX_STREAMS;
+ params->pair_count = SI_MAX_STREAMS;
params->pair_stride = 32;
case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
params->start_offset = 0;
break;
}
default:
- unreachable("r600_get_hw_query_params unsupported");
+ unreachable("si_get_hw_query_params unsupported");
}
}
-static unsigned r600_query_read_result(void *map, unsigned start_index, unsigned end_index,
- bool test_status_bit)
+static unsigned si_query_read_result(void *map, unsigned start_index, unsigned end_index,
+ bool test_status_bit)
{
uint32_t *current_result = (uint32_t*)map;
uint64_t start, end;
return 0;
}
-static void r600_query_hw_add_result(struct si_screen *sscreen,
- struct r600_query_hw *query,
+static void si_query_hw_add_result(struct si_screen *sscreen,
+ struct si_query_hw *query,
void *buffer,
union pipe_query_result *result)
{
for (unsigned i = 0; i < max_rbs; ++i) {
unsigned results_base = i * 16;
result->u64 +=
- r600_query_read_result(buffer + results_base, 0, 2, true);
+ si_query_read_result(buffer + results_base, 0, 2, true);
}
break;
}
for (unsigned i = 0; i < max_rbs; ++i) {
unsigned results_base = i * 16;
result->b = result->b ||
- r600_query_read_result(buffer + results_base, 0, 2, true) != 0;
+ si_query_read_result(buffer + results_base, 0, 2, true) != 0;
}
break;
}
case PIPE_QUERY_TIME_ELAPSED:
- result->u64 += r600_query_read_result(buffer, 0, 2, false);
+ result->u64 += si_query_read_result(buffer, 0, 2, false);
break;
case PIPE_QUERY_TIMESTAMP:
result->u64 = *(uint64_t*)buffer;
* u64 PrimitiveStorageNeeded;
* }
* We only need NumPrimitivesWritten here. */
- result->u64 += r600_query_read_result(buffer, 2, 6, true);
+ result->u64 += si_query_read_result(buffer, 2, 6, true);
break;
case PIPE_QUERY_PRIMITIVES_GENERATED:
/* Here we read PrimitiveStorageNeeded. */
- result->u64 += r600_query_read_result(buffer, 0, 4, true);
+ result->u64 += si_query_read_result(buffer, 0, 4, true);
break;
case PIPE_QUERY_SO_STATISTICS:
result->so_statistics.num_primitives_written +=
- r600_query_read_result(buffer, 2, 6, true);
+ si_query_read_result(buffer, 2, 6, true);
result->so_statistics.primitives_storage_needed +=
- r600_query_read_result(buffer, 0, 4, true);
+ si_query_read_result(buffer, 0, 4, true);
break;
case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
result->b = result->b ||
- r600_query_read_result(buffer, 2, 6, true) !=
- r600_query_read_result(buffer, 0, 4, true);
+ si_query_read_result(buffer, 2, 6, true) !=
+ si_query_read_result(buffer, 0, 4, true);
break;
case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
- for (unsigned stream = 0; stream < R600_MAX_STREAMS; ++stream) {
+ for (unsigned stream = 0; stream < SI_MAX_STREAMS; ++stream) {
result->b = result->b ||
- r600_query_read_result(buffer, 2, 6, true) !=
- r600_query_read_result(buffer, 0, 4, true);
+ si_query_read_result(buffer, 2, 6, true) !=
+ si_query_read_result(buffer, 0, 4, true);
buffer = (char *)buffer + 32;
}
break;
case PIPE_QUERY_PIPELINE_STATISTICS:
result->pipeline_statistics.ps_invocations +=
- r600_query_read_result(buffer, 0, 22, false);
+ si_query_read_result(buffer, 0, 22, false);
result->pipeline_statistics.c_primitives +=
- r600_query_read_result(buffer, 2, 24, false);
+ si_query_read_result(buffer, 2, 24, false);
result->pipeline_statistics.c_invocations +=
- r600_query_read_result(buffer, 4, 26, false);
+ si_query_read_result(buffer, 4, 26, false);
result->pipeline_statistics.vs_invocations +=
- r600_query_read_result(buffer, 6, 28, false);
+ si_query_read_result(buffer, 6, 28, false);
result->pipeline_statistics.gs_invocations +=
- r600_query_read_result(buffer, 8, 30, false);
+ si_query_read_result(buffer, 8, 30, false);
result->pipeline_statistics.gs_primitives +=
- r600_query_read_result(buffer, 10, 32, false);
+ si_query_read_result(buffer, 10, 32, false);
result->pipeline_statistics.ia_primitives +=
- r600_query_read_result(buffer, 12, 34, false);
+ si_query_read_result(buffer, 12, 34, false);
result->pipeline_statistics.ia_vertices +=
- r600_query_read_result(buffer, 14, 36, false);
+ si_query_read_result(buffer, 14, 36, false);
result->pipeline_statistics.hs_invocations +=
- r600_query_read_result(buffer, 16, 38, false);
+ si_query_read_result(buffer, 16, 38, false);
result->pipeline_statistics.ds_invocations +=
- r600_query_read_result(buffer, 18, 40, false);
+ si_query_read_result(buffer, 18, 40, false);
result->pipeline_statistics.cs_invocations +=
- r600_query_read_result(buffer, 20, 42, false);
+ si_query_read_result(buffer, 20, 42, false);
#if 0 /* for testing */
printf("Pipeline stats: IA verts=%llu, IA prims=%llu, VS=%llu, HS=%llu, "
"DS=%llu, GS=%llu, GS prims=%llu, Clipper=%llu, "
}
}
-static boolean r600_get_query_result(struct pipe_context *ctx,
- struct pipe_query *query, boolean wait,
- union pipe_query_result *result)
+static boolean si_get_query_result(struct pipe_context *ctx,
+ struct pipe_query *query, boolean wait,
+ union pipe_query_result *result)
{
struct si_context *sctx = (struct si_context *)ctx;
- struct r600_query *rquery = (struct r600_query *)query;
+ struct si_query *rquery = (struct si_query *)query;
return rquery->ops->get_result(sctx, rquery, wait, result);
}
-static void r600_get_query_result_resource(struct pipe_context *ctx,
- struct pipe_query *query,
- boolean wait,
- enum pipe_query_value_type result_type,
- int index,
- struct pipe_resource *resource,
- unsigned offset)
+static void si_get_query_result_resource(struct pipe_context *ctx,
+ struct pipe_query *query,
+ boolean wait,
+ enum pipe_query_value_type result_type,
+ int index,
+ struct pipe_resource *resource,
+ unsigned offset)
{
struct si_context *sctx = (struct si_context *)ctx;
- struct r600_query *rquery = (struct r600_query *)query;
+ struct si_query *rquery = (struct si_query *)query;
rquery->ops->get_result_resource(sctx, rquery, wait, result_type, index,
resource, offset);
}
-static void r600_query_hw_clear_result(struct r600_query_hw *query,
+static void si_query_hw_clear_result(struct si_query_hw *query,
union pipe_query_result *result)
{
util_query_clear_result(result, query->b.type);
}
bool si_query_hw_get_result(struct si_context *sctx,
- struct r600_query *rquery,
+ struct si_query *rquery,
bool wait, union pipe_query_result *result)
{
struct si_screen *sscreen = sctx->screen;
- struct r600_query_hw *query = (struct r600_query_hw *)rquery;
- struct r600_query_buffer *qbuf;
+ struct si_query_hw *query = (struct si_query_hw *)rquery;
+ struct si_query_buffer *qbuf;
query->ops->clear_result(query, result);
* BUFFER[1] = previous summary buffer
* BUFFER[2] = next summary buffer or user-supplied buffer
*/
-static void r600_create_query_result_shader(struct si_context *sctx)
+static void si_create_query_result_shader(struct si_context *sctx)
{
/* TEMP[0].xy = accumulated result so far
* TEMP[0].z = result not available
sctx->b.query_result_shader = sctx->b.b.create_compute_state(&sctx->b.b, &state);
}
-static void r600_restore_qbo_state(struct si_context *sctx,
- struct r600_qbo_state *st)
+static void si_restore_qbo_state(struct si_context *sctx,
+ struct si_qbo_state *st)
{
sctx->b.b.bind_compute_state(&sctx->b.b, st->saved_compute);
pipe_resource_reference(&st->saved_ssbo[i].buffer, NULL);
}
-static void r600_query_hw_get_result_resource(struct si_context *sctx,
- struct r600_query *rquery,
+static void si_query_hw_get_result_resource(struct si_context *sctx,
+ struct si_query *rquery,
bool wait,
enum pipe_query_value_type result_type,
int index,
struct pipe_resource *resource,
unsigned offset)
{
- struct r600_query_hw *query = (struct r600_query_hw *)rquery;
- struct r600_query_buffer *qbuf;
- struct r600_query_buffer *qbuf_prev;
+ struct si_query_hw *query = (struct si_query_hw *)rquery;
+ struct si_query_buffer *qbuf;
+ struct si_query_buffer *qbuf_prev;
struct pipe_resource *tmp_buffer = NULL;
unsigned tmp_buffer_offset = 0;
- struct r600_qbo_state saved_state = {};
+ struct si_qbo_state saved_state = {};
struct pipe_grid_info grid = {};
struct pipe_constant_buffer constant_buffer = {};
struct pipe_shader_buffer ssbo[3];
- struct r600_hw_query_params params;
+ struct si_hw_query_params params;
struct {
uint32_t end_offset;
uint32_t result_stride;
} consts;
if (!sctx->b.query_result_shader) {
- r600_create_query_result_shader(sctx);
+ si_create_query_result_shader(sctx);
if (!sctx->b.query_result_shader)
return;
}
si_save_qbo_state(sctx, &saved_state);
-	r600_get_hw_query_params(sctx, query, index >= 0 ? index : 0, &params);
+	si_get_hw_query_params(sctx, query, index >= 0 ? index : 0, &params);
consts.end_offset = params.end_offset - params.start_offset;
consts.fence_offset = params.fence_offset - params.start_offset;
consts.result_stride = query->result_size;
sctx->b.flags |= SI_CONTEXT_CS_PARTIAL_FLUSH;
}
- r600_restore_qbo_state(sctx, &saved_state);
+ si_restore_qbo_state(sctx, &saved_state);
pipe_resource_reference(&tmp_buffer, NULL);
}
-static void r600_render_condition(struct pipe_context *ctx,
- struct pipe_query *query,
- boolean condition,
- enum pipe_render_cond_flag mode)
+static void si_render_condition(struct pipe_context *ctx,
+ struct pipe_query *query,
+ boolean condition,
+ enum pipe_render_cond_flag mode)
{
struct si_context *sctx = (struct si_context *)ctx;
- struct r600_query_hw *rquery = (struct r600_query_hw *)query;
+ struct si_query_hw *rquery = (struct si_query_hw *)query;
struct r600_atom *atom = &sctx->b.render_cond_atom;
if (query) {
void si_suspend_queries(struct si_context *sctx)
{
- struct r600_query_hw *query;
+ struct si_query_hw *query;
LIST_FOR_EACH_ENTRY(query, &sctx->b.active_queries, list) {
- r600_query_hw_emit_stop(sctx, query);
+ si_query_hw_emit_stop(sctx, query);
}
assert(sctx->b.num_cs_dw_queries_suspend == 0);
}
void si_resume_queries(struct si_context *sctx)
{
- struct r600_query_hw *query;
+ struct si_query_hw *query;
assert(sctx->b.num_cs_dw_queries_suspend == 0);
si_need_gfx_cs_space(sctx);
LIST_FOR_EACH_ENTRY(query, &sctx->b.active_queries, list) {
- r600_query_hw_emit_start(sctx, query);
+ si_query_hw_emit_start(sctx, query);
}
}
#define XG(group_, name_, query_type_, type_, result_type_) \
XFULL(name_, query_type_, type_, result_type_, R600_QUERY_GROUP_##group_)
-static struct pipe_driver_query_info r600_driver_query_list[] = {
+static struct pipe_driver_query_info si_driver_query_list[] = {
X("num-compilations", NUM_COMPILATIONS, UINT64, CUMULATIVE),
X("num-shaders-created", NUM_SHADERS_CREATED, UINT64, CUMULATIVE),
X("num-shader-cache-hits", NUM_SHADER_CACHE_HITS, UINT64, CUMULATIVE),
#undef XG
#undef XFULL
-static unsigned r600_get_num_queries(struct si_screen *sscreen)
+static unsigned si_get_num_queries(struct si_screen *sscreen)
{
if (sscreen->info.drm_major == 2 && sscreen->info.drm_minor >= 42)
- return ARRAY_SIZE(r600_driver_query_list);
+ return ARRAY_SIZE(si_driver_query_list);
else if (sscreen->info.drm_major == 3) {
if (sscreen->info.chip_class >= VI)
- return ARRAY_SIZE(r600_driver_query_list);
+ return ARRAY_SIZE(si_driver_query_list);
else
- return ARRAY_SIZE(r600_driver_query_list) - 7;
+ return ARRAY_SIZE(si_driver_query_list) - 7;
}
else
- return ARRAY_SIZE(r600_driver_query_list) - 25;
+ return ARRAY_SIZE(si_driver_query_list) - 25;
}
-static int r600_get_driver_query_info(struct pipe_screen *screen,
- unsigned index,
- struct pipe_driver_query_info *info)
+static int si_get_driver_query_info(struct pipe_screen *screen,
+ unsigned index,
+ struct pipe_driver_query_info *info)
{
struct si_screen *sscreen = (struct si_screen*)screen;
- unsigned num_queries = r600_get_num_queries(sscreen);
+ unsigned num_queries = si_get_num_queries(sscreen);
if (!info) {
unsigned num_perfcounters =
if (index >= num_queries)
return si_get_perfcounter_info(sscreen, index - num_queries, info);
- *info = r600_driver_query_list[index];
+ *info = si_driver_query_list[index];
switch (info->query_type) {
case R600_QUERY_REQUESTED_VRAM:
* performance counter groups, so be careful when changing this and related
* functions.
*/
-static int r600_get_driver_query_group_info(struct pipe_screen *screen,
- unsigned index,
- struct pipe_driver_query_group_info *info)
+static int si_get_driver_query_group_info(struct pipe_screen *screen,
+ unsigned index,
+ struct pipe_driver_query_group_info *info)
{
struct si_screen *sscreen = (struct si_screen *)screen;
unsigned num_pc_groups = 0;
void si_init_query_functions(struct si_context *sctx)
{
- sctx->b.b.create_query = r600_create_query;
+ sctx->b.b.create_query = si_create_query;
sctx->b.b.create_batch_query = si_create_batch_query;
- sctx->b.b.destroy_query = r600_destroy_query;
- sctx->b.b.begin_query = r600_begin_query;
- sctx->b.b.end_query = r600_end_query;
- sctx->b.b.get_query_result = r600_get_query_result;
- sctx->b.b.get_query_result_resource = r600_get_query_result_resource;
- sctx->b.render_cond_atom.emit = r600_emit_query_predication;
+ sctx->b.b.destroy_query = si_destroy_query;
+ sctx->b.b.begin_query = si_begin_query;
+ sctx->b.b.end_query = si_end_query;
+ sctx->b.b.get_query_result = si_get_query_result;
+ sctx->b.b.get_query_result_resource = si_get_query_result_resource;
+ sctx->b.render_cond_atom.emit = si_emit_query_predication;
if (((struct si_screen*)sctx->b.b.screen)->info.num_render_backends > 0)
- sctx->b.b.render_condition = r600_render_condition;
+ sctx->b.b.render_condition = si_render_condition;
LIST_INITHEAD(&sctx->b.active_queries);
}
void si_init_screen_query_functions(struct si_screen *sscreen)
{
- sscreen->b.get_driver_query_info = r600_get_driver_query_info;
- sscreen->b.get_driver_query_group_info = r600_get_driver_query_group_info;
+ sscreen->b.get_driver_query_info = si_get_driver_query_info;
+ sscreen->b.get_driver_query_group_info = si_get_driver_query_group_info;
}
struct si_screen;
struct si_context;
-struct r600_query;
-struct r600_query_hw;
+struct si_query;
+struct si_query_hw;
struct r600_resource;
enum {
R600_NUM_SW_QUERY_GROUPS
};
-struct r600_query_ops {
- void (*destroy)(struct si_screen *, struct r600_query *);
- bool (*begin)(struct si_context *, struct r600_query *);
- bool (*end)(struct si_context *, struct r600_query *);
+struct si_query_ops {
+ void (*destroy)(struct si_screen *, struct si_query *);
+ bool (*begin)(struct si_context *, struct si_query *);
+ bool (*end)(struct si_context *, struct si_query *);
bool (*get_result)(struct si_context *,
- struct r600_query *, bool wait,
+ struct si_query *, bool wait,
union pipe_query_result *result);
void (*get_result_resource)(struct si_context *,
- struct r600_query *, bool wait,
+ struct si_query *, bool wait,
enum pipe_query_value_type result_type,
int index,
struct pipe_resource *resource,
unsigned offset);
};
-struct r600_query {
+struct si_query {
struct threaded_query b;
- struct r600_query_ops *ops;
+ struct si_query_ops *ops;
/* The type of query */
unsigned type;
R600_QUERY_HW_FLAG_BEGIN_RESUMES = (1 << 2),
};
-struct r600_query_hw_ops {
+struct si_query_hw_ops {
bool (*prepare_buffer)(struct si_screen *,
- struct r600_query_hw *,
+ struct si_query_hw *,
struct r600_resource *);
void (*emit_start)(struct si_context *,
- struct r600_query_hw *,
+ struct si_query_hw *,
struct r600_resource *buffer, uint64_t va);
void (*emit_stop)(struct si_context *,
- struct r600_query_hw *,
+ struct si_query_hw *,
struct r600_resource *buffer, uint64_t va);
- void (*clear_result)(struct r600_query_hw *, union pipe_query_result *);
+ void (*clear_result)(struct si_query_hw *, union pipe_query_result *);
void (*add_result)(struct si_screen *screen,
- struct r600_query_hw *, void *buffer,
+ struct si_query_hw *, void *buffer,
union pipe_query_result *result);
};
-struct r600_query_buffer {
+struct si_query_buffer {
/* The buffer where query results are stored. */
struct r600_resource *buf;
/* Offset of the next free result after current query data */
/* If a query buffer is full, a new buffer is created and the old one
* is put in here. When we calculate the result, we sum up the samples
* from all buffers. */
- struct r600_query_buffer *previous;
+ struct si_query_buffer *previous;
};
-struct r600_query_hw {
- struct r600_query b;
- struct r600_query_hw_ops *ops;
+struct si_query_hw {
+ struct si_query b;
+ struct si_query_hw_ops *ops;
unsigned flags;
/* The query buffer and how many results are in it. */
- struct r600_query_buffer buffer;
+ struct si_query_buffer buffer;
/* Size of the result in memory for both begin_query and end_query,
* this can be one or two numbers, or it could even be a size of a structure. */
unsigned result_size;
};
bool si_query_hw_init(struct si_screen *sscreen,
- struct r600_query_hw *query);
+ struct si_query_hw *query);
void si_query_hw_destroy(struct si_screen *sscreen,
- struct r600_query *rquery);
+ struct si_query *rquery);
bool si_query_hw_begin(struct si_context *sctx,
- struct r600_query *rquery);
+ struct si_query *rquery);
bool si_query_hw_end(struct si_context *sctx,
- struct r600_query *rquery);
+ struct si_query *rquery);
bool si_query_hw_get_result(struct si_context *sctx,
- struct r600_query *rquery,
+ struct si_query *rquery,
bool wait,
union pipe_query_result *result);
* (c) expose one performance counter group per instance, but summed over all
* shader engines.
*/
-struct r600_perfcounter_block {
+struct si_perfcounter_block {
const char *basename;
unsigned flags;
unsigned num_counters;
void *data;
};
-struct r600_perfcounters {
+struct si_perfcounters {
unsigned num_groups;
unsigned num_blocks;
- struct r600_perfcounter_block *blocks;
+ struct si_perfcounter_block *blocks;
unsigned num_stop_cs_dwords;
unsigned num_instance_cs_dwords;
int se, int instance);
void (*emit_shaders)(struct si_context *, unsigned shaders);
void (*emit_select)(struct si_context *,
- struct r600_perfcounter_block *,
+ struct si_perfcounter_block *,
unsigned count, unsigned *selectors);
void (*emit_start)(struct si_context *,
struct r600_resource *buffer, uint64_t va);
void (*emit_stop)(struct si_context *,
struct r600_resource *buffer, uint64_t va);
void (*emit_read)(struct si_context *,
- struct r600_perfcounter_block *,
+ struct si_perfcounter_block *,
unsigned count, unsigned *selectors,
struct r600_resource *buffer, uint64_t va);
unsigned index,
struct pipe_driver_query_group_info *info);
-bool si_perfcounters_init(struct r600_perfcounters *, unsigned num_blocks);
+bool si_perfcounters_init(struct si_perfcounters *, unsigned num_blocks);
void si_perfcounters_add_block(struct si_screen *,
- struct r600_perfcounters *,
+ struct si_perfcounters *,
const char *name, unsigned flags,
unsigned counters, unsigned selectors,
unsigned instances, void *data);
-void si_perfcounters_do_destroy(struct r600_perfcounters *);
+void si_perfcounters_do_destroy(struct si_perfcounters *);
void si_query_hw_reset_buffers(struct si_context *sctx,
- struct r600_query_hw *query);
+ struct si_query_hw *query);
-struct r600_qbo_state {
+struct si_qbo_state {
void *saved_compute;
struct pipe_constant_buffer saved_const0;
struct pipe_shader_buffer saved_ssbo[3];
#include "amd/common/sid.h"
static enum radeon_surf_mode
-r600_choose_tiling(struct si_screen *sscreen,
- const struct pipe_resource *templ);
+si_choose_tiling(struct si_screen *sscreen,
+ const struct pipe_resource *templ);
bool si_prepare_for_dma_blit(struct si_context *sctx,
}
/* Same as resource_copy_region, except that both upsampling and downsampling are allowed. */
-static void r600_copy_region_with_blit(struct pipe_context *pipe,
- struct pipe_resource *dst,
- unsigned dst_level,
- unsigned dstx, unsigned dsty, unsigned dstz,
- struct pipe_resource *src,
- unsigned src_level,
- const struct pipe_box *src_box)
+static void si_copy_region_with_blit(struct pipe_context *pipe,
+ struct pipe_resource *dst,
+ unsigned dst_level,
+ unsigned dstx, unsigned dsty, unsigned dstz,
+ struct pipe_resource *src,
+ unsigned src_level,
+ const struct pipe_box *src_box)
{
struct pipe_blit_info blit;
}
/* Copy from a full GPU texture to a transfer's staging one. */
-static void r600_copy_to_staging_texture(struct pipe_context *ctx, struct r600_transfer *rtransfer)
+static void si_copy_to_staging_texture(struct pipe_context *ctx, struct r600_transfer *rtransfer)
{
struct si_context *sctx = (struct si_context*)ctx;
struct pipe_transfer *transfer = (struct pipe_transfer*)rtransfer;
struct pipe_resource *src = transfer->resource;
if (src->nr_samples > 1) {
- r600_copy_region_with_blit(ctx, dst, 0, 0, 0, 0,
+ si_copy_region_with_blit(ctx, dst, 0, 0, 0, 0,
src, transfer->level, &transfer->box);
return;
}
}
/* Copy from a transfer's staging texture to a full GPU one. */
-static void r600_copy_from_staging_texture(struct pipe_context *ctx, struct r600_transfer *rtransfer)
+static void si_copy_from_staging_texture(struct pipe_context *ctx, struct r600_transfer *rtransfer)
{
struct si_context *sctx = (struct si_context*)ctx;
struct pipe_transfer *transfer = (struct pipe_transfer*)rtransfer;
u_box_3d(0, 0, 0, transfer->box.width, transfer->box.height, transfer->box.depth, &sbox);
if (dst->nr_samples > 1) {
- r600_copy_region_with_blit(ctx, dst, transfer->level,
+ si_copy_region_with_blit(ctx, dst, transfer->level,
transfer->box.x, transfer->box.y, transfer->box.z,
src, 0, &sbox);
return;
src, 0, &sbox);
}
-static unsigned r600_texture_get_offset(struct si_screen *sscreen,
- struct r600_texture *rtex, unsigned level,
- const struct pipe_box *box,
- unsigned *stride,
- unsigned *layer_stride)
+static unsigned si_texture_get_offset(struct si_screen *sscreen,
+ struct r600_texture *rtex, unsigned level,
+ const struct pipe_box *box,
+ unsigned *stride,
+ unsigned *layer_stride)
{
if (sscreen->info.chip_class >= GFX9) {
*stride = rtex->surface.u.gfx9.surf_pitch * rtex->surface.bpe;
}
}
-static int r600_init_surface(struct si_screen *sscreen,
- struct radeon_surf *surface,
- const struct pipe_resource *ptex,
- enum radeon_surf_mode array_mode,
- unsigned pitch_in_bytes_override,
- unsigned offset,
- bool is_imported,
- bool is_scanout,
- bool is_flushed_depth,
- bool tc_compatible_htile)
+static int si_init_surface(struct si_screen *sscreen,
+ struct radeon_surf *surface,
+ const struct pipe_resource *ptex,
+ enum radeon_surf_mode array_mode,
+ unsigned pitch_in_bytes_override,
+ unsigned offset,
+ bool is_imported,
+ bool is_scanout,
+ bool is_flushed_depth,
+ bool tc_compatible_htile)
{
const struct util_format_description *desc =
util_format_description(ptex->format);
return 0;
}
-static void r600_texture_init_metadata(struct si_screen *sscreen,
- struct r600_texture *rtex,
- struct radeon_bo_metadata *metadata)
+static void si_texture_init_metadata(struct si_screen *sscreen,
+ struct r600_texture *rtex,
+ struct radeon_bo_metadata *metadata)
{
struct radeon_surf *surface = &rtex->surface;
}
}
-static void r600_surface_import_metadata(struct si_screen *sscreen,
- struct radeon_surf *surf,
- struct radeon_bo_metadata *metadata,
- enum radeon_surf_mode *array_mode,
- bool *is_scanout)
+static void si_surface_import_metadata(struct si_screen *sscreen,
+ struct radeon_surf *surf,
+ struct radeon_bo_metadata *metadata,
+ enum radeon_surf_mode *array_mode,
+ bool *is_scanout)
{
if (sscreen->info.chip_class >= GFX9) {
if (metadata->u.gfx9.swizzle_mode > 0)
p_atomic_inc(&sscreen->compressed_colortex_counter);
}
-static bool r600_can_disable_dcc(struct r600_texture *rtex)
+static bool si_can_disable_dcc(struct r600_texture *rtex)
{
/* We can't disable DCC if it can be written by another process. */
return rtex->dcc_offset &&
!(rtex->resource.external_usage & PIPE_HANDLE_USAGE_WRITE));
}
-static bool r600_texture_discard_dcc(struct si_screen *sscreen,
- struct r600_texture *rtex)
+static bool si_texture_discard_dcc(struct si_screen *sscreen,
+ struct r600_texture *rtex)
{
- if (!r600_can_disable_dcc(rtex))
+ if (!si_can_disable_dcc(rtex))
return false;
assert(rtex->dcc_separate_buffer == NULL);
{
struct si_screen *sscreen = sctx->screen;
- if (!r600_can_disable_dcc(rtex))
+ if (!si_can_disable_dcc(rtex))
return false;
if (&sctx->b.b == sscreen->aux_context)
if (&sctx->b.b == sscreen->aux_context)
mtx_unlock(&sscreen->aux_context_lock);
- return r600_texture_discard_dcc(sscreen, rtex);
+ return si_texture_discard_dcc(sscreen, rtex);
}
-static void r600_reallocate_texture_inplace(struct si_context *sctx,
- struct r600_texture *rtex,
- unsigned new_bind_flag,
- bool invalidate_storage)
+static void si_reallocate_texture_inplace(struct si_context *sctx,
+ struct r600_texture *rtex,
+ unsigned new_bind_flag,
+ bool invalidate_storage)
{
struct pipe_screen *screen = sctx->b.b.screen;
struct r600_texture *new_tex;
return;
/* This fails with MSAA, depth, and compressed textures. */
- if (r600_choose_tiling(sctx->screen, &templ) !=
+ if (si_choose_tiling(sctx->screen, &templ) !=
RADEON_SURF_MODE_LINEAR_ALIGNED)
return;
}
if (new_bind_flag == PIPE_BIND_LINEAR) {
si_texture_discard_cmask(sctx->screen, rtex);
- r600_texture_discard_dcc(sctx->screen, rtex);
+ si_texture_discard_dcc(sctx->screen, rtex);
}
/* Replace the structure fields of rtex. */
rtex->dcc_offset = 0;
}
-static boolean r600_texture_get_handle(struct pipe_screen* screen,
- struct pipe_context *ctx,
- struct pipe_resource *resource,
- struct winsys_handle *whandle,
- unsigned usage)
+static boolean si_texture_get_handle(struct pipe_screen* screen,
+ struct pipe_context *ctx,
+ struct pipe_resource *resource,
+ struct winsys_handle *whandle,
+ unsigned usage)
{
struct si_screen *sscreen = (struct si_screen*)screen;
struct si_context *sctx;
sscreen->info.has_local_buffers &&
whandle->type != DRM_API_HANDLE_TYPE_KMS)) {
assert(!res->b.is_shared);
- r600_reallocate_texture_inplace(sctx, rtex,
+ si_reallocate_texture_inplace(sctx, rtex,
PIPE_BIND_SHARED, false);
flush = true;
assert(res->b.b.bind & PIPE_BIND_SHARED);
/* Set metadata. */
if (!res->b.is_shared || update_metadata) {
- r600_texture_init_metadata(sscreen, rtex, &metadata);
+ si_texture_init_metadata(sscreen, rtex, &metadata);
si_query_opaque_metadata(sscreen, rtex, &metadata);
sscreen->ws->buffer_set_metadata(res->buf, &metadata);
slice_size, whandle);
}
-static void r600_texture_destroy(struct pipe_screen *screen,
- struct pipe_resource *ptex)
+static void si_texture_destroy(struct pipe_screen *screen,
+ struct pipe_resource *ptex)
{
struct r600_texture *rtex = (struct r600_texture*)ptex;
struct r600_resource *resource = &rtex->resource;
FREE(rtex);
}
-static const struct u_resource_vtbl r600_texture_vtbl;
+static const struct u_resource_vtbl si_texture_vtbl;
/* The number of samples can be specified independently of the texture. */
void si_texture_get_fmask_info(struct si_screen *sscreen,
out->size = fmask.surf_size;
}
-static void r600_texture_allocate_fmask(struct si_screen *sscreen,
- struct r600_texture *rtex)
+static void si_texture_allocate_fmask(struct si_screen *sscreen,
+ struct r600_texture *rtex)
{
si_texture_get_fmask_info(sscreen, rtex,
rtex->resource.b.b.nr_samples, &rtex->fmask);
align(slice_bytes, base_align);
}
-static void r600_texture_allocate_cmask(struct si_screen *sscreen,
- struct r600_texture *rtex)
+static void si_texture_allocate_cmask(struct si_screen *sscreen,
+ struct r600_texture *rtex)
{
si_texture_get_cmask_info(sscreen, rtex, &rtex->cmask);
rtex->cb_color_info |= S_028C70_FAST_CLEAR(1);
}
-static void r600_texture_get_htile_size(struct si_screen *sscreen,
- struct r600_texture *rtex)
+static void si_texture_get_htile_size(struct si_screen *sscreen,
+ struct r600_texture *rtex)
{
unsigned cl_width, cl_height, width, height;
unsigned slice_elements, slice_bytes, pipe_interleave_bytes, base_align;
align(slice_bytes, base_align);
}
-static void r600_texture_allocate_htile(struct si_screen *sscreen,
- struct r600_texture *rtex)
+static void si_texture_allocate_htile(struct si_screen *sscreen,
+ struct r600_texture *rtex)
{
if (sscreen->info.chip_class <= VI && !rtex->tc_compatible_htile)
- r600_texture_get_htile_size(sscreen, rtex);
+ si_texture_get_htile_size(sscreen, rtex);
if (!rtex->surface.htile_size)
return;
/* Common processing for r600_texture_create and r600_texture_from_handle */
static struct r600_texture *
-r600_texture_create_object(struct pipe_screen *screen,
- const struct pipe_resource *base,
- struct pb_buffer *buf,
- struct radeon_surf *surface)
+si_texture_create_object(struct pipe_screen *screen,
+ const struct pipe_resource *base,
+ struct pb_buffer *buf,
+ struct radeon_surf *surface)
{
struct r600_texture *rtex;
struct r600_resource *resource;
resource = &rtex->resource;
resource->b.b = *base;
resource->b.b.next = NULL;
- resource->b.vtbl = &r600_texture_vtbl;
+ resource->b.vtbl = &si_texture_vtbl;
pipe_reference_init(&resource->b.b.reference, 1);
resource->b.b.screen = screen;
rtex->db_compatible = true;
if (!(sscreen->debug_flags & DBG(NO_HYPERZ)))
- r600_texture_allocate_htile(sscreen, rtex);
+ si_texture_allocate_htile(sscreen, rtex);
}
} else {
if (base->nr_samples > 1 &&
!buf &&
!(sscreen->debug_flags & DBG(NO_FMASK))) {
- r600_texture_allocate_fmask(sscreen, rtex);
- r600_texture_allocate_cmask(sscreen, rtex);
+ si_texture_allocate_fmask(sscreen, rtex);
+ si_texture_allocate_cmask(sscreen, rtex);
rtex->cmask_buffer = &rtex->resource;
if (!rtex->fmask.size || !rtex->cmask.size) {
}
static enum radeon_surf_mode
-r600_choose_tiling(struct si_screen *sscreen,
+si_choose_tiling(struct si_screen *sscreen,
const struct pipe_resource *templ)
{
const struct util_format_description *desc = util_format_description(templ->format);
int r;
- r = r600_init_surface(sscreen, &surface, templ,
- r600_choose_tiling(sscreen, templ), 0, 0,
+ r = si_init_surface(sscreen, &surface, templ,
+ si_choose_tiling(sscreen, templ), 0, 0,
false, false, is_flushed_depth,
tc_compatible_htile);
if (r) {
}
return (struct pipe_resource *)
- r600_texture_create_object(screen, templ, NULL, &surface);
+ si_texture_create_object(screen, templ, NULL, &surface);
}
-static struct pipe_resource *r600_texture_from_handle(struct pipe_screen *screen,
- const struct pipe_resource *templ,
- struct winsys_handle *whandle,
- unsigned usage)
+static struct pipe_resource *si_texture_from_handle(struct pipe_screen *screen,
+ const struct pipe_resource *templ,
+ struct winsys_handle *whandle,
+ unsigned usage)
{
struct si_screen *sscreen = (struct si_screen*)screen;
struct pb_buffer *buf = NULL;
return NULL;
sscreen->ws->buffer_get_metadata(buf, &metadata);
- r600_surface_import_metadata(sscreen, &surface, &metadata,
+ si_surface_import_metadata(sscreen, &surface, &metadata,
&array_mode, &is_scanout);
- r = r600_init_surface(sscreen, &surface, templ, array_mode, stride,
+ r = si_init_surface(sscreen, &surface, templ, array_mode, stride,
offset, true, is_scanout, false, false);
if (r) {
return NULL;
}
- rtex = r600_texture_create_object(screen, templ, buf, &surface);
+ rtex = si_texture_create_object(screen, templ, buf, &surface);
if (!rtex)
return NULL;
* which is supposed to hold a subregion of the texture "orig" at the given
* mipmap level.
*/
-static void r600_init_temp_resource_from_box(struct pipe_resource *res,
- struct pipe_resource *orig,
- const struct pipe_box *box,
- unsigned level, unsigned flags)
+static void si_init_temp_resource_from_box(struct pipe_resource *res,
+ struct pipe_resource *orig,
+ const struct pipe_box *box,
+ unsigned level, unsigned flags)
{
memset(res, 0, sizeof(*res));
res->format = orig->format;
}
}
-static bool r600_can_invalidate_texture(struct si_screen *sscreen,
- struct r600_texture *rtex,
- unsigned transfer_usage,
- const struct pipe_box *box)
+static bool si_can_invalidate_texture(struct si_screen *sscreen,
+ struct r600_texture *rtex,
+ unsigned transfer_usage,
+ const struct pipe_box *box)
{
return !rtex->resource.b.is_shared &&
!(transfer_usage & PIPE_TRANSFER_READ) &&
box->depth);
}
-static void r600_texture_invalidate_storage(struct si_context *sctx,
- struct r600_texture *rtex)
+static void si_texture_invalidate_storage(struct si_context *sctx,
+ struct r600_texture *rtex)
{
struct si_screen *sscreen = sctx->screen;
sctx->b.num_alloc_tex_transfer_bytes += rtex->size;
}
-static void *r600_texture_transfer_map(struct pipe_context *ctx,
- struct pipe_resource *texture,
- unsigned level,
- unsigned usage,
- const struct pipe_box *box,
- struct pipe_transfer **ptransfer)
+static void *si_texture_transfer_map(struct pipe_context *ctx,
+ struct pipe_resource *texture,
+ unsigned level,
+ unsigned usage,
+ const struct pipe_box *box,
+ struct pipe_transfer **ptransfer)
{
struct si_context *sctx = (struct si_context*)ctx;
struct r600_texture *rtex = (struct r600_texture*)texture;
box->width >= 4 && box->height >= 4 &&
p_atomic_inc_return(&rtex->num_level0_transfers) == 10) {
bool can_invalidate =
- r600_can_invalidate_texture(sctx->screen, rtex,
+ si_can_invalidate_texture(sctx->screen, rtex,
usage, box);
- r600_reallocate_texture_inplace(sctx, rtex,
+ si_reallocate_texture_inplace(sctx, rtex,
PIPE_BIND_LINEAR,
can_invalidate);
}
!sctx->b.ws->buffer_wait(rtex->resource.buf, 0,
RADEON_USAGE_READWRITE)) {
/* It's busy. */
- if (r600_can_invalidate_texture(sctx->screen, rtex,
+ if (si_can_invalidate_texture(sctx->screen, rtex,
usage, box))
- r600_texture_invalidate_storage(sctx, rtex);
+ si_texture_invalidate_storage(sctx, rtex);
else
use_staging_texture = true;
}
*/
struct pipe_resource resource;
- r600_init_temp_resource_from_box(&resource, texture, box, level, 0);
+ si_init_temp_resource_from_box(&resource, texture, box, level, 0);
if (!si_init_flushed_depth_texture(ctx, &resource, &staging_depth)) {
R600_ERR("failed to create temporary texture to hold untiled copy\n");
return NULL;
}
- r600_copy_region_with_blit(ctx, temp, 0, 0, 0, 0, texture, level, box);
+ si_copy_region_with_blit(ctx, temp, 0, 0, 0, 0, texture, level, box);
si_blit_decompress_depth(ctx, (struct r600_texture*)temp, staging_depth,
0, 0, 0, box->depth, 0, 0);
pipe_resource_reference(&temp, NULL);
}
/* Just get the strides. */
- r600_texture_get_offset(sctx->screen, staging_depth, level, NULL,
+ si_texture_get_offset(sctx->screen, staging_depth, level, NULL,
&trans->b.b.stride,
&trans->b.b.layer_stride);
} else {
box->z, box->z + box->depth - 1,
0, 0);
- offset = r600_texture_get_offset(sctx->screen, staging_depth,
+ offset = si_texture_get_offset(sctx->screen, staging_depth,
level, box,
&trans->b.b.stride,
&trans->b.b.layer_stride);
struct pipe_resource resource;
struct r600_texture *staging;
- r600_init_temp_resource_from_box(&resource, texture, box, level,
+ si_init_temp_resource_from_box(&resource, texture, box, level,
R600_RESOURCE_FLAG_TRANSFER);
resource.usage = (usage & PIPE_TRANSFER_READ) ?
PIPE_USAGE_STAGING : PIPE_USAGE_STREAM;
trans->staging = &staging->resource;
/* Just get the strides. */
- r600_texture_get_offset(sctx->screen, staging, 0, NULL,
+ si_texture_get_offset(sctx->screen, staging, 0, NULL,
&trans->b.b.stride,
&trans->b.b.layer_stride);
if (usage & PIPE_TRANSFER_READ)
- r600_copy_to_staging_texture(ctx, trans);
+ si_copy_to_staging_texture(ctx, trans);
else
usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
buf = trans->staging;
} else {
/* the resource is mapped directly */
- offset = r600_texture_get_offset(sctx->screen, rtex, level, box,
+ offset = si_texture_get_offset(sctx->screen, rtex, level, box,
&trans->b.b.stride,
&trans->b.b.layer_stride);
buf = &rtex->resource;
return map + offset;
}
-static void r600_texture_transfer_unmap(struct pipe_context *ctx,
- struct pipe_transfer* transfer)
+static void si_texture_transfer_unmap(struct pipe_context *ctx,
+ struct pipe_transfer* transfer)
{
struct si_context *sctx = (struct si_context*)ctx;
struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;
&rtransfer->staging->b.b, transfer->level,
&transfer->box);
} else {
- r600_copy_from_staging_texture(ctx, rtransfer);
+ si_copy_from_staging_texture(ctx, rtransfer);
}
}
FREE(transfer);
}
-static const struct u_resource_vtbl r600_texture_vtbl =
+static const struct u_resource_vtbl si_texture_vtbl =
{
NULL, /* get_handle */
- r600_texture_destroy, /* resource_destroy */
- r600_texture_transfer_map, /* transfer_map */
+ si_texture_destroy, /* resource_destroy */
+ si_texture_transfer_map, /* transfer_map */
u_default_transfer_flush_region, /* transfer_flush_region */
- r600_texture_transfer_unmap, /* transfer_unmap */
+ si_texture_transfer_unmap, /* transfer_unmap */
};
/* DCC channel type categories within which formats can be reinterpreted
return &surface->base;
}
-static struct pipe_surface *r600_create_surface(struct pipe_context *pipe,
- struct pipe_resource *tex,
- const struct pipe_surface *templ)
+static struct pipe_surface *si_create_surface(struct pipe_context *pipe,
+ struct pipe_resource *tex,
+ const struct pipe_surface *templ)
{
unsigned level = templ->u.tex.level;
unsigned width = u_minify(tex->width0, level);
width, height);
}
-static void r600_surface_destroy(struct pipe_context *pipe,
- struct pipe_surface *surface)
+static void si_surface_destroy(struct pipe_context *pipe,
+ struct pipe_surface *surface)
{
pipe_resource_reference(&surface->texture, NULL);
FREE(surface);
static struct pipe_query *
vi_create_resuming_pipestats_query(struct si_context *sctx)
{
- struct r600_query_hw *query = (struct r600_query_hw*)
+ struct si_query_hw *query = (struct si_query_hw*)
sctx->b.b.create_query(&sctx->b.b, PIPE_QUERY_PIPELINE_STATISTICS, 0);
query->flags |= R600_QUERY_HW_FLAG_BEGIN_RESUMES;
ctx->get_query_result(ctx, sctx->b.dcc_stats[i].ps_stats[2],
true, &result);
si_query_hw_reset_buffers(sctx,
- (struct r600_query_hw*)
+ (struct si_query_hw*)
sctx->b.dcc_stats[i].ps_stats[2]);
/* Compute the approximate number of fullscreen draws. */
}
static struct pipe_memory_object *
-r600_memobj_from_handle(struct pipe_screen *screen,
- struct winsys_handle *whandle,
- bool dedicated)
+si_memobj_from_handle(struct pipe_screen *screen,
+ struct winsys_handle *whandle,
+ bool dedicated)
{
struct si_screen *sscreen = (struct si_screen*)screen;
struct r600_memory_object *memobj = CALLOC_STRUCT(r600_memory_object);
}
static void
-r600_memobj_destroy(struct pipe_screen *screen,
- struct pipe_memory_object *_memobj)
+si_memobj_destroy(struct pipe_screen *screen,
+ struct pipe_memory_object *_memobj)
{
struct r600_memory_object *memobj = (struct r600_memory_object *)_memobj;
}
static struct pipe_resource *
-r600_texture_from_memobj(struct pipe_screen *screen,
- const struct pipe_resource *templ,
- struct pipe_memory_object *_memobj,
- uint64_t offset)
+si_texture_from_memobj(struct pipe_screen *screen,
+ const struct pipe_resource *templ,
+ struct pipe_memory_object *_memobj,
+ uint64_t offset)
{
int r;
struct si_screen *sscreen = (struct si_screen*)screen;
if (memobj->b.dedicated) {
sscreen->ws->buffer_get_metadata(memobj->buf, &metadata);
- r600_surface_import_metadata(sscreen, &surface, &metadata,
+ si_surface_import_metadata(sscreen, &surface, &metadata,
&array_mode, &is_scanout);
} else {
/**
}
- r = r600_init_surface(sscreen, &surface, templ,
+ r = si_init_surface(sscreen, &surface, templ,
array_mode, memobj->stride,
offset, true, is_scanout,
false, false);
if (r)
return NULL;
- rtex = r600_texture_create_object(screen, templ, memobj->buf, &surface);
+ rtex = si_texture_create_object(screen, templ, memobj->buf, &surface);
if (!rtex)
return NULL;
void si_init_screen_texture_functions(struct si_screen *sscreen)
{
- sscreen->b.resource_from_handle = r600_texture_from_handle;
- sscreen->b.resource_get_handle = r600_texture_get_handle;
- sscreen->b.resource_from_memobj = r600_texture_from_memobj;
- sscreen->b.memobj_create_from_handle = r600_memobj_from_handle;
- sscreen->b.memobj_destroy = r600_memobj_destroy;
+ sscreen->b.resource_from_handle = si_texture_from_handle;
+ sscreen->b.resource_get_handle = si_texture_get_handle;
+ sscreen->b.resource_from_memobj = si_texture_from_memobj;
+ sscreen->b.memobj_create_from_handle = si_memobj_from_handle;
+ sscreen->b.memobj_destroy = si_memobj_destroy;
sscreen->b.check_resource_capability = si_check_resource_capability;
}
void si_init_context_texture_functions(struct si_context *sctx)
{
- sctx->b.b.create_surface = r600_create_surface;
- sctx->b.b.surface_destroy = r600_surface_destroy;
+ sctx->b.b.create_surface = si_create_surface;
+ sctx->b.b.surface_destroy = si_surface_destroy;
}
}
static void si_pc_emit_select(struct si_context *sctx,
- struct r600_perfcounter_block *group,
+ struct si_perfcounter_block *group,
unsigned count, unsigned *selectors)
{
struct si_pc_block *sigroup = (struct si_pc_block *)group->data;
}
static void si_pc_emit_read(struct si_context *sctx,
- struct r600_perfcounter_block *group,
+ struct si_perfcounter_block *group,
unsigned count, unsigned *selectors,
struct r600_resource *buffer, uint64_t va)
{
void si_init_perfcounters(struct si_screen *screen)
{
- struct r600_perfcounters *pc;
+ struct si_perfcounters *pc;
struct si_pc_block *blocks;
unsigned num_blocks;
unsigned i;
screen->info.max_sh_per_se);
}
- pc = CALLOC_STRUCT(r600_perfcounters);
+ pc = CALLOC_STRUCT(si_perfcounters);
if (!pc)
return;
/* GPU load thread. */
mtx_t gpu_load_mutex;
thrd_t gpu_load_thread;
- union r600_mmio_counters mmio_counters;
+ union si_mmio_counters mmio_counters;
volatile unsigned gpu_load_stop_thread; /* bool */
/* Performance counters. */
- struct r600_perfcounters *perfcounters;
+ struct si_perfcounters *perfcounters;
/* If pipe_screen wants to recompute and re-emit the framebuffer,
* sampler, and image states of all contexts, it should atomically
si_mark_atom_dirty(sctx, &sctx->msaa_config);
}
-void si_save_qbo_state(struct si_context *sctx, struct r600_qbo_state *st)
+void si_save_qbo_state(struct si_context *sctx, struct si_qbo_state *st)
{
st->saved_compute = sctx->cs_shader_state.program;
unsigned force_level);
void si_update_fb_dirtiness_after_rendering(struct si_context *sctx);
void si_update_ps_iter_samples(struct si_context *sctx);
-void si_save_qbo_state(struct si_context *sctx, struct r600_qbo_state *st);
+void si_save_qbo_state(struct si_context *sctx, struct si_qbo_state *st);
void si_set_occlusion_query_state(struct si_context *sctx,
bool old_perfect_enable);