struct pipe_fence_handle *fence = NULL;
uint64_t t0;
+ SVGA_STATS_TIME_PUSH(svga_sws(svga), SVGA_STATS_TIME_CONTEXTFLUSH);
+
svga->curr.nr_fbs = 0;
/* Ensure that texture dma uploads are processed
svgascreen->sws->fence_reference(svgascreen->sws, pfence, fence);
svgascreen->sws->fence_reference(svgascreen->sws, &fence, NULL);
+
+ SVGA_STATS_TIME_POP(svga_sws(svga));
}
struct svga_screen *svgascreen = svga_screen(svga->pipe.screen);
unsigned i;
+ SVGA_STATS_TIME_PUSH(svga_sws(svga), SVGA_STATS_TIME_SURFACEFLUSH);
+
/* Emit buffered drawing commands.
*/
svga_hwtnl_flush_retry( svga );
if (svga->curr.framebuffer.zsbuf)
svga_propagate_surface(svga, svga->curr.framebuffer.zsbuf);
+ SVGA_STATS_TIME_POP(svga_sws(svga));
}
return (struct svga_context *)pipe;
}
+/**
+ * Convenience accessor: return the svga_winsys_screen for the given
+ * context, via its pipe screen.  Intended as the canonical way to reach
+ * the winsys for the SVGA_STATS_* instrumentation macros.
+ */
+static inline struct svga_winsys_screen *
+svga_sws(struct svga_context *svga)
+{
+ return svga_screen(svga->pipe.screen)->sws;
+}
static inline boolean
svga_have_gb_objects(const struct svga_context *svga)
enum pipe_error
svga_hwtnl_flush(struct svga_hwtnl *hwtnl)
{
+ enum pipe_error ret = PIPE_OK;
+
+ SVGA_STATS_TIME_PUSH(svga_sws(hwtnl->svga), SVGA_STATS_TIME_HWTNLFLUSH);
+
if (!svga_have_vgpu10(hwtnl->svga) && hwtnl->cmd.prim_count) {
/* we only queue up primitive for VGPU9 */
- return draw_vgpu9(hwtnl);
+ ret = draw_vgpu9(hwtnl);
}
- return PIPE_OK;
+
+ /* Pop with the same svga_sws() accessor used for the PUSH above,
+ * rather than open-coding svga_screen(...)->sws. */
+ SVGA_STATS_TIME_POP(svga_sws(hwtnl->svga));
+ return ret;
}
{
enum pipe_error ret = PIPE_OK;
+ SVGA_STATS_TIME_PUSH(svga_sws(hwtnl->svga), SVGA_STATS_TIME_HWTNLPRIM);
+
if (svga_have_vgpu10(hwtnl->svga)) {
/* draw immediately */
ret = draw_vgpu10(hwtnl, range, vcount, min_index, max_index, ib,
if (hwtnl->cmd.prim_count + 1 >= QSZ) {
ret = svga_hwtnl_flush(hwtnl);
if (ret != PIPE_OK)
- return ret;
+ goto done;
}
/* min/max indices are relative to bias */
hwtnl->cmd.prim_count++;
}
+done:
+ SVGA_STATS_TIME_POP(svga_screen(hwtnl->svga->pipe.screen)->sws);
return ret;
}
enum pipe_error ret = PIPE_OK;
int i;
+ SVGA_STATS_TIME_PUSH(svga_sws(hwtnl->svga), SVGA_STATS_TIME_GENERATEINDICES);
+
for (i = 0; i < IDX_CACHE_MAX; i++) {
if (hwtnl->index_cache[prim][i].buffer != NULL &&
hwtnl->index_cache[prim][i].generate == generate) {
if (DBG)
debug_printf("%s retrieve %d/%d\n", __FUNCTION__, i, gen_nr);
- return PIPE_OK;
+ goto done;
}
else if (gen_type == U_GENERATE_REUSABLE) {
pipe_resource_reference(&hwtnl->index_cache[prim][i].buffer,
ret = generate_indices(hwtnl, gen_nr, gen_size, generate, out_buf);
if (ret != PIPE_OK)
- return ret;
+ goto done;
hwtnl->index_cache[prim][i].generate = generate;
hwtnl->index_cache[prim][i].gen_nr = gen_nr;
debug_printf("%s cache %d/%d\n", __FUNCTION__,
i, hwtnl->index_cache[prim][i].gen_nr);
- return PIPE_OK;
+done:
+ SVGA_STATS_TIME_POP(svga_sws(hwtnl->svga));
+ return ret;
}
unsigned api_pv = hwtnl->api_pv;
struct svga_context *svga = hwtnl->svga;
+ SVGA_STATS_TIME_PUSH(svga_sws(svga), SVGA_STATS_TIME_HWTNLDRAWARRAYS);
+
if (svga->curr.rast->templ.fill_front !=
svga->curr.rast->templ.fill_back) {
assert(hwtnl->api_fillmode == PIPE_POLYGON_MODE_FILL);
}
if (gen_type == U_GENERATE_LINEAR) {
- return simple_draw_arrays(hwtnl, gen_prim, start, count,
+ ret = simple_draw_arrays(hwtnl, gen_prim, start, count,
start_instance, instance_count);
}
else {
gen_prim, 0, gen_nr,
start_instance,
instance_count);
- if (ret != PIPE_OK)
- goto done;
-
done:
if (gen_buf)
pipe_resource_reference(&gen_buf, NULL);
-
- return ret;
}
+
+ SVGA_STATS_TIME_POP(svga_sws(svga));
+ return ret;
}
u_translate_func gen_func;
enum pipe_error ret = PIPE_OK;
+ SVGA_STATS_TIME_PUSH(svga_sws(hwtnl->svga),
+ SVGA_STATS_TIME_HWTNLDRAWELEMENTS);
+
if (svga_need_unfilled_fallback(hwtnl, prim)) {
gen_type = u_unfilled_translator(prim,
index_size,
if (gen_type == U_TRANSLATE_MEMCPY) {
/* No need for translation, just pass through to hardware:
*/
- return svga_hwtnl_simple_draw_range_elements(hwtnl, index_buffer,
+ ret = svga_hwtnl_simple_draw_range_elements(hwtnl, index_buffer,
index_size,
index_bias,
min_index,
gen_prim, 0, gen_nr,
start_instance,
instance_count);
- if (ret != PIPE_OK)
- goto done;
-
done:
if (gen_buf)
pipe_resource_reference(&gen_buf, NULL);
-
- return ret;
}
+
+ SVGA_STATS_TIME_POP(svga_sws(hwtnl->svga));
+ return ret;
}
}
svga->hud.num_blend_objects++;
+ SVGA_STATS_COUNT_INC(svga_screen(svga->pipe.screen)->sws,
+ SVGA_STATS_COUNT_BLENDSTATE);
return blend;
}
svga->hud.num_depthstencil_objects++;
+ SVGA_STATS_COUNT_INC(svga_screen(svga->pipe.screen)->sws,
+ SVGA_STATS_COUNT_DEPTHSTENCILSTATE);
+
return ds;
}
{
enum pipe_error ret = PIPE_OK;
+ SVGA_STATS_TIME_PUSH(svga_sws(svga), SVGA_STATS_TIME_DRAWELEMENTS);
+
svga_hwtnl_set_fillmode(svga->hwtnl, svga->curr.rast->hw_fillmode);
ret = svga_update_state( svga, SVGA_STATE_HW_DRAW );
if (ret != PIPE_OK)
goto retry;
- return PIPE_OK;
+ goto done;
retry:
svga_context_flush( svga, NULL );
if (do_retry)
{
- return retry_draw_range_elements( svga,
- index_buffer, index_size, index_bias,
- min_index, max_index,
- prim, start, count,
- start_instance, instance_count, FALSE );
+ ret = retry_draw_range_elements(svga,
+ index_buffer, index_size, index_bias,
+ min_index, max_index,
+ prim, start, count,
+ start_instance, instance_count, FALSE);
}
+done:
+ SVGA_STATS_TIME_POP(svga_sws(svga));
return ret;
}
{
enum pipe_error ret;
+ SVGA_STATS_TIME_PUSH(svga_sws(svga), SVGA_STATS_TIME_DRAWARRAYS);
+
svga_hwtnl_set_fillmode(svga->hwtnl, svga->curr.rast->hw_fillmode);
ret = svga_update_state( svga, SVGA_STATE_HW_DRAW );
if (ret != PIPE_OK)
goto retry;
- return PIPE_OK;
+ goto done;
retry:
if (ret == PIPE_ERROR_OUT_OF_MEMORY && do_retry)
{
svga_context_flush( svga, NULL );
- return retry_draw_arrays(svga, prim, start, count,
- start_instance, instance_count,
- FALSE );
+ ret = retry_draw_arrays(svga, prim, start, count,
+ start_instance, instance_count,
+ FALSE);
}
+done:
+ SVGA_STATS_TIME_POP(svga_sws(svga));
return ret;
}
enum pipe_error ret = 0;
boolean needed_swtnl;
+ SVGA_STATS_TIME_PUSH(svga_sws(svga), SVGA_STATS_TIME_DRAWVBO);
+
svga->hud.num_draw_calls++; /* for SVGA_QUERY_NUM_DRAW_CALLS */
if (u_reduced_prim(info->mode) == PIPE_PRIM_TRIANGLES &&
svga->curr.rast->templ.cull_face == PIPE_FACE_FRONT_AND_BACK)
- return;
+ goto done;
/*
* Mark currently bound target surfaces as dirty
r = util_draw_vbo_without_prim_restart(pipe, &svga->curr.ib, info);
assert(r == PIPE_OK);
(void) r;
- return;
+ goto done;
}
if (!u_trim_pipe_prim( info->mode, &count ))
- return;
+ goto done;
needed_swtnl = svga->state.sw.need_swtnl;
#ifdef DEBUG
if (svga->curr.vs->base.id == svga->debug.disable_shader ||
svga->curr.fs->base.id == svga->debug.disable_shader)
- return;
+ goto done;
#endif
if (svga->state.sw.need_swtnl) {
svga_hwtnl_flush_retry( svga );
svga_context_flush(svga, NULL);
}
+
+done:
+ SVGA_STATS_TIME_POP(svga_sws(svga));
+;
}
if (!fs)
return NULL;
+ SVGA_STATS_TIME_PUSH(svga_sws(svga), SVGA_STATS_TIME_CREATEFS);
+
fs->base.tokens = tgsi_dup_tokens(templ->tokens);
/* Collect basic info that we'll need later:
fs->draw_shader = draw_create_fragment_shader(svga->swtnl.draw, templ);
+ SVGA_STATS_TIME_POP(svga_sws(svga));
return fs;
}
if (!gs)
return NULL;
+ SVGA_STATS_TIME_PUSH(svga_sws(svga), SVGA_STATS_TIME_CREATEGS);
+
gs->base.tokens = tgsi_dup_tokens(templ->tokens);
/* Collect basic info that we'll need later:
&templ->stream_output);
}
+ SVGA_STATS_TIME_POP(svga_sws(svga));
return gs;
}
}
svga->hud.num_rasterizer_objects++;
+ SVGA_STATS_COUNT_INC(svga_screen(svga->pipe.screen)->sws,
+ SVGA_STATS_COUNT_RASTERIZERSTATE);
return rast;
}
cso->mipfilter == SVGA3D_TEX_FILTER_NONE ? "SVGA3D_TEX_FILTER_NONE" : "SOMETHING");
svga->hud.num_sampler_objects++;
+ SVGA_STATS_COUNT_INC(svga_screen(svga->pipe.screen)->sws,
+ SVGA_STATS_COUNT_SAMPLER);
return cso;
}
sv->id = SVGA3D_INVALID_ID;
svga->hud.num_samplerview_objects++;
+ SVGA_STATS_COUNT_INC(svga_screen(svga->pipe.screen)->sws,
+ SVGA_STATS_COUNT_SAMPLERVIEW);
return &sv->base;
}
if (!svga_have_vgpu10(svga) && shader != PIPE_SHADER_FRAGMENT)
return;
+ SVGA_STATS_TIME_PUSH(svga_sws(svga), SVGA_STATS_TIME_SETSAMPLERVIEWS);
+
/* This bit of code works around a quirk in the CSO module.
* If start=num=0 it means all sampler views should be released.
* Note that the CSO module treats sampler views for fragment shaders
}
if (!any_change) {
- return;
+ goto done;
}
/* find highest non-null sampler_views[] entry */
}
}
}
+
+done:
+ SVGA_STATS_TIME_POP(svga_sws(svga));
}
}
svga->hud.num_vertexelement_objects++;
+ SVGA_STATS_COUNT_INC(svga_screen(svga->pipe.screen)->sws,
+ SVGA_STATS_COUNT_VERTEXELEMENT);
return velems;
}
if (!vs)
return NULL;
+ SVGA_STATS_TIME_PUSH(svga_sws(svga), SVGA_STATS_TIME_CREATEVS);
+
/* substitute a debug shader?
*/
vs->base.tokens = tgsi_dup_tokens(substitute_vs(svga->debug.shader_id,
&templ->stream_output);
}
+ SVGA_STATS_TIME_POP(svga_sws(svga));
return vs;
}
struct svga_screen *ss = svga_screen(pipe->screen);
struct svga_buffer *sbuf = svga_buffer(resource);
struct pipe_transfer *transfer;
- uint8_t *map;
+ uint8_t *map = NULL;
int64_t begin = svga_get_time(svga);
+ SVGA_STATS_TIME_PUSH(svga_sws(svga), SVGA_STATS_TIME_BUFFERTRANSFERMAP);
+
assert(box->y == 0);
assert(box->z == 0);
assert(box->height == 1);
transfer = MALLOC_STRUCT(pipe_transfer);
if (!transfer) {
- return NULL;
+ goto done;
}
transfer->resource = resource;
*/
FREE(transfer);
- return NULL;
+ goto done;
}
svga_context_flush(svga, NULL);
sbuf->swbuf = align_malloc(sbuf->b.b.width0, 16);
if (!sbuf->swbuf) {
FREE(transfer);
- return NULL;
+ goto done;
}
}
}
svga->hud.map_buffer_time += (svga_get_time(svga) - begin);
+done:
+ SVGA_STATS_TIME_POP(svga_sws(svga));
return map;
}
struct svga_context *svga = svga_context(pipe);
struct svga_buffer *sbuf = svga_buffer(transfer->resource);
+ SVGA_STATS_TIME_PUSH(svga_sws(svga), SVGA_STATS_TIME_BUFFERTRANSFERUNMAP);
+
pipe_mutex_lock(ss->swc_mutex);
assert(sbuf->map.count);
pipe_mutex_unlock(ss->swc_mutex);
FREE(transfer);
+ SVGA_STATS_TIME_POP(svga_sws(svga));
}
struct svga_screen *ss = svga_screen(screen);
struct svga_buffer *sbuf;
+ SVGA_STATS_TIME_PUSH(ss->sws, SVGA_STATS_TIME_CREATEBUFFER);
+
sbuf = CALLOC_STRUCT(svga_buffer);
if (!sbuf)
goto error1;
ss->hud.total_resource_bytes += sbuf->size;
ss->hud.num_resources++;
+ SVGA_STATS_TIME_POP(ss->sws);
return &sbuf->b.b;
error2:
FREE(sbuf);
error1:
+ SVGA_STATS_TIME_POP(ss->sws);
return NULL;
}
{
struct list_head *curr, *next;
+ SVGA_STATS_TIME_PUSH(svga_sws(svga), SVGA_STATS_TIME_BUFFERSFLUSH);
+
curr = svga->dirty_buffers.next;
next = curr->next;
while (curr != &svga->dirty_buffers) {
curr = next;
next = curr->next;
}
+
+ SVGA_STATS_TIME_POP(svga_sws(svga));
}
boolean use_direct_map = svga_have_gb_objects(svga) &&
!svga_have_gb_dma(svga);
unsigned d;
- void *returnVal;
+ void *returnVal = NULL;
int64_t begin = svga_get_time(svga);
+ SVGA_STATS_TIME_PUSH(sws, SVGA_STATS_TIME_TEXTRANSFERMAP);
+
/* We can't map texture storage directly unless we have GB objects */
if (usage & PIPE_TRANSFER_MAP_DIRECTLY) {
if (svga_have_gb_objects(svga))
use_direct_map = TRUE;
else
- return NULL;
+ goto done;
}
st = CALLOC_STRUCT(svga_transfer);
if (!st)
- return NULL;
+ goto done;
st->base.level = level;
st->base.usage = usage;
if (!st->hwbuf) {
FREE(st);
- return NULL;
+ goto done;
}
if (st->hw_nblocksy < nblocksy) {
if (!st->swbuf) {
sws->buffer_destroy(sws, st->hwbuf);
FREE(st);
- return NULL;
+ goto done;
}
}
if (!surf) {
FREE(st);
- return NULL;
+ goto done;
}
/* If this is the first time mapping to the surface in this
}
svga->hud.num_readbacks++;
+ SVGA_STATS_COUNT_INC(sws, SVGA_STATS_COUNT_TEXREADBACK);
assert(ret == PIPE_OK);
(void) ret;
svga_surfaces_flush(svga);
if (!sws->surface_is_flushed(sws, surf)) {
svga->hud.surface_write_flushes++;
+ SVGA_STATS_COUNT_INC(sws, SVGA_STATS_COUNT_SURFACEWRITEFLUSH);
svga_context_flush(svga, NULL);
}
}
*/
if (!map) {
FREE(st);
- return map;
+ returnVal = map;
+ goto done;
}
/**
svga->hud.map_buffer_time += (svga_get_time(svga) - begin);
svga->hud.num_resources_mapped++;
+done:
+ SVGA_STATS_TIME_POP(sws);
return returnVal;
}
struct svga_transfer *st = svga_transfer(transfer);
struct svga_texture *tex = svga_texture(transfer->resource);
+ SVGA_STATS_TIME_PUSH(sws, SVGA_STATS_TIME_TEXTRANSFERUNMAP);
+
if (!st->swbuf) {
if (st->use_direct_map) {
svga_texture_surface_unmap(svga, transfer);
sws->buffer_destroy(sws, st->hwbuf);
}
FREE(st);
+ SVGA_STATS_TIME_POP(sws);
}
struct svga_texture *tex;
unsigned bindings = template->bind;
+ SVGA_STATS_TIME_PUSH(svgascreen->sws,
+ SVGA_STATS_TIME_CREATETEXTURE);
+
assert(template->last_level < SVGA_MAX_TEXTURE_LEVELS);
if (template->last_level >= SVGA_MAX_TEXTURE_LEVELS) {
- return NULL;
+ goto fail_notex;
}
tex = CALLOC_STRUCT(svga_texture);
if (!tex) {
- return NULL;
+ goto fail_notex;
}
tex->defined = CALLOC(template->depth0 * template->array_size,
sizeof(tex->defined[0]));
if (!tex->defined) {
FREE(tex);
- return NULL;
+ goto fail_notex;
}
tex->rendered_to = CALLOC(template->depth0 * template->array_size,
svgascreen->hud.total_resource_bytes += tex->size;
svgascreen->hud.num_resources++;
+ SVGA_STATS_TIME_POP(svgascreen->sws);
+
return &tex->b.b;
fail:
if (tex->defined)
FREE(tex->defined);
FREE(tex);
+fail_notex:
+ SVGA_STATS_TIME_POP(svgascreen->sws);
return NULL;
}
uint64_t timeout)
{
struct svga_winsys_screen *sws = svga_screen(screen)->sws;
+ boolean retVal;
- if (!timeout)
- return sws->fence_signalled(sws, fence, 0) == 0;
+ SVGA_STATS_TIME_PUSH(sws, SVGA_STATS_TIME_FENCEFINISH);
- SVGA_DBG(DEBUG_DMA|DEBUG_PERF, "%s fence_ptr %p\n",
- __FUNCTION__, fence);
+ if (!timeout) {
+ retVal = sws->fence_signalled(sws, fence, 0) == 0;
+ }
+ else {
+ SVGA_DBG(DEBUG_DMA|DEBUG_PERF, "%s fence_ptr %p\n",
+ __FUNCTION__, fence);
+
+ retVal = sws->fence_finish(sws, fence, 0) == 0;
+ }
+
+ SVGA_STATS_TIME_POP(sws);
- return sws->fence_finish(sws, fence, 0) == 0;
+ return retVal;
}
unsigned codeLen = variant->nr_tokens * sizeof(variant->tokens[0]);
enum pipe_error ret;
+ SVGA_STATS_TIME_PUSH(svga_sws(svga), SVGA_STATS_TIME_DEFINESHADER);
+
variant->id = UTIL_BITMASK_INVALID_INDEX;
if (svga_have_gb_objects(svga)) {
if (svga_have_vgpu10(svga))
- return define_gb_shader_vgpu10(svga, type, variant, codeLen);
+ ret = define_gb_shader_vgpu10(svga, type, variant, codeLen);
else
- return define_gb_shader_vgpu9(svga, type, variant, codeLen);
+ ret = define_gb_shader_vgpu9(svga, type, variant, codeLen);
}
else {
/* Allocate an integer ID for the shader */
variant->id = util_bitmask_add(svga->shader_id_bm);
if (variant->id == UTIL_BITMASK_INVALID_INDEX) {
- return PIPE_ERROR_OUT_OF_MEMORY;
+ ret = PIPE_ERROR_OUT_OF_MEMORY;
+ goto done;
}
/* Issue SVGA3D device command to define the shader */
}
}
+done:
+ SVGA_STATS_TIME_POP(svga_sws(svga));
return ret;
}
enum pipe_error ret = PIPE_OK;
unsigned i;
+ SVGA_STATS_TIME_PUSH(screen->sws, SVGA_STATS_TIME_UPDATESTATE);
+
/* Check for updates to bound textures. This can't be done in an
* atom as there is no flag which could provoke this test, and we
* cannot create one.
state_levels[i],
&svga->dirty );
if (ret != PIPE_OK)
- return ret;
+ goto done;
svga->state.dirty[i] = 0;
}
svga->hud.num_validations++;
- return PIPE_OK;
+done:
+ SVGA_STATS_TIME_POP(screen->sws);
+ return ret;
}
struct svga_fragment_shader *fs = svga->curr.fs;
struct svga_compile_key key;
+ SVGA_STATS_TIME_PUSH(svga_sws(svga), SVGA_STATS_TIME_EMITFS);
+
/* SVGA_NEW_BLEND
* SVGA_NEW_TEXTURE_BINDING
* SVGA_NEW_RAST
*/
ret = make_fs_key(svga, fs, &key);
if (ret != PIPE_OK)
- return ret;
+ goto done;
variant = svga_search_shader_key(&fs->base, &key);
if (!variant) {
ret = compile_fs(svga, fs, &key, &variant);
if (ret != PIPE_OK)
- return ret;
+ goto done;
}
assert(variant);
if (variant != svga->state.hw_draw.fs) {
ret = svga_set_shader(svga, SVGA3D_SHADERTYPE_PS, variant);
if (ret != PIPE_OK)
- return ret;
+ goto done;
svga->rebind.flags.fs = FALSE;
svga->state.hw_draw.fs = variant;
}
- return PIPE_OK;
+done:
+ SVGA_STATS_TIME_POP(svga_sws(svga));
+ return ret;
}
struct svga_tracked_state svga_hw_fs =
enum pipe_error ret = PIPE_OK;
struct svga_compile_key key;
+ SVGA_STATS_TIME_PUSH(svga_sws(svga), SVGA_STATS_TIME_EMITGS);
+
/* If there's a user-defined GS, we should have a pointer to a derived
* GS. This should have been resolved in update_tgsi_transform().
*/
ret = svga_set_shader(svga, SVGA3D_SHADERTYPE_GS, NULL);
svga->state.hw_draw.gs = NULL;
}
- return ret;
+ goto done;
}
/* If there is stream output info for this geometry shader, then use
if (!variant) {
ret = compile_gs(svga, gs, &key, &variant);
if (ret != PIPE_OK)
- return ret;
+ goto done;
/* insert the new variant at head of linked list */
assert(variant);
/* Bind the new variant */
ret = svga_set_shader(svga, SVGA3D_SHADERTYPE_GS, variant);
if (ret != PIPE_OK)
- return ret;
+ goto done;
svga->rebind.flags.gs = FALSE;
svga->dirty |= SVGA_NEW_GS_VARIANT;
svga->state.hw_draw.gs = variant;
}
- return PIPE_OK;
+done:
+ SVGA_STATS_TIME_POP(svga_sws(svga));
+ return ret;
}
struct svga_tracked_state svga_hw_gs =
enum pipe_error ret = PIPE_OK;
struct svga_compile_key key;
+ SVGA_STATS_TIME_PUSH(svga_sws(svga), SVGA_STATS_TIME_EMITVS);
+
/* If there is an active geometry shader, and it has stream output
* defined, then we will skip the stream output from the vertex shader
*/
ret = compile_vs(svga, vs, &key, &variant);
}
if (ret != PIPE_OK)
- return ret;
+ goto done;
/* insert the new variant at head of linked list */
assert(variant);
if (variant) {
ret = svga_set_shader(svga, SVGA3D_SHADERTYPE_VS, variant);
if (ret != PIPE_OK)
- return ret;
+ goto done;
svga->rebind.flags.vs = FALSE;
}
svga->state.hw_draw.vs = variant;
}
- return PIPE_OK;
+done:
+ SVGA_STATS_TIME_POP(svga_sws(svga));
+ return ret;
}
struct svga_tracked_state svga_hw_vs =
unsigned nlayers = 1;
SVGA3dSurfaceFlags flags = 0;
SVGA3dSurfaceFormat format;
+ struct pipe_surface *retVal = NULL;
s = CALLOC_STRUCT(svga_surface);
if (!s)
return NULL;
+ SVGA_STATS_TIME_PUSH(ss->sws, SVGA_STATS_TIME_CREATESURFACEVIEW);
+
if (pt->target == PIPE_TEXTURE_CUBE) {
layer = surf_tmpl->u.tex.first_layer;
zslice = 0;
layer, nlayers, zslice, &s->key);
if (!s->handle) {
FREE(s);
- return NULL;
+ goto done;
}
s->key.format = format;
}
svga->hud.num_surface_views++;
+ retVal = &s->base;
- return &s->base;
+done:
+ SVGA_STATS_TIME_POP(ss->sws);
+ return retVal;
}
{
struct svga_context *svga = svga_context(pipe);
struct pipe_screen *screen = pipe->screen;
+ struct pipe_surface *surf = NULL;
boolean view = FALSE;
+ SVGA_STATS_TIME_PUSH(svga_sws(svga), SVGA_STATS_TIME_CREATESURFACE);
+
if (svga_screen(screen)->debug.force_surface_view)
view = TRUE;
if (svga_have_vgpu10(svga) || svga_screen(screen)->debug.no_surface_view)
view = FALSE;
- return svga_create_surface_view(pipe, pt, surf_tmpl, view);
+ surf = svga_create_surface_view(pipe, pt, surf_tmpl, view);
+
+ SVGA_STATS_TIME_POP(svga_sws(svga));
+
+ return surf;
}
{
struct svga_surface *bs = s->backed;
+ SVGA_STATS_TIME_PUSH(svga_sws(svga),
+ SVGA_STATS_TIME_CREATEBACKEDSURFACEVIEW);
+
if (!bs) {
struct svga_texture *tex = svga_texture(s->base.texture);
struct pipe_surface *backed_view;
svga_mark_surface_dirty(&bs->base);
+ SVGA_STATS_TIME_POP(svga_sws(svga));
+
return bs;
}
{
enum pipe_error ret = PIPE_OK;
unsigned shader;
+ struct pipe_surface *surf = NULL;
assert(svga_have_vgpu10(svga));
+ SVGA_STATS_TIME_PUSH(svga_sws(svga),
+ SVGA_STATS_TIME_VALIDATESURFACEVIEW);
+
/**
* DX spec explicitly specifies that no resource can be bound to a render
* target view and a shader resource view simultanously.
s->handle);
s = create_backed_surface_view(svga, s);
if (!s)
- return NULL;
+ goto done;
break;
}
if (ret != PIPE_OK) {
util_bitmask_clear(svga->surface_view_id_bm, s->view_id);
s->view_id = SVGA3D_INVALID_ID;
- return NULL;
+ goto done;
}
}
- return &s->base;
+ surf = &s->base;
+
+done:
+ SVGA_STATS_TIME_POP(svga_sws(svga));
+
+ return surf;
}
struct svga_screen *ss = svga_screen(surf->texture->screen);
enum pipe_error ret = PIPE_OK;
+ SVGA_STATS_TIME_PUSH(ss->sws, SVGA_STATS_TIME_DESTROYSURFACE);
+
/* Destroy the backed view surface if it exists */
if (s->backed) {
svga_surface_destroy(pipe, &s->backed->base);
FREE(surf);
svga->hud.num_surface_views--;
+ SVGA_STATS_TIME_POP(ss->sws);
}
if (!s->dirty)
return;
+ SVGA_STATS_TIME_PUSH(ss->sws, SVGA_STATS_TIME_PROPAGATESURFACE);
+
if (surf->texture->target == PIPE_TEXTURE_CUBE) {
zslice = 0;
layer = surf->u.tex.first_layer;
svga_define_texture_level(tex, layer + i, surf->u.tex.level);
}
}
+
+ SVGA_STATS_TIME_POP(ss->sws);
}
boolean new_vbuf = FALSE;
boolean new_ibuf = FALSE;
+ SVGA_STATS_TIME_PUSH(svga_sws(svga),
+ SVGA_STATS_TIME_VBUFRENDERALLOCVERT);
+
if (svga_render->vertex_size != vertex_size)
svga->swtnl.new_vdecl = TRUE;
svga_render->vertex_size = (size_t)vertex_size;
if (svga->swtnl.new_vdecl)
svga_render->vdecl_offset = svga_render->vbuf_offset;
+ SVGA_STATS_TIME_POP(svga_sws(svga));
+
return TRUE;
}
{
struct svga_vbuf_render *svga_render = svga_vbuf_render(render);
struct svga_context *svga = svga_render->svga;
+ void * retPtr = NULL;
+
+ SVGA_STATS_TIME_PUSH(svga_sws(svga),
+ SVGA_STATS_TIME_VBUFRENDERMAPVERT);
if (svga_render->vbuf) {
char *ptr = (char*)pipe_buffer_map(&svga->pipe,
&svga_render->vbuf_transfer);
if (ptr) {
svga_render->vbuf_ptr = ptr;
- return ptr + svga_render->vbuf_offset;
+ retPtr = ptr + svga_render->vbuf_offset;
}
else {
svga_render->vbuf_ptr = NULL;
svga_render->vbuf_transfer = NULL;
- return NULL;
+ retPtr = NULL;
}
}
else {
/* we probably ran out of memory when allocating the vertex buffer */
- return NULL;
+ retPtr = NULL;
}
+
+ SVGA_STATS_TIME_POP(svga_sws(svga));
+ return retPtr;
}
static void
unsigned offset, length;
size_t used = svga_render->vertex_size * ((size_t)max_index + 1);
+ SVGA_STATS_TIME_PUSH(svga_sws(svga),
+ SVGA_STATS_TIME_VBUFRENDERUNMAPVERT);
+
offset = svga_render->vbuf_offset + svga_render->vertex_size * min_index;
length = svga_render->vertex_size * (max_index + 1 - min_index);
svga_render->min_index = min_index;
svga_render->max_index = max_index;
svga_render->vbuf_used = MAX2(svga_render->vbuf_used, used);
+
+ SVGA_STATS_TIME_POP(svga_sws(svga));
}
static void
if (!svga->swtnl.new_vdecl)
return;
+ SVGA_STATS_TIME_PUSH(svga_sws(svga),
+ SVGA_STATS_TIME_VBUFSUBMITSTATE);
+
memcpy(vdecl, svga_render->vdecl, sizeof(vdecl));
/* flush the hw state */
}
svga->swtnl.new_vdecl = FALSE;
+ SVGA_STATS_TIME_POP(svga_sws(svga));
}
static void
const unsigned start_instance = 0;
const unsigned instance_count = 1;
+ SVGA_STATS_TIME_PUSH(svga_sws(svga), SVGA_STATS_TIME_VBUFDRAWARRAYS);
+
/* off to hardware */
svga_vbuf_submit_state(svga_render);
svga->swtnl.new_vbuf = TRUE;
assert(ret == PIPE_OK);
}
+ SVGA_STATS_TIME_POP(svga_sws(svga));
}
assert(( svga_render->vbuf_offset - svga_render->vdecl_offset) % svga_render->vertex_size == 0);
+ SVGA_STATS_TIME_PUSH(svga_sws(svga), SVGA_STATS_TIME_VBUFDRAWELEMENTS);
+
if (svga_render->ibuf_size < svga_render->ibuf_offset + size)
pipe_resource_reference(&svga_render->ibuf, NULL);
svga->swtnl.new_vbuf = TRUE;
assert(ret == PIPE_OK);
}
-
svga_render->ibuf_offset += size;
+
+ SVGA_STATS_TIME_POP(svga_sws(svga));
}
const void *map;
enum pipe_error ret;
+ SVGA_STATS_TIME_PUSH(svga_sws(svga), SVGA_STATS_TIME_SWTNLDRAWVBO);
+
assert(!svga->dirty);
assert(svga->state.sw.need_swtnl);
assert(draw);
svga->state.sw.in_swtnl_draw = FALSE;
svga->dirty |= SVGA_NEW_NEED_PIPELINE | SVGA_NEW_NEED_SWVFETCH;
+ SVGA_STATS_TIME_POP(svga_sws(svga));
return ret;
}
update_swtnl_draw( struct svga_context *svga,
unsigned dirty )
{
+ SVGA_STATS_TIME_PUSH(svga_sws(svga), SVGA_STATS_TIME_SWTNLUPDATEDRAW);
+
draw_flush( svga->swtnl.draw );
if (dirty & SVGA_NEW_VS)
(svga->curr.framebuffer.zsbuf) ?
svga->curr.framebuffer.zsbuf->format : PIPE_FORMAT_NONE);
+ SVGA_STATS_TIME_POP(svga_sws(svga));
return PIPE_OK;
}
unsigned i;
int any_change;
+ SVGA_STATS_TIME_PUSH(svga_sws(svga), SVGA_STATS_TIME_SWTNLUPDATEVDECL);
+
memset(vinfo, 0, sizeof(*vinfo));
memset(vdecl, 0, sizeof(vdecl));
enum pipe_error ret;
if (!any_change && svga_render->layout_id != SVGA3D_INVALID_ID) {
- return PIPE_OK;
+ goto done;
}
if (svga_render->layout_id != SVGA3D_INVALID_ID) {
}
else {
if (!any_change)
- return PIPE_OK;
+ goto done;
}
memcpy(svga_render->vdecl, vdecl, sizeof(vdecl));
svga->swtnl.new_vdecl = TRUE;
- return 0;
+done:
+ SVGA_STATS_TIME_POP(svga_sws(svga));
+ return PIPE_OK;
}
struct svga_shader_variant *variant = NULL;
struct svga_shader_emitter emit;
+ SVGA_STATS_TIME_PUSH(svga_sws(svga), SVGA_STATS_TIME_TGSIVGPU9TRANSLATE);
+
memset(&emit, 0, sizeof(emit));
emit.size = 1024;
}
#endif
- return variant;
+ goto done;
- fail:
+fail:
FREE(variant);
if (emit.buf != err_buf)
FREE(emit.buf);
- return NULL;
+ variant = NULL;
+
+done:
+ SVGA_STATS_TIME_POP(svga_sws(svga));
+ return variant;
}
/* These two flags cannot be used together */
assert(key->vs.need_prescale + key->vs.undo_viewport <= 1);
+ SVGA_STATS_TIME_PUSH(svga_sws(svga), SVGA_STATS_TIME_TGSIVGPU10TRANSLATE);
/*
* Setup the code emitter
*/
emit = alloc_emitter();
if (!emit)
- return NULL;
+ goto done;
emit->unit = unit;
emit->key = *key;
cleanup:
free_emitter(emit);
+done:
+ SVGA_STATS_TIME_POP(svga_sws(svga));
return variant;
}
#include "pipe/p_compiler.h"
#include "pipe/p_defines.h"
+#include "svga_mksstats.h"
struct svga_winsys_screen;
struct svga_winsys_buffer;
#define SVGA_HINT_FLAG_CAN_PRE_FLUSH (1 << 0) /* Can preemptively flush */
+/**
+ * SVGA mks statistics info
+ *
+ * One timeframe is pushed per SVGA_STATS_TIME_PUSH and popped by the
+ * matching SVGA_STATS_TIME_POP; frames form a stack via 'enclosing'.
+ */
+struct svga_winsys_stats_timeframe {
+ void *counterTime;            /* opaque winsys handle for the time counter */
+ uint64 startTime;             /* time recorded when the frame was pushed */
+ uint64 adjustedStartTime;     /* presumably excludes nested frames' time -- TODO confirm */
+ struct svga_winsys_stats_timeframe *enclosing; /* parent frame on the stack */
+};
+
+/* Guest statistic counters.
+ * NOTE: the order here must stay in sync with SVGA_STATS_COUNT_NAMES,
+ * which is indexed by these values.
+ */
+enum svga_stats_count {
+ SVGA_STATS_COUNT_BLENDSTATE,
+ SVGA_STATS_COUNT_DEPTHSTENCILSTATE,
+ SVGA_STATS_COUNT_RASTERIZERSTATE,
+ SVGA_STATS_COUNT_SAMPLER,
+ SVGA_STATS_COUNT_SAMPLERVIEW,
+ SVGA_STATS_COUNT_SURFACEWRITEFLUSH,
+ SVGA_STATS_COUNT_TEXREADBACK,
+ SVGA_STATS_COUNT_VERTEXELEMENT,
+ SVGA_STATS_COUNT_MAX
+};
+
+/* Guest timing-statistics frames.
+ * NOTE: the order here must stay in sync with SVGA_STATS_TIME_NAMES,
+ * which is indexed by these values.
+ */
+enum svga_stats_time {
+ SVGA_STATS_TIME_BUFFERSFLUSH,
+ SVGA_STATS_TIME_BUFFERTRANSFERMAP,
+ SVGA_STATS_TIME_BUFFERTRANSFERUNMAP,
+ SVGA_STATS_TIME_CONTEXTFINISH,
+ SVGA_STATS_TIME_CONTEXTFLUSH,
+ SVGA_STATS_TIME_CREATEBACKEDSURFACEVIEW,
+ SVGA_STATS_TIME_CREATEBUFFER,
+ SVGA_STATS_TIME_CREATECONTEXT,
+ SVGA_STATS_TIME_CREATEFS,
+ SVGA_STATS_TIME_CREATEGS,
+ SVGA_STATS_TIME_CREATESURFACE,
+ SVGA_STATS_TIME_CREATESURFACEVIEW,
+ SVGA_STATS_TIME_CREATETEXTURE,
+ SVGA_STATS_TIME_CREATEVS,
+ SVGA_STATS_TIME_DEFINESHADER,
+ SVGA_STATS_TIME_DESTROYSURFACE,
+ SVGA_STATS_TIME_DRAWVBO,
+ SVGA_STATS_TIME_DRAWARRAYS,
+ SVGA_STATS_TIME_DRAWELEMENTS,
+ SVGA_STATS_TIME_EMITFS,
+ SVGA_STATS_TIME_EMITGS,
+ SVGA_STATS_TIME_EMITVS,
+ SVGA_STATS_TIME_FENCEFINISH,
+ SVGA_STATS_TIME_GENERATEINDICES,
+ SVGA_STATS_TIME_HWTNLDRAWARRAYS,
+ SVGA_STATS_TIME_HWTNLDRAWELEMENTS,
+ SVGA_STATS_TIME_HWTNLFLUSH,
+ SVGA_STATS_TIME_HWTNLPRIM,
+ SVGA_STATS_TIME_PROPAGATESURFACE,
+ SVGA_STATS_TIME_SETSAMPLERVIEWS,
+ SVGA_STATS_TIME_SURFACEFLUSH,
+ SVGA_STATS_TIME_SWTNLDRAWVBO,
+ SVGA_STATS_TIME_SWTNLUPDATEDRAW,
+ SVGA_STATS_TIME_SWTNLUPDATEVDECL,
+ SVGA_STATS_TIME_TEXTRANSFERMAP,
+ SVGA_STATS_TIME_TEXTRANSFERUNMAP,
+ SVGA_STATS_TIME_TGSIVGPU10TRANSLATE,
+ SVGA_STATS_TIME_TGSIVGPU9TRANSLATE,
+ SVGA_STATS_TIME_UPDATESTATE,
+ SVGA_STATS_TIME_VALIDATESURFACEVIEW,
+ SVGA_STATS_TIME_VBUFDRAWARRAYS,
+ SVGA_STATS_TIME_VBUFDRAWELEMENTS,
+ SVGA_STATS_TIME_VBUFRENDERALLOCVERT,
+ SVGA_STATS_TIME_VBUFRENDERMAPVERT,
+ SVGA_STATS_TIME_VBUFRENDERUNMAPVERT,
+ SVGA_STATS_TIME_VBUFSUBMITSTATE,
+ SVGA_STATS_TIME_MAX
+};
+
+#define SVGA_STATS_PREFIX "GuestGL_"
+
+/* Counter names, indexed by enum svga_stats_count -- the order of the
+ * two lists must match.  (SurfaceWriteFlush and TextureReadback were
+ * previously swapped relative to the enum, mislabeling both counters;
+ * the stray trailing line-continuation after the last entry is dropped.)
+ */
+#define SVGA_STATS_COUNT_NAMES                \
+   SVGA_STATS_PREFIX "BlendState",            \
+   SVGA_STATS_PREFIX "DepthStencilState",     \
+   SVGA_STATS_PREFIX "RasterizerState",       \
+   SVGA_STATS_PREFIX "Sampler",               \
+   SVGA_STATS_PREFIX "SamplerView",           \
+   SVGA_STATS_PREFIX "SurfaceWriteFlush",     \
+   SVGA_STATS_PREFIX "TextureReadback",       \
+   SVGA_STATS_PREFIX "VertexElement"
+
+/* Timer names, indexed by enum svga_stats_time -- the order of the two
+ * lists must match. */
+#define SVGA_STATS_TIME_NAMES \
+ SVGA_STATS_PREFIX "BuffersFlush", \
+ SVGA_STATS_PREFIX "BufferTransferMap", \
+ SVGA_STATS_PREFIX "BufferTransferUnmap", \
+ SVGA_STATS_PREFIX "ContextFinish", \
+ SVGA_STATS_PREFIX "ContextFlush", \
+ SVGA_STATS_PREFIX "CreateBackedSurfaceView", \
+ SVGA_STATS_PREFIX "CreateBuffer", \
+ SVGA_STATS_PREFIX "CreateContext", \
+ SVGA_STATS_PREFIX "CreateFS", \
+ SVGA_STATS_PREFIX "CreateGS", \
+ SVGA_STATS_PREFIX "CreateSurface", \
+ SVGA_STATS_PREFIX "CreateSurfaceView", \
+ SVGA_STATS_PREFIX "CreateTexture", \
+ SVGA_STATS_PREFIX "CreateVS", \
+ SVGA_STATS_PREFIX "DefineShader", \
+ SVGA_STATS_PREFIX "DestroySurface", \
+ SVGA_STATS_PREFIX "DrawVBO", \
+ SVGA_STATS_PREFIX "DrawArrays", \
+ SVGA_STATS_PREFIX "DrawElements", \
+ SVGA_STATS_PREFIX "EmitFS", \
+ SVGA_STATS_PREFIX "EmitGS", \
+ SVGA_STATS_PREFIX "EmitVS", \
+ SVGA_STATS_PREFIX "FenceFinish", \
+ SVGA_STATS_PREFIX "GenerateIndices", \
+ SVGA_STATS_PREFIX "HWtnlDrawArrays", \
+ SVGA_STATS_PREFIX "HWtnlDrawElements", \
+ SVGA_STATS_PREFIX "HWtnlFlush", \
+ SVGA_STATS_PREFIX "HWtnlPrim", \
+ SVGA_STATS_PREFIX "PropagateSurface", \
+ SVGA_STATS_PREFIX "SetSamplerViews", \
+ SVGA_STATS_PREFIX "SurfaceFlush", \
+ SVGA_STATS_PREFIX "SwtnlDrawVBO", \
+ SVGA_STATS_PREFIX "SwtnlUpdateDraw", \
+ SVGA_STATS_PREFIX "SwtnlUpdateVDecl", \
+ SVGA_STATS_PREFIX "TextureTransferMap", \
+ SVGA_STATS_PREFIX "TextureTransferUnmap", \
+ SVGA_STATS_PREFIX "TGSIVGPU10Translate", \
+ SVGA_STATS_PREFIX "TGSIVGPU9Translate", \
+ SVGA_STATS_PREFIX "UpdateState", \
+ SVGA_STATS_PREFIX "ValidateSurfaceView", \
+ SVGA_STATS_PREFIX "VbufDrawArrays", \
+ SVGA_STATS_PREFIX "VbufDrawElements", \
+ SVGA_STATS_PREFIX "VbufRenderAllocVertices", \
+ SVGA_STATS_PREFIX "VbufRenderMapVertices", \
+ SVGA_STATS_PREFIX "VbufRenderUnmapVertices", \
+ SVGA_STATS_PREFIX "VbufSubmitState"
+
+
/** Opaque surface handle */
struct svga_winsys_surface;
SVGA3dQueryState *queryState,
void *result, uint32 resultLen);
+ /**
+ * Increment a statistic counter
+ */
+ void
+ (*stats_inc)(enum svga_stats_count);
+
+ /**
+ * Push a time frame onto the stack
+ */
+ void
+ (*stats_time_push)(enum svga_stats_time, struct svga_winsys_stats_timeframe *);
+
+ /**
+ * Pop a time frame.
+ * Declared with (void): an empty parameter list in C is an
+ * old-style (unprototyped) declarator, not "no arguments".
+ */
+ void
+ (*stats_time_pop)(void);
+
+
/** Have VGPU v10 hardware? */
boolean have_vgpu10;
vmw_svga_winsys_shader_reference(&d_shader, NULL);
}
+/* Default no-op implementation of the stats_inc winsys hook. */
+static void
+vmw_svga_winsys_stats_inc(enum svga_stats_count index)
+{
+}
+
+/* Default no-op implementation of the stats_time_push winsys hook. */
+static void
+vmw_svga_winsys_stats_time_push(enum svga_stats_time index,
+ struct svga_winsys_stats_timeframe *tf)
+{
+}
+
+/* Default no-op implementation of the stats_time_pop winsys hook.
+ * Defined with (void): empty parentheses would declare an old-style
+ * (unprototyped) function rather than one taking no arguments.
+ */
+static void
+vmw_svga_winsys_stats_time_pop(void)
+{
+}
+
boolean
vmw_winsys_screen_init_svga(struct vmw_winsys_screen *vws)
{
vws->base.query_destroy = vmw_svga_winsys_query_destroy;
vws->base.query_get_result = vmw_svga_winsys_query_get_result;
+ /* Hook up the default (no-op) mks statistics callbacks. */
+ vws->base.stats_inc = vmw_svga_winsys_stats_inc;
+ vws->base.stats_time_push = vmw_svga_winsys_stats_time_push;
+ vws->base.stats_time_pop = vmw_svga_winsys_stats_time_pop;
+
return TRUE;
}