&buf->bo, &buf->offset);
if (!buf->bo)
return nouveau_buffer_allocate(screen, buf, NOUVEAU_BO_GART);
+ NOUVEAU_DRV_STAT(screen, buf_obj_current_bytes_vid, buf->base.width0);
} else
if (domain == NOUVEAU_BO_GART) {
buf->mm = nouveau_mm_allocate(screen->mm_GART, size,
&buf->bo, &buf->offset);
if (!buf->bo)
return FALSE;
+ NOUVEAU_DRV_STAT(screen, buf_obj_current_bytes_sys, buf->base.width0);
} else {
assert(domain == 0);
if (!nouveau_buffer_malloc(buf))
if (buf->mm)
release_allocation(&buf->mm, buf->fence);
+ if (buf->domain == NOUVEAU_BO_VRAM)
+ NOUVEAU_DRV_STAT_RES(buf, buf_obj_current_bytes_vid, -(uint64_t)buf->base.width0);
+ if (buf->domain == NOUVEAU_BO_GART)
+ NOUVEAU_DRV_STAT_RES(buf, buf_obj_current_bytes_sys, -(uint64_t)buf->base.width0);
+
buf->domain = 0;
}
nouveau_fence_ref(NULL, &res->fence_wr);
FREE(res);
+
+ NOUVEAU_DRV_STAT(nouveau_screen(pscreen), buf_obj_current_count, -1);
}
static uint8_t *
const unsigned base = tx->base.box.x;
const unsigned size = tx->base.box.width;
+ NOUVEAU_DRV_STAT(nv->screen, buf_read_bytes_staging_vid, size);
+
nv->copy_data(nv, tx->bo, tx->offset, NOUVEAU_BO_GART,
buf->bo, buf->offset + base, buf->domain, size);
else
buf->status |= NOUVEAU_BUFFER_STATUS_DIRTY;
+ if (buf->domain == NOUVEAU_BO_VRAM)
+ NOUVEAU_DRV_STAT(nv->screen, buf_write_bytes_staging_vid, size);
+ if (buf->domain == NOUVEAU_BO_GART)
+ NOUVEAU_DRV_STAT(nv->screen, buf_write_bytes_staging_sys, size);
+
if (tx->bo)
nv->copy_data(nv, buf->bo, buf->offset + base, buf->domain,
tx->bo, tx->offset + offset, NOUVEAU_BO_GART, size);
if (rw == PIPE_TRANSFER_READ) {
if (!buf->fence_wr)
return TRUE;
+ NOUVEAU_DRV_STAT_RES(buf, buf_non_kernel_fence_sync_count,
+ !nouveau_fence_signalled(buf->fence_wr));
if (!nouveau_fence_wait(buf->fence_wr))
return FALSE;
} else {
if (!buf->fence)
return TRUE;
+ NOUVEAU_DRV_STAT_RES(buf, buf_non_kernel_fence_sync_count,
+ !nouveau_fence_signalled(buf->fence));
if (!nouveau_fence_wait(buf->fence))
return FALSE;
nouveau_buffer_transfer_init(tx, resource, box, usage);
*ptransfer = &tx->base;
+ if (usage & PIPE_TRANSFER_READ)
+ NOUVEAU_DRV_STAT(nv->screen, buf_transfers_rd, 1);
+ if (usage & PIPE_TRANSFER_WRITE)
+ NOUVEAU_DRV_STAT(nv->screen, buf_transfers_wr, 1);
+
if (buf->domain == NOUVEAU_BO_VRAM) {
if (usage & NOUVEAU_TRANSFER_DISCARD) {
if (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE)
}
}
+ if (!tx->bo && (tx->base.usage & PIPE_TRANSFER_WRITE))
+ NOUVEAU_DRV_STAT(nv->screen, buf_write_bytes_direct, tx->base.box.width);
+
nouveau_buffer_transfer_del(nv, tx);
FREE(tx);
}
if (buffer->domain == NOUVEAU_BO_VRAM && screen->hint_buf_keep_sysmem_copy)
nouveau_buffer_cache(NULL, buffer);
+ NOUVEAU_DRV_STAT(screen, buf_obj_current_count, 1);
+
return &buffer->base;
fail:
if (fence->state == NOUVEAU_FENCE_STATE_SIGNALLED)
return TRUE;
+ if (!spins)
+ NOUVEAU_DRV_STAT(screen, any_non_kernel_fence_sync_count, 1);
spins++;
#ifdef PIPE_OS_UNIX
if (!(spins % 8)) /* donate a few cycles */
#include "pipe/p_screen.h"
#include "util/u_memory.h"
+#ifdef DEBUG
+# define NOUVEAU_ENABLE_DRIVER_STATISTICS
+#endif
+
typedef uint32_t u32;
typedef uint16_t u16;
int64_t cpu_gpu_time_delta;
boolean hint_buf_keep_sysmem_copy;
+
+#ifdef NOUVEAU_ENABLE_DRIVER_STATISTICS
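+ /* Driver statistics counters. The union allows indexed access (v[]) from
+  * the query code and named access via the NOUVEAU_DRV_STAT() macros; v[]
+  * must stay the same size as the named struct.
+  */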
+ union {
+ uint64_t v[29];
+ struct {
+ uint64_t tex_obj_current_count;
+ uint64_t tex_obj_current_bytes;
+ uint64_t buf_obj_current_count;
+ uint64_t buf_obj_current_bytes_vid;
+ uint64_t buf_obj_current_bytes_sys;
+ uint64_t tex_transfers_rd;
+ uint64_t tex_transfers_wr;
+ uint64_t tex_copy_count;
+ uint64_t tex_blit_count;
+ uint64_t tex_cache_flush_count;
+ uint64_t buf_transfers_rd;
+ uint64_t buf_transfers_wr;
+ uint64_t buf_read_bytes_staging_vid;
+ uint64_t buf_write_bytes_direct;
+ uint64_t buf_write_bytes_staging_vid;
+ uint64_t buf_write_bytes_staging_sys;
+ uint64_t buf_copy_bytes;
+ uint64_t buf_non_kernel_fence_sync_count;
+ uint64_t any_non_kernel_fence_sync_count;
+ uint64_t query_sync_count;
+ uint64_t gpu_serialize_count;
+ uint64_t draw_calls_array;
+ uint64_t draw_calls_indexed;
+ uint64_t draw_calls_fallback_count;
+ uint64_t user_buffer_upload_bytes;
+ uint64_t constbuf_upload_count;
+ uint64_t constbuf_upload_bytes;
+ uint64_t pushbuf_count;
+ uint64_t resource_validate_count;
+ } named;
+ } stats;
+#endif
};
+#ifdef NOUVEAU_ENABLE_DRIVER_STATISTICS
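+/* NOUVEAU_DRV_STAT() bumps a named counter on a nouveau_screen,
+ * NOUVEAU_DRV_STAT_RES() resolves the screen from a resource first, and
+ * NOUVEAU_DRV_STAT_IFD() compiles its argument only when statistics are
+ * enabled (for locals that exist solely to feed these counters).
+ */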
+# define NOUVEAU_DRV_STAT(s, n, v) do { \
+ (s)->stats.named.n += (v); \
+ } while(0)
+# define NOUVEAU_DRV_STAT_RES(r, n, v) do { \
+ nouveau_screen((r)->base.screen)->stats.named.n += (v); \
+ } while(0)
+# define NOUVEAU_DRV_STAT_IFD(x) x
+#else
+# define NOUVEAU_DRV_STAT(s, n, v) do { } while(0)
+# define NOUVEAU_DRV_STAT_RES(r, n, v) do { } while(0)
+# define NOUVEAU_DRV_STAT_IFD(x)
+#endif
+
static INLINE struct nouveau_screen *
nouveau_screen(struct pipe_screen *pscreen)
{
nouveau_fence_ref(NULL, &mt->base.fence);
nouveau_fence_ref(NULL, &mt->base.fence_wr);
+ NOUVEAU_DRV_STAT(nouveau_screen(pscreen), tex_obj_current_count, -1);
+ NOUVEAU_DRV_STAT(nouveau_screen(pscreen), tex_obj_current_bytes,
+ -(uint64_t)mt->total_size);
+
FREE(mt);
}
if (screen->cur_ctx)
screen->cur_ctx->state.flushed = TRUE;
}
+ NOUVEAU_DRV_STAT(&screen->base, pushbuf_count, 1);
}
static int
{
struct nouveau_list *list = on_flush ? &bufctx->current : &bufctx->pending;
struct nouveau_list *it;
+ NOUVEAU_DRV_STAT_IFD(unsigned count = 0);
for (it = list->next; it != list; it = it->next) {
struct nouveau_bufref *ref = (struct nouveau_bufref *)it;
struct nv04_resource *res = ref->priv;
if (res)
nvc0_resource_validate(res, (unsigned)ref->priv_data);
+ NOUVEAU_DRV_STAT_IFD(count++);
}
+ NOUVEAU_DRV_STAT(&nvc0->screen->base, resource_validate_count, count);
}
}
mt->base.address = mt->base.bo->offset;
+ NOUVEAU_DRV_STAT(nouveau_screen(pscreen), tex_obj_current_count, 1);
+ NOUVEAU_DRV_STAT(nouveau_screen(pscreen), tex_obj_current_bytes,
+ mt->total_size);
+
return pt;
}
boolean is64bit;
uint8_t rotate;
int nesting; /* only used for occlusion queries */
+ union {
+ struct nouveau_mm_allocation *mm;
+ uint64_t value;
+ } u;
struct nouveau_fence *fence;
- struct nouveau_mm_allocation *mm;
};
#define NVC0_QUERY_ALLOC_SPACE 256
if (q->bo) {
nouveau_bo_ref(NULL, &q->bo);
- if (q->mm) {
+ if (q->u.mm) {
if (q->state == NVC0_QUERY_STATE_READY)
- nouveau_mm_free(q->mm);
+ nouveau_mm_free(q->u.mm);
else
nouveau_fence_work(screen->base.fence.current,
- nouveau_mm_free_work, q->mm);
+ nouveau_mm_free_work, q->u.mm);
}
}
if (size) {
- q->mm = nouveau_mm_allocate(screen->base.mm_GART, size, &q->bo, &q->base);
+ q->u.mm = nouveau_mm_allocate(screen->base.mm_GART, size, &q->bo, &q->base);
if (!q->bo)
return FALSE;
q->offset = q->base;
space = 16;
break;
default:
+#ifdef NOUVEAU_ENABLE_DRIVER_STATISTICS
+ if (type >= NVC0_QUERY_DRV_STAT(0) && type <= NVC0_QUERY_DRV_STAT_LAST) {
+ space = 0;
+ q->is64bit = TRUE;
+ q->index = type - NVC0_QUERY_DRV_STAT(0);
+ break;
+ } else
+#endif
if (nvc0->screen->base.class_3d >= NVE4_3D_CLASS &&
nvc0->screen->base.device->drm_version >= 0x01000101) {
if (type >= NVE4_PM_QUERY(0) &&
- type <= NVE4_PM_QUERY_MAX) {
+ type <= NVE4_PM_QUERY_LAST) {
/* 8 counters per MP + clock */
space = 12 * nvc0->screen->mp_count * sizeof(uint32_t);
break;
nvc0_query_get(push, q, 0xc0 + 0x90, 0x0e809002); /* TEP, LAUNCHES */
break;
default:
- if (q->type >= NVE4_PM_QUERY(0) && q->type <= NVE4_PM_QUERY_MAX)
+#ifdef NOUVEAU_ENABLE_DRIVER_STATISTICS
+ if (q->type >= NVC0_QUERY_DRV_STAT(0) &&
+ q->type <= NVC0_QUERY_DRV_STAT_LAST) {
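+ /* Stats with index < 5 are "current" gauges (object counts/bytes): record
+  * 0 so the result reports the value at query end. The rest are cumulative
+  * counters, so record the start value and report a delta at end.
+  */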
+ if (q->index >= 5)
+ q->u.value = nvc0->screen->base.stats.v[q->index];
+ else
+ q->u.value = 0;
+ } else
+#endif
+ if (q->type >= NVE4_PM_QUERY(0) && q->type <= NVE4_PM_QUERY_LAST) {
nve4_mp_pm_query_begin(nvc0, q);
+ }
break;
}
q->state = NVC0_QUERY_STATE_ACTIVE;
nvc0_query_get(push, q, 0x00, 0x0d005002 | (q->index << 5));
break;
default:
- if (q->type >= NVE4_PM_QUERY(0) && q->type <= NVE4_PM_QUERY_MAX)
+#ifdef NOUVEAU_ENABLE_DRIVER_STATISTICS
+ if (q->type >= NVC0_QUERY_DRV_STAT(0) &&
+ q->type <= NVC0_QUERY_DRV_STAT_LAST) {
+ q->u.value = nvc0->screen->base.stats.v[q->index] - q->u.value;
+ return;
+ } else
+#endif
+ if (q->type >= NVE4_PM_QUERY(0) && q->type <= NVE4_PM_QUERY_LAST)
nve4_mp_pm_query_end(nvc0, q);
break;
}
uint64_t *data64 = (uint64_t *)q->data;
unsigned i;
- if (q->type >= NVE4_PM_QUERY(0) && q->type <= NVE4_PM_QUERY_MAX)
+#ifdef NOUVEAU_ENABLE_DRIVER_STATISTICS
+ if (q->type >= NVC0_QUERY_DRV_STAT(0) &&
+ q->type <= NVC0_QUERY_DRV_STAT_LAST) {
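+ /* Driver statistics are maintained in software; return the recorded
+  * value directly, without touching the GPU or waiting on a fence.
+  */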
+ res64[0] = q->u.value;
+ return TRUE;
+ } else
+#endif
+ if (q->type >= NVE4_PM_QUERY(0) && q->type <= NVE4_PM_QUERY_LAST) {
return nve4_mp_pm_query_result(nvc0, q, result, wait);
+ }
if (q->state != NVC0_QUERY_STATE_READY)
nvc0_query_update(nvc0->screen->base.client, q);
}
if (nouveau_bo_wait(q->bo, NOUVEAU_BO_RD, nvc0->screen->base.client))
return FALSE;
+ NOUVEAU_DRV_STAT(&nvc0->screen->base, query_sync_count, 1);
}
q->state = NVC0_QUERY_STATE_READY;
*serialize = FALSE;
PUSH_SPACE(nvc0_context(pipe)->base.pushbuf, 1);
IMMED_NVC0(nvc0_context(pipe)->base.pushbuf, NVC0_3D(SERIALIZE), 0);
+
+ NOUVEAU_DRV_STAT(nouveau_screen(pipe->screen), gpu_serialize_count, 1);
}
nvc0_query(targ->pq)->index = index;
}
+/* === DRIVER STATISTICS === */
+
+#ifdef NOUVEAU_ENABLE_DRIVER_STATISTICS
+
+static const char *nvc0_drv_stat_names[] =
+{
+ "drv-tex_obj_current_count",
+ "drv-tex_obj_current_bytes",
+ "drv-buf_obj_current_count",
+ "drv-buf_obj_current_bytes_vid",
+ "drv-buf_obj_current_bytes_sys",
+ "drv-tex_transfers_rd",
+ "drv-tex_transfers_wr",
+ "drv-tex_copy_count",
+ "drv-tex_blit_count",
+ "drv-tex_cache_flush_count",
+ "drv-buf_transfers_rd",
+ "drv-buf_transfers_wr",
+ "drv-buf_read_bytes_staging_vid",
+ "drv-buf_write_bytes_direct",
+ "drv-buf_write_bytes_staging_vid",
+ "drv-buf_write_bytes_staging_sys",
+ "drv-buf_copy_bytes",
+ "drv-buf_non_kernel_fence_sync_count",
+ "drv-any_non_kernel_fence_sync_count",
+ "drv-query_sync_count",
+ "drv-gpu_serialize_count",
+ "drv-draw_calls_array",
+ "drv-draw_calls_indexed",
+ "drv-draw_calls_fallback_count",
+ "drv-user_buffer_upload_bytes",
+ "drv-constbuf_upload_count",
+ "drv-constbuf_upload_bytes",
+ "drv-pushbuf_count",
+ "drv-resource_validate_count"
+};
+
+#endif /* NOUVEAU_ENABLE_DRIVER_STATISTICS */
+
+
/* === PERFORMANCE MONITORING COUNTERS === */
/* Code to read out MP counters: They are accessible via mmio, too, but let's
struct pipe_driver_query_info *info)
{
struct nvc0_screen *screen = nvc0_screen(pscreen);
+ int count = 0;
+
+ count += NVC0_QUERY_DRV_STAT_COUNT;
if (screen->base.class_3d >= NVE4_3D_CLASS) {
- unsigned count = 0;
if (screen->base.device->drm_version >= 0x01000101)
- count = NVE4_PM_QUERY_COUNT;
- if (!info)
- return count;
- if (id < count) {
- info->name = nve4_pm_query_names[id];
- info->query_type = NVE4_PM_QUERY(id);
- info->max_value = ~0ULL;
- info->uses_byte_units = FALSE;
- return 1;
- }
- } else {
- if (!info)
- return 0;
+ count += NVE4_PM_QUERY_COUNT;
+ }
+ if (!info)
+ return count;
+
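+ /* Driver statistics occupy query ids [0, NVC0_QUERY_DRV_STAT_COUNT); the
+  * NVE4 MP performance counters follow.
+  */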
+#ifdef NOUVEAU_ENABLE_DRIVER_STATISTICS
+ if (id < NVC0_QUERY_DRV_STAT_COUNT) {
+ info->name = nvc0_drv_stat_names[id];
+ info->query_type = NVC0_QUERY_DRV_STAT(id);
+ info->max_value = ~0ULL;
+ info->uses_byte_units = !!strstr(info->name, "bytes");
+ return 1;
+ } else
+#endif
+ if (id < count) {
+ info->name = nve4_pm_query_names[id - NVC0_QUERY_DRV_STAT_COUNT];
+ info->query_type = NVE4_PM_QUERY(id - NVC0_QUERY_DRV_STAT_COUNT);
+ info->max_value = ~0ULL;
+ info->uses_byte_units = FALSE;
+ return 1;
}
/* user asked for info about non-existing query */
info->name = "this_is_not_the_query_you_are_looking_for";
*/
#define NVE4_PM_QUERY_COUNT 32
#define NVE4_PM_QUERY(i) (PIPE_QUERY_DRIVER_SPECIFIC + (i))
-#define NVE4_PM_QUERY_MAX NVE4_PM_QUERY(NVE4_PM_QUERY_COUNT - 1)
-/* MP (NOTE: these are also used to index a table, so put them first) */
+#define NVE4_PM_QUERY_LAST NVE4_PM_QUERY(NVE4_PM_QUERY_COUNT - 1)
#define NVE4_PM_QUERY_PROF_TRIGGER_0 0
#define NVE4_PM_QUERY_PROF_TRIGGER_1 1
#define NVE4_PM_QUERY_PROF_TRIGGER_2 2
...
*/
+/* Driver statistics queries:
+ * Software counters maintained in nouveau_screen::stats and exposed through
+ * the driver-specific query interface. The indices below match the layout
+ * of the stats union in nouveau_screen.h.
+ */
+#ifdef NOUVEAU_ENABLE_DRIVER_STATISTICS
+
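+/* Stat query types start at PIPE_QUERY_DRIVER_SPECIFIC + 1024, well above
+ * the NVE4 MP counter queries (PIPE_QUERY_DRIVER_SPECIFIC + 0..31), so the
+ * two ranges cannot overlap.
+ */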
+#define NVC0_QUERY_DRV_STAT(i) (PIPE_QUERY_DRIVER_SPECIFIC + 1024 + (i))
+#define NVC0_QUERY_DRV_STAT_COUNT 29
+#define NVC0_QUERY_DRV_STAT_LAST NVC0_QUERY_DRV_STAT(NVC0_QUERY_DRV_STAT_COUNT - 1)
+#define NVC0_QUERY_DRV_STAT_TEX_OBJECT_CURRENT_COUNT 0
+#define NVC0_QUERY_DRV_STAT_TEX_OBJECT_CURRENT_BYTES 1
+#define NVC0_QUERY_DRV_STAT_BUF_OBJECT_CURRENT_COUNT 2
+#define NVC0_QUERY_DRV_STAT_BUF_OBJECT_CURRENT_BYTES_VID 3
+#define NVC0_QUERY_DRV_STAT_BUF_OBJECT_CURRENT_BYTES_SYS 4
+#define NVC0_QUERY_DRV_STAT_TEX_TRANSFERS_READ 5
+#define NVC0_QUERY_DRV_STAT_TEX_TRANSFERS_WRITE 6
+#define NVC0_QUERY_DRV_STAT_TEX_COPY_COUNT 7
+#define NVC0_QUERY_DRV_STAT_TEX_BLIT_COUNT 8
+#define NVC0_QUERY_DRV_STAT_TEX_CACHE_FLUSH_COUNT 9
+#define NVC0_QUERY_DRV_STAT_BUF_TRANSFERS_READ 10
+#define NVC0_QUERY_DRV_STAT_BUF_TRANSFERS_WRITE 11
+#define NVC0_QUERY_DRV_STAT_BUF_READ_BYTES_STAGING_VID 12
+#define NVC0_QUERY_DRV_STAT_BUF_WRITE_BYTES_DIRECT 13
+#define NVC0_QUERY_DRV_STAT_BUF_WRITE_BYTES_STAGING_VID 14
+#define NVC0_QUERY_DRV_STAT_BUF_WRITE_BYTES_STAGING_SYS 15
+#define NVC0_QUERY_DRV_STAT_BUF_COPY_BYTES 16
+#define NVC0_QUERY_DRV_STAT_BUF_NON_KERNEL_FENCE_SYNC_COUNT 17
+#define NVC0_QUERY_DRV_STAT_ANY_NON_KERNEL_FENCE_SYNC_COUNT 18
+#define NVC0_QUERY_DRV_STAT_QUERY_SYNC_COUNT 19
+#define NVC0_QUERY_DRV_STAT_GPU_SERIALIZE_COUNT 20
+#define NVC0_QUERY_DRV_STAT_DRAW_CALLS_ARRAY 21
+#define NVC0_QUERY_DRV_STAT_DRAW_CALLS_INDEXED 22
+#define NVC0_QUERY_DRV_STAT_DRAW_CALLS_FALLBACK_COUNT 23
+#define NVC0_QUERY_DRV_STAT_USER_BUFFER_UPLOAD_BYTES 24
+#define NVC0_QUERY_DRV_STAT_CONSTBUF_UPLOAD_COUNT 25
+#define NVC0_QUERY_DRV_STAT_CONSTBUF_UPLOAD_BYTES 26
+#define NVC0_QUERY_DRV_STAT_PUSHBUF_COUNT 27
+#define NVC0_QUERY_DRV_STAT_RESOURCE_VALIDATE_COUNT 28
+
+#else
+
+#define NVC0_QUERY_DRV_STAT_COUNT 0
+
+#endif
+
int nvc0_screen_get_driver_query_info(struct pipe_screen *, unsigned,
struct pipe_driver_query_info *);
if (serialize)
IMMED_NVC0(push, NVC0_3D(SERIALIZE), 0);
+
+ NOUVEAU_DRV_STAT(&nvc0->screen->base, gpu_serialize_count, serialize);
}
static void
if (dst->target == PIPE_BUFFER && src->target == PIPE_BUFFER) {
util_resource_copy_region(pipe, dst, dst_level, dstx, dsty, dstz,
src, src_level, src_box);
+ NOUVEAU_DRV_STAT(&nvc0->screen->base, buf_copy_bytes, src_box->width);
return;
}
+ NOUVEAU_DRV_STAT(&nvc0->screen->base, tex_copy_count, 1);
/* 0 and 1 are equal, only supporting 0/1, 2, 4 and 8 */
assert((src->nr_samples | 1) == (dst->nr_samples | 1));
nvc0_blit_eng2d(nvc0, info);
else
nvc0_blit_3d(nvc0, info);
+
+ NOUVEAU_DRV_STAT(&nvc0->screen->base, tex_blit_count, 1);
}
boolean
if (res->status & NOUVEAU_BUFFER_STATUS_GPU_WRITING) {
BEGIN_NVC0(push, NVC0_3D(TEX_CACHE_CTL), 1);
PUSH_DATA (push, (tic->id << 4) | 1);
+ NOUVEAU_DRV_STAT(&nvc0->screen->base, tex_cache_flush_count, 1);
}
nvc0->screen->tic.lock[tic->id / 32] |= 1 << (tic->id % 32);
tx->rect[0].base += mt->layer_stride;
tx->rect[1].base += tx->nblocksy * tx->base.stride;
}
+ NOUVEAU_DRV_STAT(&nvc0->screen->base, tex_transfers_wr, 1);
}
+ if (tx->base.usage & PIPE_TRANSFER_READ)
+ NOUVEAU_DRV_STAT(&nvc0->screen->base, tex_transfers_rd, 1);
nouveau_bo_ref(NULL, &tx->rect[1].bo);
pipe_resource_reference(&transfer->resource, NULL);
struct nouveau_bufctx *bctx = nvc0_context(&nv->pipe)->bufctx;
struct nouveau_pushbuf *push = nv->pushbuf;
+ NOUVEAU_DRV_STAT(nv->screen, constbuf_upload_count, 1);
+ NOUVEAU_DRV_STAT(nv->screen, constbuf_upload_bytes, words * 4);
+
assert(!(offset & 3));
size = align(size, 0x100);
base, size, &bo);
if (bo)
BCTX_REFN_bo(nvc0->bufctx_3d, VTX_TMP, bo_flags, bo);
+
+ NOUVEAU_DRV_STAT(&nvc0->screen->base, user_buffer_upload_bytes, size);
}
BEGIN_1IC0(push, NVC0_3D(MACRO_VERTEX_ARRAY_SELECT), 5);
PUSH_DATA (push, address + base + size - 1);
PUSH_DATAh(push, address);
PUSH_DATA (push, address);
+
+ NOUVEAU_DRV_STAT(&nvc0->screen->base, user_buffer_upload_bytes, size);
}
mask = nvc0->state.constant_elts;
struct nvc0_screen *screen = push->user_priv;
nouveau_fence_update(&screen->base, TRUE);
+
+ NOUVEAU_DRV_STAT(&screen->base, pushbuf_count, 1);
}
static void
prim |= NVC0_3D_VERTEX_BEGIN_GL_INSTANCE_NEXT;
}
+ NOUVEAU_DRV_STAT(&nvc0->screen->base, draw_calls_array, 1);
}
static void
prim |= NVC0_3D_VERTEX_BEGIN_GL_INSTANCE_NEXT;
}
}
+ NOUVEAU_DRV_STAT(&nvc0->screen->base, draw_calls_indexed, 1);
}
static void
IMMED_NVC0(push, NVC0_3D(SERIALIZE), 0);
nvc0_query_fifo_wait(push, so->pq);
IMMED_NVC0(push, NVC0_3D(VERTEX_ARRAY_FLUSH), 0);
+
+ NOUVEAU_DRV_STAT(&nvc0->screen->base, gpu_serialize_count, 1);
}
while (num_instances--) {
nouveau_resource_unmap(nv04_resource(nvc0->idxbuf.buffer));
for (i = 0; i < nvc0->num_vtxbufs; ++i)
nouveau_resource_unmap(nv04_resource(nvc0->vtxbuf[i].buffer));
+
+ NOUVEAU_DRV_STAT(&nvc0->screen->base, draw_calls_fallback_count, 1);
}
static INLINE void