X-Git-Url: https://git.libre-soc.org/?a=blobdiff_plain;f=src%2Fgallium%2Fdrivers%2Fnv50%2Fnv50_query.c;h=220b166f98534def5a4da6a4bc06c33cecfdce07;hb=6d1cdec3ba151168bfc3aef222fba6265dfb41fb;hp=940e04365f255498fc278632aac719e3e984faf5;hpb=8d482227915552c414e13743652e6794c4313ae2;p=mesa.git

diff --git a/src/gallium/drivers/nv50/nv50_query.c b/src/gallium/drivers/nv50/nv50_query.c
index 940e04365f2..220b166f985 100644
--- a/src/gallium/drivers/nv50/nv50_query.c
+++ b/src/gallium/drivers/nv50/nv50_query.c
@@ -1,5 +1,5 @@
 /*
- * Copyright 2008 Ben Skeggs
+ * Copyright 2011 Nouveau Project
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -18,117 +18,316 @@
  * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
  * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
+ *
+ * Authors: Christoph Bumiller
  */
 
-#include "pipe/p_context.h"
-#include "pipe/p_inlines.h"
+#define NV50_PUSH_EXPLICIT_SPACE_CHECKING
 
 #include "nv50_context.h"
+#include "nouveau/nv_object.xml.h"
+
+/* XXX: Nested queries, and simultaneous queries on multiple gallium contexts
+ * (since we use only a single GPU channel per screen) will not work properly.
+ *
+ * The first is not that big of an issue because OpenGL does not allow nested
+ * queries anyway.
+ */
 
 struct nv50_query {
-	struct nouveau_bo *bo;
-	unsigned type;
-	boolean ready;
-	uint64_t result;
+   uint32_t *data;
+   uint32_t type;
+   uint32_t sequence;
+   struct nouveau_bo *bo;
+   uint32_t base;
+   uint32_t offset; /* base + i * 16 */
+   boolean ready;
+   boolean is64bit;
+   struct nouveau_mm_allocation *mm;
 };
 
+#define NV50_QUERY_ALLOC_SPACE 128
+
 static INLINE struct nv50_query *
 nv50_query(struct pipe_query *pipe)
 {
-	return (struct nv50_query *)pipe;
+   return (struct nv50_query *)pipe;
+}
+
+static boolean
+nv50_query_allocate(struct nv50_context *nv50, struct nv50_query *q, int size)
+{
+   struct nv50_screen *screen = nv50->screen;
+   int ret;
+
+   if (q->bo) {
+      nouveau_bo_ref(NULL, &q->bo);
+      if (q->mm) {
+         if (q->ready)
+            nouveau_mm_free(q->mm);
+         else
+            nouveau_fence_work(screen->base.fence.current, nouveau_mm_free_work,
+                               q->mm);
+      }
+   }
+   if (size) {
+      q->mm = nouveau_mm_allocate(screen->base.mm_GART, size, &q->bo, &q->base);
+      if (!q->bo)
+         return FALSE;
+      q->offset = q->base;
+
+      ret = nouveau_bo_map(q->bo, 0, screen->base.client);
+      if (ret) {
+         nv50_query_allocate(nv50, q, 0);
+         return FALSE;
+      }
+      q->data = (uint32_t *)((uint8_t *)q->bo->map + q->base);
+   }
+   return TRUE;
+}
+
+static void
+nv50_query_destroy(struct pipe_context *pipe, struct pipe_query *pq)
+{
+   nv50_query_allocate(nv50_context(pipe), nv50_query(pq), 0);
+   FREE(nv50_query(pq));
 }
 
 static struct pipe_query *
 nv50_query_create(struct pipe_context *pipe, unsigned type)
 {
-	struct nouveau_device *dev = nouveau_screen(pipe->screen)->device;
-	struct nv50_query *q = CALLOC_STRUCT(nv50_query);
-	int ret;
+   struct nv50_context *nv50 = nv50_context(pipe);
+   struct nv50_query *q;
+
+   q = CALLOC_STRUCT(nv50_query);
+   if (!q)
+      return NULL;
 
-	assert (q->type == PIPE_QUERY_OCCLUSION_COUNTER);
-	q->type = type;
+   if (!nv50_query_allocate(nv50, q, NV50_QUERY_ALLOC_SPACE)) {
+      FREE(q);
+      return NULL;
+   }
 
-	ret = nouveau_bo_new(dev, NOUVEAU_BO_VRAM | NOUVEAU_BO_MAP, 256,
-			     16, &q->bo);
-	if (ret) {
-		FREE(q);
-		return NULL;
-	}
+   q->is64bit = (type == PIPE_QUERY_PRIMITIVES_GENERATED ||
+                 type == PIPE_QUERY_PRIMITIVES_EMITTED ||
+                 type == PIPE_QUERY_SO_STATISTICS);
+   q->type = type;
 
-	return (struct pipe_query *)q;
+   if (q->type == PIPE_QUERY_OCCLUSION_COUNTER) {
+      q->offset -= 16;
+      q->data -= 16 / sizeof(*q->data); /* we advance before query_begin ! */
+   }
+
+   return (struct pipe_query *)q;
 }
 
 static void
-nv50_query_destroy(struct pipe_context *pipe, struct pipe_query *pq)
+nv50_query_get(struct nouveau_pushbuf *push, struct nv50_query *q,
+               unsigned offset, uint32_t get)
 {
-	struct nv50_query *q = nv50_query(pq);
+   offset += q->offset;
 
-	if (q) {
-		nouveau_bo_ref(NULL, &q->bo);
-		FREE(q);
-	}
+   PUSH_SPACE(push, 5);
+   PUSH_REFN (push, q->bo, NOUVEAU_BO_GART | NOUVEAU_BO_WR);
+   BEGIN_NV04(push, NV50_3D(QUERY_ADDRESS_HIGH), 4);
+   PUSH_DATAh(push, q->bo->offset + offset);
+   PUSH_DATA (push, q->bo->offset + offset);
+   PUSH_DATA (push, q->sequence);
+   PUSH_DATA (push, get);
 }
 
 static void
 nv50_query_begin(struct pipe_context *pipe, struct pipe_query *pq)
 {
-	struct nv50_context *nv50 = nv50_context(pipe);
-	struct nouveau_channel *chan = nv50->screen->base.channel;
-	struct nouveau_grobj *tesla = nv50->screen->tesla;
-	struct nv50_query *q = nv50_query(pq);
+   struct nv50_context *nv50 = nv50_context(pipe);
+   struct nouveau_pushbuf *push = nv50->base.pushbuf;
+   struct nv50_query *q = nv50_query(pq);
 
-	BEGIN_RING(chan, tesla, 0x1530, 1);
-	OUT_RING  (chan, 1);
-	BEGIN_RING(chan, tesla, 0x1514, 1);
-	OUT_RING  (chan, 1);
+   /* For occlusion queries we have to change the storage, because a previous
+    * query might set the initial render condition to FALSE even *after* we re-
+    * initialized it to TRUE.
+    */
+   if (q->type == PIPE_QUERY_OCCLUSION_COUNTER) {
+      q->offset += 16;
+      q->data += 16 / sizeof(*q->data);
+      if (q->offset - q->base == NV50_QUERY_ALLOC_SPACE)
+         nv50_query_allocate(nv50, q, NV50_QUERY_ALLOC_SPACE);
 
-	q->ready = FALSE;
+      /* XXX: can we do this with the GPU, and sync with respect to a previous
+       * query ?
+       */
+      q->data[1] = 1; /* initial render condition = TRUE */
+   }
+   if (!q->is64bit)
+      q->data[0] = q->sequence++; /* the previously used one */
+
+   switch (q->type) {
+   case PIPE_QUERY_OCCLUSION_COUNTER:
+      PUSH_SPACE(push, 4);
+      BEGIN_NV04(push, NV50_3D(COUNTER_RESET), 1);
+      PUSH_DATA (push, NV50_3D_COUNTER_RESET_SAMPLECNT);
+      BEGIN_NV04(push, NV50_3D(SAMPLECNT_ENABLE), 1);
+      PUSH_DATA (push, 1);
+      break;
+   case PIPE_QUERY_PRIMITIVES_GENERATED: /* store before & after instead ? */
+      PUSH_SPACE(push, 2);
+      BEGIN_NV04(push, NV50_3D(COUNTER_RESET), 1);
+      PUSH_DATA (push, NV50_3D_COUNTER_RESET_GENERATED_PRIMITIVES);
+      break;
+   case PIPE_QUERY_PRIMITIVES_EMITTED:
+      PUSH_SPACE(push, 2);
+      BEGIN_NV04(push, NV50_3D(COUNTER_RESET), 1);
+      PUSH_DATA (push, NV50_3D_COUNTER_RESET_TRANSFORM_FEEDBACK);
+      break;
+   case PIPE_QUERY_SO_STATISTICS:
+      PUSH_SPACE(push, 3);
+      BEGIN_NI04(push, NV50_3D(COUNTER_RESET), 2);
+      PUSH_DATA (push, NV50_3D_COUNTER_RESET_TRANSFORM_FEEDBACK);
+      PUSH_DATA (push, NV50_3D_COUNTER_RESET_GENERATED_PRIMITIVES);
+      break;
+   case PIPE_QUERY_TIMESTAMP_DISJOINT:
+   case PIPE_QUERY_TIME_ELAPSED:
+      nv50_query_get(push, q, 0x10, 0x00005002);
+      break;
+   default:
+      break;
+   }
+   q->ready = FALSE;
 }
 
 static void
 nv50_query_end(struct pipe_context *pipe, struct pipe_query *pq)
 {
-	struct nv50_context *nv50 = nv50_context(pipe);
-	struct nouveau_channel *chan = nv50->screen->base.channel;
-	struct nouveau_grobj *tesla = nv50->screen->tesla;
-	struct nv50_query *q = nv50_query(pq);
-
-	WAIT_RING (chan, 5);
-	BEGIN_RING(chan, tesla, 0x1b00, 4);
-	OUT_RELOCh(chan, q->bo, 0, NOUVEAU_BO_VRAM | NOUVEAU_BO_WR);
-	OUT_RELOCl(chan, q->bo, 0, NOUVEAU_BO_VRAM | NOUVEAU_BO_WR);
-	OUT_RING  (chan, 0x00000000);
-	OUT_RING  (chan, 0x0100f002);
-	FIRE_RING (chan);
+   struct nv50_context *nv50 = nv50_context(pipe);
+   struct nouveau_pushbuf *push = nv50->base.pushbuf;
+   struct nv50_query *q = nv50_query(pq);
+
+   switch (q->type) {
+   case PIPE_QUERY_OCCLUSION_COUNTER:
+      nv50_query_get(push, q, 0, 0x0100f002);
+      PUSH_SPACE(push, 2);
+      BEGIN_NV04(push, NV50_3D(SAMPLECNT_ENABLE), 1);
+      PUSH_DATA (push, 0);
+      break;
+   case PIPE_QUERY_PRIMITIVES_GENERATED:
+      nv50_query_get(push, q, 0, 0x06805002);
+      break;
+   case PIPE_QUERY_PRIMITIVES_EMITTED:
+      nv50_query_get(push, q, 0, 0x05805002);
+      break;
+   case PIPE_QUERY_SO_STATISTICS:
+      nv50_query_get(push, q, 0x00, 0x05805002);
+      nv50_query_get(push, q, 0x10, 0x06805002);
+      break;
+   case PIPE_QUERY_TIMESTAMP_DISJOINT:
+   case PIPE_QUERY_TIME_ELAPSED:
+      nv50_query_get(push, q, 0, 0x00005002);
+      break;
+   case PIPE_QUERY_GPU_FINISHED:
+      nv50_query_get(push, q, 0, 0x1000f010);
+      break;
+   default:
+      assert(0);
+      break;
+   }
+}
+
+static INLINE boolean
+nv50_query_ready(struct nv50_query *q)
+{
+   return q->ready || (!q->is64bit && (q->data[0] == q->sequence));
 }
 
 static boolean
 nv50_query_result(struct pipe_context *pipe, struct pipe_query *pq,
-		  boolean wait, uint64_t *result)
+                  boolean wait, union pipe_query_result *result)
+{
+   struct nv50_context *nv50 = nv50_context(pipe);
+   struct nv50_query *q = nv50_query(pq);
+   uint64_t *res64 = (uint64_t *)result;
+   boolean *res8 = (boolean *)result;
+   uint64_t *data64 = (uint64_t *)q->data;
+
+   if (!q->ready) /* update ? */
+      q->ready = nv50_query_ready(q);
+   if (!q->ready) {
+      if (!wait) {
+         /* for broken apps that spin on GL_QUERY_RESULT_AVAILABLE */
+         PUSH_KICK(nv50->base.pushbuf);
+         return FALSE;
+      }
+      if (nouveau_bo_wait(q->bo, NOUVEAU_BO_RD, nv50->screen->base.client))
+         return FALSE;
+   }
+   q->ready = TRUE;
+
+   switch (q->type) {
+   case PIPE_QUERY_GPU_FINISHED:
+      res8[0] = TRUE;
+      break;
+   case PIPE_QUERY_OCCLUSION_COUNTER: /* u32 sequence, u32 count, u64 time */
+      res64[0] = q->data[1];
+      break;
+   case PIPE_QUERY_PRIMITIVES_GENERATED: /* u64 count, u64 time */
+   case PIPE_QUERY_PRIMITIVES_EMITTED: /* u64 count, u64 time */
+      res64[0] = data64[0];
+      break;
+   case PIPE_QUERY_SO_STATISTICS:
+      res64[0] = data64[0];
+      res64[1] = data64[1];
+      break;
+   case PIPE_QUERY_TIMESTAMP_DISJOINT: /* u32 sequence, u32 0, u64 time */
+      res64[0] = 1000000000;
+      res8[8] = (data64[0] == data64[2]) ? FALSE : TRUE;
+      break;
+   case PIPE_QUERY_TIME_ELAPSED:
+      res64[0] = data64[1] - data64[3];
+      break;
+   default:
+      return FALSE;
+   }
+
+   return TRUE;
+}
+
+static void
+nv50_render_condition(struct pipe_context *pipe,
+                      struct pipe_query *pq, uint mode)
 {
-	struct nv50_query *q = nv50_query(pq);
+   struct nv50_context *nv50 = nv50_context(pipe);
+   struct nouveau_pushbuf *push = nv50->base.pushbuf;
+   struct nv50_query *q;
 
-	/*XXX: Want to be able to return FALSE here instead of blocking
-	 *     until the result is available..
-	 */
+   PUSH_SPACE(push, 6);
 
-	if (!q->ready) {
-		nouveau_bo_map(q->bo, NOUVEAU_BO_RD);
-		q->result = ((uint32_t *)q->bo->map)[1];
-		q->ready = TRUE;
-		nouveau_bo_unmap(q->bo);
-	}
+   if (!pq) {
+      BEGIN_NV04(push, NV50_3D(COND_MODE), 1);
+      PUSH_DATA (push, NV50_3D_COND_MODE_ALWAYS);
+      return;
+   }
+   q = nv50_query(pq);
 
-	*result = q->result;
-	return q->ready;
+   if (mode == PIPE_RENDER_COND_WAIT ||
+       mode == PIPE_RENDER_COND_BY_REGION_WAIT) {
+      BEGIN_NV04(push, SUBC_3D(NV50_GRAPH_SERIALIZE), 1);
+      PUSH_DATA (push, 0);
+   }
+
+   BEGIN_NV04(push, NV50_3D(COND_ADDRESS_HIGH), 3);
+   PUSH_DATAh(push, q->bo->offset + q->offset);
+   PUSH_DATA (push, q->bo->offset + q->offset);
+   PUSH_DATA (push, NV50_3D_COND_MODE_RES_NON_ZERO);
 }
 
 void
 nv50_init_query_functions(struct nv50_context *nv50)
 {
-	nv50->pipe.create_query = nv50_query_create;
-	nv50->pipe.destroy_query = nv50_query_destroy;
-	nv50->pipe.begin_query = nv50_query_begin;
-	nv50->pipe.end_query = nv50_query_end;
-	nv50->pipe.get_query_result = nv50_query_result;
+   struct pipe_context *pipe = &nv50->base.pipe;
+
+   pipe->create_query = nv50_query_create;
+   pipe->destroy_query = nv50_query_destroy;
+   pipe->begin_query = nv50_query_begin;
+   pipe->end_query = nv50_query_end;
+   pipe->get_query_result = nv50_query_result;
+   pipe->render_condition = nv50_render_condition;
 }
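
For orientation, here is a minimal sketch of how a gallium state tracker of this era could drive the query hooks the patch installs through nv50_init_query_functions(). This is illustrative code, not part of the patch: "ctx" is assumed to be a struct pipe_context created by this driver, draw_proxy_box() and draw_detailed_object() are hypothetical placeholders for real draw calls, and the result.u64 member and boolean/TRUE/FALSE conventions follow gallium's union pipe_query_result and p_compiler.h types of the time.

#include "pipe/p_context.h"
#include "pipe/p_defines.h"

/* Hypothetical placeholders for real draw calls. */
extern void draw_proxy_box(struct pipe_context *ctx);
extern void draw_detailed_object(struct pipe_context *ctx);

/* CPU path: read the sample count back. Passing wait = TRUE ends up
 * blocking in nouveau_bo_wait() until the GPU has written the sequence
 * number that marks the result as valid. */
static uint64_t
get_occlusion_count(struct pipe_context *ctx, struct pipe_query *q)
{
   union pipe_query_result result;

   if (!ctx->get_query_result(ctx, q, TRUE, &result))
      return 0;
   return result.u64;
}

/* GPU path: no readback at all; NV50_3D_COND_MODE_RES_NON_ZERO lets the
 * hardware itself skip the draw when the recorded sample count is zero. */
static void
draw_conditionally(struct pipe_context *ctx, struct pipe_query *q)
{
   ctx->render_condition(ctx, q, PIPE_RENDER_COND_WAIT);
   draw_detailed_object(ctx);
   ctx->render_condition(ctx, NULL, 0); /* back to unconditional rendering */
}

static void
occlusion_cull_example(struct pipe_context *ctx, boolean use_hw_condition)
{
   struct pipe_query *q =
      ctx->create_query(ctx, PIPE_QUERY_OCCLUSION_COUNTER);

   if (!q)
      return;

   /* Count the samples of cheap stand-in geometry that pass the Z test. */
   ctx->begin_query(ctx, q);
   draw_proxy_box(ctx);
   ctx->end_query(ctx, q);

   /* Decide on the GPU, or read the count back and decide on the CPU. */
   if (use_hw_condition)
      draw_conditionally(ctx, q);
   else if (get_occlusion_count(ctx, q) > 0)
      draw_detailed_object(ctx);

   ctx->destroy_query(ctx, q);
}

Note how the non-blocking path in nv50_query_result above kicks the pushbuf before returning FALSE: without that flush, an application spinning on GL_QUERY_RESULT_AVAILABLE would never submit the very commands that produce the result it is waiting for.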