* VGPU9
*/
-static boolean
+static bool
svga_get_query_result(struct pipe_context *pipe,
struct pipe_query *q,
- boolean wait,
+ bool wait,
union pipe_query_result *result);
static enum pipe_error
return ret;
}
-static boolean
+static bool
get_query_result_vgpu9(struct svga_context *svga, struct svga_query *sq,
- boolean wait, uint64_t *result)
+ bool wait, uint64_t *result)
{
struct svga_winsys_screen *sws = svga_screen(svga->pipe.screen)->sws;
enum pipe_error ret;
state = sq->queryResult->state;
if (state == SVGA3D_QUERYSTATE_PENDING) {
if (!wait)
- return FALSE;
- sws->fence_finish(sws, sq->fence, SVGA_FENCE_FLAG_QUERY);
+ return false;
+ sws->fence_finish(sws, sq->fence, PIPE_TIMEOUT_INFINITE,
+ SVGA_FENCE_FLAG_QUERY);
state = sq->queryResult->state;
}
state == SVGA3D_QUERYSTATE_FAILED);
*result = (uint64_t)sq->queryResult->result32;
- return TRUE;
+ return true;
}
* any empty memory block around that can be freed up.
*/
index = -1;
- for (i = 0; i < SVGA_QUERY_MAX && index == -1; i++) {
+ for (i = 0; i < SVGA3D_QUERYTYPE_MAX && index == -1; i++) {
struct svga_qmem_alloc_entry *alloc_entry;
struct svga_qmem_alloc_entry *prev_alloc_entry = NULL;
int slot_index = -1;
unsigned offset;
- assert(type < SVGA_QUERY_MAX);
+ assert(type < SVGA3D_QUERYTYPE_MAX);
alloc_entry = svga->gb_query_map[type];
struct svga_winsys_screen *sws = svga_screen(svga->pipe.screen)->sws;
unsigned i;
- for (i = 0; i < SVGA_QUERY_MAX; i++) {
+ for (i = 0; i < SVGA3D_QUERYTYPE_MAX; i++) {
struct svga_qmem_alloc_entry *alloc_entry, *next;
alloc_entry = svga->gb_query_map[i];
while (alloc_entry) {
return ret;
}
-static boolean
+static bool
get_query_result_vgpu10(struct svga_context *svga, struct svga_query *sq,
- boolean wait, void *result, int resultLen)
+ bool wait, void *result, int resultLen)
{
struct svga_winsys_screen *sws = svga_screen(svga->pipe.screen)->sws;
SVGA3dQueryState queryState;
sws->query_get_result(sws, sq->gb_query, sq->offset, &queryState, result, resultLen);
- if (!sq->fence) {
- /* The query hasn't been submitted yet. We need to submit it now
- * since the GL spec says "Querying the state for a given occlusion
- * query forces that occlusion query to complete within a finite amount
- * of time."
+ if (queryState != SVGA3D_QUERYSTATE_SUCCEEDED && !sq->fence) {
+ /* We don't have the query result yet, and the query hasn't been
+ * submitted. We need to submit it now since the GL spec says
+ * "Querying the state for a given occlusion query forces that
+ * occlusion query to complete within a finite amount of time."
*/
svga_context_flush(svga, &sq->fence);
}
if (queryState == SVGA3D_QUERYSTATE_PENDING ||
queryState == SVGA3D_QUERYSTATE_NEW) {
if (!wait)
- return FALSE;
- sws->fence_finish(sws, sq->fence, SVGA_FENCE_FLAG_QUERY);
+ return false;
+ sws->fence_finish(sws, sq->fence, PIPE_TIMEOUT_INFINITE,
+ SVGA_FENCE_FLAG_QUERY);
sws->query_get_result(sws, sq->gb_query, sq->offset, &queryState, result, resultLen);
}
assert(queryState == SVGA3D_QUERYSTATE_SUCCEEDED ||
queryState == SVGA3D_QUERYSTATE_FAILED);
- return TRUE;
+ return true;
}
static struct pipe_query *
}
break;
case PIPE_QUERY_OCCLUSION_PREDICATE:
+ case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
if (svga_have_vgpu10(svga)) {
sq->svga_type = SVGA3D_QUERYTYPE_OCCLUSIONPREDICATE;
define_query_vgpu10(svga, sq, sizeof(SVGADXOcclusionPredicateQueryResult));
case SVGA_QUERY_NUM_BUFFER_UPLOADS:
case SVGA_QUERY_NUM_CONST_BUF_UPDATES:
case SVGA_QUERY_NUM_CONST_UPDATES:
+ case SVGA_QUERY_NUM_FAILED_ALLOCATIONS:
+ case SVGA_QUERY_NUM_COMMANDS_PER_DRAW:
break;
case SVGA_QUERY_FLUSH_TIME:
case SVGA_QUERY_MAP_BUFFER_TIME:
switch (sq->type) {
case PIPE_QUERY_OCCLUSION_COUNTER:
case PIPE_QUERY_OCCLUSION_PREDICATE:
+ case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
if (svga_have_vgpu10(svga)) {
/* make sure to also destroy any associated predicate query */
if (sq->predicate)
case SVGA_QUERY_NUM_BUFFER_UPLOADS:
case SVGA_QUERY_NUM_CONST_BUF_UPDATES:
case SVGA_QUERY_NUM_CONST_UPDATES:
+ case SVGA_QUERY_NUM_FAILED_ALLOCATIONS:
+ case SVGA_QUERY_NUM_COMMANDS_PER_DRAW:
/* nothing */
break;
default:
}
-static boolean
+static bool
svga_begin_query(struct pipe_context *pipe, struct pipe_query *q)
{
struct svga_context *svga = svga_context(pipe);
switch (sq->type) {
case PIPE_QUERY_OCCLUSION_COUNTER:
case PIPE_QUERY_OCCLUSION_PREDICATE:
+ case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
if (svga_have_vgpu10(svga)) {
ret = begin_query_vgpu10(svga, sq);
/* also need to start the associated occlusion predicate query */
case SVGA_QUERY_NUM_STATE_OBJECTS:
case SVGA_QUERY_NUM_SURFACE_VIEWS:
case SVGA_QUERY_NUM_GENERATE_MIPMAP:
+ case SVGA_QUERY_NUM_FAILED_ALLOCATIONS:
+ case SVGA_QUERY_NUM_COMMANDS_PER_DRAW:
/* nothing */
break;
default:
switch (sq->type) {
case PIPE_QUERY_OCCLUSION_COUNTER:
case PIPE_QUERY_OCCLUSION_PREDICATE:
+ case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
if (svga_have_vgpu10(svga)) {
ret = end_query_vgpu10(svga, sq);
/* also need to end the associated occlusion predicate query */
}
assert(ret == PIPE_OK);
(void) ret;
- /* TODO: Delay flushing. We don't really need to flush here, just ensure
- * that there is one flush before svga_get_query_result attempts to get
- * the result.
- */
- svga_context_flush(svga, NULL);
break;
case PIPE_QUERY_PRIMITIVES_GENERATED:
case PIPE_QUERY_PRIMITIVES_EMITTED:
case SVGA_QUERY_NUM_STATE_OBJECTS:
case SVGA_QUERY_NUM_SURFACE_VIEWS:
case SVGA_QUERY_NUM_GENERATE_MIPMAP:
+ case SVGA_QUERY_NUM_FAILED_ALLOCATIONS:
+ case SVGA_QUERY_NUM_COMMANDS_PER_DRAW:
/* nothing */
break;
default:
}
-static boolean
+static bool
svga_get_query_result(struct pipe_context *pipe,
struct pipe_query *q,
- boolean wait,
+ bool wait,
union pipe_query_result *vresult)
{
struct svga_screen *svgascreen = svga_screen(pipe->screen);
struct svga_context *svga = svga_context(pipe);
struct svga_query *sq = svga_query(q);
uint64_t *result = (uint64_t *)vresult;
- boolean ret = TRUE;
+ bool ret = true;
assert(sq);
ret = get_query_result_vgpu9(svga, sq, wait, result);
}
break;
- case PIPE_QUERY_OCCLUSION_PREDICATE: {
+ case PIPE_QUERY_OCCLUSION_PREDICATE:
+ case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE: {
if (svga_have_vgpu10(svga)) {
SVGADXOcclusionPredicateQueryResult occResult;
ret = get_query_result_vgpu10(svga, sq, wait,
case SVGA_QUERY_NUM_GENERATE_MIPMAP:
vresult->u64 = svga->hud.num_generate_mipmap;
break;
+ case SVGA_QUERY_NUM_FAILED_ALLOCATIONS:
+ vresult->u64 = svgascreen->hud.num_failed_allocations;
+ break;
+ case SVGA_QUERY_NUM_COMMANDS_PER_DRAW:
+ vresult->f = (float) svga->swc->num_commands
+ / (float) svga->swc->num_draw_commands;
+ break;
default:
assert(!"unexpected query type in svga_get_query_result");
}
static void
svga_render_condition(struct pipe_context *pipe, struct pipe_query *q,
- boolean condition, uint mode)
+ bool condition, enum pipe_render_cond_flag mode)
{
struct svga_context *svga = svga_context(pipe);
struct svga_winsys_screen *sws = svga_screen(svga->pipe.screen)->sws;
if ((mode == PIPE_RENDER_COND_WAIT ||
mode == PIPE_RENDER_COND_BY_REGION_WAIT) && sq->fence) {
- sws->fence_finish(sws, sq->fence, SVGA_FENCE_FLAG_QUERY);
+ sws->fence_finish(sws, sq->fence, PIPE_TIMEOUT_INFINITE,
+ SVGA_FENCE_FLAG_QUERY);
}
}
/*
ret = SVGA3D_vgpu10_SetPredication(svga->swc, queryId,
(uint32) condition);
}
+ svga->pred.query_id = queryId;
+ svga->pred.cond = condition;
}
+
+ svga->render_condition = (sq != NULL);
}
static void
-svga_set_active_query_state(struct pipe_context *pipe, boolean enable)
+svga_set_active_query_state(struct pipe_context *pipe, bool enable)
{
}
+/**
+ * \brief Toggle conditional rendering if already enabled
+ *
+ * Used to temporarily suspend (and later restore) an active predication
+ * query around internal operations that must not be affected by the
+ * application's render condition.
+ *
+ * \param[in] svga  The svga context
+ * \param[in] render_condition_enabled  Whether to ignore requests to turn
+ *                                      conditional rendering off
+ * \param[in] on  Whether to turn conditional rendering on or off
+ *
+ * NOTE(review): the parameters use the legacy 'boolean' type while the
+ * rest of this patch converts 'boolean' to 'bool'. Consider converting
+ * this function (and its header prototype) as well for consistency —
+ * verify the declaration in the header matches before changing.
+ */
+void
+svga_toggle_render_condition(struct svga_context *svga,
+                             boolean render_condition_enabled,
+                             boolean on)
+{
+   SVGA3dQueryId query_id;
+   enum pipe_error ret;
+
+   /* Nothing to do when no predication query was ever set, or when the
+    * caller asked us to leave conditional rendering alone.
+    */
+   if (render_condition_enabled ||
+       svga->pred.query_id == SVGA3D_INVALID_ID) {
+      return;
+   }
+
+   /*
+    * If we get here, it means that the system supports
+    * conditional rendering since svga->pred.query_id has already been
+    * modified for this context and thus support has already been
+    * verified.
+    */
+   query_id = on ? svga->pred.query_id : SVGA3D_INVALID_ID;
+
+   ret = SVGA3D_vgpu10_SetPredication(svga->swc, query_id,
+                                      (uint32) svga->pred.cond);
+   if (ret == PIPE_ERROR_OUT_OF_MEMORY) {
+      /* Command buffer was full: flush and retry once. */
+      svga_context_flush(svga, NULL);
+      ret = SVGA3D_vgpu10_SetPredication(svga->swc, query_id,
+                                         (uint32) svga->pred.cond);
+      assert(ret == PIPE_OK);
+   }
+}
+
+
void
svga_init_query_functions(struct svga_context *svga)
{