/*
 * Copyright © 2008-2009 Maciej Cencora <m.cencora@gmail.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Maciej Cencora <m.cencora@gmail.com>
 */
27 #include "radeon_common.h"
28 #include "radeon_queryobj.h"
29 #include "radeon_debug.h"
31 #include "main/imports.h"
32 #include "main/simple_list.h"
34 static int radeonQueryIsFlushed(GLcontext
*ctx
, struct gl_query_object
*q
)
36 radeonContextPtr radeon
= RADEON_CONTEXT(ctx
);
37 struct radeon_query_object
*tmp
, *query
= (struct radeon_query_object
*)q
;
39 foreach(tmp
, &radeon
->query
.not_flushed_head
) {
48 static void radeonQueryGetResult(GLcontext
*ctx
, struct gl_query_object
*q
)
50 radeonContextPtr radeon
= RADEON_CONTEXT(ctx
);
51 struct radeon_query_object
*query
= (struct radeon_query_object
*)q
;
54 radeon_print(RADEON_STATE
, RADEON_VERBOSE
,
55 "%s: query id %d, result %d\n",
56 __FUNCTION__
, query
->Base
.Id
, (int) query
->Base
.Result
);
58 radeon_bo_map(query
->bo
, GL_FALSE
);
60 query
->Base
.Result
= 0;
61 if (IS_R600_CLASS(radeon
->radeonScreen
)) {
62 /* ZPASS EVENT writes alternating qwords
63 * At query start we set the start offset to 0 and
64 * hw writes zpass start counts to qwords 0, 2, 4, 6.
65 * At query end we set the start offset to 8 and
66 * hw writes zpass end counts to qwords 1, 3, 5, 7.
67 * then we substract. MSB is the valid bit.
69 uint64_t *result
= query
->bo
->ptr
;
70 for (i
= 0; i
< 8; i
+= 2) {
71 uint64_t start
= result
[i
];
72 uint64_t end
= result
[i
+ 1];
73 if ((start
& 0x8000000000000000) && (end
& 0x8000000000000000)) {
74 uint64_t query_count
= end
- start
;
75 query
->Base
.Result
+= query_count
;
78 radeon_print(RADEON_STATE
, RADEON_TRACE
,
79 "%d start: %lx, end: %lx %ld\n", i
, start
, end
, end
- start
);
82 uint32_t *result
= query
->bo
->ptr
;
83 for (i
= 0; i
< query
->curr_offset
/sizeof(uint32_t); ++i
) {
84 query
->Base
.Result
+= result
[i
];
85 radeon_print(RADEON_STATE
, RADEON_TRACE
, "result[%d] = %d\n", i
, result
[i
]);
89 radeon_bo_unmap(query
->bo
);
92 static struct gl_query_object
* radeonNewQueryObject(GLcontext
*ctx
, GLuint id
)
94 struct radeon_query_object
*query
;
96 query
= _mesa_calloc(sizeof(struct radeon_query_object
));
99 query
->Base
.Result
= 0;
100 query
->Base
.Active
= GL_FALSE
;
101 query
->Base
.Ready
= GL_TRUE
;
103 radeon_print(RADEON_STATE
, RADEON_VERBOSE
,"%s: query id %d\n", __FUNCTION__
, query
->Base
.Id
);
108 static void radeonDeleteQuery(GLcontext
*ctx
, struct gl_query_object
*q
)
110 struct radeon_query_object
*query
= (struct radeon_query_object
*)q
;
112 radeon_print(RADEON_STATE
, RADEON_NORMAL
, "%s: query id %d\n", __FUNCTION__
, q
->Id
);
115 radeon_bo_unref(query
->bo
);
121 static void radeonWaitQuery(GLcontext
*ctx
, struct gl_query_object
*q
)
123 struct radeon_query_object
*query
= (struct radeon_query_object
*)q
;
125 /* If the cmdbuf with packets for this query hasn't been flushed yet, do it now */
126 if (!radeonQueryIsFlushed(ctx
, q
))
127 ctx
->Driver
.Flush(ctx
);
129 radeon_print(RADEON_STATE
, RADEON_VERBOSE
, "%s: query id %d, bo %p, offset %d\n", __FUNCTION__
, q
->Id
, query
->bo
, query
->curr_offset
);
131 radeonQueryGetResult(ctx
, q
);
133 query
->Base
.Ready
= GL_TRUE
;
137 static void radeonBeginQuery(GLcontext
*ctx
, struct gl_query_object
*q
)
139 radeonContextPtr radeon
= RADEON_CONTEXT(ctx
);
140 struct radeon_query_object
*query
= (struct radeon_query_object
*)q
;
142 radeon_print(RADEON_STATE
, RADEON_NORMAL
, "%s: query id %d\n", __FUNCTION__
, q
->Id
);
144 assert(radeon
->query
.current
== NULL
);
146 if (radeon
->dma
.flush
)
147 radeon
->dma
.flush(radeon
->glCtx
);
150 query
->bo
= radeon_bo_open(radeon
->radeonScreen
->bom
, 0, RADEON_QUERY_PAGE_SIZE
, RADEON_QUERY_PAGE_SIZE
, RADEON_GEM_DOMAIN_GTT
, 0);
152 query
->curr_offset
= 0;
154 radeon
->query
.current
= query
;
156 radeon
->query
.queryobj
.dirty
= GL_TRUE
;
157 radeon
->hw
.is_dirty
= GL_TRUE
;
158 insert_at_tail(&radeon
->query
.not_flushed_head
, query
);
162 void radeonEmitQueryEnd(GLcontext
*ctx
)
164 radeonContextPtr radeon
= RADEON_CONTEXT(ctx
);
165 struct radeon_query_object
*query
= radeon
->query
.current
;
170 if (query
->emitted_begin
== GL_FALSE
)
173 radeon_print(RADEON_STATE
, RADEON_NORMAL
, "%s: query id %d, bo %p, offset %d\n", __FUNCTION__
, query
->Base
.Id
, query
->bo
, query
->curr_offset
);
175 radeon_cs_space_check_with_bo(radeon
->cmdbuf
.cs
,
177 0, RADEON_GEM_DOMAIN_GTT
);
179 radeon
->vtbl
.emit_query_finish(radeon
);
182 static void radeonEndQuery(GLcontext
*ctx
, struct gl_query_object
*q
)
184 radeonContextPtr radeon
= RADEON_CONTEXT(ctx
);
186 radeon_print(RADEON_STATE
, RADEON_NORMAL
, "%s: query id %d\n", __FUNCTION__
, q
->Id
);
188 if (radeon
->dma
.flush
)
189 radeon
->dma
.flush(radeon
->glCtx
);
190 radeonEmitQueryEnd(ctx
);
192 radeon
->query
.current
= NULL
;
195 static void radeonCheckQuery(GLcontext
*ctx
, struct gl_query_object
*q
)
197 radeon_print(RADEON_STATE
, RADEON_TRACE
, "%s: query id %d\n", __FUNCTION__
, q
->Id
);
199 #ifdef DRM_RADEON_GEM_BUSY
200 radeonContextPtr radeon
= RADEON_CONTEXT(ctx
);
202 if (radeon
->radeonScreen
->kernel_mm
) {
203 struct radeon_query_object
*query
= (struct radeon_query_object
*)q
;
206 /* Need to perform a flush, as per ARB_occlusion_query spec */
207 if (!radeonQueryIsFlushed(ctx
, q
)) {
208 ctx
->Driver
.Flush(ctx
);
211 if (radeon_bo_is_busy(query
->bo
, &domain
) == 0) {
212 radeonQueryGetResult(ctx
, q
);
213 query
->Base
.Ready
= GL_TRUE
;
216 radeonWaitQuery(ctx
, q
);
219 radeonWaitQuery(ctx
, q
);
223 void radeonInitQueryObjFunctions(struct dd_function_table
*functions
)
225 functions
->NewQueryObject
= radeonNewQueryObject
;
226 functions
->DeleteQuery
= radeonDeleteQuery
;
227 functions
->BeginQuery
= radeonBeginQuery
;
228 functions
->EndQuery
= radeonEndQuery
;
229 functions
->CheckQuery
= radeonCheckQuery
;
230 functions
->WaitQuery
= radeonWaitQuery
;
233 int radeon_check_query_active(GLcontext
*ctx
, struct radeon_state_atom
*atom
)
235 radeonContextPtr radeon
= RADEON_CONTEXT(ctx
);
236 struct radeon_query_object
*query
= radeon
->query
.current
;
238 if (!query
|| query
->emitted_begin
)
240 return atom
->cmd_size
;
243 void radeon_emit_queryobj(GLcontext
*ctx
, struct radeon_state_atom
*atom
)
245 radeonContextPtr radeon
= RADEON_CONTEXT(ctx
);
246 BATCH_LOCALS(radeon
);
249 dwords
= (*atom
->check
) (ctx
, atom
);
251 BEGIN_BATCH_NO_AUTOSTATE(dwords
);
252 OUT_BATCH_TABLE(atom
->cmd
, dwords
);
255 radeon
->query
.current
->emitted_begin
= GL_TRUE
;