Merge branch 'mesa_7_5_branch' into mesa_7_6_branch
[mesa.git] / src / mesa / drivers / dri / radeon / radeon_queryobj.c
1 /*
2 * Copyright © 2008-2009 Maciej Cencora <m.cencora@gmail.com>
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Maciej Cencora <m.cencora@gmail.com>
25 *
26 */
27 #include "radeon_common.h"
28 #include "radeon_queryobj.h"
29 #include "radeon_debug.h"
30
31 #include "main/imports.h"
32 #include "main/simple_list.h"
33
34 static int radeonQueryIsFlushed(GLcontext *ctx, struct gl_query_object *q)
35 {
36 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
37 struct radeon_query_object *tmp, *query = (struct radeon_query_object *)q;
38
39 foreach(tmp, &radeon->query.not_flushed_head) {
40 if (tmp == query) {
41 return 0;
42 }
43 }
44
45 return 1;
46 }
47
48 static void radeonQueryGetResult(GLcontext *ctx, struct gl_query_object *q)
49 {
50 struct radeon_query_object *query = (struct radeon_query_object *)q;
51 uint32_t *result;
52 int i;
53
54 radeon_print(RADEON_STATE, RADEON_VERBOSE,
55 "%s: query id %d, result %d\n",
56 __FUNCTION__, query->Base.Id, (int) query->Base.Result);
57
58 radeon_bo_map(query->bo, GL_FALSE);
59
60 result = query->bo->ptr;
61
62 query->Base.Result = 0;
63 for (i = 0; i < query->curr_offset/sizeof(uint32_t); ++i) {
64 query->Base.Result += result[i];
65 radeon_print(RADEON_STATE, RADEON_TRACE, "result[%d] = %d\n", i, result[i]);
66 }
67
68 radeon_bo_unmap(query->bo);
69 }
70
71 static struct gl_query_object * radeonNewQueryObject(GLcontext *ctx, GLuint id)
72 {
73 struct radeon_query_object *query;
74
75 query = _mesa_calloc(sizeof(struct radeon_query_object));
76
77 query->Base.Id = id;
78 query->Base.Result = 0;
79 query->Base.Active = GL_FALSE;
80 query->Base.Ready = GL_TRUE;
81
82 radeon_print(RADEON_STATE, RADEON_VERBOSE,"%s: query id %d\n", __FUNCTION__, query->Base.Id);
83
84 return &query->Base;
85 }
86
87 static void radeonDeleteQuery(GLcontext *ctx, struct gl_query_object *q)
88 {
89 struct radeon_query_object *query = (struct radeon_query_object *)q;
90
91 radeon_print(RADEON_STATE, RADEON_NORMAL, "%s: query id %d\n", __FUNCTION__, q->Id);
92
93 if (query->bo) {
94 radeon_bo_unref(query->bo);
95 }
96
97 _mesa_free(query);
98 }
99
100 static void radeonWaitQuery(GLcontext *ctx, struct gl_query_object *q)
101 {
102 struct radeon_query_object *query = (struct radeon_query_object *)q;
103
104 /* If the cmdbuf with packets for this query hasn't been flushed yet, do it now */
105 if (!radeonQueryIsFlushed(ctx, q))
106 ctx->Driver.Flush(ctx);
107
108 radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s: query id %d, bo %p, offset %d\n", __FUNCTION__, q->Id, query->bo, query->curr_offset);
109
110 radeonQueryGetResult(ctx, q);
111
112 query->Base.Ready = GL_TRUE;
113 }
114
115
116 static void radeonBeginQuery(GLcontext *ctx, struct gl_query_object *q)
117 {
118 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
119 struct radeon_query_object *query = (struct radeon_query_object *)q;
120
121 radeon_print(RADEON_STATE, RADEON_NORMAL, "%s: query id %d\n", __FUNCTION__, q->Id);
122
123 assert(radeon->query.current == NULL);
124
125 if (radeon->dma.flush)
126 radeon->dma.flush(radeon->glCtx);
127
128 if (!query->bo) {
129 query->bo = radeon_bo_open(radeon->radeonScreen->bom, 0, RADEON_QUERY_PAGE_SIZE, RADEON_QUERY_PAGE_SIZE, RADEON_GEM_DOMAIN_GTT, 0);
130 }
131 query->curr_offset = 0;
132
133 radeon->query.current = query;
134
135 radeon->query.queryobj.dirty = GL_TRUE;
136 radeon->hw.is_dirty = GL_TRUE;
137 insert_at_tail(&radeon->query.not_flushed_head, query);
138
139 }
140
141 void radeonEmitQueryEnd(GLcontext *ctx)
142 {
143 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
144 struct radeon_query_object *query = radeon->query.current;
145
146 if (!query)
147 return;
148
149 if (query->emitted_begin == GL_FALSE)
150 return;
151
152 radeon_print(RADEON_STATE, RADEON_NORMAL, "%s: query id %d, bo %p, offset %d\n", __FUNCTION__, query->Base.Id, query->bo, query->curr_offset);
153
154 radeon_cs_space_check_with_bo(radeon->cmdbuf.cs,
155 query->bo,
156 0, RADEON_GEM_DOMAIN_GTT);
157
158 radeon->vtbl.emit_query_finish(radeon);
159 }
160
161 static void radeonEndQuery(GLcontext *ctx, struct gl_query_object *q)
162 {
163 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
164
165 radeon_print(RADEON_STATE, RADEON_NORMAL, "%s: query id %d\n", __FUNCTION__, q->Id);
166
167 if (radeon->dma.flush)
168 radeon->dma.flush(radeon->glCtx);
169 radeonEmitQueryEnd(ctx);
170
171 radeon->query.current = NULL;
172 }
173
/* Non-blocking availability check (ctx->Driver.CheckQuery).
 *
 * With kernel memory management (KMS) we can test whether the GPU is still
 * writing the result BO; if it is idle, the result is read back and the
 * query marked ready.  Without KMS (or when DRM_RADEON_GEM_BUSY is not
 * available at build time) we fall back to the blocking wait path.
 */
static void radeonCheckQuery(GLcontext *ctx, struct gl_query_object *q)
{
	radeon_print(RADEON_STATE, RADEON_TRACE, "%s: query id %d\n", __FUNCTION__, q->Id);

#ifdef DRM_RADEON_GEM_BUSY
	radeonContextPtr radeon = RADEON_CONTEXT(ctx);

	if (radeon->radeonScreen->kernel_mm) {
		struct radeon_query_object *query = (struct radeon_query_object *)q;
		uint32_t domain;

		/* Need to perform a flush, as per ARB_occlusion_query spec */
		if (!radeonQueryIsFlushed(ctx, q)) {
			ctx->Driver.Flush(ctx);
		}

		/* 0 == not busy: the GPU has finished writing the result BO. */
		if (radeon_bo_is_busy(query->bo, &domain) == 0) {
			radeonQueryGetResult(ctx, q);
			query->Base.Ready = GL_TRUE;
		}
	} else {
		/* UMS path: no busy query available, block for the result. */
		radeonWaitQuery(ctx, q);
	}
#else
	radeonWaitQuery(ctx, q);
#endif
}
201
202 void radeonInitQueryObjFunctions(struct dd_function_table *functions)
203 {
204 functions->NewQueryObject = radeonNewQueryObject;
205 functions->DeleteQuery = radeonDeleteQuery;
206 functions->BeginQuery = radeonBeginQuery;
207 functions->EndQuery = radeonEndQuery;
208 functions->CheckQuery = radeonCheckQuery;
209 functions->WaitQuery = radeonWaitQuery;
210 }
211
212 int radeon_check_query_active(GLcontext *ctx, struct radeon_state_atom *atom)
213 {
214 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
215 struct radeon_query_object *query = radeon->query.current;
216
217 if (!query || query->emitted_begin)
218 return 0;
219 return atom->cmd_size;
220 }
221
/* State-atom emit callback: writes the query-begin command words into the
 * command stream and records that the begin packet has been emitted.
 *
 * NOTE(review): radeon->query.current is dereferenced unconditionally at
 * the end; this presumably relies on the atom only being emitted when
 * radeon_check_query_active returned non-zero (i.e. a current query
 * exists) — confirm against the state-atom emission loop.
 */
void radeon_emit_queryobj(GLcontext *ctx, struct radeon_state_atom *atom)
{
	radeonContextPtr radeon = RADEON_CONTEXT(ctx);
	BATCH_LOCALS(radeon);
	int dwords;

	/* The check callback reports how many command dwords to emit. */
	dwords = (*atom->check) (ctx, atom);

	BEGIN_BATCH_NO_AUTOSTATE(dwords);
	OUT_BATCH_TABLE(atom->cmd, dwords);
	END_BATCH();

	/* From now on, radeonEmitQueryEnd may emit the matching end packet. */
	radeon->query.current->emitted_begin = GL_TRUE;
}