gallium: make pipe_context::begin_query return a boolean
mesa.git: src/gallium/drivers/freedreno/freedreno_query_hw.c
/* -*- mode: C; c-file-style: "k&r"; tab-width 4; indent-tabs-mode: t; -*- */

/*
 * Copyright (C) 2014 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include "pipe/p_state.h"
#include "util/u_memory.h"
#include "util/u_inlines.h"

#include "freedreno_query_hw.h"
#include "freedreno_context.h"
#include "freedreno_util.h"

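/* A sample period is a start/end pair of samples bracketing a span of
 * cmdstream in which the query was active: resume_query() opens a
 * period and pause_query() closes it, and at result time the deltas
 * of all completed periods are accumulated.
 */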
struct fd_hw_sample_period {
	struct fd_hw_sample *start, *end;
	struct list_head list;
};

/* maps query_type to sample provider idx: */
static int pidx(unsigned query_type)
{
	switch (query_type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
		return 0;
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		return 1;
	default:
		return -1;
	}
}

static struct fd_hw_sample *
get_sample(struct fd_context *ctx, struct fd_ringbuffer *ring,
		unsigned query_type)
{
	struct fd_hw_sample *samp = NULL;
	int idx = pidx(query_type);

	/* a query with an unsupported type would never have been created
	 * (see fd_hw_create_query()), so idx must be valid here:
	 */
	assert(idx >= 0);

	if (!ctx->sample_cache[idx]) {
		ctx->sample_cache[idx] =
			ctx->sample_providers[idx]->get_sample(ctx, ring);
	}

	fd_hw_sample_reference(ctx, &samp, ctx->sample_cache[idx]);

	return samp;
}
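
/* Note: only one sample per provider is created at a given point in the
 * cmdstream; get_sample() hands out references to the cached sample so
 * all queries of the same type sampled at that point share it.  The
 * cache is invalidated by clear_sample_cache() on render-stage changes.
 */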

static void
clear_sample_cache(struct fd_context *ctx)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ctx->sample_cache); i++)
		fd_hw_sample_reference(ctx, &ctx->sample_cache[i], NULL);
}

static bool
is_active(struct fd_hw_query *hq, enum fd_render_stage stage)
{
	return !!(hq->provider->active & stage);
}

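/* Resume/pause open and close sample periods as the context enters and
 * leaves stages in which the query's provider is active.  The currently
 * open period lives in hq->period; once closed it moves to
 * hq->current_periods, and from there to hq->periods when the batch's
 * sample storage is assigned in prepare_query().
 */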
static void
resume_query(struct fd_context *ctx, struct fd_hw_query *hq,
		struct fd_ringbuffer *ring)
{
	assert(!hq->period);
	hq->period = util_slab_alloc(&ctx->sample_period_pool);
	list_inithead(&hq->period->list);
	hq->period->start = get_sample(ctx, ring, hq->base.type);
	/* NOTE: util_slab_alloc() does not zero out the buffer: */
	hq->period->end = NULL;
}

static void
pause_query(struct fd_context *ctx, struct fd_hw_query *hq,
		struct fd_ringbuffer *ring)
{
	assert(hq->period && !hq->period->end);
	hq->period->end = get_sample(ctx, ring, hq->base.type);
	list_addtail(&hq->period->list, &hq->current_periods);
	hq->period = NULL;
}

static void
destroy_periods(struct fd_context *ctx, struct list_head *list)
{
	struct fd_hw_sample_period *period, *s;
	LIST_FOR_EACH_ENTRY_SAFE(period, s, list, list) {
		fd_hw_sample_reference(ctx, &period->start, NULL);
		fd_hw_sample_reference(ctx, &period->end, NULL);
		list_del(&period->list);
		util_slab_free(&ctx->sample_period_pool, period);
	}
}

static void
fd_hw_destroy_query(struct fd_context *ctx, struct fd_query *q)
{
	struct fd_hw_query *hq = fd_hw_query(q);

	destroy_periods(ctx, &hq->periods);
	destroy_periods(ctx, &hq->current_periods);
	list_del(&hq->list);

	free(hq);
}

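/* hq->list links the query onto either ctx->active_queries (between
 * begin_query() and end_query()) or ctx->current_queries (ended, but
 * with periods still waiting to be assigned storage in the current
 * batch; see prepare_queries()).
 */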
static boolean
fd_hw_begin_query(struct fd_context *ctx, struct fd_query *q)
{
	struct fd_hw_query *hq = fd_hw_query(q);
	if (q->active)
		return false;

	/* begin_query() should clear previous results: */
	destroy_periods(ctx, &hq->periods);

	if (is_active(hq, ctx->stage))
		resume_query(ctx, hq, ctx->ring);

	q->active = true;

	/* add to active list: */
	list_del(&hq->list);
	list_addtail(&hq->list, &ctx->active_queries);
	return true;
}

static void
fd_hw_end_query(struct fd_context *ctx, struct fd_query *q)
{
	struct fd_hw_query *hq = fd_hw_query(q);
	if (!q->active)
		return;
	if (is_active(hq, ctx->stage))
		pause_query(ctx, hq, ctx->ring);
	q->active = false;
	/* move to current list: */
	list_del(&hq->list);
	list_addtail(&hq->list, &ctx->current_queries);
}

/* helper to get ptr to specified sample: */
static void * sampptr(struct fd_hw_sample *samp, uint32_t n, void *ptr)
{
	return ((char *)ptr) + (samp->tile_stride * n) + samp->offset;
}
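
/* The query bo is logically an array of per-tile slices, each
 * tile_stride bytes, with every sample at a fixed offset within its
 * slice.  So the copy of a sample for tile n lives at:
 *
 *    (samp->tile_stride * n) + samp->offset
 *
 * which is exactly what sampptr() computes above.
 */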

static boolean
fd_hw_get_query_result(struct fd_context *ctx, struct fd_query *q,
		boolean wait, union pipe_query_result *result)
{
	struct fd_hw_query *hq = fd_hw_query(q);
	const struct fd_hw_sample_provider *p = hq->provider;
	struct fd_hw_sample_period *period;

	if (q->active)
		return false;

	/* if the app tries to read back the query result before the
	 * batch is submitted, that forces us to flush so that there
	 * are actually results to wait for:
	 */
	if (!LIST_IS_EMPTY(&hq->list)) {
		/* if app didn't actually trigger any cmdstream, then
		 * we have nothing to do:
		 */
		if (!ctx->needs_flush)
			return true;
		DBG("reading query result forces flush!");
		fd_context_render(&ctx->base);
	}

	util_query_clear_result(result, q->type);

	if (LIST_IS_EMPTY(&hq->periods))
		return true;

	assert(LIST_IS_EMPTY(&hq->list));
	assert(LIST_IS_EMPTY(&hq->current_periods));
	assert(!hq->period);

	/* if !wait, then check the last sample (the one most likely to
	 * not be ready yet) and bail if it is not ready:
	 */
	if (!wait) {
		int ret;

		period = LIST_ENTRY(struct fd_hw_sample_period,
				hq->periods.prev, list);

		ret = fd_bo_cpu_prep(period->end->bo, ctx->screen->pipe,
				DRM_FREEDRENO_PREP_READ | DRM_FREEDRENO_PREP_NOSYNC);
		if (ret)
			return false;

		fd_bo_cpu_fini(period->end->bo);
	}

	/* sum the result across all sample periods: */
	LIST_FOR_EACH_ENTRY(period, &hq->periods, list) {
		struct fd_hw_sample *start = period->start;
		struct fd_hw_sample *end = period->end;
		unsigned i;

		/* start and end samples should be from same batch: */
		assert(start->bo == end->bo);
		assert(start->num_tiles == end->num_tiles);

		for (i = 0; i < start->num_tiles; i++) {
			void *ptr;

			fd_bo_cpu_prep(start->bo, ctx->screen->pipe,
					DRM_FREEDRENO_PREP_READ);

			ptr = fd_bo_map(start->bo);

			p->accumulate_result(ctx, sampptr(period->start, i, ptr),
					sampptr(period->end, i, ptr), result);

			fd_bo_cpu_fini(start->bo);
		}
	}

	return true;
}

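/* vtable plugged into the generic fd_query; the pipe_context query
 * hooks dispatch through these.  Note that begin_query returns a
 * boolean so that attempting to begin an already-active query can be
 * reported back as a failure.
 */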
static const struct fd_query_funcs hw_query_funcs = {
		.destroy_query = fd_hw_destroy_query,
		.begin_query = fd_hw_begin_query,
		.end_query = fd_hw_end_query,
		.get_query_result = fd_hw_get_query_result,
};

struct fd_query *
fd_hw_create_query(struct fd_context *ctx, unsigned query_type)
{
	struct fd_hw_query *hq;
	struct fd_query *q;
	int idx = pidx(query_type);

	if ((idx < 0) || !ctx->sample_providers[idx])
		return NULL;

	hq = CALLOC_STRUCT(fd_hw_query);
	if (!hq)
		return NULL;

	hq->provider = ctx->sample_providers[idx];

	list_inithead(&hq->periods);
	list_inithead(&hq->current_periods);
	list_inithead(&hq->list);

	q = &hq->base;
	q->funcs = &hw_query_funcs;
	q->type = query_type;

	return q;
}
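
/* Allocate a sample for the current batch.  Only the size and the
 * offset within the per-tile slice are known at this point; the
 * backing bo, num_tiles, and tile_stride are filled in later by
 * prepare_sample() once fd_hw_query_prepare() knows the batch's total
 * storage requirements.
 */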
struct fd_hw_sample *
fd_hw_sample_init(struct fd_context *ctx, uint32_t size)
{
	struct fd_hw_sample *samp = util_slab_alloc(&ctx->sample_pool);
	pipe_reference_init(&samp->reference, 1);
	samp->size = size;
	samp->offset = ctx->next_sample_offset;
	/* NOTE: util_slab_alloc() does not zero out the buffer: */
	samp->bo = NULL;
	samp->num_tiles = 0;
	samp->tile_stride = 0;
	ctx->next_sample_offset += size;
	return samp;
}

void
__fd_hw_sample_destroy(struct fd_context *ctx, struct fd_hw_sample *samp)
{
	if (samp->bo)
		fd_bo_del(samp->bo);
	util_slab_free(&ctx->sample_pool, samp);
}

static void
prepare_sample(struct fd_hw_sample *samp, struct fd_bo *bo,
		uint32_t num_tiles, uint32_t tile_stride)
{
	if (samp->bo) {
		assert(samp->bo == bo);
		assert(samp->num_tiles == num_tiles);
		assert(samp->tile_stride == tile_stride);
		return;
	}
	/* the sample needs its own reference, since it can outlive the
	 * batch (ctx->query_bo drops its reference at the next
	 * fd_hw_query_prepare()) and __fd_hw_sample_destroy() deletes it:
	 */
	samp->bo = fd_bo_ref(bo);
	samp->num_tiles = num_tiles;
	samp->tile_stride = tile_stride;
}

static void
prepare_query(struct fd_hw_query *hq, struct fd_bo *bo,
		uint32_t num_tiles, uint32_t tile_stride)
{
	struct fd_hw_sample_period *period, *s;

	/* prepare all the samples in the query: */
	LIST_FOR_EACH_ENTRY_SAFE(period, s, &hq->current_periods, list) {
		prepare_sample(period->start, bo, num_tiles, tile_stride);
		prepare_sample(period->end, bo, num_tiles, tile_stride);

		/* move from current_periods list to periods list: */
		list_del(&period->list);
		list_addtail(&period->list, &hq->periods);
	}
}

static void
prepare_queries(struct fd_context *ctx, struct fd_bo *bo,
		uint32_t num_tiles, uint32_t tile_stride,
		struct list_head *list, bool remove)
{
	struct fd_hw_query *hq, *s;
	LIST_FOR_EACH_ENTRY_SAFE(hq, s, list, list) {
		prepare_query(hq, bo, num_tiles, tile_stride);
		if (remove)
			list_delinit(&hq->list);
	}
}

/* called from gmem code once total storage requirements are known (ie.
 * number of samples times number of tiles)
 */
void
fd_hw_query_prepare(struct fd_context *ctx, uint32_t num_tiles)
{
	uint32_t tile_stride = ctx->next_sample_offset;
	struct fd_bo *bo;

	if (ctx->query_bo)
		fd_bo_del(ctx->query_bo);

	if (tile_stride > 0) {
		bo = fd_bo_new(ctx->dev, tile_stride * num_tiles,
				DRM_FREEDRENO_GEM_CACHE_WCOMBINE |
				DRM_FREEDRENO_GEM_TYPE_KMEM);
	} else {
		bo = NULL;
	}

	ctx->query_bo = bo;
	ctx->query_tile_stride = tile_stride;

	prepare_queries(ctx, bo, num_tiles, tile_stride,
			&ctx->active_queries, false);
	prepare_queries(ctx, bo, num_tiles, tile_stride,
			&ctx->current_queries, true);

	/* reset things for next batch: */
	ctx->next_sample_offset = 0;
}
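
/* Called per tile: HW_QUERY_BASE_REG is expected to hold the base
 * address of the current tile's slice of the query bo, so the sample
 * providers' cmdstream can compute individual sample addresses relative
 * to it, matching the layout that sampptr() uses on readback.
 */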
void
fd_hw_query_prepare_tile(struct fd_context *ctx, uint32_t n,
		struct fd_ringbuffer *ring)
{
	uint32_t tile_stride = ctx->query_tile_stride;
	uint32_t offset = tile_stride * n;

	/* bail if no queries: */
	if (tile_stride == 0)
		return;

	fd_wfi(ctx, ring);
	OUT_PKT0(ring, HW_QUERY_BASE_REG, 1);
	OUT_RELOCW(ring, ctx->query_bo, offset, 0, 0);
}

void
fd_hw_query_set_stage(struct fd_context *ctx, struct fd_ringbuffer *ring,
		enum fd_render_stage stage)
{
	/* special case: internal blits (like mipmap level generation)
	 * go through normal draw path (via util_blitter_blit()).. but
	 * we need to ignore the FD_STAGE_DRAW which will be set, so we
	 * don't enable queries which should be paused during internal
	 * blits:
	 */
	if ((ctx->stage == FD_STAGE_BLIT) &&
			(stage != FD_STAGE_NULL))
		return;

	if (stage != ctx->stage) {
		struct fd_hw_query *hq;
		LIST_FOR_EACH_ENTRY(hq, &ctx->active_queries, list) {
			bool was_active = is_active(hq, ctx->stage);
			bool now_active = is_active(hq, stage);

			if (now_active && !was_active)
				resume_query(ctx, hq, ring);
			else if (was_active && !now_active)
				pause_query(ctx, hq, ring);
		}
	}
	clear_sample_cache(ctx);
	ctx->stage = stage;
}

void
fd_hw_query_register_provider(struct pipe_context *pctx,
		const struct fd_hw_sample_provider *provider)
{
	struct fd_context *ctx = fd_context(pctx);
	int idx = pidx(provider->query_type);

	assert((0 <= idx) && (idx < MAX_HW_SAMPLE_PROVIDERS));
	assert(!ctx->sample_providers[idx]);

	ctx->sample_providers[idx] = provider;
}
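
/* Example (sketch): a hardware backend registers one provider per
 * supported query type, roughly like below.  The callback names here
 * are hypothetical; the real implementations are per-generation and
 * emit the hw-specific cmdstream to capture a sample:
 *
 *	static const struct fd_hw_sample_provider occlusion_counter = {
 *			.query_type = PIPE_QUERY_OCCLUSION_COUNTER,
 *			.active = FD_STAGE_DRAW,
 *			.get_sample = occlusion_get_sample,
 *			.accumulate_result = occlusion_accumulate_result,
 *	};
 *
 *	fd_hw_query_register_provider(pctx, &occlusion_counter);
 */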

void
fd_hw_query_init(struct pipe_context *pctx)
{
	struct fd_context *ctx = fd_context(pctx);

	util_slab_create(&ctx->sample_pool, sizeof(struct fd_hw_sample),
			16, UTIL_SLAB_SINGLETHREADED);
	util_slab_create(&ctx->sample_period_pool, sizeof(struct fd_hw_sample_period),
			16, UTIL_SLAB_SINGLETHREADED);
	list_inithead(&ctx->active_queries);
	list_inithead(&ctx->current_queries);
}

void
fd_hw_query_fini(struct pipe_context *pctx)
{
	struct fd_context *ctx = fd_context(pctx);

	util_slab_destroy(&ctx->sample_pool);
	util_slab_destroy(&ctx->sample_period_pool);
}