freedreno: Associate the acc query bo with the batch.
[mesa.git] / src / gallium / drivers / freedreno / freedreno_query_acc.c
1 /*
2 * Copyright (C) 2017 Rob Clark <robclark@freedesktop.org>
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 *
23 * Authors:
24 * Rob Clark <robclark@freedesktop.org>
25 */
26
27 #include "util/u_memory.h"
28 #include "util/u_inlines.h"
29
30 #include "freedreno_query_acc.h"
31 #include "freedreno_context.h"
32 #include "freedreno_resource.h"
33 #include "freedreno_util.h"
34
35
36 static bool
37 is_active(struct fd_acc_query *aq, enum fd_render_stage stage)
38 {
39 return !!(aq->provider->active & stage);
40 }
41
42 static void
43 fd_acc_destroy_query(struct fd_context *ctx, struct fd_query *q)
44 {
45 struct fd_acc_query *aq = fd_acc_query(q);
46
47 DBG("%p", q);
48
49 pipe_resource_reference(&aq->prsc, NULL);
50 list_del(&aq->node);
51
52 free(aq->query_data);
53 free(aq);
54 }
55
56 static void
57 realloc_query_bo(struct fd_context *ctx, struct fd_acc_query *aq)
58 {
59 struct fd_resource *rsc;
60 void *map;
61
62 pipe_resource_reference(&aq->prsc, NULL);
63
64 aq->prsc = pipe_buffer_create(&ctx->screen->base,
65 PIPE_BIND_QUERY_BUFFER, 0, 0x1000);
66
67 /* don't assume the buffer is zero-initialized: */
68 rsc = fd_resource(aq->prsc);
69
70 fd_bo_cpu_prep(rsc->bo, ctx->pipe, DRM_FREEDRENO_PREP_WRITE);
71
72 map = fd_bo_map(rsc->bo);
73 memset(map, 0, aq->size);
74 fd_bo_cpu_fini(rsc->bo);
75 }
76
77 static void
78 fd_acc_query_pause(struct fd_acc_query *aq)
79 {
80 const struct fd_acc_sample_provider *p = aq->provider;
81
82 if (!aq->batch)
83 return;
84
85 p->pause(aq, aq->batch);
86 aq->batch = NULL;
87 }
88
89 static void
90 fd_acc_query_resume(struct fd_acc_query *aq, struct fd_batch *batch)
91 {
92 const struct fd_acc_sample_provider *p = aq->provider;
93
94 aq->batch = batch;
95 p->resume(aq, aq->batch);
96
97 fd_batch_resource_used(batch, fd_resource(aq->prsc), true);
98 }
99
100 static void
101 fd_acc_begin_query(struct fd_context *ctx, struct fd_query *q)
102 {
103 struct fd_acc_query *aq = fd_acc_query(q);
104
105 DBG("%p", q);
106
107 /* ->begin_query() discards previous results, so realloc bo: */
108 realloc_query_bo(ctx, aq);
109
110 /* Signal that we need to update the active queries on the next draw */
111 ctx->update_active_queries = true;
112
113 /* add to active list: */
114 assert(list_is_empty(&aq->node));
115 list_addtail(&aq->node, &ctx->acc_active_queries);
116
117 /* TIMESTAMP/GPU_FINISHED and don't do normal bracketing at draw time, we
118 * need to just emit the capture at this moment.
119 */
120 if (skip_begin_query(q->type))
121 fd_acc_query_resume(aq, fd_context_batch(ctx));
122 }
123
124 static void
125 fd_acc_end_query(struct fd_context *ctx, struct fd_query *q)
126 {
127 struct fd_acc_query *aq = fd_acc_query(q);
128
129 DBG("%p", q);
130
131 fd_acc_query_pause(aq);
132
133 /* remove from active list: */
134 list_delinit(&aq->node);
135 }
136
/* Read back the query result from the bo.
 *
 * With wait==false this avoids blocking: it first bails (returning
 * false) if the result bo still has pending batches, and then does a
 * NOSYNC cpu-prep to bail if the GPU hasn't finished writing.  With
 * wait==true it flushes any writing batch and blocks until the result
 * is ready.
 *
 * Returns true iff *result was filled in.
 */
static bool
fd_acc_get_query_result(struct fd_context *ctx, struct fd_query *q,
		bool wait, union pipe_query_result *result)
{
	struct fd_acc_query *aq = fd_acc_query(q);
	const struct fd_acc_sample_provider *p = aq->provider;
	struct fd_resource *rsc = fd_resource(aq->prsc);

	DBG("%p: wait=%d", q, wait);

	/* query must have been ended (removed from active list) first: */
	assert(list_is_empty(&aq->node));

	/* if !wait, then check the last sample (the one most likely to
	 * not be ready yet) and bail if it is not ready:
	 */
	if (!wait) {
		int ret;

		if (pending(rsc, false)) {
			/* piglit spec@arb_occlusion_query@occlusion_query_conform
			 * test, and silly apps perhaps, get stuck in a loop trying
			 * to get query result forever with wait==false.. we don't
			 * wait to flush unnecessarily but we also don't want to
			 * spin forever:
			 */
			if (aq->no_wait_cnt++ > 5)
				fd_batch_flush(rsc->write_batch);
			return false;
		}

		/* non-blocking readiness check on the bo itself: */
		ret = fd_bo_cpu_prep(rsc->bo, ctx->pipe,
				DRM_FREEDRENO_PREP_READ | DRM_FREEDRENO_PREP_NOSYNC);
		if (ret)
			return false;

		fd_bo_cpu_fini(rsc->bo);
	}

	/* make sure any batch still writing the result bo is submitted: */
	if (rsc->write_batch)
		fd_batch_flush(rsc->write_batch);

	/* get the result: */
	fd_bo_cpu_prep(rsc->bo, ctx->pipe, DRM_FREEDRENO_PREP_READ);

	void *ptr = fd_bo_map(rsc->bo);
	p->result(aq, ptr, result);
	fd_bo_cpu_fini(rsc->bo);

	return true;
}
187
/* vtable shared by all accumulated-sample queries: */
static const struct fd_query_funcs acc_query_funcs = {
		.destroy_query = fd_acc_destroy_query,
		.begin_query = fd_acc_begin_query,
		.end_query = fd_acc_end_query,
		.get_query_result = fd_acc_get_query_result,
};
194
195 struct fd_query *
196 fd_acc_create_query2(struct fd_context *ctx, unsigned query_type,
197 unsigned index, const struct fd_acc_sample_provider *provider)
198 {
199 struct fd_acc_query *aq;
200 struct fd_query *q;
201
202 aq = CALLOC_STRUCT(fd_acc_query);
203 if (!aq)
204 return NULL;
205
206 DBG("%p: query_type=%u", aq, query_type);
207
208 aq->provider = provider;
209 aq->size = provider->size;
210
211 list_inithead(&aq->node);
212
213 q = &aq->base;
214 q->funcs = &acc_query_funcs;
215 q->type = query_type;
216 q->index = index;
217
218 return q;
219 }
220
221 struct fd_query *
222 fd_acc_create_query(struct fd_context *ctx, unsigned query_type,
223 unsigned index)
224 {
225 int idx = pidx(query_type);
226
227 if ((idx < 0) || !ctx->acc_sample_providers[idx])
228 return NULL;
229
230 return fd_acc_create_query2(ctx, query_type, index,
231 ctx->acc_sample_providers[idx]);
232 }
233
234 /* Called at clear/draw/blit time to enable/disable the appropriate queries in
235 * the batch (and transfer active querying between batches in the case of
236 * batch reordering).
237 */
238 void
239 fd_acc_query_set_stage(struct fd_batch *batch, enum fd_render_stage stage)
240 {
241 struct fd_context *ctx = batch->ctx;
242
243 if (stage != batch->stage || ctx->update_active_queries) {
244 struct fd_acc_query *aq;
245 LIST_FOR_EACH_ENTRY(aq, &ctx->acc_active_queries, node) {
246 bool batch_change = aq->batch != batch;
247 bool was_active = aq->batch != NULL;
248 bool now_active = is_active(aq, stage);
249
250 if (was_active && (!now_active || batch_change))
251 fd_acc_query_pause(aq);
252 if (now_active && (!was_active || batch_change))
253 fd_acc_query_resume(aq, batch);
254 }
255 }
256
257 ctx->update_active_queries = false;
258 }
259
260 void
261 fd_acc_query_register_provider(struct pipe_context *pctx,
262 const struct fd_acc_sample_provider *provider)
263 {
264 struct fd_context *ctx = fd_context(pctx);
265 int idx = pidx(provider->query_type);
266
267 assert((0 <= idx) && (idx < MAX_HW_SAMPLE_PROVIDERS));
268 assert(!ctx->acc_sample_providers[idx]);
269
270 ctx->acc_sample_providers[idx] = provider;
271 }