freedreno: add batch-cache and batch reordering
[mesa.git] / src / gallium / drivers / freedreno / freedreno_batch.c
/*
 * Copyright (C) 2016 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include "util/list.h"
#include "util/set.h"
#include "util/hash_table.h"
#include "util/u_string.h"

#include "freedreno_batch.h"
#include "freedreno_context.h"
#include "freedreno_resource.h"

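/* (Re)initialize the per-batch state: allocate the draw/binning/gmem
 * rings, clear the clear/restore/resolve tracking state, and reset the
 * maximal scissor bounds.  Used both for freshly created batches and
 * when recycling a batch after flush.
 */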
static void
batch_init(struct fd_batch *batch)
{
	struct fd_context *ctx = batch->ctx;
	unsigned size = 0;

	/* if kernel is too old to support unlimited # of cmd buffers, we
	 * have no option but to allocate large worst-case sizes so that
	 * we don't need to grow the ringbuffer.  Performance is likely to
	 * suffer, but there is no good alternative.
	 */
	if (fd_device_version(ctx->screen->dev) < FD_VERSION_UNLIMITED_CMDS) {
		size = 0x100000;
	}

	batch->draw = fd_ringbuffer_new(ctx->screen->pipe, size);
	batch->binning = fd_ringbuffer_new(ctx->screen->pipe, size);
	batch->gmem = fd_ringbuffer_new(ctx->screen->pipe, size);

	fd_ringbuffer_set_parent(batch->gmem, NULL);
	fd_ringbuffer_set_parent(batch->draw, batch->gmem);
	fd_ringbuffer_set_parent(batch->binning, batch->gmem);

	batch->cleared = batch->partial_cleared = 0;
	batch->restore = batch->resolve = 0;
	batch->needs_flush = false;
	batch->gmem_reason = 0;
	batch->num_draws = 0;

	/* reset maximal bounds: */
	batch->max_scissor.minx = batch->max_scissor.miny = ~0;
	batch->max_scissor.maxx = batch->max_scissor.maxy = 0;

	util_dynarray_init(&batch->draw_patches);

	if (is_a3xx(ctx->screen))
		util_dynarray_init(&batch->rbrc_patches);

	assert(batch->resources->entries == 0);
}

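/* Allocate a new batch with a single reference held by the caller.  The
 * 'resources' set tracks all resources read or written by the batch.
 */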
struct fd_batch *
fd_batch_create(struct fd_context *ctx)
{
	struct fd_batch *batch = CALLOC_STRUCT(fd_batch);

	if (!batch)
		return NULL;

	DBG("%p", batch);

	pipe_reference_init(&batch->reference, 1);
	batch->ctx = ctx;

	batch->resources = _mesa_set_create(NULL, _mesa_hash_pointer,
			_mesa_key_pointer_equal);

	batch_init(batch);

	return batch;
}

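/* Free the state allocated in batch_init().  Resource and dependency
 * references are dropped separately, via batch_reset_resources() and
 * batch_flush_reset_dependencies().
 */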
static void
batch_fini(struct fd_batch *batch)
{
	fd_ringbuffer_del(batch->draw);
	fd_ringbuffer_del(batch->binning);
	fd_ringbuffer_del(batch->gmem);

	util_dynarray_fini(&batch->draw_patches);

	if (is_a3xx(batch->ctx->screen))
		util_dynarray_fini(&batch->rbrc_patches);
}

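/* Drop the references this batch holds on the batches it depends on,
 * optionally flushing each of them first (needed when this batch itself
 * is flushed, since its rendering cannot be submitted before theirs).
 */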
static void
batch_flush_reset_dependencies(struct fd_batch *batch, bool flush)
{
	struct fd_batch_cache *cache = &batch->ctx->screen->batch_cache;
	struct fd_batch *dep;

	foreach_batch(dep, cache, batch->dependents_mask) {
		if (flush)
			fd_batch_flush(dep);
		fd_batch_reference(&dep, NULL);
	}

	batch->dependents_mask = 0;
}

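/* Detach all tracked resources from this batch: clear this batch's bit
 * in each resource's batch_mask, and drop the write_batch reference if
 * this batch was the writer.
 */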
static void
batch_reset_resources(struct fd_batch *batch)
{
	struct set_entry *entry;

	set_foreach(batch->resources, entry) {
		struct fd_resource *rsc = (struct fd_resource *)entry->key;
		_mesa_set_remove(batch->resources, entry);
		debug_assert(rsc->batch_mask & (1 << batch->idx));
		rsc->batch_mask &= ~(1 << batch->idx);
		if (rsc->write_batch == batch)
			fd_batch_reference(&rsc->write_batch, NULL);
	}
}

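/* Throw away any accumulated but unflushed rendering and return the
 * batch to its freshly-initialized state.
 */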
static void
batch_reset(struct fd_batch *batch)
{
	DBG("%p", batch);

	batch_flush_reset_dependencies(batch, false);
	batch_reset_resources(batch);

	batch_fini(batch);
	batch_init(batch);
}

void
fd_batch_reset(struct fd_batch *batch)
{
	if (batch->needs_flush)
		batch_reset(batch);
}

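/* Called when the last reference to the batch is dropped (via
 * fd_batch_reference()): remove it from the batch-cache and free all
 * of its state.
 */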
void
__fd_batch_destroy(struct fd_batch *batch)
{
	fd_bc_invalidate_batch(batch, true);

	DBG("%p", batch);

	util_copy_framebuffer_state(&batch->framebuffer, NULL);

	batch_fini(batch);

	batch_reset_resources(batch);
	debug_assert(batch->resources->entries == 0);
	_mesa_set_destroy(batch->resources, NULL);

	batch_flush_reset_dependencies(batch, false);
	debug_assert(batch->dependents_mask == 0);

	free(batch);
}

void
__fd_batch_describe(char* buf, const struct fd_batch *batch)
{
	util_sprintf(buf, "fd_batch<%u>", batch->seqno);
}

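/* Internal flush: submit any batches we depend on, kick off the gmem
 * rendering for this batch's cmdstream, and detach its resources.  The
 * context's current batch is recycled for further rendering; any other
 * batch is invalidated in the batch-cache.
 */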
static void
batch_flush(struct fd_batch *batch)
{
	DBG("%p: needs_flush=%d", batch, batch->needs_flush);

	if (!batch->needs_flush)
		return;

	batch->needs_flush = false;

	batch_flush_reset_dependencies(batch, true);

	fd_gmem_render_tiles(batch);

	batch_reset_resources(batch);

	debug_assert(batch->reference.count > 0);

	if (batch == batch->ctx->batch) {
		batch_reset(batch);
	} else {
		fd_bc_invalidate_batch(batch, false);
	}
}

void
fd_batch_flush(struct fd_batch *batch)
{
	/* NOTE: we need to hold an extra ref across the body of flush,
	 * since the last ref to this batch could be dropped when cleaning
	 * up batch->resources
	 */
	struct fd_batch *tmp = NULL;
	fd_batch_reference(&tmp, batch);
	batch_flush(tmp);
	fd_batch_reference(&tmp, NULL);
}

/* does 'batch' depend directly or indirectly on 'other' ? */
static bool
batch_depends_on(struct fd_batch *batch, struct fd_batch *other)
{
	struct fd_batch_cache *cache = &batch->ctx->screen->batch_cache;
	struct fd_batch *dep;

	if (batch->dependents_mask & (1 << other->idx))
		return true;

	/* recurse into our direct dependencies to find indirect ones: */
	foreach_batch(dep, cache, batch->dependents_mask)
		if (batch_depends_on(dep, other))
			return true;

	return false;
}

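/* Record that 'batch' depends on 'dep': takes a reference to 'dep',
 * which is dropped when the batch's dependencies are reset at flush
 * time.  If adding the edge would create a cycle, 'dep' is flushed
 * instead.
 */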
static void
batch_add_dep(struct fd_batch *batch, struct fd_batch *dep)
{
	if (batch->dependents_mask & (1 << dep->idx))
		return;

	/* if the new dependency already depends on us, we need to flush
	 * to avoid a loop in the dependency graph.
	 */
	if (batch_depends_on(dep, batch)) {
		DBG("%p: flush forced on %p!", batch, dep);
		fd_batch_flush(dep);
	} else {
		struct fd_batch *other = NULL;
		fd_batch_reference(&other, dep);
		batch->dependents_mask |= (1 << dep->idx);
		DBG("%p: added dependency on %p", batch, dep);
	}
}

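/* Mark 'rsc' as read or written by 'batch', establishing dependencies
 * on other batches that touch the same resource so that flushing one
 * batch also flushes everything that must come before it.
 */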
void
fd_batch_resource_used(struct fd_batch *batch, struct fd_resource *rsc, bool write)
{
	if (rsc->stencil)
		fd_batch_resource_used(batch, rsc->stencil, write);

	DBG("%p: %s %p", batch, write ? "write" : "read", rsc);

	/* note, invalidate write batch, to avoid further writes to rsc
	 * resulting in a write-after-read hazard.
	 */

	if (write) {
		/* if we are pending read or write by any other batch: */
		if (rsc->batch_mask != (1 << batch->idx)) {
			struct fd_batch_cache *cache = &batch->ctx->screen->batch_cache;
			struct fd_batch *dep;
			foreach_batch(dep, cache, rsc->batch_mask) {
				struct fd_batch *b = NULL;
				/* can't depend on ourself: */
				if (dep == batch)
					continue;
				/* note that batch_add_dep could flush and unref dep, so
				 * we need to hold a reference to keep it live for the
				 * fd_bc_invalidate_batch()
				 */
				fd_batch_reference(&b, dep);
				batch_add_dep(batch, b);
				fd_bc_invalidate_batch(b, false);
				fd_batch_reference(&b, NULL);
			}
		}
		fd_batch_reference(&rsc->write_batch, batch);
	} else {
		if (rsc->write_batch) {
			batch_add_dep(batch, rsc->write_batch);
			fd_bc_invalidate_batch(rsc->write_batch, false);
		}
	}

	if (rsc->batch_mask & (1 << batch->idx))
		return;

	debug_assert(!_mesa_set_search(batch->resources, rsc));

	_mesa_set_add(batch->resources, rsc);
	rsc->batch_mask |= (1 << batch->idx);
}

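/* On kernels too old to grow the ringbuffer, flush before the fixed-size
 * rings fill up (leaving some slack for the remaining cmdstream), or
 * unconditionally when the FD_DBG_FLUSH debug flag is set.
 */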
void
fd_batch_check_size(struct fd_batch *batch)
{
	if (fd_device_version(batch->ctx->screen->dev) >= FD_VERSION_UNLIMITED_CMDS)
		return;

	struct fd_ringbuffer *ring = batch->draw;
	if (((ring->cur - ring->start) > (ring->size/4 - 0x1000)) ||
			(fd_mesa_debug & FD_DBG_FLUSH))
		fd_batch_flush(batch);
}