3ce36ad142acc8c9dfa14e8c5fca6fc9f4d49b04
[mesa.git] / src / freedreno / drm / msm_ringbuffer.c
1 /*
2 * Copyright (C) 2012-2018 Rob Clark <robclark@freedesktop.org>
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 *
23 * Authors:
24 * Rob Clark <robclark@freedesktop.org>
25 */
26
27 #include <assert.h>
28 #include <inttypes.h>
29
30 #include "util/hash_table.h"
31 #include "util/set.h"
32 #include "util/slab.h"
33
34 #include "drm/freedreno_ringbuffer.h"
35 #include "msm_priv.h"
36
37 /* The legacy implementation of submit/ringbuffer, which still does the
38 * traditional reloc and cmd tracking
39 */
40
41
42 #define INIT_SIZE 0x1000
43
44
struct msm_submit {
	struct fd_submit base;

	/* Parallel bo tables, kept in lockstep (same idx in both arrays
	 * refers to the same bo): submit_bos is what gets passed to the
	 * SUBMIT ioctl, bos holds the refs we need to drop afterwards.
	 */
	DECLARE_ARRAY(struct drm_msm_gem_submit_bo, submit_bos);
	DECLARE_ARRAY(struct fd_bo *, bos);

	/* maps fd_bo to idx in bos table: */
	struct hash_table *bo_table;

	/* pool backing the msm_ringbuffer's owned by this submit: */
	struct slab_mempool ring_pool;

	/* hash-set of associated rings: */
	struct set *ring_set;

	/* the FD_RINGBUFFER_PRIMARY ring; at most one per submit: */
	struct fd_ringbuffer *primary;

	/* Allow for sub-allocation of stateobj ring buffers (ie. sharing
	 * the same underlying bo)..
	 *
	 * We also rely on previous stateobj having been fully constructed
	 * so we can reclaim extra space at it's end.
	 */
	struct fd_ringbuffer *suballoc_ring;
};
FD_DEFINE_CAST(fd_submit, msm_submit);
70
/* for FD_RINGBUFFER_GROWABLE rb's, tracks the 'finalized' cmdstream buffers
 * and sizes.  Ie. a finalized buffer can have no more commands appended to
 * it.
 */
struct msm_cmd {
	struct fd_bo *ring_bo;   /* cmdstream bo (reference held) */
	unsigned size;           /* emitted bytes; set when the cmd is finalized */
	DECLARE_ARRAY(struct drm_msm_gem_submit_reloc, relocs);
};
80
81 static struct msm_cmd *
82 cmd_new(struct fd_bo *ring_bo)
83 {
84 struct msm_cmd *cmd = malloc(sizeof(*cmd));
85 cmd->ring_bo = fd_bo_ref(ring_bo);
86 cmd->size = 0;
87 cmd->nr_relocs = cmd->max_relocs = 0;
88 cmd->relocs = NULL;
89 return cmd;
90 }
91
92 static void
93 cmd_free(struct msm_cmd *cmd)
94 {
95 fd_bo_del(cmd->ring_bo);
96 free(cmd->relocs);
97 free(cmd);
98 }
99
/* for _FD_RINGBUFFER_OBJECT rb's we need to track the bo's and flags to
 * later copy into the submit when the stateobj rb is later referenced by
 * a regular rb:
 */
struct msm_reloc_bo {
	struct fd_bo *bo;     /* referenced bo (reference held) */
	unsigned flags;       /* fd_reloc::flags, ie. FD_RELOC_* namespace */
};
108
struct msm_ringbuffer {
	struct fd_ringbuffer base;

	/* for FD_RINGBUFFER_STREAMING rb's which are sub-allocated */
	unsigned offset;

	/* NOTE: the two arms of this union alias each other; which arm is
	 * valid depends on whether base.flags has _FD_RINGBUFFER_OBJECT:
	 */
	union {
		/* for _FD_RINGBUFFER_OBJECT case: */
		struct {
			struct fd_pipe *pipe;
			DECLARE_ARRAY(struct msm_reloc_bo, reloc_bos);
			struct set *ring_set;
		};
		/* for other cases: */
		struct {
			struct fd_submit *submit;
			DECLARE_ARRAY(struct msm_cmd *, cmds);
		};
	} u;

	struct msm_cmd *cmd;      /* current cmd */
	struct fd_bo *ring_bo;    /* bo backing the current cmd */
};
FD_DEFINE_CAST(fd_ringbuffer, msm_ringbuffer);
133
134 static void finalize_current_cmd(struct fd_ringbuffer *ring);
135 static struct fd_ringbuffer * msm_ringbuffer_init(
136 struct msm_ringbuffer *msm_ring,
137 uint32_t size, enum fd_ringbuffer_flags flags);
138
/* add (if needed) bo to submit and return index: */
static uint32_t
append_bo(struct msm_submit *submit, struct fd_bo *bo, uint32_t flags)
{
	struct msm_bo *msm_bo = to_msm_bo(bo);
	uint32_t idx;

	/* NOTE: it is legal to use the same bo on different threads for
	 * different submits.  But it is not legal to use the same submit
	 * from given threads.
	 */
	idx = READ_ONCE(msm_bo->idx);

	/* The per-bo cached idx is only a hint (another thread's submit may
	 * have overwritten it), so validate it against this submit's table
	 * before trusting it; fall back to the hash table on mismatch:
	 */
	if (unlikely((idx >= submit->nr_submit_bos) ||
			(submit->submit_bos[idx].handle != bo->handle))) {
		uint32_t hash = _mesa_hash_pointer(bo);
		struct hash_entry *entry;

		entry = _mesa_hash_table_search_pre_hashed(submit->bo_table, hash, bo);
		if (entry) {
			/* found */
			idx = (uint32_t)(uintptr_t)entry->data;
		} else {
			/* submit_bos and bos grow in lockstep, so both APPEND()s
			 * yield the same index:
			 */
			idx = APPEND(submit, submit_bos);
			idx = APPEND(submit, bos);

			submit->submit_bos[idx].flags = 0;
			submit->submit_bos[idx].handle = bo->handle;
			submit->submit_bos[idx].presumed = 0;

			submit->bos[idx] = fd_bo_ref(bo);

			_mesa_hash_table_insert_pre_hashed(submit->bo_table, hash, bo,
					(void *)(uintptr_t)idx);
		}
		/* refresh the cached hint for the next lookup: */
		msm_bo->idx = idx;
	}

	/* accumulate read/write usage onto the submit_bo entry: */
	if (flags & FD_RELOC_READ)
		submit->submit_bos[idx].flags |= MSM_SUBMIT_BO_READ;
	if (flags & FD_RELOC_WRITE)
		submit->submit_bos[idx].flags |= MSM_SUBMIT_BO_WRITE;

	return idx;
}
184
185 static void
186 append_ring(struct set *set, struct fd_ringbuffer *ring)
187 {
188 uint32_t hash = _mesa_hash_pointer(ring);
189
190 if (!_mesa_set_search_pre_hashed(set, hash, ring)) {
191 fd_ringbuffer_ref(ring);
192 _mesa_set_add_pre_hashed(set, hash, ring);
193 }
194 }
195
196 static void
197 msm_submit_suballoc_ring_bo(struct fd_submit *submit,
198 struct msm_ringbuffer *msm_ring, uint32_t size)
199 {
200 struct msm_submit *msm_submit = to_msm_submit(submit);
201 unsigned suballoc_offset = 0;
202 struct fd_bo *suballoc_bo = NULL;
203
204 if (msm_submit->suballoc_ring) {
205 struct msm_ringbuffer *suballoc_ring =
206 to_msm_ringbuffer(msm_submit->suballoc_ring);
207
208 suballoc_bo = suballoc_ring->ring_bo;
209 suballoc_offset = fd_ringbuffer_size(msm_submit->suballoc_ring) +
210 suballoc_ring->offset;
211
212 suballoc_offset = align(suballoc_offset, 0x10);
213
214 if ((size + suballoc_offset) > suballoc_bo->size) {
215 suballoc_bo = NULL;
216 }
217 }
218
219 if (!suballoc_bo) {
220 // TODO possibly larger size for streaming bo?
221 msm_ring->ring_bo = fd_bo_new_ring(
222 submit->pipe->dev, 0x8000, 0);
223 msm_ring->offset = 0;
224 } else {
225 msm_ring->ring_bo = fd_bo_ref(suballoc_bo);
226 msm_ring->offset = suballoc_offset;
227 }
228
229 struct fd_ringbuffer *old_suballoc_ring = msm_submit->suballoc_ring;
230
231 msm_submit->suballoc_ring = fd_ringbuffer_ref(&msm_ring->base);
232
233 if (old_suballoc_ring)
234 fd_ringbuffer_del(old_suballoc_ring);
235 }
236
/* Create a new rb owned by this submit.  STREAMING rb's are sub-allocated
 * out of a shared bo; GROWABLE rb's start at INIT_SIZE and grow on demand;
 * at most one PRIMARY rb may be created per submit.
 */
static struct fd_ringbuffer *
msm_submit_new_ringbuffer(struct fd_submit *submit, uint32_t size,
		enum fd_ringbuffer_flags flags)
{
	struct msm_submit *msm_submit = to_msm_submit(submit);
	struct msm_ringbuffer *msm_ring;

	msm_ring = slab_alloc_st(&msm_submit->ring_pool);

	msm_ring->u.submit = submit;

	/* NOTE: needs to be before _suballoc_ring_bo() since it could
	 * increment the refcnt of the current ring
	 */
	msm_ring->base.refcnt = 1;

	if (flags & FD_RINGBUFFER_STREAMING) {
		msm_submit_suballoc_ring_bo(submit, msm_ring, size);
	} else {
		/* growable rb's ignore the requested size and start small: */
		if (flags & FD_RINGBUFFER_GROWABLE)
			size = INIT_SIZE;

		msm_ring->offset = 0;
		msm_ring->ring_bo = fd_bo_new_ring(submit->pipe->dev, size, 0);
	}

	if (!msm_ringbuffer_init(msm_ring, size, flags))
		return NULL;

	if (flags & FD_RINGBUFFER_PRIMARY) {
		debug_assert(!msm_submit->primary);
		msm_submit->primary = fd_ringbuffer_ref(&msm_ring->base);
	}

	return &msm_ring->base;
}
273
274 static struct drm_msm_gem_submit_reloc *
275 handle_stateobj_relocs(struct msm_submit *submit, struct msm_ringbuffer *ring)
276 {
277 struct msm_cmd *cmd = ring->cmd;
278 struct drm_msm_gem_submit_reloc *relocs;
279
280 relocs = malloc(cmd->nr_relocs * sizeof(*relocs));
281
282 for (unsigned i = 0; i < cmd->nr_relocs; i++) {
283 unsigned idx = cmd->relocs[i].reloc_idx;
284 struct fd_bo *bo = ring->u.reloc_bos[idx].bo;
285 unsigned flags = 0;
286
287 if (ring->u.reloc_bos[idx].flags & MSM_SUBMIT_BO_READ)
288 flags |= FD_RELOC_READ;
289 if (ring->u.reloc_bos[idx].flags & MSM_SUBMIT_BO_WRITE)
290 flags |= FD_RELOC_WRITE;
291
292 relocs[i] = cmd->relocs[i];
293 relocs[i].reloc_idx = append_bo(submit, bo, flags);
294 }
295
296 return relocs;
297 }
298
299 static int
300 msm_submit_flush(struct fd_submit *submit, int in_fence_fd,
301 int *out_fence_fd, uint32_t *out_fence)
302 {
303 struct msm_submit *msm_submit = to_msm_submit(submit);
304 struct msm_pipe *msm_pipe = to_msm_pipe(submit->pipe);
305 struct drm_msm_gem_submit req = {
306 .flags = msm_pipe->pipe,
307 .queueid = msm_pipe->queue_id,
308 };
309 int ret;
310
311 debug_assert(msm_submit->primary);
312
313 finalize_current_cmd(msm_submit->primary);
314 append_ring(msm_submit->ring_set, msm_submit->primary);
315
316 unsigned nr_cmds = 0;
317 unsigned nr_objs = 0;
318
319 set_foreach(msm_submit->ring_set, entry) {
320 struct fd_ringbuffer *ring = (void *)entry->key;
321 if (ring->flags & _FD_RINGBUFFER_OBJECT) {
322 nr_cmds += 1;
323 nr_objs += 1;
324 } else {
325 if (ring != msm_submit->primary)
326 finalize_current_cmd(ring);
327 nr_cmds += to_msm_ringbuffer(ring)->u.nr_cmds;
328 }
329 }
330
331 void *obj_relocs[nr_objs];
332 struct drm_msm_gem_submit_cmd cmds[nr_cmds];
333 unsigned i = 0, o = 0;
334
335 set_foreach(msm_submit->ring_set, entry) {
336 struct fd_ringbuffer *ring = (void *)entry->key;
337 struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);
338
339 debug_assert(i < nr_cmds);
340
341 // TODO handle relocs:
342 if (ring->flags & _FD_RINGBUFFER_OBJECT) {
343
344 debug_assert(o < nr_objs);
345
346 void *relocs = handle_stateobj_relocs(msm_submit, msm_ring);
347 obj_relocs[o++] = relocs;
348
349 cmds[i].type = MSM_SUBMIT_CMD_IB_TARGET_BUF;
350 cmds[i].submit_idx =
351 append_bo(msm_submit, msm_ring->ring_bo, FD_RELOC_READ);
352 cmds[i].submit_offset = msm_ring->offset;
353 cmds[i].size = offset_bytes(ring->cur, ring->start);
354 cmds[i].pad = 0;
355 cmds[i].nr_relocs = msm_ring->cmd->nr_relocs;
356 cmds[i].relocs = VOID2U64(relocs);
357
358 i++;
359 } else {
360 for (unsigned j = 0; j < msm_ring->u.nr_cmds; j++) {
361 if (ring->flags & FD_RINGBUFFER_PRIMARY) {
362 cmds[i].type = MSM_SUBMIT_CMD_BUF;
363 } else {
364 cmds[i].type = MSM_SUBMIT_CMD_IB_TARGET_BUF;
365 }
366 cmds[i].submit_idx = append_bo(msm_submit,
367 msm_ring->u.cmds[j]->ring_bo, FD_RELOC_READ);
368 cmds[i].submit_offset = msm_ring->offset;
369 cmds[i].size = msm_ring->u.cmds[j]->size;
370 cmds[i].pad = 0;
371 cmds[i].nr_relocs = msm_ring->u.cmds[j]->nr_relocs;
372 cmds[i].relocs = VOID2U64(msm_ring->u.cmds[j]->relocs);
373
374 i++;
375 }
376 }
377 }
378
379 if (in_fence_fd != -1) {
380 req.flags |= MSM_SUBMIT_FENCE_FD_IN | MSM_SUBMIT_NO_IMPLICIT;
381 req.fence_fd = in_fence_fd;
382 }
383
384 if (out_fence_fd) {
385 req.flags |= MSM_SUBMIT_FENCE_FD_OUT;
386 }
387
388 /* needs to be after get_cmd() as that could create bos/cmds table: */
389 req.bos = VOID2U64(msm_submit->submit_bos),
390 req.nr_bos = msm_submit->nr_submit_bos;
391 req.cmds = VOID2U64(cmds),
392 req.nr_cmds = nr_cmds;
393
394 DEBUG_MSG("nr_cmds=%u, nr_bos=%u", req.nr_cmds, req.nr_bos);
395
396 ret = drmCommandWriteRead(submit->pipe->dev->fd, DRM_MSM_GEM_SUBMIT,
397 &req, sizeof(req));
398 if (ret) {
399 ERROR_MSG("submit failed: %d (%s)", ret, strerror(errno));
400 msm_dump_submit(&req);
401 } else if (!ret) {
402 if (out_fence)
403 *out_fence = req.fence;
404
405 if (out_fence_fd)
406 *out_fence_fd = req.fence_fd;
407 }
408
409 for (unsigned o = 0; o < nr_objs; o++)
410 free(obj_relocs[o]);
411
412 return ret;
413 }
414
415 static void
416 unref_rings(struct set_entry *entry)
417 {
418 struct fd_ringbuffer *ring = (void *)entry->key;
419 fd_ringbuffer_del(ring);
420 }
421
static void
msm_submit_destroy(struct fd_submit *submit)
{
	struct msm_submit *msm_submit = to_msm_submit(submit);

	/* drop the refs held directly by the submit: */
	if (msm_submit->primary)
		fd_ringbuffer_del(msm_submit->primary);
	if (msm_submit->suballoc_ring)
		fd_ringbuffer_del(msm_submit->suballoc_ring);

	/* bo_table values are plain indices (nothing to free per-entry);
	 * ring_set holds a ref per ring which unref_rings() drops, which
	 * must happen before slab_destroy() below since non-stateobj rings
	 * are freed back into ring_pool:
	 */
	_mesa_hash_table_destroy(msm_submit->bo_table, NULL);
	_mesa_set_destroy(msm_submit->ring_set, unref_rings);

	// TODO it would be nice to have a way to debug_assert() if all
	// rb's haven't been free'd back to the slab, because that is
	// an indication that we are leaking bo's
	slab_destroy(&msm_submit->ring_pool);

	/* drop the per-bo refs taken in append_bo(): */
	for (unsigned i = 0; i < msm_submit->nr_bos; i++)
		fd_bo_del(msm_submit->bos[i]);

	free(msm_submit->submit_bos);
	free(msm_submit->bos);
	free(msm_submit);
}
447
/* vtable hooked up to the fd_submit by msm_submit_new(): */
static const struct fd_submit_funcs submit_funcs = {
		.new_ringbuffer = msm_submit_new_ringbuffer,
		.flush = msm_submit_flush,
		.destroy = msm_submit_destroy,
};
453
454 struct fd_submit *
455 msm_submit_new(struct fd_pipe *pipe)
456 {
457 struct msm_submit *msm_submit = calloc(1, sizeof(*msm_submit));
458 struct fd_submit *submit;
459
460 msm_submit->bo_table = _mesa_hash_table_create(NULL,
461 _mesa_hash_pointer, _mesa_key_pointer_equal);
462 msm_submit->ring_set = _mesa_set_create(NULL,
463 _mesa_hash_pointer, _mesa_key_pointer_equal);
464 // TODO tune size:
465 slab_create(&msm_submit->ring_pool, sizeof(struct msm_ringbuffer), 16);
466
467 submit = &msm_submit->base;
468 submit->pipe = pipe;
469 submit->funcs = &submit_funcs;
470
471 return submit;
472 }
473
474
475 static void
476 finalize_current_cmd(struct fd_ringbuffer *ring)
477 {
478 struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);
479
480 debug_assert(!(ring->flags & _FD_RINGBUFFER_OBJECT));
481
482 if (!msm_ring->cmd)
483 return;
484
485 debug_assert(msm_ring->cmd->ring_bo == msm_ring->ring_bo);
486
487 unsigned idx = APPEND(&msm_ring->u, cmds);
488
489 msm_ring->u.cmds[idx] = msm_ring->cmd;
490 msm_ring->cmd = NULL;
491
492 msm_ring->u.cmds[idx]->size = offset_bytes(ring->cur, ring->start);
493 }
494
495 static void
496 msm_ringbuffer_grow(struct fd_ringbuffer *ring, uint32_t size)
497 {
498 struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);
499 struct fd_pipe *pipe = msm_ring->u.submit->pipe;
500
501 debug_assert(ring->flags & FD_RINGBUFFER_GROWABLE);
502
503 finalize_current_cmd(ring);
504
505 fd_bo_del(msm_ring->ring_bo);
506 msm_ring->ring_bo = fd_bo_new_ring(pipe->dev, size, 0);
507 msm_ring->cmd = cmd_new(msm_ring->ring_bo);
508
509 ring->start = fd_bo_map(msm_ring->ring_bo);
510 ring->end = &(ring->start[size/4]);
511 ring->cur = ring->start;
512 ring->size = size;
513 }
514
/* Emit a reloc into the ring's current cmd, and emit one ring dword per
 * reloc entry.  For gpu_id >= 500 a second reloc is emitted for the high
 * dword (presumably 64b addressing — note orhi and shift - 32).
 */
static void
msm_ringbuffer_emit_reloc(struct fd_ringbuffer *ring,
		const struct fd_reloc *reloc)
{
	struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);
	struct fd_pipe *pipe;
	unsigned reloc_idx;

	if (ring->flags & _FD_RINGBUFFER_OBJECT) {
		/* stateobj rb's are not bound to a submit yet, so stash the bo
		 * and flags (FD_RELOC_* namespace) on the rb's own list:
		 */
		unsigned idx = APPEND(&msm_ring->u, reloc_bos);

		msm_ring->u.reloc_bos[idx].bo = fd_bo_ref(reloc->bo);
		msm_ring->u.reloc_bos[idx].flags = reloc->flags;

		/* this gets fixed up at submit->flush() time, since this state-
		 * object rb can be used with many different submits
		 */
		reloc_idx = idx;

		pipe = msm_ring->u.pipe;
	} else {
		struct msm_submit *msm_submit =
				to_msm_submit(msm_ring->u.submit);

		reloc_idx = append_bo(msm_submit, reloc->bo, reloc->flags);

		pipe = msm_ring->u.submit->pipe;
	}

	struct drm_msm_gem_submit_reloc *r;
	unsigned idx = APPEND(msm_ring->cmd, relocs);

	r = &msm_ring->cmd->relocs[idx];

	r->reloc_idx = reloc_idx;
	r->reloc_offset = reloc->offset;
	r->or = reloc->or;
	r->shift = reloc->shift;
	/* submit_offset is relative to the start of the backing bo, so a
	 * sub-allocated rb must add its offset within the bo:
	 */
	r->submit_offset = offset_bytes(ring->cur, ring->start) +
			msm_ring->offset;

	ring->cur++;

	/* second reloc for the upper dword on gpu_id >= 500: */
	if (pipe->gpu_id >= 500) {
		idx = APPEND(msm_ring->cmd, relocs);
		r = &msm_ring->cmd->relocs[idx];

		r->reloc_idx = reloc_idx;
		r->reloc_offset = reloc->offset;
		r->or = reloc->orhi;
		r->shift = reloc->shift - 32;
		r->submit_offset = offset_bytes(ring->cur, ring->start) +
				msm_ring->offset;

		ring->cur++;
	}
}
572
573 static void
574 append_stateobj_rings(struct msm_submit *submit, struct fd_ringbuffer *target)
575 {
576 struct msm_ringbuffer *msm_target = to_msm_ringbuffer(target);
577
578 debug_assert(target->flags & _FD_RINGBUFFER_OBJECT);
579
580 set_foreach(msm_target->u.ring_set, entry) {
581 struct fd_ringbuffer *ring = (void *)entry->key;
582
583 append_ring(submit->ring_set, ring);
584
585 if (ring->flags & _FD_RINGBUFFER_OBJECT) {
586 append_stateobj_rings(submit, ring);
587 }
588 }
589 }
590
/* Emit a reloc from 'ring' to 'target' (ie. an IB), returning the size in
 * bytes of the referenced cmdstream.  For growable targets, cmd_idx picks
 * which finalized cmd buffer is referenced.
 */
static uint32_t
msm_ringbuffer_emit_reloc_ring(struct fd_ringbuffer *ring,
		struct fd_ringbuffer *target, uint32_t cmd_idx)
{
	struct msm_ringbuffer *msm_target = to_msm_ringbuffer(target);
	struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);
	struct fd_bo *bo;
	uint32_t size;

	if ((target->flags & FD_RINGBUFFER_GROWABLE) &&
			(cmd_idx < msm_target->u.nr_cmds)) {
		/* one of the target's already-finalized cmd buffers: */
		bo = msm_target->u.cmds[cmd_idx]->ring_bo;
		size = msm_target->u.cmds[cmd_idx]->size;
	} else {
		/* the target's current (not yet finalized) buffer: */
		bo = msm_target->ring_bo;
		size = offset_bytes(target->cur, target->start);
	}

	msm_ringbuffer_emit_reloc(ring, &(struct fd_reloc){
		.bo = bo,
		.flags = FD_RELOC_READ,
		.offset = msm_target->offset,
	});

	if (!size)
		return 0;

	/* when a stateobj is referenced from a regular (submit-owned) rb,
	 * pull everything the stateobj references into the submit too:
	 */
	if ((target->flags & _FD_RINGBUFFER_OBJECT) &&
			!(ring->flags & _FD_RINGBUFFER_OBJECT)) {
		struct msm_submit *msm_submit = to_msm_submit(msm_ring->u.submit);

		append_stateobj_rings(msm_submit, target);
	}

	/* track the target on the appropriate ring_set so its bo's/cmds get
	 * accounted at flush time:
	 */
	if (ring->flags & _FD_RINGBUFFER_OBJECT) {
		append_ring(msm_ring->u.ring_set, target);
	} else {
		struct msm_submit *msm_submit = to_msm_submit(msm_ring->u.submit);
		append_ring(msm_submit->ring_set, target);
	}

	return size;
}
634
635 static uint32_t
636 msm_ringbuffer_cmd_count(struct fd_ringbuffer *ring)
637 {
638 if (ring->flags & FD_RINGBUFFER_GROWABLE)
639 return to_msm_ringbuffer(ring)->u.nr_cmds + 1;
640 return 1;
641 }
642
static void
msm_ringbuffer_destroy(struct fd_ringbuffer *ring)
{
	struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);

	fd_bo_del(msm_ring->ring_bo);
	if (msm_ring->cmd)
		cmd_free(msm_ring->cmd);

	if (ring->flags & _FD_RINGBUFFER_OBJECT) {
		/* stateobj: drop refs on tracked bo's and rings; stateobjs are
		 * malloc'd (see msm_ringbuffer_new_object()) so plain free():
		 */
		for (unsigned i = 0; i < msm_ring->u.nr_reloc_bos; i++) {
			fd_bo_del(msm_ring->u.reloc_bos[i].bo);
		}

		_mesa_set_destroy(msm_ring->u.ring_set, unref_rings);

		free(msm_ring->u.reloc_bos);
		free(msm_ring);
	} else {
		struct fd_submit *submit = msm_ring->u.submit;

		for (unsigned i = 0; i < msm_ring->u.nr_cmds; i++) {
			cmd_free(msm_ring->u.cmds[i]);
		}

		/* non-stateobj rb's come from the submit's slab pool: */
		free(msm_ring->u.cmds);
		slab_free_st(&to_msm_submit(submit)->ring_pool, msm_ring);
	}
}
672
/* vtable hooked up to each fd_ringbuffer by msm_ringbuffer_init(): */
static const struct fd_ringbuffer_funcs ring_funcs = {
		.grow = msm_ringbuffer_grow,
		.emit_reloc = msm_ringbuffer_emit_reloc,
		.emit_reloc_ring = msm_ringbuffer_emit_reloc_ring,
		.cmd_count = msm_ringbuffer_cmd_count,
		.destroy = msm_ringbuffer_destroy,
};
680
/* Common rb setup: map the backing bo, point start/cur/end at this rb's
 * slice of it, and start the first cmd.  Always returns 'ring'.
 */
static inline struct fd_ringbuffer *
msm_ringbuffer_init(struct msm_ringbuffer *msm_ring, uint32_t size,
		enum fd_ringbuffer_flags flags)
{
	struct fd_ringbuffer *ring = &msm_ring->base;

	debug_assert(msm_ring->ring_bo);

	uint8_t *base = fd_bo_map(msm_ring->ring_bo);
	ring->start = (void *)(base + msm_ring->offset);
	ring->end = &(ring->start[size/4]);
	ring->cur = ring->start;

	ring->size = size;
	ring->flags = flags;

	ring->funcs = &ring_funcs;

	/* NOTE: u.cmds/nr_cmds/max_cmds alias u.reloc_bos/nr/max in the
	 * union; in the stateobj case the caller already set those fields
	 * to the same NULL/0 values, so this write is harmless there:
	 */
	msm_ring->u.cmds = NULL;
	msm_ring->u.nr_cmds = msm_ring->u.max_cmds = 0;

	msm_ring->cmd = cmd_new(msm_ring->ring_bo);

	return ring;
}
706
/* Create a stateobj (_FD_RINGBUFFER_OBJECT) rb, which is not tied to any
 * submit and may later be referenced from many submits.  It tracks its own
 * reloc bo's and referenced rings until flush time.
 */
struct fd_ringbuffer *
msm_ringbuffer_new_object(struct fd_pipe *pipe, uint32_t size)
{
	struct msm_ringbuffer *msm_ring = malloc(sizeof(*msm_ring));

	msm_ring->u.pipe = pipe;
	msm_ring->offset = 0;
	msm_ring->ring_bo = fd_bo_new_ring(pipe->dev, size, 0);
	msm_ring->base.refcnt = 1;

	/* note: these fields alias u.cmds/nr/max in the union, which
	 * msm_ringbuffer_init() re-zeroes (benignly):
	 */
	msm_ring->u.reloc_bos = NULL;
	msm_ring->u.nr_reloc_bos = msm_ring->u.max_reloc_bos = 0;

	msm_ring->u.ring_set = _mesa_set_create(NULL,
			_mesa_hash_pointer, _mesa_key_pointer_equal);

	return msm_ringbuffer_init(msm_ring, size, _FD_RINGBUFFER_OBJECT);
}