src/freedreno/drm/msm_ringbuffer.c
/*
 * Copyright (C) 2012-2018 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include <assert.h>
#include <inttypes.h>

#include "util/hash_table.h"
#include "util/set.h"
#include "util/slab.h"

#include "drm/freedreno_ringbuffer.h"
#include "msm_priv.h"

/* The legacy implementation of submit/ringbuffer, which still does the
 * traditional reloc and cmd tracking.
 */

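/* (The newer, non-legacy implementation, which takes advantage of softpin
 * to avoid reloc processing, lives in msm_ringbuffer_sp.c.)
 */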

#define INIT_SIZE 0x1000

/* Protects the per-bo submit bookkeeping (msm_bo->idx and
 * msm_bo->current_submit_seqno) used by append_bo() below:
 */
static pthread_mutex_t idx_lock = PTHREAD_MUTEX_INITIALIZER;

struct msm_submit {
   struct fd_submit base;

   DECLARE_ARRAY(struct drm_msm_gem_submit_bo, submit_bos);
   DECLARE_ARRAY(struct fd_bo *, bos);

   unsigned seqno;

   /* maps fd_bo to idx in bos table: */
   struct hash_table *bo_table;

   struct slab_mempool ring_pool;

   /* hash-set of associated rings: */
   struct set *ring_set;

   struct fd_ringbuffer *primary;

   /* Allow for sub-allocation of stateobj ring buffers (i.e. sharing
    * the same underlying bo).
    *
    * We also rely on the previous stateobj having been fully constructed
    * so we can reclaim extra space at its end.
    */
   struct fd_ringbuffer *suballoc_ring;
};
FD_DEFINE_CAST(fd_submit, msm_submit);

/* For FD_RINGBUFFER_GROWABLE rb's, tracks the 'finalized' cmdstream buffers
 * and sizes.  I.e. a finalized buffer can have no more commands appended to
 * it.
 */
struct msm_cmd {
   struct fd_bo *ring_bo;
   unsigned size;
   DECLARE_ARRAY(struct drm_msm_gem_submit_reloc, relocs);
};

static struct msm_cmd *
cmd_new(struct fd_bo *ring_bo)
{
   struct msm_cmd *cmd = malloc(sizeof(*cmd));
   cmd->ring_bo = fd_bo_ref(ring_bo);
   cmd->size = 0;
   cmd->nr_relocs = cmd->max_relocs = 0;
   cmd->relocs = NULL;
   return cmd;
}

static void
cmd_free(struct msm_cmd *cmd)
{
   fd_bo_del(cmd->ring_bo);
   free(cmd->relocs);
   free(cmd);
}
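
/* Lifecycle of a msm_cmd: one is created by cmd_new() when a ring is
 * initialized (and again after each grow); finalize_current_cmd() records
 * the final size and moves it into the ring's u.cmds table; cmd_free()
 * drops the bo reference and relocs when the ring is destroyed.
 */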

/* For _FD_RINGBUFFER_OBJECT rb's we need to track the bo's and flags, to
 * copy into the submit later when the stateobj rb is referenced by a
 * regular rb:
 */
struct msm_reloc_bo {
   struct fd_bo *bo;
   unsigned flags;
};

struct msm_ringbuffer {
   struct fd_ringbuffer base;

   /* for FD_RINGBUFFER_STREAMING rb's which are sub-allocated */
   unsigned offset;

   union {
      /* for _FD_RINGBUFFER_OBJECT case: */
      struct {
         struct fd_pipe *pipe;
         DECLARE_ARRAY(struct msm_reloc_bo, reloc_bos);
         struct set *ring_set;
      };
      /* for other cases: */
      struct {
         struct fd_submit *submit;
         DECLARE_ARRAY(struct msm_cmd *, cmds);
      };
   } u;

   struct msm_cmd *cmd;      /* current cmd */
   struct fd_bo *ring_bo;
};
FD_DEFINE_CAST(fd_ringbuffer, msm_ringbuffer);

static void finalize_current_cmd(struct fd_ringbuffer *ring);
static struct fd_ringbuffer * msm_ringbuffer_init(
      struct msm_ringbuffer *msm_ring,
      uint32_t size, enum fd_ringbuffer_flags flags);

/* add (if needed) bo to submit and return index: */
static uint32_t
append_bo(struct msm_submit *submit, struct fd_bo *bo, uint32_t flags)
{
   struct msm_bo *msm_bo = to_msm_bo(bo);
   uint32_t idx;
   pthread_mutex_lock(&idx_lock);
   if (likely(msm_bo->current_submit_seqno == submit->seqno)) {
      /* fast path: the bo was already added to this submit, so its
       * cached idx is still valid:
       */
      idx = msm_bo->idx;
   } else {
      uint32_t hash = _mesa_hash_pointer(bo);
      struct hash_entry *entry;

      entry = _mesa_hash_table_search_pre_hashed(submit->bo_table, hash, bo);
      if (entry) {
         /* found */
         idx = (uint32_t)(uintptr_t)entry->data;
      } else {
         idx = APPEND(submit, submit_bos);
         idx = APPEND(submit, bos);

         submit->submit_bos[idx].flags = 0;
         submit->submit_bos[idx].handle = bo->handle;
         submit->submit_bos[idx].presumed = 0;

         submit->bos[idx] = fd_bo_ref(bo);

         _mesa_hash_table_insert_pre_hashed(submit->bo_table, hash, bo,
               (void *)(uintptr_t)idx);
      }
      msm_bo->current_submit_seqno = submit->seqno;
      msm_bo->idx = idx;
   }
   pthread_mutex_unlock(&idx_lock);
   if (flags & FD_RELOC_READ)
      submit->submit_bos[idx].flags |= MSM_SUBMIT_BO_READ;
   if (flags & FD_RELOC_WRITE)
      submit->submit_bos[idx].flags |= MSM_SUBMIT_BO_WRITE;
   return idx;
}

static void
append_ring(struct set *set, struct fd_ringbuffer *ring)
{
   uint32_t hash = _mesa_hash_pointer(ring);

   if (!_mesa_set_search_pre_hashed(set, hash, ring)) {
      /* the set holds a reference to each ring, dropped in unref_rings(): */
      fd_ringbuffer_ref(ring);
      _mesa_set_add_pre_hashed(set, hash, ring);
   }
}

static void
msm_submit_suballoc_ring_bo(struct fd_submit *submit,
      struct msm_ringbuffer *msm_ring, uint32_t size)
{
   struct msm_submit *msm_submit = to_msm_submit(submit);
   unsigned suballoc_offset = 0;
   struct fd_bo *suballoc_bo = NULL;

   if (msm_submit->suballoc_ring) {
      struct msm_ringbuffer *suballoc_ring =
            to_msm_ringbuffer(msm_submit->suballoc_ring);

      suballoc_bo = suballoc_ring->ring_bo;
      suballoc_offset = fd_ringbuffer_size(msm_submit->suballoc_ring) +
            suballoc_ring->offset;

      suballoc_offset = align(suballoc_offset, 0x10);

      if ((size + suballoc_offset) > suballoc_bo->size) {
         suballoc_bo = NULL;
      }
   }

   if (!suballoc_bo) {
      // TODO possibly larger size for streaming bo?
      msm_ring->ring_bo = fd_bo_new_ring(
            submit->pipe->dev, 0x8000, 0);
      msm_ring->offset = 0;
   } else {
      msm_ring->ring_bo = fd_bo_ref(suballoc_bo);
      msm_ring->offset = suballoc_offset;
   }

   struct fd_ringbuffer *old_suballoc_ring = msm_submit->suballoc_ring;

   msm_submit->suballoc_ring = fd_ringbuffer_ref(&msm_ring->base);

   if (old_suballoc_ring)
      fd_ringbuffer_del(old_suballoc_ring);
}
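
/* Worked example of the sub-allocation above: if the previous streaming rb
 * occupies [0x0, 0x200) of the shared bo, the next one starts at
 * align(0x0 + 0x200, 0x10) = 0x200 in the same bo.  Once size + offset
 * would exceed the 0x8000-byte backing bo, a fresh bo is allocated and the
 * offset resets to 0.
 */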

static struct fd_ringbuffer *
msm_submit_new_ringbuffer(struct fd_submit *submit, uint32_t size,
      enum fd_ringbuffer_flags flags)
{
   struct msm_submit *msm_submit = to_msm_submit(submit);
   struct msm_ringbuffer *msm_ring;

   msm_ring = slab_alloc_st(&msm_submit->ring_pool);

   msm_ring->u.submit = submit;

   /* NOTE: needs to be before _suballoc_ring_bo() since it could
    * increment the refcnt of the current ring
    */
   msm_ring->base.refcnt = 1;

   if (flags & FD_RINGBUFFER_STREAMING) {
      msm_submit_suballoc_ring_bo(submit, msm_ring, size);
   } else {
      if (flags & FD_RINGBUFFER_GROWABLE)
         size = INIT_SIZE;

      msm_ring->offset = 0;
      msm_ring->ring_bo = fd_bo_new_ring(submit->pipe->dev, size, 0);
   }

   if (!msm_ringbuffer_init(msm_ring, size, flags))
      return NULL;

   if (flags & FD_RINGBUFFER_PRIMARY) {
      debug_assert(!msm_submit->primary);
      msm_submit->primary = fd_ringbuffer_ref(&msm_ring->base);
   }

   return &msm_ring->base;
}

static struct drm_msm_gem_submit_reloc *
handle_stateobj_relocs(struct msm_submit *submit, struct msm_ringbuffer *ring)
{
   struct msm_cmd *cmd = ring->cmd;
   struct drm_msm_gem_submit_reloc *relocs;

   relocs = malloc(cmd->nr_relocs * sizeof(*relocs));

   for (unsigned i = 0; i < cmd->nr_relocs; i++) {
      unsigned idx = cmd->relocs[i].reloc_idx;
      struct fd_bo *bo = ring->u.reloc_bos[idx].bo;
      unsigned flags = 0;

      /* Note: reloc_bos[].flags holds FD_RELOC_* values; the READ/WRITE
       * bits of FD_RELOC_* and MSM_SUBMIT_BO_* happen to share the same
       * values, so either set of masks works here:
       */
      if (ring->u.reloc_bos[idx].flags & MSM_SUBMIT_BO_READ)
         flags |= FD_RELOC_READ;
      if (ring->u.reloc_bos[idx].flags & MSM_SUBMIT_BO_WRITE)
         flags |= FD_RELOC_WRITE;

      relocs[i] = cmd->relocs[i];
      relocs[i].reloc_idx = append_bo(submit, bo, flags);
   }

   return relocs;
}
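
/* A stateobj's relocs are recorded against its private u.reloc_bos table
 * while it is constructed (see msm_ringbuffer_emit_reloc()).  The copy made
 * here re-resolves each reloc_idx into this submit's bo table, which is
 * what lets a single stateobj be referenced from many different submits.
 * The returned array is freed by msm_submit_flush() once the ioctl returns.
 */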

static int
msm_submit_flush(struct fd_submit *submit, int in_fence_fd,
      int *out_fence_fd, uint32_t *out_fence)
{
   struct msm_submit *msm_submit = to_msm_submit(submit);
   struct msm_pipe *msm_pipe = to_msm_pipe(submit->pipe);
   struct drm_msm_gem_submit req = {
         .flags = msm_pipe->pipe,
         .queueid = msm_pipe->queue_id,
   };
   int ret;

   debug_assert(msm_submit->primary);

   finalize_current_cmd(msm_submit->primary);
   append_ring(msm_submit->ring_set, msm_submit->primary);

   unsigned nr_cmds = 0;
   unsigned nr_objs = 0;

   set_foreach(msm_submit->ring_set, entry) {
      struct fd_ringbuffer *ring = (void *)entry->key;
      if (ring->flags & _FD_RINGBUFFER_OBJECT) {
         nr_cmds += 1;
         nr_objs += 1;
      } else {
         if (ring != msm_submit->primary)
            finalize_current_cmd(ring);
         nr_cmds += to_msm_ringbuffer(ring)->u.nr_cmds;
      }
   }

   void *obj_relocs[nr_objs];
   struct drm_msm_gem_submit_cmd cmds[nr_cmds];
   unsigned i = 0, o = 0;

   set_foreach(msm_submit->ring_set, entry) {
      struct fd_ringbuffer *ring = (void *)entry->key;
      struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);

      debug_assert(i < nr_cmds);

      // TODO handle relocs:
      if (ring->flags & _FD_RINGBUFFER_OBJECT) {

         debug_assert(o < nr_objs);

         void *relocs = handle_stateobj_relocs(msm_submit, msm_ring);
         obj_relocs[o++] = relocs;

         cmds[i].type = MSM_SUBMIT_CMD_IB_TARGET_BUF;
         cmds[i].submit_idx =
               append_bo(msm_submit, msm_ring->ring_bo, FD_RELOC_READ);
         cmds[i].submit_offset = msm_ring->offset;
         cmds[i].size = offset_bytes(ring->cur, ring->start);
         cmds[i].pad = 0;
         cmds[i].nr_relocs = msm_ring->cmd->nr_relocs;
         cmds[i].relocs = VOID2U64(relocs);

         i++;
      } else {
         for (unsigned j = 0; j < msm_ring->u.nr_cmds; j++) {
            if (ring->flags & FD_RINGBUFFER_PRIMARY) {
               cmds[i].type = MSM_SUBMIT_CMD_BUF;
            } else {
               cmds[i].type = MSM_SUBMIT_CMD_IB_TARGET_BUF;
            }
            cmds[i].submit_idx = append_bo(msm_submit,
                  msm_ring->u.cmds[j]->ring_bo, FD_RELOC_READ);
            cmds[i].submit_offset = msm_ring->offset;
            cmds[i].size = msm_ring->u.cmds[j]->size;
            cmds[i].pad = 0;
            cmds[i].nr_relocs = msm_ring->u.cmds[j]->nr_relocs;
            cmds[i].relocs = VOID2U64(msm_ring->u.cmds[j]->relocs);

            i++;
         }
      }
   }

   if (in_fence_fd != -1) {
      req.flags |= MSM_SUBMIT_FENCE_FD_IN | MSM_SUBMIT_NO_IMPLICIT;
      req.fence_fd = in_fence_fd;
   }

   if (out_fence_fd) {
      req.flags |= MSM_SUBMIT_FENCE_FD_OUT;
   }

   /* needs to be after the append_bo() calls above, since those can
    * realloc the submit_bos table:
    */
   req.bos = VOID2U64(msm_submit->submit_bos);
   req.nr_bos = msm_submit->nr_submit_bos;
   req.cmds = VOID2U64(cmds);
   req.nr_cmds = nr_cmds;

   DEBUG_MSG("nr_cmds=%u, nr_bos=%u", req.nr_cmds, req.nr_bos);

   ret = drmCommandWriteRead(submit->pipe->dev->fd, DRM_MSM_GEM_SUBMIT,
         &req, sizeof(req));
   if (ret) {
      ERROR_MSG("submit failed: %d (%s)", ret, strerror(errno));
      msm_dump_submit(&req);
   } else {
      if (out_fence)
         *out_fence = req.fence;

      if (out_fence_fd)
         *out_fence_fd = req.fence_fd;
   }

   for (unsigned o = 0; o < nr_objs; o++)
      free(obj_relocs[o]);

   return ret;
}

static void
unref_rings(struct set_entry *entry)
{
   struct fd_ringbuffer *ring = (void *)entry->key;
   fd_ringbuffer_del(ring);
}

static void
msm_submit_destroy(struct fd_submit *submit)
{
   struct msm_submit *msm_submit = to_msm_submit(submit);

   if (msm_submit->primary)
      fd_ringbuffer_del(msm_submit->primary);
   if (msm_submit->suballoc_ring)
      fd_ringbuffer_del(msm_submit->suballoc_ring);

   _mesa_hash_table_destroy(msm_submit->bo_table, NULL);
   _mesa_set_destroy(msm_submit->ring_set, unref_rings);

   // TODO it would be nice to have a way to debug_assert() if all
   // rb's haven't been free'd back to the slab, because that is
   // an indication that we are leaking bo's
   slab_destroy(&msm_submit->ring_pool);

   for (unsigned i = 0; i < msm_submit->nr_bos; i++)
      fd_bo_del(msm_submit->bos[i]);

   free(msm_submit->submit_bos);
   free(msm_submit->bos);
   free(msm_submit);
}

static const struct fd_submit_funcs submit_funcs = {
      .new_ringbuffer = msm_submit_new_ringbuffer,
      .flush = msm_submit_flush,
      .destroy = msm_submit_destroy,
};

struct fd_submit *
msm_submit_new(struct fd_pipe *pipe)
{
   struct msm_submit *msm_submit = calloc(1, sizeof(*msm_submit));
   struct fd_submit *submit;
   static unsigned submit_cnt = 0;

   msm_submit->seqno = ++submit_cnt;
   msm_submit->bo_table = _mesa_hash_table_create(NULL,
         _mesa_hash_pointer, _mesa_key_pointer_equal);
   msm_submit->ring_set = _mesa_set_create(NULL,
         _mesa_hash_pointer, _mesa_key_pointer_equal);
   // TODO tune size:
   slab_create(&msm_submit->ring_pool, sizeof(struct msm_ringbuffer), 16);

   submit = &msm_submit->base;
   submit->pipe = pipe;
   submit->funcs = &submit_funcs;

   return submit;
}
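
/* Illustrative usage sketch (not part of this file): a typical lifecycle
 * using the public wrappers from freedreno_ringbuffer.h.  The wrapper names
 * below are assumptions based on that header, not calls made here:
 *
 *    struct fd_submit *submit = fd_submit_new(pipe);
 *    struct fd_ringbuffer *ring = fd_submit_new_ringbuffer(submit, 0x1000,
 *          FD_RINGBUFFER_PRIMARY | FD_RINGBUFFER_GROWABLE);
 *
 *    fd_ringbuffer_emit(ring, pkt);              // build the cmdstream
 *
 *    uint32_t fence;
 *    fd_submit_flush(submit, -1, NULL, &fence);  // no fence fds in/out
 *
 *    fd_ringbuffer_del(ring);
 *    fd_submit_del(submit);
 */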

static void
finalize_current_cmd(struct fd_ringbuffer *ring)
{
   struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);

   debug_assert(!(ring->flags & _FD_RINGBUFFER_OBJECT));

   if (!msm_ring->cmd)
      return;

   debug_assert(msm_ring->cmd->ring_bo == msm_ring->ring_bo);

   unsigned idx = APPEND(&msm_ring->u, cmds);

   msm_ring->u.cmds[idx] = msm_ring->cmd;
   msm_ring->cmd = NULL;

   msm_ring->u.cmds[idx]->size = offset_bytes(ring->cur, ring->start);
}

static void
msm_ringbuffer_grow(struct fd_ringbuffer *ring, uint32_t size)
{
   struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);
   struct fd_pipe *pipe = msm_ring->u.submit->pipe;

   debug_assert(ring->flags & FD_RINGBUFFER_GROWABLE);

   finalize_current_cmd(ring);

   fd_bo_del(msm_ring->ring_bo);
   msm_ring->ring_bo = fd_bo_new_ring(pipe->dev, size, 0);
   msm_ring->cmd = cmd_new(msm_ring->ring_bo);

   ring->start = fd_bo_map(msm_ring->ring_bo);
   ring->end = &(ring->start[size/4]);
   ring->cur = ring->start;
   ring->size = size;
}

static void
msm_ringbuffer_emit_reloc(struct fd_ringbuffer *ring,
      const struct fd_reloc *reloc)
{
   struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);
   struct fd_pipe *pipe;
   unsigned reloc_idx;

   if (ring->flags & _FD_RINGBUFFER_OBJECT) {
      unsigned idx = APPEND(&msm_ring->u, reloc_bos);

      msm_ring->u.reloc_bos[idx].bo = fd_bo_ref(reloc->bo);
      msm_ring->u.reloc_bos[idx].flags = reloc->flags;

      /* this gets fixed up at submit->flush() time, since this state-
       * object rb can be used with many different submits
       */
      reloc_idx = idx;

      pipe = msm_ring->u.pipe;
   } else {
      struct msm_submit *msm_submit =
            to_msm_submit(msm_ring->u.submit);

      reloc_idx = append_bo(msm_submit, reloc->bo, reloc->flags);

      pipe = msm_ring->u.submit->pipe;
   }

   struct drm_msm_gem_submit_reloc *r;
   unsigned idx = APPEND(msm_ring->cmd, relocs);

   r = &msm_ring->cmd->relocs[idx];

   r->reloc_idx = reloc_idx;
   r->reloc_offset = reloc->offset;
   r->or = reloc->or;
   r->shift = reloc->shift;
   r->submit_offset = offset_bytes(ring->cur, ring->start) +
         msm_ring->offset;

   ring->cur++;

   if (pipe->gpu_id >= 500) {
      /* a5xx and later use 64-bit addresses, so emit a second reloc
       * for the high dword of the address:
       */
      idx = APPEND(msm_ring->cmd, relocs);
      r = &msm_ring->cmd->relocs[idx];

      r->reloc_idx = reloc_idx;
      r->reloc_offset = reloc->offset;
      r->or = reloc->orhi;
      r->shift = reloc->shift - 32;
      r->submit_offset = offset_bytes(ring->cur, ring->start) +
            msm_ring->offset;

      ring->cur++;
   }
}
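
/* For reference: the kernel (msm_gem_submit.c) applies each reloc roughly
 * as iova = bo_iova + reloc_offset, shifts it left by 'shift' (negative
 * values mean a right shift), ORs in 'or', and writes the result into the
 * cmdstream at submit_offset.  The second entry above, with shift - 32,
 * thus produces the high 32 bits of the 64-bit address.
 */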

static void
append_stateobj_rings(struct msm_submit *submit, struct fd_ringbuffer *target)
{
   struct msm_ringbuffer *msm_target = to_msm_ringbuffer(target);

   debug_assert(target->flags & _FD_RINGBUFFER_OBJECT);

   set_foreach(msm_target->u.ring_set, entry) {
      struct fd_ringbuffer *ring = (void *)entry->key;

      append_ring(submit->ring_set, ring);

      if (ring->flags & _FD_RINGBUFFER_OBJECT) {
         append_stateobj_rings(submit, ring);
      }
   }
}

static uint32_t
msm_ringbuffer_emit_reloc_ring(struct fd_ringbuffer *ring,
      struct fd_ringbuffer *target, uint32_t cmd_idx)
{
   struct msm_ringbuffer *msm_target = to_msm_ringbuffer(target);
   struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);
   struct fd_bo *bo;
   uint32_t size;

   if ((target->flags & FD_RINGBUFFER_GROWABLE) &&
         (cmd_idx < msm_target->u.nr_cmds)) {
      bo = msm_target->u.cmds[cmd_idx]->ring_bo;
      size = msm_target->u.cmds[cmd_idx]->size;
   } else {
      bo = msm_target->ring_bo;
      size = offset_bytes(target->cur, target->start);
   }

   msm_ringbuffer_emit_reloc(ring, &(struct fd_reloc){
      .bo = bo,
      .flags = FD_RELOC_READ,
      .offset = msm_target->offset,
   });

   if ((target->flags & _FD_RINGBUFFER_OBJECT) &&
         !(ring->flags & _FD_RINGBUFFER_OBJECT)) {
      struct msm_submit *msm_submit = to_msm_submit(msm_ring->u.submit);

      append_stateobj_rings(msm_submit, target);
   }

   if (ring->flags & _FD_RINGBUFFER_OBJECT) {
      append_ring(msm_ring->u.ring_set, target);
   } else {
      struct msm_submit *msm_submit = to_msm_submit(msm_ring->u.submit);
      append_ring(msm_submit->ring_set, target);
   }

   return size;
}
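
/* The returned size is in bytes: for growable targets it covers the
 * cmd_idx'th finalized chunk, otherwise the whole target up to its current
 * write pointer.  The caller typically divides it by 4 to get the dword
 * count for its CP_INDIRECT_BUFFER packet.
 */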

static uint32_t
msm_ringbuffer_cmd_count(struct fd_ringbuffer *ring)
{
   if (ring->flags & FD_RINGBUFFER_GROWABLE)
      return to_msm_ringbuffer(ring)->u.nr_cmds + 1;
   return 1;
}

static void
msm_ringbuffer_destroy(struct fd_ringbuffer *ring)
{
   struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);

   fd_bo_del(msm_ring->ring_bo);
   if (msm_ring->cmd)
      cmd_free(msm_ring->cmd);

   if (ring->flags & _FD_RINGBUFFER_OBJECT) {
      for (unsigned i = 0; i < msm_ring->u.nr_reloc_bos; i++) {
         fd_bo_del(msm_ring->u.reloc_bos[i].bo);
      }

      _mesa_set_destroy(msm_ring->u.ring_set, unref_rings);

      free(msm_ring->u.reloc_bos);
      free(msm_ring);
   } else {
      struct fd_submit *submit = msm_ring->u.submit;

      for (unsigned i = 0; i < msm_ring->u.nr_cmds; i++) {
         cmd_free(msm_ring->u.cmds[i]);
      }

      free(msm_ring->u.cmds);
      slab_free_st(&to_msm_submit(submit)->ring_pool, msm_ring);
   }
}

static const struct fd_ringbuffer_funcs ring_funcs = {
      .grow = msm_ringbuffer_grow,
      .emit_reloc = msm_ringbuffer_emit_reloc,
      .emit_reloc_ring = msm_ringbuffer_emit_reloc_ring,
      .cmd_count = msm_ringbuffer_cmd_count,
      .destroy = msm_ringbuffer_destroy,
};

static inline struct fd_ringbuffer *
msm_ringbuffer_init(struct msm_ringbuffer *msm_ring, uint32_t size,
      enum fd_ringbuffer_flags flags)
{
   struct fd_ringbuffer *ring = &msm_ring->base;

   debug_assert(msm_ring->ring_bo);

   uint8_t *base = fd_bo_map(msm_ring->ring_bo);
   ring->start = (void *)(base + msm_ring->offset);
   ring->end = &(ring->start[size/4]);
   ring->cur = ring->start;

   ring->size = size;
   ring->flags = flags;

   ring->funcs = &ring_funcs;

   msm_ring->u.cmds = NULL;
   msm_ring->u.nr_cmds = msm_ring->u.max_cmds = 0;

   msm_ring->cmd = cmd_new(msm_ring->ring_bo);

   return ring;
}

struct fd_ringbuffer *
msm_ringbuffer_new_object(struct fd_pipe *pipe, uint32_t size)
{
   struct msm_ringbuffer *msm_ring = malloc(sizeof(*msm_ring));

   msm_ring->u.pipe = pipe;
   msm_ring->offset = 0;
   msm_ring->ring_bo = fd_bo_new_ring(pipe->dev, size, 0);
   msm_ring->base.refcnt = 1;

   msm_ring->u.reloc_bos = NULL;
   msm_ring->u.nr_reloc_bos = msm_ring->u.max_reloc_bos = 0;

   msm_ring->u.ring_set = _mesa_set_create(NULL,
         _mesa_hash_pointer, _mesa_key_pointer_equal);

   return msm_ringbuffer_init(msm_ring, size, _FD_RINGBUFFER_OBJECT);
}
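
/* Illustrative sketch (not part of this file): a stateobj is built once and
 * can then be referenced from any number of submits.  The wrapper names are
 * assumptions based on freedreno_ringbuffer.h:
 *
 *    struct fd_ringbuffer *stateobj =
 *          fd_ringbuffer_new_object(pipe, 0x100);
 *    ... emit state into stateobj ...
 *
 *    // from each submit's ring; cmd_idx 0 since stateobjs don't grow:
 *    fd_ringbuffer_emit_reloc_ring_full(ring, stateobj, 0);
 */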