/*
 * Copyright (C) 2012-2018 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */
#include <assert.h>
#include <inttypes.h>

#include "util/hash_table.h"
#include "util/set.h"
#include "util/slab.h"

#include "drm/freedreno_ringbuffer.h"
#include "msm_priv.h"
/* The legacy implementation of submit/ringbuffer, which still does the
 * traditional reloc and cmd tracking
 */

/* Initial size for growable ringbuffers (they start small and grow on
 * demand, see msm_ringbuffer_grow()):
 */
#define INIT_SIZE 0x1000
46 struct fd_submit base
;
48 DECLARE_ARRAY(struct drm_msm_gem_submit_bo
, submit_bos
);
49 DECLARE_ARRAY(struct fd_bo
*, bos
);
51 /* maps fd_bo to idx in bos table: */
52 struct hash_table
*bo_table
;
54 struct slab_mempool ring_pool
;
56 /* hash-set of associated rings: */
59 struct fd_ringbuffer
*primary
;
61 /* Allow for sub-allocation of stateobj ring buffers (ie. sharing
62 * the same underlying bo)..
64 * We also rely on previous stateobj having been fully constructed
65 * so we can reclaim extra space at it's end.
67 struct fd_ringbuffer
*suballoc_ring
;
69 FD_DEFINE_CAST(fd_submit
, msm_submit
);
71 /* for FD_RINGBUFFER_GROWABLE rb's, tracks the 'finalized' cmdstream buffers
72 * and sizes. Ie. a finalized buffer can have no more commands appended to
76 struct fd_bo
*ring_bo
;
78 DECLARE_ARRAY(struct drm_msm_gem_submit_reloc
, relocs
);
81 static struct msm_cmd
*
82 cmd_new(struct fd_bo
*ring_bo
)
84 struct msm_cmd
*cmd
= malloc(sizeof(*cmd
));
85 cmd
->ring_bo
= fd_bo_ref(ring_bo
);
87 cmd
->nr_relocs
= cmd
->max_relocs
= 0;
93 cmd_free(struct msm_cmd
*cmd
)
95 fd_bo_del(cmd
->ring_bo
);
/* for _FD_RINGBUFFER_OBJECT rb's we need to track the bo's and flags to
 * later copy into the submit when the stateobj rb is later referenced by
 * a regular rb:
 */
struct msm_reloc_bo {
	struct fd_bo *bo;
	unsigned flags;
};
109 struct msm_ringbuffer
{
110 struct fd_ringbuffer base
;
112 /* for FD_RINGBUFFER_STREAMING rb's which are sub-allocated */
116 /* for _FD_RINGBUFFER_OBJECT case: */
118 struct fd_pipe
*pipe
;
119 DECLARE_ARRAY(struct msm_reloc_bo
, reloc_bos
);
120 struct set
*ring_set
;
122 /* for other cases: */
124 struct fd_submit
*submit
;
125 DECLARE_ARRAY(struct msm_cmd
*, cmds
);
129 struct msm_cmd
*cmd
; /* current cmd */
130 struct fd_bo
*ring_bo
;
132 FD_DEFINE_CAST(fd_ringbuffer
, msm_ringbuffer
);
/* forward decls needed before the ringbuffer funcs are defined: */
static void finalize_current_cmd(struct fd_ringbuffer *ring);
static struct fd_ringbuffer *msm_ringbuffer_init(
		struct msm_ringbuffer *msm_ring,
		uint32_t size, enum fd_ringbuffer_flags flags);
139 /* add (if needed) bo to submit and return index: */
141 append_bo(struct msm_submit
*submit
, struct fd_bo
*bo
, uint32_t flags
)
143 struct msm_bo
*msm_bo
= to_msm_bo(bo
);
146 /* NOTE: it is legal to use the same bo on different threads for
147 * different submits. But it is not legal to use the same submit
148 * from given threads.
150 idx
= READ_ONCE(msm_bo
->idx
);
152 if (unlikely((idx
>= submit
->nr_submit_bos
) ||
153 (submit
->submit_bos
[idx
].handle
!= bo
->handle
))) {
154 uint32_t hash
= _mesa_hash_pointer(bo
);
155 struct hash_entry
*entry
;
157 entry
= _mesa_hash_table_search_pre_hashed(submit
->bo_table
, hash
, bo
);
160 idx
= (uint32_t)(uintptr_t)entry
->data
;
162 idx
= APPEND(submit
, submit_bos
);
163 idx
= APPEND(submit
, bos
);
165 submit
->submit_bos
[idx
].flags
= 0;
166 submit
->submit_bos
[idx
].handle
= bo
->handle
;
167 submit
->submit_bos
[idx
].presumed
= 0;
169 submit
->bos
[idx
] = fd_bo_ref(bo
);
171 _mesa_hash_table_insert_pre_hashed(submit
->bo_table
, hash
, bo
,
172 (void *)(uintptr_t)idx
);
177 if (flags
& FD_RELOC_READ
)
178 submit
->submit_bos
[idx
].flags
|= MSM_SUBMIT_BO_READ
;
179 if (flags
& FD_RELOC_WRITE
)
180 submit
->submit_bos
[idx
].flags
|= MSM_SUBMIT_BO_WRITE
;
/* Add ring to the set (if not already present), taking a reference so it
 * stays alive for the lifetime of the set.
 */
static void
append_ring(struct set *set, struct fd_ringbuffer *ring)
{
	uint32_t hash = _mesa_hash_pointer(ring);

	if (!_mesa_set_search_pre_hashed(set, hash, ring)) {
		fd_ringbuffer_ref(ring);
		_mesa_set_add_pre_hashed(set, hash, ring);
	}
}
197 msm_submit_suballoc_ring_bo(struct fd_submit
*submit
,
198 struct msm_ringbuffer
*msm_ring
, uint32_t size
)
200 struct msm_submit
*msm_submit
= to_msm_submit(submit
);
201 unsigned suballoc_offset
= 0;
202 struct fd_bo
*suballoc_bo
= NULL
;
204 if (msm_submit
->suballoc_ring
) {
205 struct msm_ringbuffer
*suballoc_ring
=
206 to_msm_ringbuffer(msm_submit
->suballoc_ring
);
208 suballoc_bo
= suballoc_ring
->ring_bo
;
209 suballoc_offset
= fd_ringbuffer_size(msm_submit
->suballoc_ring
) +
210 suballoc_ring
->offset
;
212 suballoc_offset
= align(suballoc_offset
, 0x10);
214 if ((size
+ suballoc_offset
) > suballoc_bo
->size
) {
220 // TODO possibly larger size for streaming bo?
221 msm_ring
->ring_bo
= fd_bo_new_ring(
222 submit
->pipe
->dev
, 0x8000, 0);
223 msm_ring
->offset
= 0;
225 msm_ring
->ring_bo
= fd_bo_ref(suballoc_bo
);
226 msm_ring
->offset
= suballoc_offset
;
229 struct fd_ringbuffer
*old_suballoc_ring
= msm_submit
->suballoc_ring
;
231 msm_submit
->suballoc_ring
= fd_ringbuffer_ref(&msm_ring
->base
);
233 if (old_suballoc_ring
)
234 fd_ringbuffer_del(old_suballoc_ring
);
237 static struct fd_ringbuffer
*
238 msm_submit_new_ringbuffer(struct fd_submit
*submit
, uint32_t size
,
239 enum fd_ringbuffer_flags flags
)
241 struct msm_submit
*msm_submit
= to_msm_submit(submit
);
242 struct msm_ringbuffer
*msm_ring
;
244 msm_ring
= slab_alloc_st(&msm_submit
->ring_pool
);
246 msm_ring
->u
.submit
= submit
;
248 /* NOTE: needs to be before _suballoc_ring_bo() since it could
249 * increment the refcnt of the current ring
251 msm_ring
->base
.refcnt
= 1;
253 if (flags
& FD_RINGBUFFER_STREAMING
) {
254 msm_submit_suballoc_ring_bo(submit
, msm_ring
, size
);
256 if (flags
& FD_RINGBUFFER_GROWABLE
)
259 msm_ring
->offset
= 0;
260 msm_ring
->ring_bo
= fd_bo_new_ring(submit
->pipe
->dev
, size
, 0);
263 if (!msm_ringbuffer_init(msm_ring
, size
, flags
))
266 if (flags
& FD_RINGBUFFER_PRIMARY
) {
267 debug_assert(!msm_submit
->primary
);
268 msm_submit
->primary
= fd_ringbuffer_ref(&msm_ring
->base
);
271 return &msm_ring
->base
;
274 static struct drm_msm_gem_submit_reloc
*
275 handle_stateobj_relocs(struct msm_submit
*submit
, struct msm_ringbuffer
*ring
)
277 struct msm_cmd
*cmd
= ring
->cmd
;
278 struct drm_msm_gem_submit_reloc
*relocs
;
280 relocs
= malloc(cmd
->nr_relocs
* sizeof(*relocs
));
282 for (unsigned i
= 0; i
< cmd
->nr_relocs
; i
++) {
283 unsigned idx
= cmd
->relocs
[i
].reloc_idx
;
284 struct fd_bo
*bo
= ring
->u
.reloc_bos
[idx
].bo
;
287 if (ring
->u
.reloc_bos
[idx
].flags
& MSM_SUBMIT_BO_READ
)
288 flags
|= FD_RELOC_READ
;
289 if (ring
->u
.reloc_bos
[idx
].flags
& MSM_SUBMIT_BO_WRITE
)
290 flags
|= FD_RELOC_WRITE
;
292 relocs
[i
] = cmd
->relocs
[i
];
293 relocs
[i
].reloc_idx
= append_bo(submit
, bo
, flags
);
300 msm_submit_flush(struct fd_submit
*submit
, int in_fence_fd
,
301 int *out_fence_fd
, uint32_t *out_fence
)
303 struct msm_submit
*msm_submit
= to_msm_submit(submit
);
304 struct msm_pipe
*msm_pipe
= to_msm_pipe(submit
->pipe
);
305 struct drm_msm_gem_submit req
= {
306 .flags
= msm_pipe
->pipe
,
307 .queueid
= msm_pipe
->queue_id
,
311 debug_assert(msm_submit
->primary
);
313 finalize_current_cmd(msm_submit
->primary
);
314 append_ring(msm_submit
->ring_set
, msm_submit
->primary
);
316 unsigned nr_cmds
= 0;
317 unsigned nr_objs
= 0;
319 set_foreach(msm_submit
->ring_set
, entry
) {
320 struct fd_ringbuffer
*ring
= (void *)entry
->key
;
321 if (ring
->flags
& _FD_RINGBUFFER_OBJECT
) {
325 if (ring
!= msm_submit
->primary
)
326 finalize_current_cmd(ring
);
327 nr_cmds
+= to_msm_ringbuffer(ring
)->u
.nr_cmds
;
331 void *obj_relocs
[nr_objs
];
332 struct drm_msm_gem_submit_cmd cmds
[nr_cmds
];
333 unsigned i
= 0, o
= 0;
335 set_foreach(msm_submit
->ring_set
, entry
) {
336 struct fd_ringbuffer
*ring
= (void *)entry
->key
;
337 struct msm_ringbuffer
*msm_ring
= to_msm_ringbuffer(ring
);
339 debug_assert(i
< nr_cmds
);
341 // TODO handle relocs:
342 if (ring
->flags
& _FD_RINGBUFFER_OBJECT
) {
344 debug_assert(o
< nr_objs
);
346 void *relocs
= handle_stateobj_relocs(msm_submit
, msm_ring
);
347 obj_relocs
[o
++] = relocs
;
349 cmds
[i
].type
= MSM_SUBMIT_CMD_IB_TARGET_BUF
;
351 append_bo(msm_submit
, msm_ring
->ring_bo
, FD_RELOC_READ
);
352 cmds
[i
].submit_offset
= msm_ring
->offset
;
353 cmds
[i
].size
= offset_bytes(ring
->cur
, ring
->start
);
355 cmds
[i
].nr_relocs
= msm_ring
->cmd
->nr_relocs
;
356 cmds
[i
].relocs
= VOID2U64(relocs
);
360 for (unsigned j
= 0; j
< msm_ring
->u
.nr_cmds
; j
++) {
361 if (ring
->flags
& FD_RINGBUFFER_PRIMARY
) {
362 cmds
[i
].type
= MSM_SUBMIT_CMD_BUF
;
364 cmds
[i
].type
= MSM_SUBMIT_CMD_IB_TARGET_BUF
;
366 cmds
[i
].submit_idx
= append_bo(msm_submit
,
367 msm_ring
->u
.cmds
[j
]->ring_bo
, FD_RELOC_READ
);
368 cmds
[i
].submit_offset
= msm_ring
->offset
;
369 cmds
[i
].size
= msm_ring
->u
.cmds
[j
]->size
;
371 cmds
[i
].nr_relocs
= msm_ring
->u
.cmds
[j
]->nr_relocs
;
372 cmds
[i
].relocs
= VOID2U64(msm_ring
->u
.cmds
[j
]->relocs
);
379 if (in_fence_fd
!= -1) {
380 req
.flags
|= MSM_SUBMIT_FENCE_FD_IN
| MSM_SUBMIT_NO_IMPLICIT
;
381 req
.fence_fd
= in_fence_fd
;
385 req
.flags
|= MSM_SUBMIT_FENCE_FD_OUT
;
388 /* needs to be after get_cmd() as that could create bos/cmds table: */
389 req
.bos
= VOID2U64(msm_submit
->submit_bos
),
390 req
.nr_bos
= msm_submit
->nr_submit_bos
;
391 req
.cmds
= VOID2U64(cmds
),
392 req
.nr_cmds
= nr_cmds
;
394 DEBUG_MSG("nr_cmds=%u, nr_bos=%u", req
.nr_cmds
, req
.nr_bos
);
396 ret
= drmCommandWriteRead(submit
->pipe
->dev
->fd
, DRM_MSM_GEM_SUBMIT
,
399 ERROR_MSG("submit failed: %d (%s)", ret
, strerror(errno
));
400 msm_dump_submit(&req
);
403 *out_fence
= req
.fence
;
406 *out_fence_fd
= req
.fence_fd
;
409 for (unsigned o
= 0; o
< nr_objs
; o
++)
416 unref_rings(struct set_entry
*entry
)
418 struct fd_ringbuffer
*ring
= (void *)entry
->key
;
419 fd_ringbuffer_del(ring
);
423 msm_submit_destroy(struct fd_submit
*submit
)
425 struct msm_submit
*msm_submit
= to_msm_submit(submit
);
427 if (msm_submit
->primary
)
428 fd_ringbuffer_del(msm_submit
->primary
);
429 if (msm_submit
->suballoc_ring
)
430 fd_ringbuffer_del(msm_submit
->suballoc_ring
);
432 _mesa_hash_table_destroy(msm_submit
->bo_table
, NULL
);
433 _mesa_set_destroy(msm_submit
->ring_set
, unref_rings
);
435 // TODO it would be nice to have a way to debug_assert() if all
436 // rb's haven't been free'd back to the slab, because that is
437 // an indication that we are leaking bo's
438 slab_destroy(&msm_submit
->ring_pool
);
440 for (unsigned i
= 0; i
< msm_submit
->nr_bos
; i
++)
441 fd_bo_del(msm_submit
->bos
[i
]);
443 free(msm_submit
->submit_bos
);
444 free(msm_submit
->bos
);
448 static const struct fd_submit_funcs submit_funcs
= {
449 .new_ringbuffer
= msm_submit_new_ringbuffer
,
450 .flush
= msm_submit_flush
,
451 .destroy
= msm_submit_destroy
,
455 msm_submit_new(struct fd_pipe
*pipe
)
457 struct msm_submit
*msm_submit
= calloc(1, sizeof(*msm_submit
));
458 struct fd_submit
*submit
;
460 msm_submit
->bo_table
= _mesa_hash_table_create(NULL
,
461 _mesa_hash_pointer
, _mesa_key_pointer_equal
);
462 msm_submit
->ring_set
= _mesa_set_create(NULL
,
463 _mesa_hash_pointer
, _mesa_key_pointer_equal
);
465 slab_create(&msm_submit
->ring_pool
, sizeof(struct msm_ringbuffer
), 16);
467 submit
= &msm_submit
->base
;
469 submit
->funcs
= &submit_funcs
;
476 finalize_current_cmd(struct fd_ringbuffer
*ring
)
478 struct msm_ringbuffer
*msm_ring
= to_msm_ringbuffer(ring
);
480 debug_assert(!(ring
->flags
& _FD_RINGBUFFER_OBJECT
));
485 debug_assert(msm_ring
->cmd
->ring_bo
== msm_ring
->ring_bo
);
487 unsigned idx
= APPEND(&msm_ring
->u
, cmds
);
489 msm_ring
->u
.cmds
[idx
] = msm_ring
->cmd
;
490 msm_ring
->cmd
= NULL
;
492 msm_ring
->u
.cmds
[idx
]->size
= offset_bytes(ring
->cur
, ring
->start
);
496 msm_ringbuffer_grow(struct fd_ringbuffer
*ring
, uint32_t size
)
498 struct msm_ringbuffer
*msm_ring
= to_msm_ringbuffer(ring
);
499 struct fd_pipe
*pipe
= msm_ring
->u
.submit
->pipe
;
501 debug_assert(ring
->flags
& FD_RINGBUFFER_GROWABLE
);
503 finalize_current_cmd(ring
);
505 fd_bo_del(msm_ring
->ring_bo
);
506 msm_ring
->ring_bo
= fd_bo_new_ring(pipe
->dev
, size
, 0);
507 msm_ring
->cmd
= cmd_new(msm_ring
->ring_bo
);
509 ring
->start
= fd_bo_map(msm_ring
->ring_bo
);
510 ring
->end
= &(ring
->start
[size
/4]);
511 ring
->cur
= ring
->start
;
516 msm_ringbuffer_emit_reloc(struct fd_ringbuffer
*ring
,
517 const struct fd_reloc
*reloc
)
519 struct msm_ringbuffer
*msm_ring
= to_msm_ringbuffer(ring
);
520 struct fd_pipe
*pipe
;
523 if (ring
->flags
& _FD_RINGBUFFER_OBJECT
) {
524 unsigned idx
= APPEND(&msm_ring
->u
, reloc_bos
);
526 msm_ring
->u
.reloc_bos
[idx
].bo
= fd_bo_ref(reloc
->bo
);
527 msm_ring
->u
.reloc_bos
[idx
].flags
= reloc
->flags
;
529 /* this gets fixed up at submit->flush() time, since this state-
530 * object rb can be used with many different submits
534 pipe
= msm_ring
->u
.pipe
;
536 struct msm_submit
*msm_submit
=
537 to_msm_submit(msm_ring
->u
.submit
);
539 reloc_idx
= append_bo(msm_submit
, reloc
->bo
, reloc
->flags
);
541 pipe
= msm_ring
->u
.submit
->pipe
;
544 struct drm_msm_gem_submit_reloc
*r
;
545 unsigned idx
= APPEND(msm_ring
->cmd
, relocs
);
547 r
= &msm_ring
->cmd
->relocs
[idx
];
549 r
->reloc_idx
= reloc_idx
;
550 r
->reloc_offset
= reloc
->offset
;
552 r
->shift
= reloc
->shift
;
553 r
->submit_offset
= offset_bytes(ring
->cur
, ring
->start
) +
558 if (pipe
->gpu_id
>= 500) {
559 idx
= APPEND(msm_ring
->cmd
, relocs
);
560 r
= &msm_ring
->cmd
->relocs
[idx
];
562 r
->reloc_idx
= reloc_idx
;
563 r
->reloc_offset
= reloc
->offset
;
565 r
->shift
= reloc
->shift
- 32;
566 r
->submit_offset
= offset_bytes(ring
->cur
, ring
->start
) +
574 append_stateobj_rings(struct msm_submit
*submit
, struct fd_ringbuffer
*target
)
576 struct msm_ringbuffer
*msm_target
= to_msm_ringbuffer(target
);
578 debug_assert(target
->flags
& _FD_RINGBUFFER_OBJECT
);
580 set_foreach(msm_target
->u
.ring_set
, entry
) {
581 struct fd_ringbuffer
*ring
= (void *)entry
->key
;
583 append_ring(submit
->ring_set
, ring
);
585 if (ring
->flags
& _FD_RINGBUFFER_OBJECT
) {
586 append_stateobj_rings(submit
, ring
);
592 msm_ringbuffer_emit_reloc_ring(struct fd_ringbuffer
*ring
,
593 struct fd_ringbuffer
*target
, uint32_t cmd_idx
)
595 struct msm_ringbuffer
*msm_target
= to_msm_ringbuffer(target
);
596 struct msm_ringbuffer
*msm_ring
= to_msm_ringbuffer(ring
);
600 if ((target
->flags
& FD_RINGBUFFER_GROWABLE
) &&
601 (cmd_idx
< msm_target
->u
.nr_cmds
)) {
602 bo
= msm_target
->u
.cmds
[cmd_idx
]->ring_bo
;
603 size
= msm_target
->u
.cmds
[cmd_idx
]->size
;
605 bo
= msm_target
->ring_bo
;
606 size
= offset_bytes(target
->cur
, target
->start
);
609 msm_ringbuffer_emit_reloc(ring
, &(struct fd_reloc
){
611 .flags
= FD_RELOC_READ
,
612 .offset
= msm_target
->offset
,
615 if ((target
->flags
& _FD_RINGBUFFER_OBJECT
) &&
616 !(ring
->flags
& _FD_RINGBUFFER_OBJECT
)) {
617 struct msm_submit
*msm_submit
= to_msm_submit(msm_ring
->u
.submit
);
619 append_stateobj_rings(msm_submit
, target
);
622 if (ring
->flags
& _FD_RINGBUFFER_OBJECT
) {
623 append_ring(msm_ring
->u
.ring_set
, target
);
625 struct msm_submit
*msm_submit
= to_msm_submit(msm_ring
->u
.submit
);
626 append_ring(msm_submit
->ring_set
, target
);
633 msm_ringbuffer_cmd_count(struct fd_ringbuffer
*ring
)
635 if (ring
->flags
& FD_RINGBUFFER_GROWABLE
)
636 return to_msm_ringbuffer(ring
)->u
.nr_cmds
+ 1;
641 msm_ringbuffer_destroy(struct fd_ringbuffer
*ring
)
643 struct msm_ringbuffer
*msm_ring
= to_msm_ringbuffer(ring
);
645 fd_bo_del(msm_ring
->ring_bo
);
647 cmd_free(msm_ring
->cmd
);
649 if (ring
->flags
& _FD_RINGBUFFER_OBJECT
) {
650 for (unsigned i
= 0; i
< msm_ring
->u
.nr_reloc_bos
; i
++) {
651 fd_bo_del(msm_ring
->u
.reloc_bos
[i
].bo
);
654 _mesa_set_destroy(msm_ring
->u
.ring_set
, unref_rings
);
656 free(msm_ring
->u
.reloc_bos
);
659 struct fd_submit
*submit
= msm_ring
->u
.submit
;
661 for (unsigned i
= 0; i
< msm_ring
->u
.nr_cmds
; i
++) {
662 cmd_free(msm_ring
->u
.cmds
[i
]);
665 free(msm_ring
->u
.cmds
);
666 slab_free_st(&to_msm_submit(submit
)->ring_pool
, msm_ring
);
670 static const struct fd_ringbuffer_funcs ring_funcs
= {
671 .grow
= msm_ringbuffer_grow
,
672 .emit_reloc
= msm_ringbuffer_emit_reloc
,
673 .emit_reloc_ring
= msm_ringbuffer_emit_reloc_ring
,
674 .cmd_count
= msm_ringbuffer_cmd_count
,
675 .destroy
= msm_ringbuffer_destroy
,
678 static inline struct fd_ringbuffer
*
679 msm_ringbuffer_init(struct msm_ringbuffer
*msm_ring
, uint32_t size
,
680 enum fd_ringbuffer_flags flags
)
682 struct fd_ringbuffer
*ring
= &msm_ring
->base
;
684 debug_assert(msm_ring
->ring_bo
);
686 uint8_t *base
= fd_bo_map(msm_ring
->ring_bo
);
687 ring
->start
= (void *)(base
+ msm_ring
->offset
);
688 ring
->end
= &(ring
->start
[size
/4]);
689 ring
->cur
= ring
->start
;
694 ring
->funcs
= &ring_funcs
;
696 msm_ring
->u
.cmds
= NULL
;
697 msm_ring
->u
.nr_cmds
= msm_ring
->u
.max_cmds
= 0;
699 msm_ring
->cmd
= cmd_new(msm_ring
->ring_bo
);
704 struct fd_ringbuffer
*
705 msm_ringbuffer_new_object(struct fd_pipe
*pipe
, uint32_t size
)
707 struct msm_ringbuffer
*msm_ring
= malloc(sizeof(*msm_ring
));
709 msm_ring
->u
.pipe
= pipe
;
710 msm_ring
->offset
= 0;
711 msm_ring
->ring_bo
= fd_bo_new_ring(pipe
->dev
, size
, 0);
712 msm_ring
->base
.refcnt
= 1;
714 msm_ring
->u
.reloc_bos
= NULL
;
715 msm_ring
->u
.nr_reloc_bos
= msm_ring
->u
.max_reloc_bos
= 0;
717 msm_ring
->u
.ring_set
= _mesa_set_create(NULL
,
718 _mesa_hash_pointer
, _mesa_key_pointer_equal
);
720 return msm_ringbuffer_init(msm_ring
, size
, _FD_RINGBUFFER_OBJECT
);