87bb821a748f30dca3ffc7ee6dfc6eedeeb89e13
[mesa.git] / src / panfrost / lib / pan_pool.c
1 /*
2 * © Copyright 2018 Alyssa Rosenzweig
3 * Copyright (C) 2019 Collabora, Ltd.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 *
24 */
25
26 #include "util/hash_table.h"
27 #include "pan_bo.h"
28 #include "pan_pool.h"
29
/* Transient command stream pooling: command stream uploads try to simply copy
 * into wherever we left off. If there isn't space, we allocate a new entry
 * into the pool and copy there */
33
34 static struct panfrost_bo *
35 panfrost_pool_alloc_backing(struct pan_pool *pool, size_t bo_sz)
36 {
37 /* We don't know what the BO will be used for, so let's flag it
38 * RW and attach it to both the fragment and vertex/tiler jobs.
39 * TODO: if we want fine grained BO assignment we should pass
40 * flags to this function and keep the read/write,
41 * fragment/vertex+tiler pools separate.
42 */
43 struct panfrost_bo *bo = panfrost_bo_create(pool->dev, bo_sz,
44 pool->create_flags);
45
46 uintptr_t flags = PAN_BO_ACCESS_PRIVATE |
47 PAN_BO_ACCESS_RW |
48 PAN_BO_ACCESS_VERTEX_TILER |
49 PAN_BO_ACCESS_FRAGMENT;
50
51 _mesa_hash_table_insert(pool->bos, bo, (void *) flags);
52
53 pool->transient_bo = bo;
54 pool->transient_offset = 0;
55
56 return bo;
57 }
58
59 struct pan_pool
60 panfrost_create_pool(void *memctx, struct panfrost_device *dev,
61 unsigned create_flags, bool prealloc)
62 {
63 struct pan_pool pool = {
64 .dev = dev,
65 .create_flags = create_flags,
66 .transient_offset = 0,
67 .transient_bo = NULL
68 };
69
70 pool.bos = _mesa_hash_table_create(memctx, _mesa_hash_pointer,
71 _mesa_key_pointer_equal);
72
73 if (prealloc)
74 panfrost_pool_alloc_backing(&pool, TRANSIENT_SLAB_SIZE);
75
76 return pool;
77 }
78
79 struct panfrost_transfer
80 panfrost_pool_alloc_aligned(struct pan_pool *pool, size_t sz, unsigned alignment)
81 {
82 assert(alignment == util_next_power_of_two(alignment));
83
84 /* Find or create a suitable BO */
85 struct panfrost_bo *bo = pool->transient_bo;
86 unsigned offset = ALIGN_POT(pool->transient_offset, alignment);
87
88 /* If we don't fit, allocate a new backing */
89 if (unlikely(bo == NULL || (offset + sz) >= TRANSIENT_SLAB_SIZE)) {
90 bo = panfrost_pool_alloc_backing(pool,
91 ALIGN_POT(MAX2(TRANSIENT_SLAB_SIZE, sz), 4096));
92 offset = 0;
93 }
94
95 pool->transient_offset = offset + sz;
96
97 struct panfrost_transfer ret = {
98 .cpu = bo->cpu + offset,
99 .gpu = bo->gpu + offset,
100 };
101
102 return ret;
103 }
104
mali_ptr
panfrost_pool_upload(struct pan_pool *pool, const void *data, size_t sz)
{
        /* Upload `sz` bytes from `data` into the pool, aligned to the data's
         * own size, and return the GPU address of the copy.
         * NOTE(review): the aligned path asserts that alignment is a power of
         * two, so this wrapper implicitly assumes `sz` is a power of two —
         * confirm at call sites before using it for arbitrary sizes. */
        return panfrost_pool_upload_aligned(pool, data, sz, sz);
}
110
111 mali_ptr
112 panfrost_pool_upload_aligned(struct pan_pool *pool, const void *data, size_t sz, unsigned alignment)
113 {
114 struct panfrost_transfer transfer = panfrost_pool_alloc_aligned(pool, sz, alignment);
115 memcpy(transfer.cpu, data, sz);
116 return transfer.gpu;
117 }