f74e0a498cdb417d5f96a8180afe16df82657939
[mesa.git] / src / gallium / winsys / r600 / drm / r600_bo.c
1 /*
2 * Copyright 2010 Dave Airlie
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * Dave Airlie
25 */
26 #include "r600_priv.h"
27 #include "r600d.h"
28 #include "state_tracker/drm_driver.h"
29 #include "radeon_drm.h"
30
31 struct r600_bo *r600_bo(struct radeon *radeon,
32 unsigned size, unsigned alignment,
33 unsigned binding, unsigned usage)
34 {
35 struct r600_bo *bo;
36 struct radeon_bo *rbo;
37 uint32_t initial_domain, domains;
38
39 /* Staging resources particpate in transfers and blits only
40 * and are used for uploads and downloads from regular
41 * resources. We generate them internally for some transfers.
42 */
43 if (usage == PIPE_USAGE_STAGING)
44 domains = RADEON_GEM_DOMAIN_CPU | RADEON_GEM_DOMAIN_GTT;
45 else
46 domains = (RADEON_GEM_DOMAIN_CPU |
47 RADEON_GEM_DOMAIN_GTT |
48 RADEON_GEM_DOMAIN_VRAM);
49
50 if (binding & (PIPE_BIND_CONSTANT_BUFFER | PIPE_BIND_VERTEX_BUFFER | PIPE_BIND_INDEX_BUFFER)) {
51 bo = r600_bomgr_bo_create(radeon->bomgr, size, alignment, *radeon->cfence);
52 if (bo) {
53 bo->domains = domains;
54 return bo;
55 }
56 }
57
58 switch(usage) {
59 case PIPE_USAGE_DYNAMIC:
60 case PIPE_USAGE_STREAM:
61 case PIPE_USAGE_STAGING:
62 initial_domain = RADEON_GEM_DOMAIN_GTT;
63 break;
64 case PIPE_USAGE_DEFAULT:
65 case PIPE_USAGE_STATIC:
66 case PIPE_USAGE_IMMUTABLE:
67 default:
68 initial_domain = RADEON_GEM_DOMAIN_VRAM;
69 break;
70 }
71 rbo = radeon_bo(radeon, 0, size, alignment, initial_domain);
72 if (rbo == NULL) {
73 return NULL;
74 }
75
76 bo = calloc(1, sizeof(struct r600_bo));
77 bo->size = size;
78 bo->alignment = alignment;
79 bo->domains = domains;
80 bo->bo = rbo;
81 if (binding & (PIPE_BIND_CONSTANT_BUFFER | PIPE_BIND_VERTEX_BUFFER | PIPE_BIND_INDEX_BUFFER)) {
82 r600_bomgr_bo_init(radeon->bomgr, bo);
83 }
84
85 pipe_reference_init(&bo->reference, 1);
86 return bo;
87 }
88
89 struct r600_bo *r600_bo_handle(struct radeon *radeon, struct winsys_handle *whandle,
90 unsigned *stride, unsigned *array_mode)
91 {
92 struct r600_bo *bo = calloc(1, sizeof(struct r600_bo));
93 struct radeon_bo *rbo;
94
95 rbo = bo->bo = radeon_bo(radeon, whandle->handle, 0, 0, 0);
96 if (rbo == NULL) {
97 free(bo);
98 return NULL;
99 }
100
101 pipe_reference_init(&bo->reference, 1);
102 bo->size = rbo->size;
103 bo->domains = (RADEON_GEM_DOMAIN_CPU |
104 RADEON_GEM_DOMAIN_GTT |
105 RADEON_GEM_DOMAIN_VRAM);
106
107 if (stride)
108 *stride = whandle->stride;
109
110 radeon_bo_get_tiling_flags(radeon, rbo, &bo->tiling_flags);
111 if (array_mode) {
112 if (bo->tiling_flags) {
113 if (bo->tiling_flags & RADEON_TILING_MACRO)
114 *array_mode = V_0280A0_ARRAY_2D_TILED_THIN1;
115 else if (bo->tiling_flags & RADEON_TILING_MICRO)
116 *array_mode = V_0280A0_ARRAY_1D_TILED_THIN1;
117 } else {
118 *array_mode = 0;
119 }
120 }
121 return bo;
122 }
123
/* Map a BO for CPU access and return a pointer to its data (plus the
 * sub-allocation offset). Honors PIPE_TRANSFER_UNSYNCHRONIZED and
 * PIPE_TRANSFER_DONTBLOCK usage flags; ctx, if non-NULL, is a
 * pipe_context used to flush queued commands referencing this BO.
 * Returns NULL when the BO is busy and the caller asked not to block,
 * or when mapping/waiting fails. */
void *r600_bo_map(struct radeon *radeon, struct r600_bo *bo, unsigned usage, void *ctx)
{
	struct pipe_context *pctx = ctx;

	/* Unsynchronized: map immediately without any busy/idle checks. */
	if (usage & PIPE_TRANSFER_UNSYNCHRONIZED) {
		radeon_bo_map(radeon, bo->bo);
		return (uint8_t *) bo->bo->data + bo->offset;
	}

	/* refcount > 1 is used here as "possibly referenced by queued GPU
	 * work"; flush the context so that work gets submitted before we
	 * wait on the BO.  NOTE(review): this is a heuristic based on the
	 * reference count, not a direct busy query. */
	if (p_atomic_read(&bo->bo->reference.count) > 1) {
		if (usage & PIPE_TRANSFER_DONTBLOCK) {
			return NULL;
		}
		if (ctx) {
			pctx->flush(pctx, NULL);
		}
	}

	/* Non-blocking path: bail out if the kernel reports the BO busy. */
	if (usage & PIPE_TRANSFER_DONTBLOCK) {
		uint32_t domain;

		if (radeon_bo_busy(radeon, bo->bo, &domain))
			return NULL;
		if (radeon_bo_map(radeon, bo->bo)) {
			return NULL;
		}
		goto out;
	}

	/* Blocking path: map, then wait for the GPU to finish with the BO. */
	radeon_bo_map(radeon, bo->bo);
	if (radeon_bo_wait(radeon, bo->bo)) {
		radeon_bo_unmap(radeon, bo->bo);
		return NULL;
	}

out:
	return (uint8_t *) bo->bo->data + bo->offset;
}
162
/* Release the CPU mapping obtained by r600_bo_map(). */
void r600_bo_unmap(struct radeon *radeon, struct r600_bo *bo)
{
	radeon_bo_unmap(radeon, bo->bo);
}
167
168 void r600_bo_destroy(struct radeon *radeon, struct r600_bo *bo)
169 {
170 if (bo->manager_id) {
171 if (!r600_bomgr_bo_destroy(radeon->bomgr, bo)) {
172 /* destroy is delayed by buffer manager */
173 return;
174 }
175 }
176 radeon_bo_reference(radeon, &bo->bo, NULL);
177 free(bo);
178 }
179
180 boolean r600_bo_get_winsys_handle(struct radeon *radeon, struct r600_bo *bo,
181 unsigned stride, struct winsys_handle *whandle)
182 {
183 whandle->stride = stride;
184 switch(whandle->type) {
185 case DRM_API_HANDLE_TYPE_KMS:
186 whandle->handle = bo->bo->handle;
187 break;
188 case DRM_API_HANDLE_TYPE_SHARED:
189 if (radeon_bo_get_name(radeon, bo->bo, &whandle->handle))
190 return FALSE;
191 break;
192 default:
193 return FALSE;
194 }
195
196 return TRUE;
197 }