/*
 * Copyright 2007 Nouveau Project
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
 * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <assert.h>

#include "nouveau_drmif.h"
#include "nouveau_dma.h"
#include "nouveau_local.h"

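/* Release a GPU memory allocation obtained via DRM_NOUVEAU_MEM_ALLOC,
 * dropping any CPU mapping first.  Safe to call on an already-freed
 * allocation: ma->size is used as the "allocated" flag.
 */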
static void
nouveau_mem_free(struct nouveau_device *dev, struct drm_nouveau_mem_alloc *ma,
		 void **map)
{
	struct nouveau_device_priv *nvdev = nouveau_device(dev);
	struct drm_nouveau_mem_free mf;

	if (map && *map) {
		drmUnmap(*map, ma->size);
		*map = NULL;
	}

	if (ma->size) {
		mf.offset = ma->offset;
		mf.flags = ma->flags;
		drmCommandWrite(nvdev->fd, DRM_NOUVEAU_MEM_FREE,
				&mf, sizeof(mf));
		ma->size = 0;
	}
}

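/* Allocate GPU memory through the DRM_NOUVEAU_MEM_ALLOC ioctl and, if the
 * caller passed a map pointer, mmap it into the client's address space.
 * On mapping failure the allocation is released again before returning.
 */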
static int
nouveau_mem_alloc(struct nouveau_device *dev, unsigned size, unsigned align,
		  uint32_t flags, struct drm_nouveau_mem_alloc *ma, void **map)
{
	struct nouveau_device_priv *nvdev = nouveau_device(dev);
	int ret;

	ma->alignment = align;
	ma->size = size;
	ma->flags = flags;
	if (map)
		ma->flags |= NOUVEAU_MEM_MAPPED;
	ret = drmCommandWriteRead(nvdev->fd, DRM_NOUVEAU_MEM_ALLOC, ma,
				  sizeof(struct drm_nouveau_mem_alloc));
	if (ret)
		return ret;

	if (map) {
		ret = drmMap(nvdev->fd, ma->map_handle, ma->size, map);
		if (ret) {
			*map = NULL;
			nouveau_mem_free(dev, ma, map);
			return ret;
		}
	}

	return 0;
}

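/* (Re)allocate the GPU backing storage of a buffer object.  Any existing
 * allocation of a different size is freed first; a size of zero simply
 * releases the storage.  The requested placement (VRAM/GART) is translated
 * into the corresponding NOUVEAU_MEM_* flags.
 */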
static int
nouveau_bo_realloc_gpu(struct nouveau_bo_priv *nvbo, uint32_t flags, int size)
{
	int ret;

	if (nvbo->drm.size && nvbo->drm.size != size)
		nouveau_mem_free(nvbo->base.device, &nvbo->drm, &nvbo->map);

	if (size && !nvbo->drm.size) {
		if (flags) {
			nvbo->drm.flags = 0;
			if (flags & NOUVEAU_BO_VRAM)
				nvbo->drm.flags |= NOUVEAU_MEM_FB;
			if (flags & NOUVEAU_BO_GART)
				nvbo->drm.flags |= (NOUVEAU_MEM_AGP |
						    NOUVEAU_MEM_PCI);
			nvbo->drm.flags |= NOUVEAU_MEM_MAPPED;
		}

		ret = nouveau_mem_alloc(nvbo->base.device, size,
					nvbo->drm.alignment, nvbo->drm.flags,
					&nvbo->drm, &nvbo->map);
		if (ret) {
			assert(0);
			return ret;
		}
	}

	return 0;
}

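/* Fence callback: releases a temporary sub-allocation once the fence that
 * guards it has been signalled.
 */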
static void
nouveau_bo_tmp_del(void *priv)
{
	struct nouveau_resource *r = priv;

	nouveau_fence_ref(NULL, (struct nouveau_fence **)&r->priv);
	nouveau_resource_free(&r);
}

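/* Carve a temporary buffer of the given size out of the device's scratch
 * area (sa_heap), retrying after flushing outstanding fences until space
 * becomes available.  The region is tied to the given fence (or a new one)
 * and released from nouveau_bo_tmp_del() when that fence signals.
 */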
static struct nouveau_resource *
nouveau_bo_tmp(struct nouveau_channel *chan, unsigned size,
	       struct nouveau_fence *fence)
{
	struct nouveau_device_priv *nvdev = nouveau_device(chan->device);
	struct nouveau_resource *r = NULL;
	struct nouveau_fence *ref = NULL;

	if (fence)
		nouveau_fence_ref(fence, &ref);
	else
		nouveau_fence_new(chan, &ref);
	assert(ref);

	while (nouveau_resource_alloc(nvdev->sa_heap, size, ref, &r))
		nouveau_fence_flush(chan);
	nouveau_fence_signal_cb(ref, nouveau_bo_tmp_del, r);

	return r;
}

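/* Per-device setup: allocate a 128KiB GART scratch area and the
 * sub-allocation heap used for staging user buffers during validation.
 */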
int
nouveau_bo_init(struct nouveau_device *dev)
{
	struct nouveau_device_priv *nvdev = nouveau_device(dev);
	int ret;

	ret = nouveau_mem_alloc(dev, 128*1024, 0, NOUVEAU_MEM_AGP |
				NOUVEAU_MEM_PCI, &nvdev->sa, &nvdev->sa_map);
	if (ret)
		return ret;

	ret = nouveau_resource_init(&nvdev->sa_heap, 0, nvdev->sa.size);
	if (ret) {
		nouveau_mem_free(dev, &nvdev->sa, &nvdev->sa_map);
		return ret;
	}

	return 0;
}

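/* Per-device teardown: release the scratch area allocated in
 * nouveau_bo_init().
 */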
void
nouveau_bo_takedown(struct nouveau_device *dev)
{
	struct nouveau_device_priv *nvdev = nouveau_device(dev);

	nouveau_mem_free(dev, &nvdev->sa, &nvdev->sa_map);
}

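/* Create a new buffer object.  Pinned buffers get GPU memory immediately;
 * everything else starts out in system memory and is only moved to the GPU
 * when it is first validated.
 */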
int
nouveau_bo_new(struct nouveau_device *dev, uint32_t flags, int align,
	       int size, struct nouveau_bo **bo)
{
	struct nouveau_bo_priv *nvbo;
	int ret;

	if (!dev || !bo || *bo)
		return -EINVAL;

	nvbo = calloc(1, sizeof(struct nouveau_bo_priv));
	if (!nvbo)
		return -ENOMEM;
	nvbo->base.device = dev;
	nvbo->drm.alignment = align;

	if (flags & NOUVEAU_BO_PIN) {
		ret = nouveau_bo_realloc_gpu(nvbo, flags, size);
		if (ret) {
			free(nvbo);
			return ret;
		}
	} else {
		nvbo->sysmem = malloc(size);
		if (!nvbo->sysmem) {
			free(nvbo);
			return -ENOMEM;
		}
	}

	nvbo->base.size = size;
	nvbo->base.offset = nvbo->drm.offset;
	nvbo->base.handle = bo_to_ptr(nvbo);
	nvbo->refcount = 1;
	*bo = &nvbo->base;
	return 0;
}

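/* Wrap a caller-provided chunk of system memory in a buffer object.  The
 * memory is not copied and is never freed by the library (nvbo->user).
 */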
int
nouveau_bo_user(struct nouveau_device *dev, void *ptr, int size,
		struct nouveau_bo **bo)
{
	struct nouveau_bo_priv *nvbo;

	if (!dev || !bo || *bo)
		return -EINVAL;

	nvbo = calloc(1, sizeof(*nvbo));
	if (!nvbo)
		return -ENOMEM;
	nvbo->base.device = dev;

	nvbo->sysmem = ptr;
	nvbo->user = 1;

	nvbo->base.size = size;
	nvbo->base.offset = nvbo->drm.offset;
	nvbo->base.handle = bo_to_ptr(nvbo);
	nvbo->refcount = 1;
	*bo = &nvbo->base;
	return 0;
}

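/* Take an additional reference on an existing buffer object identified by
 * its handle (which is simply the pointer to the private structure).
 */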
int
nouveau_bo_ref(struct nouveau_device *dev, uint64_t handle,
	       struct nouveau_bo **bo)
{
	struct nouveau_bo_priv *nvbo = ptr_to_bo(handle);

	if (!dev || !bo || *bo)
		return -EINVAL;

	nvbo->refcount++;
	*bo = &nvbo->base;
	return 0;
}

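/* Drop a reference on a buffer object.  When the last reference goes away,
 * wait for any pending GPU access, release the GPU backing storage and free
 * the system-memory copy (unless it is user-owned).
 */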
void
nouveau_bo_del(struct nouveau_bo **bo)
{
	struct nouveau_bo_priv *nvbo;

	if (!bo || !*bo)
		return;
	nvbo = nouveau_bo(*bo);
	*bo = NULL;

	if (--nvbo->refcount)
		return;

	if (nvbo->fence)
		nouveau_fence_wait(&nvbo->fence);

	nouveau_bo_realloc_gpu(nvbo, 0, 0);
	if (nvbo->sysmem && !nvbo->user)
		free(nvbo->sysmem);
	free(nvbo);
}

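/* Map a buffer for CPU access.  A write mapping waits for all GPU access to
 * finish; a read-only mapping only waits for outstanding GPU writes.
 */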
int
nouveau_bo_map(struct nouveau_bo *bo, uint32_t flags)
{
	struct nouveau_bo_priv *nvbo = nouveau_bo(bo);

	if (!nvbo)
		return -EINVAL;

	if (flags & NOUVEAU_BO_WR)
		nouveau_fence_wait(&nvbo->fence);
	else
		nouveau_fence_wait(&nvbo->wr_fence);

	if (nvbo->sysmem)
		bo->map = nvbo->sysmem;
	else
		bo->map = nvbo->map;
	return 0;
}

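/* Drop the CPU mapping.  No unmap ioctl is needed; the pointer is simply
 * cleared.
 */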
void
nouveau_bo_unmap(struct nouveau_bo *bo)
{
	bo->map = NULL;
}

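/* Copy the system-memory shadow of a buffer into its GPU mapping, waiting
 * for any fence still covering the buffer first.
 */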
static int
nouveau_bo_upload(struct nouveau_bo_priv *nvbo)
{
	if (nvbo->fence)
		nouveau_fence_wait(&nvbo->fence);
	memcpy(nvbo->map, nvbo->sysmem, nvbo->drm.size);
	return 0;
}

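/* Fast path for user buffers: copy the data into a fenced temporary region
 * of the GART scratch area instead of giving the buffer its own GPU
 * allocation.  Returns non-zero to make the caller fall back to the regular
 * validation path (charge limit exceeded, non-GART placement, or no scratch
 * space available).
 */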
static int
nouveau_bo_validate_user(struct nouveau_channel *chan, struct nouveau_bo *bo,
			 struct nouveau_fence *fence, uint32_t flags)
{
	struct nouveau_channel_priv *nvchan = nouveau_channel(chan);
	struct nouveau_device_priv *nvdev = nouveau_device(chan->device);
	struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
	struct nouveau_resource *r;

	if (nvchan->user_charge + bo->size > nvdev->sa.size)
		return 1;
	nvchan->user_charge += bo->size;

	if (!(flags & NOUVEAU_BO_GART))
		return 1;

	r = nouveau_bo_tmp(chan, bo->size, fence);
	if (!r)
		return 1;

	memcpy(nvdev->sa_map + r->start, nvbo->sysmem, bo->size);

	nvbo->offset = nvdev->sa.offset + r->start;
	nvbo->flags = NOUVEAU_BO_GART;
	return 0;
}

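/* Regular validation path: make sure the buffer has GPU backing storage,
 * uploading the system-memory contents the first time (and on every
 * validation for user buffers), then record its final offset and placement.
 */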
static int
nouveau_bo_validate_bo(struct nouveau_channel *chan, struct nouveau_bo *bo,
		       struct nouveau_fence *fence, uint32_t flags)
{
	struct nouveau_bo_priv *nvbo = nouveau_bo(bo);

	if (!nvbo->drm.size) {
		nouveau_bo_realloc_gpu(nvbo, flags, nvbo->base.size);
		nouveau_bo_upload(nvbo);
		if (!nvbo->user) {
			free(nvbo->sysmem);
			nvbo->sysmem = NULL;
		}
	} else if (nvbo->user) {
		nouveau_bo_upload(nvbo);
	}

	nvbo->offset = nvbo->drm.offset;
	if (nvbo->drm.flags & (NOUVEAU_MEM_AGP | NOUVEAU_MEM_PCI))
		nvbo->flags = NOUVEAU_BO_GART;
	else
		nvbo->flags = NOUVEAU_BO_VRAM;

	return 0;
}

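/* Validate a buffer for use on a channel: user buffers first try the
 * scratch-area fast path and fall back to a real GPU allocation.  The buffer
 * is then tagged with the fence (and the write fence when written) so later
 * maps and deletions can synchronise against the GPU.
 */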
int
nouveau_bo_validate(struct nouveau_channel *chan, struct nouveau_bo *bo,
		    struct nouveau_fence *fence, uint32_t flags)
{
	struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
	int ret;

	assert(bo->map == NULL);

	if (nvbo->user) {
		ret = nouveau_bo_validate_user(chan, bo, fence, flags);
		if (ret) {
			ret = nouveau_bo_validate_bo(chan, bo, fence, flags);
			if (ret)
				return ret;
		}
	} else {
		ret = nouveau_bo_validate_bo(chan, bo, fence, flags);
		if (ret)
			return ret;
	}

	if (flags & NOUVEAU_BO_WR)
		nouveau_fence_ref(fence, &nvbo->wr_fence);
	nouveau_fence_ref(fence, &nvbo->fence);
	return 0;
}