76b98bed675a46c4821df90e310db4d56d89d858
[mesa.git] / src / gallium / winsys / drm / nouveau / nouveau_bo.c
1 /*
2 * Copyright 2007 Nouveau Project
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
18 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
19 * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
20 * SOFTWARE.
21 */
22
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <assert.h>

#include "nouveau_drmif.h"
#include "nouveau_dma.h"
#include "nouveau_local.h"
31
32 static void
33 nouveau_mem_free(struct nouveau_device *dev, struct drm_nouveau_mem_alloc *ma,
34 void **map)
35 {
36 struct nouveau_device_priv *nvdev = nouveau_device(dev);
37 struct drm_nouveau_mem_free mf;
38
39 if (map && *map) {
40 drmUnmap(*map, ma->size);
41 *map = NULL;
42 }
43
44 if (ma->size) {
45 mf.offset = ma->offset;
46 mf.flags = ma->flags;
47 drmCommandWrite(nvdev->fd, DRM_NOUVEAU_MEM_FREE,
48 &mf, sizeof(mf));
49 ma->size = 0;
50 }
51 }
52
53 static int
54 nouveau_mem_alloc(struct nouveau_device *dev, unsigned size, unsigned align,
55 uint32_t flags, struct drm_nouveau_mem_alloc *ma, void **map)
56 {
57 struct nouveau_device_priv *nvdev = nouveau_device(dev);
58 int ret;
59
60 ma->alignment = align;
61 ma->size = size;
62 ma->flags = flags;
63 if (map)
64 ma->flags |= NOUVEAU_MEM_MAPPED;
65 ret = drmCommandWriteRead(nvdev->fd, DRM_NOUVEAU_MEM_ALLOC, ma,
66 sizeof(struct drm_nouveau_mem_alloc));
67 if (ret)
68 return ret;
69
70 if (map) {
71 ret = drmMap(nvdev->fd, ma->map_handle, ma->size, map);
72 if (ret) {
73 *map = NULL;
74 nouveau_mem_free(dev, ma, map);
75 return ret;
76 }
77 }
78
79 return 0;
80 }
81
82 static void
83 nouveau_bo_tmp_del(void *priv)
84 {
85 struct nouveau_resource *r = priv;
86
87 nouveau_fence_ref(NULL, (struct nouveau_fence **)&r->priv);
88 nouveau_resource_free(&r);
89 }
90
91 static unsigned
92 nouveau_bo_tmp_max(struct nouveau_device_priv *nvdev)
93 {
94 struct nouveau_resource *r = nvdev->sa_heap;
95 unsigned max = 0;
96
97 while (r) {
98 if (r->in_use && !nouveau_fence(r->priv)->emitted) {
99 r = r->next;
100 continue;
101 }
102
103 if (max < r->size)
104 max = r->size;
105 r = r->next;
106 }
107
108 return max;
109 }
110
111 static struct nouveau_resource *
112 nouveau_bo_tmp(struct nouveau_channel *chan, unsigned size,
113 struct nouveau_fence *fence)
114 {
115 struct nouveau_device_priv *nvdev = nouveau_device(chan->device);
116 struct nouveau_resource *r = NULL;
117 struct nouveau_fence *ref = NULL;
118
119 if (fence)
120 nouveau_fence_ref(fence, &ref);
121 else
122 nouveau_fence_new(chan, &ref);
123 assert(ref);
124
125 while (nouveau_resource_alloc(nvdev->sa_heap, size, ref, &r)) {
126 if (nouveau_bo_tmp_max(nvdev) < size) {
127 nouveau_fence_ref(NULL, &ref);
128 return NULL;
129 }
130
131 nouveau_fence_flush(chan);
132 }
133 nouveau_fence_signal_cb(ref, nouveau_bo_tmp_del, r);
134
135 return r;
136 }
137
138 int
139 nouveau_bo_init(struct nouveau_device *dev)
140 {
141 struct nouveau_device_priv *nvdev = nouveau_device(dev);
142 int ret;
143
144 ret = nouveau_mem_alloc(dev, 128*1024, 0, NOUVEAU_MEM_AGP |
145 NOUVEAU_MEM_PCI, &nvdev->sa, &nvdev->sa_map);
146 if (ret)
147 return ret;
148
149 ret = nouveau_resource_init(&nvdev->sa_heap, 0, nvdev->sa.size);
150 if (ret) {
151 nouveau_mem_free(dev, &nvdev->sa, &nvdev->sa_map);
152 return ret;
153 }
154
155 return 0;
156 }
157
158 void
159 nouveau_bo_takedown(struct nouveau_device *dev)
160 {
161 struct nouveau_device_priv *nvdev = nouveau_device(dev);
162
163 nouveau_mem_free(dev, &nvdev->sa, &nvdev->sa_map);
164 }
165
166 int
167 nouveau_bo_new(struct nouveau_device *dev, uint32_t flags, int align,
168 int size, struct nouveau_bo **bo)
169 {
170 struct nouveau_bo_priv *nvbo;
171 int ret;
172
173 if (!dev || !bo || *bo)
174 return -EINVAL;
175
176 nvbo = calloc(1, sizeof(struct nouveau_bo_priv));
177 if (!nvbo)
178 return -ENOMEM;
179 nvbo->base.device = dev;
180 nvbo->base.size = size;
181 nvbo->base.handle = bo_to_ptr(nvbo);
182 nvbo->drm.alignment = align;
183 nvbo->refcount = 1;
184
185 if (flags & NOUVEAU_BO_TILED) {
186 nvbo->tiled = 1;
187 if (flags & NOUVEAU_BO_ZTILE)
188 nvbo->tiled |= 2;
189 flags &= ~NOUVEAU_BO_TILED;
190 }
191
192 ret = nouveau_bo_set_status(&nvbo->base, flags);
193 if (ret) {
194 free(nvbo);
195 return ret;
196 }
197
198 *bo = &nvbo->base;
199 return 0;
200 }
201
202 int
203 nouveau_bo_user(struct nouveau_device *dev, void *ptr, int size,
204 struct nouveau_bo **bo)
205 {
206 struct nouveau_bo_priv *nvbo;
207
208 if (!dev || !bo || *bo)
209 return -EINVAL;
210
211 nvbo = calloc(1, sizeof(*nvbo));
212 if (!nvbo)
213 return -ENOMEM;
214 nvbo->base.device = dev;
215
216 nvbo->sysmem = ptr;
217 nvbo->user = 1;
218
219 nvbo->base.size = size;
220 nvbo->base.offset = nvbo->drm.offset;
221 nvbo->base.handle = bo_to_ptr(nvbo);
222 nvbo->refcount = 1;
223 *bo = &nvbo->base;
224 return 0;
225 }
226
227 int
228 nouveau_bo_ref(struct nouveau_device *dev, uint64_t handle,
229 struct nouveau_bo **bo)
230 {
231 struct nouveau_bo_priv *nvbo = ptr_to_bo(handle);
232
233 if (!dev || !bo || *bo)
234 return -EINVAL;
235
236 nvbo->refcount++;
237 *bo = &nvbo->base;
238 return 0;
239 }
240
241 static void
242 nouveau_bo_del_cb(void *priv)
243 {
244 struct nouveau_bo_priv *nvbo = priv;
245
246 nouveau_fence_ref(NULL, &nvbo->fence);
247 nouveau_mem_free(nvbo->base.device, &nvbo->drm, &nvbo->map);
248 if (nvbo->sysmem && !nvbo->user)
249 free(nvbo->sysmem);
250 free(nvbo);
251 }
252
253 void
254 nouveau_bo_del(struct nouveau_bo **bo)
255 {
256 struct nouveau_bo_priv *nvbo;
257
258 if (!bo || !*bo)
259 return;
260 nvbo = nouveau_bo(*bo);
261 *bo = NULL;
262
263 if (--nvbo->refcount)
264 return;
265
266 if (nvbo->pending)
267 nouveau_pushbuf_flush(nvbo->pending->channel, 0);
268
269 if (nvbo->fence)
270 nouveau_fence_signal_cb(nvbo->fence, nouveau_bo_del_cb, nvbo);
271 else
272 nouveau_bo_del_cb(nvbo);
273 }
274
275 int
276 nouveau_bo_busy(struct nouveau_bo *bo, uint32_t flags)
277 {
278 struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
279 struct nouveau_fence *fence;
280
281 if (!nvbo)
282 return -EINVAL;
283
284 /* If the buffer is pending it must be busy, unless
285 * both are RD, in which case we can allow access */
286 if (nvbo->pending) {
287 if ((nvbo->pending->flags & NOUVEAU_BO_RDWR) == NOUVEAU_BO_RD &&
288 (flags & NOUVEAU_BO_RDWR) == NOUVEAU_BO_RD)
289 return 0;
290 else
291 return 1;
292 }
293
294 if (flags & NOUVEAU_BO_WR)
295 fence = nvbo->fence;
296 else
297 fence = nvbo->wr_fence;
298
299 /* If the buffer is not pending and doesn't have a fence
300 * that conflicts with our flags then it can't be busy
301 */
302 if (!fence)
303 return 0;
304 else
305 /* If the fence is signalled the buffer is not busy, else is busy */
306 return !nouveau_fence(fence)->signalled;
307 }
308
309 int
310 nouveau_bo_map(struct nouveau_bo *bo, uint32_t flags)
311 {
312 struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
313
314 if (!nvbo)
315 return -EINVAL;
316
317 if (nvbo->pending &&
318 (nvbo->pending->flags & NOUVEAU_BO_WR || flags & NOUVEAU_BO_WR)) {
319 nouveau_pushbuf_flush(nvbo->pending->channel, 0);
320 }
321
322 if (flags & NOUVEAU_BO_WR)
323 nouveau_fence_wait(&nvbo->fence);
324 else
325 nouveau_fence_wait(&nvbo->wr_fence);
326
327 if (nvbo->sysmem)
328 bo->map = nvbo->sysmem;
329 else
330 bo->map = nvbo->map;
331 return 0;
332 }
333
334 void
335 nouveau_bo_unmap(struct nouveau_bo *bo)
336 {
337 bo->map = NULL;
338 }
339
340 static int
341 nouveau_bo_upload(struct nouveau_bo_priv *nvbo)
342 {
343 if (nvbo->fence)
344 nouveau_fence_wait(&nvbo->fence);
345 memcpy(nvbo->map, nvbo->sysmem, nvbo->drm.size);
346 return 0;
347 }
348
349 int
350 nouveau_bo_set_status(struct nouveau_bo *bo, uint32_t flags)
351 {
352 struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
353 struct drm_nouveau_mem_alloc new;
354 void *new_map = NULL, *new_sysmem = NULL;
355 unsigned new_flags = 0, ret;
356
357 assert(!bo->map);
358
359 /* Check current memtype vs requested, if they match do nothing */
360 if ((nvbo->drm.flags & NOUVEAU_MEM_FB) && (flags & NOUVEAU_BO_VRAM))
361 return 0;
362 if ((nvbo->drm.flags & (NOUVEAU_MEM_AGP | NOUVEAU_MEM_PCI)) &&
363 (flags & NOUVEAU_BO_GART))
364 return 0;
365 if (nvbo->drm.size == 0 && nvbo->sysmem && (flags & NOUVEAU_BO_LOCAL))
366 return 0;
367
368 memset(&new, 0x00, sizeof(new));
369
370 /* Allocate new memory */
371 if (flags & NOUVEAU_BO_VRAM)
372 new_flags |= NOUVEAU_MEM_FB;
373 else
374 if (flags & NOUVEAU_BO_GART)
375 new_flags |= (NOUVEAU_MEM_AGP | NOUVEAU_MEM_PCI);
376
377 if (nvbo->tiled && flags) {
378 new_flags |= NOUVEAU_MEM_TILE;
379 if (nvbo->tiled & 2)
380 new_flags |= NOUVEAU_MEM_TILE_ZETA;
381 }
382
383 if (new_flags) {
384 ret = nouveau_mem_alloc(bo->device, bo->size,
385 nvbo->drm.alignment, new_flags,
386 &new, &new_map);
387 if (ret)
388 return ret;
389 } else
390 if (!nvbo->user) {
391 new_sysmem = malloc(bo->size);
392 }
393
394 /* Copy old -> new */
395 /*XXX: use M2MF */
396 if (nvbo->sysmem || nvbo->map) {
397 struct nouveau_pushbuf_bo *pbo = nvbo->pending;
398 nvbo->pending = NULL;
399 nouveau_bo_map(bo, NOUVEAU_BO_RD);
400 memcpy(new_map, bo->map, bo->size);
401 nouveau_bo_unmap(bo);
402 nvbo->pending = pbo;
403 }
404
405 /* Free old memory */
406 if (nvbo->fence)
407 nouveau_fence_wait(&nvbo->fence);
408 nouveau_mem_free(bo->device, &nvbo->drm, &nvbo->map);
409 if (nvbo->sysmem && !nvbo->user)
410 free(nvbo->sysmem);
411
412 nvbo->drm = new;
413 nvbo->map = new_map;
414 if (!nvbo->user)
415 nvbo->sysmem = new_sysmem;
416 bo->flags = flags;
417 bo->offset = nvbo->drm.offset;
418 return 0;
419 }
420
421 static int
422 nouveau_bo_validate_user(struct nouveau_channel *chan, struct nouveau_bo *bo,
423 struct nouveau_fence *fence, uint32_t flags)
424 {
425 struct nouveau_channel_priv *nvchan = nouveau_channel(chan);
426 struct nouveau_device_priv *nvdev = nouveau_device(chan->device);
427 struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
428 struct nouveau_resource *r;
429
430 if (nvchan->user_charge + bo->size > nvdev->sa.size)
431 return 1;
432
433 if (!(flags & NOUVEAU_BO_GART))
434 return 1;
435
436 r = nouveau_bo_tmp(chan, bo->size, fence);
437 if (!r)
438 return 1;
439 nvchan->user_charge += bo->size;
440
441 memcpy(nvdev->sa_map + r->start, nvbo->sysmem, bo->size);
442
443 nvbo->offset = nvdev->sa.offset + r->start;
444 nvbo->flags = NOUVEAU_BO_GART;
445 return 0;
446 }
447
448 static int
449 nouveau_bo_validate_bo(struct nouveau_channel *chan, struct nouveau_bo *bo,
450 struct nouveau_fence *fence, uint32_t flags)
451 {
452 struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
453 int ret;
454
455 ret = nouveau_bo_set_status(bo, flags);
456 if (ret) {
457 nouveau_fence_flush(chan);
458
459 ret = nouveau_bo_set_status(bo, flags);
460 if (ret)
461 return ret;
462 }
463
464 if (nvbo->user)
465 nouveau_bo_upload(nvbo);
466
467 nvbo->offset = nvbo->drm.offset;
468 if (nvbo->drm.flags & (NOUVEAU_MEM_AGP | NOUVEAU_MEM_PCI))
469 nvbo->flags = NOUVEAU_BO_GART;
470 else
471 nvbo->flags = NOUVEAU_BO_VRAM;
472
473 return 0;
474 }
475
476 int
477 nouveau_bo_validate(struct nouveau_channel *chan, struct nouveau_bo *bo,
478 uint32_t flags)
479 {
480 struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
481 struct nouveau_fence *fence = nouveau_pushbuf(chan->pushbuf)->fence;
482 int ret;
483
484 assert(bo->map == NULL);
485
486 if (nvbo->user) {
487 ret = nouveau_bo_validate_user(chan, bo, fence, flags);
488 if (ret) {
489 ret = nouveau_bo_validate_bo(chan, bo, fence, flags);
490 if (ret)
491 return ret;
492 }
493 } else {
494 ret = nouveau_bo_validate_bo(chan, bo, fence, flags);
495 if (ret)
496 return ret;
497 }
498
499 if (flags & NOUVEAU_BO_WR)
500 nouveau_fence_ref(fence, &nvbo->wr_fence);
501 nouveau_fence_ref(fence, &nvbo->fence);
502 return 0;
503 }
504